Diffstat (limited to 'drivers/cpuidle/cpuidle.c')
-rw-r--r-- | drivers/cpuidle/cpuidle.c | 295
1 file changed, 295 insertions, 0 deletions
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
new file mode 100644
index 000000000000..fdf4106b817b
--- /dev/null
+++ b/drivers/cpuidle/cpuidle.c
@@ -0,0 +1,295 @@
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/latency.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
EXPORT_PER_CPU_SYMBOL_GPL(cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);

static int enabled_devices;

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		if (pm_idle_old)
			pm_idle_old();
		else
			local_irq_enable();
		return;
	}

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched())
		return;
	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_residency = target_state->enter(dev, target_state);
	dev->last_state = target_state;
	target_state->time += dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
}
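
[Note: the contract the loop above relies on is that the governor's select() picks an
index into dev->states[] and the state's enter() callback does the actual idling,
returning the time spent in the state in microseconds, which the core folds into the
per-state statistics. A minimal sketch of a driver-side enter() callback matching that
contract; my_idle_halt() is a hypothetical stand-in, not part of this patch:

	static int my_state_enter(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
	{
		ktime_t t1 = ktime_get();

		local_irq_enable();	/* let the wakeup interrupt in */
		my_idle_halt();		/* hypothetical safe_halt()-style idle */

		/* report residency in microseconds, as the core expects */
		return (int) ktime_to_us(ktime_sub(ktime_get(), t1));
	}
]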

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		pm_idle = cpuidle_idle_call;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices && (pm_idle != pm_idle_old)) {
		pm_idle = pm_idle_old;
		cpu_idle_wait();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
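
[Note: this pause/resume pair is the bracket under which external code is expected to
enable or disable devices, per the kernel-doc below: it takes cpuidle_lock and parks
every CPU back in the old pm_idle handler via cpu_idle_wait(), so a device's state
table can be rewritten safely. A sketch of the intended calling pattern, assuming the
caller already holds a device pointer dev:

	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	/* ... repopulate dev->states[] / dev->state_count ... */
	cpuidle_enable_device(dev);
	cpuidle_resume_and_unlock();
]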

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states[i].usage = 0;
		dev->states[i].time = 0;
	}
	dev->last_residency = 0;
	dev->last_state = NULL;

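	/*
	 * make the resets above visible before the ->enabled store below;
	 * pairs with the lock-free ->enabled read in cpuidle_idle_call()
	 */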
	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

	if (!sys_dev)
		return -EINVAL;
	if (!try_module_get(cpuidle_curr_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	mutex_lock(&cpuidle_lock);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(sys_dev))) {
		mutex_unlock(&cpuidle_lock);
		module_put(cpuidle_curr_driver->owner);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
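
[Note: for illustration, roughly how a driver of this era hands a CPU to the core: it
fills in dev->cpu and the states[] table, then calls cpuidle_register_device(), which
wires up sysfs, enables the device, and installs the idle handler. Every name prefixed
my_ is a hypothetical stand-in, not part of this patch:

	static DEFINE_PER_CPU(struct cpuidle_device, my_cpuidle_dev);

	static int __init my_driver_setup_cpu(unsigned int cpu)
	{
		struct cpuidle_device *dev = &per_cpu(my_cpuidle_dev, cpu);

		dev->cpu = cpu;
		dev->state_count = 1;
		strcpy(dev->states[0].name, "C1");	/* fits CPUIDLE_NAME_LEN */
		dev->states[0].exit_latency = 1;	/* microseconds */
		dev->states[0].enter = my_state_enter;	/* sketched earlier */

		return cpuidle_register_device(dev);
	}
]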

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(sys_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_curr_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

#define latency_notifier_init(x) do { register_latency_notifier(x); } while (0)

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
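
[Note: the notifier fires when some other part of the kernel tightens the system-wide
latency bound tracked by <linux/latency.h>; the cross-CPU IPI merely kicks every
processor out of its current C-state so the governors re-select with the new bound in
view. A hedged sketch of the producer side, using the set/remove pair that interface
provided at the time (the "my_driver" identifier is illustrative):

	/* a driver that briefly cannot tolerate wakeups slower than 100us */
	set_acceptable_latency("my_driver", 100);
	/* ... latency-critical window: governors now avoid deep C-states ... */
	remove_acceptable_latency("my_driver");
]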

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	pm_idle_old = pm_idle;

	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

core_initcall(cpuidle_init);