Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--  drivers/cpuidle/Kconfig             |  20
-rw-r--r--  drivers/cpuidle/Makefile            |   5
-rw-r--r--  drivers/cpuidle/cpuidle.c           | 295
-rw-r--r--  drivers/cpuidle/cpuidle.h           |  33
-rw-r--r--  drivers/cpuidle/driver.c            |  56
-rw-r--r--  drivers/cpuidle/governor.c          | 141
-rw-r--r--  drivers/cpuidle/governors/Makefile  |   6
-rw-r--r--  drivers/cpuidle/governors/ladder.c  | 166
-rw-r--r--  drivers/cpuidle/governors/menu.c    | 137
-rw-r--r--  drivers/cpuidle/sysfs.c             | 361
10 files changed, 1220 insertions, 0 deletions
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
new file mode 100644
index 000000000000..3bed4127d4ad
--- /dev/null
+++ b/drivers/cpuidle/Kconfig
@@ -0,0 +1,20 @@
1 | |||
2 | config CPU_IDLE | ||
3 | bool "CPU idle PM support" | ||
4 | help | ||
5 | CPU idle is a generic framework for supporting software-controlled | ||
6 | idle processor power management. It includes modular cross-platform | ||
7 | governors that can be swapped during runtime. | ||
8 | |||
9 | If you're using a mobile platform that supports CPU idle PM (e.g. | ||
10 | an ACPI-capable notebook), you should say Y here. | ||
11 | |||
12 | config CPU_IDLE_GOV_LADDER | ||
13 | bool | ||
14 | depends on CPU_IDLE | ||
15 | default y | ||
16 | |||
17 | config CPU_IDLE_GOV_MENU | ||
18 | bool | ||
19 | depends on CPU_IDLE && NO_HZ | ||
20 | default y | ||
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
new file mode 100644
index 000000000000..5634f88379df
--- /dev/null
+++ b/drivers/cpuidle/Makefile
@@ -0,0 +1,5 @@
1 | # | ||
2 | # Makefile for cpuidle. | ||
3 | # | ||
4 | |||
5 | obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ | ||
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
new file mode 100644
index 000000000000..fdf4106b817b
--- /dev/null
+++ b/drivers/cpuidle/cpuidle.c
@@ -0,0 +1,295 @@
1 | /* | ||
2 | * cpuidle.c - core cpuidle infrastructure | ||
3 | * | ||
4 | * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
5 | * Shaohua Li <shaohua.li@intel.com> | ||
6 | * Adam Belay <abelay@novell.com> | ||
7 | * | ||
8 | * This code is licenced under the GPL. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/mutex.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/notifier.h> | ||
15 | #include <linux/latency.h> | ||
16 | #include <linux/cpu.h> | ||
17 | #include <linux/cpuidle.h> | ||
18 | |||
19 | #include "cpuidle.h" | ||
20 | |||
21 | DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | ||
22 | EXPORT_PER_CPU_SYMBOL_GPL(cpuidle_devices); | ||
23 | |||
24 | DEFINE_MUTEX(cpuidle_lock); | ||
25 | LIST_HEAD(cpuidle_detected_devices); | ||
26 | static void (*pm_idle_old)(void); | ||
27 | |||
28 | static int enabled_devices; | ||
29 | |||
30 | /** | ||
31 | * cpuidle_idle_call - the main idle loop | ||
32 | * | ||
33 | * NOTE: no locks or semaphores should be used here | ||
34 | */ | ||
35 | static void cpuidle_idle_call(void) | ||
36 | { | ||
37 | struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices); | ||
38 | struct cpuidle_state *target_state; | ||
39 | int next_state; | ||
40 | |||
41 | /* check if the device is ready */ | ||
42 | if (!dev || !dev->enabled) { | ||
43 | if (pm_idle_old) | ||
44 | pm_idle_old(); | ||
45 | else | ||
46 | local_irq_enable(); | ||
47 | return; | ||
48 | } | ||
49 | |||
50 | /* ask the governor for the next state */ | ||
51 | next_state = cpuidle_curr_governor->select(dev); | ||
52 | if (need_resched()) | ||
53 | return; | ||
54 | target_state = &dev->states[next_state]; | ||
55 | |||
56 | /* enter the state and update stats */ | ||
57 | dev->last_residency = target_state->enter(dev, target_state); | ||
58 | dev->last_state = target_state; | ||
59 | target_state->time += dev->last_residency; | ||
60 | target_state->usage++; | ||
61 | |||
62 | /* give the governor an opportunity to reflect on the outcome */ | ||
63 | if (cpuidle_curr_governor->reflect) | ||
64 | cpuidle_curr_governor->reflect(dev); | ||
65 | } | ||
66 | |||
67 | /** | ||
68 | * cpuidle_install_idle_handler - installs the cpuidle idle loop handler | ||
69 | */ | ||
70 | void cpuidle_install_idle_handler(void) | ||
71 | { | ||
72 | if (enabled_devices && (pm_idle != cpuidle_idle_call)) { | ||
73 | /* Make sure all changes finished before we switch to new idle */ | ||
74 | smp_wmb(); | ||
75 | pm_idle = cpuidle_idle_call; | ||
76 | } | ||
77 | } | ||
78 | |||
79 | /** | ||
80 | * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler | ||
81 | */ | ||
82 | void cpuidle_uninstall_idle_handler(void) | ||
83 | { | ||
84 | if (enabled_devices && (pm_idle != pm_idle_old)) { | ||
85 | pm_idle = pm_idle_old; | ||
86 | cpu_idle_wait(); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * cpuidle_pause_and_lock - temporarily disables CPUIDLE | ||
92 | */ | ||
93 | void cpuidle_pause_and_lock(void) | ||
94 | { | ||
95 | mutex_lock(&cpuidle_lock); | ||
96 | cpuidle_uninstall_idle_handler(); | ||
97 | } | ||
98 | |||
99 | EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock); | ||
100 | |||
101 | /** | ||
102 | * cpuidle_resume_and_unlock - resumes CPUIDLE operation | ||
103 | */ | ||
104 | void cpuidle_resume_and_unlock(void) | ||
105 | { | ||
106 | cpuidle_install_idle_handler(); | ||
107 | mutex_unlock(&cpuidle_lock); | ||
108 | } | ||
109 | |||
110 | EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); | ||
111 | |||
112 | /** | ||
113 | * cpuidle_enable_device - enables idle PM for a CPU | ||
114 | * @dev: the CPU | ||
115 | * | ||
116 | * This function must be called between cpuidle_pause_and_lock and | ||
117 | * cpuidle_resume_and_unlock when used externally. | ||
118 | */ | ||
119 | int cpuidle_enable_device(struct cpuidle_device *dev) | ||
120 | { | ||
121 | int ret, i; | ||
122 | |||
123 | if (dev->enabled) | ||
124 | return 0; | ||
125 | if (!cpuidle_curr_driver || !cpuidle_curr_governor) | ||
126 | return -EIO; | ||
127 | if (!dev->state_count) | ||
128 | return -EINVAL; | ||
129 | |||
130 | if ((ret = cpuidle_add_state_sysfs(dev))) | ||
131 | return ret; | ||
132 | |||
133 | if (cpuidle_curr_governor->enable && | ||
134 | (ret = cpuidle_curr_governor->enable(dev))) | ||
135 | goto fail_sysfs; | ||
136 | |||
137 | for (i = 0; i < dev->state_count; i++) { | ||
138 | dev->states[i].usage = 0; | ||
139 | dev->states[i].time = 0; | ||
140 | } | ||
141 | dev->last_residency = 0; | ||
142 | dev->last_state = NULL; | ||
143 | |||
144 | smp_wmb(); | ||
145 | |||
146 | dev->enabled = 1; | ||
147 | |||
148 | enabled_devices++; | ||
149 | return 0; | ||
150 | |||
151 | fail_sysfs: | ||
152 | cpuidle_remove_state_sysfs(dev); | ||
153 | |||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | EXPORT_SYMBOL_GPL(cpuidle_enable_device); | ||
158 | |||
159 | /** | ||
160 | * cpuidle_disable_device - disables idle PM for a CPU | ||
161 | * @dev: the CPU | ||
162 | * | ||
163 | * This function must be called between cpuidle_pause_and_lock and | ||
164 | * cpuidle_resume_and_unlock when used externally. | ||
165 | */ | ||
166 | void cpuidle_disable_device(struct cpuidle_device *dev) | ||
167 | { | ||
168 | if (!dev->enabled) | ||
169 | return; | ||
170 | if (!cpuidle_curr_driver || !cpuidle_curr_governor) | ||
171 | return; | ||
172 | |||
173 | dev->enabled = 0; | ||
174 | |||
175 | if (cpuidle_curr_governor->disable) | ||
176 | cpuidle_curr_governor->disable(dev); | ||
177 | |||
178 | cpuidle_remove_state_sysfs(dev); | ||
179 | enabled_devices--; | ||
180 | } | ||
181 | |||
182 | EXPORT_SYMBOL_GPL(cpuidle_disable_device); | ||
183 | |||
184 | /** | ||
185 | * cpuidle_register_device - registers a CPU's idle PM feature | ||
186 | * @dev: the cpu | ||
187 | */ | ||
188 | int cpuidle_register_device(struct cpuidle_device *dev) | ||
189 | { | ||
190 | int ret; | ||
191 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); | ||
192 | |||
193 | if (!sys_dev) | ||
194 | return -EINVAL; | ||
195 | if (!try_module_get(cpuidle_curr_driver->owner)) | ||
196 | return -EINVAL; | ||
197 | |||
198 | init_completion(&dev->kobj_unregister); | ||
199 | |||
200 | mutex_lock(&cpuidle_lock); | ||
201 | |||
202 | per_cpu(cpuidle_devices, dev->cpu) = dev; | ||
203 | list_add(&dev->device_list, &cpuidle_detected_devices); | ||
204 | if ((ret = cpuidle_add_sysfs(sys_dev))) { | ||
205 | mutex_unlock(&cpuidle_lock); | ||
206 | module_put(cpuidle_curr_driver->owner); | ||
207 | return ret; | ||
208 | } | ||
209 | |||
210 | cpuidle_enable_device(dev); | ||
211 | cpuidle_install_idle_handler(); | ||
212 | |||
213 | mutex_unlock(&cpuidle_lock); | ||
214 | |||
215 | return 0; | ||
216 | |||
217 | } | ||
218 | |||
219 | EXPORT_SYMBOL_GPL(cpuidle_register_device); | ||
220 | |||
221 | /** | ||
222 | * cpuidle_unregister_device - unregisters a CPU's idle PM feature | ||
223 | * @dev: the cpu | ||
224 | */ | ||
225 | void cpuidle_unregister_device(struct cpuidle_device *dev) | ||
226 | { | ||
227 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); | ||
228 | |||
229 | cpuidle_pause_and_lock(); | ||
230 | |||
231 | cpuidle_disable_device(dev); | ||
232 | |||
233 | cpuidle_remove_sysfs(sys_dev); | ||
234 | list_del(&dev->device_list); | ||
235 | wait_for_completion(&dev->kobj_unregister); | ||
236 | per_cpu(cpuidle_devices, dev->cpu) = NULL; | ||
237 | |||
238 | cpuidle_resume_and_unlock(); | ||
239 | |||
240 | module_put(cpuidle_curr_driver->owner); | ||
241 | } | ||
242 | |||
243 | EXPORT_SYMBOL_GPL(cpuidle_unregister_device); | ||
244 | |||
245 | #ifdef CONFIG_SMP | ||
246 | |||
247 | static void smp_callback(void *v) | ||
248 | { | ||
249 | /* we already woke the CPU up, nothing more to do */ | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * This function gets called when a part of the kernel has a new latency | ||
254 | * requirement. This means we need to get all processors out of their C-state, | ||
255 | * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that | ||
256 | * wakes them all right up. | ||
257 | */ | ||
258 | static int cpuidle_latency_notify(struct notifier_block *b, | ||
259 | unsigned long l, void *v) | ||
260 | { | ||
261 | smp_call_function(smp_callback, NULL, 0, 1); | ||
262 | return NOTIFY_OK; | ||
263 | } | ||
264 | |||
265 | static struct notifier_block cpuidle_latency_notifier = { | ||
266 | .notifier_call = cpuidle_latency_notify, | ||
267 | }; | ||
268 | |||
269 | #define latency_notifier_init(x) do { register_latency_notifier(x); } while (0) | ||
270 | |||
271 | #else /* CONFIG_SMP */ | ||
272 | |||
273 | #define latency_notifier_init(x) do { } while (0) | ||
274 | |||
275 | #endif /* CONFIG_SMP */ | ||
276 | |||
277 | /** | ||
278 | * cpuidle_init - core initializer | ||
279 | */ | ||
280 | static int __init cpuidle_init(void) | ||
281 | { | ||
282 | int ret; | ||
283 | |||
284 | pm_idle_old = pm_idle; | ||
285 | |||
286 | ret = cpuidle_add_class_sysfs(&cpu_sysdev_class); | ||
287 | if (ret) | ||
288 | return ret; | ||
289 | |||
290 | latency_notifier_init(&cpuidle_latency_notifier); | ||
291 | |||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | core_initcall(cpuidle_init); | ||
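The file above defines the whole registration API a platform idle driver uses: cpuidle_register_driver() claims the single driver slot, and cpuidle_register_device() wires a per-CPU device into the current governor and sysfs. As a rough illustration only (not part of this patch), a minimal single-state hookup might look like the sketch below; the my_* identifiers are invented, and the cpuidle_state/cpuidle_device field names are assumed from their use in cpuidle.c and the governors below.

/* Hypothetical example -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>

static struct cpuidle_driver my_idle_driver = {
	.name  = "my_idle",
	.owner = THIS_MODULE,
};

static DEFINE_PER_CPU(struct cpuidle_device, my_idle_devices);

/*
 * enter() is called with interrupts disabled and must return with them
 * enabled; the return value is the residency in microseconds.  Returning 0
 * is fine here because the state does not set CPUIDLE_FLAG_TIME_VALID.
 */
static int my_enter_c1(struct cpuidle_device *dev, struct cpuidle_state *st)
{
	/* the architecture-specific low-power instruction would go here */
	local_irq_enable();
	return 0;
}

static int __init my_idle_init(void)
{
	int cpu, ret;

	ret = cpuidle_register_driver(&my_idle_driver);
	if (ret)
		return ret;

	for_each_online_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(my_idle_devices, cpu);
		struct cpuidle_state *st = &dev->states[0];

		dev->cpu = cpu;
		dev->state_count = 1;

		snprintf(st->name, CPUIDLE_NAME_LEN, "C1");
		st->exit_latency = 1;		/* usec */
		st->target_residency = 1;	/* usec */
		st->flags = 0;
		st->enter = my_enter_c1;

		/* enables the device and installs the cpuidle idle handler */
		ret = cpuidle_register_device(dev);
		if (ret) {
			/* unwinding of already-registered CPUs omitted */
			cpuidle_unregister_driver(&my_idle_driver);
			return ret;
		}
	}
	return 0;
}
device_initcall(my_idle_init);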
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
new file mode 100644
index 000000000000..9476ba33ee2c
--- /dev/null
+++ b/drivers/cpuidle/cpuidle.h
@@ -0,0 +1,33 @@
1 | /* | ||
2 | * cpuidle.h - The internal header file | ||
3 | */ | ||
4 | |||
5 | #ifndef __DRIVER_CPUIDLE_H | ||
6 | #define __DRIVER_CPUIDLE_H | ||
7 | |||
8 | #include <linux/sysdev.h> | ||
9 | |||
10 | /* For internal use only */ | ||
11 | extern struct cpuidle_governor *cpuidle_curr_governor; | ||
12 | extern struct cpuidle_driver *cpuidle_curr_driver; | ||
13 | extern struct list_head cpuidle_governors; | ||
14 | extern struct list_head cpuidle_detected_devices; | ||
15 | extern struct mutex cpuidle_lock; | ||
16 | extern spinlock_t cpuidle_driver_lock; | ||
17 | |||
18 | /* idle loop */ | ||
19 | extern void cpuidle_install_idle_handler(void); | ||
20 | extern void cpuidle_uninstall_idle_handler(void); | ||
21 | |||
22 | /* governors */ | ||
23 | extern int cpuidle_switch_governor(struct cpuidle_governor *gov); | ||
24 | |||
25 | /* sysfs */ | ||
26 | extern int cpuidle_add_class_sysfs(struct sysdev_class *cls); | ||
27 | extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls); | ||
28 | extern int cpuidle_add_state_sysfs(struct cpuidle_device *device); | ||
29 | extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device); | ||
30 | extern int cpuidle_add_sysfs(struct sys_device *sysdev); | ||
31 | extern void cpuidle_remove_sysfs(struct sys_device *sysdev); | ||
32 | |||
33 | #endif /* __DRIVER_CPUIDLE_H */ | ||
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
new file mode 100644
index 000000000000..2257004fe33d
--- /dev/null
+++ b/drivers/cpuidle/driver.c
@@ -0,0 +1,56 @@
1 | /* | ||
2 | * driver.c - driver support | ||
3 | * | ||
4 | * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
5 | * Shaohua Li <shaohua.li@intel.com> | ||
6 | * Adam Belay <abelay@novell.com> | ||
7 | * | ||
8 | * This code is licenced under the GPL. | ||
9 | */ | ||
10 | |||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/cpuidle.h> | ||
14 | |||
15 | #include "cpuidle.h" | ||
16 | |||
17 | struct cpuidle_driver *cpuidle_curr_driver; | ||
18 | DEFINE_SPINLOCK(cpuidle_driver_lock); | ||
19 | |||
20 | /** | ||
21 | * cpuidle_register_driver - registers a driver | ||
22 | * @drv: the driver | ||
23 | */ | ||
24 | int cpuidle_register_driver(struct cpuidle_driver *drv) | ||
25 | { | ||
26 | if (!drv) | ||
27 | return -EINVAL; | ||
28 | |||
29 | spin_lock(&cpuidle_driver_lock); | ||
30 | if (cpuidle_curr_driver) { | ||
31 | spin_unlock(&cpuidle_driver_lock); | ||
32 | return -EBUSY; | ||
33 | } | ||
34 | cpuidle_curr_driver = drv; | ||
35 | spin_unlock(&cpuidle_driver_lock); | ||
36 | |||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | EXPORT_SYMBOL_GPL(cpuidle_register_driver); | ||
41 | |||
42 | /** | ||
43 | * cpuidle_unregister_driver - unregisters a driver | ||
44 | * @drv: the driver | ||
45 | */ | ||
46 | void cpuidle_unregister_driver(struct cpuidle_driver *drv) | ||
47 | { | ||
48 | if (!drv) | ||
49 | return; | ||
50 | |||
51 | spin_lock(&cpuidle_driver_lock); | ||
52 | cpuidle_curr_driver = NULL; | ||
53 | spin_unlock(&cpuidle_driver_lock); | ||
54 | } | ||
55 | |||
56 | EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); | ||
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
new file mode 100644
index 000000000000..bb699cb2dc5a
--- /dev/null
+++ b/drivers/cpuidle/governor.c
@@ -0,0 +1,141 @@
1 | /* | ||
2 | * governor.c - governor support | ||
3 | * | ||
4 | * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
5 | * Shaohua Li <shaohua.li@intel.com> | ||
6 | * Adam Belay <abelay@novell.com> | ||
7 | * | ||
8 | * This code is licenced under the GPL. | ||
9 | */ | ||
10 | |||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/cpuidle.h> | ||
14 | |||
15 | #include "cpuidle.h" | ||
16 | |||
17 | LIST_HEAD(cpuidle_governors); | ||
18 | struct cpuidle_governor *cpuidle_curr_governor; | ||
19 | |||
20 | /** | ||
21 | * __cpuidle_find_governor - finds a governor of the specified name | ||
22 | * @str: the name | ||
23 | * | ||
24 | * Must be called with cpuidle_lock acquired. | ||
25 | */ | ||
26 | static struct cpuidle_governor * __cpuidle_find_governor(const char *str) | ||
27 | { | ||
28 | struct cpuidle_governor *gov; | ||
29 | |||
30 | list_for_each_entry(gov, &cpuidle_governors, governor_list) | ||
31 | if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN)) | ||
32 | return gov; | ||
33 | |||
34 | return NULL; | ||
35 | } | ||
36 | |||
37 | /** | ||
38 | * cpuidle_switch_governor - changes the governor | ||
39 | * @gov: the new target governor | ||
40 | * | ||
41 | * NOTE: "gov" can be NULL to specify disabled | ||
42 | * Must be called with cpuidle_lock acquired. | ||
43 | */ | ||
44 | int cpuidle_switch_governor(struct cpuidle_governor *gov) | ||
45 | { | ||
46 | struct cpuidle_device *dev; | ||
47 | |||
48 | if (gov == cpuidle_curr_governor) | ||
49 | return 0; | ||
50 | |||
51 | cpuidle_uninstall_idle_handler(); | ||
52 | |||
53 | if (cpuidle_curr_governor) { | ||
54 | list_for_each_entry(dev, &cpuidle_detected_devices, device_list) | ||
55 | cpuidle_disable_device(dev); | ||
56 | module_put(cpuidle_curr_governor->owner); | ||
57 | } | ||
58 | |||
59 | cpuidle_curr_governor = gov; | ||
60 | |||
61 | if (gov) { | ||
62 | if (!try_module_get(cpuidle_curr_governor->owner)) | ||
63 | return -EINVAL; | ||
64 | list_for_each_entry(dev, &cpuidle_detected_devices, device_list) | ||
65 | cpuidle_enable_device(dev); | ||
66 | cpuidle_install_idle_handler(); | ||
67 | printk(KERN_INFO "cpuidle: using governor %s\n", gov->name); | ||
68 | } | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * cpuidle_register_governor - registers a governor | ||
75 | * @gov: the governor | ||
76 | */ | ||
77 | int cpuidle_register_governor(struct cpuidle_governor *gov) | ||
78 | { | ||
79 | int ret = -EEXIST; | ||
80 | |||
81 | if (!gov || !gov->select) | ||
82 | return -EINVAL; | ||
83 | |||
84 | mutex_lock(&cpuidle_lock); | ||
85 | if (__cpuidle_find_governor(gov->name) == NULL) { | ||
86 | ret = 0; | ||
87 | list_add_tail(&gov->governor_list, &cpuidle_governors); | ||
88 | if (!cpuidle_curr_governor || | ||
89 | cpuidle_curr_governor->rating < gov->rating) | ||
90 | cpuidle_switch_governor(gov); | ||
91 | } | ||
92 | mutex_unlock(&cpuidle_lock); | ||
93 | |||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | EXPORT_SYMBOL_GPL(cpuidle_register_governor); | ||
98 | |||
99 | /** | ||
100 | * cpuidle_replace_governor - find a replacement governor | ||
101 | * @exclude_rating: the rating that will be skipped while looking for | ||
102 | * new governor. | ||
103 | */ | ||
104 | static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating) | ||
105 | { | ||
106 | struct cpuidle_governor *gov; | ||
107 | struct cpuidle_governor *ret_gov = NULL; | ||
108 | unsigned int max_rating = 0; | ||
109 | |||
110 | list_for_each_entry(gov, &cpuidle_governors, governor_list) { | ||
111 | if (gov->rating == exclude_rating) | ||
112 | continue; | ||
113 | if (gov->rating > max_rating) { | ||
114 | max_rating = gov->rating; | ||
115 | ret_gov = gov; | ||
116 | } | ||
117 | } | ||
118 | |||
119 | return ret_gov; | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * cpuidle_unregister_governor - unregisters a governor | ||
124 | * @gov: the governor | ||
125 | */ | ||
126 | void cpuidle_unregister_governor(struct cpuidle_governor *gov) | ||
127 | { | ||
128 | if (!gov) | ||
129 | return; | ||
130 | |||
131 | mutex_lock(&cpuidle_lock); | ||
132 | if (gov == cpuidle_curr_governor) { | ||
133 | struct cpuidle_governor *new_gov; | ||
134 | new_gov = cpuidle_replace_governor(gov->rating); | ||
135 | cpuidle_switch_governor(new_gov); | ||
136 | } | ||
137 | list_del(&gov->governor_list); | ||
138 | mutex_unlock(&cpuidle_lock); | ||
139 | } | ||
140 | |||
141 | EXPORT_SYMBOL_GPL(cpuidle_unregister_governor); | ||
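cpuidle_register_governor() only insists on a select() hook; enable(), disable() and reflect() are optional, and the highest-rated registered governor becomes the active one. Purely as an illustration (not part of this patch), the smallest workable governor could look like the sketch below; the "noop" name and the rating of 1 are invented values chosen to sit below ladder (10) and menu (20):

/* Hypothetical example -- not part of this patch. */
#include <linux/module.h>
#include <linux/cpuidle.h>

/* always pick the shallowest state */
static int noop_select(struct cpuidle_device *dev)
{
	return 0;
}

static struct cpuidle_governor noop_governor = {
	.name   = "noop",
	.rating = 1,
	.select = noop_select,
	.owner  = THIS_MODULE,
};

static int __init init_noop(void)
{
	return cpuidle_register_governor(&noop_governor);
}

MODULE_LICENSE("GPL");
module_init(init_noop);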
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
new file mode 100644
index 000000000000..1b512722689f
--- /dev/null
+++ b/drivers/cpuidle/governors/Makefile
@@ -0,0 +1,6 @@
1 | # | ||
2 | # Makefile for cpuidle governors. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o | ||
6 | obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o | ||
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
new file mode 100644
index 000000000000..eb666ecae7c9
--- /dev/null
+++ b/drivers/cpuidle/governors/ladder.c
@@ -0,0 +1,166 @@
1 | /* | ||
2 | * ladder.c - the residency ladder algorithm | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> | ||
5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | ||
6 | * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> | ||
7 | * | ||
8 | * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
9 | * Shaohua Li <shaohua.li@intel.com> | ||
10 | * Adam Belay <abelay@novell.com> | ||
11 | * | ||
12 | * This code is licenced under the GPL. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/cpuidle.h> | ||
17 | #include <linux/latency.h> | ||
18 | #include <linux/moduleparam.h> | ||
19 | #include <linux/jiffies.h> | ||
20 | |||
21 | #include <asm/io.h> | ||
22 | #include <asm/uaccess.h> | ||
23 | |||
24 | #define PROMOTION_COUNT 4 | ||
25 | #define DEMOTION_COUNT 1 | ||
26 | |||
27 | struct ladder_device_state { | ||
28 | struct { | ||
29 | u32 promotion_count; | ||
30 | u32 demotion_count; | ||
31 | u32 promotion_time; | ||
32 | u32 demotion_time; | ||
33 | } threshold; | ||
34 | struct { | ||
35 | int promotion_count; | ||
36 | int demotion_count; | ||
37 | } stats; | ||
38 | }; | ||
39 | |||
40 | struct ladder_device { | ||
41 | struct ladder_device_state states[CPUIDLE_STATE_MAX]; | ||
42 | int last_state_idx; | ||
43 | }; | ||
44 | |||
45 | static DEFINE_PER_CPU(struct ladder_device, ladder_devices); | ||
46 | |||
47 | /** | ||
48 | * ladder_do_selection - prepares private data for a state change | ||
49 | * @ldev: the ladder device | ||
50 | * @old_idx: the current state index | ||
51 | * @new_idx: the new target state index | ||
52 | */ | ||
53 | static inline void ladder_do_selection(struct ladder_device *ldev, | ||
54 | int old_idx, int new_idx) | ||
55 | { | ||
56 | ldev->states[old_idx].stats.promotion_count = 0; | ||
57 | ldev->states[old_idx].stats.demotion_count = 0; | ||
58 | ldev->last_state_idx = new_idx; | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * ladder_select_state - selects the next state to enter | ||
63 | * @dev: the CPU | ||
64 | */ | ||
65 | static int ladder_select_state(struct cpuidle_device *dev) | ||
66 | { | ||
67 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | ||
68 | struct ladder_device_state *last_state; | ||
69 | int last_residency, last_idx = ldev->last_state_idx; | ||
70 | |||
71 | if (unlikely(!ldev)) | ||
72 | return 0; | ||
73 | |||
74 | last_state = &ldev->states[last_idx]; | ||
75 | |||
76 | if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) | ||
77 | last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; | ||
78 | else | ||
79 | last_residency = last_state->threshold.promotion_time + 1; | ||
80 | |||
81 | /* consider promotion */ | ||
82 | if (last_idx < dev->state_count - 1 && | ||
83 | last_residency > last_state->threshold.promotion_time && | ||
84 | dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) { | ||
85 | last_state->stats.promotion_count++; | ||
86 | last_state->stats.demotion_count = 0; | ||
87 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { | ||
88 | ladder_do_selection(ldev, last_idx, last_idx + 1); | ||
89 | return last_idx + 1; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /* consider demotion */ | ||
94 | if (last_idx > 0 && | ||
95 | last_residency < last_state->threshold.demotion_time) { | ||
96 | last_state->stats.demotion_count++; | ||
97 | last_state->stats.promotion_count = 0; | ||
98 | if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) { | ||
99 | ladder_do_selection(ldev, last_idx, last_idx - 1); | ||
100 | return last_idx - 1; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | /* otherwise remain at the current state */ | ||
105 | return last_idx; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * ladder_enable_device - setup for the governor | ||
110 | * @dev: the CPU | ||
111 | */ | ||
112 | static int ladder_enable_device(struct cpuidle_device *dev) | ||
113 | { | ||
114 | int i; | ||
115 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); | ||
116 | struct ladder_device_state *lstate; | ||
117 | struct cpuidle_state *state; | ||
118 | |||
119 | ldev->last_state_idx = 0; | ||
120 | |||
121 | for (i = 0; i < dev->state_count; i++) { | ||
122 | state = &dev->states[i]; | ||
123 | lstate = &ldev->states[i]; | ||
124 | |||
125 | lstate->stats.promotion_count = 0; | ||
126 | lstate->stats.demotion_count = 0; | ||
127 | |||
128 | lstate->threshold.promotion_count = PROMOTION_COUNT; | ||
129 | lstate->threshold.demotion_count = DEMOTION_COUNT; | ||
130 | |||
131 | if (i < dev->state_count - 1) | ||
132 | lstate->threshold.promotion_time = state->exit_latency; | ||
133 | if (i > 0) | ||
134 | lstate->threshold.demotion_time = state->exit_latency; | ||
135 | } | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static struct cpuidle_governor ladder_governor = { | ||
141 | .name = "ladder", | ||
142 | .rating = 10, | ||
143 | .enable = ladder_enable_device, | ||
144 | .select = ladder_select_state, | ||
145 | .owner = THIS_MODULE, | ||
146 | }; | ||
147 | |||
148 | /** | ||
149 | * init_ladder - initializes the governor | ||
150 | */ | ||
151 | static int __init init_ladder(void) | ||
152 | { | ||
153 | return cpuidle_register_governor(&ladder_governor); | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * exit_ladder - exits the governor | ||
158 | */ | ||
159 | static void __exit exit_ladder(void) | ||
160 | { | ||
161 | cpuidle_unregister_governor(&ladder_governor); | ||
162 | } | ||
163 | |||
164 | MODULE_LICENSE("GPL"); | ||
165 | module_init(init_ladder); | ||
166 | module_exit(exit_ladder); | ||
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
new file mode 100644
index 000000000000..299d45c3bdd2
--- /dev/null
+++ b/drivers/cpuidle/governors/menu.c
@@ -0,0 +1,137 @@
1 | /* | ||
2 | * menu.c - the menu idle governor | ||
3 | * | ||
4 | * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com> | ||
5 | * | ||
6 | * This code is licenced under the GPL. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/cpuidle.h> | ||
11 | #include <linux/latency.h> | ||
12 | #include <linux/time.h> | ||
13 | #include <linux/ktime.h> | ||
14 | #include <linux/hrtimer.h> | ||
15 | #include <linux/tick.h> | ||
16 | |||
17 | #define BREAK_FUZZ 4 /* 4 us */ | ||
18 | |||
19 | struct menu_device { | ||
20 | int last_state_idx; | ||
21 | |||
22 | unsigned int expected_us; | ||
23 | unsigned int predicted_us; | ||
24 | unsigned int last_measured_us; | ||
25 | unsigned int elapsed_us; | ||
26 | }; | ||
27 | |||
28 | static DEFINE_PER_CPU(struct menu_device, menu_devices); | ||
29 | |||
30 | /** | ||
31 | * menu_select - selects the next idle state to enter | ||
32 | * @dev: the CPU | ||
33 | */ | ||
34 | static int menu_select(struct cpuidle_device *dev) | ||
35 | { | ||
36 | struct menu_device *data = &__get_cpu_var(menu_devices); | ||
37 | int i; | ||
38 | |||
39 | /* determine the expected residency time */ | ||
40 | data->expected_us = | ||
41 | (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; | ||
42 | |||
43 | /* find the deepest idle state that satisfies our constraints */ | ||
44 | for (i = 1; i < dev->state_count; i++) { | ||
45 | struct cpuidle_state *s = &dev->states[i]; | ||
46 | |||
47 | if (s->target_residency > data->expected_us) | ||
48 | break; | ||
49 | if (s->target_residency > data->predicted_us) | ||
50 | break; | ||
51 | if (s->exit_latency > system_latency_constraint()) | ||
52 | break; | ||
53 | } | ||
54 | |||
55 | data->last_state_idx = i - 1; | ||
56 | return i - 1; | ||
57 | } | ||
58 | |||
59 | /** | ||
60 | * menu_reflect - attempts to guess what happened after entry | ||
61 | * @dev: the CPU | ||
62 | * | ||
63 | * NOTE: it's important to be fast here because this operation will add to | ||
64 | * the overall exit latency. | ||
65 | */ | ||
66 | static void menu_reflect(struct cpuidle_device *dev) | ||
67 | { | ||
68 | struct menu_device *data = &__get_cpu_var(menu_devices); | ||
69 | int last_idx = data->last_state_idx; | ||
70 | unsigned int measured_us = | ||
71 | cpuidle_get_last_residency(dev) + data->elapsed_us; | ||
72 | struct cpuidle_state *target = &dev->states[last_idx]; | ||
73 | |||
74 | /* | ||
75 | * Ugh, this idle state doesn't support residency measurements, so we | ||
76 | * are basically lost in the dark. As a compromise, assume we slept | ||
77 | * for one full standard timer tick. However, be aware that this | ||
78 | * could potentially result in a suboptimal state transition. | ||
79 | */ | ||
80 | if (!(target->flags & CPUIDLE_FLAG_TIME_VALID)) | ||
81 | measured_us = USEC_PER_SEC / HZ; | ||
82 | |||
83 | /* Predict time remaining until next break event */ | ||
84 | if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) { | ||
85 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
86 | data->last_measured_us = measured_us; | ||
87 | data->elapsed_us = 0; | ||
88 | } else { | ||
89 | if (data->elapsed_us < data->elapsed_us + measured_us) | ||
90 | data->elapsed_us = measured_us; | ||
91 | else | ||
92 | data->elapsed_us = -1; | ||
93 | data->predicted_us = max(measured_us, data->last_measured_us); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | /** | ||
98 | * menu_enable_device - scans a CPU's states and does setup | ||
99 | * @dev: the CPU | ||
100 | */ | ||
101 | static int menu_enable_device(struct cpuidle_device *dev) | ||
102 | { | ||
103 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | ||
104 | |||
105 | memset(data, 0, sizeof(struct menu_device)); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static struct cpuidle_governor menu_governor = { | ||
111 | .name = "menu", | ||
112 | .rating = 20, | ||
113 | .enable = menu_enable_device, | ||
114 | .select = menu_select, | ||
115 | .reflect = menu_reflect, | ||
116 | .owner = THIS_MODULE, | ||
117 | }; | ||
118 | |||
119 | /** | ||
120 | * init_menu - initializes the governor | ||
121 | */ | ||
122 | static int __init init_menu(void) | ||
123 | { | ||
124 | return cpuidle_register_governor(&menu_governor); | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * exit_menu - exits the governor | ||
129 | */ | ||
130 | static void __exit exit_menu(void) | ||
131 | { | ||
132 | cpuidle_unregister_governor(&menu_governor); | ||
133 | } | ||
134 | |||
135 | MODULE_LICENSE("GPL"); | ||
136 | module_init(init_menu); | ||
137 | module_exit(exit_menu); | ||
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
new file mode 100644
index 000000000000..0f3515e77d4b
--- /dev/null
+++ b/drivers/cpuidle/sysfs.c
@@ -0,0 +1,361 @@
1 | /* | ||
2 | * sysfs.c - sysfs support | ||
3 | * | ||
4 | * (C) 2006-2007 Shaohua Li <shaohua.li@intel.com> | ||
5 | * | ||
6 | * This code is licenced under the GPL. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/cpuidle.h> | ||
11 | #include <linux/sysfs.h> | ||
12 | #include <linux/cpu.h> | ||
13 | |||
14 | #include "cpuidle.h" | ||
15 | |||
16 | static unsigned int sysfs_switch; | ||
17 | static int __init cpuidle_sysfs_setup(char *unused) | ||
18 | { | ||
19 | sysfs_switch = 1; | ||
20 | return 1; | ||
21 | } | ||
22 | __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); | ||
23 | |||
24 | static ssize_t show_available_governors(struct sys_device *dev, char *buf) | ||
25 | { | ||
26 | ssize_t i = 0; | ||
27 | struct cpuidle_governor *tmp; | ||
28 | |||
29 | mutex_lock(&cpuidle_lock); | ||
30 | list_for_each_entry(tmp, &cpuidle_governors, governor_list) { | ||
31 | if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2)) | ||
32 | goto out; | ||
33 | i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name); | ||
34 | } | ||
35 | |||
36 | out: | ||
37 | i+= sprintf(&buf[i], "\n"); | ||
38 | mutex_unlock(&cpuidle_lock); | ||
39 | return i; | ||
40 | } | ||
41 | |||
42 | static ssize_t show_current_driver(struct sys_device *dev, char *buf) | ||
43 | { | ||
44 | ssize_t ret; | ||
45 | |||
46 | spin_lock(&cpuidle_driver_lock); | ||
47 | if (cpuidle_curr_driver) | ||
48 | ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name); | ||
49 | else | ||
50 | ret = sprintf(buf, "none\n"); | ||
51 | spin_unlock(&cpuidle_driver_lock); | ||
52 | |||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | static ssize_t show_current_governor(struct sys_device *dev, char *buf) | ||
57 | { | ||
58 | ssize_t ret; | ||
59 | |||
60 | mutex_lock(&cpuidle_lock); | ||
61 | if (cpuidle_curr_governor) | ||
62 | ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name); | ||
63 | else | ||
64 | ret = sprintf(buf, "none\n"); | ||
65 | mutex_unlock(&cpuidle_lock); | ||
66 | |||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | static ssize_t store_current_governor(struct sys_device *dev, | ||
71 | const char *buf, size_t count) | ||
72 | { | ||
73 | char gov_name[CPUIDLE_NAME_LEN]; | ||
74 | int ret = -EINVAL; | ||
75 | size_t len = count; | ||
76 | struct cpuidle_governor *gov; | ||
77 | |||
78 | if (!len || len >= sizeof(gov_name)) | ||
79 | return -EINVAL; | ||
80 | |||
81 | memcpy(gov_name, buf, len); | ||
82 | gov_name[len] = '\0'; | ||
83 | if (gov_name[len - 1] == '\n') | ||
84 | gov_name[--len] = '\0'; | ||
85 | |||
86 | mutex_lock(&cpuidle_lock); | ||
87 | |||
88 | list_for_each_entry(gov, &cpuidle_governors, governor_list) { | ||
89 | if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) { | ||
90 | ret = cpuidle_switch_governor(gov); | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | mutex_unlock(&cpuidle_lock); | ||
96 | |||
97 | if (ret) | ||
98 | return ret; | ||
99 | else | ||
100 | return count; | ||
101 | } | ||
102 | |||
103 | static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL); | ||
104 | static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL); | ||
105 | |||
106 | static struct attribute *cpuclass_default_attrs[] = { | ||
107 | &attr_current_driver.attr, | ||
108 | &attr_current_governor_ro.attr, | ||
109 | NULL | ||
110 | }; | ||
111 | |||
112 | static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL); | ||
113 | static SYSDEV_ATTR(current_governor, 0644, show_current_governor, | ||
114 | store_current_governor); | ||
115 | |||
116 | static struct attribute *cpuclass_switch_attrs[] = { | ||
117 | &attr_available_governors.attr, | ||
118 | &attr_current_driver.attr, | ||
119 | &attr_current_governor.attr, | ||
120 | NULL | ||
121 | }; | ||
122 | |||
123 | static struct attribute_group cpuclass_attr_group = { | ||
124 | .attrs = cpuclass_default_attrs, | ||
125 | .name = "cpuidle", | ||
126 | }; | ||
127 | |||
128 | /** | ||
129 | * cpuidle_add_class_sysfs - add CPU global sysfs attributes | ||
130 | */ | ||
131 | int cpuidle_add_class_sysfs(struct sysdev_class *cls) | ||
132 | { | ||
133 | if (sysfs_switch) | ||
134 | cpuclass_attr_group.attrs = cpuclass_switch_attrs; | ||
135 | |||
136 | return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group); | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes | ||
141 | */ | ||
142 | void cpuidle_remove_class_sysfs(struct sysdev_class *cls) | ||
143 | { | ||
144 | sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group); | ||
145 | } | ||
146 | |||
147 | struct cpuidle_attr { | ||
148 | struct attribute attr; | ||
149 | ssize_t (*show)(struct cpuidle_device *, char *); | ||
150 | ssize_t (*store)(struct cpuidle_device *, const char *, size_t count); | ||
151 | }; | ||
152 | |||
153 | #define define_one_ro(_name, show) \ | ||
154 | static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL) | ||
155 | #define define_one_rw(_name, show, store) \ | ||
156 | static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store) | ||
157 | |||
158 | #define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj) | ||
159 | #define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr) | ||
160 | static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char * buf) | ||
161 | { | ||
162 | int ret = -EIO; | ||
163 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
164 | struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr); | ||
165 | |||
166 | if (cattr->show) { | ||
167 | mutex_lock(&cpuidle_lock); | ||
168 | ret = cattr->show(dev, buf); | ||
169 | mutex_unlock(&cpuidle_lock); | ||
170 | } | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr, | ||
175 | const char * buf, size_t count) | ||
176 | { | ||
177 | int ret = -EIO; | ||
178 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
179 | struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr); | ||
180 | |||
181 | if (cattr->store) { | ||
182 | mutex_lock(&cpuidle_lock); | ||
183 | ret = cattr->store(dev, buf, count); | ||
184 | mutex_unlock(&cpuidle_lock); | ||
185 | } | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | static struct sysfs_ops cpuidle_sysfs_ops = { | ||
190 | .show = cpuidle_show, | ||
191 | .store = cpuidle_store, | ||
192 | }; | ||
193 | |||
194 | static void cpuidle_sysfs_release(struct kobject *kobj) | ||
195 | { | ||
196 | struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); | ||
197 | |||
198 | complete(&dev->kobj_unregister); | ||
199 | } | ||
200 | |||
201 | static struct kobj_type ktype_cpuidle = { | ||
202 | .sysfs_ops = &cpuidle_sysfs_ops, | ||
203 | .release = cpuidle_sysfs_release, | ||
204 | }; | ||
205 | |||
206 | struct cpuidle_state_attr { | ||
207 | struct attribute attr; | ||
208 | ssize_t (*show)(struct cpuidle_state *, char *); | ||
209 | ssize_t (*store)(struct cpuidle_state *, const char *, size_t); | ||
210 | }; | ||
211 | |||
212 | #define define_one_state_ro(_name, show) \ | ||
213 | static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) | ||
214 | |||
215 | #define define_show_state_function(_name) \ | ||
216 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | ||
217 | { \ | ||
218 | return sprintf(buf, "%u\n", state->_name);\ | ||
219 | } | ||
220 | |||
221 | static ssize_t show_state_name(struct cpuidle_state *state, char *buf) | ||
222 | { | ||
223 | return sprintf(buf, "%s\n", state->name); | ||
224 | } | ||
225 | |||
226 | define_show_state_function(exit_latency) | ||
227 | define_show_state_function(power_usage) | ||
228 | define_show_state_function(usage) | ||
229 | define_show_state_function(time) | ||
230 | define_one_state_ro(name, show_state_name); | ||
231 | define_one_state_ro(latency, show_state_exit_latency); | ||
232 | define_one_state_ro(power, show_state_power_usage); | ||
233 | define_one_state_ro(usage, show_state_usage); | ||
234 | define_one_state_ro(time, show_state_time); | ||
235 | |||
236 | static struct attribute *cpuidle_state_default_attrs[] = { | ||
237 | &attr_name.attr, | ||
238 | &attr_latency.attr, | ||
239 | &attr_power.attr, | ||
240 | &attr_usage.attr, | ||
241 | &attr_time.attr, | ||
242 | NULL | ||
243 | }; | ||
244 | |||
245 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) | ||
246 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) | ||
247 | #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) | ||
248 | static ssize_t cpuidle_state_show(struct kobject * kobj, | ||
249 | struct attribute * attr ,char * buf) | ||
250 | { | ||
251 | int ret = -EIO; | ||
252 | struct cpuidle_state *state = kobj_to_state(kobj); | ||
253 | struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); | ||
254 | |||
255 | if (cattr->show) | ||
256 | ret = cattr->show(state, buf); | ||
257 | |||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | static struct sysfs_ops cpuidle_state_sysfs_ops = { | ||
262 | .show = cpuidle_state_show, | ||
263 | }; | ||
264 | |||
265 | static void cpuidle_state_sysfs_release(struct kobject *kobj) | ||
266 | { | ||
267 | struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj); | ||
268 | |||
269 | complete(&state_obj->kobj_unregister); | ||
270 | } | ||
271 | |||
272 | static struct kobj_type ktype_state_cpuidle = { | ||
273 | .sysfs_ops = &cpuidle_state_sysfs_ops, | ||
274 | .default_attrs = cpuidle_state_default_attrs, | ||
275 | .release = cpuidle_state_sysfs_release, | ||
276 | }; | ||
277 | |||
278 | static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i) | ||
279 | { | ||
280 | kobject_unregister(&device->kobjs[i]->kobj); | ||
281 | wait_for_completion(&device->kobjs[i]->kobj_unregister); | ||
282 | kfree(device->kobjs[i]); | ||
283 | device->kobjs[i] = NULL; | ||
284 | } | ||
285 | |||
286 | /** | ||
287 | * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes | ||
288 | * @device: the target device | ||
289 | */ | ||
290 | int cpuidle_add_state_sysfs(struct cpuidle_device *device) | ||
291 | { | ||
292 | int i, ret = -ENOMEM; | ||
293 | struct cpuidle_state_kobj *kobj; | ||
294 | |||
295 | /* state statistics */ | ||
296 | for (i = 0; i < device->state_count; i++) { | ||
297 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | ||
298 | if (!kobj) | ||
299 | goto error_state; | ||
300 | kobj->state = &device->states[i]; | ||
301 | init_completion(&kobj->kobj_unregister); | ||
302 | |||
303 | kobj->kobj.parent = &device->kobj; | ||
304 | kobj->kobj.ktype = &ktype_state_cpuidle; | ||
305 | kobject_set_name(&kobj->kobj, "state%d", i); | ||
306 | ret = kobject_register(&kobj->kobj); | ||
307 | if (ret) { | ||
308 | kfree(kobj); | ||
309 | goto error_state; | ||
310 | } | ||
311 | device->kobjs[i] = kobj; | ||
312 | } | ||
313 | |||
314 | return 0; | ||
315 | |||
316 | error_state: | ||
317 | for (i = i - 1; i >= 0; i--) | ||
318 | cpuidle_free_state_kobj(device, i); | ||
319 | return ret; | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes | ||
324 | * @device: the target device | ||
325 | */ | ||
326 | void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | for (i = 0; i < device->state_count; i++) | ||
331 | cpuidle_free_state_kobj(device, i); | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * cpuidle_add_sysfs - creates a sysfs instance for the target device | ||
336 | * @sysdev: the target device | ||
337 | */ | ||
338 | int cpuidle_add_sysfs(struct sys_device *sysdev) | ||
339 | { | ||
340 | int cpu = sysdev->id; | ||
341 | struct cpuidle_device *dev; | ||
342 | |||
343 | dev = per_cpu(cpuidle_devices, cpu); | ||
344 | dev->kobj.parent = &sysdev->kobj; | ||
345 | dev->kobj.ktype = &ktype_cpuidle; | ||
346 | kobject_set_name(&dev->kobj, "%s", "cpuidle"); | ||
347 | return kobject_register(&dev->kobj); | ||
348 | } | ||
349 | |||
350 | /** | ||
351 | * cpuidle_remove_sysfs - deletes a sysfs instance on the target device | ||
352 | * @sysdev: the target device | ||
353 | */ | ||
354 | void cpuidle_remove_sysfs(struct sys_device *sysdev) | ||
355 | { | ||
356 | int cpu = sysdev->id; | ||
357 | struct cpuidle_device *dev; | ||
358 | |||
359 | dev = per_cpu(cpuidle_devices, cpu); | ||
360 | kobject_unregister(&dev->kobj); | ||
361 | } | ||
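For reference, the attributes created above land in sysfs roughly as follows (a sketch assuming the usual /sys/devices/system/cpu sysdev hierarchy of this kernel generation; available_governors and the writable current_governor replace the defaults only when booting with the cpuidle_sysfs_switch parameter):

/sys/devices/system/cpu/cpuidle/current_driver
/sys/devices/system/cpu/cpuidle/current_governor_ro
/sys/devices/system/cpu/cpu0/cpuidle/state0/name
/sys/devices/system/cpu/cpu0/cpuidle/state0/latency
/sys/devices/system/cpu/cpu0/cpuidle/state0/power
/sys/devices/system/cpu/cpu0/cpuidle/state0/usage
/sys/devices/system/cpu/cpu0/cpuidle/state0/time

with one stateN directory per entry in the device's states[] array, and one cpuidle directory per registered CPU.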