author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/base/power
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/Makefile       |   8
-rw-r--r--  drivers/base/power/clock_ops.c    | 431
-rw-r--r--  drivers/base/power/generic_ops.c  |  49
-rw-r--r--  drivers/base/power/main.c         | 413
-rw-r--r--  drivers/base/power/opp.c          | 628
-rw-r--r--  drivers/base/power/power.h        |  21
-rw-r--r--  drivers/base/power/runtime.c      | 998
-rw-r--r--  drivers/base/power/sysfs.c        | 263
-rw-r--r--  drivers/base/power/trace.c        |  42
-rw-r--r--  drivers/base/power/wakeup.c       | 697
10 files changed, 2758 insertions(+), 792 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index cbccf9a3cee4..3647e114d0e7 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,8 +1,8 @@
-obj-$(CONFIG_PM)	+= sysfs.o
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
-obj-$(CONFIG_PM_OPS)	+= generic_ops.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
+obj-$(CONFIG_PM_OPP)	+= opp.o
+obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
-ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 000000000000..ad367c4139b1
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,431 @@
+/*
+ * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
+ *
+ * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#ifdef CONFIG_PM_RUNTIME
+
+struct pm_runtime_clk_data {
+	struct list_head clock_list;
+	struct mutex lock;
+};
+
+enum pce_status {
+	PCE_STATUS_NONE = 0,
+	PCE_STATUS_ACQUIRED,
+	PCE_STATUS_ENABLED,
+	PCE_STATUS_ERROR,
+};
+
+struct pm_clock_entry {
+	struct list_head node;
+	char *con_id;
+	struct clk *clk;
+	enum pce_status status;
+};
+
+static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+{
+	return dev ? dev->power.subsys_data : NULL;
+}
+
+/**
+ * pm_runtime_clk_add - Start using a device clock for runtime PM.
+ * @dev: Device whose clock is going to be used for runtime PM.
+ * @con_id: Connection ID of the clock.
+ *
+ * Add the clock represented by @con_id to the list of clocks used for
+ * the runtime PM of @dev.
+ */
+int pm_runtime_clk_add(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return -EINVAL;
+
+	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+	if (!ce) {
+		dev_err(dev, "Not enough memory for clock entry.\n");
+		return -ENOMEM;
+	}
+
+	if (con_id) {
+		ce->con_id = kstrdup(con_id, GFP_KERNEL);
+		if (!ce->con_id) {
+			dev_err(dev,
+				"Not enough memory for clock connection ID.\n");
+			kfree(ce);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&prd->lock);
+	list_add_tail(&ce->node, &prd->clock_list);
+	mutex_unlock(&prd->lock);
+	return 0;
+}
+
+/**
+ * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
+ * @ce: Runtime PM clock entry to destroy.
+ *
+ * This routine must be called under the mutex protecting the runtime PM list
+ * of clocks corresponding to the @ce's device.
+ */
+static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+{
+	if (!ce)
+		return;
+
+	list_del(&ce->node);
+
+	if (ce->status < PCE_STATUS_ERROR) {
+		if (ce->status == PCE_STATUS_ENABLED)
+			clk_disable(ce->clk);
+
+		if (ce->status >= PCE_STATUS_ACQUIRED)
+			clk_put(ce->clk);
+	}
+
+	if (ce->con_id)
+		kfree(ce->con_id);
+
+	kfree(ce);
+}
+
+/**
+ * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
+ * @dev: Device whose clock should not be used for runtime PM any more.
+ * @con_id: Connection ID of the clock.
+ *
+ * Remove the clock represented by @con_id from the list of clocks used for
+ * the runtime PM of @dev.
+ */
+void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (!con_id && !ce->con_id) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		} else if (!con_id || !ce->con_id) {
+			continue;
+		} else if (!strcmp(con_id, ce->con_id)) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+}
+
+/**
+ * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
+ * @dev: Device to initialize the list of runtime PM clocks for.
+ *
+ * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * make the @dev's power.subsys_data field point to it.
+ */
+int pm_runtime_clk_init(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd;
+
+	prd = kzalloc(sizeof(*prd), GFP_KERNEL);
+	if (!prd) {
156 | dev_err(dev, "Not enough memory fo runtime PM data.\n"); | ||
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&prd->clock_list);
+	mutex_init(&prd->lock);
+	dev->power.subsys_data = prd;
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
+ * @dev: Device to destroy the list of runtime PM clocks for.
+ *
+ * Clear the @dev's power.subsys_data field, remove the list of clock entries
+ * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * that object.
+ */
+void pm_runtime_clk_destroy(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce, *c;
+
+	if (!prd)
+		return;
+
+	dev->power.subsys_data = NULL;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
+		__pm_runtime_clk_remove(ce);
+
+	mutex_unlock(&prd->lock);
+
+	kfree(prd);
+}
+
+/**
+ * pm_runtime_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @con_id: Connection ID of the clock.
+ */
+static void pm_runtime_clk_acquire(struct device *dev,
+				   struct pm_clock_entry *ce)
+{
+	ce->clk = clk_get(dev, ce->con_id);
+	if (IS_ERR(ce->clk)) {
+		ce->status = PCE_STATUS_ERROR;
+	} else {
+		ce->status = PCE_STATUS_ACQUIRED;
+		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+	}
+}
+
+/**
+ * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_runtime_clk_suspend(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_disable(ce->clk);
+			ce->status = PCE_STATUS_ACQUIRED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_runtime_clk_resume(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_enable(ce->clk);
+			ce->status = PCE_STATUS_ENABLED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pwr_domain member of that object is copied to the device's
+ * pwr_domain field and its con_ids member is used to populate the device's list
+ * of runtime PM clocks, depending on @action.
+ *
+ * If the device's pwr_domain field is already populated with a value different
+ * from the one stored in the struct pm_clk_notifier_block object, the function
+ * does nothing.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char **con_id;
+	int error;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (dev->pwr_domain)
+			break;
+
+		error = pm_runtime_clk_init(dev);
+		if (error)
+			break;
+
+		dev->pwr_domain = clknb->pwr_domain;
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				pm_runtime_clk_add(dev, *con_id);
+		} else {
+			pm_runtime_clk_add(dev, NULL);
+		}
+
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (dev->pwr_domain != clknb->pwr_domain)
+			break;
+
+		dev->pwr_domain = NULL;
+		pm_runtime_clk_destroy(dev);
+		break;
+	}
+
+	return 0;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+/**
+ * enable_clock - Enable a device clock.
+ * @dev: Device whose clock is to be enabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void enable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_enable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced on.\n");
+	}
+}
+
+/**
+ * disable_clock - Disable a device clock.
+ * @dev: Device whose clock is to be disabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void disable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced off.\n");
+	}
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the con_ids member of that object is used to enable or disable
+ * the device's clocks, depending on @action.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char **con_id;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_BIND_DRIVER:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				enable_clock(dev, *con_id);
+		} else {
+			enable_clock(dev, NULL);
+		}
+		break;
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				disable_clock(dev, *con_id);
+		} else {
+			disable_clock(dev, NULL);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+/**
+ * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * @bus: Bus type to add the notifier to.
+ * @clknb: Notifier to be added to the given bus type.
+ *
+ * The nb member of @clknb is not expected to be initialized and its
+ * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * the remaining members of @clknb should be populated prior to calling this
+ * routine.
+ */
+void pm_runtime_clk_add_notifier(struct bus_type *bus,
+				 struct pm_clk_notifier_block *clknb)
+{
+	if (!bus || !clknb)
+		return;
+
+	clknb->nb.notifier_call = pm_runtime_clk_notify;
+	bus_register_notifier(bus, &clknb->nb);
+}
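
For context on how the new clock_ops.c API is meant to be consumed: a platform or bus wires pm_runtime_clk_suspend()/pm_runtime_clk_resume() into a power domain and registers a pm_clk_notifier_block, after which every device added to that bus automatically gets a managed clock list. The following is a minimal illustrative sketch, not part of this patch; the "foo" names and the "fck" connection ID are hypothetical, modeled on how arch/arm/mach-shmobile uses this interface.

/* Sketch only: gate a bus's device clocks through runtime PM using the
 * helpers introduced above.  All "foo"/"fck" names are hypothetical. */
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

static struct dev_power_domain foo_clk_domain = {
	.ops = {
		/* Invoked when a bus device runtime-suspends/resumes. */
		.runtime_suspend = pm_runtime_clk_suspend,
		.runtime_resume = pm_runtime_clk_resume,
	},
};

static struct pm_clk_notifier_block foo_clk_nb = {
	.pwr_domain = &foo_clk_domain,
	.con_ids = { "fck", NULL },	/* hypothetical connection ID */
};

static int __init foo_pm_init(void)
{
	/* On BUS_NOTIFY_ADD_DEVICE the notifier calls pm_runtime_clk_init()
	 * and pm_runtime_clk_add() for each device on the platform bus. */
	pm_runtime_clk_add_notifier(&platform_bus_type, &foo_clk_nb);
	return 0;
}
core_initcall(foo_pm_init);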
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 4b29d4981253..cb3bb368681c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
  *
  * If PM operations are defined for the @dev's driver and they include
  * ->runtime_suspend(), execute it and return its error code. Otherwise,
- * return -EINVAL.
+ * return 0.
  */
 int pm_generic_runtime_suspend(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int ret;
 
-	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL;
+	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
 
 	return ret;
 }
@@ -58,14 +58,14 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
  *
  * If PM operations are defined for the @dev's driver and they include
  * ->runtime_resume(), execute it and return its error code. Otherwise,
- * return -EINVAL.
+ * return 0.
  */
 int pm_generic_runtime_resume(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int ret;
 
-	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL;
+	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
 
 	return ret;
 }
@@ -74,6 +74,23 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
 
 #ifdef CONFIG_PM_SLEEP
 /**
+ * pm_generic_prepare - Generic routine preparing a device for power transition.
+ * @dev: Device to prepare.
+ *
+ * Prepare a device for a system-wide power transition.
+ */
+int pm_generic_prepare(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (drv && drv->pm && drv->pm->prepare)
+		ret = drv->pm->prepare(dev);
+
+	return ret;
+}
+
+/**
  * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
  * @dev: Device to handle.
  * @event: PM transition of the system under way.
@@ -185,7 +202,7 @@ static int __pm_generic_resume(struct device *dev, int event)
 		return 0;
 
 	ret = callback(dev);
-	if (!ret) {
+	if (!ret && pm_runtime_enabled(dev)) {
 		pm_runtime_disable(dev);
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
@@ -213,16 +230,38 @@ int pm_generic_restore(struct device *dev)
 	return __pm_generic_resume(dev, PM_EVENT_RESTORE);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
+
+/**
+ * pm_generic_complete - Generic routine completing a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition.
+ */
+void pm_generic_complete(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->pm && drv->pm->complete)
+		drv->pm->complete(dev);
+
+	/*
+	 * Let runtime PM try to suspend devices that haven't been in use before
+	 * going into the system-wide sleep state we're resuming from.
+	 */
+	pm_runtime_idle(dev);
+}
 #endif /* CONFIG_PM_SLEEP */
 
 struct dev_pm_ops generic_subsys_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
+	.prepare = pm_generic_prepare,
 	.suspend = pm_generic_suspend,
 	.resume = pm_generic_resume,
 	.freeze = pm_generic_freeze,
 	.thaw = pm_generic_thaw,
 	.poweroff = pm_generic_poweroff,
 	.restore = pm_generic_restore,
+	.complete = pm_generic_complete,
 #endif
 #ifdef CONFIG_PM_RUNTIME
 	.runtime_suspend = pm_generic_runtime_suspend,
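
The pm_generic_prepare()/pm_generic_complete() pair added above only forwards to whatever the bound driver declares in its dev_pm_ops, so a subsystem can point its bus at generic_subsys_pm_ops and let drivers fill in just the callbacks they need. A minimal sketch under that assumption (the "foo" names are hypothetical, not from this patch):

/* Sketch only: a bus reusing generic_subsys_pm_ops, and a driver whose
 * ->prepare()/->complete() are reached via pm_generic_prepare() and
 * pm_generic_complete().  "foo" names are hypothetical. */
#include <linux/device.h>
#include <linux/pm.h>

static struct bus_type foo_bus_type = {
	.name = "foo",
	.pm = &generic_subsys_pm_ops,	/* the ops table extended above */
};

static int foo_prepare(struct device *dev)
{
	/* Return 0 to let the transition proceed, -EAGAIN to retry. */
	return 0;
}

static void foo_complete(struct device *dev)
{
	/* Undo foo_prepare(); pm_generic_complete() follows up with
	 * pm_runtime_idle() on this device. */
}

static const struct dev_pm_ops foo_pm_ops = {
	.prepare = foo_prepare,
	.complete = foo_complete,
};

static struct device_driver foo_driver = {
	.name = "foo",
	.bus = &foo_bus_type,
	.pm = &foo_pm_ops,
};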
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 276d5a701dc3..06f09bf89cb2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -8,7 +8,7 @@
  *
  *
  * The driver model core calls device_pm_add() when a device is registered.
- * This will intialize the embedded device_pm_info object in the device
+ * This will initialize the embedded device_pm_info object in the device
  * and add it to the list of power-controlled devices. sysfs entries for
  * controlling device power management will also be added.
  *
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/async.h>
+#include <linux/suspend.h>
 
 #include "../base.h"
 #include "power.h"
@@ -41,15 +42,14 @@
  */
 
 LIST_HEAD(dpm_list);
+LIST_HEAD(dpm_prepared_list);
+LIST_HEAD(dpm_suspended_list);
+LIST_HEAD(dpm_noirq_list);
 
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
-/*
- * Set once the preparation of devices for a PM transition has started, reset
- * before starting to resume devices. Protected by dpm_list_mtx.
- */
-static bool transition_started;
+static int async_error;
 
 /**
  * device_pm_init - Initialize the PM-related part of a device object.
@@ -57,11 +57,14 @@ static bool transition_started;
  */
 void device_pm_init(struct device *dev)
 {
-	dev->power.status = DPM_ON;
+	dev->power.is_prepared = false;
+	dev->power.is_suspended = false;
 	init_completion(&dev->power.completion);
 	complete_all(&dev->power.completion);
-	dev->power.wakeup_count = 0;
+	dev->power.wakeup = NULL;
+	spin_lock_init(&dev->power.lock);
 	pm_runtime_init(dev);
+	INIT_LIST_HEAD(&dev->power.entry);
 }
 
 /**
@@ -87,22 +90,11 @@ void device_pm_unlock(void)
 void device_pm_add(struct device *dev)
 {
 	pr_debug("PM: Adding info for %s:%s\n",
-		dev->bus ? dev->bus->name : "No Bus",
-		kobject_name(&dev->kobj));
+		dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	mutex_lock(&dpm_list_mtx);
-	if (dev->parent) {
-		if (dev->parent->power.status >= DPM_SUSPENDING)
-			dev_warn(dev, "parent %s should not be sleeping\n",
-				dev_name(dev->parent));
-	} else if (transition_started) {
-		/*
-		 * We refuse to register parentless devices while a PM
-		 * transition is in progress in order to avoid leaving them
-		 * unhandled down the road
-		 */
-		dev_WARN(dev, "Parentless device registered during a PM transaction\n");
-	}
-
+	if (dev->parent && dev->parent->power.is_prepared)
+		dev_warn(dev, "parent %s should not be sleeping\n",
+			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 }
@@ -114,12 +106,12 @@ void device_pm_add(struct device *dev)
 void device_pm_remove(struct device *dev)
 {
 	pr_debug("PM: Removing info for %s:%s\n",
-		dev->bus ? dev->bus->name : "No Bus",
-		kobject_name(&dev->kobj));
+		dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
+	device_wakeup_disable(dev);
 	pm_runtime_remove(dev);
 }
 
@@ -131,10 +123,8 @@ void device_pm_remove(struct device *dev)
 void device_pm_move_before(struct device *deva, struct device *devb)
 {
 	pr_debug("PM: Moving %s:%s before %s:%s\n",
-		deva->bus ? deva->bus->name : "No Bus",
-		kobject_name(&deva->kobj),
-		devb->bus ? devb->bus->name : "No Bus",
-		kobject_name(&devb->kobj));
+		deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+		devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 	/* Delete deva from dpm_list and reinsert before devb. */
 	list_move_tail(&deva->power.entry, &devb->power.entry);
 }
@@ -147,10 +137,8 @@ void device_pm_move_before(struct device *deva, struct device *devb)
 void device_pm_move_after(struct device *deva, struct device *devb)
 {
 	pr_debug("PM: Moving %s:%s after %s:%s\n",
-		deva->bus ? deva->bus->name : "No Bus",
-		kobject_name(&deva->kobj),
-		devb->bus ? devb->bus->name : "No Bus",
-		kobject_name(&devb->kobj));
+		deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+		devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 	/* Delete deva from dpm_list and reinsert after devb. */
 	list_move(&deva->power.entry, &devb->power.entry);
 }
@@ -162,8 +150,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)
 void device_pm_move_last(struct device *dev)
 {
 	pr_debug("PM: Moving %s:%s to end of list\n",
-		dev->bus ? dev->bus->name : "No Bus",
-		kobject_name(&dev->kobj));
+		dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	list_move_tail(&dev->power.entry, &dpm_list);
 }
 
@@ -248,7 +235,7 @@ static int pm_op(struct device *dev,
 		}
 		break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
 		if (ops->freeze) {
@@ -275,7 +262,7 @@ static int pm_op(struct device *dev,
 			suspend_report_result(ops->restore, error);
 		}
 		break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 	default:
 		error = -EINVAL;
 	}
@@ -299,7 +286,7 @@ static int pm_noirq_op(struct device *dev,
 			pm_message_t state)
 {
 	int error = 0;
-	ktime_t calltime, delta, rettime;
+	ktime_t calltime = ktime_set(0, 0), delta, rettime;
 
 	if (initcall_debug) {
 		pr_info("calling %s+ @ %i, parent: %s\n",
@@ -323,7 +310,7 @@ static int pm_noirq_op(struct device *dev,
 		}
 		break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
 		if (ops->freeze_noirq) {
@@ -350,7 +337,7 @@ static int pm_noirq_op(struct device *dev,
 			suspend_report_result(ops->restore_noirq, error);
 		}
 		break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 	default:
 		error = -EINVAL;
 	}
@@ -401,13 +388,13 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
 			int error)
 {
 	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
-		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
+		dev_name(dev), pm_verb(state.event), info, error);
 }
 
 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
 {
 	ktime_t calltime;
-	s64 usecs64;
+	u64 usecs64;
 	int usecs;
 
 	calltime = ktime_get();
@@ -438,26 +425,20 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	if (dev->bus && dev->bus->pm) {
-		pm_dev_dbg(dev, state, "EARLY ");
-		error = pm_noirq_op(dev, dev->bus->pm, state);
-		if (error)
-			goto End;
-	}
-
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "EARLY power domain ");
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
-		if (error)
-			goto End;
-	}
-
-	if (dev->class && dev->class->pm) {
+	} else if (dev->class && dev->class->pm) {
 		pm_dev_dbg(dev, state, "EARLY class ");
 		error = pm_noirq_op(dev, dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		pm_dev_dbg(dev, state, "EARLY ");
+		error = pm_noirq_op(dev, dev->bus->pm, state);
 	}
 
-End:
 	TRACE_RESUME(error);
 	return error;
 }
@@ -471,20 +452,24 @@ End:
  */
 void dpm_resume_noirq(pm_message_t state)
 {
-	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
-	transition_started = false;
-	list_for_each_entry(dev, &dpm_list, power.entry)
-		if (dev->power.status > DPM_OFF) {
-			int error;
+	while (!list_empty(&dpm_noirq_list)) {
+		struct device *dev = to_device(dpm_noirq_list.next);
+		int error;
 
-			dev->power.status = DPM_OFF;
-			error = device_resume_noirq(dev, state);
-			if (error)
-				pm_dev_err(dev, state, " early", error);
-		}
+		get_device(dev);
+		list_move_tail(&dev->power.entry, &dpm_suspended_list);
+		mutex_unlock(&dpm_list_mtx);
+
+		error = device_resume_noirq(dev, state);
+		if (error)
+			pm_dev_err(dev, state, " early", error);
+
+		mutex_lock(&dpm_list_mtx);
+		put_device(dev);
+	}
 	mutex_unlock(&dpm_list_mtx);
 	dpm_show_time(starttime, state, "early");
 	resume_device_irqs();
@@ -527,39 +512,53 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	dpm_wait(dev->parent, async);
 	device_lock(dev);
 
-	dev->power.status = DPM_RESUMING;
+	/*
+	 * This is a fib. But we'll allow new children to be added below
+	 * a resumed device, even if the device hasn't been completed yet.
+	 */
+	dev->power.is_prepared = false;
 
-	if (dev->bus) {
-		if (dev->bus->pm) {
-			pm_dev_dbg(dev, state, "");
-			error = pm_op(dev, dev->bus->pm, state);
-		} else if (dev->bus->resume) {
-			pm_dev_dbg(dev, state, "legacy ");
-			error = legacy_resume(dev, dev->bus->resume);
-		}
-		if (error)
-			goto End;
+	if (!dev->power.is_suspended)
+		goto Unlock;
+
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "power domain ");
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
 	}
 
-	if (dev->type) {
-		if (dev->type->pm) {
-			pm_dev_dbg(dev, state, "type ");
-			error = pm_op(dev, dev->type->pm, state);
-		}
-		if (error)
-			goto End;
+	if (dev->type && dev->type->pm) {
+		pm_dev_dbg(dev, state, "type ");
+		error = pm_op(dev, dev->type->pm, state);
+		goto End;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
 			error = pm_op(dev, dev->class->pm, state);
+			goto End;
 		} else if (dev->class->resume) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_resume(dev, dev->class->resume);
+			goto End;
 		}
 	}
+
+	if (dev->bus) {
+		if (dev->bus->pm) {
+			pm_dev_dbg(dev, state, "");
+			error = pm_op(dev, dev->bus->pm, state);
+		} else if (dev->bus->resume) {
+			pm_dev_dbg(dev, state, "legacy ");
+			error = legacy_resume(dev, dev->bus->resume);
+		}
+	}
+
 End:
+	dev->power.is_suspended = false;
+
+ Unlock:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
 
@@ -591,20 +590,18 @@ static bool is_async(struct device *dev)
  * Execute the appropriate "resume" callback for all devices whose status
  * indicates that they are suspended.
  */
-static void dpm_resume(pm_message_t state)
+void dpm_resume(pm_message_t state)
 {
-	struct list_head list;
 	struct device *dev;
 	ktime_t starttime = ktime_get();
 
-	INIT_LIST_HEAD(&list);
+	might_sleep();
+
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
+	async_error = 0;
 
-	list_for_each_entry(dev, &dpm_list, power.entry) {
-		if (dev->power.status < DPM_OFF)
-			continue;
-
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 		INIT_COMPLETION(dev->power.completion);
 		if (is_async(dev)) {
 			get_device(dev);
@@ -612,28 +609,24 @@ static void dpm_resume(pm_message_t state)
 		}
 	}
 
-	while (!list_empty(&dpm_list)) {
-		dev = to_device(dpm_list.next);
+	while (!list_empty(&dpm_suspended_list)) {
+		dev = to_device(dpm_suspended_list.next);
 		get_device(dev);
-		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
+		if (!is_async(dev)) {
 			int error;
 
 			mutex_unlock(&dpm_list_mtx);
 
 			error = device_resume(dev, state, false);
-
-			mutex_lock(&dpm_list_mtx);
 			if (error)
 				pm_dev_err(dev, state, "", error);
-		} else if (dev->power.status == DPM_SUSPENDING) {
-			/* Allow new children of the device to be registered */
-			dev->power.status = DPM_RESUMING;
+
+			mutex_lock(&dpm_list_mtx);
 		}
 		if (!list_empty(&dev->power.entry))
-			list_move_tail(&dev->power.entry, &list);
+			list_move_tail(&dev->power.entry, &dpm_prepared_list);
 		put_device(dev);
 	}
-	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	dpm_show_time(starttime, state, NULL);
@@ -648,19 +641,22 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
 
-	if (dev->class && dev->class->pm && dev->class->pm->complete) {
-		pm_dev_dbg(dev, state, "completing class ");
-		dev->class->pm->complete(dev);
-	}
-
-	if (dev->type && dev->type->pm && dev->type->pm->complete) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "completing power domain ");
+		if (dev->pwr_domain->ops.complete)
+			dev->pwr_domain->ops.complete(dev);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
-		dev->type->pm->complete(dev);
-	}
-
-	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
+		if (dev->type->pm->complete)
+			dev->type->pm->complete(dev);
+	} else if (dev->class && dev->class->pm) {
+		pm_dev_dbg(dev, state, "completing class ");
+		if (dev->class->pm->complete)
+			dev->class->pm->complete(dev);
+	} else if (dev->bus && dev->bus->pm) {
 		pm_dev_dbg(dev, state, "completing ");
-		dev->bus->pm->complete(dev);
+		if (dev->bus->pm->complete)
+			dev->bus->pm->complete(dev);
 	}
 
 	device_unlock(dev);
@@ -673,28 +669,25 @@ static void device_complete(struct device *dev, pm_message_t state)
  * Execute the ->complete() callbacks for all devices whose PM status is not
  * DPM_ON (this allows new devices to be registered).
  */
-static void dpm_complete(pm_message_t state)
+void dpm_complete(pm_message_t state)
 {
 	struct list_head list;
 
+	might_sleep();
+
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	transition_started = false;
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.prev);
+	while (!list_empty(&dpm_prepared_list)) {
+		struct device *dev = to_device(dpm_prepared_list.prev);
 
 		get_device(dev);
-		if (dev->power.status > DPM_ON) {
-			dev->power.status = DPM_ON;
+		dev->power.is_prepared = false;
+		list_move(&dev->power.entry, &list);
 		mutex_unlock(&dpm_list_mtx);
 
 		device_complete(dev, state);
-		pm_runtime_put_sync(dev);
 
 		mutex_lock(&dpm_list_mtx);
-		}
-		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &list);
 		put_device(dev);
 	}
 	list_splice(&list, &dpm_list);
@@ -710,7 +703,6 @@ static void dpm_complete(pm_message_t state)
  */
 void dpm_resume_end(pm_message_t state)
 {
-	might_sleep();
 	dpm_resume(state);
 	dpm_complete(state);
 }
@@ -750,29 +742,31 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  */
 static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
-	int error = 0;
+	int error;
 
-	if (dev->class && dev->class->pm) {
-		pm_dev_dbg(dev, state, "LATE class ");
-		error = pm_noirq_op(dev, dev->class->pm, state);
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "LATE power domain ");
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
 		if (error)
-			goto End;
-	}
-
-	if (dev->type && dev->type->pm) {
+			return error;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "LATE type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 		if (error)
-			goto End;
-	}
-
-	if (dev->bus && dev->bus->pm) {
+			return error;
+	} else if (dev->class && dev->class->pm) {
+		pm_dev_dbg(dev, state, "LATE class ");
+		error = pm_noirq_op(dev, dev->class->pm, state);
+		if (error)
+			return error;
+	} else if (dev->bus && dev->bus->pm) {
 		pm_dev_dbg(dev, state, "LATE ");
 		error = pm_noirq_op(dev, dev->bus->pm, state);
+		if (error)
+			return error;
 	}
 
-End:
-	return error;
+	return 0;
 }
 
 /**
@@ -784,19 +778,28 @@ End:
  */
 int dpm_suspend_noirq(pm_message_t state)
 {
-	struct device *dev;
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
-	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
+	while (!list_empty(&dpm_suspended_list)) {
+		struct device *dev = to_device(dpm_suspended_list.prev);
+
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+
 		error = device_suspend_noirq(dev, state);
+
+		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, " late", error);
+			put_device(dev);
 			break;
 		}
-		dev->power.status = DPM_OFF_IRQ;
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &dpm_noirq_list);
+		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
 	if (error)
@@ -829,8 +832,6 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
 	return error;
 }
 
-static int async_error;
-
 /**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
@@ -845,27 +846,35 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	device_lock(dev);
 
 	if (async_error)
+		goto Unlock;
+
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Unlock;
+	}
+
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "power domain ");
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
 		goto End;
+	}
+
+	if (dev->type && dev->type->pm) {
+		pm_dev_dbg(dev, state, "type ");
+		error = pm_op(dev, dev->type->pm, state);
+		goto End;
+	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
 			error = pm_op(dev, dev->class->pm, state);
+			goto End;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
-		}
-		if (error)
 			goto End;
-	}
-
-	if (dev->type) {
-		if (dev->type->pm) {
-			pm_dev_dbg(dev, state, "type ");
-			error = pm_op(dev, dev->type->pm, state);
 		}
-		if (error)
-			goto End;
 	}
 
 	if (dev->bus) {
@@ -878,13 +887,16 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		}
 	}
 
-	if (!error)
-		dev->power.status = DPM_OFF;
-
 End:
+	dev->power.is_suspended = !error;
+
+ Unlock:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
 
+	if (error)
+		async_error = error;
+
 	return error;
 }
 
@@ -894,10 +906,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
 	int error;
 
 	error = __device_suspend(dev, pm_transition, true);
-	if (error) {
+	if (error)
 		pm_dev_err(dev, pm_transition, " async", error);
-		async_error = error;
-	}
 
 	put_device(dev);
 }
@@ -919,18 +929,18 @@ static int device_suspend(struct device *dev)
  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
  * @state: PM transition of the system being carried out.
  */
-static int dpm_suspend(pm_message_t state)
+int dpm_suspend(pm_message_t state)
 {
-	struct list_head list;
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
-	INIT_LIST_HEAD(&list);
+	might_sleep();
+
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.prev);
+	while (!list_empty(&dpm_prepared_list)) {
+		struct device *dev = to_device(dpm_prepared_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
@@ -944,12 +954,11 @@ static int dpm_suspend(pm_message_t state)
 			break;
 		}
 		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &list);
+			list_move(&dev->power.entry, &dpm_suspended_list);
 		put_device(dev);
 		if (async_error)
 			break;
 	}
-	list_splice(&list, dpm_list.prev);
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	if (!error)
@@ -973,27 +982,34 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
-	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
-		pm_dev_dbg(dev, state, "preparing ");
-		error = dev->bus->pm->prepare(dev);
-		suspend_report_result(dev->bus->pm->prepare, error);
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "preparing power domain ");
+		if (dev->pwr_domain->ops.prepare)
+			error = dev->pwr_domain->ops.prepare(dev);
+		suspend_report_result(dev->pwr_domain->ops.prepare, error);
 		if (error)
 			goto End;
-	}
-
-	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "preparing type ");
-		error = dev->type->pm->prepare(dev);
+		if (dev->type->pm->prepare)
+			error = dev->type->pm->prepare(dev);
 		suspend_report_result(dev->type->pm->prepare, error);
 		if (error)
 			goto End;
-	}
-
-	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
+	} else if (dev->class && dev->class->pm) {
 		pm_dev_dbg(dev, state, "preparing class ");
-		error = dev->class->pm->prepare(dev);
+		if (dev->class->pm->prepare)
+			error = dev->class->pm->prepare(dev);
 		suspend_report_result(dev->class->pm->prepare, error);
+		if (error)
+			goto End;
+	} else if (dev->bus && dev->bus->pm) {
+		pm_dev_dbg(dev, state, "preparing ");
+		if (dev->bus->pm->prepare)
+			error = dev->bus->pm->prepare(dev);
+		suspend_report_result(dev->bus->pm->prepare, error);
 	}
+
 End:
 	device_unlock(dev);
 
@@ -1006,50 +1022,45 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
1006 | * | 1022 | * |
1007 | * Execute the ->prepare() callback(s) for all devices. | 1023 | * Execute the ->prepare() callback(s) for all devices. |
1008 | */ | 1024 | */ |
1009 | static int dpm_prepare(pm_message_t state) | 1025 | int dpm_prepare(pm_message_t state) |
1010 | { | 1026 | { |
1011 | struct list_head list; | ||
1012 | int error = 0; | 1027 | int error = 0; |
1013 | 1028 | ||
1014 | INIT_LIST_HEAD(&list); | 1029 | might_sleep(); |
1030 | |||
1015 | mutex_lock(&dpm_list_mtx); | 1031 | mutex_lock(&dpm_list_mtx); |
1016 | transition_started = true; | ||
1017 | while (!list_empty(&dpm_list)) { | 1032 | while (!list_empty(&dpm_list)) { |
1018 | struct device *dev = to_device(dpm_list.next); | 1033 | struct device *dev = to_device(dpm_list.next); |
1019 | 1034 | ||
1020 | get_device(dev); | 1035 | get_device(dev); |
1021 | dev->power.status = DPM_PREPARING; | ||
1022 | mutex_unlock(&dpm_list_mtx); | 1036 | mutex_unlock(&dpm_list_mtx); |
1023 | 1037 | ||
1024 | pm_runtime_get_noresume(dev); | 1038 | pm_runtime_get_noresume(dev); |
1025 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { | 1039 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) |
1026 | /* Wake-up requested during system sleep transition. */ | 1040 | pm_wakeup_event(dev, 0); |
1027 | pm_runtime_put_sync(dev); | 1041 | |
1028 | error = -EBUSY; | 1042 | pm_runtime_put_sync(dev); |
1029 | } else { | 1043 | error = pm_wakeup_pending() ? |
1030 | error = device_prepare(dev, state); | 1044 | -EBUSY : device_prepare(dev, state); |
1031 | } | ||
1032 | 1045 | ||
1033 | mutex_lock(&dpm_list_mtx); | 1046 | mutex_lock(&dpm_list_mtx); |
1034 | if (error) { | 1047 | if (error) { |
1035 | dev->power.status = DPM_ON; | ||
1036 | if (error == -EAGAIN) { | 1048 | if (error == -EAGAIN) { |
1037 | put_device(dev); | 1049 | put_device(dev); |
1038 | error = 0; | 1050 | error = 0; |
1039 | continue; | 1051 | continue; |
1040 | } | 1052 | } |
1041 | printk(KERN_ERR "PM: Failed to prepare device %s " | 1053 | printk(KERN_INFO "PM: Device %s not prepared " |
1042 | "for power transition: error %d\n", | 1054 | "for power transition: code %d\n", |
1043 | kobject_name(&dev->kobj), error); | 1055 | dev_name(dev), error); |
1044 | put_device(dev); | 1056 | put_device(dev); |
1045 | break; | 1057 | break; |
1046 | } | 1058 | } |
1047 | dev->power.status = DPM_SUSPENDING; | 1059 | dev->power.is_prepared = true; |
1048 | if (!list_empty(&dev->power.entry)) | 1060 | if (!list_empty(&dev->power.entry)) |
1049 | list_move_tail(&dev->power.entry, &list); | 1061 | list_move_tail(&dev->power.entry, &dpm_prepared_list); |
1050 | put_device(dev); | 1062 | put_device(dev); |
1051 | } | 1063 | } |
1052 | list_splice(&list, &dpm_list); | ||
1053 | mutex_unlock(&dpm_list_mtx); | 1064 | mutex_unlock(&dpm_list_mtx); |
1054 | return error; | 1065 | return error; |
1055 | } | 1066 | } |
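Within dpm_prepare(), an -EAGAIN return from device_prepare() is treated as a soft condition: the error is cleared and the loop retries the device, which was never moved off dpm_list, instead of aborting the whole transition. A sketch of a hypothetical ->prepare callback relying on that convention (bar_device_busy is an illustrative helper, not a real API):

        static int bar_prepare(struct device *dev)
        {
                if (bar_device_busy(dev))       /* illustrative helper */
                        return -EAGAIN;         /* retried, not fatal */
                return 0;
        }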
@@ -1065,7 +1076,6 @@ int dpm_suspend_start(pm_message_t state) | |||
1065 | { | 1076 | { |
1066 | int error; | 1077 | int error; |
1067 | 1078 | ||
1068 | might_sleep(); | ||
1069 | error = dpm_prepare(state); | 1079 | error = dpm_prepare(state); |
1070 | if (!error) | 1080 | if (!error) |
1071 | error = dpm_suspend(state); | 1081 | error = dpm_suspend(state); |
@@ -1085,8 +1095,9 @@ EXPORT_SYMBOL_GPL(__suspend_report_result); | |||
1085 | * @dev: Device to wait for. | 1095 | * @dev: Device to wait for. |
1086 | * @subordinate: Device that needs to wait for @dev. | 1096 | * @subordinate: Device that needs to wait for @dev. |
1087 | */ | 1097 | */ |
1088 | void device_pm_wait_for_dev(struct device *subordinate, struct device *dev) | 1098 | int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) |
1089 | { | 1099 | { |
1090 | dpm_wait(dev, subordinate->power.async_suspend); | 1100 | dpm_wait(dev, subordinate->power.async_suspend); |
1101 | return async_error; | ||
1091 | } | 1102 | } |
1092 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); | 1103 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); |
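With device_pm_wait_for_dev() now returning async_error, an asynchronously suspended device can both order itself after another device and observe whether that device's suspend failed. A hedged sketch (the baz_* names and the companion pointer are hypothetical):

        static int baz_suspend(struct device *dev)
        {
                struct baz *b = dev_get_drvdata(dev);

                /* Wait for the companion's async suspend; bail if it failed. */
                if (device_pm_wait_for_dev(dev, b->companion))
                        return -EBUSY;

                return baz_do_suspend(b);       /* illustrative helper */
        }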
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c new file mode 100644 index 000000000000..56a6899f5e9e --- /dev/null +++ b/drivers/base/power/opp.c | |||
@@ -0,0 +1,628 @@ | |||
1 | /* | ||
2 | * Generic OPP Interface | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/cpufreq.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/rculist.h> | ||
22 | #include <linux/rcupdate.h> | ||
23 | #include <linux/opp.h> | ||
24 | |||
25 | /* | ||
26 | * Internal data structure organization with the OPP layer library is as | ||
27 | * follows: | ||
28 | * dev_opp_list (root) | ||
29 | * |- device 1 (represents voltage domain 1) | ||
30 | * | |- opp 1 (availability, freq, voltage) | ||
31 | * | |- opp 2 .. | ||
32 | * ... ... | ||
33 | * | `- opp n .. | ||
34 | * |- device 2 (represents the next voltage domain) | ||
35 | * ... | ||
36 | * `- device m (represents mth voltage domain) | ||
37 | * device 1, 2.. are represented by the dev_opp structure while each opp | ||
38 | * is represented by the opp structure. | ||
39 | */ | ||
40 | |||
41 | /** | ||
42 | * struct opp - Generic OPP description structure | ||
43 | * @node: opp list node. The nodes are maintained throughout the lifetime | ||
44 | * of boot. It is expected that only an optimal set of OPPs is | ||
45 | * added to the library by the SoC framework. | ||
46 | * RCU usage: the opp list is traversed with RCU locks. Node | ||
47 | * modification is possible at run time, hence the modifications | ||
48 | * are protected by the dev_opp_list_lock for integrity. | ||
49 | * IMPORTANT: the opp nodes should be maintained in increasing | ||
50 | * order. | ||
51 | * @available: true/false - marks whether this OPP is available or not | ||
52 | * @rate: Frequency in hertz | ||
53 | * @u_volt: Nominal voltage in microvolts corresponding to this OPP | ||
54 | * @dev_opp: points back to the device_opp struct this opp belongs to | ||
55 | * | ||
56 | * This structure stores the OPP information for a given device. | ||
57 | */ | ||
58 | struct opp { | ||
59 | struct list_head node; | ||
60 | |||
61 | bool available; | ||
62 | unsigned long rate; | ||
63 | unsigned long u_volt; | ||
64 | |||
65 | struct device_opp *dev_opp; | ||
66 | }; | ||
67 | |||
68 | /** | ||
69 | * struct device_opp - Device opp structure | ||
70 | * @node: list node - contains the devices with OPPs that | ||
71 | * have been registered. Nodes once added are not modified in this | ||
72 | * list. | ||
73 | * RCU usage: nodes are not modified in the list of device_opp, | ||
74 | * however addition is possible and is secured by dev_opp_list_lock | ||
75 | * @dev: device pointer | ||
76 | * @opp_list: list of opps | ||
77 | * | ||
78 | * This is an internal data structure maintaining the link to opps attached to | ||
79 | * a device. This structure is not meant to be shared with users as it is | ||
80 | * meant for bookkeeping and is private to the OPP library | ||
81 | */ | ||
82 | struct device_opp { | ||
83 | struct list_head node; | ||
84 | |||
85 | struct device *dev; | ||
86 | struct list_head opp_list; | ||
87 | }; | ||
88 | |||
89 | /* | ||
90 | * The root of the list of all devices. All device_opp structures branch off | ||
91 | * from here, with each device_opp containing the list of opp it supports in | ||
92 | * various states of availability. | ||
93 | */ | ||
94 | static LIST_HEAD(dev_opp_list); | ||
95 | /* Lock to allow exclusive modification to the device and opp lists */ | ||
96 | static DEFINE_MUTEX(dev_opp_list_lock); | ||
97 | |||
98 | /** | ||
99 | * find_device_opp() - find device_opp struct using device pointer | ||
100 | * @dev: device pointer used to lookup device OPPs | ||
101 | * | ||
102 | * Search the list of device OPPs for one containing the matching device. Does | ||
103 | * an RCU reader operation to grab the pointer needed. | ||
104 | * | ||
105 | * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or | ||
106 | * -EINVAL based on type of error. | ||
107 | * | ||
108 | * Locking: This function must be called under rcu_read_lock(). device_opp | ||
109 | * is an RCU-protected pointer. This means that device_opp is valid as long | ||
110 | * as we are under RCU lock. | ||
111 | */ | ||
112 | static struct device_opp *find_device_opp(struct device *dev) | ||
113 | { | ||
114 | struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); | ||
115 | |||
116 | if (unlikely(IS_ERR_OR_NULL(dev))) { | ||
117 | pr_err("%s: Invalid parameters\n", __func__); | ||
118 | return ERR_PTR(-EINVAL); | ||
119 | } | ||
120 | |||
121 | list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { | ||
122 | if (tmp_dev_opp->dev == dev) { | ||
123 | dev_opp = tmp_dev_opp; | ||
124 | break; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | return dev_opp; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * opp_get_voltage() - Gets the voltage corresponding to an available opp | ||
133 | * @opp: opp for which the voltage has to be returned | ||
134 | * | ||
135 | * Return the voltage in microvolts corresponding to the opp, else | ||
136 | * return 0 | ||
137 | * | ||
138 | * Locking: This function must be called under rcu_read_lock(). opp is an | ||
139 | * RCU-protected pointer. This means that an opp which could have been fetched by | ||
140 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | ||
141 | * under RCU lock. The pointer returned by the opp_find_freq family must be | ||
142 | * used in the same section as the usage of this function with the pointer | ||
143 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | ||
144 | * pointer. | ||
145 | */ | ||
146 | unsigned long opp_get_voltage(struct opp *opp) | ||
147 | { | ||
148 | struct opp *tmp_opp; | ||
149 | unsigned long v = 0; | ||
150 | |||
151 | tmp_opp = rcu_dereference(opp); | ||
152 | if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) | ||
153 | pr_err("%s: Invalid parameters\n", __func__); | ||
154 | else | ||
155 | v = tmp_opp->u_volt; | ||
156 | |||
157 | return v; | ||
158 | } | ||
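Because the returned opp pointer is only meaningful inside the RCU read-side section, a typical lookup brackets the find and the opp_get_voltage() call together and copies the scalars out before unlocking. A minimal sketch, assuming dev already has an OPP table registered (foo_set_target is a hypothetical name):

        static int foo_set_target(struct device *dev)
        {
                unsigned long freq = 1000000000;        /* target: 1 GHz */
                unsigned long volt;
                struct opp *opp;

                rcu_read_lock();
                opp = opp_find_freq_ceil(dev, &freq);   /* freq rounded up to an OPP */
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        return PTR_ERR(opp);
                }
                volt = opp_get_voltage(opp);            /* copy out while still locked */
                rcu_read_unlock();

                /* opp is stale here; program hardware using freq and volt */
                return 0;
        }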
159 | |||
160 | /** | ||
161 | * opp_get_freq() - Gets the frequency corresponding to an available opp | ||
162 | * @opp: opp for which the frequency has to be returned | ||
163 | * | ||
164 | * Return the frequency in hertz corresponding to the opp, else | ||
165 | * return 0 | ||
166 | * | ||
167 | * Locking: This function must be called under rcu_read_lock(). opp is an | ||
168 | * RCU-protected pointer. This means that an opp which could have been fetched by | ||
169 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | ||
170 | * under RCU lock. The pointer returned by the opp_find_freq family must be | ||
171 | * used in the same section as the usage of this function with the pointer | ||
172 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | ||
173 | * pointer. | ||
174 | */ | ||
175 | unsigned long opp_get_freq(struct opp *opp) | ||
176 | { | ||
177 | struct opp *tmp_opp; | ||
178 | unsigned long f = 0; | ||
179 | |||
180 | tmp_opp = rcu_dereference(opp); | ||
181 | if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) | ||
182 | pr_err("%s: Invalid parameters\n", __func__); | ||
183 | else | ||
184 | f = tmp_opp->rate; | ||
185 | |||
186 | return f; | ||
187 | } | ||
188 | |||
189 | /** | ||
190 | * opp_get_opp_count() - Get number of opps available in the opp list | ||
191 | * @dev: device for which we do this operation | ||
192 | * | ||
193 | * This function returns the number of available opps if there are any, | ||
194 | * 0 if there are none, or the corresponding error value. | ||
195 | * | ||
196 | * Locking: This function must be called under rcu_read_lock(). This function | ||
197 | * internally references two RCU-protected structures: device_opp and opp, which | ||
198 | * are safe as long as we are under a common RCU locked section. | ||
199 | */ | ||
200 | int opp_get_opp_count(struct device *dev) | ||
201 | { | ||
202 | struct device_opp *dev_opp; | ||
203 | struct opp *temp_opp; | ||
204 | int count = 0; | ||
205 | |||
206 | dev_opp = find_device_opp(dev); | ||
207 | if (IS_ERR(dev_opp)) { | ||
208 | int r = PTR_ERR(dev_opp); | ||
209 | dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); | ||
210 | return r; | ||
211 | } | ||
212 | |||
213 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | ||
214 | if (temp_opp->available) | ||
215 | count++; | ||
216 | } | ||
217 | |||
218 | return count; | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * opp_find_freq_exact() - search for an exact frequency | ||
223 | * @dev: device for which we do this operation | ||
224 | * @freq: frequency to search for | ||
225 | * @available: true/false - match for available opp | ||
226 | * | ||
227 | * Searches for exact match in the opp list and returns pointer to the matching | ||
228 | * opp if found, else returns an ERR_PTR value in case of error, which should | ||
229 | * be handled using IS_ERR(). | ||
230 | * | ||
231 | * Note: available is a modifier for the search. If available=true, then the | ||
232 | * match is for an exact frequency which is available in the stored OPP | ||
233 | * table. If false, the match is for an exact frequency which is not available. | ||
234 | * | ||
235 | * This provides a mechanism to enable an opp which is not available currently | ||
236 | * or the opposite as well. | ||
237 | * | ||
238 | * Locking: This function must be called under rcu_read_lock(). opp is an | ||
239 | * RCU-protected pointer. The reason is that the opp pointer which is | ||
240 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
241 | * under the locked area. The pointer returned must be used prior to unlocking | ||
242 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
243 | */ | ||
244 | struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | ||
245 | bool available) | ||
246 | { | ||
247 | struct device_opp *dev_opp; | ||
248 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | ||
249 | |||
250 | dev_opp = find_device_opp(dev); | ||
251 | if (IS_ERR(dev_opp)) { | ||
252 | int r = PTR_ERR(dev_opp); | ||
253 | dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); | ||
254 | return ERR_PTR(r); | ||
255 | } | ||
256 | |||
257 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | ||
258 | if (temp_opp->available == available && | ||
259 | temp_opp->rate == freq) { | ||
260 | opp = temp_opp; | ||
261 | break; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | return opp; | ||
266 | } | ||
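The available argument makes exact lookup usable as an existence test for OPPs that are currently disabled, for instance before re-enabling one via opp_enable() (defined later in this file). A sketch, with the 800 MHz value purely illustrative:

        struct opp *opp;
        bool found;

        rcu_read_lock();
        opp = opp_find_freq_exact(dev, 800000000, false); /* disabled OPPs only */
        found = !IS_ERR(opp);
        rcu_read_unlock();

        if (found)
                opp_enable(dev, 800000000);     /* may sleep; never under RCU */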
267 | |||
268 | /** | ||
269 | * opp_find_freq_ceil() - Search for a rounded ceil freq | ||
270 | * @dev: device for which we do this operation | ||
271 | * @freq: Start frequency | ||
272 | * | ||
273 | * Search for the matching ceil *available* OPP from a starting freq | ||
274 | * for a device. | ||
275 | * | ||
276 | * Returns the matching *opp and refreshes *freq accordingly, else returns | ||
277 | * an ERR_PTR value in case of error, which should be handled using IS_ERR(). | ||
278 | * | ||
279 | * Locking: This function must be called under rcu_read_lock(). opp is an | ||
280 | * RCU-protected pointer. The reason is that the opp pointer which is | ||
281 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
282 | * under the locked area. The pointer returned must be used prior to unlocking | ||
283 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
284 | */ | ||
285 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | ||
286 | { | ||
287 | struct device_opp *dev_opp; | ||
288 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | ||
289 | |||
290 | if (!dev || !freq) { | ||
291 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | ||
292 | return ERR_PTR(-EINVAL); | ||
293 | } | ||
294 | |||
295 | dev_opp = find_device_opp(dev); | ||
296 | if (IS_ERR(dev_opp)) | ||
297 | return opp; | ||
298 | |||
299 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | ||
300 | if (temp_opp->available && temp_opp->rate >= *freq) { | ||
301 | opp = temp_opp; | ||
302 | *freq = opp->rate; | ||
303 | break; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | return opp; | ||
308 | } | ||
309 | |||
310 | /** | ||
311 | * opp_find_freq_floor() - Search for a rounded floor freq | ||
312 | * @dev: device for which we do this operation | ||
313 | * @freq: Start frequency | ||
314 | * | ||
315 | * Search for the matching floor *available* OPP from a starting freq | ||
316 | * for a device. | ||
317 | * | ||
318 | * Returns the matching *opp and refreshes *freq accordingly, else returns | ||
319 | * an ERR_PTR value in case of error, which should be handled using IS_ERR(). | ||
320 | * | ||
321 | * Locking: This function must be called under rcu_read_lock(). opp is an | ||
322 | * RCU-protected pointer. The reason is that the opp pointer which is | ||
323 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
324 | * under the locked area. The pointer returned must be used prior to unlocking | ||
325 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
326 | */ | ||
327 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | ||
328 | { | ||
329 | struct device_opp *dev_opp; | ||
330 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | ||
331 | |||
332 | if (!dev || !freq) { | ||
333 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | ||
334 | return ERR_PTR(-EINVAL); | ||
335 | } | ||
336 | |||
337 | dev_opp = find_device_opp(dev); | ||
338 | if (IS_ERR(dev_opp)) | ||
339 | return opp; | ||
340 | |||
341 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | ||
342 | if (temp_opp->available) { | ||
343 | /* go to the next node, before choosing prev */ | ||
344 | if (temp_opp->rate > *freq) | ||
345 | break; | ||
346 | else | ||
347 | opp = temp_opp; | ||
348 | } | ||
349 | } | ||
350 | if (!IS_ERR(opp)) | ||
351 | *freq = opp->rate; | ||
352 | |||
353 | return opp; | ||
354 | } | ||
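Taken together, ceil and floor map onto the two scaling directions: ceil picks the lowest available OPP at or above the requested rate, floor the highest at or below it, and both refresh *freq to the rate actually chosen. A short sketch for the scale-down case:

        unsigned long freq = 600000000;         /* cap: 600 MHz */
        struct opp *opp;

        rcu_read_lock();
        opp = opp_find_freq_floor(dev, &freq);
        rcu_read_unlock();

        if (!IS_ERR(opp))
                pr_info("selected rate: %lu Hz\n", freq); /* freq was updated */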
355 | |||
356 | /** | ||
357 | * opp_add() - Add an OPP entry from a table definition | ||
358 | * @dev: device for which we do this operation | ||
359 | * @freq: Frequency in Hz for this OPP | ||
360 | * @u_volt: Voltage in uVolts for this OPP | ||
361 | * | ||
362 | * This function adds an opp definition to the opp list and returns status. | ||
363 | * The opp is made available by default and it can be controlled using | ||
364 | * opp_enable/disable functions. | ||
365 | * | ||
366 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
367 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
368 | * to keep the integrity of the internal data structures. Callers should ensure | ||
369 | * that this function is *NOT* called under RCU protection or in contexts where | ||
370 | * mutex cannot be locked. | ||
371 | */ | ||
372 | int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | ||
373 | { | ||
374 | struct device_opp *dev_opp = NULL; | ||
375 | struct opp *opp, *new_opp; | ||
376 | struct list_head *head; | ||
377 | |||
378 | /* allocate new OPP node */ | ||
379 | new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); | ||
380 | if (!new_opp) { | ||
381 | dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); | ||
382 | return -ENOMEM; | ||
383 | } | ||
384 | |||
385 | /* Hold our list modification lock here */ | ||
386 | mutex_lock(&dev_opp_list_lock); | ||
387 | |||
388 | /* Check for existing list for 'dev' */ | ||
389 | dev_opp = find_device_opp(dev); | ||
390 | if (IS_ERR(dev_opp)) { | ||
391 | /* | ||
392 | * Allocate a new device OPP table. In the infrequent case | ||
393 | * where a new device needs to be added, we pay this | ||
394 | * penalty. | ||
395 | */ | ||
396 | dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL); | ||
397 | if (!dev_opp) { | ||
398 | mutex_unlock(&dev_opp_list_lock); | ||
399 | kfree(new_opp); | ||
400 | dev_warn(dev, | ||
401 | "%s: Unable to create device OPP structure\n", | ||
402 | __func__); | ||
403 | return -ENOMEM; | ||
404 | } | ||
405 | |||
406 | dev_opp->dev = dev; | ||
407 | INIT_LIST_HEAD(&dev_opp->opp_list); | ||
408 | |||
409 | /* Secure the device list modification */ | ||
410 | list_add_rcu(&dev_opp->node, &dev_opp_list); | ||
411 | } | ||
412 | |||
413 | /* populate the opp table */ | ||
414 | new_opp->dev_opp = dev_opp; | ||
415 | new_opp->rate = freq; | ||
416 | new_opp->u_volt = u_volt; | ||
417 | new_opp->available = true; | ||
418 | |||
419 | /* Insert new OPP in order of increasing frequency */ | ||
420 | head = &dev_opp->opp_list; | ||
421 | list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { | ||
422 | if (new_opp->rate < opp->rate) | ||
423 | break; | ||
424 | else | ||
425 | head = &opp->node; | ||
426 | } | ||
427 | |||
428 | list_add_rcu(&new_opp->node, head); | ||
429 | mutex_unlock(&dev_opp_list_lock); | ||
430 | |||
431 | return 0; | ||
432 | } | ||
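SoC initialization code would typically register the whole table through repeated opp_add() calls; the sorted insert above keeps the list ordered by frequency regardless of registration order. A sketch assuming a hypothetical board-specific table:

        static const struct {
                unsigned long hz, uv;
        } board_opps[] = {                      /* hypothetical values */
                {  300000000, 1025000 },
                {  600000000, 1200000 },
                { 1000000000, 1325000 },
        };

        static int __init board_opp_init(struct device *dev)
        {
                int i, ret;

                for (i = 0; i < ARRAY_SIZE(board_opps); i++) {
                        ret = opp_add(dev, board_opps[i].hz, board_opps[i].uv);
                        if (ret)
                                return ret;
                }
                return 0;
        }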
433 | |||
434 | /** | ||
435 | * opp_set_availability() - helper to set the availability of an opp | ||
436 | * @dev: device for which we do this operation | ||
437 | * @freq: OPP frequency to modify availability | ||
438 | * @availability_req: availability status requested for this opp | ||
439 | * | ||
440 | * Set the availability of an OPP with an RCU operation, opp_{enable,disable} | ||
441 | * share a common logic which is isolated here. | ||
442 | * | ||
443 | * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the | ||
444 | * copy operation, and 0 if no modification was needed or the modification | ||
445 | * was successful. | ||
446 | * | ||
447 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
448 | * Hence this function internally uses RCU updater strategy with mutex locks to | ||
449 | * keep the integrity of the internal data structures. Callers should ensure | ||
450 | * that this function is *NOT* called under RCU protection or in contexts where | ||
451 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
452 | */ | ||
453 | static int opp_set_availability(struct device *dev, unsigned long freq, | ||
454 | bool availability_req) | ||
455 | { | ||
456 | struct device_opp *tmp_dev_opp, *dev_opp = NULL; | ||
457 | struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); | ||
458 | int r = 0; | ||
459 | |||
460 | /* keep the node allocated */ | ||
461 | new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); | ||
462 | if (!new_opp) { | ||
463 | dev_warn(dev, "%s: Unable to create OPP\n", __func__); | ||
464 | return -ENOMEM; | ||
465 | } | ||
466 | |||
467 | mutex_lock(&dev_opp_list_lock); | ||
468 | |||
469 | /* Find the device_opp */ | ||
470 | list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) { | ||
471 | if (dev == tmp_dev_opp->dev) { | ||
472 | dev_opp = tmp_dev_opp; | ||
473 | break; | ||
474 | } | ||
475 | } | ||
476 | if (IS_ERR(dev_opp)) { | ||
477 | r = PTR_ERR(dev_opp); | ||
478 | dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); | ||
479 | goto unlock; | ||
480 | } | ||
481 | |||
482 | /* Do we have the frequency? */ | ||
483 | list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) { | ||
484 | if (tmp_opp->rate == freq) { | ||
485 | opp = tmp_opp; | ||
486 | break; | ||
487 | } | ||
488 | } | ||
489 | if (IS_ERR(opp)) { | ||
490 | r = PTR_ERR(opp); | ||
491 | goto unlock; | ||
492 | } | ||
493 | |||
494 | /* Is update really needed? */ | ||
495 | if (opp->available == availability_req) | ||
496 | goto unlock; | ||
497 | /* copy the old data over */ | ||
498 | *new_opp = *opp; | ||
499 | |||
500 | /* plug in new node */ | ||
501 | new_opp->available = availability_req; | ||
502 | |||
503 | list_replace_rcu(&opp->node, &new_opp->node); | ||
504 | mutex_unlock(&dev_opp_list_lock); | ||
505 | synchronize_rcu(); | ||
506 | |||
507 | /* clean up old opp */ | ||
508 | new_opp = opp; | ||
509 | goto out; | ||
510 | |||
511 | unlock: | ||
512 | mutex_unlock(&dev_opp_list_lock); | ||
513 | out: | ||
514 | kfree(new_opp); | ||
515 | return r; | ||
516 | } | ||
517 | |||
518 | /** | ||
519 | * opp_enable() - Enable a specific OPP | ||
520 | * @dev: device for which we do this operation | ||
521 | * @freq: OPP frequency to enable | ||
522 | * | ||
523 | * Enables a provided opp. If the operation is valid, this returns 0, else the | ||
524 | * corresponding error value. It is meant to be used to make an OPP available | ||
525 | * again after it was temporarily made unavailable with opp_disable. | ||
526 | * | ||
527 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
528 | * Hence this function indirectly uses RCU and mutex locks to keep the | ||
529 | * integrity of the internal data structures. Callers should ensure that | ||
530 | * this function is *NOT* called under RCU protection or in contexts where | ||
531 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
532 | */ | ||
533 | int opp_enable(struct device *dev, unsigned long freq) | ||
534 | { | ||
535 | return opp_set_availability(dev, freq, true); | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * opp_disable() - Disable a specific OPP | ||
540 | * @dev: device for which we do this operation | ||
541 | * @freq: OPP frequency to disable | ||
542 | * | ||
543 | * Disables a provided opp. If the operation is valid, this returns | ||
544 | * 0, else the corresponding error value. It is meant as a temporary | ||
545 | * mechanism for users to make an OPP unavailable until the circumstances are | ||
546 | * right to make it available again (with a call to opp_enable). | ||
547 | * | ||
548 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
549 | * Hence this function indirectly uses RCU and mutex locks to keep the | ||
550 | * integrity of the internal data structures. Callers should ensure that | ||
551 | * this function is *NOT* called under RCU protection or in contexts where | ||
552 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
553 | */ | ||
554 | int opp_disable(struct device *dev, unsigned long freq) | ||
555 | { | ||
556 | return opp_set_availability(dev, freq, false); | ||
557 | } | ||
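A common pairing is thermal or errata handling that hides the top OPP and restores it later. Both calls take the list mutex, and opp_set_availability() calls synchronize_rcu(), so they may sleep and must never run under rcu_read_lock(). A sketch, with the policy itself hypothetical:

        opp_disable(dev, 1000000000);   /* running hot: hide the 1 GHz OPP */
        /* ... later, once the thermal condition clears ... */
        opp_enable(dev, 1000000000);    /* restore it */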
558 | |||
559 | #ifdef CONFIG_CPU_FREQ | ||
560 | /** | ||
561 | * opp_init_cpufreq_table() - create a cpufreq table for a device | ||
562 | * @dev: device for which we do this operation | ||
563 | * @table: Cpufreq table returned back to caller | ||
564 | * | ||
565 | * Generate a cpufreq table for a provided device; this assumes that the | ||
566 | * opp list is already initialized and ready for usage. | ||
567 | * | ||
568 | * This function allocates required memory for the cpufreq table. It is | ||
569 | * expected that the caller does the required maintenance such as freeing | ||
570 | * the table as required. | ||
571 | * | ||
572 | * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM | ||
573 | * if no memory available for the operation (table is not populated), returns 0 | ||
574 | * if successful and table is populated. | ||
575 | * | ||
576 | * WARNING: It is important for the callers to ensure refreshing their copy of | ||
577 | * the table if any of the opp_{add,enable,disable} functions are invoked meanwhile. | ||
578 | * | ||
579 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
580 | * To simplify the logic, we pretend we are the updater and hold the relevant | ||
581 | * mutex here. Callers should ensure that this function is *NOT* called under | ||
582 | * RCU protection or in contexts where mutex locking cannot be used. | ||
583 | */ | ||
584 | int opp_init_cpufreq_table(struct device *dev, | ||
585 | struct cpufreq_frequency_table **table) | ||
586 | { | ||
587 | struct device_opp *dev_opp; | ||
588 | struct opp *opp; | ||
589 | struct cpufreq_frequency_table *freq_table; | ||
590 | int i = 0; | ||
591 | |||
592 | /* Pretend as if I am an updater */ | ||
593 | mutex_lock(&dev_opp_list_lock); | ||
594 | |||
595 | dev_opp = find_device_opp(dev); | ||
596 | if (IS_ERR(dev_opp)) { | ||
597 | int r = PTR_ERR(dev_opp); | ||
598 | mutex_unlock(&dev_opp_list_lock); | ||
599 | dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r); | ||
600 | return r; | ||
601 | } | ||
602 | |||
603 | freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * | ||
604 | (opp_get_opp_count(dev) + 1), GFP_KERNEL); | ||
605 | if (!freq_table) { | ||
606 | mutex_unlock(&dev_opp_list_lock); | ||
607 | dev_warn(dev, "%s: Unable to allocate frequency table\n", | ||
608 | __func__); | ||
609 | return -ENOMEM; | ||
610 | } | ||
611 | |||
612 | list_for_each_entry(opp, &dev_opp->opp_list, node) { | ||
613 | if (opp->available) { | ||
614 | freq_table[i].index = i; | ||
615 | freq_table[i].frequency = opp->rate / 1000; | ||
616 | i++; | ||
617 | } | ||
618 | } | ||
619 | mutex_unlock(&dev_opp_list_lock); | ||
620 | |||
621 | freq_table[i].index = i; | ||
622 | freq_table[i].frequency = CPUFREQ_TABLE_END; | ||
623 | |||
624 | *table = &freq_table[0]; | ||
625 | |||
626 | return 0; | ||
627 | } | ||
628 | #endif /* CONFIG_CPU_FREQ */ | ||
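A cpufreq driver would call opp_init_cpufreq_table() once the OPP table is populated and hand the result to the cpufreq core; since the caller owns the allocation, it must free the table itself on the error path. A hedged sketch (foo_cpufreq_init is hypothetical; cpufreq_frequency_table_cpuinfo() is the standard table-validation helper):

        static int foo_cpufreq_init(struct cpufreq_policy *policy,
                                    struct device *dev)
        {
                struct cpufreq_frequency_table *table;
                int ret;

                ret = opp_init_cpufreq_table(dev, &table);
                if (ret)
                        return ret;

                ret = cpufreq_frequency_table_cpuinfo(policy, table);
                if (ret)
                        kfree(table);           /* caller frees on failure */
                return ret;
        }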
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index c0bd03c83b9c..f2a25f18fde7 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -34,6 +34,7 @@ extern void device_pm_move_last(struct device *); | |||
34 | 34 | ||
35 | static inline void device_pm_init(struct device *dev) | 35 | static inline void device_pm_init(struct device *dev) |
36 | { | 36 | { |
37 | spin_lock_init(&dev->power.lock); | ||
37 | pm_runtime_init(dev); | 38 | pm_runtime_init(dev); |
38 | } | 39 | } |
39 | 40 | ||
@@ -57,18 +58,18 @@ static inline void device_pm_move_last(struct device *dev) {} | |||
57 | * sysfs.c | 58 | * sysfs.c |
58 | */ | 59 | */ |
59 | 60 | ||
60 | extern int dpm_sysfs_add(struct device *); | 61 | extern int dpm_sysfs_add(struct device *dev); |
61 | extern void dpm_sysfs_remove(struct device *); | 62 | extern void dpm_sysfs_remove(struct device *dev); |
63 | extern void rpm_sysfs_remove(struct device *dev); | ||
64 | extern int wakeup_sysfs_add(struct device *dev); | ||
65 | extern void wakeup_sysfs_remove(struct device *dev); | ||
62 | 66 | ||
63 | #else /* CONFIG_PM */ | 67 | #else /* CONFIG_PM */ |
64 | 68 | ||
65 | static inline int dpm_sysfs_add(struct device *dev) | 69 | static inline int dpm_sysfs_add(struct device *dev) { return 0; } |
66 | { | 70 | static inline void dpm_sysfs_remove(struct device *dev) {} |
67 | return 0; | 71 | static inline void rpm_sysfs_remove(struct device *dev) {} |
68 | } | 72 | static inline int wakeup_sysfs_add(struct device *dev) { return 0; } |
69 | 73 | static inline void wakeup_sysfs_remove(struct device *dev) {} | |
70 | static inline void dpm_sysfs_remove(struct device *dev) | ||
71 | { | ||
72 | } | ||
73 | 74 | ||
74 | #endif | 75 | #endif |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index b78c401ffa73..0d4587b15c55 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -2,17 +2,55 @@ | |||
2 | * drivers/base/power/runtime.c - Helper functions for device run-time PM | 2 | * drivers/base/power/runtime.c - Helper functions for device run-time PM |
3 | * | 3 | * |
4 | * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | 4 | * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. |
5 | * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> | ||
5 | * | 6 | * |
6 | * This file is released under the GPLv2. | 7 | * This file is released under the GPLv2. |
7 | */ | 8 | */ |
8 | 9 | ||
9 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
10 | #include <linux/pm_runtime.h> | 11 | #include <linux/pm_runtime.h> |
11 | #include <linux/jiffies.h> | 12 | #include "power.h" |
12 | 13 | ||
13 | static int __pm_runtime_resume(struct device *dev, bool from_wq); | 14 | static int rpm_resume(struct device *dev, int rpmflags); |
14 | static int __pm_request_idle(struct device *dev); | 15 | static int rpm_suspend(struct device *dev, int rpmflags); |
15 | static int __pm_request_resume(struct device *dev); | 16 | |
17 | /** | ||
18 | * update_pm_runtime_accounting - Update the time accounting of power states | ||
19 | * @dev: Device to update the accounting for | ||
20 | * | ||
21 | * In order to be able to have time accounting of the various power states | ||
22 | * (as used by programs such as PowerTOP to show the effectiveness of runtime | ||
23 | * PM), we need to track the time spent in each state. | ||
24 | * update_pm_runtime_accounting must be called each time before the | ||
25 | * runtime_status field is updated, to account the time in the old state | ||
26 | * correctly. | ||
27 | */ | ||
28 | void update_pm_runtime_accounting(struct device *dev) | ||
29 | { | ||
30 | unsigned long now = jiffies; | ||
31 | int delta; | ||
32 | |||
33 | delta = now - dev->power.accounting_timestamp; | ||
34 | |||
35 | if (delta < 0) | ||
36 | delta = 0; | ||
37 | |||
38 | dev->power.accounting_timestamp = now; | ||
39 | |||
40 | if (dev->power.disable_depth > 0) | ||
41 | return; | ||
42 | |||
43 | if (dev->power.runtime_status == RPM_SUSPENDED) | ||
44 | dev->power.suspended_jiffies += delta; | ||
45 | else | ||
46 | dev->power.active_jiffies += delta; | ||
47 | } | ||
48 | |||
49 | static void __update_runtime_status(struct device *dev, enum rpm_status status) | ||
50 | { | ||
51 | update_pm_runtime_accounting(dev); | ||
52 | dev->power.runtime_status = status; | ||
53 | } | ||
16 | 54 | ||
17 | /** | 55 | /** |
18 | * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. | 56 | * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. |
@@ -40,62 +78,156 @@ static void pm_runtime_cancel_pending(struct device *dev) | |||
40 | dev->power.request = RPM_REQ_NONE; | 78 | dev->power.request = RPM_REQ_NONE; |
41 | } | 79 | } |
42 | 80 | ||
43 | /** | 81 | /* |
44 | * __pm_runtime_idle - Notify device bus type if the device can be suspended. | 82 | * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time. |
45 | * @dev: Device to notify the bus type about. | 83 | * @dev: Device to handle. |
46 | * | 84 | * |
47 | * This function must be called under dev->power.lock with interrupts disabled. | 85 | * Compute the autosuspend-delay expiration time based on the device's |
86 | * power.last_busy time. If the delay has already expired or is disabled | ||
87 | * (negative) or the power.use_autosuspend flag isn't set, return 0. | ||
88 | * Otherwise return the expiration time in jiffies (adjusted to be nonzero). | ||
89 | * | ||
90 | * This function may be called either with or without dev->power.lock held. | ||
91 | * Either way it can be racy, since power.last_busy may be updated at any time. | ||
48 | */ | 92 | */ |
49 | static int __pm_runtime_idle(struct device *dev) | 93 | unsigned long pm_runtime_autosuspend_expiration(struct device *dev) |
50 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | 94 | { |
95 | int autosuspend_delay; | ||
96 | long elapsed; | ||
97 | unsigned long last_busy; | ||
98 | unsigned long expires = 0; | ||
99 | |||
100 | if (!dev->power.use_autosuspend) | ||
101 | goto out; | ||
102 | |||
103 | autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); | ||
104 | if (autosuspend_delay < 0) | ||
105 | goto out; | ||
106 | |||
107 | last_busy = ACCESS_ONCE(dev->power.last_busy); | ||
108 | elapsed = jiffies - last_busy; | ||
109 | if (elapsed < 0) | ||
110 | goto out; /* jiffies has wrapped around. */ | ||
111 | |||
112 | /* | ||
113 | * If the autosuspend_delay is >= 1 second, align the timer by rounding | ||
114 | * up to the nearest second. | ||
115 | */ | ||
116 | expires = last_busy + msecs_to_jiffies(autosuspend_delay); | ||
117 | if (autosuspend_delay >= 1000) | ||
118 | expires = round_jiffies(expires); | ||
119 | expires += !expires; | ||
120 | if (elapsed >= expires - last_busy) | ||
121 | expires = 0; /* Already expired. */ | ||
122 | |||
123 | out: | ||
124 | return expires; | ||
125 | } | ||
126 | EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); | ||
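The expiration is computed from power.last_busy, so a driver opting into autosuspend refreshes that timestamp around I/O and drops its usage count with the autosuspend variant of put. A minimal sketch, assuming the autosuspend helpers introduced with this same series (pm_runtime_set_autosuspend_delay() and friends):

        /* Setup, e.g. at probe time: suspend 2 s after last activity. */
        pm_runtime_set_autosuspend_delay(dev, 2000);
        pm_runtime_use_autosuspend(dev);

        /* After each burst of I/O: */
        pm_runtime_mark_last_busy(dev);         /* refreshes power.last_busy */
        pm_runtime_put_autosuspend(dev);        /* suspend once delay expires */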
127 | |||
128 | /** | ||
129 | * rpm_check_suspend_allowed - Test whether a device may be suspended. | ||
130 | * @dev: Device to test. | ||
131 | */ | ||
132 | static int rpm_check_suspend_allowed(struct device *dev) | ||
51 | { | 133 | { |
52 | int retval = 0; | 134 | int retval = 0; |
53 | 135 | ||
54 | if (dev->power.runtime_error) | 136 | if (dev->power.runtime_error) |
55 | retval = -EINVAL; | 137 | retval = -EINVAL; |
56 | else if (dev->power.idle_notification) | ||
57 | retval = -EINPROGRESS; | ||
58 | else if (atomic_read(&dev->power.usage_count) > 0 | 138 | else if (atomic_read(&dev->power.usage_count) > 0 |
59 | || dev->power.disable_depth > 0 | 139 | || dev->power.disable_depth > 0) |
60 | || dev->power.runtime_status != RPM_ACTIVE) | ||
61 | retval = -EAGAIN; | 140 | retval = -EAGAIN; |
62 | else if (!pm_children_suspended(dev)) | 141 | else if (!pm_children_suspended(dev)) |
63 | retval = -EBUSY; | 142 | retval = -EBUSY; |
143 | |||
144 | /* Pending resume requests take precedence over suspends. */ | ||
145 | else if ((dev->power.deferred_resume | ||
146 | && dev->power.runtime_status == RPM_SUSPENDING) | ||
147 | || (dev->power.request_pending | ||
148 | && dev->power.request == RPM_REQ_RESUME)) | ||
149 | retval = -EAGAIN; | ||
150 | else if (dev->power.runtime_status == RPM_SUSPENDED) | ||
151 | retval = 1; | ||
152 | |||
153 | return retval; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * rpm_idle - Notify device bus type if the device can be suspended. | ||
158 | * @dev: Device to notify the bus type about. | ||
159 | * @rpmflags: Flag bits. | ||
160 | * | ||
161 | * Check if the device's run-time PM status allows it to be suspended. If | ||
162 | * another idle notification has been started earlier, return immediately. If | ||
163 | * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise | ||
164 | * run the ->runtime_idle() callback directly. | ||
165 | * | ||
166 | * This function must be called under dev->power.lock with interrupts disabled. | ||
167 | */ | ||
168 | static int rpm_idle(struct device *dev, int rpmflags) | ||
169 | { | ||
170 | int (*callback)(struct device *); | ||
171 | int retval; | ||
172 | |||
173 | retval = rpm_check_suspend_allowed(dev); | ||
174 | if (retval < 0) | ||
175 | ; /* Conditions are wrong. */ | ||
176 | |||
177 | /* Idle notifications are allowed only in the RPM_ACTIVE state. */ | ||
178 | else if (dev->power.runtime_status != RPM_ACTIVE) | ||
179 | retval = -EAGAIN; | ||
180 | |||
181 | /* | ||
182 | * Any pending request other than an idle notification takes | ||
183 | * precedence over us, except that the timer may be running. | ||
184 | */ | ||
185 | else if (dev->power.request_pending && | ||
186 | dev->power.request > RPM_REQ_IDLE) | ||
187 | retval = -EAGAIN; | ||
188 | |||
189 | /* Act as though RPM_NOWAIT is always set. */ | ||
190 | else if (dev->power.idle_notification) | ||
191 | retval = -EINPROGRESS; | ||
64 | if (retval) | 192 | if (retval) |
65 | goto out; | 193 | goto out; |
66 | 194 | ||
67 | if (dev->power.request_pending) { | 195 | /* Pending requests need to be canceled. */ |
68 | /* | 196 | dev->power.request = RPM_REQ_NONE; |
69 | * If an idle notification request is pending, cancel it. Any | 197 | |
70 | * other pending request takes precedence over us. | 198 | if (dev->power.no_callbacks) { |
71 | */ | 199 | /* Assume ->runtime_idle() callback would have suspended. */ |
72 | if (dev->power.request == RPM_REQ_IDLE) { | 200 | retval = rpm_suspend(dev, rpmflags); |
73 | dev->power.request = RPM_REQ_NONE; | 201 | goto out; |
74 | } else if (dev->power.request != RPM_REQ_NONE) { | 202 | } |
75 | retval = -EAGAIN; | 203 | |
76 | goto out; | 204 | /* Carry out an asynchronous or a synchronous idle notification. */ |
205 | if (rpmflags & RPM_ASYNC) { | ||
206 | dev->power.request = RPM_REQ_IDLE; | ||
207 | if (!dev->power.request_pending) { | ||
208 | dev->power.request_pending = true; | ||
209 | queue_work(pm_wq, &dev->power.work); | ||
77 | } | 210 | } |
211 | goto out; | ||
78 | } | 212 | } |
79 | 213 | ||
80 | dev->power.idle_notification = true; | 214 | dev->power.idle_notification = true; |
81 | 215 | ||
82 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) { | 216 | if (dev->pwr_domain) |
83 | spin_unlock_irq(&dev->power.lock); | 217 | callback = dev->pwr_domain->ops.runtime_idle; |
84 | 218 | else if (dev->type && dev->type->pm) | |
85 | dev->bus->pm->runtime_idle(dev); | 219 | callback = dev->type->pm->runtime_idle; |
86 | 220 | else if (dev->class && dev->class->pm) | |
87 | spin_lock_irq(&dev->power.lock); | 221 | callback = dev->class->pm->runtime_idle; |
88 | } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { | 222 | else if (dev->bus && dev->bus->pm) |
89 | spin_unlock_irq(&dev->power.lock); | 223 | callback = dev->bus->pm->runtime_idle; |
90 | 224 | else | |
91 | dev->type->pm->runtime_idle(dev); | 225 | callback = NULL; |
92 | 226 | ||
93 | spin_lock_irq(&dev->power.lock); | 227 | if (callback) { |
94 | } else if (dev->class && dev->class->pm | ||
95 | && dev->class->pm->runtime_idle) { | ||
96 | spin_unlock_irq(&dev->power.lock); | 228 | spin_unlock_irq(&dev->power.lock); |
97 | 229 | ||
98 | dev->class->pm->runtime_idle(dev); | 230 | callback(dev); |
99 | 231 | ||
100 | spin_lock_irq(&dev->power.lock); | 232 | spin_lock_irq(&dev->power.lock); |
101 | } | 233 | } |
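The idle path now resolves a single callback using the same precedence introduced throughout this patch: power domain, then type, then class, then bus. A typical ->runtime_idle implementation simply turns the notification into a suspend attempt; a hypothetical sketch:

        static int foo_runtime_idle(struct device *dev)
        {
                pm_runtime_suspend(dev);        /* or pm_schedule_suspend() */
                return 0;
        }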
@@ -108,113 +240,102 @@ static int __pm_runtime_idle(struct device *dev) | |||
108 | } | 240 | } |
109 | 241 | ||
110 | /** | 242 | /** |
111 | * pm_runtime_idle - Notify device bus type if the device can be suspended. | 243 | * rpm_callback - Run a given runtime PM callback for a given device. |
112 | * @dev: Device to notify the bus type about. | 244 | * @cb: Runtime PM callback to run. |
245 | * @dev: Device to run the callback for. | ||
113 | */ | 246 | */ |
114 | int pm_runtime_idle(struct device *dev) | 247 | static int rpm_callback(int (*cb)(struct device *), struct device *dev) |
248 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | ||
115 | { | 249 | { |
116 | int retval; | 250 | int retval; |
117 | 251 | ||
118 | spin_lock_irq(&dev->power.lock); | 252 | if (!cb) |
119 | retval = __pm_runtime_idle(dev); | 253 | return -ENOSYS; |
120 | spin_unlock_irq(&dev->power.lock); | ||
121 | |||
122 | return retval; | ||
123 | } | ||
124 | EXPORT_SYMBOL_GPL(pm_runtime_idle); | ||
125 | |||
126 | |||
127 | /** | ||
128 | * update_pm_runtime_accounting - Update the time accounting of power states | ||
129 | * @dev: Device to update the accounting for | ||
130 | * | ||
131 | * In order to be able to have time accounting of the various power states | ||
132 | * (as used by programs such as PowerTOP to show the effectiveness of runtime | ||
133 | * PM), we need to track the time spent in each state. | ||
134 | * update_pm_runtime_accounting must be called each time before the | ||
135 | * runtime_status field is updated, to account the time in the old state | ||
136 | * correctly. | ||
137 | */ | ||
138 | void update_pm_runtime_accounting(struct device *dev) | ||
139 | { | ||
140 | unsigned long now = jiffies; | ||
141 | int delta; | ||
142 | |||
143 | delta = now - dev->power.accounting_timestamp; | ||
144 | |||
145 | if (delta < 0) | ||
146 | delta = 0; | ||
147 | 254 | ||
148 | dev->power.accounting_timestamp = now; | 255 | if (dev->power.irq_safe) { |
149 | 256 | retval = cb(dev); | |
150 | if (dev->power.disable_depth > 0) | 257 | } else { |
151 | return; | 258 | spin_unlock_irq(&dev->power.lock); |
152 | 259 | ||
153 | if (dev->power.runtime_status == RPM_SUSPENDED) | 260 | retval = cb(dev); |
154 | dev->power.suspended_jiffies += delta; | ||
155 | else | ||
156 | dev->power.active_jiffies += delta; | ||
157 | } | ||
158 | 261 | ||
159 | static void __update_runtime_status(struct device *dev, enum rpm_status status) | 262 | spin_lock_irq(&dev->power.lock); |
160 | { | 263 | } |
161 | update_pm_runtime_accounting(dev); | 264 | dev->power.runtime_error = retval; |
162 | dev->power.runtime_status = status; | 265 | return retval; |
163 | } | 266 | } |
164 | 267 | ||
165 | /** | 268 | /** |
166 | * __pm_runtime_suspend - Carry out run-time suspend of given device. | 269 | * rpm_suspend - Carry out run-time suspend of given device. |
167 | * @dev: Device to suspend. | 270 | * @dev: Device to suspend. |
168 | * @from_wq: If set, the function has been called via pm_wq. | 271 | * @rpmflags: Flag bits. |
169 | * | 272 | * |
170 | * Check if the device can be suspended and run the ->runtime_suspend() callback | 273 | * Check if the device's run-time PM status allows it to be suspended. If |
171 | * provided by its bus type. If another suspend has been started earlier, wait | 274 | * another suspend has been started earlier, either return immediately or wait |
172 | * for it to finish. If an idle notification or suspend request is pending or | 275 | * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a |
173 | * scheduled, cancel it. | 276 | * pending idle notification. If the RPM_ASYNC flag is set then queue a |
277 | * suspend request; otherwise run the ->runtime_suspend() callback directly. | ||
278 | * If a deferred resume was requested while the callback was running then carry | ||
279 | * it out; otherwise send an idle notification for the device (if the suspend | ||
280 | * failed) or for its parent (if the suspend succeeded). | ||
174 | * | 281 | * |
175 | * This function must be called under dev->power.lock with interrupts disabled. | 282 | * This function must be called under dev->power.lock with interrupts disabled. |
176 | */ | 283 | */ |
177 | int __pm_runtime_suspend(struct device *dev, bool from_wq) | 284 | static int rpm_suspend(struct device *dev, int rpmflags) |
178 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | 285 | __releases(&dev->power.lock) __acquires(&dev->power.lock) |
179 | { | 286 | { |
287 | int (*callback)(struct device *); | ||
180 | struct device *parent = NULL; | 288 | struct device *parent = NULL; |
181 | bool notify = false; | 289 | int retval; |
182 | int retval = 0; | ||
183 | 290 | ||
184 | dev_dbg(dev, "__pm_runtime_suspend()%s!\n", | 291 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); |
185 | from_wq ? " from workqueue" : ""); | ||
186 | 292 | ||
187 | repeat: | 293 | repeat: |
188 | if (dev->power.runtime_error) { | 294 | retval = rpm_check_suspend_allowed(dev); |
189 | retval = -EINVAL; | ||
190 | goto out; | ||
191 | } | ||
192 | 295 | ||
193 | /* Pending resume requests take precedence over us. */ | 296 | if (retval < 0) |
194 | if (dev->power.request_pending | 297 | ; /* Conditions are wrong. */ |
195 | && dev->power.request == RPM_REQ_RESUME) { | 298 | |
299 | /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ | ||
300 | else if (dev->power.runtime_status == RPM_RESUMING && | ||
301 | !(rpmflags & RPM_ASYNC)) | ||
196 | retval = -EAGAIN; | 302 | retval = -EAGAIN; |
303 | if (retval) | ||
197 | goto out; | 304 | goto out; |
305 | |||
306 | /* If the autosuspend_delay time hasn't expired yet, reschedule. */ | ||
307 | if ((rpmflags & RPM_AUTO) | ||
308 | && dev->power.runtime_status != RPM_SUSPENDING) { | ||
309 | unsigned long expires = pm_runtime_autosuspend_expiration(dev); | ||
310 | |||
311 | if (expires != 0) { | ||
312 | /* Pending requests need to be canceled. */ | ||
313 | dev->power.request = RPM_REQ_NONE; | ||
314 | |||
315 | /* | ||
316 | * Optimization: If the timer is already running and is | ||
317 | * set to expire at or before the autosuspend delay, | ||
318 | * avoid the overhead of resetting it. Just let it | ||
319 | * expire; pm_suspend_timer_fn() will take care of the | ||
320 | * rest. | ||
321 | */ | ||
322 | if (!(dev->power.timer_expires && time_before_eq( | ||
323 | dev->power.timer_expires, expires))) { | ||
324 | dev->power.timer_expires = expires; | ||
325 | mod_timer(&dev->power.suspend_timer, expires); | ||
326 | } | ||
327 | dev->power.timer_autosuspends = 1; | ||
328 | goto out; | ||
329 | } | ||
198 | } | 330 | } |
199 | 331 | ||
200 | /* Other scheduled or pending requests need to be canceled. */ | 332 | /* Other scheduled or pending requests need to be canceled. */ |
201 | pm_runtime_cancel_pending(dev); | 333 | pm_runtime_cancel_pending(dev); |
202 | 334 | ||
203 | if (dev->power.runtime_status == RPM_SUSPENDED) | ||
204 | retval = 1; | ||
205 | else if (dev->power.runtime_status == RPM_RESUMING | ||
206 | || dev->power.disable_depth > 0 | ||
207 | || atomic_read(&dev->power.usage_count) > 0) | ||
208 | retval = -EAGAIN; | ||
209 | else if (!pm_children_suspended(dev)) | ||
210 | retval = -EBUSY; | ||
211 | if (retval) | ||
212 | goto out; | ||
213 | |||
214 | if (dev->power.runtime_status == RPM_SUSPENDING) { | 335 | if (dev->power.runtime_status == RPM_SUSPENDING) { |
215 | DEFINE_WAIT(wait); | 336 | DEFINE_WAIT(wait); |
216 | 337 | ||
217 | if (from_wq) { | 338 | if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { |
218 | retval = -EINPROGRESS; | 339 | retval = -EINPROGRESS; |
219 | goto out; | 340 | goto out; |
220 | } | 341 | } |
@@ -236,46 +357,44 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
236 | goto repeat; | 357 | goto repeat; |
237 | } | 358 | } |
238 | 359 | ||
239 | __update_runtime_status(dev, RPM_SUSPENDING); | ||
240 | dev->power.deferred_resume = false; | 360 | dev->power.deferred_resume = false; |
361 | if (dev->power.no_callbacks) | ||
362 | goto no_callback; /* Assume success. */ | ||
363 | |||
364 | /* Carry out an asynchronous or a synchronous suspend. */ | ||
365 | if (rpmflags & RPM_ASYNC) { | ||
366 | dev->power.request = (rpmflags & RPM_AUTO) ? | ||
367 | RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND; | ||
368 | if (!dev->power.request_pending) { | ||
369 | dev->power.request_pending = true; | ||
370 | queue_work(pm_wq, &dev->power.work); | ||
371 | } | ||
372 | goto out; | ||
373 | } | ||
241 | 374 | ||
242 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { | 375 | __update_runtime_status(dev, RPM_SUSPENDING); |
243 | spin_unlock_irq(&dev->power.lock); | ||
244 | |||
245 | retval = dev->bus->pm->runtime_suspend(dev); | ||
246 | |||
247 | spin_lock_irq(&dev->power.lock); | ||
248 | dev->power.runtime_error = retval; | ||
249 | } else if (dev->type && dev->type->pm | ||
250 | && dev->type->pm->runtime_suspend) { | ||
251 | spin_unlock_irq(&dev->power.lock); | ||
252 | |||
253 | retval = dev->type->pm->runtime_suspend(dev); | ||
254 | |||
255 | spin_lock_irq(&dev->power.lock); | ||
256 | dev->power.runtime_error = retval; | ||
257 | } else if (dev->class && dev->class->pm | ||
258 | && dev->class->pm->runtime_suspend) { | ||
259 | spin_unlock_irq(&dev->power.lock); | ||
260 | |||
261 | retval = dev->class->pm->runtime_suspend(dev); | ||
262 | 376 | ||
263 | spin_lock_irq(&dev->power.lock); | 377 | if (dev->pwr_domain) |
264 | dev->power.runtime_error = retval; | 378 | callback = dev->pwr_domain->ops.runtime_suspend; |
265 | } else { | 379 | else if (dev->type && dev->type->pm) |
266 | retval = -ENOSYS; | 380 | callback = dev->type->pm->runtime_suspend; |
267 | } | 381 | else if (dev->class && dev->class->pm) |
382 | callback = dev->class->pm->runtime_suspend; | ||
383 | else if (dev->bus && dev->bus->pm) | ||
384 | callback = dev->bus->pm->runtime_suspend; | ||
385 | else | ||
386 | callback = NULL; | ||
268 | 387 | ||
388 | retval = rpm_callback(callback, dev); | ||
269 | if (retval) { | 389 | if (retval) { |
270 | __update_runtime_status(dev, RPM_ACTIVE); | 390 | __update_runtime_status(dev, RPM_ACTIVE); |
271 | if (retval == -EAGAIN || retval == -EBUSY) { | 391 | dev->power.deferred_resume = 0; |
272 | if (dev->power.timer_expires == 0) | 392 | if (retval == -EAGAIN || retval == -EBUSY) |
273 | notify = true; | ||
274 | dev->power.runtime_error = 0; | 393 | dev->power.runtime_error = 0; |
275 | } else { | 394 | else |
276 | pm_runtime_cancel_pending(dev); | 395 | pm_runtime_cancel_pending(dev); |
277 | } | ||
278 | } else { | 396 | } else { |
397 | no_callback: | ||
279 | __update_runtime_status(dev, RPM_SUSPENDED); | 398 | __update_runtime_status(dev, RPM_SUSPENDED); |
280 | pm_runtime_deactivate_timer(dev); | 399 | pm_runtime_deactivate_timer(dev); |
281 | 400 | ||
@@ -287,89 +406,86 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
287 | wake_up_all(&dev->power.wait_queue); | 406 | wake_up_all(&dev->power.wait_queue); |
288 | 407 | ||
289 | if (dev->power.deferred_resume) { | 408 | if (dev->power.deferred_resume) { |
290 | __pm_runtime_resume(dev, false); | 409 | rpm_resume(dev, 0); |
291 | retval = -EAGAIN; | 410 | retval = -EAGAIN; |
292 | goto out; | 411 | goto out; |
293 | } | 412 | } |
294 | 413 | ||
295 | if (notify) | 414 | /* Maybe the parent is now able to suspend. */ |
296 | __pm_runtime_idle(dev); | 415 | if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { |
297 | 416 | spin_unlock(&dev->power.lock); | |
298 | if (parent && !parent->power.ignore_children) { | ||
299 | spin_unlock_irq(&dev->power.lock); | ||
300 | 417 | ||
301 | pm_request_idle(parent); | 418 | spin_lock(&parent->power.lock); |
419 | rpm_idle(parent, RPM_ASYNC); | ||
420 | spin_unlock(&parent->power.lock); | ||
302 | 421 | ||
303 | spin_lock_irq(&dev->power.lock); | 422 | spin_lock(&dev->power.lock); |
304 | } | 423 | } |
305 | 424 | ||
306 | out: | 425 | out: |
307 | dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval); | 426 | dev_dbg(dev, "%s returns %d\n", __func__, retval); |
308 | 427 | ||
309 | return retval; | 428 | return retval; |
310 | } | 429 | } |
311 | 430 | ||
312 | /** | 431 | /** |
313 | * pm_runtime_suspend - Carry out run-time suspend of given device. | 432 | * rpm_resume - Carry out run-time resume of given device. |
314 | * @dev: Device to suspend. | ||
315 | */ | ||
316 | int pm_runtime_suspend(struct device *dev) | ||
317 | { | ||
318 | int retval; | ||
319 | |||
320 | spin_lock_irq(&dev->power.lock); | ||
321 | retval = __pm_runtime_suspend(dev, false); | ||
322 | spin_unlock_irq(&dev->power.lock); | ||
323 | |||
324 | return retval; | ||
325 | } | ||
326 | EXPORT_SYMBOL_GPL(pm_runtime_suspend); | ||
327 | |||
328 | /** | ||
329 | * __pm_runtime_resume - Carry out run-time resume of given device. | ||
330 | * @dev: Device to resume. | 433 | * @dev: Device to resume. |
331 | * @from_wq: If set, the function has been called via pm_wq. | 434 | * @rpmflags: Flag bits. |
332 | * | 435 | * |
333 | * Check if the device can be woken up and run the ->runtime_resume() callback | 436 | * Check if the device's run-time PM status allows it to be resumed. Cancel |
334 | * provided by its bus type. If another resume has been started earlier, wait | 437 | * any scheduled or pending requests. If another resume has been started |
335 | * for it to finish. If there's a suspend running in parallel with this | 438 | * earlier, either return immediately or wait for it to finish, depending on the |
336 | * function, wait for it to finish and resume the device. Cancel any scheduled | 439 | * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in |
337 | * or pending requests. | 440 | * parallel with this function, either tell the other process to resume after |
441 | * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC | ||
442 | * flag is set then queue a resume request; otherwise run the | ||
443 | * ->runtime_resume() callback directly. Queue an idle notification for the | ||
444 | * device if the resume succeeded. | ||
338 | * | 445 | * |
339 | * This function must be called under dev->power.lock with interrupts disabled. | 446 | * This function must be called under dev->power.lock with interrupts disabled. |
340 | */ | 447 | */ |
341 | int __pm_runtime_resume(struct device *dev, bool from_wq) | 448 | static int rpm_resume(struct device *dev, int rpmflags) |
342 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | 449 | __releases(&dev->power.lock) __acquires(&dev->power.lock) |
343 | { | 450 | { |
451 | int (*callback)(struct device *); | ||
344 | struct device *parent = NULL; | 452 | struct device *parent = NULL; |
345 | int retval = 0; | 453 | int retval = 0; |
346 | 454 | ||
347 | dev_dbg(dev, "__pm_runtime_resume()%s!\n", | 455 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); |
348 | from_wq ? " from workqueue" : ""); | ||
349 | 456 | ||
350 | repeat: | 457 | repeat: |
351 | if (dev->power.runtime_error) { | 458 | if (dev->power.runtime_error) |
352 | retval = -EINVAL; | 459 | retval = -EINVAL; |
460 | else if (dev->power.disable_depth > 0) | ||
461 | retval = -EAGAIN; | ||
462 | if (retval) | ||
353 | goto out; | 463 | goto out; |
354 | } | ||
355 | 464 | ||
356 | pm_runtime_cancel_pending(dev); | 465 | /* |
466 | * Other scheduled or pending requests need to be canceled. Small | ||
467 | * optimization: If an autosuspend timer is running, leave it running | ||
468 | * rather than cancelling it now only to restart it again in the near | ||
469 | * future. | ||
470 | */ | ||
471 | dev->power.request = RPM_REQ_NONE; | ||
472 | if (!dev->power.timer_autosuspends) | ||
473 | pm_runtime_deactivate_timer(dev); | ||
357 | 474 | ||
358 | if (dev->power.runtime_status == RPM_ACTIVE) | 475 | if (dev->power.runtime_status == RPM_ACTIVE) { |
359 | retval = 1; | 476 | retval = 1; |
360 | else if (dev->power.disable_depth > 0) | ||
361 | retval = -EAGAIN; | ||
362 | if (retval) | ||
363 | goto out; | 477 | goto out; |
478 | } | ||
364 | 479 | ||
365 | if (dev->power.runtime_status == RPM_RESUMING | 480 | if (dev->power.runtime_status == RPM_RESUMING |
366 | || dev->power.runtime_status == RPM_SUSPENDING) { | 481 | || dev->power.runtime_status == RPM_SUSPENDING) { |
367 | DEFINE_WAIT(wait); | 482 | DEFINE_WAIT(wait); |
368 | 483 | ||
369 | if (from_wq) { | 484 | if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { |
370 | if (dev->power.runtime_status == RPM_SUSPENDING) | 485 | if (dev->power.runtime_status == RPM_SUSPENDING) |
371 | dev->power.deferred_resume = true; | 486 | dev->power.deferred_resume = true; |
372 | retval = -EINPROGRESS; | 487 | else |
488 | retval = -EINPROGRESS; | ||
373 | goto out; | 489 | goto out; |
374 | } | 490 | } |
375 | 491 | ||
@@ -391,12 +507,43 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
391 | goto repeat; | 507 | goto repeat; |
392 | } | 508 | } |
393 | 509 | ||
510 | /* | ||
511 | * See if we can skip waking up the parent. This is safe only if | ||
512 | * power.no_callbacks is set, because otherwise we don't know whether | ||
513 | * the resume will actually succeed. | ||
514 | */ | ||
515 | if (dev->power.no_callbacks && !parent && dev->parent) { | ||
516 | spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); | ||
517 | if (dev->parent->power.disable_depth > 0 | ||
518 | || dev->parent->power.ignore_children | ||
519 | || dev->parent->power.runtime_status == RPM_ACTIVE) { | ||
520 | atomic_inc(&dev->parent->power.child_count); | ||
521 | spin_unlock(&dev->parent->power.lock); | ||
522 | goto no_callback; /* Assume success. */ | ||
523 | } | ||
524 | spin_unlock(&dev->parent->power.lock); | ||
525 | } | ||
526 | |||
527 | /* Carry out an asynchronous or a synchronous resume. */ | ||
528 | if (rpmflags & RPM_ASYNC) { | ||
529 | dev->power.request = RPM_REQ_RESUME; | ||
530 | if (!dev->power.request_pending) { | ||
531 | dev->power.request_pending = true; | ||
532 | queue_work(pm_wq, &dev->power.work); | ||
533 | } | ||
534 | retval = 0; | ||
535 | goto out; | ||
536 | } | ||
537 | |||
394 | if (!parent && dev->parent) { | 538 | if (!parent && dev->parent) { |
395 | /* | 539 | /* |
396 | * Increment the parent's resume counter and resume it if | 540 | * Increment the parent's usage counter and resume it if |
397 | * necessary. | 541 | * necessary. Not needed if dev is irq-safe; then the |
542 | * parent is permanently resumed. | ||
398 | */ | 543 | */ |
399 | parent = dev->parent; | 544 | parent = dev->parent; |
545 | if (dev->power.irq_safe) | ||
546 | goto skip_parent; | ||
400 | spin_unlock(&dev->power.lock); | 547 | spin_unlock(&dev->power.lock); |
401 | 548 | ||
402 | pm_runtime_get_noresume(parent); | 549 | pm_runtime_get_noresume(parent); |
@@ -408,7 +555,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
408 | */ | 555 | */ |
409 | if (!parent->power.disable_depth | 556 | if (!parent->power.disable_depth |
410 | && !parent->power.ignore_children) { | 557 | && !parent->power.ignore_children) { |
411 | __pm_runtime_resume(parent, false); | 558 | rpm_resume(parent, 0); |
412 | if (parent->power.runtime_status != RPM_ACTIVE) | 559 | if (parent->power.runtime_status != RPM_ACTIVE) |
413 | retval = -EBUSY; | 560 | retval = -EBUSY; |
414 | } | 561 | } |
@@ -419,40 +566,30 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
419 | goto out; | 566 | goto out; |
420 | goto repeat; | 567 | goto repeat; |
421 | } | 568 | } |
569 | skip_parent: | ||
422 | 570 | ||
423 | __update_runtime_status(dev, RPM_RESUMING); | 571 | if (dev->power.no_callbacks) |
424 | 572 | goto no_callback; /* Assume success. */ | |
425 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) { | ||
426 | spin_unlock_irq(&dev->power.lock); | ||
427 | |||
428 | retval = dev->bus->pm->runtime_resume(dev); | ||
429 | |||
430 | spin_lock_irq(&dev->power.lock); | ||
431 | dev->power.runtime_error = retval; | ||
432 | } else if (dev->type && dev->type->pm | ||
433 | && dev->type->pm->runtime_resume) { | ||
434 | spin_unlock_irq(&dev->power.lock); | ||
435 | |||
436 | retval = dev->type->pm->runtime_resume(dev); | ||
437 | |||
438 | spin_lock_irq(&dev->power.lock); | ||
439 | dev->power.runtime_error = retval; | ||
440 | } else if (dev->class && dev->class->pm | ||
441 | && dev->class->pm->runtime_resume) { | ||
442 | spin_unlock_irq(&dev->power.lock); | ||
443 | 573 | ||
444 | retval = dev->class->pm->runtime_resume(dev); | 574 | __update_runtime_status(dev, RPM_RESUMING); |
445 | 575 | ||
446 | spin_lock_irq(&dev->power.lock); | 576 | if (dev->pwr_domain) |
447 | dev->power.runtime_error = retval; | 577 | callback = dev->pwr_domain->ops.runtime_resume; |
448 | } else { | 578 | else if (dev->type && dev->type->pm) |
449 | retval = -ENOSYS; | 579 | callback = dev->type->pm->runtime_resume; |
450 | } | 580 | else if (dev->class && dev->class->pm) |
581 | callback = dev->class->pm->runtime_resume; | ||
582 | else if (dev->bus && dev->bus->pm) | ||
583 | callback = dev->bus->pm->runtime_resume; | ||
584 | else | ||
585 | callback = NULL; | ||
451 | 586 | ||
587 | retval = rpm_callback(callback, dev); | ||
452 | if (retval) { | 588 | if (retval) { |
453 | __update_runtime_status(dev, RPM_SUSPENDED); | 589 | __update_runtime_status(dev, RPM_SUSPENDED); |
454 | pm_runtime_cancel_pending(dev); | 590 | pm_runtime_cancel_pending(dev); |
455 | } else { | 591 | } else { |
592 | no_callback: | ||
456 | __update_runtime_status(dev, RPM_ACTIVE); | 593 | __update_runtime_status(dev, RPM_ACTIVE); |
457 | if (parent) | 594 | if (parent) |
458 | atomic_inc(&parent->power.child_count); | 595 | atomic_inc(&parent->power.child_count); |
@@ -460,10 +597,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
460 | wake_up_all(&dev->power.wait_queue); | 597 | wake_up_all(&dev->power.wait_queue); |
461 | 598 | ||
462 | if (!retval) | 599 | if (!retval) |
463 | __pm_request_idle(dev); | 600 | rpm_idle(dev, RPM_ASYNC); |
464 | 601 | ||
465 | out: | 602 | out: |
466 | if (parent) { | 603 | if (parent && !dev->power.irq_safe) { |
467 | spin_unlock_irq(&dev->power.lock); | 604 | spin_unlock_irq(&dev->power.lock); |
468 | 605 | ||
469 | pm_runtime_put(parent); | 606 | pm_runtime_put(parent); |
@@ -471,28 +608,12 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
471 | spin_lock_irq(&dev->power.lock); | 608 | spin_lock_irq(&dev->power.lock); |
472 | } | 609 | } |
473 | 610 | ||
474 | dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval); | 611 | dev_dbg(dev, "%s returns %d\n", __func__, retval); |
475 | 612 | ||
476 | return retval; | 613 | return retval; |
477 | } | 614 | } |
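
Since rpm_resume() is now static, drivers reach it only through the exported entry points further down. A minimal sketch of the usual calling pattern, assuming a hypothetical foo driver (the foo_* names and the pm_runtime.h wrappers are assumptions, not part of this hunk):

	/* needs <linux/pm_runtime.h> */
	static int foo_do_io(struct foo_device *foo)
	{
		int ret;

		/* pm_runtime_get_sync() amounts to rpm_resume(dev, RPM_GET_PUT) */
		ret = pm_runtime_get_sync(foo->dev);
		if (ret < 0) {
			pm_runtime_put_noidle(foo->dev);	/* rebalance the usage count */
			return ret;
		}
		foo_hw_transfer(foo);		/* device is RPM_ACTIVE here */
		pm_runtime_put(foo->dev);	/* queues an idle notification */
		return 0;
	}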
478 | 615 | ||
479 | /** | 616 | /** |
480 | * pm_runtime_resume - Carry out run-time resume of given device. | ||
481 | * @dev: Device to resume. | ||
482 | */ | ||
483 | int pm_runtime_resume(struct device *dev) | ||
484 | { | ||
485 | int retval; | ||
486 | |||
487 | spin_lock_irq(&dev->power.lock); | ||
488 | retval = __pm_runtime_resume(dev, false); | ||
489 | spin_unlock_irq(&dev->power.lock); | ||
490 | |||
491 | return retval; | ||
492 | } | ||
493 | EXPORT_SYMBOL_GPL(pm_runtime_resume); | ||
494 | |||
495 | /** | ||
496 | * pm_runtime_work - Universal run-time PM work function. | 617 | * pm_runtime_work - Universal run-time PM work function. |
497 | * @work: Work structure used for scheduling the execution of this function. | 618 | * @work: Work structure used for scheduling the execution of this function. |
498 | * | 619 | * |
@@ -517,13 +638,16 @@ static void pm_runtime_work(struct work_struct *work) | |||
517 | case RPM_REQ_NONE: | 638 | case RPM_REQ_NONE: |
518 | break; | 639 | break; |
519 | case RPM_REQ_IDLE: | 640 | case RPM_REQ_IDLE: |
520 | __pm_runtime_idle(dev); | 641 | rpm_idle(dev, RPM_NOWAIT); |
521 | break; | 642 | break; |
522 | case RPM_REQ_SUSPEND: | 643 | case RPM_REQ_SUSPEND: |
523 | __pm_runtime_suspend(dev, true); | 644 | rpm_suspend(dev, RPM_NOWAIT); |
645 | break; | ||
646 | case RPM_REQ_AUTOSUSPEND: | ||
647 | rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); | ||
524 | break; | 648 | break; |
525 | case RPM_REQ_RESUME: | 649 | case RPM_REQ_RESUME: |
526 | __pm_runtime_resume(dev, true); | 650 | rpm_resume(dev, RPM_NOWAIT); |
527 | break; | 651 | break; |
528 | } | 652 | } |
529 | 653 | ||
@@ -532,117 +656,10 @@ static void pm_runtime_work(struct work_struct *work) | |||
532 | } | 656 | } |
533 | 657 | ||
534 | /** | 658 | /** |
535 | * __pm_request_idle - Submit an idle notification request for given device. | ||
536 | * @dev: Device to handle. | ||
537 | * | ||
538 | * Check if the device's run-time PM status is correct for suspending the device | ||
539 | * and queue up a request to run __pm_runtime_idle() for it. | ||
540 | * | ||
541 | * This function must be called under dev->power.lock with interrupts disabled. | ||
542 | */ | ||
543 | static int __pm_request_idle(struct device *dev) | ||
544 | { | ||
545 | int retval = 0; | ||
546 | |||
547 | if (dev->power.runtime_error) | ||
548 | retval = -EINVAL; | ||
549 | else if (atomic_read(&dev->power.usage_count) > 0 | ||
550 | || dev->power.disable_depth > 0 | ||
551 | || dev->power.runtime_status == RPM_SUSPENDED | ||
552 | || dev->power.runtime_status == RPM_SUSPENDING) | ||
553 | retval = -EAGAIN; | ||
554 | else if (!pm_children_suspended(dev)) | ||
555 | retval = -EBUSY; | ||
556 | if (retval) | ||
557 | return retval; | ||
558 | |||
559 | if (dev->power.request_pending) { | ||
560 | /* Any requests other than RPM_REQ_IDLE take precedence. */ | ||
561 | if (dev->power.request == RPM_REQ_NONE) | ||
562 | dev->power.request = RPM_REQ_IDLE; | ||
563 | else if (dev->power.request != RPM_REQ_IDLE) | ||
564 | retval = -EAGAIN; | ||
565 | return retval; | ||
566 | } | ||
567 | |||
568 | dev->power.request = RPM_REQ_IDLE; | ||
569 | dev->power.request_pending = true; | ||
570 | queue_work(pm_wq, &dev->power.work); | ||
571 | |||
572 | return retval; | ||
573 | } | ||
574 | |||
575 | /** | ||
576 | * pm_request_idle - Submit an idle notification request for given device. | ||
577 | * @dev: Device to handle. | ||
578 | */ | ||
579 | int pm_request_idle(struct device *dev) | ||
580 | { | ||
581 | unsigned long flags; | ||
582 | int retval; | ||
583 | |||
584 | spin_lock_irqsave(&dev->power.lock, flags); | ||
585 | retval = __pm_request_idle(dev); | ||
586 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
587 | |||
588 | return retval; | ||
589 | } | ||
590 | EXPORT_SYMBOL_GPL(pm_request_idle); | ||
591 | |||
592 | /** | ||
593 | * __pm_request_suspend - Submit a suspend request for given device. | ||
594 | * @dev: Device to suspend. | ||
595 | * | ||
596 | * This function must be called under dev->power.lock with interrupts disabled. | ||
597 | */ | ||
598 | static int __pm_request_suspend(struct device *dev) | ||
599 | { | ||
600 | int retval = 0; | ||
601 | |||
602 | if (dev->power.runtime_error) | ||
603 | return -EINVAL; | ||
604 | |||
605 | if (dev->power.runtime_status == RPM_SUSPENDED) | ||
606 | retval = 1; | ||
607 | else if (atomic_read(&dev->power.usage_count) > 0 | ||
608 | || dev->power.disable_depth > 0) | ||
609 | retval = -EAGAIN; | ||
610 | else if (dev->power.runtime_status == RPM_SUSPENDING) | ||
611 | retval = -EINPROGRESS; | ||
612 | else if (!pm_children_suspended(dev)) | ||
613 | retval = -EBUSY; | ||
614 | if (retval < 0) | ||
615 | return retval; | ||
616 | |||
617 | pm_runtime_deactivate_timer(dev); | ||
618 | |||
619 | if (dev->power.request_pending) { | ||
620 | /* | ||
621 | * Pending resume requests take precedence over us, but we can | ||
622 | * overtake any other pending request. | ||
623 | */ | ||
624 | if (dev->power.request == RPM_REQ_RESUME) | ||
625 | retval = -EAGAIN; | ||
626 | else if (dev->power.request != RPM_REQ_SUSPEND) | ||
627 | dev->power.request = retval ? | ||
628 | RPM_REQ_NONE : RPM_REQ_SUSPEND; | ||
629 | return retval; | ||
630 | } else if (retval) { | ||
631 | return retval; | ||
632 | } | ||
633 | |||
634 | dev->power.request = RPM_REQ_SUSPEND; | ||
635 | dev->power.request_pending = true; | ||
636 | queue_work(pm_wq, &dev->power.work); | ||
637 | |||
638 | return 0; | ||
639 | } | ||
640 | |||
641 | /** | ||
642 | * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). | 659 | * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). |
643 | * @data: Device pointer passed by pm_schedule_suspend(). | 660 | * @data: Device pointer passed by pm_schedule_suspend(). |
644 | * | 661 | * |
645 | * Check if the time is right and execute __pm_request_suspend() in that case. | 662 | * Check if the time is right and queue a suspend request. |
646 | */ | 663 | */ |
647 | static void pm_suspend_timer_fn(unsigned long data) | 664 | static void pm_suspend_timer_fn(unsigned long data) |
648 | { | 665 | { |
@@ -656,7 +673,8 @@ static void pm_suspend_timer_fn(unsigned long data) | |||
656 | /* If 'expires' is after 'jiffies' we've been called too early. */ | 673 | /* If 'expires' is after 'jiffies' we've been called too early. */ |
657 | if (expires > 0 && !time_after(expires, jiffies)) { | 674 | if (expires > 0 && !time_after(expires, jiffies)) { |
658 | dev->power.timer_expires = 0; | 675 | dev->power.timer_expires = 0; |
659 | __pm_request_suspend(dev); | 676 | rpm_suspend(dev, dev->power.timer_autosuspends ? |
677 | (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); | ||
660 | } | 678 | } |
661 | 679 | ||
662 | spin_unlock_irqrestore(&dev->power.lock, flags); | 680 | spin_unlock_irqrestore(&dev->power.lock, flags); |
@@ -670,47 +688,25 @@ static void pm_suspend_timer_fn(unsigned long data) | |||
670 | int pm_schedule_suspend(struct device *dev, unsigned int delay) | 688 | int pm_schedule_suspend(struct device *dev, unsigned int delay) |
671 | { | 689 | { |
672 | unsigned long flags; | 690 | unsigned long flags; |
673 | int retval = 0; | 691 | int retval; |
674 | 692 | ||
675 | spin_lock_irqsave(&dev->power.lock, flags); | 693 | spin_lock_irqsave(&dev->power.lock, flags); |
676 | 694 | ||
677 | if (dev->power.runtime_error) { | ||
678 | retval = -EINVAL; | ||
679 | goto out; | ||
680 | } | ||
681 | |||
682 | if (!delay) { | 695 | if (!delay) { |
683 | retval = __pm_request_suspend(dev); | 696 | retval = rpm_suspend(dev, RPM_ASYNC); |
684 | goto out; | 697 | goto out; |
685 | } | 698 | } |
686 | 699 | ||
687 | pm_runtime_deactivate_timer(dev); | 700 | retval = rpm_check_suspend_allowed(dev); |
688 | |||
689 | if (dev->power.request_pending) { | ||
690 | /* | ||
691 | * Pending resume requests take precedence over us, but any | ||
692 | * other pending requests have to be canceled. | ||
693 | */ | ||
694 | if (dev->power.request == RPM_REQ_RESUME) { | ||
695 | retval = -EAGAIN; | ||
696 | goto out; | ||
697 | } | ||
698 | dev->power.request = RPM_REQ_NONE; | ||
699 | } | ||
700 | |||
701 | if (dev->power.runtime_status == RPM_SUSPENDED) | ||
702 | retval = 1; | ||
703 | else if (atomic_read(&dev->power.usage_count) > 0 | ||
704 | || dev->power.disable_depth > 0) | ||
705 | retval = -EAGAIN; | ||
706 | else if (!pm_children_suspended(dev)) | ||
707 | retval = -EBUSY; | ||
708 | if (retval) | 701 | if (retval) |
709 | goto out; | 702 | goto out; |
710 | 703 | ||
704 | /* Other scheduled or pending requests need to be canceled. */ | ||
705 | pm_runtime_cancel_pending(dev); | ||
706 | |||
711 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); | 707 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); |
712 | if (!dev->power.timer_expires) | 708 | dev->power.timer_expires += !dev->power.timer_expires; |
713 | dev->power.timer_expires = 1; | 709 | dev->power.timer_autosuspends = 0; |
714 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); | 710 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); |
715 | 711 | ||
716 | out: | 712 | out: |
@@ -721,103 +717,88 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) | |||
721 | EXPORT_SYMBOL_GPL(pm_schedule_suspend); | 717 | EXPORT_SYMBOL_GPL(pm_schedule_suspend); |
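
Where a device is likely to be needed again soon, the delayed form avoids a suspend/resume bounce. A hedged sketch (foo_transfer_done() is an invented call site):

	/* Ask the PM core to queue a suspend request five seconds from now. */
	static void foo_transfer_done(struct device *dev)
	{
		int ret = pm_schedule_suspend(dev, 5000);

		if (ret < 0)
			dev_warn(dev, "cannot schedule suspend: %d\n", ret);
	}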
722 | 718 | ||
723 | /** | 719 | /** |
724 | * pm_request_resume - Submit a resume request for given device. | 720 | * __pm_runtime_idle - Entry point for run-time idle operations. |
725 | * @dev: Device to resume. | 721 | * @dev: Device to send idle notification for. |
722 | * @rpmflags: Flag bits. | ||
726 | * | 723 | * |
727 | * This function must be called under dev->power.lock with interrupts disabled. | 724 | * If the RPM_GET_PUT flag is set, decrement the device's usage count and |
725 | * return immediately if it is larger than zero. Then carry out an idle | ||
726 | * notification, either synchronous or asynchronous. | ||
727 | * | ||
728 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | ||
728 | */ | 729 | */ |
729 | static int __pm_request_resume(struct device *dev) | 730 | int __pm_runtime_idle(struct device *dev, int rpmflags) |
730 | { | 731 | { |
731 | int retval = 0; | 732 | unsigned long flags; |
732 | 733 | int retval; | |
733 | if (dev->power.runtime_error) | ||
734 | return -EINVAL; | ||
735 | |||
736 | if (dev->power.runtime_status == RPM_ACTIVE) | ||
737 | retval = 1; | ||
738 | else if (dev->power.runtime_status == RPM_RESUMING) | ||
739 | retval = -EINPROGRESS; | ||
740 | else if (dev->power.disable_depth > 0) | ||
741 | retval = -EAGAIN; | ||
742 | if (retval < 0) | ||
743 | return retval; | ||
744 | |||
745 | pm_runtime_deactivate_timer(dev); | ||
746 | 734 | ||
747 | if (dev->power.runtime_status == RPM_SUSPENDING) { | 735 | if (rpmflags & RPM_GET_PUT) { |
748 | dev->power.deferred_resume = true; | 736 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
749 | return retval; | 737 | return 0; |
750 | } | ||
751 | if (dev->power.request_pending) { | ||
752 | /* If non-resume request is pending, we can overtake it. */ | ||
753 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; | ||
754 | return retval; | ||
755 | } | 738 | } |
756 | if (retval) | ||
757 | return retval; | ||
758 | 739 | ||
759 | dev->power.request = RPM_REQ_RESUME; | 740 | spin_lock_irqsave(&dev->power.lock, flags); |
760 | dev->power.request_pending = true; | 741 | retval = rpm_idle(dev, rpmflags); |
761 | queue_work(pm_wq, &dev->power.work); | 742 | spin_unlock_irqrestore(&dev->power.lock, flags); |
762 | 743 | ||
763 | return retval; | 744 | return retval; |
764 | } | 745 | } |
746 | EXPORT_SYMBOL_GPL(__pm_runtime_idle); | ||
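
The familiar helpers become thin wrappers that pick flag bits; a sketch of how the pm_runtime.h inlines are expected to map onto this entry point after the rework (the header, not this file, is authoritative):

	static inline int pm_runtime_idle(struct device *dev)
	{
		return __pm_runtime_idle(dev, 0);		/* synchronous */
	}

	static inline int pm_request_idle(struct device *dev)
	{
		return __pm_runtime_idle(dev, RPM_ASYNC);	/* queued on pm_wq */
	}

	static inline int pm_runtime_put(struct device *dev)
	{
		return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
	}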
765 | 747 | ||
766 | /** | 748 | /** |
767 | * pm_request_resume - Submit a resume request for given device. | 749 | * __pm_runtime_suspend - Entry point for run-time put/suspend operations. |
768 | * @dev: Device to resume. | 750 | * @dev: Device to suspend. |
751 | * @rpmflags: Flag bits. | ||
752 | * | ||
753 | * If the RPM_GET_PUT flag is set, decrement the device's usage count and | ||
754 | * return immediately if it is larger than zero. Then carry out a suspend, | ||
755 | * either synchronous or asynchronous. | ||
756 | * | ||
757 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | ||
769 | */ | 758 | */ |
770 | int pm_request_resume(struct device *dev) | 759 | int __pm_runtime_suspend(struct device *dev, int rpmflags) |
771 | { | 760 | { |
772 | unsigned long flags; | 761 | unsigned long flags; |
773 | int retval; | 762 | int retval; |
774 | 763 | ||
764 | if (rpmflags & RPM_GET_PUT) { | ||
765 | if (!atomic_dec_and_test(&dev->power.usage_count)) | ||
766 | return 0; | ||
767 | } | ||
768 | |||
775 | spin_lock_irqsave(&dev->power.lock, flags); | 769 | spin_lock_irqsave(&dev->power.lock, flags); |
776 | retval = __pm_request_resume(dev); | 770 | retval = rpm_suspend(dev, rpmflags); |
777 | spin_unlock_irqrestore(&dev->power.lock, flags); | 771 | spin_unlock_irqrestore(&dev->power.lock, flags); |
778 | 772 | ||
779 | return retval; | 773 | return retval; |
780 | } | 774 | } |
781 | EXPORT_SYMBOL_GPL(pm_request_resume); | 775 | EXPORT_SYMBOL_GPL(__pm_runtime_suspend); |
782 | 776 | ||
783 | /** | 777 | /** |
784 | * __pm_runtime_get - Reference count a device and wake it up, if necessary. | 778 | * __pm_runtime_resume - Entry point for run-time resume operations. |
785 | * @dev: Device to handle. | 779 | * @dev: Device to resume. |
786 | * @sync: If set and the device is suspended, resume it synchronously. | 780 | * @rpmflags: Flag bits. |
781 | * | ||
782 | * If the RPM_GET_PUT flag is set, increment the device's usage count. Then | ||
783 | * carry out a resume, either synchronous or asynchronous. | ||
787 | * | 784 | * |
788 | * Increment the usage count of the device and resume it or submit a resume | 785 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. |
789 | * request for it, depending on the value of @sync. | ||
790 | */ | 786 | */ |
791 | int __pm_runtime_get(struct device *dev, bool sync) | 787 | int __pm_runtime_resume(struct device *dev, int rpmflags) |
792 | { | 788 | { |
789 | unsigned long flags; | ||
793 | int retval; | 790 | int retval; |
794 | 791 | ||
795 | atomic_inc(&dev->power.usage_count); | 792 | if (rpmflags & RPM_GET_PUT) |
796 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); | 793 | atomic_inc(&dev->power.usage_count); |
797 | |||
798 | return retval; | ||
799 | } | ||
800 | EXPORT_SYMBOL_GPL(__pm_runtime_get); | ||
801 | |||
802 | /** | ||
803 | * __pm_runtime_put - Decrement the device's usage counter and notify its bus. | ||
804 | * @dev: Device to handle. | ||
805 | * @sync: If the device's bus type is to be notified, do that synchronously. | ||
806 | * | ||
807 | * Decrement the usage count of the device and if it reaches zero, carry out a | ||
808 | * synchronous idle notification or submit an idle notification request for it, | ||
809 | * depending on the value of @sync. | ||
810 | */ | ||
811 | int __pm_runtime_put(struct device *dev, bool sync) | ||
812 | { | ||
813 | int retval = 0; | ||
814 | 794 | ||
815 | if (atomic_dec_and_test(&dev->power.usage_count)) | 795 | spin_lock_irqsave(&dev->power.lock, flags); |
816 | retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev); | 796 | retval = rpm_resume(dev, rpmflags); |
797 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
817 | 798 | ||
818 | return retval; | 799 | return retval; |
819 | } | 800 | } |
820 | EXPORT_SYMBOL_GPL(__pm_runtime_put); | 801 | EXPORT_SYMBOL_GPL(__pm_runtime_resume); |
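
The suspend and resume entry points follow the same scheme; a sketch of the expected wrapper mappings (again per pm_runtime.h, shown here only for orientation):

	static inline int pm_runtime_suspend(struct device *dev)
	{
		return __pm_runtime_suspend(dev, 0);
	}

	static inline int pm_runtime_resume(struct device *dev)
	{
		return __pm_runtime_resume(dev, 0);
	}

	static inline int pm_runtime_get_sync(struct device *dev)
	{
		return __pm_runtime_resume(dev, RPM_GET_PUT);
	}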
821 | 802 | ||
822 | /** | 803 | /** |
823 | * __pm_runtime_set_status - Set run-time PM status of a device. | 804 | * __pm_runtime_set_status - Set run-time PM status of a device. |
@@ -968,7 +949,7 @@ int pm_runtime_barrier(struct device *dev) | |||
968 | 949 | ||
969 | if (dev->power.request_pending | 950 | if (dev->power.request_pending |
970 | && dev->power.request == RPM_REQ_RESUME) { | 951 | && dev->power.request == RPM_REQ_RESUME) { |
971 | __pm_runtime_resume(dev, false); | 952 | rpm_resume(dev, 0); |
972 | retval = 1; | 953 | retval = 1; |
973 | } | 954 | } |
974 | 955 | ||
@@ -1017,7 +998,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) | |||
1017 | */ | 998 | */ |
1018 | pm_runtime_get_noresume(dev); | 999 | pm_runtime_get_noresume(dev); |
1019 | 1000 | ||
1020 | __pm_runtime_resume(dev, false); | 1001 | rpm_resume(dev, 0); |
1021 | 1002 | ||
1022 | pm_runtime_put_noidle(dev); | 1003 | pm_runtime_put_noidle(dev); |
1023 | } | 1004 | } |
@@ -1065,7 +1046,7 @@ void pm_runtime_forbid(struct device *dev) | |||
1065 | 1046 | ||
1066 | dev->power.runtime_auto = false; | 1047 | dev->power.runtime_auto = false; |
1067 | atomic_inc(&dev->power.usage_count); | 1048 | atomic_inc(&dev->power.usage_count); |
1068 | __pm_runtime_resume(dev, false); | 1049 | rpm_resume(dev, 0); |
1069 | 1050 | ||
1070 | out: | 1051 | out: |
1071 | spin_unlock_irq(&dev->power.lock); | 1052 | spin_unlock_irq(&dev->power.lock); |
@@ -1086,7 +1067,7 @@ void pm_runtime_allow(struct device *dev) | |||
1086 | 1067 | ||
1087 | dev->power.runtime_auto = true; | 1068 | dev->power.runtime_auto = true; |
1088 | if (atomic_dec_and_test(&dev->power.usage_count)) | 1069 | if (atomic_dec_and_test(&dev->power.usage_count)) |
1089 | __pm_runtime_idle(dev); | 1070 | rpm_idle(dev, RPM_AUTO); |
1090 | 1071 | ||
1091 | out: | 1072 | out: |
1092 | spin_unlock_irq(&dev->power.lock); | 1073 | spin_unlock_irq(&dev->power.lock); |
@@ -1094,13 +1075,130 @@ void pm_runtime_allow(struct device *dev) | |||
1094 | EXPORT_SYMBOL_GPL(pm_runtime_allow); | 1075 | EXPORT_SYMBOL_GPL(pm_runtime_allow); |
1095 | 1076 | ||
1096 | /** | 1077 | /** |
1078 | * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device. | ||
1079 | * @dev: Device to handle. | ||
1080 | * | ||
1081 | * Set the power.no_callbacks flag, which tells the PM core that this | ||
1082 | * device is power-managed through its parent and has no run-time PM | ||
1083 | * callbacks of its own. The run-time sysfs attributes will be removed. | ||
1084 | */ | ||
1085 | void pm_runtime_no_callbacks(struct device *dev) | ||
1086 | { | ||
1087 | spin_lock_irq(&dev->power.lock); | ||
1088 | dev->power.no_callbacks = 1; | ||
1089 | spin_unlock_irq(&dev->power.lock); | ||
1090 | if (device_is_registered(dev)) | ||
1091 | rpm_sysfs_remove(dev); | ||
1092 | } | ||
1093 | EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); | ||
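
This is aimed at children powered entirely through their parent, such as subfunctions of an MFD-style device. A hedged sketch of the intended call site, with invented names:

	static int foo_register_subdev(struct device *child)
	{
		int ret = device_register(child);

		if (ret)
			return ret;
		pm_runtime_no_callbacks(child);	/* power is managed via the parent */
		pm_runtime_enable(child);
		return 0;
	}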
1094 | |||
1095 | /** | ||
1096 | * pm_runtime_irq_safe - Leave interrupts disabled during callbacks. | ||
1097 | * @dev: Device to handle | ||
1098 | * | ||
1099 | * Set the power.irq_safe flag, which tells the PM core that the | ||
1100 | * ->runtime_suspend() and ->runtime_resume() callbacks for this device should | ||
1101 | * always be invoked with the spinlock held and interrupts disabled. It also | ||
1102 | * causes the parent's usage counter to be permanently incremented, preventing | ||
1103 | * the parent from runtime suspending -- otherwise an irq-safe child might have | ||
1104 | * to wait for a non-irq-safe parent. | ||
1105 | */ | ||
1106 | void pm_runtime_irq_safe(struct device *dev) | ||
1107 | { | ||
1108 | if (dev->parent) | ||
1109 | pm_runtime_get_sync(dev->parent); | ||
1110 | spin_lock_irq(&dev->power.lock); | ||
1111 | dev->power.irq_safe = 1; | ||
1112 | spin_unlock_irq(&dev->power.lock); | ||
1113 | } | ||
1114 | EXPORT_SYMBOL_GPL(pm_runtime_irq_safe); | ||
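
Once irq_safe is set, the synchronous helpers become usable in atomic context, since the callbacks then run under the spinlock with interrupts off. A sketch under that assumption (foo_* names invented):

	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		struct device *dev = data;

		pm_runtime_get_sync(dev);	/* legal here only because irq_safe is set */
		/* ... service the hardware ... */
		pm_runtime_put(dev);
		return IRQ_HANDLED;
	}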
1115 | |||
1116 | /** | ||
1117 | * update_autosuspend - Handle a change to a device's autosuspend settings. | ||
1118 | * @dev: Device to handle. | ||
1119 | * @old_delay: The former autosuspend_delay value. | ||
1120 | * @old_use: The former use_autosuspend value. | ||
1121 | * | ||
1122 | * Prevent runtime suspend if the new delay is negative and use_autosuspend is | ||
1123 | * set; otherwise allow it. Send an idle notification if suspends are allowed. | ||
1124 | * | ||
1125 | * This function must be called under dev->power.lock with interrupts disabled. | ||
1126 | */ | ||
1127 | static void update_autosuspend(struct device *dev, int old_delay, int old_use) | ||
1128 | { | ||
1129 | int delay = dev->power.autosuspend_delay; | ||
1130 | |||
1131 | /* Should runtime suspend be prevented now? */ | ||
1132 | if (dev->power.use_autosuspend && delay < 0) { | ||
1133 | |||
1134 | /* If it used to be allowed then prevent it. */ | ||
1135 | if (!old_use || old_delay >= 0) { | ||
1136 | atomic_inc(&dev->power.usage_count); | ||
1137 | rpm_resume(dev, 0); | ||
1138 | } | ||
1139 | } | ||
1140 | |||
1141 | /* Runtime suspend should be allowed now. */ | ||
1142 | else { | ||
1143 | |||
1144 | /* If it used to be prevented then allow it. */ | ||
1145 | if (old_use && old_delay < 0) | ||
1146 | atomic_dec(&dev->power.usage_count); | ||
1147 | |||
1148 | /* Maybe we can autosuspend now. */ | ||
1149 | rpm_idle(dev, RPM_AUTO); | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | /** | ||
1154 | * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value. | ||
1155 | * @dev: Device to handle. | ||
1156 | * @delay: Value of the new delay in milliseconds. | ||
1157 | * | ||
1158 | * Set the device's power.autosuspend_delay value. If it changes to negative | ||
1159 | * and the power.use_autosuspend flag is set, prevent run-time suspends. If it | ||
1160 | * changes the other way, allow run-time suspends. | ||
1161 | */ | ||
1162 | void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) | ||
1163 | { | ||
1164 | int old_delay, old_use; | ||
1165 | |||
1166 | spin_lock_irq(&dev->power.lock); | ||
1167 | old_delay = dev->power.autosuspend_delay; | ||
1168 | old_use = dev->power.use_autosuspend; | ||
1169 | dev->power.autosuspend_delay = delay; | ||
1170 | update_autosuspend(dev, old_delay, old_use); | ||
1171 | spin_unlock_irq(&dev->power.lock); | ||
1172 | } | ||
1173 | EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); | ||
1174 | |||
1175 | /** | ||
1176 | * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag. | ||
1177 | * @dev: Device to handle. | ||
1178 | * @use: New value for use_autosuspend. | ||
1179 | * | ||
1180 | * Set the device's power.use_autosuspend flag, and allow or prevent run-time | ||
1181 | * suspends as needed. | ||
1182 | */ | ||
1183 | void __pm_runtime_use_autosuspend(struct device *dev, bool use) | ||
1184 | { | ||
1185 | int old_delay, old_use; | ||
1186 | |||
1187 | spin_lock_irq(&dev->power.lock); | ||
1188 | old_delay = dev->power.autosuspend_delay; | ||
1189 | old_use = dev->power.use_autosuspend; | ||
1190 | dev->power.use_autosuspend = use; | ||
1191 | update_autosuspend(dev, old_delay, old_use); | ||
1192 | spin_unlock_irq(&dev->power.lock); | ||
1193 | } | ||
1194 | EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); | ||
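
Drivers opt in through the pm_runtime.h helpers rather than calling this directly. A minimal sketch of the expected probe-time setup and I/O-completion path (helper names per the header; the foo_* context is invented):

	static int foo_probe_setup(struct device *dev)
	{
		pm_runtime_set_autosuspend_delay(dev, 2000);	/* stay idle 2 s first */
		pm_runtime_use_autosuspend(dev);	/* __pm_runtime_use_autosuspend(dev, true) */
		pm_runtime_enable(dev);
		return 0;
	}

	static void foo_io_done(struct device *dev)
	{
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* RPM_GET_PUT | RPM_ASYNC | RPM_AUTO */
	}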
1195 | |||
1196 | /** | ||
1097 | * pm_runtime_init - Initialize run-time PM fields in given device object. | 1197 | * pm_runtime_init - Initialize run-time PM fields in given device object. |
1098 | * @dev: Device object to initialize. | 1198 | * @dev: Device object to initialize. |
1099 | */ | 1199 | */ |
1100 | void pm_runtime_init(struct device *dev) | 1200 | void pm_runtime_init(struct device *dev) |
1101 | { | 1201 | { |
1102 | spin_lock_init(&dev->power.lock); | ||
1103 | |||
1104 | dev->power.runtime_status = RPM_SUSPENDED; | 1202 | dev->power.runtime_status = RPM_SUSPENDED; |
1105 | dev->power.idle_notification = false; | 1203 | dev->power.idle_notification = false; |
1106 | 1204 | ||
@@ -1137,4 +1235,6 @@ void pm_runtime_remove(struct device *dev) | |||
1137 | /* Change the status back to 'suspended' to match the initial status. */ | 1235 | /* Change the status back to 'suspended' to match the initial status. */ |
1138 | if (dev->power.runtime_status == RPM_ACTIVE) | 1236 | if (dev->power.runtime_status == RPM_ACTIVE) |
1139 | pm_runtime_set_suspended(dev); | 1237 | pm_runtime_set_suspended(dev); |
1238 | if (dev->power.irq_safe && dev->parent) | ||
1239 | pm_runtime_put_sync(dev->parent); | ||
1140 | } | 1240 | } |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index e56b4388fe61..a9f5b8979611 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -75,12 +75,27 @@ | |||
75 | * attribute is set to "enabled" by bus type code or device drivers and in | 75 | * attribute is set to "enabled" by bus type code or device drivers and in |
76 | * those cases it should be safe to leave the default value. | 76 | * those cases it should be safe to leave the default value. |
77 | * | 77 | * |
78 | * autosuspend_delay_ms - Report/change a device's autosuspend_delay value | ||
79 | * | ||
80 | * Some drivers don't want to carry out a runtime suspend as soon as a | ||
81 | * device becomes idle; they want it always to remain idle for some period | ||
82 | * of time before suspending it. This period is the autosuspend_delay | ||
83 | * value (expressed in milliseconds) and it can be controlled by the user. | ||
84 | * If the value is negative then the device will never be runtime | ||
85 | * suspended. | ||
86 | * | ||
87 | * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay | ||
88 | * value are used only if the driver calls pm_runtime_use_autosuspend(). | ||
89 | * | ||
78 | * wakeup_count - Report the number of wakeup events related to the device | 90 | * wakeup_count - Report the number of wakeup events related to the device |
79 | */ | 91 | */ |
80 | 92 | ||
81 | static const char enabled[] = "enabled"; | 93 | static const char enabled[] = "enabled"; |
82 | static const char disabled[] = "disabled"; | 94 | static const char disabled[] = "disabled"; |
83 | 95 | ||
96 | const char power_group_name[] = "power"; | ||
97 | EXPORT_SYMBOL_GPL(power_group_name); | ||
98 | |||
84 | #ifdef CONFIG_PM_RUNTIME | 99 | #ifdef CONFIG_PM_RUNTIME |
85 | static const char ctrl_auto[] = "auto"; | 100 | static const char ctrl_auto[] = "auto"; |
86 | static const char ctrl_on[] = "on"; | 101 | static const char ctrl_on[] = "on"; |
@@ -170,8 +185,36 @@ static ssize_t rtpm_status_show(struct device *dev, | |||
170 | } | 185 | } |
171 | 186 | ||
172 | static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); | 187 | static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); |
173 | #endif | ||
174 | 188 | ||
189 | static ssize_t autosuspend_delay_ms_show(struct device *dev, | ||
190 | struct device_attribute *attr, char *buf) | ||
191 | { | ||
192 | if (!dev->power.use_autosuspend) | ||
193 | return -EIO; | ||
194 | return sprintf(buf, "%d\n", dev->power.autosuspend_delay); | ||
195 | } | ||
196 | |||
197 | static ssize_t autosuspend_delay_ms_store(struct device *dev, | ||
198 | struct device_attribute *attr, const char *buf, size_t n) | ||
199 | { | ||
200 | long delay; | ||
201 | |||
202 | if (!dev->power.use_autosuspend) | ||
203 | return -EIO; | ||
204 | |||
205 | if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay) | ||
206 | return -EINVAL; | ||
207 | |||
208 | pm_runtime_set_autosuspend_delay(dev, delay); | ||
209 | return n; | ||
210 | } | ||
211 | |||
212 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, | ||
213 | autosuspend_delay_ms_store); | ||
214 | |||
215 | #endif /* CONFIG_PM_RUNTIME */ | ||
216 | |||
217 | #ifdef CONFIG_PM_SLEEP | ||
175 | static ssize_t | 218 | static ssize_t |
176 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) | 219 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) |
177 | { | 220 | { |
@@ -206,15 +249,125 @@ wake_store(struct device * dev, struct device_attribute *attr, | |||
206 | 249 | ||
207 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); | 250 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); |
208 | 251 | ||
209 | #ifdef CONFIG_PM_SLEEP | ||
210 | static ssize_t wakeup_count_show(struct device *dev, | 252 | static ssize_t wakeup_count_show(struct device *dev, |
211 | struct device_attribute *attr, char *buf) | 253 | struct device_attribute *attr, char *buf) |
212 | { | 254 | { |
213 | return sprintf(buf, "%lu\n", dev->power.wakeup_count); | 255 | unsigned long count = 0; |
256 | bool enabled = false; | ||
257 | |||
258 | spin_lock_irq(&dev->power.lock); | ||
259 | if (dev->power.wakeup) { | ||
260 | count = dev->power.wakeup->event_count; | ||
261 | enabled = true; | ||
262 | } | ||
263 | spin_unlock_irq(&dev->power.lock); | ||
264 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | ||
214 | } | 265 | } |
215 | 266 | ||
216 | static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); | 267 | static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); |
217 | #endif | 268 | |
269 | static ssize_t wakeup_active_count_show(struct device *dev, | ||
270 | struct device_attribute *attr, char *buf) | ||
271 | { | ||
272 | unsigned long count = 0; | ||
273 | bool enabled = false; | ||
274 | |||
275 | spin_lock_irq(&dev->power.lock); | ||
276 | if (dev->power.wakeup) { | ||
277 | count = dev->power.wakeup->active_count; | ||
278 | enabled = true; | ||
279 | } | ||
280 | spin_unlock_irq(&dev->power.lock); | ||
281 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | ||
282 | } | ||
283 | |||
284 | static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); | ||
285 | |||
286 | static ssize_t wakeup_hit_count_show(struct device *dev, | ||
287 | struct device_attribute *attr, char *buf) | ||
288 | { | ||
289 | unsigned long count = 0; | ||
290 | bool enabled = false; | ||
291 | |||
292 | spin_lock_irq(&dev->power.lock); | ||
293 | if (dev->power.wakeup) { | ||
294 | count = dev->power.wakeup->hit_count; | ||
295 | enabled = true; | ||
296 | } | ||
297 | spin_unlock_irq(&dev->power.lock); | ||
298 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | ||
299 | } | ||
300 | |||
301 | static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL); | ||
302 | |||
303 | static ssize_t wakeup_active_show(struct device *dev, | ||
304 | struct device_attribute *attr, char *buf) | ||
305 | { | ||
306 | unsigned int active = 0; | ||
307 | bool enabled = false; | ||
308 | |||
309 | spin_lock_irq(&dev->power.lock); | ||
310 | if (dev->power.wakeup) { | ||
311 | active = dev->power.wakeup->active; | ||
312 | enabled = true; | ||
313 | } | ||
314 | spin_unlock_irq(&dev->power.lock); | ||
315 | return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); | ||
316 | } | ||
317 | |||
318 | static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL); | ||
319 | |||
320 | static ssize_t wakeup_total_time_show(struct device *dev, | ||
321 | struct device_attribute *attr, char *buf) | ||
322 | { | ||
323 | s64 msec = 0; | ||
324 | bool enabled = false; | ||
325 | |||
326 | spin_lock_irq(&dev->power.lock); | ||
327 | if (dev->power.wakeup) { | ||
328 | msec = ktime_to_ms(dev->power.wakeup->total_time); | ||
329 | enabled = true; | ||
330 | } | ||
331 | spin_unlock_irq(&dev->power.lock); | ||
332 | return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); | ||
333 | } | ||
334 | |||
335 | static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL); | ||
336 | |||
337 | static ssize_t wakeup_max_time_show(struct device *dev, | ||
338 | struct device_attribute *attr, char *buf) | ||
339 | { | ||
340 | s64 msec = 0; | ||
341 | bool enabled = false; | ||
342 | |||
343 | spin_lock_irq(&dev->power.lock); | ||
344 | if (dev->power.wakeup) { | ||
345 | msec = ktime_to_ms(dev->power.wakeup->max_time); | ||
346 | enabled = true; | ||
347 | } | ||
348 | spin_unlock_irq(&dev->power.lock); | ||
349 | return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); | ||
350 | } | ||
351 | |||
352 | static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL); | ||
353 | |||
354 | static ssize_t wakeup_last_time_show(struct device *dev, | ||
355 | struct device_attribute *attr, char *buf) | ||
356 | { | ||
357 | s64 msec = 0; | ||
358 | bool enabled = false; | ||
359 | |||
360 | spin_lock_irq(&dev->power.lock); | ||
361 | if (dev->power.wakeup) { | ||
362 | msec = ktime_to_ms(dev->power.wakeup->last_time); | ||
363 | enabled = true; | ||
364 | } | ||
365 | spin_unlock_irq(&dev->power.lock); | ||
366 | return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); | ||
367 | } | ||
368 | |||
369 | static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); | ||
370 | #endif /* CONFIG_PM_SLEEP */ | ||
218 | 371 | ||
219 | #ifdef CONFIG_PM_ADVANCED_DEBUG | 372 | #ifdef CONFIG_PM_ADVANCED_DEBUG |
220 | #ifdef CONFIG_PM_RUNTIME | 373 | #ifdef CONFIG_PM_RUNTIME |
@@ -278,38 +431,108 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr, | |||
278 | static DEVICE_ATTR(async, 0644, async_show, async_store); | 431 | static DEVICE_ATTR(async, 0644, async_show, async_store); |
279 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ | 432 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ |
280 | 433 | ||
281 | static struct attribute * power_attrs[] = { | 434 | static struct attribute *power_attrs[] = { |
282 | #ifdef CONFIG_PM_RUNTIME | ||
283 | &dev_attr_control.attr, | ||
284 | &dev_attr_runtime_status.attr, | ||
285 | &dev_attr_runtime_suspended_time.attr, | ||
286 | &dev_attr_runtime_active_time.attr, | ||
287 | #endif | ||
288 | &dev_attr_wakeup.attr, | ||
289 | #ifdef CONFIG_PM_SLEEP | ||
290 | &dev_attr_wakeup_count.attr, | ||
291 | #endif | ||
292 | #ifdef CONFIG_PM_ADVANCED_DEBUG | 435 | #ifdef CONFIG_PM_ADVANCED_DEBUG |
436 | #ifdef CONFIG_PM_SLEEP | ||
293 | &dev_attr_async.attr, | 437 | &dev_attr_async.attr, |
438 | #endif | ||
294 | #ifdef CONFIG_PM_RUNTIME | 439 | #ifdef CONFIG_PM_RUNTIME |
440 | &dev_attr_runtime_status.attr, | ||
295 | &dev_attr_runtime_usage.attr, | 441 | &dev_attr_runtime_usage.attr, |
296 | &dev_attr_runtime_active_kids.attr, | 442 | &dev_attr_runtime_active_kids.attr, |
297 | &dev_attr_runtime_enabled.attr, | 443 | &dev_attr_runtime_enabled.attr, |
298 | #endif | 444 | #endif |
299 | #endif | 445 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ |
300 | NULL, | 446 | NULL, |
301 | }; | 447 | }; |
302 | static struct attribute_group pm_attr_group = { | 448 | static struct attribute_group pm_attr_group = { |
303 | .name = "power", | 449 | .name = power_group_name, |
304 | .attrs = power_attrs, | 450 | .attrs = power_attrs, |
305 | }; | 451 | }; |
306 | 452 | ||
307 | int dpm_sysfs_add(struct device * dev) | 453 | static struct attribute *wakeup_attrs[] = { |
454 | #ifdef CONFIG_PM_SLEEP | ||
455 | &dev_attr_wakeup.attr, | ||
456 | &dev_attr_wakeup_count.attr, | ||
457 | &dev_attr_wakeup_active_count.attr, | ||
458 | &dev_attr_wakeup_hit_count.attr, | ||
459 | &dev_attr_wakeup_active.attr, | ||
460 | &dev_attr_wakeup_total_time_ms.attr, | ||
461 | &dev_attr_wakeup_max_time_ms.attr, | ||
462 | &dev_attr_wakeup_last_time_ms.attr, | ||
463 | #endif | ||
464 | NULL, | ||
465 | }; | ||
466 | static struct attribute_group pm_wakeup_attr_group = { | ||
467 | .name = power_group_name, | ||
468 | .attrs = wakeup_attrs, | ||
469 | }; | ||
470 | |||
471 | static struct attribute *runtime_attrs[] = { | ||
472 | #ifdef CONFIG_PM_RUNTIME | ||
473 | #ifndef CONFIG_PM_ADVANCED_DEBUG | ||
474 | &dev_attr_runtime_status.attr, | ||
475 | #endif | ||
476 | &dev_attr_control.attr, | ||
477 | &dev_attr_runtime_suspended_time.attr, | ||
478 | &dev_attr_runtime_active_time.attr, | ||
479 | &dev_attr_autosuspend_delay_ms.attr, | ||
480 | #endif /* CONFIG_PM_RUNTIME */ | ||
481 | NULL, | ||
482 | }; | ||
483 | static struct attribute_group pm_runtime_attr_group = { | ||
484 | .name = power_group_name, | ||
485 | .attrs = runtime_attrs, | ||
486 | }; | ||
487 | |||
488 | int dpm_sysfs_add(struct device *dev) | ||
489 | { | ||
490 | int rc; | ||
491 | |||
492 | rc = sysfs_create_group(&dev->kobj, &pm_attr_group); | ||
493 | if (rc) | ||
494 | return rc; | ||
495 | |||
496 | if (pm_runtime_callbacks_present(dev)) { | ||
497 | rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); | ||
498 | if (rc) | ||
499 | goto err_out; | ||
500 | } | ||
501 | |||
502 | if (device_can_wakeup(dev)) { | ||
503 | rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
504 | if (rc) { | ||
505 | if (pm_runtime_callbacks_present(dev)) | ||
506 | sysfs_unmerge_group(&dev->kobj, | ||
507 | &pm_runtime_attr_group); | ||
508 | goto err_out; | ||
509 | } | ||
510 | } | ||
511 | return 0; | ||
512 | |||
513 | err_out: | ||
514 | sysfs_remove_group(&dev->kobj, &pm_attr_group); | ||
515 | return rc; | ||
516 | } | ||
517 | |||
518 | int wakeup_sysfs_add(struct device *dev) | ||
519 | { | ||
520 | return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
521 | } | ||
522 | |||
523 | void wakeup_sysfs_remove(struct device *dev) | ||
524 | { | ||
525 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
526 | } | ||
527 | |||
528 | void rpm_sysfs_remove(struct device *dev) | ||
308 | { | 529 | { |
309 | return sysfs_create_group(&dev->kobj, &pm_attr_group); | 530 | sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); |
310 | } | 531 | } |
311 | 532 | ||
312 | void dpm_sysfs_remove(struct device * dev) | 533 | void dpm_sysfs_remove(struct device *dev) |
313 | { | 534 | { |
535 | rpm_sysfs_remove(dev); | ||
536 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
314 | sysfs_remove_group(&dev->kobj, &pm_attr_group); | 537 | sysfs_remove_group(&dev->kobj, &pm_attr_group); |
315 | } | 538 | } |
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 0a1a2c4dbc6e..c80e138b62fe 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c | |||
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void) | |||
112 | unsigned int val; | 112 | unsigned int val; |
113 | 113 | ||
114 | get_rtc_time(&time); | 114 | get_rtc_time(&time); |
115 | printk("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", | 115 | pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", |
116 | time.tm_hour, time.tm_min, time.tm_sec, | 116 | time.tm_hour, time.tm_min, time.tm_sec, |
117 | time.tm_mon + 1, time.tm_mday, time.tm_year % 100); | 117 | time.tm_mon + 1, time.tm_mday, time.tm_year % 100); |
118 | val = time.tm_year; /* 100 years */ | 118 | val = time.tm_year; /* 100 years */ |
@@ -179,7 +179,7 @@ static int show_file_hash(unsigned int value) | |||
179 | unsigned int hash = hash_string(lineno, file, FILEHASH); | 179 | unsigned int hash = hash_string(lineno, file, FILEHASH); |
180 | if (hash != value) | 180 | if (hash != value) |
181 | continue; | 181 | continue; |
182 | printk(" hash matches %s:%u\n", file, lineno); | 182 | pr_info(" hash matches %s:%u\n", file, lineno); |
183 | match++; | 183 | match++; |
184 | } | 184 | } |
185 | return match; | 185 | return match; |
@@ -188,8 +188,10 @@ static int show_file_hash(unsigned int value) | |||
188 | static int show_dev_hash(unsigned int value) | 188 | static int show_dev_hash(unsigned int value) |
189 | { | 189 | { |
190 | int match = 0; | 190 | int match = 0; |
191 | struct list_head *entry = dpm_list.prev; | 191 | struct list_head *entry; |
192 | 192 | ||
193 | device_pm_lock(); | ||
194 | entry = dpm_list.prev; | ||
193 | while (entry != &dpm_list) { | 195 | while (entry != &dpm_list) { |
194 | struct device * dev = to_device(entry); | 196 | struct device * dev = to_device(entry); |
195 | unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); | 197 | unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); |
@@ -199,11 +201,43 @@ static int show_dev_hash(unsigned int value) | |||
199 | } | 201 | } |
200 | entry = entry->prev; | 202 | entry = entry->prev; |
201 | } | 203 | } |
204 | device_pm_unlock(); | ||
202 | return match; | 205 | return match; |
203 | } | 206 | } |
204 | 207 | ||
205 | static unsigned int hash_value_early_read; | 208 | static unsigned int hash_value_early_read; |
206 | 209 | ||
210 | int show_trace_dev_match(char *buf, size_t size) | ||
211 | { | ||
212 | unsigned int value = hash_value_early_read / (USERHASH * FILEHASH); | ||
213 | int ret = 0; | ||
214 | struct list_head *entry; | ||
215 | |||
216 | /* | ||
217 | * It's possible that multiple devices will match the hash and we can't | ||
218 | * tell which is the culprit, so it's best to output them all. | ||
219 | */ | ||
220 | device_pm_lock(); | ||
221 | entry = dpm_list.prev; | ||
222 | while (size && entry != &dpm_list) { | ||
223 | struct device *dev = to_device(entry); | ||
224 | unsigned int hash = hash_string(DEVSEED, dev_name(dev), | ||
225 | DEVHASH); | ||
226 | if (hash == value) { | ||
227 | int len = snprintf(buf, size, "%s\n", | ||
228 | dev_driver_string(dev)); | ||
229 | if (len > size) | ||
230 | len = size; | ||
231 | buf += len; | ||
232 | ret += len; | ||
233 | size -= len; | ||
234 | } | ||
235 | entry = entry->prev; | ||
236 | } | ||
237 | device_pm_unlock(); | ||
238 | return ret; | ||
239 | } | ||
240 | |||
207 | static int early_resume_init(void) | 241 | static int early_resume_init(void) |
208 | { | 242 | { |
209 | hash_value_early_read = read_magic_time(); | 243 | hash_value_early_read = read_magic_time(); |
@@ -221,7 +255,7 @@ static int late_resume_init(void) | |||
221 | val = val / FILEHASH; | 255 | val = val / FILEHASH; |
222 | dev = val /* % DEVHASH */; | 256 | dev = val /* % DEVHASH */; |
223 | 257 | ||
224 | printk(" Magic number: %d:%d:%d\n", user, file, dev); | 258 | pr_info(" Magic number: %d:%d:%d\n", user, file, dev); |
225 | show_file_hash(file); | 259 | show_file_hash(file); |
226 | show_dev_hash(dev); | 260 | show_dev_hash(dev); |
227 | return 0; | 261 | return 0; |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index eb594facfc3f..84f7c7d5a098 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -11,7 +11,12 @@ | |||
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
13 | #include <linux/suspend.h> | 13 | #include <linux/suspend.h> |
14 | #include <linux/pm.h> | 14 | #include <linux/seq_file.h> |
15 | #include <linux/debugfs.h> | ||
16 | |||
17 | #include "power.h" | ||
18 | |||
19 | #define TIMEOUT 100 | ||
15 | 20 | ||
16 | /* | 21 | /* |
17 | * If set, the suspend/hibernate code will abort transitions to a sleep state | 22 | * If set, the suspend/hibernate code will abort transitions to a sleep state |
@@ -19,19 +24,287 @@ | |||
19 | */ | 24 | */ |
20 | bool events_check_enabled; | 25 | bool events_check_enabled; |
21 | 26 | ||
22 | /* The counter of registered wakeup events. */ | 27 | /* |
23 | static unsigned long event_count; | 28 | * Combined counters of registered wakeup events and wakeup events in progress. |
24 | /* A preserved old value of event_count. */ | 29 | * They need to be modified together atomically, so it's better to use one |
25 | static unsigned long saved_event_count; | 30 | * atomic variable to hold them both. |
26 | /* The counter of wakeup events being processed. */ | 31 | */ |
27 | static unsigned long events_in_progress; | 32 | static atomic_t combined_event_count = ATOMIC_INIT(0); |
33 | |||
34 | #define IN_PROGRESS_BITS (sizeof(int) * 4) | ||
35 | #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) | ||
36 | |||
37 | static void split_counters(unsigned int *cnt, unsigned int *inpr) | ||
38 | { | ||
39 | unsigned int comb = atomic_read(&combined_event_count); | ||
40 | |||
41 | *cnt = (comb >> IN_PROGRESS_BITS); | ||
42 | *inpr = comb & MAX_IN_PROGRESS; | ||
43 | } | ||
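
With a 32-bit int this packs two 16-bit fields into one atomic word, so both halves can be updated in a single operation. An illustration of the arithmetic the activate/deactivate paths rely on later in this file:

	/* layout: [ event_count : 16 ][ events_in_progress : 16 ] */
	static void event_started(void)
	{
		atomic_inc(&combined_event_count);	/* inpr += 1 */
	}

	static void event_finished(void)
	{
		/* cnt += 1 and inpr -= 1 in one step: add (1 << IN_PROGRESS_BITS) - 1 */
		atomic_add(MAX_IN_PROGRESS, &combined_event_count);
	}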
44 | |||
45 | /* A preserved old value of the events counter. */ | ||
46 | static unsigned int saved_count; | ||
28 | 47 | ||
29 | static DEFINE_SPINLOCK(events_lock); | 48 | static DEFINE_SPINLOCK(events_lock); |
30 | 49 | ||
31 | static void pm_wakeup_timer_fn(unsigned long data); | 50 | static void pm_wakeup_timer_fn(unsigned long data); |
32 | 51 | ||
33 | static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0); | 52 | static LIST_HEAD(wakeup_sources); |
34 | static unsigned long events_timer_expires; | 53 | |
54 | /** | ||
55 | * wakeup_source_create - Create a struct wakeup_source object. | ||
56 | * @name: Name of the new wakeup source. | ||
57 | */ | ||
58 | struct wakeup_source *wakeup_source_create(const char *name) | ||
59 | { | ||
60 | struct wakeup_source *ws; | ||
61 | |||
62 | ws = kzalloc(sizeof(*ws), GFP_KERNEL); | ||
63 | if (!ws) | ||
64 | return NULL; | ||
65 | |||
66 | spin_lock_init(&ws->lock); | ||
67 | if (name) | ||
68 | ws->name = kstrdup(name, GFP_KERNEL); | ||
69 | |||
70 | return ws; | ||
71 | } | ||
72 | EXPORT_SYMBOL_GPL(wakeup_source_create); | ||
73 | |||
74 | /** | ||
75 | * wakeup_source_destroy - Destroy a struct wakeup_source object. | ||
76 | * @ws: Wakeup source to destroy. | ||
77 | */ | ||
78 | void wakeup_source_destroy(struct wakeup_source *ws) | ||
79 | { | ||
80 | if (!ws) | ||
81 | return; | ||
82 | |||
83 | spin_lock_irq(&ws->lock); | ||
84 | while (ws->active) { | ||
85 | spin_unlock_irq(&ws->lock); | ||
86 | |||
87 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | ||
88 | |||
89 | spin_lock_irq(&ws->lock); | ||
90 | } | ||
91 | spin_unlock_irq(&ws->lock); | ||
92 | |||
93 | kfree(ws->name); | ||
94 | kfree(ws); | ||
95 | } | ||
96 | EXPORT_SYMBOL_GPL(wakeup_source_destroy); | ||
97 | |||
98 | /** | ||
99 | * wakeup_source_add - Add given object to the list of wakeup sources. | ||
100 | * @ws: Wakeup source object to add to the list. | ||
101 | */ | ||
102 | void wakeup_source_add(struct wakeup_source *ws) | ||
103 | { | ||
104 | if (WARN_ON(!ws)) | ||
105 | return; | ||
106 | |||
107 | setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); | ||
108 | ws->active = false; | ||
109 | |||
110 | spin_lock_irq(&events_lock); | ||
111 | list_add_rcu(&ws->entry, &wakeup_sources); | ||
112 | spin_unlock_irq(&events_lock); | ||
113 | } | ||
114 | EXPORT_SYMBOL_GPL(wakeup_source_add); | ||
115 | |||
116 | /** | ||
117 | * wakeup_source_remove - Remove given object from the wakeup sources list. | ||
118 | * @ws: Wakeup source object to remove from the list. | ||
119 | */ | ||
120 | void wakeup_source_remove(struct wakeup_source *ws) | ||
121 | { | ||
122 | if (WARN_ON(!ws)) | ||
123 | return; | ||
124 | |||
125 | spin_lock_irq(&events_lock); | ||
126 | list_del_rcu(&ws->entry); | ||
127 | spin_unlock_irq(&events_lock); | ||
128 | synchronize_rcu(); | ||
129 | } | ||
130 | EXPORT_SYMBOL_GPL(wakeup_source_remove); | ||
131 | |||
132 | /** | ||
133 | * wakeup_source_register - Create wakeup source and add it to the list. | ||
134 | * @name: Name of the wakeup source to register. | ||
135 | */ | ||
136 | struct wakeup_source *wakeup_source_register(const char *name) | ||
137 | { | ||
138 | struct wakeup_source *ws; | ||
139 | |||
140 | ws = wakeup_source_create(name); | ||
141 | if (ws) | ||
142 | wakeup_source_add(ws); | ||
143 | |||
144 | return ws; | ||
145 | } | ||
146 | EXPORT_SYMBOL_GPL(wakeup_source_register); | ||
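
The register/unregister pair bundles create+add and remove+destroy. A sketch of a subsystem-private wakeup source (the foo names are invented):

	static struct wakeup_source *foo_ws;

	static int __init foo_init(void)
	{
		foo_ws = wakeup_source_register("foo");	/* create + add to the list */
		return foo_ws ? 0 : -ENOMEM;
	}

	static void __exit foo_exit(void)
	{
		wakeup_source_unregister(foo_ws);	/* remove + destroy */
	}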
147 | |||
148 | /** | ||
149 | * wakeup_source_unregister - Remove wakeup source from the list and destroy it. | ||
150 | * @ws: Wakeup source object to unregister. | ||
151 | */ | ||
152 | void wakeup_source_unregister(struct wakeup_source *ws) | ||
153 | { | ||
154 | wakeup_source_remove(ws); | ||
155 | wakeup_source_destroy(ws); | ||
156 | } | ||
157 | EXPORT_SYMBOL_GPL(wakeup_source_unregister); | ||
158 | |||
159 | /** | ||
160 | * device_wakeup_attach - Attach a wakeup source object to a device object. | ||
161 | * @dev: Device to handle. | ||
162 | * @ws: Wakeup source object to attach to @dev. | ||
163 | * | ||
164 | * This causes @dev to be treated as a wakeup device. | ||
165 | */ | ||
166 | static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) | ||
167 | { | ||
168 | spin_lock_irq(&dev->power.lock); | ||
169 | if (dev->power.wakeup) { | ||
170 | spin_unlock_irq(&dev->power.lock); | ||
171 | return -EEXIST; | ||
172 | } | ||
173 | dev->power.wakeup = ws; | ||
174 | spin_unlock_irq(&dev->power.lock); | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * device_wakeup_enable - Enable given device to be a wakeup source. | ||
180 | * @dev: Device to handle. | ||
181 | * | ||
182 | * Create a wakeup source object, register it and attach it to @dev. | ||
183 | */ | ||
184 | int device_wakeup_enable(struct device *dev) | ||
185 | { | ||
186 | struct wakeup_source *ws; | ||
187 | int ret; | ||
188 | |||
189 | if (!dev || !dev->power.can_wakeup) | ||
190 | return -EINVAL; | ||
191 | |||
192 | ws = wakeup_source_register(dev_name(dev)); | ||
193 | if (!ws) | ||
194 | return -ENOMEM; | ||
195 | |||
196 | ret = device_wakeup_attach(dev, ws); | ||
197 | if (ret) | ||
198 | wakeup_source_unregister(ws); | ||
199 | |||
200 | return ret; | ||
201 | } | ||
202 | EXPORT_SYMBOL_GPL(device_wakeup_enable); | ||
203 | |||
204 | /** | ||
205 | * device_wakeup_detach - Detach a device's wakeup source object from it. | ||
206 | * @dev: Device to detach the wakeup source object from. | ||
207 | * | ||
208 | * After it returns, @dev will not be treated as a wakeup device any more. | ||
209 | */ | ||
210 | static struct wakeup_source *device_wakeup_detach(struct device *dev) | ||
211 | { | ||
212 | struct wakeup_source *ws; | ||
213 | |||
214 | spin_lock_irq(&dev->power.lock); | ||
215 | ws = dev->power.wakeup; | ||
216 | dev->power.wakeup = NULL; | ||
217 | spin_unlock_irq(&dev->power.lock); | ||
218 | return ws; | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * device_wakeup_disable - Do not regard a device as a wakeup source any more. | ||
223 | * @dev: Device to handle. | ||
224 | * | ||
225 | * Detach the @dev's wakeup source object from it, unregister this wakeup source | ||
226 | * object and destroy it. | ||
227 | */ | ||
228 | int device_wakeup_disable(struct device *dev) | ||
229 | { | ||
230 | struct wakeup_source *ws; | ||
231 | |||
232 | if (!dev || !dev->power.can_wakeup) | ||
233 | return -EINVAL; | ||
234 | |||
235 | ws = device_wakeup_detach(dev); | ||
236 | if (ws) | ||
237 | wakeup_source_unregister(ws); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | EXPORT_SYMBOL_GPL(device_wakeup_disable); | ||
242 | |||
243 | /** | ||
244 | * device_set_wakeup_capable - Set/reset device wakeup capability flag. | ||
245 | * @dev: Device to handle. | ||
246 | * @capable: Whether or not @dev is capable of waking up the system from sleep. | ||
247 | * | ||
248 | * If @capable is set, set the @dev's power.can_wakeup flag and add its | ||
249 | * wakeup-related attributes to sysfs. Otherwise, unset the @dev's | ||
250 | * power.can_wakeup flag and remove its wakeup-related attributes from sysfs. | ||
251 | * | ||
252 | * This function may sleep, so it must not be called from any context where | ||
253 | * sleeping is not allowed. | ||
254 | */ | ||
255 | void device_set_wakeup_capable(struct device *dev, bool capable) | ||
256 | { | ||
257 | if (!!dev->power.can_wakeup == !!capable) | ||
258 | return; | ||
259 | |||
260 | if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { | ||
261 | if (capable) { | ||
262 | if (wakeup_sysfs_add(dev)) | ||
263 | return; | ||
264 | } else { | ||
265 | wakeup_sysfs_remove(dev); | ||
266 | } | ||
267 | } | ||
268 | dev->power.can_wakeup = capable; | ||
269 | } | ||
270 | EXPORT_SYMBOL_GPL(device_set_wakeup_capable); | ||
271 | |||
272 | /** | ||
273 | * device_init_wakeup - Device wakeup initialization. | ||
274 | * @dev: Device to handle. | ||
275 | * @enable: Whether or not to enable @dev as a wakeup device. | ||
276 | * | ||
277 | * By default, most devices should leave wakeup disabled. The exceptions are | ||
278 | * devices that everyone expects to be wakeup sources: keyboards, power buttons, | ||
279 | * possibly network interfaces, etc. | ||
280 | */ | ||
281 | int device_init_wakeup(struct device *dev, bool enable) | ||
282 | { | ||
283 | int ret = 0; | ||
284 | |||
285 | if (enable) { | ||
286 | device_set_wakeup_capable(dev, true); | ||
287 | ret = device_wakeup_enable(dev); | ||
288 | } else { | ||
289 | device_set_wakeup_capable(dev, false); | ||
290 | } | ||
291 | |||
292 | return ret; | ||
293 | } | ||
294 | EXPORT_SYMBOL_GPL(device_init_wakeup); | ||
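A typical driver-side sketch of how device_init_wakeup() is meant to be paired across probe and remove; the "foo" platform driver below is hypothetical:

/* Sketch: mark a hypothetical platform device wakeup-capable at probe
 * time and tear that down on remove. */
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Sets power.can_wakeup and registers a wakeup source for the device. */
	return device_init_wakeup(&pdev->dev, true);
}

static int foo_remove(struct platform_device *pdev)
{
	/* Clears power.can_wakeup and removes the wakeup sysfs attributes;
	 * in this version the else branch does not detach an attached
	 * wakeup source (that is device_wakeup_disable()'s job). */
	device_init_wakeup(&pdev->dev, false);
	return 0;
}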
295 | |||
296 | /** | ||
297 | * device_set_wakeup_enable - Enable or disable a device to wake up the system. | ||
298 | * @dev: Device to handle. | ||
299 | */ | ||
300 | int device_set_wakeup_enable(struct device *dev, bool enable) | ||
301 | { | ||
302 | if (!dev || !dev->power.can_wakeup) | ||
303 | return -EINVAL; | ||
304 | |||
305 | return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev); | ||
306 | } | ||
307 | EXPORT_SYMBOL_GPL(device_set_wakeup_enable); | ||
35 | 308 | ||
36 | /* | 309 | /* |
37 | * The functions below use the observation that each wakeup event starts a | 310 | * The functions below use the observation that each wakeup event starts a |
@@ -55,139 +328,282 @@ static unsigned long events_timer_expires; | |||
55 | * knowledge, however, may not be available to it, so it can simply specify time | 328 | * knowledge, however, may not be available to it, so it can simply specify time |
56 | * to wait before the system can be suspended and pass it as the second | 329 | * to wait before the system can be suspended and pass it as the second |
57 | * argument of pm_wakeup_event(). | 330 | * argument of pm_wakeup_event(). |
331 | * | ||
332 | * It is valid to call pm_relax() after pm_wakeup_event(), in which case the | ||
333 | * "no suspend" period will be ended either by the pm_relax(), or by the timer | ||
334 | * function executed when the timer expires, whichever comes first. | ||
335 | */ | ||
336 | |||
337 | /** | ||
338 | * wakeup_source_activate - Mark given wakeup source as active. | ||
339 | * @ws: Wakeup source to handle. | ||
340 | * | ||
341 | * Update the @ws' statistics and, if @ws has just been activated, notify the PM | ||
342 | * core of the event by incrementing the counter of wakeup events being | ||
343 | * processed. | ||
58 | */ | 344 | */ |
345 | static void wakeup_source_activate(struct wakeup_source *ws) | ||
346 | { | ||
347 | ws->active = true; | ||
348 | ws->active_count++; | ||
349 | ws->timer_expires = jiffies; | ||
350 | ws->last_time = ktime_get(); | ||
351 | |||
352 | /* Increment the counter of events in progress. */ | ||
353 | atomic_inc(&combined_event_count); | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * __pm_stay_awake - Notify the PM core of a wakeup event. | ||
358 | * @ws: Wakeup source object associated with the source of the event. | ||
359 | * | ||
360 | * It is safe to call this function from interrupt context. | ||
361 | */ | ||
362 | void __pm_stay_awake(struct wakeup_source *ws) | ||
363 | { | ||
364 | unsigned long flags; | ||
365 | |||
366 | if (!ws) | ||
367 | return; | ||
368 | |||
369 | spin_lock_irqsave(&ws->lock, flags); | ||
370 | ws->event_count++; | ||
371 | if (!ws->active) | ||
372 | wakeup_source_activate(ws); | ||
373 | spin_unlock_irqrestore(&ws->lock, flags); | ||
374 | } | ||
375 | EXPORT_SYMBOL_GPL(__pm_stay_awake); | ||
59 | 376 | ||
60 | /** | 377 | /** |
61 | * pm_stay_awake - Notify the PM core that a wakeup event is being processed. | 378 | * pm_stay_awake - Notify the PM core that a wakeup event is being processed. |
62 | * @dev: Device the wakeup event is related to. | 379 | * @dev: Device the wakeup event is related to. |
63 | * | 380 | * |
64 | * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the | 381 | * Notify the PM core of a wakeup event (signaled by @dev) by calling |
65 | * counter of wakeup events being processed. If @dev is not NULL, the counter | 382 | * __pm_stay_awake() for the @dev's wakeup source object. |
66 | * of wakeup events related to @dev is incremented too. | ||
67 | * | 383 | * |
68 | * Call this function after detecting a wakeup event if pm_relax() is going | 384 | * Call this function after detecting a wakeup event if pm_relax() is going |
69 | * to be called directly after processing the event (and possibly passing it to | 385 | * to be called directly after processing the event (and possibly passing it to |
70 | * user space for further processing). | 386 | * user space for further processing). |
71 | * | ||
72 | * It is safe to call this function from interrupt context. | ||
73 | */ | 387 | */ |
74 | void pm_stay_awake(struct device *dev) | 388 | void pm_stay_awake(struct device *dev) |
75 | { | 389 | { |
76 | unsigned long flags; | 390 | unsigned long flags; |
77 | 391 | ||
78 | spin_lock_irqsave(&events_lock, flags); | 392 | if (!dev) |
79 | if (dev) | 393 | return; |
80 | dev->power.wakeup_count++; | ||
81 | 394 | ||
82 | events_in_progress++; | 395 | spin_lock_irqsave(&dev->power.lock, flags); |
83 | spin_unlock_irqrestore(&events_lock, flags); | 396 | __pm_stay_awake(dev->power.wakeup); |
397 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
84 | } | 398 | } |
399 | EXPORT_SYMBOL_GPL(pm_stay_awake); | ||
85 | 400 | ||
86 | /** | 401 | /** |
87 | * pm_relax - Notify the PM core that processing of a wakeup event has ended. | 402 | * wakeup_source_deactivate - Mark given wakeup source as inactive. |
403 | * @ws: Wakeup source to handle. | ||
88 | * | 404 | * |
89 | * Notify the PM core that a wakeup event has been processed by decrementing | 405 | * Update the @ws' statistics and notify the PM core that the wakeup source has |
90 | * the counter of wakeup events being processed and incrementing the counter | 406 | * become inactive by decrementing the counter of wakeup events being processed |
91 | * of registered wakeup events. | 407 | * and incrementing the counter of registered wakeup events. |
408 | */ | ||
409 | static void wakeup_source_deactivate(struct wakeup_source *ws) | ||
410 | { | ||
411 | ktime_t duration; | ||
412 | ktime_t now; | ||
413 | |||
414 | ws->relax_count++; | ||
415 | /* | ||
416 | * __pm_relax() may be called directly or from a timer function. | ||
417 | * If it is called directly right after the timer function has been | ||
418 | * started, but before the timer function calls __pm_relax(), it is | ||
419 | * possible that __pm_stay_awake() will be called in the meantime and | ||
420 | * will set ws->active. Then, ws->active may be cleared immediately | ||
421 | * by the __pm_relax() called from the timer function, but in such a | ||
422 | * case ws->relax_count will be different from ws->active_count. | ||
423 | */ | ||
424 | if (ws->relax_count != ws->active_count) { | ||
425 | ws->relax_count--; | ||
426 | return; | ||
427 | } | ||
428 | |||
429 | ws->active = false; | ||
430 | |||
431 | now = ktime_get(); | ||
432 | duration = ktime_sub(now, ws->last_time); | ||
433 | ws->total_time = ktime_add(ws->total_time, duration); | ||
434 | if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) | ||
435 | ws->max_time = duration; | ||
436 | |||
437 | del_timer(&ws->timer); | ||
438 | |||
439 | /* | ||
440 | * Increment the counter of registered wakeup events and decrement the | ||
441 | * counter of wakeup events in progress simultaneously. | ||
442 | */ | ||
443 | atomic_add(MAX_IN_PROGRESS, &combined_event_count); | ||
444 | } | ||
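The single atomic_add() above is what makes the two updates simultaneous. Assuming the packing this patch introduces earlier in the file (low half of combined_event_count counts events in progress, high half counts registered events, so MAX_IN_PROGRESS is (1 << 16) - 1 for a 32-bit int), a short worked example:

/*
 * comb = 0x00030002            3 registered, 2 in progress
 * comb + MAX_IN_PROGRESS
 *      = comb + 0x0000ffff
 *      = 0x00040001            4 registered, 1 in progress
 *
 * Adding MAX_IN_PROGRESS is adding (1 << 16) - 1: the high half gains
 * one while the low half loses one, in a single atomic operation.
 */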
445 | |||
446 | /** | ||
447 | * __pm_relax - Notify the PM core that processing of a wakeup event has ended. | ||
448 | * @ws: Wakeup source object associated with the source of the event. | ||
92 | * | 449 | * |
93 | * Call this function for wakeup events whose processing started with calling | 450 | * Call this function for wakeup events whose processing started with calling |
94 | * pm_stay_awake(). | 451 | * __pm_stay_awake(). |
95 | * | 452 | * |
96 | * It is safe to call it from interrupt context. | 453 | * It is safe to call it from interrupt context. |
97 | */ | 454 | */ |
98 | void pm_relax(void) | 455 | void __pm_relax(struct wakeup_source *ws) |
99 | { | 456 | { |
100 | unsigned long flags; | 457 | unsigned long flags; |
101 | 458 | ||
102 | spin_lock_irqsave(&events_lock, flags); | 459 | if (!ws) |
103 | if (events_in_progress) { | 460 | return; |
104 | events_in_progress--; | 461 | |
105 | event_count++; | 462 | spin_lock_irqsave(&ws->lock, flags); |
106 | } | 463 | if (ws->active) |
107 | spin_unlock_irqrestore(&events_lock, flags); | 464 | wakeup_source_deactivate(ws); |
465 | spin_unlock_irqrestore(&ws->lock, flags); | ||
108 | } | 466 | } |
467 | EXPORT_SYMBOL_GPL(__pm_relax); | ||
468 | |||
469 | /** | ||
470 | * pm_relax - Notify the PM core that processing of a wakeup event has ended. | ||
471 | * @dev: Device that signaled the event. | ||
472 | * | ||
473 | * Execute __pm_relax() for the @dev's wakeup source object. | ||
474 | */ | ||
475 | void pm_relax(struct device *dev) | ||
476 | { | ||
477 | unsigned long flags; | ||
478 | |||
479 | if (!dev) | ||
480 | return; | ||
481 | |||
482 | spin_lock_irqsave(&dev->power.lock, flags); | ||
483 | __pm_relax(dev->power.wakeup); | ||
484 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
485 | } | ||
486 | EXPORT_SYMBOL_GPL(pm_relax); | ||
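Taken together, pm_stay_awake() and pm_relax() bracket event processing; a sketch of the intended pattern in a hypothetical interrupt handler (the "foo" names are illustrative only):

/* Sketch: hold off suspend for exactly as long as the event is being
 * processed.  Both calls are safe from interrupt context. */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_stay_awake(dev);	/* events-in-progress counter goes up */
	foo_process_event(dev);	/* hypothetical event handling */
	pm_relax(dev);		/* counter goes back down */

	return IRQ_HANDLED;
}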
109 | 487 | ||
110 | /** | 488 | /** |
111 | * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. | 489 | * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. |
490 | * @data: Address of the wakeup source object associated with the event source. | ||
112 | * | 491 | * |
113 | * Decrease the counter of wakeup events being processed after it was increased | 492 | * Call __pm_relax() for the wakeup source whose address is stored in @data. |
114 | * by pm_wakeup_event(). | ||
115 | */ | 493 | */ |
116 | static void pm_wakeup_timer_fn(unsigned long data) | 494 | static void pm_wakeup_timer_fn(unsigned long data) |
117 | { | 495 | { |
496 | __pm_relax((struct wakeup_source *)data); | ||
497 | } | ||
498 | |||
499 | /** | ||
500 | * __pm_wakeup_event - Notify the PM core of a wakeup event. | ||
501 | * @ws: Wakeup source object associated with the event source. | ||
502 | * @msec: Anticipated event processing time (in milliseconds). | ||
503 | * | ||
504 | * Notify the PM core of a wakeup event whose source is @ws that will take | ||
505 | * approximately @msec milliseconds to be processed by the kernel. If @ws is | ||
506 | * not active, activate it. If @msec is zero, deactivate @ws immediately; | ||
507 | * otherwise, set up the @ws' timer to execute pm_wakeup_timer_fn() in the future. | ||
508 | * | ||
509 | * It is safe to call this function from interrupt context. | ||
510 | */ | ||
511 | void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) | ||
512 | { | ||
118 | unsigned long flags; | 513 | unsigned long flags; |
514 | unsigned long expires; | ||
119 | 515 | ||
120 | spin_lock_irqsave(&events_lock, flags); | 516 | if (!ws) |
121 | if (events_timer_expires | 517 | return; |
122 | && time_before_eq(events_timer_expires, jiffies)) { | 518 | |
123 | events_in_progress--; | 519 | spin_lock_irqsave(&ws->lock, flags); |
124 | events_timer_expires = 0; | 520 | |
521 | ws->event_count++; | ||
522 | if (!ws->active) | ||
523 | wakeup_source_activate(ws); | ||
524 | |||
525 | if (!msec) { | ||
526 | wakeup_source_deactivate(ws); | ||
527 | goto unlock; | ||
125 | } | 528 | } |
126 | spin_unlock_irqrestore(&events_lock, flags); | 529 | |
530 | expires = jiffies + msecs_to_jiffies(msec); | ||
531 | if (!expires) | ||
532 | expires = 1; | ||
533 | |||
534 | if (time_after(expires, ws->timer_expires)) { | ||
535 | mod_timer(&ws->timer, expires); | ||
536 | ws->timer_expires = expires; | ||
537 | } | ||
538 | |||
539 | unlock: | ||
540 | spin_unlock_irqrestore(&ws->lock, flags); | ||
127 | } | 541 | } |
542 | EXPORT_SYMBOL_GPL(__pm_wakeup_event); | ||
543 | |||
128 | 544 | ||
129 | /** | 545 | /** |
130 | * pm_wakeup_event - Notify the PM core of a wakeup event. | 546 | * pm_wakeup_event - Notify the PM core of a wakeup event. |
131 | * @dev: Device the wakeup event is related to. | 547 | * @dev: Device the wakeup event is related to. |
132 | * @msec: Anticipated event processing time (in milliseconds). | 548 | * @msec: Anticipated event processing time (in milliseconds). |
133 | * | 549 | * |
134 | * Notify the PM core of a wakeup event (signaled by @dev) that will take | 550 | * Call __pm_wakeup_event() for the @dev's wakeup source object. |
135 | * approximately @msec milliseconds to be processed by the kernel. Increment | ||
136 | * the counter of registered wakeup events and (if @msec is nonzero) set up | ||
137 | * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the | ||
138 | * timer has not been set up already, increment the counter of wakeup events | ||
139 | * being processed). If @dev is not NULL, the counter of wakeup events related | ||
140 | * to @dev is incremented too. | ||
141 | * | ||
142 | * It is safe to call this function from interrupt context. | ||
143 | */ | 551 | */ |
144 | void pm_wakeup_event(struct device *dev, unsigned int msec) | 552 | void pm_wakeup_event(struct device *dev, unsigned int msec) |
145 | { | 553 | { |
146 | unsigned long flags; | 554 | unsigned long flags; |
147 | 555 | ||
148 | spin_lock_irqsave(&events_lock, flags); | 556 | if (!dev) |
149 | event_count++; | 557 | return; |
150 | if (dev) | ||
151 | dev->power.wakeup_count++; | ||
152 | |||
153 | if (msec) { | ||
154 | unsigned long expires; | ||
155 | 558 | ||
156 | expires = jiffies + msecs_to_jiffies(msec); | 559 | spin_lock_irqsave(&dev->power.lock, flags); |
157 | if (!expires) | 560 | __pm_wakeup_event(dev->power.wakeup, msec); |
158 | expires = 1; | 561 | spin_unlock_irqrestore(&dev->power.lock, flags); |
562 | } | ||
563 | EXPORT_SYMBOL_GPL(pm_wakeup_event); | ||
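When the handler cannot tell how long processing will take (for example, user space finishes it), the timeout form is the right tool. A hypothetical sketch, with "foo" names again illustrative:

/* Sketch: report a wakeup event whose processing ends out of our sight;
 * give the kernel an estimated grace period instead of a matching
 * pm_relax() call. */
static irqreturn_t foo_wake_irq(int irq, void *data)
{
	struct device *dev = data;

	/* Keep the system awake for ~100 ms, unless pm_relax() is
	 * called for this device sooner. */
	pm_wakeup_event(dev, 100);

	return IRQ_HANDLED;
}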
159 | 564 | ||
160 | if (!events_timer_expires | 565 | /** |
161 | || time_after(expires, events_timer_expires)) { | 566 | * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources. |
162 | if (!events_timer_expires) | 567 | */ |
163 | events_in_progress++; | 568 | static void pm_wakeup_update_hit_counts(void) |
569 | { | ||
570 | unsigned long flags; | ||
571 | struct wakeup_source *ws; | ||
164 | 572 | ||
165 | mod_timer(&events_timer, expires); | 573 | rcu_read_lock(); |
166 | events_timer_expires = expires; | 574 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) { |
167 | } | 575 | spin_lock_irqsave(&ws->lock, flags); |
576 | if (ws->active) | ||
577 | ws->hit_count++; | ||
578 | spin_unlock_irqrestore(&ws->lock, flags); | ||
168 | } | 579 | } |
169 | spin_unlock_irqrestore(&events_lock, flags); | 580 | rcu_read_unlock(); |
170 | } | 581 | } |
171 | 582 | ||
172 | /** | 583 | /** |
173 | * pm_check_wakeup_events - Check for new wakeup events. | 584 | * pm_wakeup_pending - Check if power transition in progress should be aborted. |
174 | * | 585 | * |
175 | * Compare the current number of registered wakeup events with its preserved | 586 | * Compare the current number of registered wakeup events with its preserved |
176 | * value from the past to check if new wakeup events have been registered since | 587 | * value from the past and return true if new wakeup events have been registered |
177 | * the old value was stored. Check if the current number of wakeup events being | 588 | * since the old value was stored. Also return true if the current number of |
178 | * processed is zero. | 589 | * wakeup events being processed is nonzero. |
179 | */ | 590 | */ |
180 | bool pm_check_wakeup_events(void) | 591 | bool pm_wakeup_pending(void) |
181 | { | 592 | { |
182 | unsigned long flags; | 593 | unsigned long flags; |
183 | bool ret = true; | 594 | bool ret = false; |
184 | 595 | ||
185 | spin_lock_irqsave(&events_lock, flags); | 596 | spin_lock_irqsave(&events_lock, flags); |
186 | if (events_check_enabled) { | 597 | if (events_check_enabled) { |
187 | ret = (event_count == saved_event_count) && !events_in_progress; | 598 | unsigned int cnt, inpr; |
188 | events_check_enabled = ret; | 599 | |
600 | split_counters(&cnt, &inpr); | ||
601 | ret = (cnt != saved_count || inpr > 0); | ||
602 | events_check_enabled = !ret; | ||
189 | } | 603 | } |
190 | spin_unlock_irqrestore(&events_lock, flags); | 604 | spin_unlock_irqrestore(&events_lock, flags); |
605 | if (ret) | ||
606 | pm_wakeup_update_hit_counts(); | ||
191 | return ret; | 607 | return ret; |
192 | } | 608 | } |
193 | 609 | ||
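A hedged sketch of how a power transition is meant to consult this predicate (modeled on the PM core's usage, not copied from it):

/* Sketch: abort an in-flight transition if a wakeup event arrived. */
static int foo_suspend_step(void)
{
	if (pm_wakeup_pending())
		return -EBUSY;	/* back out, a wakeup event is pending */

	/* ... continue the power transition ... */
	return 0;
}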
@@ -198,29 +614,25 @@ bool pm_check_wakeup_events(void) | |||
198 | * Store the number of registered wakeup events at the address in @count. Block | 614 | * Store the number of registered wakeup events at the address in @count. Block |
199 | * if the current number of wakeup events being processed is nonzero. | 615 | * if the current number of wakeup events being processed is nonzero. |
200 | * | 616 | * |
201 | * Return false if the wait for the number of wakeup events being processed to | 617 | * Return 'false' if the wait for the number of wakeup events being processed to |
202 | * drop down to zero has been interrupted by a signal (and the current number | 618 | * drop down to zero has been interrupted by a signal (and the current number |
203 | * of wakeup events being processed is still nonzero). Otherwise return true. | 619 | * of wakeup events being processed is still nonzero). Otherwise return 'true'. |
204 | */ | 620 | */ |
205 | bool pm_get_wakeup_count(unsigned long *count) | 621 | bool pm_get_wakeup_count(unsigned int *count) |
206 | { | 622 | { |
207 | bool ret; | 623 | unsigned int cnt, inpr; |
208 | |||
209 | spin_lock_irq(&events_lock); | ||
210 | if (capable(CAP_SYS_ADMIN)) | ||
211 | events_check_enabled = false; | ||
212 | |||
213 | while (events_in_progress && !signal_pending(current)) { | ||
214 | spin_unlock_irq(&events_lock); | ||
215 | 624 | ||
216 | schedule_timeout_interruptible(msecs_to_jiffies(100)); | 625 | for (;;) { |
217 | 626 | split_counters(&cnt, &inpr); | |
218 | spin_lock_irq(&events_lock); | 627 | if (inpr == 0 || signal_pending(current)) |
628 | break; | ||
629 | pm_wakeup_update_hit_counts(); | ||
630 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | ||
219 | } | 631 | } |
220 | *count = event_count; | 632 | |
221 | ret = !events_in_progress; | 633 | split_counters(&cnt, &inpr); |
222 | spin_unlock_irq(&events_lock); | 634 | *count = cnt; |
223 | return ret; | 635 | return !inpr; |
224 | } | 636 | } |
225 | 637 | ||
226 | /** | 638 | /** |
@@ -229,19 +641,106 @@ bool pm_get_wakeup_count(unsigned long *count) | |||
229 | * | 641 | * |
230 | * If @count is equal to the current number of registered wakeup events and the | 642 | * If @count is equal to the current number of registered wakeup events and the |
231 | * current number of wakeup events being processed is zero, store @count as the | 643 | * current number of wakeup events being processed is zero, store @count as the |
232 | * old number of registered wakeup events to be used by pm_check_wakeup_events() | 644 | * old number of registered wakeup events for pm_wakeup_pending(), enable |
233 | * and return true. Otherwise return false. | 645 | * wakeup events detection and return 'true'. Otherwise disable wakeup events |
646 | * detection and return 'false'. | ||
234 | */ | 647 | */ |
235 | bool pm_save_wakeup_count(unsigned long count) | 648 | bool pm_save_wakeup_count(unsigned int count) |
236 | { | 649 | { |
237 | bool ret = false; | 650 | unsigned int cnt, inpr; |
238 | 651 | ||
652 | events_check_enabled = false; | ||
239 | spin_lock_irq(&events_lock); | 653 | spin_lock_irq(&events_lock); |
240 | if (count == event_count && !events_in_progress) { | 654 | split_counters(&cnt, &inpr); |
241 | saved_event_count = count; | 655 | if (cnt == count && inpr == 0) { |
656 | saved_count = count; | ||
242 | events_check_enabled = true; | 657 | events_check_enabled = true; |
243 | ret = true; | ||
244 | } | 658 | } |
245 | spin_unlock_irq(&events_lock); | 659 | spin_unlock_irq(&events_lock); |
660 | if (!events_check_enabled) | ||
661 | pm_wakeup_update_hit_counts(); | ||
662 | return events_check_enabled; | ||
663 | } | ||
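These two helpers implement the kernel side of the /sys/power/wakeup_count handshake. A user-space sketch of how a suspend daemon is expected to drive them (error handling abbreviated; not part of this patch):

/* User-space sketch of the wakeup_count read/write handshake. */
#include <fcntl.h>
#include <unistd.h>

static int wakeup_count_handshake(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	fd = open("/sys/power/wakeup_count", O_RDWR);
	if (fd < 0)
		return -1;

	/* The read blocks in pm_get_wakeup_count() until no wakeup
	 * events are in progress (or a signal arrives). */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n <= 0)
		goto fail;

	/* Writing the same count back runs pm_save_wakeup_count();
	 * the write fails if new events were registered meanwhile. */
	lseek(fd, 0, SEEK_SET);
	if (write(fd, buf, n) < 0)
		goto fail;

	close(fd);
	return 0;	/* safe to initiate suspend now */
fail:
	close(fd);
	return -1;
}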
664 | |||
665 | static struct dentry *wakeup_sources_stats_dentry; | ||
666 | |||
667 | /** | ||
668 | * print_wakeup_source_stats - Print wakeup source statistics information. | ||
669 | * @m: seq_file to print the statistics into. | ||
670 | * @ws: Wakeup source object to print the statistics for. | ||
671 | */ | ||
672 | static int print_wakeup_source_stats(struct seq_file *m, | ||
673 | struct wakeup_source *ws) | ||
674 | { | ||
675 | unsigned long flags; | ||
676 | ktime_t total_time; | ||
677 | ktime_t max_time; | ||
678 | unsigned long active_count; | ||
679 | ktime_t active_time; | ||
680 | int ret; | ||
681 | |||
682 | spin_lock_irqsave(&ws->lock, flags); | ||
683 | |||
684 | total_time = ws->total_time; | ||
685 | max_time = ws->max_time; | ||
686 | active_count = ws->active_count; | ||
687 | if (ws->active) { | ||
688 | active_time = ktime_sub(ktime_get(), ws->last_time); | ||
689 | total_time = ktime_add(total_time, active_time); | ||
690 | if (active_time.tv64 > max_time.tv64) | ||
691 | max_time = active_time; | ||
692 | } else { | ||
693 | active_time = ktime_set(0, 0); | ||
694 | } | ||
695 | |||
696 | ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t" | ||
697 | "%lld\t\t%lld\t\t%lld\t\t%lld\n", | ||
698 | ws->name, active_count, ws->event_count, ws->hit_count, | ||
699 | ktime_to_ms(active_time), ktime_to_ms(total_time), | ||
700 | ktime_to_ms(max_time), ktime_to_ms(ws->last_time)); | ||
701 | |||
702 | spin_unlock_irqrestore(&ws->lock, flags); | ||
703 | |||
246 | return ret; | 704 | return ret; |
247 | } | 705 | } |
706 | |||
707 | /** | ||
708 | * wakeup_sources_stats_show - Print wakeup sources statistics information. | ||
709 | * @m: seq_file to print the statistics into. | ||
710 | */ | ||
711 | static int wakeup_sources_stats_show(struct seq_file *m, void *unused) | ||
712 | { | ||
713 | struct wakeup_source *ws; | ||
714 | |||
715 | seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t" | ||
716 | "active_since\ttotal_time\tmax_time\tlast_change\n"); | ||
717 | |||
718 | rcu_read_lock(); | ||
719 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) | ||
720 | print_wakeup_source_stats(m, ws); | ||
721 | rcu_read_unlock(); | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static int wakeup_sources_stats_open(struct inode *inode, struct file *file) | ||
727 | { | ||
728 | return single_open(file, wakeup_sources_stats_show, NULL); | ||
729 | } | ||
730 | |||
731 | static const struct file_operations wakeup_sources_stats_fops = { | ||
732 | .owner = THIS_MODULE, | ||
733 | .open = wakeup_sources_stats_open, | ||
734 | .read = seq_read, | ||
735 | .llseek = seq_lseek, | ||
736 | .release = single_release, | ||
737 | }; | ||
738 | |||
739 | static int __init wakeup_sources_debugfs_init(void) | ||
740 | { | ||
741 | wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources", | ||
742 | S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops); | ||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | postcore_initcall(wakeup_sources_debugfs_init); | ||