author	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-21 17:53:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-21 17:53:17 -0400
commit	a8cbf22559ceefdcdfac00701e8e6da7518b7e8e (patch)
tree	63ebd5779a37f809f7daed77dbf27aa3f1e1110c /drivers/base
parent	e36f561a2c88394ef2708f1ab300fe8a79e9f651 (diff)
parent	9c034392533f3e9f00656d5c58478cff2560ef81 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6: (26 commits)
  PM / Wakeup: Show wakeup sources statistics in debugfs
  PM: Introduce library for device-specific OPPs (v7)
  PM: Add sysfs attr for rechecking dev hash from PM trace
  PM: Lock PM device list mutex in show_dev_hash()
  PM / Runtime: Remove idle notification after failing suspend
  PM / Hibernate: Modify signature used to mark swap
  PM / Runtime: Reduce code duplication in core helper functions
  PM: Allow wakeup events to abort freezing of tasks
  PM: runtime: add missed pm_request_autosuspend
  PM / Hibernate: Make some boot messages look less scary
  PM / Runtime: Implement autosuspend support
  PM / Runtime: Add no_callbacks flag
  PM / Runtime: Combine runtime PM entry points
  PM / Runtime: Merge synchronous and async runtime routines
  PM / Runtime: Replace boolean arguments with bitflags
  PM / Runtime: Move code in drivers/base/power/runtime.c
  sysfs: Add sysfs_merge_group() and sysfs_unmerge_group()
  PM: Fix potential issue with failing asynchronous suspend
  PM / Wakeup: Introduce wakeup source objects and event statistics (v3)
  PM: Fix signed/unsigned warning in dpm_show_time()
  ...
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/Makefile	1
-rw-r--r--	drivers/base/power/generic_ops.c	4
-rw-r--r--	drivers/base/power/main.c	21
-rw-r--r--	drivers/base/power/opp.c	628
-rw-r--r--	drivers/base/power/power.h	2
-rw-r--r--	drivers/base/power/runtime.c	944
-rw-r--r--	drivers/base/power/sysfs.c	217
-rw-r--r--	drivers/base/power/trace.c	36
-rw-r--r--	drivers/base/power/wakeup.c	613
9 files changed, 1927 insertions, 539 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index cbccf9a3cee4..abe46edfe5b4 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_OPS)	+= generic_ops.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
+obj-$(CONFIG_PM_OPP)	+= opp.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 4b29d4981253..81f2c84697f4 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -46,7 +46,7 @@ int pm_generic_runtime_suspend(struct device *dev)
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int ret;
 
-	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL;
+	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
 
 	return ret;
 }
@@ -65,7 +65,7 @@ int pm_generic_runtime_resume(struct device *dev)
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int ret;
 
-	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL;
+	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
 
 	return ret;
 }
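With the two hunks above, a missing ->runtime_suspend() or ->runtime_resume() callback is treated as success instead of -EINVAL, so a subsystem can install the generic helpers as bus-wide defaults without forcing every driver to provide runtime PM callbacks. A minimal sketch of such a bus type (the foo_* names are invented for illustration, not part of this patch):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Route runtime PM through the generic helpers; drivers that supply no
 * runtime callbacks now suspend and resume successfully. */
static const struct dev_pm_ops foo_bus_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
};

struct bus_type foo_bus_type = {
	.name = "foo",
	.pm = &foo_bus_pm_ops,
};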
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 276d5a701dc3..31b526661ec4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -51,6 +51,8 @@ static pm_message_t pm_transition;
  */
 static bool transition_started;
 
+static int async_error;
+
 /**
  * device_pm_init - Initialize the PM-related part of a device object.
  * @dev: Device object being initialized.
@@ -60,7 +62,8 @@ void device_pm_init(struct device *dev)
 	dev->power.status = DPM_ON;
 	init_completion(&dev->power.completion);
 	complete_all(&dev->power.completion);
-	dev->power.wakeup_count = 0;
+	dev->power.wakeup = NULL;
+	spin_lock_init(&dev->power.lock);
 	pm_runtime_init(dev);
 }
 
@@ -120,6 +123,7 @@ void device_pm_remove(struct device *dev)
 	mutex_lock(&dpm_list_mtx);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
+	device_wakeup_disable(dev);
 	pm_runtime_remove(dev);
 }
 
@@ -407,7 +411,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
 {
 	ktime_t calltime;
-	s64 usecs64;
+	u64 usecs64;
 	int usecs;
 
 	calltime = ktime_get();
@@ -600,6 +604,7 @@ static void dpm_resume(pm_message_t state)
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
+	async_error = 0;
 
 	list_for_each_entry(dev, &dpm_list, power.entry) {
 		if (dev->power.status < DPM_OFF)
@@ -829,8 +834,6 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
 	return error;
 }
 
-static int async_error;
-
 /**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
@@ -885,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
 
+	if (error)
+		async_error = error;
+
 	return error;
 }
 
@@ -894,10 +900,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
 	int error;
 
 	error = __device_suspend(dev, pm_transition, true);
-	if (error) {
+	if (error)
 		pm_dev_err(dev, pm_transition, " async", error);
-		async_error = error;
-	}
 
 	put_device(dev);
 }
@@ -1085,8 +1089,9 @@ EXPORT_SYMBOL_GPL(__suspend_report_result);
  * @dev: Device to wait for.
  * @subordinate: Device that needs to wait for @dev.
  */
-void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
+int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
 {
 	dpm_wait(dev, subordinate->power.async_suspend);
+	return async_error;
 }
 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
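Since device_pm_wait_for_dev() now returns async_error, a driver whose suspend depends on another device can both wait for that device and learn whether any asynchronous suspend in the current transition has already failed. A hedged sketch (the foo_* names and the partner field are invented for illustration):

#include <linux/device.h>
#include <linux/pm.h>

struct foo_ctx {
	struct device *partner;	/* device we must suspend after */
};

static int foo_suspend(struct device *dev)
{
	struct foo_ctx *ctx = dev_get_drvdata(dev);
	int error;

	/* Block until ctx->partner finishes suspending; with this patch
	 * a nonzero return reports a failed async suspend elsewhere. */
	error = device_pm_wait_for_dev(dev, ctx->partner);
	if (error)
		return error;

	/* ... device-specific suspend work would go here ... */
	return 0;
}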
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
new file mode 100644
index 000000000000..2bb9b4cf59d7
--- /dev/null
+++ b/drivers/base/power/opp.c
@@ -0,0 +1,628 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *	Nishanth Menon
+ *	Romit Dasgupta
+ *	Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/opp.h>
+
+/*
+ * Internal data structure organization with the OPP layer library is as
+ * follows:
+ * dev_opp_list (root)
+ *	|- device 1 (represents voltage domain 1)
+ *	|	|- opp 1 (availability, freq, voltage)
+ *	|	|- opp 2 ..
+ *	...	...
+ *	|	`- opp n ..
+ *	|- device 2 (represents the next voltage domain)
+ *	...
+ *	`- device m (represents mth voltage domain)
+ * device 1, 2.. are represented by dev_opp structure while each opp
+ * is represented by the opp structure.
+ */
+
+/**
+ * struct opp - Generic OPP description structure
+ * @node:	opp list node. The nodes are maintained throughout the lifetime
+ *		of boot. It is expected only an optimal set of OPPs are
+ *		added to the library by the SoC framework.
+ *		RCU usage: opp list is traversed with RCU locks. node
+ *		modification is possible realtime, hence the modifications
+ *		are protected by the dev_opp_list_lock for integrity.
+ *		IMPORTANT: the opp nodes should be maintained in increasing
+ *		order.
+ * @available:	true/false - marks if this OPP is available or not
+ * @rate:	Frequency in hertz
+ * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
+ * @dev_opp:	points back to the device_opp struct this opp belongs to
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct opp {
+	struct list_head node;
+
+	bool available;
+	unsigned long rate;
+	unsigned long u_volt;
+
+	struct device_opp *dev_opp;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node:	list node - contains the devices with OPPs that
+ *		have been registered. Nodes once added are not modified in this
+ *		list.
+ *		RCU usage: nodes are not modified in the list of device_opp,
+ *		however addition is possible and is secured by dev_opp_list_lock
+ * @dev:	device pointer
+ * @opp_list:	list of opps
+ *
+ * This is an internal data structure maintaining the link to opps attached to
+ * a device. This structure is not meant to be shared with users as it is
+ * meant for book keeping and private to the OPP library.
+ */
+struct device_opp {
+	struct list_head node;
+
+	struct device *dev;
+	struct list_head opp_list;
+};
+
+/*
+ * The root of the list of all devices. All device_opp structures branch off
+ * from here, with each device_opp containing the list of opp it supports in
+ * various states of availability.
+ */
+static LIST_HEAD(dev_opp_list);
+/* Lock to allow exclusive modification to the device and opp lists */
+static DEFINE_MUTEX(dev_opp_list_lock);
+
+/**
+ * find_device_opp() - find device_opp struct using device pointer
+ * @dev:	device pointer used to lookup device OPPs
+ *
+ * Search list of device OPPs for one containing matching device. Does a RCU
+ * reader operation to grab the pointer needed.
+ *
+ * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * -EINVAL based on type of error.
+ *
+ * Locking: This function must be called under rcu_read_lock(). device_opp
+ * is a RCU protected pointer. This means that device_opp is valid as long
+ * as we are under RCU lock.
+ */
+static struct device_opp *find_device_opp(struct device *dev)
+{
+	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+
+	if (unlikely(IS_ERR_OR_NULL(dev))) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
+		if (tmp_dev_opp->dev == dev) {
+			dev_opp = tmp_dev_opp;
+			break;
+		}
+	}
+
+	return dev_opp;
+}
+
+/**
+ * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * @opp:	opp for which the voltage has to be returned
+ *
+ * Return voltage in micro volt corresponding to the opp, else
+ * return 0
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This means that opp which could have been fetched by
+ * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+unsigned long opp_get_voltage(struct opp *opp)
+{
+	struct opp *tmp_opp;
+	unsigned long v = 0;
+
+	tmp_opp = rcu_dereference(opp);
+	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+		pr_err("%s: Invalid parameters\n", __func__);
+	else
+		v = tmp_opp->u_volt;
+
+	return v;
+}
+
+/**
+ * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * @opp:	opp for which the frequency has to be returned
+ *
+ * Return frequency in hertz corresponding to the opp, else
+ * return 0
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This means that opp which could have been fetched by
+ * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+unsigned long opp_get_freq(struct opp *opp)
+{
+	struct opp *tmp_opp;
+	unsigned long f = 0;
+
+	tmp_opp = rcu_dereference(opp);
+	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+		pr_err("%s: Invalid parameters\n", __func__);
+	else
+		f = tmp_opp->rate;
+
+	return f;
+}
+
+/**
+ * opp_get_opp_count() - Get number of opps available in the opp list
+ * @dev:	device for which we do this operation
+ *
+ * This function returns the number of available opps if there are any,
+ * else returns 0 if none or the corresponding error value.
+ *
+ * Locking: This function must be called under rcu_read_lock(). This function
+ * internally references two RCU protected structures: device_opp and opp which
+ * are safe as long as we are under a common RCU locked section.
+ */
+int opp_get_opp_count(struct device *dev)
+{
+	struct device_opp *dev_opp;
+	struct opp *temp_opp;
+	int count = 0;
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		int r = PTR_ERR(dev_opp);
+		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+		return r;
+	}
+
+	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+		if (temp_opp->available)
+			count++;
+	}
+
+	return count;
+}
+
+/**
+ * opp_find_freq_exact() - search for an exact frequency
+ * @dev:	device for which we do this operation
+ * @freq:	frequency to search for
+ * @available:	true/false - match for available opp
+ *
+ * Searches for exact match in the opp list and returns pointer to the matching
+ * opp if found, else returns ERR_PTR in case of error and should be handled
+ * using IS_ERR.
+ *
+ * Note: available is a modifier for the search. If available=true, then the
+ * match is for an exact matching frequency which is available in the stored
+ * OPP table. If false, the match is for an exact frequency which is not
+ * available.
+ *
+ * This provides a mechanism to enable an opp which is not available currently
+ * or the opposite as well.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
+				bool available)
+{
+	struct device_opp *dev_opp;
+	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		int r = PTR_ERR(dev_opp);
+		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+		return ERR_PTR(r);
+	}
+
+	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+		if (temp_opp->available == available &&
+				temp_opp->rate == freq) {
+			opp = temp_opp;
+			break;
+		}
+	}
+
+	return opp;
+}
+
+/**
+ * opp_find_freq_ceil() - Search for a rounded ceil freq
+ * @dev:	device for which we do this operation
+ * @freq:	Start frequency
+ *
+ * Search for the matching ceil *available* OPP from a starting freq
+ * for a device.
+ *
+ * Returns matching *opp and refreshes *freq accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+{
+	struct device_opp *dev_opp;
+	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+	if (!dev || !freq) {
+		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
+		return ERR_PTR(-EINVAL);
+	}
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp))
+		return opp;
+
+	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+		if (temp_opp->available && temp_opp->rate >= *freq) {
+			opp = temp_opp;
+			*freq = opp->rate;
+			break;
+		}
+	}
+
+	return opp;
+}
+
+/**
+ * opp_find_freq_floor() - Search for a rounded floor freq
+ * @dev:	device for which we do this operation
+ * @freq:	Start frequency
+ *
+ * Search for the matching floor *available* OPP from a starting freq
+ * for a device.
+ *
+ * Returns matching *opp and refreshes *freq accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+{
+	struct device_opp *dev_opp;
+	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+
+	if (!dev || !freq) {
+		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
+		return ERR_PTR(-EINVAL);
+	}
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp))
+		return opp;
+
+	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+		if (temp_opp->available) {
+			/* go to the next node, before choosing prev */
+			if (temp_opp->rate > *freq)
+				break;
+			else
+				opp = temp_opp;
+		}
+	}
+	if (!IS_ERR(opp))
+		*freq = opp->rate;
+
+	return opp;
+}
+
+/**
+ * opp_add() - Add an OPP table from a table definition
+ * @dev:	device for which we do this operation
+ * @freq:	Frequency in Hz for this OPP
+ * @u_volt:	Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+{
+	struct device_opp *dev_opp = NULL;
+	struct opp *opp, *new_opp;
+	struct list_head *head;
+
+	/* allocate new OPP node */
+	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+	if (!new_opp) {
+		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Hold our list modification lock here */
+	mutex_lock(&dev_opp_list_lock);
+
+	/* Check for existing list for 'dev' */
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		/*
+		 * Allocate a new device OPP table. In the infrequent case
+		 * where a new device is needed to be added, we pay this
+		 * penalty.
+		 */
+		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
+		if (!dev_opp) {
+			mutex_unlock(&dev_opp_list_lock);
+			kfree(new_opp);
+			dev_warn(dev,
+				"%s: Unable to create device OPP structure\n",
+				__func__);
+			return -ENOMEM;
+		}
+
+		dev_opp->dev = dev;
+		INIT_LIST_HEAD(&dev_opp->opp_list);
+
+		/* Secure the device list modification */
+		list_add_rcu(&dev_opp->node, &dev_opp_list);
+	}
+
+	/* populate the opp table */
+	new_opp->dev_opp = dev_opp;
+	new_opp->rate = freq;
+	new_opp->u_volt = u_volt;
+	new_opp->available = true;
+
+	/* Insert new OPP in order of increasing frequency */
+	head = &dev_opp->opp_list;
+	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+		if (new_opp->rate < opp->rate)
+			break;
+		else
+			head = &opp->node;
+	}
+
+	list_add_rcu(&new_opp->node, head);
+	mutex_unlock(&dev_opp_list_lock);
+
+	return 0;
+}
+
+/**
+ * opp_set_availability() - helper to set the availability of an opp
+ * @dev:		device for which we do this operation
+ * @freq:		OPP frequency to modify availability
+ * @availability_req:	availability status requested for this opp
+ *
+ * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
+ * share a common logic which is isolated here.
+ *
+ * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks to
+ * keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+static int opp_set_availability(struct device *dev, unsigned long freq,
+		bool availability_req)
+{
+	struct device_opp *tmp_dev_opp, *dev_opp = NULL;
+	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+	int r = 0;
+
+	/* keep the node allocated */
+	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+	if (!new_opp) {
+		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&dev_opp_list_lock);
+
+	/* Find the device_opp */
+	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
+		if (dev == tmp_dev_opp->dev) {
+			dev_opp = tmp_dev_opp;
+			break;
+		}
+	}
+	if (IS_ERR(dev_opp)) {
+		r = PTR_ERR(dev_opp);
+		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+		goto unlock;
+	}
+
+	/* Do we have the frequency? */
+	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+		if (tmp_opp->rate == freq) {
+			opp = tmp_opp;
+			break;
+		}
+	}
+	if (IS_ERR(opp)) {
+		r = PTR_ERR(opp);
+		goto unlock;
+	}
+
+	/* Is update really needed? */
+	if (opp->available == availability_req)
+		goto unlock;
+	/* copy the old data over */
+	*new_opp = *opp;
+
+	/* plug in new node */
+	new_opp->available = availability_req;
+
+	list_replace_rcu(&opp->node, &new_opp->node);
+	mutex_unlock(&dev_opp_list_lock);
+	synchronize_rcu();
+
+	/* clean up old opp */
+	new_opp = opp;
+	goto out;
+
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+out:
+	kfree(new_opp);
+	return r;
+}
+
+/**
+ * opp_enable() - Enable a specific OPP
+ * @dev:	device for which we do this operation
+ * @freq:	OPP frequency to enable
+ *
+ * Enables a provided opp. If the operation is valid, this returns 0, else the
+ * corresponding error value. It is meant to be used by users to make an OPP
+ * available after it has been temporarily made unavailable with opp_disable.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU and mutex locks to keep the
+ * integrity of the internal data structures. Callers should ensure that
+ * this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int opp_enable(struct device *dev, unsigned long freq)
+{
+	return opp_set_availability(dev, freq, true);
+}
+
+/**
+ * opp_disable() - Disable a specific OPP
+ * @dev:	device for which we do this operation
+ * @freq:	OPP frequency to disable
+ *
+ * Disables a provided opp. If the operation is valid, this returns
+ * 0, else the corresponding error value. It is meant to be a temporary
+ * control by users to make this OPP not available until the circumstances are
+ * right to make it available again (with a call to opp_enable).
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU and mutex locks to keep the
+ * integrity of the internal data structures. Callers should ensure that
+ * this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int opp_disable(struct device *dev, unsigned long freq)
+{
+	return opp_set_availability(dev, freq, false);
+}
+
+#ifdef CONFIG_CPU_FREQ
+/**
+ * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev:	device for which we do this operation
+ * @table:	Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device - this assumes that the
+ * opp list is already initialized and ready for usage.
+ *
+ * This function allocates required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
+ * -ENOMEM if no memory is available for the operation (table is not
+ * populated), and returns 0 if successful and the table is populated.
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the mentioned functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * To simplify the logic, we pretend we are updater and hold relevant mutex here
+ * Callers should ensure that this function is *NOT* called under RCU protection
+ * or in contexts where mutex locking cannot be used.
+ */
+int opp_init_cpufreq_table(struct device *dev,
+			    struct cpufreq_frequency_table **table)
+{
+	struct device_opp *dev_opp;
+	struct opp *opp;
+	struct cpufreq_frequency_table *freq_table;
+	int i = 0;
+
+	/* Pretend as if I am an updater */
+	mutex_lock(&dev_opp_list_lock);
+
+	dev_opp = find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		int r = PTR_ERR(dev_opp);
+		mutex_unlock(&dev_opp_list_lock);
+		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+		return r;
+	}
+
+	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+	if (!freq_table) {
+		mutex_unlock(&dev_opp_list_lock);
+		dev_warn(dev, "%s: Unable to allocate frequency table\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	list_for_each_entry(opp, &dev_opp->opp_list, node) {
+		if (opp->available) {
+			freq_table[i].index = i;
+			freq_table[i].frequency = opp->rate / 1000;
+			i++;
+		}
+	}
+	mutex_unlock(&dev_opp_list_lock);
+
+	freq_table[i].index = i;
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+	*table = &freq_table[0];
+
+	return 0;
+}
+#endif		/* CONFIG_CPU_FREQ */
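The interface above splits into a process-context registration side (opp_add(), opp_enable(), opp_disable(), which take the list mutex and may sleep) and an RCU-reader lookup side (opp_find_freq_{exact,ceil,floor}() plus opp_get_{voltage,freq}(), which must run under rcu_read_lock()). A hedged usage sketch for a SoC framework; the frequencies, voltages, and soc_* names are illustrative assumptions, not part of this patch:

#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int soc_register_opps(struct device *dev)
{
	int ret;

	/* Registration runs in process context; opp_add() may sleep. */
	ret = opp_add(dev, 300000000, 1000000);	/* 300 MHz at 1.00 V */
	if (ret)
		return ret;
	return opp_add(dev, 600000000, 1200000);	/* 600 MHz at 1.20 V */
}

static int soc_pick_opp(struct device *dev, unsigned long *freq,
			unsigned long *volt)
{
	struct opp *opp;

	/* Lookups and the returned opp pointer are only valid under RCU. */
	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*volt = opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}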
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c0bd03c83b9c..698dde742587 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -34,6 +34,7 @@ extern void device_pm_move_last(struct device *);
 
 static inline void device_pm_init(struct device *dev)
 {
+	spin_lock_init(&dev->power.lock);
 	pm_runtime_init(dev);
 }
 
@@ -59,6 +60,7 @@ static inline void device_pm_move_last(struct device *dev) {}
 
 extern int dpm_sysfs_add(struct device *);
 extern void dpm_sysfs_remove(struct device *);
+extern void rpm_sysfs_remove(struct device *);
 
 #else /* CONFIG_PM */
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b78c401ffa73..1dd8676d7f55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -2,17 +2,55 @@
  * drivers/base/power/runtime.c - Helper functions for device run-time PM
  *
  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
  *
  * This file is released under the GPLv2.
  */
 
 #include <linux/sched.h>
 #include <linux/pm_runtime.h>
-#include <linux/jiffies.h>
+#include "power.h"
 
-static int __pm_runtime_resume(struct device *dev, bool from_wq);
-static int __pm_request_idle(struct device *dev);
-static int __pm_request_resume(struct device *dev);
+static int rpm_resume(struct device *dev, int rpmflags);
+static int rpm_suspend(struct device *dev, int rpmflags);
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+	unsigned long now = jiffies;
+	int delta;
+
+	delta = now - dev->power.accounting_timestamp;
+
+	if (delta < 0)
+		delta = 0;
+
+	dev->power.accounting_timestamp = now;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		dev->power.suspended_jiffies += delta;
+	else
+		dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+	update_pm_runtime_accounting(dev);
+	dev->power.runtime_status = status;
+}
 
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
@@ -40,62 +78,154 @@ static void pm_runtime_cancel_pending(struct device *dev)
 	dev->power.request = RPM_REQ_NONE;
 }
 
-/**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_runtime_idle(struct device *dev)
-	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+/*
+ * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
+ * @dev: Device to handle.
+ *
+ * Compute the autosuspend-delay expiration time based on the device's
+ * power.last_busy time.  If the delay has already expired or is disabled
+ * (negative) or the power.use_autosuspend flag isn't set, return 0.
+ * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ *
+ * This function may be called either with or without dev->power.lock held.
+ * Either way it can be racy, since power.last_busy may be updated at any time.
+ */
+unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+{
+	int autosuspend_delay;
+	long elapsed;
+	unsigned long last_busy;
+	unsigned long expires = 0;
+
+	if (!dev->power.use_autosuspend)
+		goto out;
+
+	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+	if (autosuspend_delay < 0)
+		goto out;
+
+	last_busy = ACCESS_ONCE(dev->power.last_busy);
+	elapsed = jiffies - last_busy;
+	if (elapsed < 0)
+		goto out;	/* jiffies has wrapped around. */
+
+	/*
+	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
+	 * up to the nearest second.
+	 */
+	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
+	if (autosuspend_delay >= 1000)
+		expires = round_jiffies(expires);
+	expires += !expires;
+	if (elapsed >= expires - last_busy)
+		expires = 0;	/* Already expired. */
+
+ out:
+	return expires;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+
+/**
+ * rpm_check_suspend_allowed - Test whether a device may be suspended.
+ * @dev: Device to test.
+ */
+static int rpm_check_suspend_allowed(struct device *dev)
 {
 	int retval = 0;
 
 	if (dev->power.runtime_error)
 		retval = -EINVAL;
-	else if (dev->power.idle_notification)
-		retval = -EINPROGRESS;
 	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0
-	    || dev->power.runtime_status != RPM_ACTIVE)
+	    || dev->power.disable_depth > 0)
 		retval = -EAGAIN;
 	else if (!pm_children_suspended(dev))
 		retval = -EBUSY;
+
+	/* Pending resume requests take precedence over suspends. */
+	else if ((dev->power.deferred_resume
+			&& dev->power.runtime_status == RPM_SUSPENDING)
+	    || (dev->power.request_pending
+			&& dev->power.request == RPM_REQ_RESUME))
+		retval = -EAGAIN;
+	else if (dev->power.runtime_status == RPM_SUSPENDED)
+		retval = 1;
+
+	return retval;
+}
+
+/**
+ * rpm_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's run-time PM status allows it to be suspended.  If
+ * another idle notification has been started earlier, return immediately.  If
+ * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+ * run the ->runtime_idle() callback directly.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_idle(struct device *dev, int rpmflags)
+{
+	int (*callback)(struct device *);
+	int retval;
+
+	retval = rpm_check_suspend_allowed(dev);
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
+	else if (dev->power.runtime_status != RPM_ACTIVE)
+		retval = -EAGAIN;
+
+	/*
+	 * Any pending request other than an idle notification takes
+	 * precedence over us, except that the timer may be running.
+	 */
+	else if (dev->power.request_pending &&
+	    dev->power.request > RPM_REQ_IDLE)
+		retval = -EAGAIN;
+
+	/* Act as though RPM_NOWAIT is always set. */
+	else if (dev->power.idle_notification)
+		retval = -EINPROGRESS;
 	if (retval)
 		goto out;
 
-	if (dev->power.request_pending) {
-		/*
-		 * If an idle notification request is pending, cancel it. Any
-		 * other pending request takes precedence over us.
-		 */
-		if (dev->power.request == RPM_REQ_IDLE) {
-			dev->power.request = RPM_REQ_NONE;
-		} else if (dev->power.request != RPM_REQ_NONE) {
-			retval = -EAGAIN;
-			goto out;
+	/* Pending requests need to be canceled. */
+	dev->power.request = RPM_REQ_NONE;
+
+	if (dev->power.no_callbacks) {
+		/* Assume ->runtime_idle() callback would have suspended. */
+		retval = rpm_suspend(dev, rpmflags);
+		goto out;
+	}
+
+	/* Carry out an asynchronous or a synchronous idle notification. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_IDLE;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
 		}
+		goto out;
 	}
 
 	dev->power.idle_notification = true;
 
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
-		spin_unlock_irq(&dev->power.lock);
-
-		dev->bus->pm->runtime_idle(dev);
-
-		spin_lock_irq(&dev->power.lock);
-	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
-		spin_unlock_irq(&dev->power.lock);
-
-		dev->type->pm->runtime_idle(dev);
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
+		callback = dev->bus->pm->runtime_idle;
+	else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
+		callback = dev->type->pm->runtime_idle;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_idle;
+	else
+		callback = NULL;
 
-		spin_lock_irq(&dev->power.lock);
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_idle) {
+	if (callback) {
 		spin_unlock_irq(&dev->power.lock);
 
-		dev->class->pm->runtime_idle(dev);
+		callback(dev);
 
 		spin_lock_irq(&dev->power.lock);
 	}
@@ -108,113 +238,99 @@ static int __pm_runtime_idle(struct device *dev)
 }
 
 /**
- * pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
+ * rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
  */
-int pm_runtime_idle(struct device *dev)
+static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	int retval;
 
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_idle(dev);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_idle);
-
-
-/**
- * update_pm_runtime_accounting - Update the time accounting of power states
- * @dev: Device to update the accounting for
- *
- * In order to be able to have time accounting of the various power states
- * (as used by programs such as PowerTOP to show the effectiveness of runtime
- * PM), we need to track the time spent in each state.
- * update_pm_runtime_accounting must be called each time before the
- * runtime_status field is updated, to account the time in the old state
- * correctly.
- */
-void update_pm_runtime_accounting(struct device *dev)
-{
-	unsigned long now = jiffies;
-	int delta;
-
-	delta = now - dev->power.accounting_timestamp;
-
-	if (delta < 0)
-		delta = 0;
+	if (!cb)
+		return -ENOSYS;
 
-	dev->power.accounting_timestamp = now;
+	spin_unlock_irq(&dev->power.lock);
 
-	if (dev->power.disable_depth > 0)
-		return;
+	retval = cb(dev);
 
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		dev->power.suspended_jiffies += delta;
-	else
-		dev->power.active_jiffies += delta;
-}
-
-static void __update_runtime_status(struct device *dev, enum rpm_status status)
-{
-	update_pm_runtime_accounting(dev);
-	dev->power.runtime_status = status;
+	spin_lock_irq(&dev->power.lock);
+	dev->power.runtime_error = retval;
+
+	return retval;
 }
 
 /**
- * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
  *
- * Check if the device can be suspended and run the ->runtime_suspend() callback
- * provided by its bus type. If another suspend has been started earlier, wait
- * for it to finish. If an idle notification or suspend request is pending or
- * scheduled, cancel it.
+ * Check if the device's run-time PM status allows it to be suspended.  If
+ * another suspend has been started earlier, either return immediately or wait
+ * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
+ * pending idle notification.  If the RPM_ASYNC flag is set then queue a
+ * suspend request; otherwise run the ->runtime_suspend() callback directly.
+ * If a deferred resume was requested while the callback was running then carry
+ * it out; otherwise send an idle notification for the device (if the suspend
+ * failed) or for its parent (if the suspend succeeded).
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-int __pm_runtime_suspend(struct device *dev, bool from_wq)
+static int rpm_suspend(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
+	int (*callback)(struct device *);
 	struct device *parent = NULL;
-	bool notify = false;
-	int retval = 0;
+	int retval;
 
-	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
-		from_wq ? " from workqueue" : "");
+	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
  repeat:
-	if (dev->power.runtime_error) {
-		retval = -EINVAL;
-		goto out;
-	}
+	retval = rpm_check_suspend_allowed(dev);
 
-	/* Pending resume requests take precedence over us. */
-	if (dev->power.request_pending
-	    && dev->power.request == RPM_REQ_RESUME) {
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
+	else if (dev->power.runtime_status == RPM_RESUMING &&
+	    !(rpmflags & RPM_ASYNC))
 		retval = -EAGAIN;
+	if (retval)
 		goto out;
+
+	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
+	if ((rpmflags & RPM_AUTO)
+	    && dev->power.runtime_status != RPM_SUSPENDING) {
+		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+
+		if (expires != 0) {
+			/* Pending requests need to be canceled. */
+			dev->power.request = RPM_REQ_NONE;
+
+			/*
+			 * Optimization: If the timer is already running and is
+			 * set to expire at or before the autosuspend delay,
+			 * avoid the overhead of resetting it.  Just let it
+			 * expire; pm_suspend_timer_fn() will take care of the
+			 * rest.
+			 */
+			if (!(dev->power.timer_expires && time_before_eq(
+			    dev->power.timer_expires, expires))) {
+				dev->power.timer_expires = expires;
+				mod_timer(&dev->power.suspend_timer, expires);
+			}
+			dev->power.timer_autosuspends = 1;
+			goto out;
+		}
 	}
 
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (dev->power.runtime_status == RPM_RESUMING
-	    || dev->power.disable_depth > 0
-	    || atomic_read(&dev->power.usage_count) > 0)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval)
-		goto out;
-
 	if (dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (from_wq) {
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 			retval = -EINPROGRESS;
 			goto out;
 		}
@@ -236,46 +352,42 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	__update_runtime_status(dev, RPM_SUSPENDING);
 	dev->power.deferred_resume = false;
+	if (dev->power.no_callbacks)
+		goto no_callback;	/* Assume success. */
+
+	/* Carry out an asynchronous or a synchronous suspend. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = (rpmflags & RPM_AUTO) ?
+		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		goto out;
+	}
 
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->bus->pm->runtime_suspend(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->type && dev->type->pm
-	    && dev->type->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->type->pm->runtime_suspend(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_suspend) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->class->pm->runtime_suspend(dev);
+	__update_runtime_status(dev, RPM_SUSPENDING);
 
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else {
-		retval = -ENOSYS;
-	}
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+		callback = dev->bus->pm->runtime_suspend;
+	else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
+		callback = dev->type->pm->runtime_suspend;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_suspend;
+	else
+		callback = NULL;
 
+	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
-		if (retval == -EAGAIN || retval == -EBUSY) {
-			if (dev->power.timer_expires == 0)
-				notify = true;
+		dev->power.deferred_resume = 0;
+		if (retval == -EAGAIN || retval == -EBUSY)
 			dev->power.runtime_error = 0;
-		} else {
+		else
 			pm_runtime_cancel_pending(dev);
-		}
 	} else {
+ no_callback:
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
 
@@ -287,14 +399,11 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (dev->power.deferred_resume) {
-		__pm_runtime_resume(dev, false);
+		rpm_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
 	}
 
-	if (notify)
-		__pm_runtime_idle(dev);
-
 	if (parent && !parent->power.ignore_children) {
 		spin_unlock_irq(&dev->power.lock);
 
@@ -304,72 +413,69 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	}
 
  out:
-	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
-
-	return retval;
-}
-
-/**
- * pm_runtime_suspend - Carry out run-time suspend of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_suspend(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_suspend(dev, false);
-	spin_unlock_irq(&dev->power.lock);
+	dev_dbg(dev, "%s returns %d\n", __func__, retval);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_runtime_suspend);
 
 /**
- * __pm_runtime_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out run-time resume of given device.
  * @dev: Device to resume.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
  *
- * Check if the device can be woken up and run the ->runtime_resume() callback
- * provided by its bus type. If another resume has been started earlier, wait
- * for it to finish. If there's a suspend running in parallel with this
- * function, wait for it to finish and resume the device. Cancel any scheduled
- * or pending requests.
+ * Check if the device's run-time PM status allows it to be resumed.  Cancel
+ * any scheduled or pending requests.  If another resume has been started
+ * earlier, either return immediately or wait for it to finish, depending on
+ * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
+ * in parallel with this function, either tell the other process to resume
+ * after suspending (deferred_resume) or wait for it to finish.  If the
+ * RPM_ASYNC flag is set then queue a resume request; otherwise run the
+ * ->runtime_resume() callback directly.  Queue an idle notification for the
+ * device if the resume succeeded.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-int __pm_runtime_resume(struct device *dev, bool from_wq)
+static int rpm_resume(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
+	int (*callback)(struct device *);
 	struct device *parent = NULL;
 	int retval = 0;
 
-	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
-		from_wq ? " from workqueue" : "");
+	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
  repeat:
-	if (dev->power.runtime_error) {
+	if (dev->power.runtime_error)
 		retval = -EINVAL;
+	else if (dev->power.disable_depth > 0)
+		retval = -EAGAIN;
+	if (retval)
 		goto out;
-	}
 
-	pm_runtime_cancel_pending(dev);
+	/*
+	 * Other scheduled or pending requests need to be canceled.  Small
+	 * optimization: If an autosuspend timer is running, leave it running
+	 * rather than cancelling it now only to restart it again in the near
+	 * future.
+	 */
+	dev->power.request = RPM_REQ_NONE;
+	if (!dev->power.timer_autosuspends)
+		pm_runtime_deactivate_timer(dev);
 
-	if (dev->power.runtime_status == RPM_ACTIVE)
+	if (dev->power.runtime_status == RPM_ACTIVE) {
 		retval = 1;
-	else if (dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	if (retval)
 		goto out;
+	}
 
 	if (dev->power.runtime_status == RPM_RESUMING
 	    || dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (from_wq) {
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 			if (dev->power.runtime_status == RPM_SUSPENDING)
 				dev->power.deferred_resume = true;
-			retval = -EINPROGRESS;
+			else
+				retval = -EINPROGRESS;
 			goto out;
 		}
 
@@ -391,6 +497,34 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
+	/*
+	 * See if we can skip waking up the parent.  This is safe only if
+	 * power.no_callbacks is set, because otherwise we don't know whether
+	 * the resume will actually succeed.
+	 */
+	if (dev->power.no_callbacks && !parent && dev->parent) {
+		spin_lock(&dev->parent->power.lock);
+		if (dev->parent->power.disable_depth > 0
+		    || dev->parent->power.ignore_children
+		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
+			atomic_inc(&dev->parent->power.child_count);
+			spin_unlock(&dev->parent->power.lock);
+			goto no_callback;	/* Assume success. */
+		}
+		spin_unlock(&dev->parent->power.lock);
+	}
+
+	/* Carry out an asynchronous or a synchronous resume. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_RESUME;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		retval = 0;
+		goto out;
+	}
+
 	if (!parent && dev->parent) {
 		/*
 		 * Increment the parent's resume counter and resume it if
@@ -408,7 +542,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		 */
 		if (!parent->power.disable_depth
 		    && !parent->power.ignore_children) {
-			__pm_runtime_resume(parent, false);
+			rpm_resume(parent, 0);
 			if (parent->power.runtime_status != RPM_ACTIVE)
 				retval = -EBUSY;
 		}
@@ -420,39 +554,26 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	__update_runtime_status(dev, RPM_RESUMING);
-
-	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->bus->pm->runtime_resume(dev);
-
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->type && dev->type->pm
-	    && dev->type->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->type->pm->runtime_resume(dev);
+	if (dev->power.no_callbacks)
+		goto no_callback;	/* Assume success. */
 
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else if (dev->class && dev->class->pm
-	    && dev->class->pm->runtime_resume) {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = dev->class->pm->runtime_resume(dev);
+	__update_runtime_status(dev, RPM_RESUMING);
 
-		spin_lock_irq(&dev->power.lock);
-		dev->power.runtime_error = retval;
-	} else {
-		retval = -ENOSYS;
-	}
+	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+		callback = dev->bus->pm->runtime_resume;
+	else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
+		callback = dev->type->pm->runtime_resume;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->runtime_resume;
+	else
+		callback = NULL;
 
+	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
 	} else {
+ no_callback:
 		__update_runtime_status(dev, RPM_ACTIVE);
 		if (parent)
 			atomic_inc(&parent->power.child_count);
@@ -460,7 +581,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
460 wake_up_all(&dev->power.wait_queue); 581 wake_up_all(&dev->power.wait_queue);
461 582
462 if (!retval) 583 if (!retval)
463 __pm_request_idle(dev); 584 rpm_idle(dev, RPM_ASYNC);
464 585
465 out: 586 out:
466 if (parent) { 587 if (parent) {
@@ -471,28 +592,12 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
471 spin_lock_irq(&dev->power.lock); 592 spin_lock_irq(&dev->power.lock);
472 } 593 }
473 594
474 dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval); 595 dev_dbg(dev, "%s returns %d\n", __func__, retval);
475 596
476 return retval; 597 return retval;
477} 598}
478 599
479/** 600/**
480 * pm_runtime_resume - Carry out run-time resume of given device.
481 * @dev: Device to suspend.
482 */
483int pm_runtime_resume(struct device *dev)
484{
485 int retval;
486
487 spin_lock_irq(&dev->power.lock);
488 retval = __pm_runtime_resume(dev, false);
489 spin_unlock_irq(&dev->power.lock);
490
491 return retval;
492}
493EXPORT_SYMBOL_GPL(pm_runtime_resume);
494
495/**
496 * pm_runtime_work - Universal run-time PM work function. 601 * pm_runtime_work - Universal run-time PM work function.
497 * @work: Work structure used for scheduling the execution of this function. 602 * @work: Work structure used for scheduling the execution of this function.
498 * 603 *
@@ -517,13 +622,16 @@ static void pm_runtime_work(struct work_struct *work)
517 case RPM_REQ_NONE: 622 case RPM_REQ_NONE:
518 break; 623 break;
519 case RPM_REQ_IDLE: 624 case RPM_REQ_IDLE:
520 __pm_runtime_idle(dev); 625 rpm_idle(dev, RPM_NOWAIT);
521 break; 626 break;
522 case RPM_REQ_SUSPEND: 627 case RPM_REQ_SUSPEND:
523 __pm_runtime_suspend(dev, true); 628 rpm_suspend(dev, RPM_NOWAIT);
629 break;
630 case RPM_REQ_AUTOSUSPEND:
631 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
524 break; 632 break;
525 case RPM_REQ_RESUME: 633 case RPM_REQ_RESUME:
526 __pm_runtime_resume(dev, true); 634 rpm_resume(dev, RPM_NOWAIT);
527 break; 635 break;
528 } 636 }
529 637
@@ -532,117 +640,10 @@ static void pm_runtime_work(struct work_struct *work)
532} 640}
533 641
534/** 642/**
535 * __pm_request_idle - Submit an idle notification request for given device.
536 * @dev: Device to handle.
537 *
538 * Check if the device's run-time PM status is correct for suspending the device
539 * and queue up a request to run __pm_runtime_idle() for it.
540 *
541 * This function must be called under dev->power.lock with interrupts disabled.
542 */
543static int __pm_request_idle(struct device *dev)
544{
545 int retval = 0;
546
547 if (dev->power.runtime_error)
548 retval = -EINVAL;
549 else if (atomic_read(&dev->power.usage_count) > 0
550 || dev->power.disable_depth > 0
551 || dev->power.runtime_status == RPM_SUSPENDED
552 || dev->power.runtime_status == RPM_SUSPENDING)
553 retval = -EAGAIN;
554 else if (!pm_children_suspended(dev))
555 retval = -EBUSY;
556 if (retval)
557 return retval;
558
559 if (dev->power.request_pending) {
560 /* Any requests other than RPM_REQ_IDLE take precedence. */
561 if (dev->power.request == RPM_REQ_NONE)
562 dev->power.request = RPM_REQ_IDLE;
563 else if (dev->power.request != RPM_REQ_IDLE)
564 retval = -EAGAIN;
565 return retval;
566 }
567
568 dev->power.request = RPM_REQ_IDLE;
569 dev->power.request_pending = true;
570 queue_work(pm_wq, &dev->power.work);
571
572 return retval;
573}
574
575/**
576 * pm_request_idle - Submit an idle notification request for given device.
577 * @dev: Device to handle.
578 */
579int pm_request_idle(struct device *dev)
580{
581 unsigned long flags;
582 int retval;
583
584 spin_lock_irqsave(&dev->power.lock, flags);
585 retval = __pm_request_idle(dev);
586 spin_unlock_irqrestore(&dev->power.lock, flags);
587
588 return retval;
589}
590EXPORT_SYMBOL_GPL(pm_request_idle);
591
592/**
593 * __pm_request_suspend - Submit a suspend request for given device.
594 * @dev: Device to suspend.
595 *
596 * This function must be called under dev->power.lock with interrupts disabled.
597 */
598static int __pm_request_suspend(struct device *dev)
599{
600 int retval = 0;
601
602 if (dev->power.runtime_error)
603 return -EINVAL;
604
605 if (dev->power.runtime_status == RPM_SUSPENDED)
606 retval = 1;
607 else if (atomic_read(&dev->power.usage_count) > 0
608 || dev->power.disable_depth > 0)
609 retval = -EAGAIN;
610 else if (dev->power.runtime_status == RPM_SUSPENDING)
611 retval = -EINPROGRESS;
612 else if (!pm_children_suspended(dev))
613 retval = -EBUSY;
614 if (retval < 0)
615 return retval;
616
617 pm_runtime_deactivate_timer(dev);
618
619 if (dev->power.request_pending) {
620 /*
621 * Pending resume requests take precedence over us, but we can
622 * overtake any other pending request.
623 */
624 if (dev->power.request == RPM_REQ_RESUME)
625 retval = -EAGAIN;
626 else if (dev->power.request != RPM_REQ_SUSPEND)
627 dev->power.request = retval ?
628 RPM_REQ_NONE : RPM_REQ_SUSPEND;
629 return retval;
630 } else if (retval) {
631 return retval;
632 }
633
634 dev->power.request = RPM_REQ_SUSPEND;
635 dev->power.request_pending = true;
636 queue_work(pm_wq, &dev->power.work);
637
638 return 0;
639}
640
641/**
642 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). 643 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
643 * @data: Device pointer passed by pm_schedule_suspend(). 644 * @data: Device pointer passed by pm_schedule_suspend().
644 * 645 *
645 * Check if the time is right and execute __pm_request_suspend() in that case. 646 * Check if the time is right and queue a suspend request.
646 */ 647 */
647static void pm_suspend_timer_fn(unsigned long data) 648static void pm_suspend_timer_fn(unsigned long data)
648{ 649{
@@ -656,7 +657,8 @@ static void pm_suspend_timer_fn(unsigned long data)
656 /* If 'expire' is after 'jiffies' we've been called too early. */ 657 /* If 'expire' is after 'jiffies' we've been called too early. */
657 if (expires > 0 && !time_after(expires, jiffies)) { 658 if (expires > 0 && !time_after(expires, jiffies)) {
658 dev->power.timer_expires = 0; 659 dev->power.timer_expires = 0;
659 __pm_request_suspend(dev); 660 rpm_suspend(dev, dev->power.timer_autosuspends ?
661 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
660 } 662 }
661 663
662 spin_unlock_irqrestore(&dev->power.lock, flags); 664 spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -670,47 +672,25 @@ static void pm_suspend_timer_fn(unsigned long data)
670int pm_schedule_suspend(struct device *dev, unsigned int delay) 672int pm_schedule_suspend(struct device *dev, unsigned int delay)
671{ 673{
672 unsigned long flags; 674 unsigned long flags;
673 int retval = 0; 675 int retval;
674 676
675 spin_lock_irqsave(&dev->power.lock, flags); 677 spin_lock_irqsave(&dev->power.lock, flags);
676 678
677 if (dev->power.runtime_error) {
678 retval = -EINVAL;
679 goto out;
680 }
681
682 if (!delay) { 679 if (!delay) {
683 retval = __pm_request_suspend(dev); 680 retval = rpm_suspend(dev, RPM_ASYNC);
684 goto out; 681 goto out;
685 } 682 }
686 683
687 pm_runtime_deactivate_timer(dev); 684 retval = rpm_check_suspend_allowed(dev);
688
689 if (dev->power.request_pending) {
690 /*
691 * Pending resume requests take precedence over us, but any
692 * other pending requests have to be canceled.
693 */
694 if (dev->power.request == RPM_REQ_RESUME) {
695 retval = -EAGAIN;
696 goto out;
697 }
698 dev->power.request = RPM_REQ_NONE;
699 }
700
701 if (dev->power.runtime_status == RPM_SUSPENDED)
702 retval = 1;
703 else if (atomic_read(&dev->power.usage_count) > 0
704 || dev->power.disable_depth > 0)
705 retval = -EAGAIN;
706 else if (!pm_children_suspended(dev))
707 retval = -EBUSY;
708 if (retval) 685 if (retval)
709 goto out; 686 goto out;
710 687
688 /* Other scheduled or pending requests need to be canceled. */
689 pm_runtime_cancel_pending(dev);
690
711 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); 691 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
712 if (!dev->power.timer_expires) 692 dev->power.timer_expires += !dev->power.timer_expires;
713 dev->power.timer_expires = 1; 693 dev->power.timer_autosuspends = 0;
714 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); 694 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
715 695
716 out: 696 out:
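With the rewrite, pm_schedule_suspend() validates the request up front via rpm_check_suspend_allowed() and arms the suspend timer with timer_autosuspends cleared, so expiry queues a plain asynchronous suspend. A hypothetical driver fragment (the foo_* name and the 5-second delay are illustrative, not part of this patch):

#include <linux/pm_runtime.h>

/* Ask for a suspend 5 s from now; a return value of 1 means the
 * device is already suspended, which callers can treat as success.
 */
static void foo_mark_idle(struct device *dev)
{
        int error = pm_schedule_suspend(dev, 5000);     /* delay in ms */

        if (error < 0)
                dev_warn(dev, "suspend not scheduled: %d\n", error);
}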
@@ -721,103 +701,88 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
721EXPORT_SYMBOL_GPL(pm_schedule_suspend); 701EXPORT_SYMBOL_GPL(pm_schedule_suspend);
722 702
723/** 703/**
724 * pm_request_resume - Submit a resume request for given device. 704 * __pm_runtime_idle - Entry point for run-time idle operations.
725 * @dev: Device to resume. 705 * @dev: Device to send idle notification for.
706 * @rpmflags: Flag bits.
726 * 707 *
727 * This function must be called under dev->power.lock with interrupts disabled. 708 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
709 * return immediately if it is larger than zero. Then carry out an idle
710 * notification, either synchronous or asynchronous.
711 *
712 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
728 */ 713 */
729static int __pm_request_resume(struct device *dev) 714int __pm_runtime_idle(struct device *dev, int rpmflags)
730{ 715{
731 int retval = 0; 716 unsigned long flags;
732 717 int retval;
733 if (dev->power.runtime_error)
734 return -EINVAL;
735
736 if (dev->power.runtime_status == RPM_ACTIVE)
737 retval = 1;
738 else if (dev->power.runtime_status == RPM_RESUMING)
739 retval = -EINPROGRESS;
740 else if (dev->power.disable_depth > 0)
741 retval = -EAGAIN;
742 if (retval < 0)
743 return retval;
744
745 pm_runtime_deactivate_timer(dev);
746 718
747 if (dev->power.runtime_status == RPM_SUSPENDING) { 719 if (rpmflags & RPM_GET_PUT) {
748 dev->power.deferred_resume = true; 720 if (!atomic_dec_and_test(&dev->power.usage_count))
749 return retval; 721 return 0;
750 } 722 }
751 if (dev->power.request_pending) {
752 /* If non-resume request is pending, we can overtake it. */
753 dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
754 return retval;
755 }
756 if (retval)
757 return retval;
758 723
759 dev->power.request = RPM_REQ_RESUME; 724 spin_lock_irqsave(&dev->power.lock, flags);
760 dev->power.request_pending = true; 725 retval = rpm_idle(dev, rpmflags);
761 queue_work(pm_wq, &dev->power.work); 726 spin_unlock_irqrestore(&dev->power.lock, flags);
762 727
763 return retval; 728 return retval;
764} 729}
730EXPORT_SYMBOL_GPL(__pm_runtime_idle);
765 731
766/** 732/**
767 * pm_request_resume - Submit a resume request for given device. 733 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
768 * @dev: Device to resume. 734 * @dev: Device to suspend.
735 * @rpmflags: Flag bits.
736 *
737 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
738 * return immediately if it is larger than zero. Then carry out a suspend,
739 * either synchronous or asynchronous.
740 *
741 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
769 */ 742 */
770int pm_request_resume(struct device *dev) 743int __pm_runtime_suspend(struct device *dev, int rpmflags)
771{ 744{
772 unsigned long flags; 745 unsigned long flags;
773 int retval; 746 int retval;
774 747
748 if (rpmflags & RPM_GET_PUT) {
749 if (!atomic_dec_and_test(&dev->power.usage_count))
750 return 0;
751 }
752
775 spin_lock_irqsave(&dev->power.lock, flags); 753 spin_lock_irqsave(&dev->power.lock, flags);
776 retval = __pm_request_resume(dev); 754 retval = rpm_suspend(dev, rpmflags);
777 spin_unlock_irqrestore(&dev->power.lock, flags); 755 spin_unlock_irqrestore(&dev->power.lock, flags);
778 756
779 return retval; 757 return retval;
780} 758}
781EXPORT_SYMBOL_GPL(pm_request_resume); 759EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
782 760
783/** 761/**
784 * __pm_runtime_get - Reference count a device and wake it up, if necessary. 762 * __pm_runtime_resume - Entry point for run-time resume operations.
785 * @dev: Device to handle. 763 * @dev: Device to resume.
786 * @sync: If set and the device is suspended, resume it synchronously. 764 * @rpmflags: Flag bits.
765 *
766 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
767 * carry out a resume, either synchronous or asynchronous.
787 * 768 *
788 * Increment the usage count of the device and resume it or submit a resume 769 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
789 * request for it, depending on the value of @sync.
790 */ 770 */
791int __pm_runtime_get(struct device *dev, bool sync) 771int __pm_runtime_resume(struct device *dev, int rpmflags)
792{ 772{
773 unsigned long flags;
793 int retval; 774 int retval;
794 775
795 atomic_inc(&dev->power.usage_count); 776 if (rpmflags & RPM_GET_PUT)
796 retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); 777 atomic_inc(&dev->power.usage_count);
797 778
798 return retval; 779 spin_lock_irqsave(&dev->power.lock, flags);
799} 780 retval = rpm_resume(dev, rpmflags);
800EXPORT_SYMBOL_GPL(__pm_runtime_get); 781 spin_unlock_irqrestore(&dev->power.lock, flags);
801
802/**
803 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
804 * @dev: Device to handle.
805 * @sync: If the device's bus type is to be notified, do that synchronously.
806 *
807 * Decrement the usage count of the device and if it reaches zero, carry out a
808 * synchronous idle notification or submit an idle notification request for it,
809 * depending on the value of @sync.
810 */
811int __pm_runtime_put(struct device *dev, bool sync)
812{
813 int retval = 0;
814
815 if (atomic_dec_and_test(&dev->power.usage_count))
816 retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
817 782
818 return retval; 783 return retval;
819} 784}
820EXPORT_SYMBOL_GPL(__pm_runtime_put); 785EXPORT_SYMBOL_GPL(__pm_runtime_resume);
821 786
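These three combined entry points take over from the old pairs of synchronous and asynchronous helpers; the functions drivers actually call become thin static inline wrappers that merely pick the rpmflags. A sketch of the include/linux/pm_runtime.h wrappers as they look after this series (abbreviated, and to be read as an approximation rather than part of this diff):

static inline int pm_runtime_suspend(struct device *dev)
{
        return __pm_runtime_suspend(dev, 0);
}

static inline int pm_runtime_resume(struct device *dev)
{
        return __pm_runtime_resume(dev, 0);
}

static inline int pm_request_resume(struct device *dev)
{
        return __pm_runtime_resume(dev, RPM_ASYNC);
}

static inline int pm_runtime_get_sync(struct device *dev)
{
        return __pm_runtime_resume(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put(struct device *dev)
{
        return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_put_sync(struct device *dev)
{
        return __pm_runtime_idle(dev, RPM_GET_PUT);
}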
822/** 787/**
823 * __pm_runtime_set_status - Set run-time PM status of a device. 788 * __pm_runtime_set_status - Set run-time PM status of a device.
@@ -968,7 +933,7 @@ int pm_runtime_barrier(struct device *dev)
968 933
969 if (dev->power.request_pending 934 if (dev->power.request_pending
970 && dev->power.request == RPM_REQ_RESUME) { 935 && dev->power.request == RPM_REQ_RESUME) {
971 __pm_runtime_resume(dev, false); 936 rpm_resume(dev, 0);
972 retval = 1; 937 retval = 1;
973 } 938 }
974 939
@@ -1017,7 +982,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
1017 */ 982 */
1018 pm_runtime_get_noresume(dev); 983 pm_runtime_get_noresume(dev);
1019 984
1020 __pm_runtime_resume(dev, false); 985 rpm_resume(dev, 0);
1021 986
1022 pm_runtime_put_noidle(dev); 987 pm_runtime_put_noidle(dev);
1023 } 988 }
@@ -1065,7 +1030,7 @@ void pm_runtime_forbid(struct device *dev)
1065 1030
1066 dev->power.runtime_auto = false; 1031 dev->power.runtime_auto = false;
1067 atomic_inc(&dev->power.usage_count); 1032 atomic_inc(&dev->power.usage_count);
1068 __pm_runtime_resume(dev, false); 1033 rpm_resume(dev, 0);
1069 1034
1070 out: 1035 out:
1071 spin_unlock_irq(&dev->power.lock); 1036 spin_unlock_irq(&dev->power.lock);
@@ -1086,7 +1051,7 @@ void pm_runtime_allow(struct device *dev)
1086 1051
1087 dev->power.runtime_auto = true; 1052 dev->power.runtime_auto = true;
1088 if (atomic_dec_and_test(&dev->power.usage_count)) 1053 if (atomic_dec_and_test(&dev->power.usage_count))
1089 __pm_runtime_idle(dev); 1054 rpm_idle(dev, RPM_AUTO);
1090 1055
1091 out: 1056 out:
1092 spin_unlock_irq(&dev->power.lock); 1057 spin_unlock_irq(&dev->power.lock);
@@ -1094,13 +1059,110 @@ void pm_runtime_allow(struct device *dev)
1094EXPORT_SYMBOL_GPL(pm_runtime_allow); 1059EXPORT_SYMBOL_GPL(pm_runtime_allow);
1095 1060
1096/** 1061/**
1062 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
1063 * @dev: Device to handle.
1064 *
1065 * Set the power.no_callbacks flag, which tells the PM core that this
1066 * device is power-managed through its parent and has no run-time PM
1067 * callbacks of its own. The run-time sysfs attributes will be removed.
1068 *
1069 */
1070void pm_runtime_no_callbacks(struct device *dev)
1071{
1072 spin_lock_irq(&dev->power.lock);
1073 dev->power.no_callbacks = 1;
1074 spin_unlock_irq(&dev->power.lock);
1075 if (device_is_registered(dev))
1076 rpm_sysfs_remove(dev);
1077}
1078EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1079
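A plausible consumer is a subfunction device whose power state is slaved entirely to its parent, e.g. one cell of a multi-function device. A hypothetical probe fragment (foo_* is illustrative):

#include <linux/pm_runtime.h>

static int foo_cell_probe(struct device *dev)
{
        pm_runtime_no_callbacks(dev);   /* managed through the parent */
        pm_runtime_set_active(dev);     /* parent keeps this cell powered */
        pm_runtime_enable(dev);
        return 0;
}

Marking the device this way also lets rpm_resume() take the fast path added above, avoiding a needless parent wakeup.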
1080/**
1081 * update_autosuspend - Handle a change to a device's autosuspend settings.
1082 * @dev: Device to handle.
1083 * @old_delay: The former autosuspend_delay value.
1084 * @old_use: The former use_autosuspend value.
1085 *
1086 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1087 * set; otherwise allow it. Send an idle notification if suspends are allowed.
1088 *
1089 * This function must be called under dev->power.lock with interrupts disabled.
1090 */
1091static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1092{
1093 int delay = dev->power.autosuspend_delay;
1094
1095 /* Should runtime suspend be prevented now? */
1096 if (dev->power.use_autosuspend && delay < 0) {
1097
1098 /* If it used to be allowed then prevent it. */
1099 if (!old_use || old_delay >= 0) {
1100 atomic_inc(&dev->power.usage_count);
1101 rpm_resume(dev, 0);
1102 }
1103 }
1104
1105 /* Runtime suspend should be allowed now. */
1106 else {
1107
1108 /* If it used to be prevented then allow it. */
1109 if (old_use && old_delay < 0)
1110 atomic_dec(&dev->power.usage_count);
1111
1112 /* Maybe we can autosuspend now. */
1113 rpm_idle(dev, RPM_AUTO);
1114 }
1115}
1116
1117/**
1118 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1119 * @dev: Device to handle.
1120 * @delay: Value of the new delay in milliseconds.
1121 *
1122 * Set the device's power.autosuspend_delay value. If it changes to negative
1123 * and the power.use_autosuspend flag is set, prevent run-time suspends. If it
1124 * changes the other way, allow run-time suspends.
1125 */
1126void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1127{
1128 int old_delay, old_use;
1129
1130 spin_lock_irq(&dev->power.lock);
1131 old_delay = dev->power.autosuspend_delay;
1132 old_use = dev->power.use_autosuspend;
1133 dev->power.autosuspend_delay = delay;
1134 update_autosuspend(dev, old_delay, old_use);
1135 spin_unlock_irq(&dev->power.lock);
1136}
1137EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1138
1139/**
1140 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1141 * @dev: Device to handle.
1142 * @use: New value for use_autosuspend.
1143 *
1144 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
1145 * suspends as needed.
1146 */
1147void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1148{
1149 int old_delay, old_use;
1150
1151 spin_lock_irq(&dev->power.lock);
1152 old_delay = dev->power.autosuspend_delay;
1153 old_use = dev->power.use_autosuspend;
1154 dev->power.use_autosuspend = use;
1155 update_autosuspend(dev, old_delay, old_use);
1156 spin_unlock_irq(&dev->power.lock);
1157}
1158EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1159
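Putting the pieces together, a driver opts in to autosuspend with the two calls above plus the RPM_AUTO-flavored put. In the sketch below (hypothetical foo_* driver, 2-second delay chosen arbitrarily), pm_runtime_use_autosuspend(), pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend() are assumed to be the static inline wrappers this series adds around __pm_runtime_use_autosuspend() and __pm_runtime_suspend():

#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* stay up 2 s when idle */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
        return 0;
}

static void foo_io_done(struct device *dev)
{
        pm_runtime_mark_last_busy(dev);         /* restart the delay window */
        pm_runtime_put_autosuspend(dev);        /* suspend once the delay expires */
}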
1160/**
1097 * pm_runtime_init - Initialize run-time PM fields in given device object. 1161 * pm_runtime_init - Initialize run-time PM fields in given device object.
1098 * @dev: Device object to initialize. 1162 * @dev: Device object to initialize.
1099 */ 1163 */
1100void pm_runtime_init(struct device *dev) 1164void pm_runtime_init(struct device *dev)
1101{ 1165{
1102 spin_lock_init(&dev->power.lock);
1103
1104 dev->power.runtime_status = RPM_SUSPENDED; 1166 dev->power.runtime_status = RPM_SUSPENDED;
1105 dev->power.idle_notification = false; 1167 dev->power.idle_notification = false;
1106 1168
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e56b4388fe61..0b1e46bf3e56 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -75,12 +75,27 @@
75 * attribute is set to "enabled" by bus type code or device drivers and in 75 * attribute is set to "enabled" by bus type code or device drivers and in
76 * that case it should be safe to leave the default value. 76 * that case it should be safe to leave the default value.
77 * 77 *
78 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
79 *
80 * Some drivers don't want to carry out a runtime suspend as soon as a
81 * device becomes idle; they want it to remain idle for some period
82 * of time before it is suspended. This period is the autosuspend_delay
83 * value (expressed in milliseconds) and it can be controlled by the user.
84 * If the value is negative then the device will never be runtime
85 * suspended.
86 *
87 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
88 * value are used only if the driver calls pm_runtime_use_autosuspend().
89 *
78 * wakeup_count - Report the number of wakeup events related to the device 90 * wakeup_count - Report the number of wakeup events related to the device
79 */ 91 */
80 92
81static const char enabled[] = "enabled"; 93static const char enabled[] = "enabled";
82static const char disabled[] = "disabled"; 94static const char disabled[] = "disabled";
83 95
96const char power_group_name[] = "power";
97EXPORT_SYMBOL_GPL(power_group_name);
98
84#ifdef CONFIG_PM_RUNTIME 99#ifdef CONFIG_PM_RUNTIME
85static const char ctrl_auto[] = "auto"; 100static const char ctrl_auto[] = "auto";
86static const char ctrl_on[] = "on"; 101static const char ctrl_on[] = "on";
@@ -170,6 +185,33 @@ static ssize_t rtpm_status_show(struct device *dev,
170} 185}
171 186
172static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); 187static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
188
189static ssize_t autosuspend_delay_ms_show(struct device *dev,
190 struct device_attribute *attr, char *buf)
191{
192 if (!dev->power.use_autosuspend)
193 return -EIO;
194 return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
195}
196
197static ssize_t autosuspend_delay_ms_store(struct device *dev,
198 struct device_attribute *attr, const char *buf, size_t n)
199{
200 long delay;
201
202 if (!dev->power.use_autosuspend)
203 return -EIO;
204
205 if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
206 return -EINVAL;
207
208 pm_runtime_set_autosuspend_delay(dev, delay);
209 return n;
210}
211
212static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
213 autosuspend_delay_ms_store);
214
173#endif 215#endif
174 216
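In other words, once a driver has called pm_runtime_use_autosuspend(), user space can retune the delay through this file, or veto runtime suspend entirely by storing a negative value; for instance, writing 2000 to the device's power/autosuspend_delay_ms attribute (path illustrative) asks for a two-second idle period before each suspend.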
175static ssize_t 217static ssize_t
@@ -210,11 +252,122 @@ static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
210static ssize_t wakeup_count_show(struct device *dev, 252static ssize_t wakeup_count_show(struct device *dev,
211 struct device_attribute *attr, char *buf) 253 struct device_attribute *attr, char *buf)
212{ 254{
213 return sprintf(buf, "%lu\n", dev->power.wakeup_count); 255 unsigned long count = 0;
256 bool enabled = false;
257
258 spin_lock_irq(&dev->power.lock);
259 if (dev->power.wakeup) {
260 count = dev->power.wakeup->event_count;
261 enabled = true;
262 }
263 spin_unlock_irq(&dev->power.lock);
264 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
214} 265}
215 266
216static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); 267static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
217#endif 268
269static ssize_t wakeup_active_count_show(struct device *dev,
270 struct device_attribute *attr, char *buf)
271{
272 unsigned long count = 0;
273 bool enabled = false;
274
275 spin_lock_irq(&dev->power.lock);
276 if (dev->power.wakeup) {
277 count = dev->power.wakeup->active_count;
278 enabled = true;
279 }
280 spin_unlock_irq(&dev->power.lock);
281 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
282}
283
284static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
285
286static ssize_t wakeup_hit_count_show(struct device *dev,
287 struct device_attribute *attr, char *buf)
288{
289 unsigned long count = 0;
290 bool enabled = false;
291
292 spin_lock_irq(&dev->power.lock);
293 if (dev->power.wakeup) {
294 count = dev->power.wakeup->hit_count;
295 enabled = true;
296 }
297 spin_unlock_irq(&dev->power.lock);
298 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
299}
300
301static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL);
302
303static ssize_t wakeup_active_show(struct device *dev,
304 struct device_attribute *attr, char *buf)
305{
306 unsigned int active = 0;
307 bool enabled = false;
308
309 spin_lock_irq(&dev->power.lock);
310 if (dev->power.wakeup) {
311 active = dev->power.wakeup->active;
312 enabled = true;
313 }
314 spin_unlock_irq(&dev->power.lock);
315 return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
316}
317
318static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
319
320static ssize_t wakeup_total_time_show(struct device *dev,
321 struct device_attribute *attr, char *buf)
322{
323 s64 msec = 0;
324 bool enabled = false;
325
326 spin_lock_irq(&dev->power.lock);
327 if (dev->power.wakeup) {
328 msec = ktime_to_ms(dev->power.wakeup->total_time);
329 enabled = true;
330 }
331 spin_unlock_irq(&dev->power.lock);
332 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
333}
334
335static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
336
337static ssize_t wakeup_max_time_show(struct device *dev,
338 struct device_attribute *attr, char *buf)
339{
340 s64 msec = 0;
341 bool enabled = false;
342
343 spin_lock_irq(&dev->power.lock);
344 if (dev->power.wakeup) {
345 msec = ktime_to_ms(dev->power.wakeup->max_time);
346 enabled = true;
347 }
348 spin_unlock_irq(&dev->power.lock);
349 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
350}
351
352static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
353
354static ssize_t wakeup_last_time_show(struct device *dev,
355 struct device_attribute *attr, char *buf)
356{
357 s64 msec = 0;
358 bool enabled = false;
359
360 spin_lock_irq(&dev->power.lock);
361 if (dev->power.wakeup) {
362 msec = ktime_to_ms(dev->power.wakeup->last_time);
363 enabled = true;
364 }
365 spin_unlock_irq(&dev->power.lock);
366 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
367}
368
369static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
370#endif /* CONFIG_PM_SLEEP */
218 371
219#ifdef CONFIG_PM_ADVANCED_DEBUG 372#ifdef CONFIG_PM_ADVANCED_DEBUG
220#ifdef CONFIG_PM_RUNTIME 373#ifdef CONFIG_PM_RUNTIME
@@ -279,19 +432,20 @@ static DEVICE_ATTR(async, 0644, async_show, async_store);
279#endif /* CONFIG_PM_ADVANCED_DEBUG */ 432#endif /* CONFIG_PM_ADVANCED_DEBUG */
280 433
281static struct attribute * power_attrs[] = { 434static struct attribute * power_attrs[] = {
282#ifdef CONFIG_PM_RUNTIME
283 &dev_attr_control.attr,
284 &dev_attr_runtime_status.attr,
285 &dev_attr_runtime_suspended_time.attr,
286 &dev_attr_runtime_active_time.attr,
287#endif
288 &dev_attr_wakeup.attr, 435 &dev_attr_wakeup.attr,
289#ifdef CONFIG_PM_SLEEP 436#ifdef CONFIG_PM_SLEEP
290 &dev_attr_wakeup_count.attr, 437 &dev_attr_wakeup_count.attr,
438 &dev_attr_wakeup_active_count.attr,
439 &dev_attr_wakeup_hit_count.attr,
440 &dev_attr_wakeup_active.attr,
441 &dev_attr_wakeup_total_time_ms.attr,
442 &dev_attr_wakeup_max_time_ms.attr,
443 &dev_attr_wakeup_last_time_ms.attr,
291#endif 444#endif
292#ifdef CONFIG_PM_ADVANCED_DEBUG 445#ifdef CONFIG_PM_ADVANCED_DEBUG
293 &dev_attr_async.attr, 446 &dev_attr_async.attr,
294#ifdef CONFIG_PM_RUNTIME 447#ifdef CONFIG_PM_RUNTIME
448 &dev_attr_runtime_status.attr,
295 &dev_attr_runtime_usage.attr, 449 &dev_attr_runtime_usage.attr,
296 &dev_attr_runtime_active_kids.attr, 450 &dev_attr_runtime_active_kids.attr,
297 &dev_attr_runtime_enabled.attr, 451 &dev_attr_runtime_enabled.attr,
@@ -300,10 +454,53 @@ static struct attribute * power_attrs[] = {
300 NULL, 454 NULL,
301}; 455};
302static struct attribute_group pm_attr_group = { 456static struct attribute_group pm_attr_group = {
303 .name = "power", 457 .name = power_group_name,
304 .attrs = power_attrs, 458 .attrs = power_attrs,
305}; 459};
306 460
461#ifdef CONFIG_PM_RUNTIME
462
463static struct attribute *runtime_attrs[] = {
464#ifndef CONFIG_PM_ADVANCED_DEBUG
465 &dev_attr_runtime_status.attr,
466#endif
467 &dev_attr_control.attr,
468 &dev_attr_runtime_suspended_time.attr,
469 &dev_attr_runtime_active_time.attr,
470 &dev_attr_autosuspend_delay_ms.attr,
471 NULL,
472};
473static struct attribute_group pm_runtime_attr_group = {
474 .name = power_group_name,
475 .attrs = runtime_attrs,
476};
477
478int dpm_sysfs_add(struct device *dev)
479{
480 int rc;
481
482 rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
483 if (rc == 0 && !dev->power.no_callbacks) {
484 rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
485 if (rc)
486 sysfs_remove_group(&dev->kobj, &pm_attr_group);
487 }
488 return rc;
489}
490
491void rpm_sysfs_remove(struct device *dev)
492{
493 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
494}
495
496void dpm_sysfs_remove(struct device *dev)
497{
498 rpm_sysfs_remove(dev);
499 sysfs_remove_group(&dev->kobj, &pm_attr_group);
500}
501
502#else /* CONFIG_PM_RUNTIME */
503
307int dpm_sysfs_add(struct device * dev) 504int dpm_sysfs_add(struct device * dev)
308{ 505{
309 return sysfs_create_group(&dev->kobj, &pm_attr_group); 506 return sysfs_create_group(&dev->kobj, &pm_attr_group);
@@ -313,3 +510,5 @@ void dpm_sysfs_remove(struct device * dev)
313{ 510{
314 sysfs_remove_group(&dev->kobj, &pm_attr_group); 511 sysfs_remove_group(&dev->kobj, &pm_attr_group);
315} 512}
513
514#endif
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 0a1a2c4dbc6e..9f4258df4cfd 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -188,8 +188,10 @@ static int show_file_hash(unsigned int value)
188static int show_dev_hash(unsigned int value) 188static int show_dev_hash(unsigned int value)
189{ 189{
190 int match = 0; 190 int match = 0;
191 struct list_head *entry = dpm_list.prev; 191 struct list_head *entry;
192 192
193 device_pm_lock();
194 entry = dpm_list.prev;
193 while (entry != &dpm_list) { 195 while (entry != &dpm_list) {
194 struct device * dev = to_device(entry); 196 struct device * dev = to_device(entry);
195 unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); 197 unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
@@ -199,11 +201,43 @@ static int show_dev_hash(unsigned int value)
199 } 201 }
200 entry = entry->prev; 202 entry = entry->prev;
201 } 203 }
204 device_pm_unlock();
202 return match; 205 return match;
203} 206}
204 207
205static unsigned int hash_value_early_read; 208static unsigned int hash_value_early_read;
206 209
210int show_trace_dev_match(char *buf, size_t size)
211{
212 unsigned int value = hash_value_early_read / (USERHASH * FILEHASH);
213 int ret = 0;
214 struct list_head *entry;
215
216 /*
217 * It's possible that multiple devices will match the hash and we can't
218 * tell which is the culprit, so it's best to output them all.
219 */
220 device_pm_lock();
221 entry = dpm_list.prev;
222 while (size && entry != &dpm_list) {
223 struct device *dev = to_device(entry);
224 unsigned int hash = hash_string(DEVSEED, dev_name(dev),
225 DEVHASH);
226 if (hash == value) {
227 int len = snprintf(buf, size, "%s\n",
228 dev_driver_string(dev));
229 if (len > size)
230 len = size;
231 buf += len;
232 ret += len;
233 size -= len;
234 }
235 entry = entry->prev;
236 }
237 device_pm_unlock();
238 return ret;
239}
240
207static int early_resume_init(void) 241static int early_resume_init(void)
208{ 242{
209 hash_value_early_read = read_magic_time(); 243 hash_value_early_read = read_magic_time();
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb594facfc3f..71c5528e1c35 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -11,7 +11,12 @@
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <linux/capability.h> 12#include <linux/capability.h>
13#include <linux/suspend.h> 13#include <linux/suspend.h>
14#include <linux/pm.h> 14#include <linux/seq_file.h>
15#include <linux/debugfs.h>
16
17#include "power.h"
18
19#define TIMEOUT 100
15 20
16/* 21/*
17 * If set, the suspend/hibernate code will abort transitions to a sleep state 22 * If set, the suspend/hibernate code will abort transitions to a sleep state
@@ -20,18 +25,244 @@
20bool events_check_enabled; 25bool events_check_enabled;
21 26
22/* The counter of registered wakeup events. */ 27/* The counter of registered wakeup events. */
23static unsigned long event_count; 28static atomic_t event_count = ATOMIC_INIT(0);
24/* A preserved old value of event_count. */ 29/* A preserved old value of event_count. */
25static unsigned long saved_event_count; 30static unsigned int saved_count;
26/* The counter of wakeup events being processed. */ 31/* The counter of wakeup events being processed. */
27static unsigned long events_in_progress; 32static atomic_t events_in_progress = ATOMIC_INIT(0);
28 33
29static DEFINE_SPINLOCK(events_lock); 34static DEFINE_SPINLOCK(events_lock);
30 35
31static void pm_wakeup_timer_fn(unsigned long data); 36static void pm_wakeup_timer_fn(unsigned long data);
32 37
33static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0); 38static LIST_HEAD(wakeup_sources);
34static unsigned long events_timer_expires; 39
40/**
41 * wakeup_source_create - Create a struct wakeup_source object.
42 * @name: Name of the new wakeup source.
43 */
44struct wakeup_source *wakeup_source_create(const char *name)
45{
46 struct wakeup_source *ws;
47
48 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
49 if (!ws)
50 return NULL;
51
52 spin_lock_init(&ws->lock);
53 if (name)
54 ws->name = kstrdup(name, GFP_KERNEL);
55
56 return ws;
57}
58EXPORT_SYMBOL_GPL(wakeup_source_create);
59
60/**
61 * wakeup_source_destroy - Destroy a struct wakeup_source object.
62 * @ws: Wakeup source to destroy.
63 */
64void wakeup_source_destroy(struct wakeup_source *ws)
65{
66 if (!ws)
67 return;
68
69 spin_lock_irq(&ws->lock);
70 while (ws->active) {
71 spin_unlock_irq(&ws->lock);
72
73 schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
74
75 spin_lock_irq(&ws->lock);
76 }
77 spin_unlock_irq(&ws->lock);
78
79 kfree(ws->name);
80 kfree(ws);
81}
82EXPORT_SYMBOL_GPL(wakeup_source_destroy);
83
84/**
85 * wakeup_source_add - Add given object to the list of wakeup sources.
86 * @ws: Wakeup source object to add to the list.
87 */
88void wakeup_source_add(struct wakeup_source *ws)
89{
90 if (WARN_ON(!ws))
91 return;
92
93 setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
94 ws->active = false;
95
96 spin_lock_irq(&events_lock);
97 list_add_rcu(&ws->entry, &wakeup_sources);
98 spin_unlock_irq(&events_lock);
99 synchronize_rcu();
100}
101EXPORT_SYMBOL_GPL(wakeup_source_add);
102
103/**
104 * wakeup_source_remove - Remove given object from the wakeup sources list.
105 * @ws: Wakeup source object to remove from the list.
106 */
107void wakeup_source_remove(struct wakeup_source *ws)
108{
109 if (WARN_ON(!ws))
110 return;
111
112 spin_lock_irq(&events_lock);
113 list_del_rcu(&ws->entry);
114 spin_unlock_irq(&events_lock);
115 synchronize_rcu();
116}
117EXPORT_SYMBOL_GPL(wakeup_source_remove);
118
119/**
120 * wakeup_source_register - Create wakeup source and add it to the list.
121 * @name: Name of the wakeup source to register.
122 */
123struct wakeup_source *wakeup_source_register(const char *name)
124{
125 struct wakeup_source *ws;
126
127 ws = wakeup_source_create(name);
128 if (ws)
129 wakeup_source_add(ws);
130
131 return ws;
132}
133EXPORT_SYMBOL_GPL(wakeup_source_register);
134
135/**
136 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
137 * @ws: Wakeup source object to unregister.
138 */
139void wakeup_source_unregister(struct wakeup_source *ws)
140{
141 wakeup_source_remove(ws);
142 wakeup_source_destroy(ws);
143}
144EXPORT_SYMBOL_GPL(wakeup_source_unregister);
145
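A wakeup source need not be backed by a struct device; a subsystem can register one directly and bracket its event processing with the __pm_stay_awake()/__pm_relax() pair introduced below. A hypothetical sketch (foo_* names are illustrative):

static struct wakeup_source *foo_ws;

static int __init foo_init(void)
{
        foo_ws = wakeup_source_register("foo_events");
        return foo_ws ? 0 : -ENOMEM;
}

static void foo_event_begin(void)
{
        __pm_stay_awake(foo_ws);        /* hold off suspend while we work */
}

static void foo_event_end(void)
{
        __pm_relax(foo_ws);             /* done; suspend may proceed */
}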
146/**
147 * device_wakeup_attach - Attach a wakeup source object to a device object.
148 * @dev: Device to handle.
149 * @ws: Wakeup source object to attach to @dev.
150 *
151 * This causes @dev to be treated as a wakeup device.
152 */
153static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
154{
155 spin_lock_irq(&dev->power.lock);
156 if (dev->power.wakeup) {
157 spin_unlock_irq(&dev->power.lock);
158 return -EEXIST;
159 }
160 dev->power.wakeup = ws;
161 spin_unlock_irq(&dev->power.lock);
162 return 0;
163}
164
165/**
166 * device_wakeup_enable - Enable given device to be a wakeup source.
167 * @dev: Device to handle.
168 *
169 * Create a wakeup source object, register it and attach it to @dev.
170 */
171int device_wakeup_enable(struct device *dev)
172{
173 struct wakeup_source *ws;
174 int ret;
175
176 if (!dev || !dev->power.can_wakeup)
177 return -EINVAL;
178
179 ws = wakeup_source_register(dev_name(dev));
180 if (!ws)
181 return -ENOMEM;
182
183 ret = device_wakeup_attach(dev, ws);
184 if (ret)
185 wakeup_source_unregister(ws);
186
187 return ret;
188}
189EXPORT_SYMBOL_GPL(device_wakeup_enable);
190
191/**
192 * device_wakeup_detach - Detach a device's wakeup source object from it.
193 * @dev: Device to detach the wakeup source object from.
194 *
195 * After it returns, @dev will not be treated as a wakeup device any more.
196 */
197static struct wakeup_source *device_wakeup_detach(struct device *dev)
198{
199 struct wakeup_source *ws;
200
201 spin_lock_irq(&dev->power.lock);
202 ws = dev->power.wakeup;
203 dev->power.wakeup = NULL;
204 spin_unlock_irq(&dev->power.lock);
205 return ws;
206}
207
208/**
209 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
210 * @dev: Device to handle.
211 *
212 * Detach the @dev's wakeup source object from it, unregister this wakeup source
213 * object and destroy it.
214 */
215int device_wakeup_disable(struct device *dev)
216{
217 struct wakeup_source *ws;
218
219 if (!dev || !dev->power.can_wakeup)
220 return -EINVAL;
221
222 ws = device_wakeup_detach(dev);
223 if (ws)
224 wakeup_source_unregister(ws);
225
226 return 0;
227}
228EXPORT_SYMBOL_GPL(device_wakeup_disable);
229
230/**
231 * device_init_wakeup - Device wakeup initialization.
232 * @dev: Device to handle.
233 * @enable: Whether or not to enable @dev as a wakeup device.
234 *
235 * By default, most devices should leave wakeup disabled. The exceptions are
236 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
237 * possibly network interfaces, etc.
238 */
239int device_init_wakeup(struct device *dev, bool enable)
240{
241 int ret = 0;
242
243 if (enable) {
244 device_set_wakeup_capable(dev, true);
245 ret = device_wakeup_enable(dev);
246 } else {
247 device_set_wakeup_capable(dev, false);
248 }
249
250 return ret;
251}
252EXPORT_SYMBOL_GPL(device_init_wakeup);
253
254/**
255 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
256 * @dev: Device to handle.
257 */
258int device_set_wakeup_enable(struct device *dev, bool enable)
259{
260 if (!dev || !dev->power.can_wakeup)
261 return -EINVAL;
262
263 return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
264}
265EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
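At the driver level the familiar pattern is unchanged by the new object-based implementation; a hypothetical fragment (foo_* names, including the foo_arm_wake_irq() helper, are illustrative):

static int foo_probe(struct device *dev)
{
        /* Creates, registers and attaches a wakeup source named after dev. */
        return device_init_wakeup(dev, true);
}

static int foo_suspend(struct device *dev)
{
        if (device_may_wakeup(dev))
                foo_arm_wake_irq(dev);  /* hypothetical wake-IRQ setup */
        return 0;
}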
35 266
36/* 267/*
37 * The functions below use the observation that each wakeup event starts a 268 * The functions below use the observation that each wakeup event starts a
@@ -55,118 +286,259 @@ static unsigned long events_timer_expires;
55 * knowledge, however, may not be available to it, so it can simply specify time 286 * knowledge, however, may not be available to it, so it can simply specify time
56 * to wait before the system can be suspended and pass it as the second 287 * to wait before the system can be suspended and pass it as the second
57 * argument of pm_wakeup_event(). 288 * argument of pm_wakeup_event().
289 *
290 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
291 * "no suspend" period will be ended either by the pm_relax(), or by the timer
292 * function executed when the timer expires, whichever comes first.
58 */ 293 */
59 294
60/** 295/**
296 * wakeup_source_activate - Mark given wakeup source as active.
297 * @ws: Wakeup source to handle.
298 *
299 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
300 * core of the event by incrementing the counter of wakeup events being
301 * processed.
302 */
303static void wakeup_source_activate(struct wakeup_source *ws)
304{
305 ws->active = true;
306 ws->active_count++;
307 ws->timer_expires = jiffies;
308 ws->last_time = ktime_get();
309
310 atomic_inc(&events_in_progress);
311}
312
313/**
314 * __pm_stay_awake - Notify the PM core of a wakeup event.
315 * @ws: Wakeup source object associated with the source of the event.
316 *
317 * It is safe to call this function from interrupt context.
318 */
319void __pm_stay_awake(struct wakeup_source *ws)
320{
321 unsigned long flags;
322
323 if (!ws)
324 return;
325
326 spin_lock_irqsave(&ws->lock, flags);
327 ws->event_count++;
328 if (!ws->active)
329 wakeup_source_activate(ws);
330 spin_unlock_irqrestore(&ws->lock, flags);
331}
332EXPORT_SYMBOL_GPL(__pm_stay_awake);
333
334/**
61 * pm_stay_awake - Notify the PM core that a wakeup event is being processed. 335 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
62 * @dev: Device the wakeup event is related to. 336 * @dev: Device the wakeup event is related to.
63 * 337 *
64 * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the 338 * Notify the PM core of a wakeup event (signaled by @dev) by calling
65 * counter of wakeup events being processed. If @dev is not NULL, the counter 339 * __pm_stay_awake for the @dev's wakeup source object.
66 * of wakeup events related to @dev is incremented too.
67 * 340 *
68 * Call this function after detecting a wakeup event if pm_relax() is going 341 * Call this function after detecting a wakeup event if pm_relax() is going
69 * to be called directly after processing the event (and possibly passing it to 342 * to be called directly after processing the event (and possibly passing it to
70 * user space for further processing). 343 * user space for further processing).
71 *
72 * It is safe to call this function from interrupt context.
73 */ 344 */
74void pm_stay_awake(struct device *dev) 345void pm_stay_awake(struct device *dev)
75{ 346{
76 unsigned long flags; 347 unsigned long flags;
77 348
78 spin_lock_irqsave(&events_lock, flags); 349 if (!dev)
79 if (dev) 350 return;
80 dev->power.wakeup_count++;
81 351
82 events_in_progress++; 352 spin_lock_irqsave(&dev->power.lock, flags);
83 spin_unlock_irqrestore(&events_lock, flags); 353 __pm_stay_awake(dev->power.wakeup);
354 spin_unlock_irqrestore(&dev->power.lock, flags);
84} 355}
356EXPORT_SYMBOL_GPL(pm_stay_awake);
85 357
86/** 358/**
87 * pm_relax - Notify the PM core that processing of a wakeup event has ended. 359 * wakeup_source_deactivate - Mark given wakeup source as inactive.
360 * @ws: Wakeup source to handle.
88 * 361 *
89 * Notify the PM core that a wakeup event has been processed by decrementing 362 * Update the @ws' statistics and notify the PM core that the wakeup source has
90 * the counter of wakeup events being processed and incrementing the counter 363 * become inactive by decrementing the counter of wakeup events being processed
91 * of registered wakeup events. 364 * and incrementing the counter of registered wakeup events.
365 */
366static void wakeup_source_deactivate(struct wakeup_source *ws)
367{
368 ktime_t duration;
369 ktime_t now;
370
371 ws->relax_count++;
372 /*
373 * __pm_relax() may be called directly or from a timer function.
374 * If it is called directly right after the timer function has been
375 * started, but before the timer function calls __pm_relax(), it is
376 * possible that __pm_stay_awake() will be called in the meantime and
377 * will set ws->active. Then, ws->active may be cleared immediately
378 * by the __pm_relax() called from the timer function, but in such a
379 * case ws->relax_count will be different from ws->active_count.
380 */
381 if (ws->relax_count != ws->active_count) {
382 ws->relax_count--;
383 return;
384 }
385
386 ws->active = false;
387
388 now = ktime_get();
389 duration = ktime_sub(now, ws->last_time);
390 ws->total_time = ktime_add(ws->total_time, duration);
391 if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
392 ws->max_time = duration;
393
394 del_timer(&ws->timer);
395
396 /*
397 * event_count has to be incremented before events_in_progress is
398 * modified, so that the callers of pm_check_wakeup_events() and
399 * pm_save_wakeup_count() don't see the old value of event_count and
400 * events_in_progress equal to zero at the same time.
401 */
402 atomic_inc(&event_count);
403 smp_mb__before_atomic_dec();
404 atomic_dec(&events_in_progress);
405}
406
407/**
408 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
409 * @ws: Wakeup source object associated with the source of the event.
92 * 410 *
93 * Call this function for wakeup events whose processing started with calling 411 * Call this function for wakeup events whose processing started with calling
94 * pm_stay_awake(). 412 * __pm_stay_awake().
95 * 413 *
96 * It is safe to call it from interrupt context. 414 * It is safe to call it from interrupt context.
97 */ 415 */
98void pm_relax(void) 416void __pm_relax(struct wakeup_source *ws)
99{ 417{
100 unsigned long flags; 418 unsigned long flags;
101 419
102 spin_lock_irqsave(&events_lock, flags); 420 if (!ws)
103 if (events_in_progress) { 421 return;
104 events_in_progress--; 422
105 event_count++; 423 spin_lock_irqsave(&ws->lock, flags);
106 } 424 if (ws->active)
107 spin_unlock_irqrestore(&events_lock, flags); 425 wakeup_source_deactivate(ws);
426 spin_unlock_irqrestore(&ws->lock, flags);
427}
428EXPORT_SYMBOL_GPL(__pm_relax);
429
430/**
431 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
432 * @dev: Device that signaled the event.
433 *
434 * Execute __pm_relax() for the @dev's wakeup source object.
435 */
436void pm_relax(struct device *dev)
437{
438 unsigned long flags;
439
440 if (!dev)
441 return;
442
443 spin_lock_irqsave(&dev->power.lock, flags);
444 __pm_relax(dev->power.wakeup);
445 spin_unlock_irqrestore(&dev->power.lock, flags);
108} 446}
447EXPORT_SYMBOL_GPL(pm_relax);
109 448
110/** 449/**
111 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. 450 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
451 * @data: Address of the wakeup source object associated with the event source.
112 * 452 *
113 * Decrease the counter of wakeup events being processed after it was increased 453 * Call __pm_relax() for the wakeup source whose address is stored in @data.
114 * by pm_wakeup_event().
115 */ 454 */
116static void pm_wakeup_timer_fn(unsigned long data) 455static void pm_wakeup_timer_fn(unsigned long data)
117{ 456{
457 __pm_relax((struct wakeup_source *)data);
458}
459
460/**
461 * __pm_wakeup_event - Notify the PM core of a wakeup event.
462 * @ws: Wakeup source object associated with the event source.
463 * @msec: Anticipated event processing time (in milliseconds).
464 *
465 * Notify the PM core of a wakeup event whose source is @ws that will take
466 * approximately @msec milliseconds to be processed by the kernel. If @ws is
467 * not active, activate it. If @msec is nonzero, set up the @ws' timer to
468 * execute pm_wakeup_timer_fn() in the future.
469 *
470 * It is safe to call this function from interrupt context.
471 */
472void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
473{
118 unsigned long flags; 474 unsigned long flags;
475 unsigned long expires;
119 476
120 spin_lock_irqsave(&events_lock, flags); 477 if (!ws)
121 if (events_timer_expires 478 return;
122 && time_before_eq(events_timer_expires, jiffies)) { 479
123 events_in_progress--; 480 spin_lock_irqsave(&ws->lock, flags);
124 events_timer_expires = 0; 481
482 ws->event_count++;
483 if (!ws->active)
484 wakeup_source_activate(ws);
485
486 if (!msec) {
487 wakeup_source_deactivate(ws);
488 goto unlock;
125 } 489 }
126 spin_unlock_irqrestore(&events_lock, flags); 490
491 expires = jiffies + msecs_to_jiffies(msec);
492 if (!expires)
493 expires = 1;
494
495 if (time_after(expires, ws->timer_expires)) {
496 mod_timer(&ws->timer, expires);
497 ws->timer_expires = expires;
498 }
499
500 unlock:
501 spin_unlock_irqrestore(&ws->lock, flags);
127} 502}
503EXPORT_SYMBOL_GPL(__pm_wakeup_event);
504
128 505
129/** 506/**
130 * pm_wakeup_event - Notify the PM core of a wakeup event. 507 * pm_wakeup_event - Notify the PM core of a wakeup event.
131 * @dev: Device the wakeup event is related to. 508 * @dev: Device the wakeup event is related to.
132 * @msec: Anticipated event processing time (in milliseconds). 509 * @msec: Anticipated event processing time (in milliseconds).
133 * 510 *
134 * Notify the PM core of a wakeup event (signaled by @dev) that will take 511 * Call __pm_wakeup_event() for the @dev's wakeup source object.
135 * approximately @msec milliseconds to be processed by the kernel. Increment
136 * the counter of registered wakeup events and (if @msec is nonzero) set up
137 * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
138 * timer has not been set up already, increment the counter of wakeup events
139 * being processed). If @dev is not NULL, the counter of wakeup events related
140 * to @dev is incremented too.
141 *
142 * It is safe to call this function from interrupt context.
143 */ 512 */
144void pm_wakeup_event(struct device *dev, unsigned int msec) 513void pm_wakeup_event(struct device *dev, unsigned int msec)
145{ 514{
146 unsigned long flags; 515 unsigned long flags;
147 516
148 spin_lock_irqsave(&events_lock, flags); 517 if (!dev)
149 event_count++; 518 return;
150 if (dev)
151 dev->power.wakeup_count++;
152
153 if (msec) {
154 unsigned long expires;
155 519
156 expires = jiffies + msecs_to_jiffies(msec); 520 spin_lock_irqsave(&dev->power.lock, flags);
157 if (!expires) 521 __pm_wakeup_event(dev->power.wakeup, msec);
158 expires = 1; 522 spin_unlock_irqrestore(&dev->power.lock, flags);
523}
524EXPORT_SYMBOL_GPL(pm_wakeup_event);
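Both forms are interrupt-safe, so a wake-capable interrupt handler can simply report the event and let the timer close the "no suspend" window. A hypothetical sketch (foo_wake_irq() and the 100 ms budget are illustrative):

#include <linux/interrupt.h>

static irqreturn_t foo_wake_irq(int irq, void *data)
{
        struct device *dev = data;

        pm_wakeup_event(dev, 100);      /* __pm_relax() runs when the timer fires */
        return IRQ_HANDLED;
}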
159 525
160 if (!events_timer_expires 526/**
161 || time_after(expires, events_timer_expires)) { 527 * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
162 if (!events_timer_expires) 528 */
163 events_in_progress++; 529static void pm_wakeup_update_hit_counts(void)
530{
531 unsigned long flags;
532 struct wakeup_source *ws;
164 533
165 mod_timer(&events_timer, expires); 534 rcu_read_lock();
166 events_timer_expires = expires; 535 list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
167 } 536 spin_lock_irqsave(&ws->lock, flags);
537 if (ws->active)
538 ws->hit_count++;
539 spin_unlock_irqrestore(&ws->lock, flags);
168 } 540 }
169 spin_unlock_irqrestore(&events_lock, flags); 541 rcu_read_unlock();
170} 542}
171 543
172/** 544/**
@@ -184,10 +556,13 @@ bool pm_check_wakeup_events(void)
184 556
185 spin_lock_irqsave(&events_lock, flags); 557 spin_lock_irqsave(&events_lock, flags);
186 if (events_check_enabled) { 558 if (events_check_enabled) {
187 ret = (event_count == saved_event_count) && !events_in_progress; 559 ret = ((unsigned int)atomic_read(&event_count) == saved_count)
560 && !atomic_read(&events_in_progress);
188 events_check_enabled = ret; 561 events_check_enabled = ret;
189 } 562 }
190 spin_unlock_irqrestore(&events_lock, flags); 563 spin_unlock_irqrestore(&events_lock, flags);
564 if (!ret)
565 pm_wakeup_update_hit_counts();
191 return ret; 566 return ret;
192} 567}
193 568
@@ -202,24 +577,20 @@ bool pm_check_wakeup_events(void)
202 * drop down to zero has been interrupted by a signal (and the current number 577 * drop down to zero has been interrupted by a signal (and the current number
203 * of wakeup events being processed is still nonzero). Otherwise return true. 578 * of wakeup events being processed is still nonzero). Otherwise return true.
204 */ 579 */
205bool pm_get_wakeup_count(unsigned long *count) 580bool pm_get_wakeup_count(unsigned int *count)
206{ 581{
207 bool ret; 582 bool ret;
208 583
209 spin_lock_irq(&events_lock);
210 if (capable(CAP_SYS_ADMIN)) 584 if (capable(CAP_SYS_ADMIN))
211 events_check_enabled = false; 585 events_check_enabled = false;
212 586
213 while (events_in_progress && !signal_pending(current)) { 587 while (atomic_read(&events_in_progress) && !signal_pending(current)) {
214 spin_unlock_irq(&events_lock); 588 pm_wakeup_update_hit_counts();
215 589 schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
216 schedule_timeout_interruptible(msecs_to_jiffies(100));
217
218 spin_lock_irq(&events_lock);
219 } 590 }
220 *count = event_count; 591
221 ret = !events_in_progress; 592 ret = !atomic_read(&events_in_progress);
222 spin_unlock_irq(&events_lock); 593 *count = atomic_read(&event_count);
223 return ret; 594 return ret;
224} 595}
225 596
@@ -232,16 +603,102 @@ bool pm_get_wakeup_count(unsigned long *count)
232 * old number of registered wakeup events to be used by pm_check_wakeup_events() 603 * old number of registered wakeup events to be used by pm_check_wakeup_events()
233 * and return true. Otherwise return false. 604 * and return true. Otherwise return false.
234 */ 605 */
235bool pm_save_wakeup_count(unsigned long count) 606bool pm_save_wakeup_count(unsigned int count)
236{ 607{
237 bool ret = false; 608 bool ret = false;
238 609
239 spin_lock_irq(&events_lock); 610 spin_lock_irq(&events_lock);
240 if (count == event_count && !events_in_progress) { 611 if (count == (unsigned int)atomic_read(&event_count)
241 saved_event_count = count; 612 && !atomic_read(&events_in_progress)) {
613 saved_count = count;
242 events_check_enabled = true; 614 events_check_enabled = true;
243 ret = true; 615 ret = true;
244 } 616 }
245 spin_unlock_irq(&events_lock); 617 spin_unlock_irq(&events_lock);
618 if (!ret)
619 pm_wakeup_update_hit_counts();
620 return ret;
621}
622
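These two functions back the /sys/power/wakeup_count handshake: reading the file waits until no events are in progress, and writing the value back fails if further events were registered in between, telling the caller to retry. A hypothetical user-space sketch of the protocol (paths kept literal, error handling simplified):

#include <stdio.h>

static int try_suspend(void)
{
        FILE *f = fopen("/sys/power/wakeup_count", "r+");
        unsigned int count;
        int err;

        if (!f)
                return -1;
        err = (fscanf(f, "%u", &count) != 1);
        rewind(f);                      /* reposition before switching to writing */
        if (!err)
                err = (fprintf(f, "%u\n", count) < 0);
        if (fclose(f) != 0)             /* write-back rejected: events arrived */
                err = 1;
        if (err)
                return -1;

        f = fopen("/sys/power/state", "w");
        if (!f)
                return -1;
        fprintf(f, "mem\n");
        return fclose(f) ? -1 : 0;
}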
623static struct dentry *wakeup_sources_stats_dentry;
624
625/**
626 * print_wakeup_source_stats - Print wakeup source statistics information.
627 * @m: seq_file to print the statistics into.
628 * @ws: Wakeup source object to print the statistics for.
629 */
630static int print_wakeup_source_stats(struct seq_file *m,
631 struct wakeup_source *ws)
632{
633 unsigned long flags;
634 ktime_t total_time;
635 ktime_t max_time;
636 unsigned long active_count;
637 ktime_t active_time;
638 int ret;
639
640 spin_lock_irqsave(&ws->lock, flags);
641
642 total_time = ws->total_time;
643 max_time = ws->max_time;
644 active_count = ws->active_count;
645 if (ws->active) {
646 active_time = ktime_sub(ktime_get(), ws->last_time);
647 total_time = ktime_add(total_time, active_time);
648 if (active_time.tv64 > max_time.tv64)
649 max_time = active_time;
650 } else {
651 active_time = ktime_set(0, 0);
652 }
653
654 ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
655 "%lld\t\t%lld\t\t%lld\t\t%lld\n",
656 ws->name, active_count, ws->event_count, ws->hit_count,
657 ktime_to_ms(active_time), ktime_to_ms(total_time),
658 ktime_to_ms(max_time), ktime_to_ms(ws->last_time));
659
660 spin_unlock_irqrestore(&ws->lock, flags);
661
246 return ret; 662 return ret;
247} 663}
664
665/**
666 * wakeup_sources_stats_show - Print wakeup sources statistics information.
667 * @m: seq_file to print the statistics into.
668 */
669static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
670{
671 struct wakeup_source *ws;
672
673 seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
674 "active_since\ttotal_time\tmax_time\tlast_change\n");
675
676 rcu_read_lock();
677 list_for_each_entry_rcu(ws, &wakeup_sources, entry)
678 print_wakeup_source_stats(m, ws);
679 rcu_read_unlock();
680
681 return 0;
682}
683
684static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
685{
686 return single_open(file, wakeup_sources_stats_show, NULL);
687}
688
689static const struct file_operations wakeup_sources_stats_fops = {
690 .owner = THIS_MODULE,
691 .open = wakeup_sources_stats_open,
692 .read = seq_read,
693 .llseek = seq_lseek,
694 .release = single_release,
695};
696
697static int __init wakeup_sources_debugfs_init(void)
698{
699 wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
700 S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
701 return 0;
702}
703
704postcore_initcall(wakeup_sources_debugfs_init);
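The resulting /sys/kernel/debug/wakeup_sources file is a tab-separated table with the header printed above, roughly like this (names and numbers entirely illustrative, not real measurements):

name            active_count    event_count     hit_count       active_since    total_time      max_time        last_change
foo_events      2               2               0               0               1430            1210            53029
1-1.2           15              15              1               0               880             120             67012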