author		Linus Torvalds <torvalds@linux-foundation.org>	2010-08-04 14:14:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-04 14:14:36 -0400
commit		f46e9913faeebcb6bd29edf795f12b60acbff171 (patch)
tree		1ed8871d0ebd638094d27317de1d8a53712ae15a
parent		8d91530c5fd7f0b1e8c4ddfea2905e55a178569b (diff)
parent		8d4b9d1bfef117862a2889dec4dac227068544c9 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
PM / Runtime: Add runtime PM statistics (v3)
PM / Runtime: Make runtime_status attribute not debug-only (v. 2)
PM: Do not use dynamically allocated objects in pm_wakeup_event()
PM / Suspend: Fix ordering of calls in suspend error paths
PM / Hibernate: Fix snapshot error code path
PM / Hibernate: Fix hibernation_platform_enter()
pm_qos: Get rid of the allocation in pm_qos_add_request()
pm_qos: Reimplement using plists
plist: Add plist_last
PM: Make it possible to avoid races between wakeup and system sleep
PNPACPI: Add support for remote wakeup
PM: describe kernel policy regarding wakeup defaults (v. 2)
PM / Hibernate: Fix typos in comments in kernel/power/swap.c
29 files changed, 734 insertions, 188 deletions
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index d6a801f45b48..2875f1f74a07 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -114,3 +114,18 @@ Description:
 	if this file contains "1", which is the default. It may be
 	disabled by writing "0" to this file, in which case all devices
 	will be suspended and resumed synchronously.
+
+What:		/sys/power/wakeup_count
+Date:		July 2010
+Contact:	Rafael J. Wysocki <rjw@sisk.pl>
+Description:
+	The /sys/power/wakeup_count file allows user space to put the
+	system into a sleep state while taking into account the
+	concurrent arrival of wakeup events. Reading from it returns
+	the current number of registered wakeup events and it blocks if
+	some wakeup events are being processed at the time the file is
+	read from. Writing to it will only succeed if the current
+	number of wakeup events is equal to the written value and, if
+	successful, will make the kernel abort a subsequent transition
+	to a sleep state if any wakeup events are reported after the
+	write has returned.
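The ABI text above implies a simple user-space suspend protocol: read wakeup_count, write the same value back, and only then write to /sys/power/state. A minimal sketch of that loop follows; the two sysfs paths are the ones documented above, but the error handling and retry policy are illustrative assumptions, not part of this commit.

/* Illustrative user-space sketch of the /sys/power/wakeup_count protocol:
 * read the count, write it back, and suspend only if the write succeeds.
 * The retry-on-failure policy here is an assumption, not mandated by the ABI.
 */
#include <stdio.h>

static int try_suspend(void)
{
	unsigned long count;
	FILE *f;

	f = fopen("/sys/power/wakeup_count", "r");	/* may block */
	if (!f || fscanf(f, "%lu", &count) != 1)
		return -1;
	fclose(f);

	f = fopen("/sys/power/wakeup_count", "w");
	if (!f)
		return -1;
	/* Fails if wakeup events were registered since the read. */
	if (fprintf(f, "%lu", count) < 0 || fclose(f) != 0)
		return -1;

	f = fopen("/sys/power/state", "w");
	if (!f)
		return -1;
	fprintf(f, "mem");	/* aborted by the kernel on new wakeup events */
	return fclose(f) == 0 ? 0 : -1;
}

int main(void)
{
	while (try_suspend() != 0)
		;	/* a wakeup event raced with us; process it and retry */
	return 0;
}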
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 89de75325cea..cbccf9a3cee4 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_PM)	+= sysfs.o
-obj-$(CONFIG_PM_SLEEP)	+= main.o
+obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_OPS)	+= generic_ops.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 941fcb87e52a..5419a49ff135 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
 {
 	dev->power.status = DPM_ON;
 	init_completion(&dev->power.completion);
+	dev->power.wakeup_count = 0;
 	pm_runtime_init(dev);
 }

diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b0ec0e9f27e9..b78c401ffa73 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -123,6 +123,45 @@ int pm_runtime_idle(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_idle);

+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+	unsigned long now = jiffies;
+	int delta;
+
+	delta = now - dev->power.accounting_timestamp;
+
+	if (delta < 0)
+		delta = 0;
+
+	dev->power.accounting_timestamp = now;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		dev->power.suspended_jiffies += delta;
+	else
+		dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+	update_pm_runtime_accounting(dev);
+	dev->power.runtime_status = status;
+}
+
 /**
  * __pm_runtime_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
@@ -197,7 +236,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 		goto repeat;
 	}

-	dev->power.runtime_status = RPM_SUSPENDING;
+	__update_runtime_status(dev, RPM_SUSPENDING);
 	dev->power.deferred_resume = false;

 	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
@@ -228,7 +267,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	}

 	if (retval) {
-		dev->power.runtime_status = RPM_ACTIVE;
+		__update_runtime_status(dev, RPM_ACTIVE);
 		if (retval == -EAGAIN || retval == -EBUSY) {
 			if (dev->power.timer_expires == 0)
 				notify = true;
@@ -237,7 +276,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 			pm_runtime_cancel_pending(dev);
 		}
 	} else {
-		dev->power.runtime_status = RPM_SUSPENDED;
+		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);

 		if (dev->parent) {
@@ -381,7 +420,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}

-	dev->power.runtime_status = RPM_RESUMING;
+	__update_runtime_status(dev, RPM_RESUMING);

 	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
 		spin_unlock_irq(&dev->power.lock);
@@ -411,10 +450,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 	}

 	if (retval) {
-		dev->power.runtime_status = RPM_SUSPENDED;
+		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
 	} else {
-		dev->power.runtime_status = RPM_ACTIVE;
+		__update_runtime_status(dev, RPM_ACTIVE);
 		if (parent)
 			atomic_inc(&parent->power.child_count);
 	}
@@ -848,7 +887,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 	}

  out_set:
-	dev->power.runtime_status = status;
+	__update_runtime_status(dev, status);
 	dev->power.runtime_error = 0;
  out:
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1077,6 +1116,7 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
+	dev->power.accounting_timestamp = jiffies;
 	INIT_WORK(&dev->power.work, pm_runtime_work);

 	dev->power.timer_expires = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a4c33bc51257..e56b4388fe61 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,6 +6,7 @@
 #include <linux/string.h>
 #include <linux/pm_runtime.h>
 #include <asm/atomic.h>
+#include <linux/jiffies.h>
 #include "power.h"

 /*
@@ -73,6 +74,8 @@
  *	device are known to the PM core. However, for some devices this
  *	attribute is set to "enabled" by bus type code or device drivers and in
  *	that cases it should be safe to leave the default value.
+ *
+ *	wakeup_count - Report the number of wakeup events related to the device
  */

 static const char enabled[] = "enabled";
@@ -108,6 +111,65 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
 }

 static DEVICE_ATTR(control, 0644, control_show, control_store);
+
+static ssize_t rtpm_active_time_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+
+static ssize_t rtpm_suspended_time_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n",
+		jiffies_to_msecs(dev->power.suspended_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+
+static ssize_t rtpm_status_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char *p;
+
+	if (dev->power.runtime_error) {
+		p = "error\n";
+	} else if (dev->power.disable_depth) {
+		p = "unsupported\n";
+	} else {
+		switch (dev->power.runtime_status) {
+		case RPM_SUSPENDED:
+			p = "suspended\n";
+			break;
+		case RPM_SUSPENDING:
+			p = "suspending\n";
+			break;
+		case RPM_RESUMING:
+			p = "resuming\n";
+			break;
+		case RPM_ACTIVE:
+			p = "active\n";
+			break;
+		default:
+			return -EIO;
+		}
+	}
+	return sprintf(buf, p);
+}
+
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
 #endif

 static ssize_t
@@ -144,6 +206,16 @@ wake_store(struct device * dev, struct device_attribute *attr,

 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);

+#ifdef CONFIG_PM_SLEEP
+static ssize_t wakeup_count_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", dev->power.wakeup_count);
+}
+
+static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+#endif
+
 #ifdef CONFIG_PM_ADVANCED_DEBUG
 #ifdef CONFIG_PM_RUNTIME

@@ -172,27 +244,8 @@ static ssize_t rtpm_enabled_show(struct device *dev,
 	return sprintf(buf, "enabled\n");
 }

-static ssize_t rtpm_status_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	if (dev->power.runtime_error)
-		return sprintf(buf, "error\n");
-	switch (dev->power.runtime_status) {
-	case RPM_SUSPENDED:
-		return sprintf(buf, "suspended\n");
-	case RPM_SUSPENDING:
-		return sprintf(buf, "suspending\n");
-	case RPM_RESUMING:
-		return sprintf(buf, "resuming\n");
-	case RPM_ACTIVE:
-		return sprintf(buf, "active\n");
-	}
-	return -EIO;
-}
-
 static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
 static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
 static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);

 #endif
@@ -228,14 +281,19 @@ static DEVICE_ATTR(async, 0644, async_show, async_store);
 static struct attribute * power_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_control.attr,
+	&dev_attr_runtime_status.attr,
+	&dev_attr_runtime_suspended_time.attr,
+	&dev_attr_runtime_active_time.attr,
 #endif
 	&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_SLEEP
+	&dev_attr_wakeup_count.attr,
+#endif
 #ifdef CONFIG_PM_ADVANCED_DEBUG
 	&dev_attr_async.attr,
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_runtime_usage.attr,
 	&dev_attr_runtime_active_kids.attr,
-	&dev_attr_runtime_status.attr,
 	&dev_attr_runtime_enabled.attr,
 #endif
 #endif
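With these attributes in place, a PowerTOP-style tool can read per-device runtime-PM statistics straight from sysfs. A minimal user-space sketch follows; the device path is a hypothetical example and the parsing is illustrative, but the attribute names (runtime_status, runtime_active_time, runtime_suspended_time, reported in milliseconds) are the ones added above.

/* Illustrative sketch: read the runtime-PM statistics exported above.
 * The device path is a made-up example; attribute names match the patch.
 */
#include <stdio.h>

static long read_power_attr(const char *dev, const char *attr, char *sbuf)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "%s/power/%s", dev, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (sbuf)
		fscanf(f, "%31s", sbuf);	/* string attribute */
	else
		fscanf(f, "%ld", &val);		/* numeric attribute (ms) */
	fclose(f);
	return val;
}

int main(void)
{
	const char *dev = "/sys/devices/pci0000:00/0000:00:19.0"; /* example */
	char status[32] = "?";
	long active, suspended;

	read_power_attr(dev, "runtime_status", status);
	active = read_power_attr(dev, "runtime_active_time", NULL);
	suspended = read_power_attr(dev, "runtime_suspended_time", NULL);
	printf("%s: %s, active %ld ms, suspended %ld ms\n",
	       dev, status, active, suspended);
	return 0;
}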
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
new file mode 100644
index 000000000000..eb594facfc3f
--- /dev/null
+++ b/drivers/base/power/wakeup.c
@@ -0,0 +1,247 @@
+/*
+ * drivers/base/power/wakeup.c - System wakeup events framework
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/capability.h>
+#include <linux/suspend.h>
+#include <linux/pm.h>
+
+/*
+ * If set, the suspend/hibernate code will abort transitions to a sleep state
+ * if wakeup events are registered during or immediately before the transition.
+ */
+bool events_check_enabled;
+
+/* The counter of registered wakeup events. */
+static unsigned long event_count;
+/* A preserved old value of event_count. */
+static unsigned long saved_event_count;
+/* The counter of wakeup events being processed. */
+static unsigned long events_in_progress;
+
+static DEFINE_SPINLOCK(events_lock);
+
+static void pm_wakeup_timer_fn(unsigned long data);
+
+static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0);
+static unsigned long events_timer_expires;
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended. The moment this period
+ * will end depends on how the wakeup event is going to be processed after being
+ * detected and all of the possible cases can be divided into two distinct
+ * groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user space
+ * for further processing. In that case the functional unit that has detected
+ * the event may later "close" the "no suspend" period associated with it
+ * directly as soon as it has been dealt with. The pair of pm_stay_awake() and
+ * pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one. In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing. This
+ * knowledge, however, may not be available to it, so it can simply specify time
+ * to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ */
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the
+ * counter of wakeup events being processed. If @dev is not NULL, the counter
+ * of wakeup events related to @dev is incremented too.
+ *
+ * Call this function after detecting of a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_stay_awake(struct device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (dev)
+		dev->power.wakeup_count++;
+
+	events_in_progress++;
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ *
+ * Notify the PM core that a wakeup event has been processed by decrementing
+ * the counter of wakeup events being processed and incrementing the counter
+ * of registered wakeup events.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void pm_relax(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_in_progress) {
+		events_in_progress--;
+		event_count++;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ *
+ * Decrease the counter of wakeup events being processed after it was increased
+ * by pm_wakeup_event().
+ */
+static void pm_wakeup_timer_fn(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_timer_expires
+	    && time_before_eq(events_timer_expires, jiffies)) {
+		events_in_progress--;
+		events_timer_expires = 0;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) that will take
+ * approximately @msec milliseconds to be processed by the kernel. Increment
+ * the counter of registered wakeup events and (if @msec is nonzero) set up
+ * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
+ * timer has not been set up already, increment the counter of wakeup events
+ * being processed). If @dev is not NULL, the counter of wakeup events related
+ * to @dev is incremented too.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	event_count++;
+	if (dev)
+		dev->power.wakeup_count++;
+
+	if (msec) {
+		unsigned long expires;
+
+		expires = jiffies + msecs_to_jiffies(msec);
+		if (!expires)
+			expires = 1;
+
+		if (!events_timer_expires
+		    || time_after(expires, events_timer_expires)) {
+			if (!events_timer_expires)
+				events_in_progress++;
+
+			mod_timer(&events_timer, expires);
+			events_timer_expires = expires;
+		}
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_check_wakeup_events - Check for new wakeup events.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past to check if new wakeup events have been registered since
+ * the old value was stored. Check if the current number of wakeup events being
+ * processed is zero.
+ */
+bool pm_check_wakeup_events(void)
+{
+	unsigned long flags;
+	bool ret = true;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_check_enabled) {
+		ret = (event_count == saved_event_count) && !events_in_progress;
+		events_check_enabled = ret;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+	return ret;
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ *
+ * Store the number of registered wakeup events at the address in @count. Block
+ * if the current number of wakeup events being processed is nonzero.
+ *
+ * Return false if the wait for the number of wakeup events being processed to
+ * drop down to zero has been interrupted by a signal (and the current number
+ * of wakeup events being processed is still nonzero). Otherwise return true.
+ */
+bool pm_get_wakeup_count(unsigned long *count)
+{
+	bool ret;
+
+	spin_lock_irq(&events_lock);
+	if (capable(CAP_SYS_ADMIN))
+		events_check_enabled = false;
+
+	while (events_in_progress && !signal_pending(current)) {
+		spin_unlock_irq(&events_lock);
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		spin_lock_irq(&events_lock);
+	}
+	*count = event_count;
+	ret = !events_in_progress;
+	spin_unlock_irq(&events_lock);
+	return ret;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events to be used by pm_check_wakeup_events()
+ * and return true. Otherwise return false.
+ */
+bool pm_save_wakeup_count(unsigned long count)
+{
+	bool ret = false;
+
+	spin_lock_irq(&events_lock);
+	if (count == event_count && !events_in_progress) {
+		saved_event_count = count;
+		events_check_enabled = true;
+		ret = true;
+	}
+	spin_unlock_irq(&events_lock);
+	return ret;
+}
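The comment block in wakeup.c describes two usage patterns: a balanced pm_stay_awake()/pm_relax() pair when one unit handles the whole event, and pm_wakeup_event() with a timeout when processing is handed off elsewhere. A hedged driver-side sketch of both patterns; the handlers and the 100 ms estimate are made up, only the three PM-core calls come from this patch.

/* Illustrative sketch of the two wakeup-event patterns from the comment
 * block above. The driver structure and the 100 ms estimate are assumptions;
 * only pm_stay_awake(), pm_relax() and pm_wakeup_event() are from this patch.
 */
#include <linux/interrupt.h>
#include <linux/pm.h>

/* Pattern 1: the same unit detects and fully processes the event. */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	pm_stay_awake(dev);		/* open the "no suspend" period */
	/* ... handle the wakeup event completely ... */
	pm_relax();			/* close it once the event is dealt with */
	return IRQ_HANDLED;
}

/* Pattern 2: the event is passed on for processing by another unit; give
 * that consumer an estimated window before the system may be suspended.
 */
static irqreturn_t bar_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	/* ... queue the event for another unit or user space ... */
	pm_wakeup_event(dev, 100);	/* assumed ~100 ms processing time */
	return IRQ_HANDLED;
}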
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57a7e41da69e..9f13b660b801 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2901,10 +2901,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
		 * dropped transactions.
		 */
		pm_qos_update_request(
-			adapter->netdev->pm_qos_req, 55);
+			&adapter->netdev->pm_qos_req, 55);
	} else {
		pm_qos_update_request(
-			adapter->netdev->pm_qos_req,
+			&adapter->netdev->pm_qos_req,
			PM_QOS_DEFAULT_VALUE);
	}
 }
@@ -3196,9 +3196,9 @@ int e1000e_up(struct e1000_adapter *adapter)

	/* DMA latency requirement to workaround early-receive/jumbo issue */
	if (adapter->flags & FLAG_HAS_ERT)
-		adapter->netdev->pm_qos_req =
-			pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
-					   PM_QOS_DEFAULT_VALUE);
+		pm_qos_add_request(&adapter->netdev->pm_qos_req,
+				   PM_QOS_CPU_DMA_LATENCY,
+				   PM_QOS_DEFAULT_VALUE);

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);
@@ -3263,11 +3263,8 @@ void e1000e_down(struct e1000_adapter *adapter)
	e1000_clean_tx_ring(adapter);
	e1000_clean_rx_ring(adapter);

-	if (adapter->flags & FLAG_HAS_ERT) {
-		pm_qos_remove_request(
-			adapter->netdev->pm_qos_req);
-		adapter->netdev->pm_qos_req = NULL;
-	}
+	if (adapter->flags & FLAG_HAS_ERT)
+		pm_qos_remove_request(&adapter->netdev->pm_qos_req);

	/*
	 * TODO: for power management, we could drop the link and
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 5e2b2a8c56c6..add6197d3bcb 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -48,7 +48,7 @@
 #define DRV_VERSION "1.0.0-k0"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
-struct pm_qos_request_list *igbvf_driver_pm_qos_req;
+static struct pm_qos_request_list igbvf_driver_pm_qos_req;
 static const char igbvf_driver_string[] =
		  "Intel(R) Virtual Function Network Driver";
 static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -2902,8 +2902,8 @@ static int __init igbvf_init_module(void)
	printk(KERN_INFO "%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);
-	igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
-				PM_QOS_DEFAULT_VALUE);
+	pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);

	return ret;
 }
@@ -2918,8 +2918,7 @@ module_init(igbvf_init_module);
 static void __exit igbvf_exit_module(void)
 {
	pci_unregister_driver(&igbvf_driver);
-	pm_qos_remove_request(igbvf_driver_pm_qos_req);
-	igbvf_driver_pm_qos_req = NULL;
+	pm_qos_remove_request(&igbvf_driver_pm_qos_req);
 }
 module_exit(igbvf_exit_module);

diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 0bd4dfa59a8a..7f0d98b885bc 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -174,7 +174,7 @@ that only one external action is invoked at a time.
 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2100 Network Driver"
 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"

-struct pm_qos_request_list *ipw2100_pm_qos_req;
+struct pm_qos_request_list ipw2100_pm_qos_req;

 /* Debugging stuff */
 #ifdef CONFIG_IPW2100_DEBUG
@@ -1741,7 +1741,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
	/* the ipw2100 hardware really doesn't want power management delays
	 * longer than 175usec
	 */
-	pm_qos_update_request(ipw2100_pm_qos_req, 175);
+	pm_qos_update_request(&ipw2100_pm_qos_req, 175);

	/* If the interrupt is enabled, turn it off... */
	spin_lock_irqsave(&priv->low_lock, flags);
@@ -1889,7 +1889,7 @@ static void ipw2100_down(struct ipw2100_priv *priv)
	ipw2100_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->low_lock, flags);

-	pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+	pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);

	/* We have to signal any supplicant if we are disassociating */
	if (associated)
@@ -6669,8 +6669,8 @@ static int __init ipw2100_init(void)
	if (ret)
		goto out;

-	ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
-			PM_QOS_DEFAULT_VALUE);
+	pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
 #ifdef CONFIG_IPW2100_DEBUG
	ipw2100_debug_level = debug;
	ret = driver_create_file(&ipw2100_pci_driver.driver,
@@ -6692,7 +6692,7 @@ static void __exit ipw2100_exit(void)
			    &driver_attr_debug_level);
 #endif
	pci_unregister_driver(&ipw2100_pci_driver);
-	pm_qos_remove_request(ipw2100_pm_qos_req);
+	pm_qos_remove_request(&ipw2100_pm_qos_req);
 }

 module_init(ipw2100_init);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 2e7a3bf13824..1ab98bbe58dd 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -48,6 +48,7 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
	if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
		pci_check_pme_status(pci_dev);
		pm_runtime_resume(&pci_dev->dev);
+		pci_wakeup_event(pci_dev);
		if (pci_dev->subordinate)
			pci_pme_wakeup_bus(pci_dev->subordinate);
	}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 740fb4ea9669..130ed1daf0f8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1275,6 +1275,22 @@ bool pci_check_pme_status(struct pci_dev *dev)
	return ret;
 }

+/*
+ * Time to wait before the system can be put into a sleep state after reporting
+ * a wakeup event signaled by a PCI device.
+ */
+#define PCI_WAKEUP_COOLDOWN	100
+
+/**
+ * pci_wakeup_event - Report a wakeup event related to a given PCI device.
+ * @dev: Device to report the wakeup event for.
+ */
+void pci_wakeup_event(struct pci_dev *dev)
+{
+	if (device_may_wakeup(&dev->dev))
+		pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN);
+}
+
 /**
  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
  * @dev: Device to handle.
@@ -1285,8 +1301,10 @@ bool pci_check_pme_status(struct pci_dev *dev)
  */
 static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
 {
-	if (pci_check_pme_status(dev))
+	if (pci_check_pme_status(dev)) {
		pm_request_resume(&dev->dev);
+		pci_wakeup_event(dev);
+	}
	return 0;
 }

diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f8077b3c8c8c..c8b7fd056ccd 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -56,6 +56,7 @@ extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
 extern void pci_disable_enabled_device(struct pci_dev *dev);
 extern bool pci_check_pme_status(struct pci_dev *dev);
 extern int pci_finish_runtime_suspend(struct pci_dev *dev);
+extern void pci_wakeup_event(struct pci_dev *dev);
 extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
 extern void pci_pme_wakeup_bus(struct pci_bus *bus);
 extern void pci_pm_init(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme/pcie_pme.c
index d672a0a63816..bbdea18693d9 100644
--- a/drivers/pci/pcie/pme/pcie_pme.c
+++ b/drivers/pci/pcie/pme/pcie_pme.c
@@ -154,6 +154,7 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus)
		/* Skip PCIe devices in case we started from a root port. */
		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
			pm_request_resume(&dev->dev);
+			pci_wakeup_event(dev);
			ret = true;
		}

@@ -254,8 +255,10 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
	if (found) {
		/* The device is there, but we have to check its PME status. */
		found = pci_check_pme_status(dev);
-		if (found)
+		if (found) {
			pm_request_resume(&dev->dev);
+			pci_wakeup_event(dev);
+		}
		pci_dev_put(dev);
	} else if (devfn) {
		/*
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 5dba90995d9e..88b3cde52596 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -164,6 +164,9 @@ int __pnp_add_device(struct pnp_dev *dev)
	list_add_tail(&dev->global_list, &pnp_global);
	list_add_tail(&dev->protocol_list, &dev->protocol->devices);
	spin_unlock(&pnp_lock);
+	if (dev->protocol->can_wakeup)
+		device_set_wakeup_capable(&dev->dev,
+					  dev->protocol->can_wakeup(dev));
	return device_register(&dev->dev);
 }

diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index f7ff628b7d94..dc4e32e031e9 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -122,17 +122,37 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
 }

 #ifdef CONFIG_ACPI_SLEEP
+static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
+{
+	struct acpi_device *acpi_dev = dev->data;
+	acpi_handle handle = acpi_dev->handle;
+
+	return acpi_bus_can_wakeup(handle);
+}
+
 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
 {
	struct acpi_device *acpi_dev = dev->data;
	acpi_handle handle = acpi_dev->handle;
	int power_state;

+	if (device_can_wakeup(&dev->dev)) {
+		int rc = acpi_pm_device_sleep_wake(&dev->dev,
+				device_may_wakeup(&dev->dev));
+
+		if (rc)
+			return rc;
+	}
	power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
	if (power_state < 0)
		power_state = (state.event == PM_EVENT_ON) ?
				ACPI_STATE_D0 : ACPI_STATE_D3;

+	/* acpi_bus_set_power() often fails (keyboard port can't be
+	 * powered-down?), and in any case, our return value is ignored
+	 * by pnp_bus_suspend().  Hence we don't revert the wakeup
+	 * setting if the set_power fails.
+	 */
	return acpi_bus_set_power(handle, power_state);
 }

@@ -141,6 +161,8 @@ static int pnpacpi_resume(struct pnp_dev *dev)
	struct acpi_device *acpi_dev = dev->data;
	acpi_handle handle = acpi_dev->handle;

+	if (device_may_wakeup(&dev->dev))
+		acpi_pm_device_sleep_wake(&dev->dev, false);
	return acpi_bus_set_power(handle, ACPI_STATE_D0);
 }
 #endif
@@ -151,6 +173,7 @@ struct pnp_protocol pnpacpi_protocol = {
	.set = pnpacpi_set_resources,
	.disable = pnpacpi_disable_resources,
 #ifdef CONFIG_ACPI_SLEEP
+	.can_wakeup = pnpacpi_can_wakeup,
	.suspend = pnpacpi_suspend,
	.resume = pnpacpi_resume,
 #endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b21e4054c12c..2f22119b4b08 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -779,7 +779,7 @@ struct net_device {
	 */
	char			name[IFNAMSIZ];

-	struct pm_qos_request_list	*pm_qos_req;
+	struct pm_qos_request_list	pm_qos_req;

	/* device name hash chain */
	struct hlist_node	name_hlist;
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 6898985e7b38..7254eda078e5 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -260,6 +260,23 @@ static inline int plist_node_empty(const struct plist_node *node)
 #endif

 /**
+ * plist_last_entry - get the struct for the last entry
+ * @head:	the &struct plist_head pointer
+ * @type:	the type of the struct this is embedded in
+ * @member:	the name of the list_struct within the struct
+ */
+#ifdef CONFIG_DEBUG_PI_LIST
+# define plist_last_entry(head, type, member)	\
+({ \
+	WARN_ON(plist_head_empty(head)); \
+	container_of(plist_last(head), type, member); \
+})
+#else
+# define plist_last_entry(head, type, member)	\
+	container_of(plist_last(head), type, member)
+#endif
+
+/**
  * plist_first - return the first node (and thus, highest priority)
  * @head: the &struct plist_head pointer
  *
@@ -271,4 +288,16 @@ static inline struct plist_node *plist_first(const struct plist_head *head)
			  struct plist_node, plist.node_list);
 }

+/**
+ * plist_last - return the last node (and thus, lowest priority)
+ * @head: the &struct plist_head pointer
+ *
+ * Assumes the plist is _not_ empty.
+ */
+static inline struct plist_node *plist_last(const struct plist_head *head)
+{
+	return list_entry(head->node_list.prev,
+			  struct plist_node, plist.node_list);
+}
+
 #endif
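plist_last() mirrors plist_first(): a plist keeps node_list sorted by ascending priority value, so the last node is the lowest-priority entry, which lets a MAX-style aggregation (as in the pm_qos rework below) find its extreme in O(1). A small hedged sketch; struct foo_req and its fields are made up for the example.

/* Illustrative sketch of plist_last()/plist_last_entry(); struct foo_req
 * is hypothetical, keyed so that the plist priority equals the value.
 */
#include <linux/plist.h>

struct foo_req {
	s32 value;
	struct plist_node node;		/* node prio == value */
};

static s32 foo_min_value(struct plist_head *head, s32 *max)
{
	struct foo_req *first, *last;

	if (plist_head_empty(head))
		return 0;

	first = plist_first_entry(head, struct foo_req, node); /* lowest prio value */
	last = plist_last_entry(head, struct foo_req, node);   /* highest prio value */
	*max = last->value;
	return first->value;
}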
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 8e258c727971..52e8c55ff314 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -457,6 +457,7 @@ struct dev_pm_info {
 #ifdef CONFIG_PM_SLEEP
	struct list_head	entry;
	struct completion	completion;
+	unsigned long		wakeup_count;
 #endif
 #ifdef CONFIG_PM_RUNTIME
	struct timer_list	suspend_timer;
@@ -476,9 +477,15 @@ struct dev_pm_info {
	enum rpm_request	request;
	enum rpm_status		runtime_status;
	int			runtime_error;
+	unsigned long		active_jiffies;
+	unsigned long		suspended_jiffies;
+	unsigned long		accounting_timestamp;
 #endif
 };

+extern void update_pm_runtime_accounting(struct device *dev);
+
+
 /*
  * The PM_EVENT_ messages are also used by drivers implementing the legacy
  * suspend framework, based on the ->suspend() and ->resume() callbacks common
@@ -552,6 +559,11 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
	} while (0)

 extern void device_pm_wait_for_dev(struct device *sub, struct device *dev);
+
+/* drivers/base/power/wakeup.c */
+extern void pm_wakeup_event(struct device *dev, unsigned int msec);
+extern void pm_stay_awake(struct device *dev);
+extern void pm_relax(void);
 #else /* !CONFIG_PM_SLEEP */

 #define device_pm_lock() do {} while (0)
@@ -565,6 +577,10 @@ static inline int dpm_suspend_start(pm_message_t state)
 #define suspend_report_result(fn, ret) do {} while (0)

 static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {}
+
+static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}
+static inline void pm_stay_awake(struct device *dev) {}
+static inline void pm_relax(void) {}
 #endif /* !CONFIG_PM_SLEEP */

 /* How to reorder dpm_list after device_move() */
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index 8ba440e5eb7f..77cbddb3784c 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -1,8 +1,10 @@
+#ifndef _LINUX_PM_QOS_PARAMS_H
+#define _LINUX_PM_QOS_PARAMS_H
 /* interface for the pm_qos_power infrastructure of the linux kernel.
  *
  * Mark Gross <mgross@linux.intel.com>
  */
-#include <linux/list.h>
+#include <linux/plist.h>
 #include <linux/notifier.h>
 #include <linux/miscdevice.h>

@@ -14,9 +16,12 @@
 #define PM_QOS_NUM_CLASSES 4
 #define PM_QOS_DEFAULT_VALUE -1

-struct pm_qos_request_list;
+struct pm_qos_request_list {
+	struct plist_node list;
+	int pm_qos_class;
+};

-struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value);
+void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value);
 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
		s32 new_value);
 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
@@ -24,4 +29,6 @@ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
 int pm_qos_request(int pm_qos_class);
 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_request_active(struct pm_qos_request_list *req);

+#endif
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 22d64c18056c..76aca48722ae 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -29,8 +29,11 @@

 #ifdef CONFIG_PM

-/* changes to device_may_wakeup take effect on the next pm state change.
- * by default, devices should wakeup if they can.
+/* Changes to device_may_wakeup take effect on the next pm state change.
+ *
+ * By default, most devices should leave wakeup disabled.  The exceptions
+ * are devices that everyone expects to be wakeup sources: keyboards,
+ * power buttons, possibly network interfaces, etc.
  */
 static inline void device_init_wakeup(struct device *dev, bool val)
 {
@@ -59,7 +62,7 @@ static inline bool device_may_wakeup(struct device *dev)

 #else /* !CONFIG_PM */

-/* For some reason the next two routines work even without CONFIG_PM */
+/* For some reason the following routines work even without CONFIG_PM */
 static inline void device_init_wakeup(struct device *dev, bool val)
 {
	dev->power.can_wakeup = val;
@@ -67,6 +70,7 @@ static inline void device_init_wakeup(struct device *dev, bool val)

 static inline void device_set_wakeup_capable(struct device *dev, bool capable)
 {
+	dev->power.can_wakeup = capable;
 }

 static inline bool device_can_wakeup(struct device *dev)
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 7c4193eb0072..1bc1338b817b 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -414,6 +414,7 @@ struct pnp_protocol {
	int (*disable) (struct pnp_dev *dev);

	/* protocol specific suspend/resume */
+	bool (*can_wakeup) (struct pnp_dev *dev);
	int (*suspend) (struct pnp_dev * dev, pm_message_t state);
	int (*resume) (struct pnp_dev * dev);

diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index bc7d6bb4cd8e..4af270ec2204 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -61,14 +61,15 @@ typedef int __bitwise suspend_state_t; | |||
61 | * before device drivers' late suspend callbacks are executed. It returns | 61 | * before device drivers' late suspend callbacks are executed. It returns |
62 | * 0 on success or a negative error code otherwise, in which case the | 62 | * 0 on success or a negative error code otherwise, in which case the |
63 | * system cannot enter the desired sleep state (@prepare_late(), @enter(), | 63 | * system cannot enter the desired sleep state (@prepare_late(), @enter(), |
64 | * @wake(), and @finish() will not be called in that case). | 64 | * and @wake() will not be called in that case). |
65 | * | 65 | * |
66 | * @prepare_late: Finish preparing the platform for entering the system sleep | 66 | * @prepare_late: Finish preparing the platform for entering the system sleep |
67 | * state indicated by @begin(). | 67 | * state indicated by @begin(). |
68 | * @prepare_late is called before disabling nonboot CPUs and after | 68 | * @prepare_late is called before disabling nonboot CPUs and after |
69 | * device drivers' late suspend callbacks have been executed. It returns | 69 | * device drivers' late suspend callbacks have been executed. It returns |
70 | * 0 on success or a negative error code otherwise, in which case the | 70 | * 0 on success or a negative error code otherwise, in which case the |
71 | * system cannot enter the desired sleep state (@enter() and @wake()). | 71 | * system cannot enter the desired sleep state (@enter() will not be |
72 | * executed). | ||
72 | * | 73 | * |
73 | * @enter: Enter the system sleep state indicated by @begin() or represented by | 74 | * @enter: Enter the system sleep state indicated by @begin() or represented by |
74 | * the argument if @begin() is not implemented. | 75 | * the argument if @begin() is not implemented. |
@@ -81,14 +82,15 @@ typedef int __bitwise suspend_state_t; | |||
81 | * resume callbacks are executed. | 82 | * resume callbacks are executed. |
82 | * This callback is optional, but should be implemented by the platforms | 83 | * This callback is optional, but should be implemented by the platforms |
83 | * that implement @prepare_late(). If implemented, it is always called | 84 | * that implement @prepare_late(). If implemented, it is always called |
84 | * after @enter(), even if @enter() fails. | 85 | * after @prepare_late and @enter(), even if one of them fails. |
85 | * | 86 | * |
86 | * @finish: Finish wake-up of the platform. | 87 | * @finish: Finish wake-up of the platform. |
87 | * @finish is called right prior to calling device drivers' regular suspend | 88 | * @finish is called right prior to calling device drivers' regular suspend |
88 | * callbacks. | 89 | * callbacks. |
89 | * This callback is optional, but should be implemented by the platforms | 90 | * This callback is optional, but should be implemented by the platforms |
90 | * that implement @prepare(). If implemented, it is always called after | 91 | * that implement @prepare(). If implemented, it is always called after |
91 | * @enter() and @wake(), if implemented, even if any of them fails. | 92 | * @enter() and @wake(), even if any of them fails. It is executed after |
93 | * a failing @prepare. | ||
92 | * | 94 | * |
93 | * @end: Called by the PM core right after resuming devices, to indicate to | 95 | * @end: Called by the PM core right after resuming devices, to indicate to |
94 | * the platform that the system has returned to the working state or | 96 | * the platform that the system has returned to the working state or |
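Taken together, the corrected kernel-doc pins down the error-path ordering. A condensed sketch of the documented sequence, written here for illustration only (the kernel's real logic, including the device suspend calls omitted here, is suspend_enter() in kernel/power/suspend.c, patched further below):

    static int ordering_sketch(suspend_state_t state,
                               const struct platform_suspend_ops *ops)
    {
            int error = 0;

            if (ops->prepare) {
                    error = ops->prepare();
                    if (error)
                            goto Finish;    /* @finish runs after a failing @prepare */
            }
            if (ops->prepare_late) {
                    error = ops->prepare_late();
                    if (error)
                            goto Wake;      /* @enter is skipped, @wake still runs */
            }
            error = ops->enter(state);
     Wake:
            if (ops->wake)
                    ops->wake();
     Finish:
            if (ops->finish)
                    ops->finish();
            return error;
    }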
@@ -286,6 +288,13 @@ extern int unregister_pm_notifier(struct notifier_block *nb); | |||
286 | { .notifier_call = fn, .priority = pri }; \ | 288 | { .notifier_call = fn, .priority = pri }; \ |
287 | register_pm_notifier(&fn##_nb); \ | 289 | register_pm_notifier(&fn##_nb); \ |
288 | } | 290 | } |
291 | |||
292 | /* drivers/base/power/wakeup.c */ | ||
293 | extern bool events_check_enabled; | ||
294 | |||
295 | extern bool pm_check_wakeup_events(void); | ||
296 | extern bool pm_get_wakeup_count(unsigned long *count); | ||
297 | extern bool pm_save_wakeup_count(unsigned long count); | ||
289 | #else /* !CONFIG_PM_SLEEP */ | 298 | #else /* !CONFIG_PM_SLEEP */ |
290 | 299 | ||
291 | static inline int register_pm_notifier(struct notifier_block *nb) | 300 | static inline int register_pm_notifier(struct notifier_block *nb) |
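The three helpers declared above cooperate across the sleep transition: pm_get_wakeup_count() samples the event counter (blocking while events are still being processed), pm_save_wakeup_count() re-checks the count and arms events_check_enabled, and pm_check_wakeup_events() is the final gate before the system actually goes to sleep. A hedged sketch of a caller (simplified; the real call sites are in kernel/power/main.c, suspend.c and hibernate.c below):

    #include <linux/errno.h>
    #include <linux/suspend.h>

    static int sleep_handshake_sketch(void)
    {
            unsigned long cnt;

            if (!pm_get_wakeup_count(&cnt))
                    return -EINTR;   /* interrupted while events were in flight */

            /* ... user space finishes its pre-suspend work here ... */

            if (!pm_save_wakeup_count(cnt))
                    return -EAGAIN;  /* a wakeup event arrived in between */

            /* much later, with interrupts off, right before sleeping: */
            if (!pm_check_wakeup_events())
                    return -EAGAIN;  /* abort the transition */

            return 0;
    }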
diff --git a/include/sound/pcm.h b/include/sound/pcm.h index dd76cdede64d..6e3a29732dc4 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h | |||
@@ -366,7 +366,7 @@ struct snd_pcm_substream { | |||
366 | int number; | 366 | int number; |
367 | char name[32]; /* substream name */ | 367 | char name[32]; /* substream name */ |
368 | int stream; /* stream (direction) */ | 368 | int stream; /* stream (direction) */ |
369 | struct pm_qos_request_list *latency_pm_qos_req; /* pm_qos request */ | 369 | struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */ |
370 | size_t buffer_bytes_max; /* limit ring buffer size */ | 370 | size_t buffer_bytes_max; /* limit ring buffer size */ |
371 | struct snd_dma_buffer dma_buffer; | 371 | struct snd_dma_buffer dma_buffer; |
372 | unsigned int dma_buf_id; | 372 | unsigned int dma_buf_id; |
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index f42d3f737a33..996a4dec5f96 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c | |||
@@ -48,59 +48,49 @@ | |||
48 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock | 48 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock |
49 | * held, taken with _irqsave. One lock to rule them all | 49 | * held, taken with _irqsave. One lock to rule them all |
50 | */ | 50 | */ |
51 | struct pm_qos_request_list { | 51 | enum pm_qos_type { |
52 | struct list_head list; | 52 | PM_QOS_MAX, /* return the largest value */ |
53 | union { | 53 | PM_QOS_MIN /* return the smallest value */ |
54 | s32 value; | ||
55 | s32 usec; | ||
56 | s32 kbps; | ||
57 | }; | ||
58 | int pm_qos_class; | ||
59 | }; | 54 | }; |
60 | 55 | ||
61 | static s32 max_compare(s32 v1, s32 v2); | ||
62 | static s32 min_compare(s32 v1, s32 v2); | ||
63 | |||
64 | struct pm_qos_object { | 56 | struct pm_qos_object { |
65 | struct pm_qos_request_list requests; | 57 | struct plist_head requests; |
66 | struct blocking_notifier_head *notifiers; | 58 | struct blocking_notifier_head *notifiers; |
67 | struct miscdevice pm_qos_power_miscdev; | 59 | struct miscdevice pm_qos_power_miscdev; |
68 | char *name; | 60 | char *name; |
69 | s32 default_value; | 61 | s32 default_value; |
70 | atomic_t target_value; | 62 | enum pm_qos_type type; |
71 | s32 (*comparitor)(s32, s32); | ||
72 | }; | 63 | }; |
73 | 64 | ||
65 | static DEFINE_SPINLOCK(pm_qos_lock); | ||
66 | |||
74 | static struct pm_qos_object null_pm_qos; | 67 | static struct pm_qos_object null_pm_qos; |
75 | static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); | 68 | static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); |
76 | static struct pm_qos_object cpu_dma_pm_qos = { | 69 | static struct pm_qos_object cpu_dma_pm_qos = { |
77 | .requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)}, | 70 | .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock), |
78 | .notifiers = &cpu_dma_lat_notifier, | 71 | .notifiers = &cpu_dma_lat_notifier, |
79 | .name = "cpu_dma_latency", | 72 | .name = "cpu_dma_latency", |
80 | .default_value = 2000 * USEC_PER_SEC, | 73 | .default_value = 2000 * USEC_PER_SEC, |
81 | .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), | 74 | .type = PM_QOS_MIN, |
82 | .comparitor = min_compare | ||
83 | }; | 75 | }; |
84 | 76 | ||
85 | static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); | 77 | static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); |
86 | static struct pm_qos_object network_lat_pm_qos = { | 78 | static struct pm_qos_object network_lat_pm_qos = { |
87 | .requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)}, | 79 | .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock), |
88 | .notifiers = &network_lat_notifier, | 80 | .notifiers = &network_lat_notifier, |
89 | .name = "network_latency", | 81 | .name = "network_latency", |
90 | .default_value = 2000 * USEC_PER_SEC, | 82 | .default_value = 2000 * USEC_PER_SEC, |
91 | .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), | 83 | .type = PM_QOS_MIN |
92 | .comparitor = min_compare | ||
93 | }; | 84 | }; |
94 | 85 | ||
95 | 86 | ||
96 | static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); | 87 | static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); |
97 | static struct pm_qos_object network_throughput_pm_qos = { | 88 | static struct pm_qos_object network_throughput_pm_qos = { |
98 | .requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)}, | 89 | .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock), |
99 | .notifiers = &network_throughput_notifier, | 90 | .notifiers = &network_throughput_notifier, |
100 | .name = "network_throughput", | 91 | .name = "network_throughput", |
101 | .default_value = 0, | 92 | .default_value = 0, |
102 | .target_value = ATOMIC_INIT(0), | 93 | .type = PM_QOS_MAX, |
103 | .comparitor = max_compare | ||
104 | }; | 94 | }; |
105 | 95 | ||
106 | 96 | ||
@@ -111,8 +101,6 @@ static struct pm_qos_object *pm_qos_array[] = { | |||
111 | &network_throughput_pm_qos | 101 | &network_throughput_pm_qos |
112 | }; | 102 | }; |
113 | 103 | ||
114 | static DEFINE_SPINLOCK(pm_qos_lock); | ||
115 | |||
116 | static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | 104 | static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, |
117 | size_t count, loff_t *f_pos); | 105 | size_t count, loff_t *f_pos); |
118 | static int pm_qos_power_open(struct inode *inode, struct file *filp); | 106 | static int pm_qos_power_open(struct inode *inode, struct file *filp); |
@@ -124,46 +112,55 @@ static const struct file_operations pm_qos_power_fops = { | |||
124 | .release = pm_qos_power_release, | 112 | .release = pm_qos_power_release, |
125 | }; | 113 | }; |
126 | 114 | ||
127 | /* static helper functions */ | 115 | /* unlocked internal variant */ |
128 | static s32 max_compare(s32 v1, s32 v2) | 116 | static inline int pm_qos_get_value(struct pm_qos_object *o) |
129 | { | 117 | { |
130 | return max(v1, v2); | 118 | if (plist_head_empty(&o->requests)) |
131 | } | 119 | return o->default_value; |
132 | 120 | ||
133 | static s32 min_compare(s32 v1, s32 v2) | 121 | switch (o->type) { |
134 | { | 122 | case PM_QOS_MIN: |
135 | return min(v1, v2); | 123 | return plist_last(&o->requests)->prio; |
136 | } | ||
137 | 124 | ||
125 | case PM_QOS_MAX: | ||
126 | return plist_first(&o->requests)->prio; | ||
138 | 127 | ||
139 | static void update_target(int pm_qos_class) | 128 | default: |
129 | /* runtime check for not using enum */ | ||
130 | BUG(); | ||
131 | } | ||
132 | } | ||
133 | |||
134 | static void update_target(struct pm_qos_object *o, struct plist_node *node, | ||
135 | int del, int value) | ||
140 | { | 136 | { |
141 | s32 extreme_value; | ||
142 | struct pm_qos_request_list *node; | ||
143 | unsigned long flags; | 137 | unsigned long flags; |
144 | int call_notifier = 0; | 138 | int prev_value, curr_value; |
145 | 139 | ||
146 | spin_lock_irqsave(&pm_qos_lock, flags); | 140 | spin_lock_irqsave(&pm_qos_lock, flags); |
147 | extreme_value = pm_qos_array[pm_qos_class]->default_value; | 141 | prev_value = pm_qos_get_value(o); |
148 | list_for_each_entry(node, | 142 | /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */ |
149 | &pm_qos_array[pm_qos_class]->requests.list, list) { | 143 | if (value != PM_QOS_DEFAULT_VALUE) { |
150 | extreme_value = pm_qos_array[pm_qos_class]->comparitor( | 144 | /* |
151 | extreme_value, node->value); | 145 | * to change the list, we atomically remove, reinit |
152 | } | 146 | * with new value and add, then see if the extremal |
153 | if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) != | 147 | * changed |
154 | extreme_value) { | 148 | */ |
155 | call_notifier = 1; | 149 | plist_del(node, &o->requests); |
156 | atomic_set(&pm_qos_array[pm_qos_class]->target_value, | 150 | plist_node_init(node, value); |
157 | extreme_value); | 151 | plist_add(node, &o->requests); |
158 | pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class, | 152 | } else if (del) { |
159 | atomic_read(&pm_qos_array[pm_qos_class]->target_value)); | 153 | plist_del(node, &o->requests); |
154 | } else { | ||
155 | plist_add(node, &o->requests); | ||
160 | } | 156 | } |
157 | curr_value = pm_qos_get_value(o); | ||
161 | spin_unlock_irqrestore(&pm_qos_lock, flags); | 158 | spin_unlock_irqrestore(&pm_qos_lock, flags); |
162 | 159 | ||
163 | if (call_notifier) | 160 | if (prev_value != curr_value) |
164 | blocking_notifier_call_chain( | 161 | blocking_notifier_call_chain(o->notifiers, |
165 | pm_qos_array[pm_qos_class]->notifiers, | 162 | (unsigned long)curr_value, |
166 | (unsigned long) extreme_value, NULL); | 163 | NULL); |
167 | } | 164 | } |
168 | 165 | ||
169 | static int register_pm_qos_misc(struct pm_qos_object *qos) | 166 | static int register_pm_qos_misc(struct pm_qos_object *qos) |
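update_target() relies on the plist invariant that nodes stay sorted by ->prio, so an extremal value always sits at one end of the list and pm_qos_get_value() reads it in O(1), where the old comparitor loop walked every request. A minimal sketch of the remove/reinit/re-add dance (demo_lock and demo_head are made-up names; the real code does this under pm_qos_lock):

    #include <linux/plist.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static struct plist_head demo_head =
            PLIST_HEAD_INIT(demo_head, demo_lock);

    static void demo_requeue(struct plist_node *node, int new_prio)
    {
            plist_del(node, &demo_head);            /* a node's prio is fixed... */
            plist_node_init(node, new_prio);        /* ...so reinit it... */
            plist_add(node, &demo_head);            /* ...and reinsert, still sorted */
            /* plist_first() and plist_last() now yield the two extremes */
    }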
@@ -196,10 +193,23 @@ static int find_pm_qos_object_by_minor(int minor) | |||
196 | */ | 193 | */ |
197 | int pm_qos_request(int pm_qos_class) | 194 | int pm_qos_request(int pm_qos_class) |
198 | { | 195 | { |
199 | return atomic_read(&pm_qos_array[pm_qos_class]->target_value); | 196 | unsigned long flags; |
197 | int value; | ||
198 | |||
199 | spin_lock_irqsave(&pm_qos_lock, flags); | ||
200 | value = pm_qos_get_value(pm_qos_array[pm_qos_class]); | ||
201 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
202 | |||
203 | return value; | ||
200 | } | 204 | } |
201 | EXPORT_SYMBOL_GPL(pm_qos_request); | 205 | EXPORT_SYMBOL_GPL(pm_qos_request); |
202 | 206 | ||
207 | int pm_qos_request_active(struct pm_qos_request_list *req) | ||
208 | { | ||
209 | return req->pm_qos_class != 0; | ||
210 | } | ||
211 | EXPORT_SYMBOL_GPL(pm_qos_request_active); | ||
212 | |||
203 | /** | 213 | /** |
204 | * pm_qos_add_request - inserts new qos request into the list | 214 | * pm_qos_add_request - inserts new qos request into the list |
205 | * @pm_qos_class: identifies which list of qos request to use | 215 | * @pm_qos_class: identifies which list of qos request to use |
@@ -211,27 +221,23 @@ EXPORT_SYMBOL_GPL(pm_qos_request); | |||
211 | * element as a handle for use in updating and removal. Caller needs to save | 221 | * element as a handle for use in updating and removal. Caller needs to save |
212 | * this handle for later use. | 222 | * this handle for later use. |
213 | */ | 223 | */ |
214 | struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value) | 224 | void pm_qos_add_request(struct pm_qos_request_list *dep, |
225 | int pm_qos_class, s32 value) | ||
215 | { | 226 | { |
216 | struct pm_qos_request_list *dep; | 227 | struct pm_qos_object *o = pm_qos_array[pm_qos_class]; |
217 | unsigned long flags; | 228 | int new_value; |
218 | 229 | ||
219 | dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL); | 230 | if (pm_qos_request_active(dep)) { |
220 | if (dep) { | 231 | WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); |
221 | if (value == PM_QOS_DEFAULT_VALUE) | 232 | return; |
222 | dep->value = pm_qos_array[pm_qos_class]->default_value; | ||
223 | else | ||
224 | dep->value = value; | ||
225 | dep->pm_qos_class = pm_qos_class; | ||
226 | |||
227 | spin_lock_irqsave(&pm_qos_lock, flags); | ||
228 | list_add(&dep->list, | ||
229 | &pm_qos_array[pm_qos_class]->requests.list); | ||
230 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
231 | update_target(pm_qos_class); | ||
232 | } | 233 | } |
233 | 234 | if (value == PM_QOS_DEFAULT_VALUE) | |
234 | return dep; | 235 | new_value = o->default_value; |
236 | else | ||
237 | new_value = value; | ||
238 | plist_node_init(&dep->list, new_value); | ||
239 | dep->pm_qos_class = pm_qos_class; | ||
240 | update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); | ||
235 | } | 241 | } |
236 | EXPORT_SYMBOL_GPL(pm_qos_add_request); | 242 | EXPORT_SYMBOL_GPL(pm_qos_add_request); |
237 | 243 | ||
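Note how the allocation has moved out of the core: callers now embed the request object and pass it in, and it must stay alive for as long as the request is registered. pm_qos_request_active() tells an added object from a never-added one via pm_qos_class (0 is PM_QOS_RESERVED), which is why the storage must start zeroed (static, or kzalloc() as in pm_qos_power_open() below) and why pm_qos_remove_request() memsets the object afterwards. A sketch of typical driver usage under the new API (the foo_* names are invented):

    #include <linux/pm_qos_params.h>

    static struct pm_qos_request_list foo_qos_req;  /* static => zeroed => inactive */

    static void foo_start(void)
    {
            /* cap CPU/DMA latency at 50 us while the device is busy */
            pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY, 50);
    }

    static void foo_retune(s32 usecs)
    {
            pm_qos_update_request(&foo_qos_req, usecs);
    }

    static void foo_stop(void)
    {
            pm_qos_remove_request(&foo_qos_req);    /* object is zeroed, reusable */
    }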
@@ -246,27 +252,28 @@ EXPORT_SYMBOL_GPL(pm_qos_add_request); | |||
246 | * Attempts are made to make this code callable on hot code paths. | 252 | * Attempts are made to make this code callable on hot code paths. |
247 | */ | 253 | */ |
248 | void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, | 254 | void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, |
249 | s32 new_value) | 255 | s32 new_value) |
250 | { | 256 | { |
251 | unsigned long flags; | ||
252 | int pending_update = 0; | ||
253 | s32 temp; | 257 | s32 temp; |
258 | struct pm_qos_object *o; | ||
259 | |||
260 | if (!pm_qos_req) /*guard against callers passing in null */ | ||
261 | return; | ||
254 | 262 | ||
255 | if (pm_qos_req) { /*guard against callers passing in null */ | 263 | if (!pm_qos_request_active(pm_qos_req)) { |
256 | spin_lock_irqsave(&pm_qos_lock, flags); | 264 | WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); |
257 | if (new_value == PM_QOS_DEFAULT_VALUE) | 265 | return; |
258 | temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value; | ||
259 | else | ||
260 | temp = new_value; | ||
261 | |||
262 | if (temp != pm_qos_req->value) { | ||
263 | pending_update = 1; | ||
264 | pm_qos_req->value = temp; | ||
265 | } | ||
266 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
267 | if (pending_update) | ||
268 | update_target(pm_qos_req->pm_qos_class); | ||
269 | } | 266 | } |
267 | |||
268 | o = pm_qos_array[pm_qos_req->pm_qos_class]; | ||
269 | |||
270 | if (new_value == PM_QOS_DEFAULT_VALUE) | ||
271 | temp = o->default_value; | ||
272 | else | ||
273 | temp = new_value; | ||
274 | |||
275 | if (temp != pm_qos_req->list.prio) | ||
276 | update_target(o, &pm_qos_req->list, 0, temp); | ||
270 | } | 277 | } |
271 | EXPORT_SYMBOL_GPL(pm_qos_update_request); | 278 | EXPORT_SYMBOL_GPL(pm_qos_update_request); |
272 | 279 | ||
@@ -280,19 +287,20 @@ EXPORT_SYMBOL_GPL(pm_qos_update_request); | |||
280 | */ | 287 | */ |
281 | void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) | 288 | void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) |
282 | { | 289 | { |
283 | unsigned long flags; | 290 | struct pm_qos_object *o; |
284 | int qos_class; | ||
285 | 291 | ||
286 | if (pm_qos_req == NULL) | 292 | if (pm_qos_req == NULL) |
287 | return; | 293 | return; |
288 | /* silent return to keep pcm code cleaner */ | 294 | /* silent return to keep pcm code cleaner */ |
289 | 295 | ||
290 | qos_class = pm_qos_req->pm_qos_class; | 296 | if (!pm_qos_request_active(pm_qos_req)) { |
291 | spin_lock_irqsave(&pm_qos_lock, flags); | 297 | WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); |
292 | list_del(&pm_qos_req->list); | 298 | return; |
293 | kfree(pm_qos_req); | 299 | } |
294 | spin_unlock_irqrestore(&pm_qos_lock, flags); | 300 | |
295 | update_target(qos_class); | 301 | o = pm_qos_array[pm_qos_req->pm_qos_class]; |
302 | update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); | ||
303 | memset(pm_qos_req, 0, sizeof(*pm_qos_req)); | ||
296 | } | 304 | } |
297 | EXPORT_SYMBOL_GPL(pm_qos_remove_request); | 305 | EXPORT_SYMBOL_GPL(pm_qos_remove_request); |
298 | 306 | ||
@@ -340,8 +348,12 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp) | |||
340 | 348 | ||
341 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); | 349 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); |
342 | if (pm_qos_class >= 0) { | 350 | if (pm_qos_class >= 0) { |
343 | filp->private_data = (void *) pm_qos_add_request(pm_qos_class, | 351 | struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL); |
344 | PM_QOS_DEFAULT_VALUE); | 352 | if (!req) |
353 | return -ENOMEM; | ||
354 | |||
355 | pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); | ||
356 | filp->private_data = req; | ||
345 | 357 | ||
346 | if (filp->private_data) | 358 | if (filp->private_data) |
347 | return 0; | 359 | return 0; |
@@ -353,8 +365,9 @@ static int pm_qos_power_release(struct inode *inode, struct file *filp) | |||
353 | { | 365 | { |
354 | struct pm_qos_request_list *req; | 366 | struct pm_qos_request_list *req; |
355 | 367 | ||
356 | req = (struct pm_qos_request_list *)filp->private_data; | 368 | req = filp->private_data; |
357 | pm_qos_remove_request(req); | 369 | pm_qos_remove_request(req); |
370 | kfree(req); | ||
358 | 371 | ||
359 | return 0; | 372 | return 0; |
360 | } | 373 | } |
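From user space the misc device keeps its old contract: opening /dev/cpu_dma_latency (or one of the network nodes) registers a request at the default value, writing a raw 32-bit value updates it, and closing the file drops it. A user-space sketch (error handling trimmed):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static int hold_cpu_dma_latency(int32_t usecs)
    {
            int fd = open("/dev/cpu_dma_latency", O_RDWR);

            if (fd < 0)
                    return -1;
            /* the device expects exactly sizeof(s32) bytes */
            if (write(fd, &usecs, sizeof(usecs)) != sizeof(usecs)) {
                    close(fd);
                    return -1;
            }
            return fd;      /* keep it open; close(fd) removes the request */
    }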
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index aa9e916da4d5..d26f04e92743 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -277,7 +277,7 @@ static int create_image(int platform_mode) | |||
277 | goto Enable_irqs; | 277 | goto Enable_irqs; |
278 | } | 278 | } |
279 | 279 | ||
280 | if (hibernation_test(TEST_CORE)) | 280 | if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events()) |
281 | goto Power_up; | 281 | goto Power_up; |
282 | 282 | ||
283 | in_suspend = 1; | 283 | in_suspend = 1; |
@@ -288,8 +288,10 @@ static int create_image(int platform_mode) | |||
288 | error); | 288 | error); |
289 | /* Restore control flow magically appears here */ | 289 | /* Restore control flow magically appears here */ |
290 | restore_processor_state(); | 290 | restore_processor_state(); |
291 | if (!in_suspend) | 291 | if (!in_suspend) { |
292 | events_check_enabled = false; | ||
292 | platform_leave(platform_mode); | 293 | platform_leave(platform_mode); |
294 | } | ||
293 | 295 | ||
294 | Power_up: | 296 | Power_up: |
295 | sysdev_resume(); | 297 | sysdev_resume(); |
@@ -328,7 +330,7 @@ int hibernation_snapshot(int platform_mode) | |||
328 | 330 | ||
329 | error = platform_begin(platform_mode); | 331 | error = platform_begin(platform_mode); |
330 | if (error) | 332 | if (error) |
331 | return error; | 333 | goto Close; |
332 | 334 | ||
333 | /* Preallocate image memory before shutting down devices. */ | 335 | /* Preallocate image memory before shutting down devices. */ |
334 | error = hibernate_preallocate_memory(); | 336 | error = hibernate_preallocate_memory(); |
@@ -511,18 +513,24 @@ int hibernation_platform_enter(void) | |||
511 | 513 | ||
512 | local_irq_disable(); | 514 | local_irq_disable(); |
513 | sysdev_suspend(PMSG_HIBERNATE); | 515 | sysdev_suspend(PMSG_HIBERNATE); |
516 | if (!pm_check_wakeup_events()) { | ||
517 | error = -EAGAIN; | ||
518 | goto Power_up; | ||
519 | } | ||
520 | |||
514 | hibernation_ops->enter(); | 521 | hibernation_ops->enter(); |
515 | /* We should never get here */ | 522 | /* We should never get here */ |
516 | while (1); | 523 | while (1); |
517 | 524 | ||
518 | /* | 525 | Power_up: |
519 | * We don't need to reenable the nonboot CPUs or resume consoles, since | 526 | sysdev_resume(); |
520 | * the system is going to be halted anyway. | 527 | local_irq_enable(); |
521 | */ | 528 | enable_nonboot_cpus(); |
529 | |||
522 | Platform_finish: | 530 | Platform_finish: |
523 | hibernation_ops->finish(); | 531 | hibernation_ops->finish(); |
524 | 532 | ||
525 | dpm_suspend_noirq(PMSG_RESTORE); | 533 | dpm_resume_noirq(PMSG_RESTORE); |
526 | 534 | ||
527 | Resume_devices: | 535 | Resume_devices: |
528 | entering_platform_hibernation = false; | 536 | entering_platform_hibernation = false; |
diff --git a/kernel/power/main.c b/kernel/power/main.c index b58800b21fc0..62b0bc6e4983 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -204,6 +204,60 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
204 | 204 | ||
205 | power_attr(state); | 205 | power_attr(state); |
206 | 206 | ||
207 | #ifdef CONFIG_PM_SLEEP | ||
208 | /* | ||
209 | * The 'wakeup_count' attribute, along with the functions defined in | ||
210 | * drivers/base/power/wakeup.c, provides a means by which wakeup events can be | ||
211 | * handled in a non-racy way. | ||
212 | * | ||
213 | * If a wakeup event occurs when the system is in a sleep state, it simply is | ||
214 | * woken up. In turn, if an event that would wake the system up from a sleep | ||
215 | * state occurs when it is undergoing a transition to that sleep state, the | ||
216 | * transition should be aborted. Moreover, if such an event occurs when the | ||
217 | * system is in the working state, an attempt to start a transition to the | ||
218 | * given sleep state should fail for a certain period after the detection of | ||
219 | * the event. Using the 'state' attribute alone is not sufficient to satisfy | ||
220 | * these requirements, because a wakeup event may occur exactly when 'state' | ||
221 | * is being written to and may be delivered to user space right before it is | ||
222 | * frozen, so the event will remain only partially processed until the system is | ||
223 | * woken up by another event. In particular, it won't cause the transition to | ||
224 | * a sleep state to be aborted. | ||
225 | * | ||
226 | * This difficulty may be overcome if user space uses 'wakeup_count' before | ||
227 | * writing to 'state'. It first should read from 'wakeup_count' and store | ||
228 | * the read value. Then, after carrying out its own preparations for the system | ||
229 | * transition to a sleep state, it should write the stored value to | ||
230 | * 'wakeup_count'. If that fails, at least one wakeup event has occurred since | ||
231 | * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it | ||
232 | * is allowed to write to 'state', but the transition will be aborted if there | ||
233 | * are any wakeup events detected after 'wakeup_count' was written to. | ||
234 | */ | ||
235 | |||
236 | static ssize_t wakeup_count_show(struct kobject *kobj, | ||
237 | struct kobj_attribute *attr, | ||
238 | char *buf) | ||
239 | { | ||
240 | unsigned long val; | ||
241 | |||
242 | return pm_get_wakeup_count(&val) ? sprintf(buf, "%lu\n", val) : -EINTR; | ||
243 | } | ||
244 | |||
245 | static ssize_t wakeup_count_store(struct kobject *kobj, | ||
246 | struct kobj_attribute *attr, | ||
247 | const char *buf, size_t n) | ||
248 | { | ||
249 | unsigned long val; | ||
250 | |||
251 | if (sscanf(buf, "%lu", &val) == 1) { | ||
252 | if (pm_save_wakeup_count(val)) | ||
253 | return n; | ||
254 | } | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | |||
258 | power_attr(wakeup_count); | ||
259 | #endif /* CONFIG_PM_SLEEP */ | ||
260 | |||
207 | #ifdef CONFIG_PM_TRACE | 261 | #ifdef CONFIG_PM_TRACE |
208 | int pm_trace_enabled; | 262 | int pm_trace_enabled; |
209 | 263 | ||
@@ -236,6 +290,7 @@ static struct attribute * g[] = { | |||
236 | #endif | 290 | #endif |
237 | #ifdef CONFIG_PM_SLEEP | 291 | #ifdef CONFIG_PM_SLEEP |
238 | &pm_async_attr.attr, | 292 | &pm_async_attr.attr, |
293 | &wakeup_count_attr.attr, | ||
239 | #ifdef CONFIG_PM_DEBUG | 294 | #ifdef CONFIG_PM_DEBUG |
240 | &pm_test_attr.attr, | 295 | &pm_test_attr.attr, |
241 | #endif | 296 | #endif |
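The comment block above only describes the user-space side abstractly; the sketch below follows that protocol step by step (the sysfs paths are real, the surrounding program is invented):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static int suspend_with_wakeup_check(void)
    {
            char buf[32];
            int fd, n;

            fd = open("/sys/power/wakeup_count", O_RDWR);
            if (fd < 0)
                    return -1;
            n = read(fd, buf, sizeof(buf) - 1);  /* may block on in-flight events */
            if (n <= 0)
                    goto fail;
            buf[n] = '\0';

            /* ... carry out user-space pre-suspend preparations here ... */

            if (write(fd, buf, strlen(buf)) < 0)
                    goto fail;      /* a wakeup event occurred: do not suspend */
            close(fd);

            fd = open("/sys/power/state", O_WRONLY);
            if (fd < 0)
                    return -1;
            if (write(fd, "mem", 3) < 0)
                    goto fail;      /* transition aborted by a late wakeup event */
            close(fd);
            return 0;
    fail:
            close(fd);
            return -1;
    }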
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index f37cb7dd4402..7335952ee473 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -136,19 +136,19 @@ static int suspend_enter(suspend_state_t state) | |||
136 | if (suspend_ops->prepare) { | 136 | if (suspend_ops->prepare) { |
137 | error = suspend_ops->prepare(); | 137 | error = suspend_ops->prepare(); |
138 | if (error) | 138 | if (error) |
139 | return error; | 139 | goto Platform_finish; |
140 | } | 140 | } |
141 | 141 | ||
142 | error = dpm_suspend_noirq(PMSG_SUSPEND); | 142 | error = dpm_suspend_noirq(PMSG_SUSPEND); |
143 | if (error) { | 143 | if (error) { |
144 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | 144 | printk(KERN_ERR "PM: Some devices failed to power down\n"); |
145 | goto Platfrom_finish; | 145 | goto Platform_finish; |
146 | } | 146 | } |
147 | 147 | ||
148 | if (suspend_ops->prepare_late) { | 148 | if (suspend_ops->prepare_late) { |
149 | error = suspend_ops->prepare_late(); | 149 | error = suspend_ops->prepare_late(); |
150 | if (error) | 150 | if (error) |
151 | goto Power_up_devices; | 151 | goto Platform_wake; |
152 | } | 152 | } |
153 | 153 | ||
154 | if (suspend_test(TEST_PLATFORM)) | 154 | if (suspend_test(TEST_PLATFORM)) |
@@ -163,8 +163,10 @@ static int suspend_enter(suspend_state_t state) | |||
163 | 163 | ||
164 | error = sysdev_suspend(PMSG_SUSPEND); | 164 | error = sysdev_suspend(PMSG_SUSPEND); |
165 | if (!error) { | 165 | if (!error) { |
166 | if (!suspend_test(TEST_CORE)) | 166 | if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) { |
167 | error = suspend_ops->enter(state); | 167 | error = suspend_ops->enter(state); |
168 | events_check_enabled = false; | ||
169 | } | ||
168 | sysdev_resume(); | 170 | sysdev_resume(); |
169 | } | 171 | } |
170 | 172 | ||
@@ -178,10 +180,9 @@ static int suspend_enter(suspend_state_t state) | |||
178 | if (suspend_ops->wake) | 180 | if (suspend_ops->wake) |
179 | suspend_ops->wake(); | 181 | suspend_ops->wake(); |
180 | 182 | ||
181 | Power_up_devices: | ||
182 | dpm_resume_noirq(PMSG_RESUME); | 183 | dpm_resume_noirq(PMSG_RESUME); |
183 | 184 | ||
184 | Platfrom_finish: | 185 | Platform_finish: |
185 | if (suspend_ops->finish) | 186 | if (suspend_ops->finish) |
186 | suspend_ops->finish(); | 187 | suspend_ops->finish(); |
187 | 188 | ||
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index b0bb21778391..7c3ae83e41d7 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -32,7 +32,7 @@ | |||
32 | /* | 32 | /* |
33 | * The swap map is a data structure used for keeping track of each page | 33 | * The swap map is a data structure used for keeping track of each page |
34 | * written to a swap partition. It consists of many swap_map_page | 34 | * written to a swap partition. It consists of many swap_map_page |
35 | * structures that contain each an array of MAP_PAGE_SIZE swap entries. | 35 | * structures that contain each an array of MAP_PAGE_ENTRIES swap entries. |
36 | * These structures are stored on the swap and linked together with the | 36 | * These structures are stored on the swap and linked together with the |
37 | * help of the .next_swap member. | 37 | * help of the .next_swap member. |
38 | * | 38 | * |
@@ -148,7 +148,7 @@ sector_t alloc_swapdev_block(int swap) | |||
148 | 148 | ||
149 | /** | 149 | /** |
150 | * free_all_swap_pages - free swap pages allocated for saving image data. | 150 | * free_all_swap_pages - free swap pages allocated for saving image data. |
151 | * It also frees the extents used to register which swap entres had been | 151 | * It also frees the extents used to register which swap entries had been |
152 | * allocated. | 152 | * allocated. |
153 | */ | 153 | */ |
154 | 154 | ||
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 303ac04ff6e4..a3b2a6479246 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -451,13 +451,11 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, | |||
451 | snd_pcm_timer_resolution_change(substream); | 451 | snd_pcm_timer_resolution_change(substream); |
452 | runtime->status->state = SNDRV_PCM_STATE_SETUP; | 452 | runtime->status->state = SNDRV_PCM_STATE_SETUP; |
453 | 453 | ||
454 | if (substream->latency_pm_qos_req) { | 454 | if (pm_qos_request_active(&substream->latency_pm_qos_req)) |
455 | pm_qos_remove_request(substream->latency_pm_qos_req); | 455 | pm_qos_remove_request(&substream->latency_pm_qos_req); |
456 | substream->latency_pm_qos_req = NULL; | ||
457 | } | ||
458 | if ((usecs = period_to_usecs(runtime)) >= 0) | 456 | if ((usecs = period_to_usecs(runtime)) >= 0) |
459 | substream->latency_pm_qos_req = pm_qos_add_request( | 457 | pm_qos_add_request(&substream->latency_pm_qos_req, |
460 | PM_QOS_CPU_DMA_LATENCY, usecs); | 458 | PM_QOS_CPU_DMA_LATENCY, usecs); |
461 | return 0; | 459 | return 0; |
462 | _error: | 460 | _error: |
463 | /* hardware might be unusable from this time, | 461 |
@@ -512,8 +510,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream) | |||
512 | if (substream->ops->hw_free) | 510 | if (substream->ops->hw_free) |
513 | result = substream->ops->hw_free(substream); | 511 | result = substream->ops->hw_free(substream); |
514 | runtime->status->state = SNDRV_PCM_STATE_OPEN; | 512 | runtime->status->state = SNDRV_PCM_STATE_OPEN; |
515 | pm_qos_remove_request(substream->latency_pm_qos_req); | 513 | pm_qos_remove_request(&substream->latency_pm_qos_req); |
516 | substream->latency_pm_qos_req = NULL; | ||
517 | return result; | 514 | return result; |
518 | } | 515 | } |
519 | 516 | ||
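Because snd_pcm_hw_params() can run more than once on an open stream, the embedded request is dropped and re-added each time, with pm_qos_request_active() replacing the old NULL-pointer test on the removed pointer field. The guarded re-add pattern in general form (a sketch; req stands for any embedded pm_qos_request_list):

    if (pm_qos_request_active(&req))
            pm_qos_remove_request(&req);    /* drop the stale request first */
    if (new_usecs >= 0)
            pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, new_usecs);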