author     Linus Torvalds <torvalds@linux-foundation.org>  2010-08-04 14:14:36 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-04 14:14:36 -0400
commit     f46e9913faeebcb6bd29edf795f12b60acbff171 (patch)
tree       1ed8871d0ebd638094d27317de1d8a53712ae15a /kernel
parent     8d91530c5fd7f0b1e8c4ddfea2905e55a178569b (diff)
parent     8d4b9d1bfef117862a2889dec4dac227068544c9 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
PM / Runtime: Add runtime PM statistics (v3)
PM / Runtime: Make runtime_status attribute not debug-only (v. 2)
PM: Do not use dynamically allocated objects in pm_wakeup_event()
PM / Suspend: Fix ordering of calls in suspend error paths
PM / Hibernate: Fix snapshot error code path
PM / Hibernate: Fix hibernation_platform_enter()
pm_qos: Get rid of the allocation in pm_qos_add_request()
pm_qos: Reimplement using plists
plist: Add plist_last
PM: Make it possible to avoid races between wakeup and system sleep
PNPACPI: Add support for remote wakeup
PM: describe kernel policy regarding wakeup defaults (v. 2)
PM / Hibernate: Fix typos in comments in kernel/power/swap.c
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/pm_qos_params.c    | 215
-rw-r--r--  kernel/power/hibernate.c  |  24
-rw-r--r--  kernel/power/main.c       |  55
-rw-r--r--  kernel/power/suspend.c    |  13
-rw-r--r--  kernel/power/swap.c       |   4
5 files changed, 194 insertions(+), 117 deletions(-)
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index f42d3f737a33..996a4dec5f96 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -48,59 +48,49 @@
  * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
  * held, taken with _irqsave.  One lock to rule them all
  */
-struct pm_qos_request_list {
-	struct list_head list;
-	union {
-		s32 value;
-		s32 usec;
-		s32 kbps;
-	};
-	int pm_qos_class;
+enum pm_qos_type {
+	PM_QOS_MAX,		/* return the largest value */
+	PM_QOS_MIN		/* return the smallest value */
 };
 
-static s32 max_compare(s32 v1, s32 v2);
-static s32 min_compare(s32 v1, s32 v2);
-
 struct pm_qos_object {
-	struct pm_qos_request_list requests;
+	struct plist_head requests;
 	struct blocking_notifier_head *notifiers;
 	struct miscdevice pm_qos_power_miscdev;
 	char *name;
 	s32 default_value;
-	atomic_t target_value;
-	s32 (*comparitor)(s32, s32);
+	enum pm_qos_type type;
 };
 
+static DEFINE_SPINLOCK(pm_qos_lock);
+
 static struct pm_qos_object null_pm_qos;
 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
 static struct pm_qos_object cpu_dma_pm_qos = {
-	.requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
+	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
 	.notifiers = &cpu_dma_lat_notifier,
 	.name = "cpu_dma_latency",
 	.default_value = 2000 * USEC_PER_SEC,
-	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
-	.comparitor = min_compare
+	.type = PM_QOS_MIN,
 };
 
 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
 static struct pm_qos_object network_lat_pm_qos = {
-	.requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
+	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
 	.notifiers = &network_lat_notifier,
 	.name = "network_latency",
 	.default_value = 2000 * USEC_PER_SEC,
-	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
-	.comparitor = min_compare
+	.type = PM_QOS_MIN
 };
 
 
 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
 static struct pm_qos_object network_throughput_pm_qos = {
-	.requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
+	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
 	.notifiers = &network_throughput_notifier,
 	.name = "network_throughput",
 	.default_value = 0,
-	.target_value = ATOMIC_INIT(0),
-	.comparitor = max_compare
+	.type = PM_QOS_MAX,
 };
 
 
@@ -111,8 +101,6 @@ static struct pm_qos_object *pm_qos_array[] = {
 	&network_throughput_pm_qos
 };
 
-static DEFINE_SPINLOCK(pm_qos_lock);
-
 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
 		size_t count, loff_t *f_pos);
 static int pm_qos_power_open(struct inode *inode, struct file *filp);
@@ -124,46 +112,55 @@ static const struct file_operations pm_qos_power_fops = {
 	.release = pm_qos_power_release,
 };
 
-/* static helper functions */
-static s32 max_compare(s32 v1, s32 v2)
+/* unlocked internal variant */
+static inline int pm_qos_get_value(struct pm_qos_object *o)
 {
-	return max(v1, v2);
-}
+	if (plist_head_empty(&o->requests))
+		return o->default_value;
 
-static s32 min_compare(s32 v1, s32 v2)
-{
-	return min(v1, v2);
-}
+	switch (o->type) {
+	case PM_QOS_MIN:
+		return plist_last(&o->requests)->prio;
 
+	case PM_QOS_MAX:
+		return plist_first(&o->requests)->prio;
 
-static void update_target(int pm_qos_class)
+	default:
+		/* runtime check for not using enum */
+		BUG();
+	}
+}
+
+static void update_target(struct pm_qos_object *o, struct plist_node *node,
+			  int del, int value)
 {
-	s32 extreme_value;
-	struct pm_qos_request_list *node;
 	unsigned long flags;
-	int call_notifier = 0;
+	int prev_value, curr_value;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	extreme_value = pm_qos_array[pm_qos_class]->default_value;
-	list_for_each_entry(node,
-			&pm_qos_array[pm_qos_class]->requests.list, list) {
-		extreme_value = pm_qos_array[pm_qos_class]->comparitor(
-				extreme_value, node->value);
-	}
-	if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) !=
-			extreme_value) {
-		call_notifier = 1;
-		atomic_set(&pm_qos_array[pm_qos_class]->target_value,
-				extreme_value);
-		pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class,
-			atomic_read(&pm_qos_array[pm_qos_class]->target_value));
+	prev_value = pm_qos_get_value(o);
+	/* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */
+	if (value != PM_QOS_DEFAULT_VALUE) {
+		/*
+		 * to change the list, we atomically remove, reinit
+		 * with new value and add, then see if the extremal
+		 * changed
+		 */
+		plist_del(node, &o->requests);
+		plist_node_init(node, value);
+		plist_add(node, &o->requests);
+	} else if (del) {
+		plist_del(node, &o->requests);
+	} else {
+		plist_add(node, &o->requests);
 	}
+	curr_value = pm_qos_get_value(o);
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
-	if (call_notifier)
-		blocking_notifier_call_chain(
-				pm_qos_array[pm_qos_class]->notifiers,
-				(unsigned long) extreme_value, NULL);
+	if (prev_value != curr_value)
+		blocking_notifier_call_chain(o->notifiers,
+					     (unsigned long)curr_value,
+					     NULL);
 }
 
 static int register_pm_qos_misc(struct pm_qos_object *qos)
@@ -196,10 +193,23 @@ static int find_pm_qos_object_by_minor(int minor)
  */
 int pm_qos_request(int pm_qos_class)
 {
-	return atomic_read(&pm_qos_array[pm_qos_class]->target_value);
+	unsigned long flags;
+	int value;
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+	value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	return value;
 }
 EXPORT_SYMBOL_GPL(pm_qos_request);
 
+int pm_qos_request_active(struct pm_qos_request_list *req)
+{
+	return req->pm_qos_class != 0;
+}
+EXPORT_SYMBOL_GPL(pm_qos_request_active);
+
 /**
  * pm_qos_add_request - inserts new qos request into the list
  * @pm_qos_class: identifies which list of qos request to us
@@ -211,27 +221,23 @@ EXPORT_SYMBOL_GPL(pm_qos_request);
  * element as a handle for use in updating and removal.  Call needs to save
  * this handle for later use.
  */
-struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value)
+void pm_qos_add_request(struct pm_qos_request_list *dep,
+			int pm_qos_class, s32 value)
 {
-	struct pm_qos_request_list *dep;
-	unsigned long flags;
+	struct pm_qos_object *o = pm_qos_array[pm_qos_class];
+	int new_value;
 
-	dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL);
-	if (dep) {
-		if (value == PM_QOS_DEFAULT_VALUE)
-			dep->value = pm_qos_array[pm_qos_class]->default_value;
-		else
-			dep->value = value;
-		dep->pm_qos_class = pm_qos_class;
-
-		spin_lock_irqsave(&pm_qos_lock, flags);
-		list_add(&dep->list,
-			&pm_qos_array[pm_qos_class]->requests.list);
-		spin_unlock_irqrestore(&pm_qos_lock, flags);
-		update_target(pm_qos_class);
+	if (pm_qos_request_active(dep)) {
+		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
+		return;
 	}
-
-	return dep;
+	if (value == PM_QOS_DEFAULT_VALUE)
+		new_value = o->default_value;
+	else
+		new_value = value;
+	plist_node_init(&dep->list, new_value);
+	dep->pm_qos_class = pm_qos_class;
+	update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE);
 }
 EXPORT_SYMBOL_GPL(pm_qos_add_request);
 
@@ -246,27 +252,28 @@ EXPORT_SYMBOL_GPL(pm_qos_add_request);
  * Attempts are made to make this code callable on hot code paths.
  */
 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
 			s32 new_value)
 {
-	unsigned long flags;
-	int pending_update = 0;
 	s32 temp;
+	struct pm_qos_object *o;
+
+	if (!pm_qos_req) /*guard against callers passing in null */
+		return;
 
-	if (pm_qos_req) { /*guard against callers passing in null */
-		spin_lock_irqsave(&pm_qos_lock, flags);
-		if (new_value == PM_QOS_DEFAULT_VALUE)
-			temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value;
-		else
-			temp = new_value;
-
-		if (temp != pm_qos_req->value) {
-			pending_update = 1;
-			pm_qos_req->value = temp;
-		}
-		spin_unlock_irqrestore(&pm_qos_lock, flags);
-		if (pending_update)
-			update_target(pm_qos_req->pm_qos_class);
+	if (!pm_qos_request_active(pm_qos_req)) {
+		WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
+		return;
 	}
+
+	o = pm_qos_array[pm_qos_req->pm_qos_class];
+
+	if (new_value == PM_QOS_DEFAULT_VALUE)
+		temp = o->default_value;
+	else
+		temp = new_value;
+
+	if (temp != pm_qos_req->list.prio)
+		update_target(o, &pm_qos_req->list, 0, temp);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
@@ -280,19 +287,20 @@ EXPORT_SYMBOL_GPL(pm_qos_update_request);
  */
 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
 {
-	unsigned long flags;
-	int qos_class;
+	struct pm_qos_object *o;
 
 	if (pm_qos_req == NULL)
 		return;
 		/* silent return to keep pcm code cleaner */
 
-	qos_class = pm_qos_req->pm_qos_class;
-	spin_lock_irqsave(&pm_qos_lock, flags);
-	list_del(&pm_qos_req->list);
-	kfree(pm_qos_req);
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-	update_target(qos_class);
+	if (!pm_qos_request_active(pm_qos_req)) {
+		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+		return;
+	}
+
+	o = pm_qos_array[pm_qos_req->pm_qos_class];
+	update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE);
+	memset(pm_qos_req, 0, sizeof(*pm_qos_req));
 }
 EXPORT_SYMBOL_GPL(pm_qos_remove_request);
 
@@ -340,8 +348,12 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 
 	pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
 	if (pm_qos_class >= 0) {
-		filp->private_data = (void *) pm_qos_add_request(pm_qos_class,
-			PM_QOS_DEFAULT_VALUE);
+		struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req)
+			return -ENOMEM;
+
+		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
+		filp->private_data = req;
 
 		if (filp->private_data)
 			return 0;
@@ -353,8 +365,9 @@ static int pm_qos_power_release(struct inode *inode, struct file *filp)
 {
 	struct pm_qos_request_list *req;
 
-	req = (struct pm_qos_request_list *)filp->private_data;
+	req = filp->private_data;
 	pm_qos_remove_request(req);
+	kfree(req);
 
 	return 0;
 }
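
The pm_qos rework above inverts the allocation contract: callers now embed struct pm_qos_request_list in their own storage and pass it in, instead of receiving a pointer allocated inside pm_qos_add_request(). A minimal caller-side sketch of the reworked API (the foo_* names and the microsecond figures are illustrative, not taken from this commit; PM_QOS_CPU_DMA_LATENCY is the class constant from include/linux/pm_qos_params.h):

#include <linux/pm_qos_params.h>

static struct pm_qos_request_list foo_qos_req;	/* hypothetical driver state */

static int foo_open(void)
{
	/* No allocation inside the call; the plist node lives in foo_qos_req. */
	pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY, 100 /* usec */);
	return 0;
}

static void foo_busy(void)
{
	/* Re-inserts the plist node with a new prio; notifiers run only if
	 * the aggregate value across all requests actually changes. */
	pm_qos_update_request(&foo_qos_req, 50);
}

static void foo_close(void)
{
	/* Removal zeroes the object but does not free it; the caller owns it. */
	pm_qos_remove_request(&foo_qos_req);
}

Note that pm_qos_remove_request() memset()s the request precisely so that pm_qos_request_active() reports it inactive afterwards, allowing the same object to be re-added later.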
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index aa9e916da4d5..d26f04e92743 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -277,7 +277,7 @@ static int create_image(int platform_mode)
 		goto Enable_irqs;
 	}
 
-	if (hibernation_test(TEST_CORE))
+	if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events())
 		goto Power_up;
 
 	in_suspend = 1;
@@ -288,8 +288,10 @@ static int create_image(int platform_mode)
 			error);
 	/* Restore control flow magically appears here */
 	restore_processor_state();
-	if (!in_suspend)
+	if (!in_suspend) {
+		events_check_enabled = false;
 		platform_leave(platform_mode);
+	}
 
  Power_up:
 	sysdev_resume();
@@ -328,7 +330,7 @@ int hibernation_snapshot(int platform_mode)
 
 	error = platform_begin(platform_mode);
 	if (error)
-		return error;
+		goto Close;
 
 	/* Preallocate image memory before shutting down devices. */
 	error = hibernate_preallocate_memory();
@@ -511,18 +513,24 @@ int hibernation_platform_enter(void)
 
 	local_irq_disable();
 	sysdev_suspend(PMSG_HIBERNATE);
+	if (!pm_check_wakeup_events()) {
+		error = -EAGAIN;
+		goto Power_up;
+	}
+
 	hibernation_ops->enter();
 	/* We should never get here */
 	while (1);
 
-	/*
-	 * We don't need to reenable the nonboot CPUs or resume consoles, since
-	 * the system is going to be halted anyway.
-	 */
+ Power_up:
+	sysdev_resume();
+	local_irq_enable();
+	enable_nonboot_cpus();
+
  Platform_finish:
 	hibernation_ops->finish();
 
-	dpm_suspend_noirq(PMSG_RESTORE);
+	dpm_resume_noirq(PMSG_RESTORE);
 
  Resume_devices:
 	entering_platform_hibernation = false;
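
The same guard appears in three transition paths in this merge: create_image() and hibernation_platform_enter() above, and suspend_enter() below. Condensed, the shared idiom looks like this (a sketch assembled from the hunks, not a literal excerpt; the label name follows the hibernation path):

	local_irq_disable();
	sysdev_suspend(msg);		/* PMSG_SUSPEND or PMSG_HIBERNATE */
	if (!pm_check_wakeup_events()) {
		/* a wakeup source fired during the transition: back out */
		error = -EAGAIN;
		goto Power_up;
	}
	/* ...enter the sleep state; does not return on success... */

 Power_up:
	sysdev_resume();
	local_irq_enable();

The point is that the check runs with interrupts already disabled, after the last suspend callback, so no wakeup event can slip in between the check and the actual entry into the sleep state.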
diff --git a/kernel/power/main.c b/kernel/power/main.c
index b58800b21fc0..62b0bc6e4983 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -204,6 +204,60 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
 
 power_attr(state);
 
+#ifdef CONFIG_PM_SLEEP
+/*
+ * The 'wakeup_count' attribute, along with the functions defined in
+ * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
+ * handled in a non-racy way.
+ *
+ * If a wakeup event occurs when the system is in a sleep state, it simply is
+ * woken up.  In turn, if an event that would wake the system up from a sleep
+ * state occurs when it is undergoing a transition to that sleep state, the
+ * transition should be aborted.  Moreover, if such an event occurs when the
+ * system is in the working state, an attempt to start a transition to the
+ * given sleep state should fail during a certain period after the detection of
+ * the event.  Using the 'state' attribute alone is not sufficient to satisfy
+ * these requirements, because a wakeup event may occur exactly when 'state'
+ * is being written to and may be delivered to user space right before it is
+ * frozen, so the event will remain only partially processed until the system is
+ * woken up by another event.  In particular, it won't cause the transition to
+ * a sleep state to be aborted.
+ *
+ * This difficulty may be overcome if user space uses 'wakeup_count' before
+ * writing to 'state'.  It first should read from 'wakeup_count' and store
+ * the read value.  Then, after carrying out its own preparations for the system
+ * transition to a sleep state, it should write the stored value to
+ * 'wakeup_count'.  If that fails, at least one wakeup event has occurred since
+ * 'wakeup_count' was read and 'state' should not be written to.  Otherwise, it
+ * is allowed to write to 'state', but the transition will be aborted if there
+ * are any wakeup events detected after 'wakeup_count' was written to.
+ */
+
+static ssize_t wakeup_count_show(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 char *buf)
+{
+	unsigned long val;
+
+	return pm_get_wakeup_count(&val) ? sprintf(buf, "%lu\n", val) : -EINTR;
+}
+
+static ssize_t wakeup_count_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buf, size_t n)
+{
+	unsigned long val;
+
+	if (sscanf(buf, "%lu", &val) == 1) {
+		if (pm_save_wakeup_count(val))
+			return n;
+	}
+	return -EINVAL;
+}
+
+power_attr(wakeup_count);
+#endif /* CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_PM_TRACE
 int pm_trace_enabled;
 
@@ -236,6 +290,7 @@ static struct attribute * g[] = {
 #endif
 #ifdef CONFIG_PM_SLEEP
 	&pm_async_attr.attr,
+	&wakeup_count_attr.attr,
 #ifdef CONFIG_PM_DEBUG
 	&pm_test_attr.attr,
 #endif
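
The comment block added above prescribes a specific user-space sequence. A sketch of that sequence as a small C program (the helper name and error handling are illustrative; the sysfs paths are the real attributes):

#include <stdio.h>

/* Returns 0 on success, -1 on failure. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");
	int ok;

	if (!f)
		return -1;
	ok = fprintf(f, "%s", val) >= 0;
	/* A stale count is reported as a write error (-EINVAL),
	 * which surfaces here via fclose()/fflush(). */
	ok = fclose(f) == 0 && ok;
	return ok ? 0 : -1;
}

int try_suspend(void)
{
	unsigned long count;
	char buf[32];
	FILE *f;

	/* 1. Read and remember the current wakeup count. */
	f = fopen("/sys/power/wakeup_count", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lu", &count) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	/* ... user-space preparations for suspend go here ... */

	/* 2. Write the saved value back; failure means a wakeup event
	 *    has occurred since step 1, so 'state' must not be written. */
	snprintf(buf, sizeof(buf), "%lu", count);
	if (write_str("/sys/power/wakeup_count", buf))
		return -1;

	/* 3. Now it is safe to initiate the transition. */
	return write_str("/sys/power/state", "mem");
}

The rejection in step 2 is exactly what pm_save_wakeup_count() implements in wakeup_count_store() above; any wakeup event after a successful write still aborts the transition via pm_check_wakeup_events().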
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index f37cb7dd4402..7335952ee473 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -136,19 +136,19 @@ static int suspend_enter(suspend_state_t state)
 	if (suspend_ops->prepare) {
 		error = suspend_ops->prepare();
 		if (error)
-			return error;
+			goto Platform_finish;
 	}
 
 	error = dpm_suspend_noirq(PMSG_SUSPEND);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
-		goto Platfrom_finish;
+		goto Platform_finish;
 	}
 
 	if (suspend_ops->prepare_late) {
 		error = suspend_ops->prepare_late();
 		if (error)
-			goto Power_up_devices;
+			goto Platform_wake;
 	}
 
 	if (suspend_test(TEST_PLATFORM))
@@ -163,8 +163,10 @@ static int suspend_enter(suspend_state_t state)
 
 	error = sysdev_suspend(PMSG_SUSPEND);
 	if (!error) {
-		if (!suspend_test(TEST_CORE))
+		if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) {
 			error = suspend_ops->enter(state);
+			events_check_enabled = false;
+		}
 		sysdev_resume();
 	}
 
@@ -178,10 +180,9 @@ static int suspend_enter(suspend_state_t state)
 	if (suspend_ops->wake)
 		suspend_ops->wake();
 
- Power_up_devices:
 	dpm_resume_noirq(PMSG_RESUME);
 
- Platfrom_finish:
+ Platform_finish:
 	if (suspend_ops->finish)
 		suspend_ops->finish();
 
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b0bb21778391..7c3ae83e41d7 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -32,7 +32,7 @@
 /*
  * The swap map is a data structure used for keeping track of each page
  * written to a swap partition.  It consists of many swap_map_page
- * structures that contain each an array of MAP_PAGE_SIZE swap entries.
+ * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
  * These structures are stored on the swap and linked together with the
  * help of the .next_swap member.
  *
@@ -148,7 +148,7 @@ sector_t alloc_swapdev_block(int swap)
 
 /**
  * free_all_swap_pages - free swap pages allocated for saving image data.
- * It also frees the extents used to register which swap entres had been
+ * It also frees the extents used to register which swap entries had been
  * allocated.
  */
 