Diffstat (limited to 'drivers/base/power/wakeup.c')
-rw-r--r--  drivers/base/power/wakeup.c  697
1 file changed, 598 insertions(+), 99 deletions(-)
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb594facfc3f..84f7c7d5a098 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -11,7 +11,12 @@
 #include <linux/sched.h>
 #include <linux/capability.h>
 #include <linux/suspend.h>
-#include <linux/pm.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include "power.h"
+
+#define TIMEOUT		100
 
 /*
  * If set, the suspend/hibernate code will abort transitions to a sleep state
@@ -19,19 +24,287 @@
  */
 bool events_check_enabled;
 
-/* The counter of registered wakeup events. */
-static unsigned long event_count;
-/* A preserved old value of event_count. */
-static unsigned long saved_event_count;
-/* The counter of wakeup events being processed. */
-static unsigned long events_in_progress;
+/*
+ * Combined counters of registered wakeup events and wakeup events in progress.
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+static atomic_t combined_event_count = ATOMIC_INIT(0);
+
+#define IN_PROGRESS_BITS	(sizeof(int) * 4)
+#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
+
+static void split_counters(unsigned int *cnt, unsigned int *inpr)
+{
+	unsigned int comb = atomic_read(&combined_event_count);
+
+	*cnt = (comb >> IN_PROGRESS_BITS);
+	*inpr = comb & MAX_IN_PROGRESS;
+}
+
+/* A preserved old value of the events counter. */
+static unsigned int saved_count;
 
 static DEFINE_SPINLOCK(events_lock);
 
 static void pm_wakeup_timer_fn(unsigned long data);
 
-static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0);
-static unsigned long events_timer_expires;
+static LIST_HEAD(wakeup_sources);
+
+/**
+ * wakeup_source_create - Create a struct wakeup_source object.
+ * @name: Name of the new wakeup source.
+ */
+struct wakeup_source *wakeup_source_create(const char *name)
+{
+	struct wakeup_source *ws;
+
+	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+	if (!ws)
+		return NULL;
+
+	spin_lock_init(&ws->lock);
+	if (name)
+		ws->name = kstrdup(name, GFP_KERNEL);
+
+	return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_create);
+
+/**
+ * wakeup_source_destroy - Destroy a struct wakeup_source object.
+ * @ws: Wakeup source to destroy.
+ */
+void wakeup_source_destroy(struct wakeup_source *ws)
+{
+	if (!ws)
+		return;
+
+	spin_lock_irq(&ws->lock);
+	while (ws->active) {
+		spin_unlock_irq(&ws->lock);
+
+		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
+
+		spin_lock_irq(&ws->lock);
+	}
+	spin_unlock_irq(&ws->lock);
+
+	kfree(ws->name);
+	kfree(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_destroy);
+
+/**
+ * wakeup_source_add - Add given object to the list of wakeup sources.
+ * @ws: Wakeup source object to add to the list.
+ */
+void wakeup_source_add(struct wakeup_source *ws)
+{
+	if (WARN_ON(!ws))
+		return;
+
+	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+	ws->active = false;
+
+	spin_lock_irq(&events_lock);
+	list_add_rcu(&ws->entry, &wakeup_sources);
+	spin_unlock_irq(&events_lock);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+/**
+ * wakeup_source_remove - Remove given object from the wakeup sources list.
+ * @ws: Wakeup source object to remove from the list.
+ */
+void wakeup_source_remove(struct wakeup_source *ws)
+{
+	if (WARN_ON(!ws))
+		return;
+
+	spin_lock_irq(&events_lock);
+	list_del_rcu(&ws->entry);
+	spin_unlock_irq(&events_lock);
+	synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(wakeup_source_remove);
+
+/**
+ * wakeup_source_register - Create wakeup source and add it to the list.
+ * @name: Name of the wakeup source to register.
+ */
+struct wakeup_source *wakeup_source_register(const char *name)
+{
+	struct wakeup_source *ws;
+
+	ws = wakeup_source_create(name);
+	if (ws)
+		wakeup_source_add(ws);
+
+	return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_register);
+
+/**
+ * wakeup_source_unregister - Remove wakeup source from the list and remove it.
+ * @ws: Wakeup source object to unregister.
+ */
+void wakeup_source_unregister(struct wakeup_source *ws)
+{
+	wakeup_source_remove(ws);
+	wakeup_source_destroy(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_unregister);
+
+/**
+ * device_wakeup_attach - Attach a wakeup source object to a device object.
+ * @dev: Device to handle.
+ * @ws: Wakeup source object to attach to @dev.
+ *
+ * This causes @dev to be treated as a wakeup device.
+ */
+static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
+{
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		spin_unlock_irq(&dev->power.lock);
+		return -EEXIST;
+	}
+	dev->power.wakeup = ws;
+	spin_unlock_irq(&dev->power.lock);
+	return 0;
+}
+
+/**
+ * device_wakeup_enable - Enable given device to be a wakeup source.
+ * @dev: Device to handle.
+ *
+ * Create a wakeup source object, register it and attach it to @dev.
+ */
+int device_wakeup_enable(struct device *dev)
+{
+	struct wakeup_source *ws;
+	int ret;
+
+	if (!dev || !dev->power.can_wakeup)
+		return -EINVAL;
+
+	ws = wakeup_source_register(dev_name(dev));
+	if (!ws)
+		return -ENOMEM;
+
+	ret = device_wakeup_attach(dev, ws);
+	if (ret)
+		wakeup_source_unregister(ws);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_enable);
+
+/**
+ * device_wakeup_detach - Detach a device's wakeup source object from it.
+ * @dev: Device to detach the wakeup source object from.
+ *
+ * After it returns, @dev will not be treated as a wakeup device any more.
+ */
+static struct wakeup_source *device_wakeup_detach(struct device *dev)
+{
+	struct wakeup_source *ws;
+
+	spin_lock_irq(&dev->power.lock);
+	ws = dev->power.wakeup;
+	dev->power.wakeup = NULL;
+	spin_unlock_irq(&dev->power.lock);
+	return ws;
+}
+
+/**
+ * device_wakeup_disable - Do not regard a device as a wakeup source any more.
+ * @dev: Device to handle.
+ *
+ * Detach the @dev's wakeup source object from it, unregister this wakeup source
+ * object and destroy it.
+ */
+int device_wakeup_disable(struct device *dev)
+{
+	struct wakeup_source *ws;
+
+	if (!dev || !dev->power.can_wakeup)
+		return -EINVAL;
+
+	ws = device_wakeup_detach(dev);
+	if (ws)
+		wakeup_source_unregister(ws);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_disable);
+
+/**
+ * device_set_wakeup_capable - Set/reset device wakeup capability flag.
+ * @dev: Device to handle.
+ * @capable: Whether or not @dev is capable of waking up the system from sleep.
+ *
+ * If @capable is set, set the @dev's power.can_wakeup flag and add its
+ * wakeup-related attributes to sysfs. Otherwise, unset the @dev's
+ * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
+ *
+ * This function may sleep and it can't be called from any context where
+ * sleeping is not allowed.
+ */
+void device_set_wakeup_capable(struct device *dev, bool capable)
+{
+	if (!!dev->power.can_wakeup == !!capable)
+		return;
+
+	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
+		if (capable) {
+			if (wakeup_sysfs_add(dev))
+				return;
+		} else {
+			wakeup_sysfs_remove(dev);
+		}
+	}
+	dev->power.can_wakeup = capable;
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
+
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled. The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc.
+ */
+int device_init_wakeup(struct device *dev, bool enable)
+{
+	int ret = 0;
+
+	if (enable) {
+		device_set_wakeup_capable(dev, true);
+		ret = device_wakeup_enable(dev);
+	} else {
+		device_set_wakeup_capable(dev, false);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(device_init_wakeup);
+
+/**
+ * device_set_wakeup_enable - Enable or disable a device to wake up the system.
+ * @dev: Device to handle.
+ */
+int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+	if (!dev || !dev->power.can_wakeup)
+		return -EINVAL;
+
+	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
 
 /*
  * The functions below use the observation that each wakeup event starts a
@@ -55,139 +328,282 @@ static unsigned long events_timer_expires
  * knowledge, however, may not be available to it, so it can simply specify time
  * to wait before the system can be suspended and pass it as the second
  * argument of pm_wakeup_event().
+ *
+ * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
+ * "no suspend" period will be ended either by the pm_relax(), or by the timer
+ * function executed when the timer expires, whichever comes first.
+ */
+
+/**
+ * wakeup_source_activate - Mark given wakeup source as active.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and, if @ws has just been activated, notify the PM
+ * core of the event by incrementing the counter of wakeup events being
+ * processed.
  */
+static void wakeup_source_activate(struct wakeup_source *ws)
+{
+	ws->active = true;
+	ws->active_count++;
+	ws->timer_expires = jiffies;
+	ws->last_time = ktime_get();
+
+	/* Increment the counter of events in progress. */
+	atomic_inc(&combined_event_count);
+}
+
+/**
+ * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_stay_awake(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+	ws->event_count++;
+	if (!ws->active)
+		wakeup_source_activate(ws);
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_stay_awake);
 
 /**
  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
  * @dev: Device the wakeup event is related to.
  *
- * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the
- * counter of wakeup events being processed. If @dev is not NULL, the counter
- * of wakeup events related to @dev is incremented too.
+ * Notify the PM core of a wakeup event (signaled by @dev) by calling
+ * __pm_stay_awake for the @dev's wakeup source object.
  *
  * Call this function after detecting of a wakeup event if pm_relax() is going
  * to be called directly after processing the event (and possibly passing it to
  * user space for further processing).
- *
- * It is safe to call this function from interrupt context.
  */
 void pm_stay_awake(struct device *dev)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&events_lock, flags);
-	if (dev)
-		dev->power.wakeup_count++;
+	if (!dev)
+		return;
 
-	events_in_progress++;
-	spin_unlock_irqrestore(&events_lock, flags);
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_stay_awake(dev->power.wakeup);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 }
+EXPORT_SYMBOL_GPL(pm_stay_awake);
 
 /**
- * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
+ * @ws: Wakeup source to handle.
  *
- * Notify the PM core that a wakeup event has been processed by decrementing
- * the counter of wakeup events being processed and incrementing the counter
- * of registered wakeup events.
+ * Update the @ws' statistics and notify the PM core that the wakeup source has
+ * become inactive by decrementing the counter of wakeup events being processed
+ * and incrementing the counter of registered wakeup events.
+ */
+static void wakeup_source_deactivate(struct wakeup_source *ws)
+{
+	ktime_t duration;
+	ktime_t now;
+
+	ws->relax_count++;
+	/*
+	 * __pm_relax() may be called directly or from a timer function.
+	 * If it is called directly right after the timer function has been
+	 * started, but before the timer function calls __pm_relax(), it is
+	 * possible that __pm_stay_awake() will be called in the meantime and
+	 * will set ws->active. Then, ws->active may be cleared immediately
+	 * by the __pm_relax() called from the timer function, but in such a
+	 * case ws->relax_count will be different from ws->active_count.
+	 */
+	if (ws->relax_count != ws->active_count) {
+		ws->relax_count--;
+		return;
+	}
+
+	ws->active = false;
+
+	now = ktime_get();
+	duration = ktime_sub(now, ws->last_time);
+	ws->total_time = ktime_add(ws->total_time, duration);
+	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
+		ws->max_time = duration;
+
+	del_timer(&ws->timer);
+
+	/*
+	 * Increment the counter of registered wakeup events and decrement the
+	 * counter of wakeup events in progress simultaneously.
+	 */
+	atomic_add(MAX_IN_PROGRESS, &combined_event_count);
+}
+
+/**
+ * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @ws: Wakeup source object associated with the source of the event.
  *
  * Call this function for wakeup events whose processing started with calling
- * pm_stay_awake().
+ * __pm_stay_awake().
  *
  * It is safe to call it from interrupt context.
  */
-void pm_relax(void)
+void __pm_relax(struct wakeup_source *ws)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&events_lock, flags);
-	if (events_in_progress) {
-		events_in_progress--;
-		event_count++;
-	}
-	spin_unlock_irqrestore(&events_lock, flags);
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+	if (ws->active)
+		wakeup_source_deactivate(ws);
+	spin_unlock_irqrestore(&ws->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__pm_relax);
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @dev: Device that signaled the event.
+ *
+ * Execute __pm_relax() for the @dev's wakeup source object.
+ */
+void pm_relax(struct device *dev)
+{
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_relax(dev->power.wakeup);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_relax);
 
 /**
  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ * @data: Address of the wakeup source object associated with the event source.
  *
- * Decrease the counter of wakeup events being processed after it was increased
- * by pm_wakeup_event().
+ * Call __pm_relax() for the wakeup source whose address is stored in @data.
  */
 static void pm_wakeup_timer_fn(unsigned long data)
 {
+	__pm_relax((struct wakeup_source *)data);
+}
+
+/**
+ * __pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the event source.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Notify the PM core of a wakeup event whose source is @ws that will take
+ * approximately @msec milliseconds to be processed by the kernel. If @ws is
+ * not active, activate it. If @msec is nonzero, set up the @ws' timer to
+ * execute pm_wakeup_timer_fn() in future.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+{
 	unsigned long flags;
+	unsigned long expires;
 
-	spin_lock_irqsave(&events_lock, flags);
-	if (events_timer_expires
-	    && time_before_eq(events_timer_expires, jiffies)) {
-		events_in_progress--;
-		events_timer_expires = 0;
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	ws->event_count++;
+	if (!ws->active)
+		wakeup_source_activate(ws);
+
+	if (!msec) {
+		wakeup_source_deactivate(ws);
+		goto unlock;
 	}
-	spin_unlock_irqrestore(&events_lock, flags);
+
+	expires = jiffies + msecs_to_jiffies(msec);
+	if (!expires)
+		expires = 1;
+
+	if (time_after(expires, ws->timer_expires)) {
+		mod_timer(&ws->timer, expires);
+		ws->timer_expires = expires;
+	}
+
+ unlock:
+	spin_unlock_irqrestore(&ws->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__pm_wakeup_event);
+
 
 /**
  * pm_wakeup_event - Notify the PM core of a wakeup event.
  * @dev: Device the wakeup event is related to.
  * @msec: Anticipated event processing time (in milliseconds).
  *
- * Notify the PM core of a wakeup event (signaled by @dev) that will take
- * approximately @msec milliseconds to be processed by the kernel. Increment
- * the counter of registered wakeup events and (if @msec is nonzero) set up
- * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
- * timer has not been set up already, increment the counter of wakeup events
- * being processed). If @dev is not NULL, the counter of wakeup events related
- * to @dev is incremented too.
- *
- * It is safe to call this function from interrupt context.
+ * Call __pm_wakeup_event() for the @dev's wakeup source object.
  */
 void pm_wakeup_event(struct device *dev, unsigned int msec)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&events_lock, flags);
-	event_count++;
-	if (dev)
-		dev->power.wakeup_count++;
-
-	if (msec) {
-		unsigned long expires;
+	if (!dev)
+		return;
 
-		expires = jiffies + msecs_to_jiffies(msec);
-		if (!expires)
-			expires = 1;
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_wakeup_event(dev->power.wakeup, msec);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
-		if (!events_timer_expires
-		    || time_after(expires, events_timer_expires)) {
-			if (!events_timer_expires)
-				events_in_progress++;
+/**
+ * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
+ */
+static void pm_wakeup_update_hit_counts(void)
+{
+	unsigned long flags;
+	struct wakeup_source *ws;
 
-			mod_timer(&events_timer, expires);
-			events_timer_expires = expires;
-		}
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		spin_lock_irqsave(&ws->lock, flags);
+		if (ws->active)
+			ws->hit_count++;
+		spin_unlock_irqrestore(&ws->lock, flags);
 	}
-	spin_unlock_irqrestore(&events_lock, flags);
+	rcu_read_unlock();
 }
 
 /**
- * pm_check_wakeup_events - Check for new wakeup events.
+ * pm_wakeup_pending - Check if power transition in progress should be aborted.
  *
  * Compare the current number of registered wakeup events with its preserved
- * value from the past to check if new wakeup events have been registered since
- * the old value was stored. Check if the current number of wakeup events being
- * processed is zero.
+ * value from the past and return true if new wakeup events have been registered
+ * since the old value was stored. Also return true if the current number of
+ * wakeup events being processed is different from zero.
  */
-bool pm_check_wakeup_events(void)
+bool pm_wakeup_pending(void)
 {
 	unsigned long flags;
-	bool ret = true;
+	bool ret = false;
 
 	spin_lock_irqsave(&events_lock, flags);
 	if (events_check_enabled) {
-		ret = (event_count == saved_event_count) && !events_in_progress;
-		events_check_enabled = ret;
+		unsigned int cnt, inpr;
+
+		split_counters(&cnt, &inpr);
+		ret = (cnt != saved_count || inpr > 0);
+		events_check_enabled = !ret;
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
+	if (ret)
+		pm_wakeup_update_hit_counts();
 	return ret;
 }
 
@@ -198,29 +614,25 @@ bool pm_check_wakeup_events(void)
  * Store the number of registered wakeup events at the address in @count. Block
  * if the current number of wakeup events being processed is nonzero.
  *
- * Return false if the wait for the number of wakeup events being processed to
+ * Return 'false' if the wait for the number of wakeup events being processed to
  * drop down to zero has been interrupted by a signal (and the current number
- * of wakeup events being processed is still nonzero). Otherwise return true.
+ * of wakeup events being processed is still nonzero). Otherwise return 'true'.
  */
-bool pm_get_wakeup_count(unsigned long *count)
+bool pm_get_wakeup_count(unsigned int *count)
 {
-	bool ret;
-
-	spin_lock_irq(&events_lock);
-	if (capable(CAP_SYS_ADMIN))
-		events_check_enabled = false;
-
-	while (events_in_progress && !signal_pending(current)) {
-		spin_unlock_irq(&events_lock);
+	unsigned int cnt, inpr;
 
-		schedule_timeout_interruptible(msecs_to_jiffies(100));
-
-		spin_lock_irq(&events_lock);
+	for (;;) {
+		split_counters(&cnt, &inpr);
+		if (inpr == 0 || signal_pending(current))
+			break;
+		pm_wakeup_update_hit_counts();
+		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
 	}
-	*count = event_count;
-	ret = !events_in_progress;
-	spin_unlock_irq(&events_lock);
-	return ret;
+
+	split_counters(&cnt, &inpr);
+	*count = cnt;
+	return !inpr;
 }
 
 /**
@@ -229,19 +641,106 @@ bool pm_get_wakeup_count(unsigned long *count)
  *
  * If @count is equal to the current number of registered wakeup events and the
  * current number of wakeup events being processed is zero, store @count as the
- * old number of registered wakeup events to be used by pm_check_wakeup_events()
- * and return true. Otherwise return false.
+ * old number of registered wakeup events for pm_check_wakeup_events(), enable
+ * wakeup events detection and return 'true'. Otherwise disable wakeup events
+ * detection and return 'false'.
  */
-bool pm_save_wakeup_count(unsigned long count)
+bool pm_save_wakeup_count(unsigned int count)
 {
-	bool ret = false;
+	unsigned int cnt, inpr;
 
+	events_check_enabled = false;
 	spin_lock_irq(&events_lock);
-	if (count == event_count && !events_in_progress) {
-		saved_event_count = count;
+	split_counters(&cnt, &inpr);
+	if (cnt == count && inpr == 0) {
+		saved_count = count;
 		events_check_enabled = true;
-		ret = true;
 	}
 	spin_unlock_irq(&events_lock);
+	if (!events_check_enabled)
+		pm_wakeup_update_hit_counts();
+	return events_check_enabled;
+}
+
+static struct dentry *wakeup_sources_stats_dentry;
+
+/**
+ * print_wakeup_source_stats - Print wakeup source statistics information.
+ * @m: seq_file to print the statistics into.
+ * @ws: Wakeup source object to print the statistics for.
+ */
+static int print_wakeup_source_stats(struct seq_file *m,
+				     struct wakeup_source *ws)
+{
+	unsigned long flags;
+	ktime_t total_time;
+	ktime_t max_time;
+	unsigned long active_count;
+	ktime_t active_time;
+	int ret;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	total_time = ws->total_time;
+	max_time = ws->max_time;
+	active_count = ws->active_count;
+	if (ws->active) {
+		active_time = ktime_sub(ktime_get(), ws->last_time);
+		total_time = ktime_add(total_time, active_time);
+		if (active_time.tv64 > max_time.tv64)
+			max_time = active_time;
+	} else {
+		active_time = ktime_set(0, 0);
+	}
+
+	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
+			"%lld\t\t%lld\t\t%lld\t\t%lld\n",
+			ws->name, active_count, ws->event_count, ws->hit_count,
+			ktime_to_ms(active_time), ktime_to_ms(total_time),
+			ktime_to_ms(max_time), ktime_to_ms(ws->last_time));
+
+	spin_unlock_irqrestore(&ws->lock, flags);
+
 	return ret;
 }
+
+/**
+ * wakeup_sources_stats_show - Print wakeup sources statistics information.
+ * @m: seq_file to print the statistics into.
+ */
+static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
+{
+	struct wakeup_source *ws;
+
+	seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
+		"active_since\ttotal_time\tmax_time\tlast_change\n");
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+		print_wakeup_source_stats(m, ws);
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wakeup_sources_stats_show, NULL);
+}
+
+static const struct file_operations wakeup_sources_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = wakeup_sources_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init wakeup_sources_debugfs_init(void)
+{
+	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
+			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
+	return 0;
+}
+
+postcore_initcall(wakeup_sources_debugfs_init);
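
For illustration, a driver might use the interface added above roughly as follows. This is a minimal sketch and not part of the patch; the "my_*" names are hypothetical, while wakeup_source_register(), __pm_wakeup_event(), __pm_relax() and wakeup_source_unregister() are the calls introduced by this diff.

/* Illustrative sketch only (not part of the patch); "my_*" names are made up. */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct my_dev {
	struct wakeup_source *ws;	/* per-device wakeup source */
};

static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *md = data;

	/*
	 * Report a wakeup event and give the rest of the stack roughly
	 * 100 ms to process it; the "no suspend" period ends either when
	 * the timer set up by __pm_wakeup_event() expires or when
	 * __pm_relax() is called, whichever comes first.  Safe in
	 * interrupt context.
	 */
	__pm_wakeup_event(md->ws, 100);

	return IRQ_HANDLED;
}

static int my_dev_setup(struct my_dev *md)
{
	/* Create a named wakeup source and add it to the global list. */
	md->ws = wakeup_source_register("my_dev");
	return md->ws ? 0 : -ENOMEM;
}

static void my_dev_teardown(struct my_dev *md)
{
	/* Remove the wakeup source from the list and destroy it. */
	wakeup_source_unregister(md->ws);
}

If the driver itself knows when processing of an event finishes, it can instead bracket the work with __pm_stay_awake()/__pm_relax() rather than relying on the timeout.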