diff options
author | Rafael J. Wysocki <rjw@sisk.pl> | 2011-01-31 05:06:39 -0500 |
---|---|---|
committer | Rafael J. Wysocki <rjw@sisk.pl> | 2011-03-14 19:43:13 -0400 |
commit | 023d3779145ec6b7a0f38f19672a347b92feb74e (patch) | |
tree | 1ec78dc731dad2b8155faf6625617284f1b3b76d /drivers/base | |
parent | dc1b83ab08f1954335692cdcd499f78c94f4c42a (diff) |
PM / Wakeup: Combine atomic counters to avoid reordering issues
The memory barrier in wakeup_source_deactivate() is supposed to
prevent the callers of pm_wakeup_pending() and pm_get_wakeup_count()
from seeing the new value of events_in_progress (0, in particular)
and the old value of event_count at the same time. However, if
wakeup_source_deactivate() is executed by CPU0 and, for instance,
pm_wakeup_pending() is executed by CPU1, where both processors can
reorder operations, the memory barrier in wakeup_source_deactivate()
doesn't affect CPU1 which can reorder reads. In that case CPU1 may
very well decide to fetch event_count before it's modified and
events_in_progress after it's been updated, so pm_wakeup_pending()
may fail to detect a wakeup event. This issue can be addressed by
using a single atomic variable to store both events_in_progress
and event_count, so that they can be updated together in a single
atomic operation.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/power/wakeup.c | 61 |
1 files changed, 39 insertions, 22 deletions
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 8ec406d8f548..e5e73b5efc80 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -24,12 +24,26 @@ | |||
24 | */ | 24 | */ |
25 | bool events_check_enabled; | 25 | bool events_check_enabled; |
26 | 26 | ||
27 | /* The counter of registered wakeup events. */ | 27 | /* |
28 | static atomic_t event_count = ATOMIC_INIT(0); | 28 | * Combined counters of registered wakeup events and wakeup events in progress. |
29 | /* A preserved old value of event_count. */ | 29 | * They need to be modified together atomically, so it's better to use one |
30 | * atomic variable to hold them both. | ||
31 | */ | ||
32 | static atomic_t combined_event_count = ATOMIC_INIT(0); | ||
33 | |||
34 | #define IN_PROGRESS_BITS (sizeof(int) * 4) | ||
35 | #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) | ||
36 | |||
37 | static void split_counters(unsigned int *cnt, unsigned int *inpr) | ||
38 | { | ||
39 | unsigned int comb = atomic_read(&combined_event_count); | ||
40 | |||
41 | *cnt = (comb >> IN_PROGRESS_BITS); | ||
42 | *inpr = comb & MAX_IN_PROGRESS; | ||
43 | } | ||
44 | |||
45 | /* A preserved old value of the events counter. */ | ||
30 | static unsigned int saved_count; | 46 | static unsigned int saved_count; |
31 | /* The counter of wakeup events being processed. */ | ||
32 | static atomic_t events_in_progress = ATOMIC_INIT(0); | ||
33 | 47 | ||
34 | static DEFINE_SPINLOCK(events_lock); | 48 | static DEFINE_SPINLOCK(events_lock); |
35 | 49 | ||
@@ -307,7 +321,8 @@ static void wakeup_source_activate(struct wakeup_source *ws) | |||
307 | ws->timer_expires = jiffies; | 321 | ws->timer_expires = jiffies; |
308 | ws->last_time = ktime_get(); | 322 | ws->last_time = ktime_get(); |
309 | 323 | ||
310 | atomic_inc(&events_in_progress); | 324 | /* Increment the counter of events in progress. */ |
325 | atomic_inc(&combined_event_count); | ||
311 | } | 326 | } |
312 | 327 | ||
313 | /** | 328 | /** |
@@ -394,14 +409,10 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) | |||
394 | del_timer(&ws->timer); | 409 | del_timer(&ws->timer); |
395 | 410 | ||
396 | /* | 411 | /* |
397 | * event_count has to be incremented before events_in_progress is | 412 | * Increment the counter of registered wakeup events and decrement the |
398 | * modified, so that the callers of pm_check_wakeup_events() and | 413 | * counter of wakeup events in progress simultaneously. |
399 | * pm_save_wakeup_count() don't see the old value of event_count and | ||
400 | * events_in_progress equal to zero at the same time. | ||
401 | */ | 414 | */ |
402 | atomic_inc(&event_count); | 415 | atomic_add(MAX_IN_PROGRESS, &combined_event_count); |
403 | smp_mb__before_atomic_dec(); | ||
404 | atomic_dec(&events_in_progress); | ||
405 | } | 416 | } |
406 | 417 | ||
407 | /** | 418 | /** |
@@ -556,8 +567,10 @@ bool pm_wakeup_pending(void) | |||
556 | 567 | ||
557 | spin_lock_irqsave(&events_lock, flags); | 568 | spin_lock_irqsave(&events_lock, flags); |
558 | if (events_check_enabled) { | 569 | if (events_check_enabled) { |
559 | ret = ((unsigned int)atomic_read(&event_count) != saved_count) | 570 | unsigned int cnt, inpr; |
560 | || atomic_read(&events_in_progress); | 571 | |
572 | split_counters(&cnt, &inpr); | ||
573 | ret = (cnt != saved_count || inpr > 0); | ||
561 | events_check_enabled = !ret; | 574 | events_check_enabled = !ret; |
562 | } | 575 | } |
563 | spin_unlock_irqrestore(&events_lock, flags); | 576 | spin_unlock_irqrestore(&events_lock, flags); |
@@ -579,19 +592,22 @@ bool pm_wakeup_pending(void) | |||
579 | */ | 592 | */ |
580 | bool pm_get_wakeup_count(unsigned int *count) | 593 | bool pm_get_wakeup_count(unsigned int *count) |
581 | { | 594 | { |
582 | bool ret; | 595 | unsigned int cnt, inpr; |
583 | 596 | ||
584 | if (capable(CAP_SYS_ADMIN)) | 597 | if (capable(CAP_SYS_ADMIN)) |
585 | events_check_enabled = false; | 598 | events_check_enabled = false; |
586 | 599 | ||
587 | while (atomic_read(&events_in_progress) && !signal_pending(current)) { | 600 | for (;;) { |
601 | split_counters(&cnt, &inpr); | ||
602 | if (inpr == 0 || signal_pending(current)) | ||
603 | break; | ||
588 | pm_wakeup_update_hit_counts(); | 604 | pm_wakeup_update_hit_counts(); |
589 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | 605 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); |
590 | } | 606 | } |
591 | 607 | ||
592 | ret = !atomic_read(&events_in_progress); | 608 | split_counters(&cnt, &inpr); |
593 | *count = atomic_read(&event_count); | 609 | *count = cnt; |
594 | return ret; | 610 | return !inpr; |
595 | } | 611 | } |
596 | 612 | ||
597 | /** | 613 | /** |
@@ -605,11 +621,12 @@ bool pm_get_wakeup_count(unsigned int *count) | |||
605 | */ | 621 | */ |
606 | bool pm_save_wakeup_count(unsigned int count) | 622 | bool pm_save_wakeup_count(unsigned int count) |
607 | { | 623 | { |
624 | unsigned int cnt, inpr; | ||
608 | bool ret = false; | 625 | bool ret = false; |
609 | 626 | ||
610 | spin_lock_irq(&events_lock); | 627 | spin_lock_irq(&events_lock); |
611 | if (count == (unsigned int)atomic_read(&event_count) | 628 | split_counters(&cnt, &inpr); |
612 | && !atomic_read(&events_in_progress)) { | 629 | if (cnt == count && inpr == 0) { |
613 | saved_count = count; | 630 | saved_count = count; |
614 | events_check_enabled = true; | 631 | events_check_enabled = true; |
615 | ret = true; | 632 | ret = true; |