author	David S. Miller <davem@davemloft.net>	2016-05-04 00:52:29 -0400
committer	David S. Miller <davem@davemloft.net>	2016-05-04 00:52:29 -0400
commit	cba653210056cf47cc1969f831f05ddfb99ee2bd (patch)
tree	92d93a3eee5b12d77af3696b9da8026e71df5752 /kernel/workqueue.c
parent	26879da58711aa604a1b866cbeedd7e0f78f90ad (diff)
parent	7391daf2ffc780679d6ab3fad1db2619e5dd2c2a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
net/ipv4/ip_gre.c
Minor conflicts between tunnel bug fixes in net and
ipv6 tunnel cleanups in net-next.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad6..3bfdff06eea7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 */
 	smp_wmb();
 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	/*
+	 * The following mb guarantees that previous clear of a PENDING bit
+	 * will not be reordered with any speculative LOADS or STORES from
+	 * work->current_func, which is executed afterwards. This possible
+	 * reordering can lead to a missed execution on attempt to queue
+	 * the same @work. E.g. consider this case:
+	 *
+	 *   CPU#0                         CPU#1
+	 *   ----------------------------  --------------------------------
+	 *
+	 * 1  STORE event_indicated
+	 * 2  queue_work_on() {
+	 * 3    test_and_set_bit(PENDING)
+	 * 4 }                             set_..._and_clear_pending() {
+	 * 5                                 set_work_data() # clear bit
+	 * 6                                 smp_mb()
+	 * 7                               work->current_func() {
+	 * 8                                 LOAD event_indicated
+	 *                                 }
+	 *
+	 * Without an explicit full barrier speculative LOAD on line 8 can
+	 * be executed before CPU#0 does STORE on line 1. If that happens,
+	 * CPU#0 observes the PENDING bit is still set and a new execution
+	 * of @work is not queued, in the hope that CPU#1 will eventually
+	 * finish the queued @work. Meanwhile CPU#1 does not see that
+	 * event_indicated is set, because the speculative LOAD was
+	 * executed before the actual STORE.
+	 */
+	smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
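
For context, the scenario in the new comment can be sketched as a tiny workqueue user. This is a hypothetical illustration, not part of the commit: the names event_indicated, my_work, my_work_func, signal_event and race_demo_setup are invented for the sketch, while INIT_WORK(), queue_work(), system_wq and pr_info() are the real kernel API.

/*
 * Hypothetical reproducer for the race fixed above.  Only the
 * workqueue and printk calls are real; all other names are invented
 * for illustration.
 */
#include <linux/workqueue.h>
#include <linux/printk.h>

static bool event_indicated;
static struct work_struct my_work;

/* CPU#1 in the comment's diagram: the work callback. */
static void my_work_func(struct work_struct *work)
{
	/*
	 * Before the fix, this LOAD could be speculated to before the
	 * PENDING bit was cleared, so it could read a stale value even
	 * though the producer below had already stored it and, seeing
	 * PENDING still set, skipped requeueing the work.
	 */
	if (event_indicated)
		pr_info("event handled\n");
}

/* CPU#0 in the diagram: signal an event and (re)queue the work. */
static void signal_event(void)
{
	event_indicated = true;          /* line 1: STORE event_indicated */
	queue_work(system_wq, &my_work); /* lines 2-4: may find PENDING set */
}

static void race_demo_setup(void)
{
	INIT_WORK(&my_work, my_work_func);
}

With the added smp_mb(), clearing PENDING acts as a full barrier, so the callback's loads cannot be hoisted above it and a producer that loses the test_and_set_bit(PENDING) race is guaranteed that the running callback will observe its store.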