diff options
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 9 |
1 file changed, 6 insertions, 3 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 56814902bc56..69bd8083930c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -648,7 +648,7 @@ static void set_work_pool_and_clear_pending(struct work_struct *work, | |||
| 648 | * The following mb guarantees that previous clear of a PENDING bit | 648 | * The following mb guarantees that previous clear of a PENDING bit |
| 649 | * will not be reordered with any speculative LOADS or STORES from | 649 | * will not be reordered with any speculative LOADS or STORES from |
| 650 | * work->current_func, which is executed afterwards. This possible | 650 | * work->current_func, which is executed afterwards. This possible |
| 651 | * reordering can lead to a missed execution on attempt to qeueue | 651 | * reordering can lead to a missed execution on attempt to queue |
| 652 | * the same @work. E.g. consider this case: | 652 | * the same @work. E.g. consider this case: |
| 653 | * | 653 | * |
| 654 | * CPU#0 CPU#1 | 654 | * CPU#0 CPU#1 |
| @@ -1343,7 +1343,7 @@ static bool is_chained_work(struct workqueue_struct *wq) | |||
| 1343 | 1343 | ||
| 1344 | worker = current_wq_worker(); | 1344 | worker = current_wq_worker(); |
| 1345 | /* | 1345 | /* |
| 1346 | * Return %true iff I'm a worker execuing a work item on @wq. If | 1346 | * Return %true iff I'm a worker executing a work item on @wq. If |
| 1347 | * I'm @worker, it's safe to dereference it without locking. | 1347 | * I'm @worker, it's safe to dereference it without locking. |
| 1348 | */ | 1348 | */ |
| 1349 | return worker && worker->current_pwq->wq == wq; | 1349 | return worker && worker->current_pwq->wq == wq; |
| @@ -1725,7 +1725,7 @@ static void rcu_work_rcufn(struct rcu_head *rcu) | |||
| 1725 | * | 1725 | * |
| 1726 | * Return: %false if @rwork was already pending, %true otherwise. Note | 1726 | * Return: %false if @rwork was already pending, %true otherwise. Note |
| 1727 | * that a full RCU grace period is guaranteed only after a %true return. | 1727 | * that a full RCU grace period is guaranteed only after a %true return. |
| 1728 | * While @rwork is guarnateed to be executed after a %false return, the | 1728 | * While @rwork is guaranteed to be executed after a %false return, the |
| 1729 | * execution may happen before a full RCU grace period has passed. | 1729 | * execution may happen before a full RCU grace period has passed. |
| 1730 | */ | 1730 | */ |
| 1731 | bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) | 1731 | bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) |
| @@ -3017,6 +3017,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) | |||
| 3017 | if (WARN_ON(!wq_online)) | 3017 | if (WARN_ON(!wq_online)) |
| 3018 | return false; | 3018 | return false; |
| 3019 | 3019 | ||
| 3020 | if (WARN_ON(!work->func)) | ||
| 3021 | return false; | ||
| 3022 | |||
| 3020 | if (!from_cancel) { | 3023 | if (!from_cancel) { |
| 3021 | lock_map_acquire(&work->lockdep_map); | 3024 | lock_map_acquire(&work->lockdep_map); |
| 3022 | lock_map_release(&work->lockdep_map); | 3025 | lock_map_release(&work->lockdep_map); |
