Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 52 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f28849394791..41ff75b478c6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+struct cwt_wait {
+	wait_queue_t		wait;
+	struct work_struct	*work;
+};
+
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+	if (cwait->work != key)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
+	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long flags;
 	int ret;
 
 	do {
 		ret = try_to_grab_pending(work, is_dwork, &flags);
 		/*
-		 * If someone else is canceling, wait for the same event it
-		 * would be waiting for before retrying.
+		 * If someone else is already canceling, wait for it to
+		 * finish.  flush_work() doesn't work for PREEMPT_NONE
+		 * because we may get scheduled between @work's completion
+		 * and the other canceling task resuming and clearing
+		 * CANCELING - flush_work() will return false immediately
+		 * as @work is no longer busy, try_to_grab_pending() will
+		 * return -ENOENT as @work is still being canceled and the
+		 * other canceling task won't be able to clear CANCELING as
+		 * we're hogging the CPU.
+		 *
+		 * Let's wait for completion using a waitqueue.  As this
+		 * may lead to the thundering herd problem, use a custom
+		 * wake function which matches @work along with exclusive
+		 * wait and wakeup.
 		 */
-		if (unlikely(ret == -ENOENT))
-			flush_work(work);
+		if (unlikely(ret == -ENOENT)) {
+			struct cwt_wait cwait;
+
+			init_wait(&cwait.wait);
+			cwait.wait.func = cwt_wakefn;
+			cwait.work = work;
+
+			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
+						  TASK_UNINTERRUPTIBLE);
+			if (work_is_canceling(work))
+				schedule();
+			finish_wait(&cancel_waitq, &cwait.wait);
+		}
 	} while (unlikely(ret < 0));
 
 	/* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 
 	flush_work(work);
 	clear_work_data(work);
+
+	/*
+	 * Paired with prepare_to_wait() above so that either
+	 * waitqueue_active() is visible here or !work_is_canceling() is
+	 * visible there.
+	 */
+	smp_mb();
+	if (waitqueue_active(&cancel_waitq))
+		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+
 	return ret;
 }
 
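
For context, __cancel_work_timer() is not called directly; it is reached
through two exported wrappers in kernel/workqueue.c, so the fix covers both
the plain and the delayed variant. As they appear in kernels of this
vintage:

	bool cancel_work_sync(struct work_struct *work)
	{
		return __cancel_work_timer(work, false);
	}

	bool cancel_delayed_work_sync(struct delayed_work *dwork)
	{
		return __cancel_work_timer(&dwork->work, true);
	}

Both may sleep and return %true if @work was pending, so the hang fixed
here could bite any code calling either function for the same work item
from two contexts at once.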
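Why can cwt_wakefn() safely return 0 for non-matching waiters without
breaking the exclusive wakeup? The waitqueue core only counts a waiter
against nr_exclusive when its wake function returns nonzero; a waiter that
returns 0 is skipped and stays queued. The scan in __wake_up_common()
(kernel/sched/wait.c of this era, lightly paraphrased) looks like:

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		/* ->func is cwt_wakefn here; a waiter canceling some
		 * other work item returns 0 and is passed over */
		if (curr->func(curr, mode, wake_flags, key) &&
		    (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}

Hence __wake_up(&cancel_waitq, TASK_NORMAL, 1, work) wakes exactly one
waiter whose cwait.work matches the @work key, even though every canceler
in the system shares the single static cancel_waitq.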
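The closing smp_mb() pairs with the full barrier implied by
set_current_state() inside prepare_to_wait_exclusive(). The race is a
classic store-buffering pattern; a sketch of the two sides (CPU labels
are illustrative only):

	CPU0 (canceler)                        CPU1 (waiter)
	---------------                        -------------
	clear_work_data(work);                 prepare_to_wait_exclusive(...);
	smp_mb();                              /* implies full barrier */
	r0 = waitqueue_active(&cancel_waitq);  r1 = work_is_canceling(work);

With both barriers in place the outcome r0 == false && r1 == true is
forbidden: either CPU0 sees the waiter already queued and sends the
wakeup, or CPU1 sees CANCELING already clear and never calls schedule().
Without the smp_mb(), both loads could be satisfied before the other
CPU's store became visible, and the waiter would sleep with nobody left
to wake it.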
