Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 60 +++++++++++-
 1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e785b0f2aea5..8ee6ec82f88a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -932,6 +932,38 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 	wake_up_worker(gcwq);
 }
 
+/*
+ * Test whether @work is being queued from another work executing on the
+ * same workqueue.  This is rather expensive and should only be used from
+ * cold paths.
+ */
+static bool is_chained_work(struct workqueue_struct *wq)
+{
+	unsigned long flags;
+	unsigned int cpu;
+
+	for_each_gcwq_cpu(cpu) {
+		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct worker *worker;
+		struct hlist_node *pos;
+		int i;
+
+		spin_lock_irqsave(&gcwq->lock, flags);
+		for_each_busy_worker(worker, i, pos, gcwq) {
+			if (worker->task != current)
+				continue;
+			spin_unlock_irqrestore(&gcwq->lock, flags);
+			/*
+			 * I'm @worker, no locking necessary.  See if @work
+			 * is headed to the same workqueue.
+			 */
+			return worker->current_cwq->wq == wq;
+		}
+		spin_unlock_irqrestore(&gcwq->lock, flags);
+	}
+	return false;
+}
+
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
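
Chained queueing, which the new is_chained_work() helper detects, means a work item queueing further work on its own workqueue from inside its work function. Note that the helper drops gcwq->lock before dereferencing worker->current_cwq: once the busy worker matching current is found, that worker is the caller itself, so its current_cwq cannot change underneath it. For illustration, a minimal sketch of a self-chaining work item (the my_wq and my_work_fn names are hypothetical, not part of this patch):

static struct workqueue_struct *my_wq;	/* hypothetical, allocated elsewhere */

/*
 * Hypothetical self-requeueing work function.  After destroy_workqueue()
 * sets WQ_DYING, the queue_work() below is still permitted because it
 * runs on a busy worker whose current_cwq->wq is my_wq itself.
 */
static void my_work_fn(struct work_struct *work)
{
	static atomic_t remaining = ATOMIC_INIT(3);	/* finite chain depth */

	if (atomic_dec_return(&remaining) > 0)
		queue_work(my_wq, work);		/* chained queueing */
}
static DECLARE_WORK(my_work, my_work_fn);
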
@@ -943,7 +975,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+	/* if dying, only works from the same workqueue are allowed */
+	if (unlikely(wq->flags & WQ_DYING) &&
+	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
 	/* determine gcwq to use */
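
Because unlikely(wq->flags & WQ_DYING) is evaluated first and short-circuits, the expensive busy-worker scan in is_chained_work() only runs for a workqueue that is already being destroyed; the hot path pays a single flag test. A roughly equivalent expansion of the new guard (a sketch for clarity, not the patch's literal code):

	if (wq->flags & WQ_DYING) {		/* cold: set only by destroy_workqueue() */
		if (!is_chained_work(wq)) {	/* costly scan, cold path only */
			WARN_ON_ONCE(1);	/* complain about illegal queueing */
			return;			/* drop the work item */
		}
	}
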
@@ -2936,11 +2970,35 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	unsigned int flush_cnt = 0;
 	unsigned int cpu;
 
+	/*
+	 * Mark @wq dying and drain all pending works.  Once WQ_DYING is
+	 * set, only chain queueing is allowed.  IOW, only currently
+	 * pending or running work items on @wq can queue further work
+	 * items on it.  @wq is flushed repeatedly until it becomes empty.
+	 * The number of flushes is determined by the depth of chaining and
+	 * should be relatively short.  Whine if it takes too long.
+	 */
 	wq->flags |= WQ_DYING;
+reflush:
 	flush_workqueue(wq);
 
+	for_each_cwq_cpu(cpu, wq) {
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+			continue;
+
+		if (++flush_cnt == 10 ||
+		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+			printk(KERN_WARNING "workqueue %s: flush on "
+			       "destruction isn't complete after %u tries\n",
+			       wq->name, flush_cnt);
+		goto reflush;
+	}
+
 	/*
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
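
The new reflush loop keeps flushing until no cpu_workqueue_struct has active or delayed work left, warning on the 10th retry and then on every 100th up to the 1000th. A standalone userspace check of that warning cadence (plain C, mirroring only the condition above):

#include <stdio.h>

int main(void)
{
	/* Same warning condition as the destroy_workqueue() loop above. */
	for (unsigned int flush_cnt = 1; flush_cnt <= 1200; flush_cnt++)
		if (flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			printf("warn at flush_cnt=%u\n", flush_cnt);
	return 0;	/* prints 10, 100, 200, ..., 1000 */
}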