path: root/kernel/workqueue.c
author     Thomas Gleixner <tglx@linutronix.de>    2011-01-27 06:29:13 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2011-01-27 06:29:37 -0500
commit     f97b12cce6dea51880a6a89d4607c29c70a6a841
tree       1f05f6d39975bd213e7506e8a73ae0a59188c75e /kernel/workqueue.c
parent     ccaa8d657117bb1876d471bd91579d774106778d
parent     1bae4ce27c9c90344f23c65ea6966c50ffeae2f5

Merge commit 'v2.6.38-rc2' into core/locking

Reason: Update to mainline before adding the locking cleanup

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--  kernel/workqueue.c | 87
 1 file changed, 80 insertions(+), 7 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 90db1bd1a978..11869faa6819 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -661,7 +661,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 {
 	struct worker *worker = kthread_data(task);
 
-	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
+	if (!(worker->flags & WORKER_NOT_RUNNING))
 		atomic_inc(get_gcwq_nr_running(cpu));
 }
 
@@ -687,7 +687,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 	struct global_cwq *gcwq = get_gcwq(cpu);
 	atomic_t *nr_running = get_gcwq_nr_running(cpu);
 
-	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
+	if (worker->flags & WORKER_NOT_RUNNING)
 		return NULL;
 
 	/* this can only happen on the local cpu */
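
The two hunks above only drop branch-prediction hints; likely()/unlikely() expand to GCC's __builtin_expect() and influence code layout, not behaviour. A minimal sketch of the macros essentially as the kernel defines them, with a made-up demo() function standing in for the real callers:

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* demo() and not_running_mask are illustrative stand-ins, not kernel code. */
int demo(unsigned int flags, unsigned int not_running_mask)
{
	/*
	 * With an annotation present, the compiler lays out this branch
	 * assuming the predicted outcome; with it removed (as above), no
	 * hint is given and plain branch heuristics apply.
	 */
	if (flags & not_running_mask)
		return 0;
	return 1;
}
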
@@ -768,7 +768,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
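
A standalone userspace sketch (hypothetical DEMO_* flag values, not kernel code) of the point the new comment makes: because WORKER_NOT_RUNNING is a mask of several flags, clearing one member while another is still set is not a transition out of NOT_RUNNING and must not bump nr_running:

#include <stdio.h>

/* Hypothetical flag values; only the mask logic mirrors worker_clr_flags(). */
enum {
	DEMO_ROGUE	 = 1 << 0,
	DEMO_REBIND	 = 1 << 1,
	DEMO_NOT_RUNNING = DEMO_ROGUE | DEMO_REBIND,	/* a mask, not one flag */
};

int main(void)
{
	unsigned int flags  = DEMO_ROGUE | DEMO_REBIND;	/* worker->flags */
	unsigned int oflags = flags;
	unsigned int clear  = DEMO_ROGUE;		/* flag being cleared */

	flags &= ~clear;

	if ((clear & DEMO_NOT_RUNNING) && (oflags & DEMO_NOT_RUNNING)) {
		if (!(flags & DEMO_NOT_RUNNING))
			printf("left NOT_RUNNING: would increment nr_running\n");
		else
			printf("still NOT_RUNNING via another flag: no increment\n");
	}
	return 0;
}
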
@@ -932,6 +936,38 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 		wake_up_worker(gcwq);
 }
 
+/*
+ * Test whether @work is being queued from another work executing on the
+ * same workqueue.  This is rather expensive and should only be used from
+ * cold paths.
+ */
+static bool is_chained_work(struct workqueue_struct *wq)
+{
+	unsigned long flags;
+	unsigned int cpu;
+
+	for_each_gcwq_cpu(cpu) {
+		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct worker *worker;
+		struct hlist_node *pos;
+		int i;
+
+		spin_lock_irqsave(&gcwq->lock, flags);
+		for_each_busy_worker(worker, i, pos, gcwq) {
+			if (worker->task != current)
+				continue;
+			spin_unlock_irqrestore(&gcwq->lock, flags);
+			/*
+			 * I'm @worker, no locking necessary.  See if @work
+			 * is headed to the same workqueue.
+			 */
+			return worker->current_cwq->wq == wq;
+		}
+		spin_unlock_irqrestore(&gcwq->lock, flags);
+	}
+	return false;
+}
+
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -943,7 +979,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+	/* if dying, only works from the same workqueue are allowed */
+	if (unlikely(wq->flags & WQ_DYING) &&
+	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
 	/* determine gcwq to use */
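
A hedged module-style sketch of what "chained" queueing means here (my_wq, chain_fn and chain_work are illustrative names, not part of the patch): a work item that, while executing on a workqueue, queues further work on that same workqueue, which is exactly the case is_chained_work() keeps permitted after WQ_DYING is set:

#include <linux/workqueue.h>

/* Assume my_wq was created earlier with alloc_workqueue(); sketch only. */
static struct workqueue_struct *my_wq;
static int steps_left = 3;	/* simplified, no locking for the counter */

static void chain_fn(struct work_struct *work)
{
	/*
	 * Queueing from inside a work item running on my_wq: current is the
	 * worker task that is_chained_work() finds, so this queue_work()
	 * remains allowed even after destroy_workqueue() has set WQ_DYING.
	 */
	if (--steps_left > 0)
		queue_work(my_wq, work);
}
static DECLARE_WORK(chain_work, chain_fn);
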
@@ -1806,7 +1844,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2350,8 +2388,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
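
A hedged sketch of the deadlock the write acquisition is meant to catch, assuming an ordered workqueue created with max_active == 1 and both items queued on it (ordered_wq, work_a and work_b are made-up names): work_b cannot start until work_a finishes, yet work_a waits for work_b. The running work holds a read acquisition on the workqueue's lockdep_map (previous hunk) while the flush now takes a write acquisition on the same map, so lockdep can report the cycle:

#include <linux/workqueue.h>

/* Illustrative only: assume ordered_wq = alloc_workqueue("demo", 0, 1). */
static struct workqueue_struct *ordered_wq;

static void work_b_fn(struct work_struct *work)
{
	/* never reached while work_a_fn is stuck in flush_work() below */
}
static DECLARE_WORK(work_b, work_b_fn);

static void work_a_fn(struct work_struct *work)
{
	/*
	 * ordered_wq runs one work item at a time, so work_b stays queued
	 * until this function returns, but this function waits for work_b
	 * to finish: a deadlock that the write acquisition above exposes.
	 */
	flush_work(&work_b);
}
static DECLARE_WORK(work_a, work_a_fn);
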
@@ -2936,11 +2984,35 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	unsigned int flush_cnt = 0;
 	unsigned int cpu;
 
+	/*
+	 * Mark @wq dying and drain all pending works.  Once WQ_DYING is
+	 * set, only chain queueing is allowed.  IOW, only currently
+	 * pending or running work items on @wq can queue further work
+	 * items on it.  @wq is flushed repeatedly until it becomes empty.
+	 * The number of flushing is detemined by the depth of chaining and
+	 * should be relatively short.  Whine if it takes too long.
+	 */
 	wq->flags |= WQ_DYING;
+reflush:
 	flush_workqueue(wq);
 
+	for_each_cwq_cpu(cpu, wq) {
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+			continue;
+
+		if (++flush_cnt == 10 ||
+		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+			printk(KERN_WARNING "workqueue %s: flush on "
+			       "destruction isn't complete after %u tries\n",
+			       wq->name, flush_cnt);
+		goto reflush;
+	}
+
 	/*
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
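
The warning in the hunk above is throttled by flush_cnt. A tiny standalone sketch of the same condition shows it fires on the 10th re-flush and then every 100th up to the 1000th, after which it stays quiet (the 1200 loop bound is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int flush_cnt = 0;
	int i;

	for (i = 0; i < 1200; i++) {
		/* same throttle condition as in destroy_workqueue() above */
		if (++flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			printf("warn at flush %u\n", flush_cnt);
	}
	return 0;
}
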
@@ -3692,7 +3764,8 @@ static int __init init_workqueues(void)
 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
 					    WQ_UNBOUND_MAX_ACTIVE);
-	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
+	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
+	       !system_unbound_wq);
 	return 0;
 }
 early_initcall(init_workqueues);