author     Linus Torvalds <torvalds@linux-foundation.org>  2013-08-06 16:58:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-08-06 16:58:34 -0400
commit     4264bc13712b40be38d8fc01a37c34279a27e30d (patch)
tree       0d8c9ad3b085218f390b62480e79e2e83f2983ae /kernel/workqueue.c
parent     0fff1068720f52942058e597a8bb7b4487086b46 (diff)
parent     2865a8fb44cc32420407362cbda80c10fa09c6b2 (diff)
Merge branch 'for-3.11-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull two workqueue fixes from Tejun Heo:
 "A lockdep annotation update so that nested work_on_cpu() invocations
  don't lead to spurious lockdep warnings, and a fix for an unbound attr
  bug which made what's shown in sysfs deviate from the actual ones.

  Both patches have pretty limited scope."

* 'for-3.11-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: copy workqueue_attrs with all fields
  workqueue: allow work_on_cpu() to be called recursively
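To make the nesting scenario concrete, here is a minimal sketch; the callbacks inner_fn, outer_fn and run_on are hypothetical and not part of this commit. Every work_on_cpu() invocation initializes its on-stack work item at the same INIT_WORK_ONSTACK() call site inside work_on_cpu(), so all such items share a single lockdep class; before this fix, flushing the inner item looked to lockdep like re-acquiring a class that was already held.

#include <linux/workqueue.h>
#include <linux/smp.h>

/* Hypothetical inner per-CPU callback. */
static long inner_fn(void *arg)
{
        /* imagine per-CPU hardware setup here */
        return 0;
}

/*
 * Outer callback that nests another work_on_cpu() call.  Both on-stack
 * work items get their lockdep class from the same INIT_WORK_ONSTACK()
 * site inside work_on_cpu(), so the inner flush used to trigger a
 * spurious recursive-locking report.
 */
static long outer_fn(void *arg)
{
        /* raw_ variant: we only need an illustrative CPU number */
        return work_on_cpu(raw_smp_processor_id(), inner_fn, arg);
}

static long run_on(int cpu, void *arg)
{
        return work_on_cpu(cpu, outer_fn, arg);  /* no longer warns */
}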
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 44 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0b72e816b8d0..7f5d4be22034 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2817,6 +2817,19 @@ already_gone:
 	return false;
 }
 
+static bool __flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
  */
 bool flush_work(struct work_struct *work)
 {
-	struct wq_barrier barr;
-
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
+	return __flush_work(work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
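The lock_map_acquire()/lock_map_release() pair deliberately stays in flush_work(): it is what lets lockdep flag genuinely deadlock-prone flushes. A hedged sketch of the bug class it catches (self_flush_fn and self_flush_work are hypothetical):

#include <linux/workqueue.h>

static void self_flush_fn(struct work_struct *work)
{
        /*
         * Deadlock: this waits for the work item that is currently
         * executing.  process_one_work() holds the work's lockdep_map
         * while the function runs, so this second acquire is reported
         * by lockdep before the system actually hangs.
         */
        flush_work(work);
}
static DECLARE_WORK(self_flush_work, self_flush_fn);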
@@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 {
 	to->nice = from->nice;
 	cpumask_copy(to->cpumask, from->cpumask);
+	/*
+	 * Unlike hash and equality test, this function doesn't ignore
+	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
+	 * get_unbound_pool() explicitly clears ->no_numa after copying.
+	 */
+	to->no_numa = from->no_numa;
 }
 
 /* hash value of the content of @attr */
@@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
 	copy_workqueue_attrs(pool->attrs, attrs);
 
+	/*
+	 * no_numa isn't a worker_pool attribute, always clear it.  See
+	 * 'struct workqueue_attrs' comments for detail.
+	 */
+	pool->attrs->no_numa = false;
+
 	/* if cpumask is contained inside a NUMA node, we belong to that node */
 	if (wq_numa_enabled) {
 		for_each_node(node) {
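Read together, these two hunks maintain one invariant: the copy helper transfers every field, and the pool side clears the single field that is not a pool attribute. A simplified, self-contained sketch of that pattern, using hypothetical stand-in types rather than the kernel's:

#include <stdbool.h>

/* stand-ins for struct workqueue_attrs and struct worker_pool */
struct attrs {
        int nice;               /* part of hash/equality */
        unsigned long cpumask;  /* part of hash/equality (simplified) */
        bool no_numa;           /* wq-only; ignored by hash/equality */
};

struct pool {
        struct attrs attrs;
};

/*
 * Copy *all* fields.  Silently dropping no_numa here was the bug that
 * made the attributes shown in sysfs deviate from the live ones.
 */
static void copy_attrs(struct attrs *to, const struct attrs *from)
{
        to->nice = from->nice;
        to->cpumask = from->cpumask;
        to->no_numa = from->no_numa;
}

/*
 * Pool side: take a full copy, then clear the non-pool field, mirroring
 * what get_unbound_pool() does right after copy_workqueue_attrs().
 */
static void pool_set_attrs(struct pool *p, const struct attrs *from)
{
        copy_attrs(&p->attrs, from);
        p->attrs.no_numa = false;
}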
@@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-	flush_work(&wfc.work);
+
+	/*
+	 * The work item is on-stack and can't lead to deadlock through
+	 * flushing.  Use __flush_work() to avoid spurious lockdep warnings
+	 * when work_on_cpu()s are nested.
+	 */
+	__flush_work(&wfc.work);
+
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
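For callers outside kernel/workqueue.c, __flush_work() is file-local, so the ordinary on-stack pattern that work_on_cpu() itself follows still goes through flush_work() and keeps its lockdep coverage. A rough sketch of that caller-side pattern, with hypothetical names (my_cpu_call, my_run_on_cpu):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_cpu_call {
        struct work_struct work;
        long ret;
};

static void my_cpu_call_fn(struct work_struct *work)
{
        struct my_cpu_call *c = container_of(work, struct my_cpu_call, work);

        c->ret = 0;     /* do the per-CPU operation here */
}

static long my_run_on_cpu(int cpu)
{
        struct my_cpu_call c = { .ret = -1 };

        INIT_WORK_ONSTACK(&c.work, my_cpu_call_fn);
        schedule_work_on(cpu, &c.work);
        flush_work(&c.work);            /* safe: c lives until we return */
        destroy_work_on_stack(&c.work);
        return c.ret;
}

The destroy_work_on_stack() call pairs with INIT_WORK_ONSTACK() so that the debugobjects machinery does not see the on-stack work item as leaked once the stack frame goes away.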