author      Mark Brown <broonie@linaro.org>    2013-08-15 06:19:52 -0400
committer   Mark Brown <broonie@linaro.org>    2013-08-15 06:19:52 -0400
commit      4210606b19852dce52ed1a687db816695b6048e1 (patch)
tree        fd0762f88dc54560d87fc1e523741d92228f6a9c /kernel/workqueue.c
parent      4d8cfa4642f7d8fafa4d60f05dd34fe8c3b9fa45 (diff)
parent      b7ae6f31d8243ec684af16bc5c763eccdfabaec0 (diff)
Merge branch 'topic/dma' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into asoc-pxa
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   48
1 file changed, 36 insertions, 12 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f02c4a4a0c3c..7f5d4be22034 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2817,6 +2817,19 @@ already_gone:
 	return false;
 }
 
+static bool __flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
  */
 bool flush_work(struct work_struct *work)
 {
-	struct wq_barrier barr;
-
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
+	return __flush_work(work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 {
 	to->nice = from->nice;
 	cpumask_copy(to->cpumask, from->cpumask);
+	/*
+	 * Unlike hash and equality test, this function doesn't ignore
+	 * ->no_numa as it is used for both pool and wq attrs. Instead,
+	 * get_unbound_pool() explicitly clears ->no_numa after copying.
+	 */
+	to->no_numa = from->no_numa;
 }
 
 /* hash value of the content of @attr */
@@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
 	copy_workqueue_attrs(pool->attrs, attrs);
 
+	/*
+	 * no_numa isn't a worker_pool attribute, always clear it. See
+	 * 'struct workqueue_attrs' comments for detail.
+	 */
+	pool->attrs->no_numa = false;
+
 	/* if cpumask is contained inside a NUMA node, we belong to that node */
 	if (wq_numa_enabled) {
 		for_each_node(node) {
@@ -4644,7 +4661,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
  * Workqueues should be brought up before normal priority CPU notifiers.
  * This will be registered high priority CPU notifier.
  */
-static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 					       unsigned long action,
 					       void *hcpu)
 {
@@ -4697,7 +4714,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
  * Workqueues should be brought down after normal priority CPU notifiers.
  * This will be registered as low priority CPU notifier.
  */
-static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+static int workqueue_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {
@@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-	flush_work(&wfc.work);
+
+	/*
+	 * The work item is on-stack and can't lead to deadlock through
+	 * flushing. Use __flush_work() to avoid spurious lockdep warnings
+	 * when work_on_cpu()s are nested.
+	 */
+	__flush_work(&wfc.work);
+
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
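
A note on the work_on_cpu() hunk above: the on-stack work item is set up with INIT_WORK_ONSTACK(), whose lockdep key is a static per-call-site variable, so every work_on_cpu() invocation shares one lockdep class. While the callback runs, process_one_work() holds that class for the outer work item; if the callback itself calls work_on_cpu(), the nested flush_work() acquires the same class for the inner work item and lockdep reports a false "possible recursive locking" splat, even though the two items live on separate stacks and cannot deadlock. The sketch below is a hypothetical illustration of such nesting (the function names and CPU numbers are made up, not part of this commit); with this change, work_on_cpu() flushes through __flush_work(), which skips the lockdep annotations, so the pattern no longer warns.

#include <linux/workqueue.h>

/* Hypothetical illustration only -- not code from this commit. */
static long inner_on_cpu(void *arg)
{
	/* per-CPU work for the inner level; details don't matter here */
	return 0;
}

static long outer_on_cpu(void *arg)
{
	/*
	 * Runs inside a kworker via work_for_cpu_fn(); process_one_work()
	 * holds the lockdep class of the outer on-stack wfc.work.  The
	 * nested work_on_cpu() below used to trigger a false "possible
	 * recursive locking" report from the inner flush_work(); with
	 * __flush_work() the inner flush carries no lockdep annotations.
	 */
	return work_on_cpu(0, inner_on_cpu, NULL);	/* CPU id is arbitrary */
}

static long run_example(void)
{
	return work_on_cpu(1, outer_on_cpu, NULL);	/* CPU id is arbitrary */
}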