author    Bjorn Helgaas <bhelgaas@google.com>  2013-11-18 13:00:29 -0500
committer Bjorn Helgaas <bhelgaas@google.com>  2013-11-25 16:37:22 -0500
commit    12997d1a999cd1b22e21a238c96780f2a55e4e13 (patch)
tree      70470549b47f1a0d90861609c6ce8ca2d95e286e /kernel/workqueue.c
parent    12c3156f10c5d8c5f1fb3f0bbdb8c1ddb1d1f65c (diff)
Revert "workqueue: allow work_on_cpu() to be called recursively"
This reverts commit c2fda509667b0fda4372a237f5a59ea4570b1627.

c2fda509667b removed the lockdep annotation from work_on_cpu() to work around the PCI path that calls work_on_cpu() from within a work_on_cpu() work item (PF driver .probe() method -> pci_enable_sriov() -> add VFs -> VF driver .probe() method).

961da7fb6b22 ("PCI: Avoid unnecessary CPU switch when calling driver .probe() method") avoids that recursive work_on_cpu() use in a different way, so this revert restores the work_on_cpu() lockdep annotation.

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Tejun Heo <tj@kernel.org>
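To make the recursion concrete: work_on_cpu() initializes its on-stack work item at a single INIT_WORK_ONSTACK() call site, so every work_on_cpu() invocation shares one lockdep class, and flushing the inner item while the outer one is still executing looks to lockdep like recursive acquisition of the same lock. A minimal sketch of the old PCI call chain, assuming hypothetical driver hooks (my_pf_probe() and my_pf_probe_fn() are illustrative; work_on_cpu(), pci_enable_sriov(), dev_to_node() and cpumask_of_node() are real kernel APIs):

#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

/* Runs inside work_on_cpu()'s work item, on a CPU near the PF. */
static long my_pf_probe_fn(void *arg)
{
	struct pci_dev *pf = arg;

	/*
	 * Enabling SR-IOV adds VFs; binding a VF driver from here used
	 * to call work_on_cpu() again, from within this work item.
	 */
	return pci_enable_sriov(pf, 4);
}

static int my_pf_probe(struct pci_dev *pf, const struct pci_device_id *id)
{
	int cpu = cpumask_any(cpumask_of_node(dev_to_node(&pf->dev)));

	/* Outer work_on_cpu(); the nested one shares its lockdep class. */
	return work_on_cpu(cpu, my_pf_probe_fn, pf);
}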
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	32
1 file changed, 10 insertions(+), 22 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..5690b8eabfbc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2840,19 +2840,6 @@ already_gone:
 	return false;
 }
 
-static bool __flush_work(struct work_struct *work)
-{
-	struct wq_barrier barr;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
-}
-
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work)
  */
 bool flush_work(struct work_struct *work)
 {
+	struct wq_barrier barr;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	return __flush_work(work);
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
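The restored lock_map_acquire()/lock_map_release() pair is what lets lockdep catch the real deadlock class flush_work() can participate in: flushing a work item while holding a lock that the work handler also takes. A sketch of that pattern, assuming hypothetical names (my_lock, my_work, my_teardown; flush_work() and the mutex API are real):

#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(my_lock);

/* Work handler: establishes the "work -> my_lock" dependency. */
static void my_work_fn(struct work_struct *work)
{
	mutex_lock(&my_lock);
	/* ... update shared state ... */
	mutex_unlock(&my_lock);
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_teardown(void)
{
	mutex_lock(&my_lock);
	/*
	 * "my_lock -> work": if my_work_fn() is running, it blocks on
	 * my_lock forever.  The annotation makes lockdep warn here even
	 * on runs where the race never actually fires.
	 */
	flush_work(&my_work);
	mutex_unlock(&my_lock);
}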
@@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-
-	/*
-	 * The work item is on-stack and can't lead to deadlock through
-	 * flushing. Use __flush_work() to avoid spurious lockdep warnings
-	 * when work_on_cpu()s are nested.
-	 */
-	__flush_work(&wfc.work);
-
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
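This revert is only safe because of the PCI-side change it cites: 961da7fb6b22 makes the probe path skip work_on_cpu() when the caller is already on a CPU of the device's NUMA node, which is always true for a VF probed from within its PF's work item. A simplified sketch of that logic under those assumptions (pci_call_probe_sketch() and local_probe() are stand-ins, not the verbatim kernel code):

/* Stand-in for the driver's actual probe callback. */
static long local_probe(void *arg);

static int pci_call_probe_sketch(struct pci_dev *dev, void *arg)
{
	int node = dev_to_node(&dev->dev);

	/*
	 * Switch CPUs only when we are on the wrong node.  A VF probe
	 * triggered from the PF's work item is already node-local, so
	 * it takes the direct path and never nests work_on_cpu().
	 */
	if (node >= 0 && node != numa_node_id()) {
		int cpu = cpumask_any_and(cpumask_of_node(node),
					  cpu_online_mask);
		if (cpu < nr_cpu_ids)
			return work_on_cpu(cpu, local_probe, arg);
	}
	return local_probe(arg);
}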