about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/workqueue.c32
1 file changed, 10 insertions(+), 22 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..5690b8eabfbc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2840,19 +2840,6 @@ already_gone:
 	return false;
 }
 
-static bool __flush_work(struct work_struct *work)
-{
-	struct wq_barrier barr;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
-}
-
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work)
  */
 bool flush_work(struct work_struct *work)
 {
+	struct wq_barrier barr;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	return __flush_work(work);
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-
-	/*
-	 * The work item is on-stack and can't lead to deadlock through
-	 * flushing. Use __flush_work() to avoid spurious lockdep warnings
-	 * when work_on_cpu()s are nested.
-	 */
-	__flush_work(&wfc.work);
-
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);