author     Andi Kleen <ak@linux.intel.com>    2009-10-14 00:22:47 -0400
committer  Andi Kleen <ak@linux.intel.com>    2009-10-19 01:29:22 -0400
commit     65a64464349883891e21e74af16c05d6e1eeb4e9
tree       7b4744f01840c337506dbb24debe5e50ee76186e /kernel
parent     5d5429af066b8896e903d829ac143711ed2c25f2
HWPOISON: Allow schedule_on_each_cpu() from keventd
Right now when calling schedule_on_each_cpu() from keventd there is a deadlock because it tries to schedule a work item on the current CPU too. This happens via lru_add_drain_all() in hwpoison.

Just call the function for the current CPU in this case. This is actually faster too.

Debugging with Fengguang Wu & Max Asbock

Signed-off-by: Andi Kleen <ak@linux.intel.com>
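To see the problem concretely, here is a minimal sketch of the pre-patch deadlock. my_handler() and some_func() are hypothetical stand-ins, not the actual hwpoison code; the real trigger is lru_add_drain_all() and its per-CPU drain callback:

#include <linux/workqueue.h>

/* Hypothetical per-CPU callback; its contents are irrelevant to the deadlock. */
static void some_func(struct work_struct *dummy)
{
}

/* Hypothetical work handler, currently being executed by a keventd thread. */
static void my_handler(struct work_struct *work)
{
        /*
         * Before this patch, schedule_on_each_cpu() queued a work item on
         * every online CPU -- including the CPU whose keventd thread is
         * running this handler -- and then flush_work()ed each item.  The
         * item queued on the local CPU can only run on that same keventd
         * thread, which is blocked in flush_work() waiting for it: deadlock.
         */
        schedule_on_each_cpu(some_func);
}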
Diffstat (limited to 'kernel')
 kernel/workqueue.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..f61a2fecf281 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -667,21 +667,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
 	if (!works)
 		return -ENOMEM;
 
+	/*
+	 * when running in keventd don't schedule a work item on itself.
+	 * Can just call directly because the work queue is already bound.
+	 * This also is faster.
+	 * Make this a generic parameter for other workqueues?
+	 */
+	if (current_is_keventd()) {
+		orig = raw_smp_processor_id();
+		INIT_WORK(per_cpu_ptr(works, orig), func);
+		func(per_cpu_ptr(works, orig));
+	}
+
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
+		if (cpu == orig)
+			continue;
 		INIT_WORK(work, func);
 		schedule_work_on(cpu, work);
 	}
-	for_each_online_cpu(cpu)
-		flush_work(per_cpu_ptr(works, cpu));
+	for_each_online_cpu(cpu) {
+		if (cpu != orig)
+			flush_work(per_cpu_ptr(works, cpu));
+	}
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
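For readability, schedule_on_each_cpu() as it reads with this patch applied, reconstructed from the hunk above (only the closing brace falls outside the hunk):

int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        int orig = -1;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        /*
         * when running in keventd don't schedule a work item on itself.
         * Can just call directly because the work queue is already bound.
         * This also is faster.
         * Make this a generic parameter for other workqueues?
         */
        if (current_is_keventd()) {
                orig = raw_smp_processor_id();
                INIT_WORK(per_cpu_ptr(works, orig), func);
                func(per_cpu_ptr(works, orig));
        }

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                if (cpu == orig)
                        continue;
                INIT_WORK(work, func);
                schedule_work_on(cpu, work);
        }
        for_each_online_cpu(cpu) {
                if (cpu != orig)
                        flush_work(per_cpu_ptr(works, cpu));
        }
        put_online_cpus();
        free_percpu(works);
        return 0;
}

Because the local CPU's callback has already run synchronously before anything is queued, both the scheduling and the flushing loops can safely skip cpu == orig, which is what removes the self-wait.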