author    Christoph Lameter <clameter@engr.sgi.com>    2006-01-08 04:00:43 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>       2006-01-08 23:12:40 -0500
commit    15316ba81aee6775d6079fb46c66c801989e7d10
tree      c6190bdcc6e15fd8b5657a5932313e296b475577
parent    48db57f8ff10eb09ab887ccb6150b0da0c7be24e
[PATCH] add schedule_on_each_cpu()
swap migration's isolate_lru_page() currently uses an IPI to notify other
processors that the lru caches need to be drained if the page cannot be
found on the LRU. The IPI may interrupt a processor that is just
processing lru requests and cause a race condition.

This patch introduces a new function schedule_on_each_cpu() that uses
keventd to run the LRU draining on each processor. Processors disable
preemption when dealing with the LRU caches (these are per processor),
so executing the LRU draining from another process is safe.

Thanks to Lee Schermerhorn <lee.schermerhorn@hp.com> for finding this
race condition.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
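For illustration, a caller converting from the IPI approach might look like
the sketch below. This is a hypothetical example, not part of the patch:
lru_drain_per_cpu() is an assumed wrapper name; lru_add_drain() is the
existing per-cpu drain helper in mm/swap.c.

	/* Hypothetical caller sketch (not part of this patch): drain the
	 * per-cpu LRU caches via keventd instead of sending an IPI.
	 */
	static void lru_drain_per_cpu(void *dummy)
	{
		/* Runs in that CPU's keventd thread; lru_add_drain()
		 * disables preemption while touching the per-cpu pagevecs.
		 */
		lru_add_drain();
	}

		/* In the migration path: drain everywhere and wait. */
		schedule_on_each_cpu(lru_drain_per_cpu, NULL);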
 include/linux/workqueue.h |  1 +
 kernel/workqueue.c        | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+), 0 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ac39d04d027c..86b111300231 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -65,6 +65,7 @@ extern int FASTCALL(schedule_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2bd5aee1c736..62d47220696a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -419,6 +419,25 @@ int schedule_delayed_work_on(int cpu,
 	return ret;
 }
 
+int schedule_on_each_cpu(void (*func)(void *info), void *info)
+{
+	int cpu;
+	struct work_struct *work;
+
+	work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+
+	if (!work)
+		return -ENOMEM;
+	for_each_online_cpu(cpu) {
+		INIT_WORK(work + cpu, func, info);
+		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
+				work + cpu);
+	}
+	flush_workqueue(keventd_wq);
+	kfree(work);
+	return 0;
+}
+
 void flush_scheduled_work(void)
 {
 	flush_workqueue(keventd_wq);
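A note on the implementation above: the work_struct array is sized for
NR_CPUS, but work is only queued on online CPUs. flush_workqueue(keventd_wq)
then serves as the completion barrier, so every queued callback has finished
before the array is freed and the function returns 0. Because each callback
runs in that CPU's keventd worker thread, per-cpu data that is normally
protected only by disabling preemption (as the LRU caches are) needs no
additional locking.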