author     Christoph Lameter <clameter@engr.sgi.com>   2005-10-30 18:01:59 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>       2005-10-30 20:37:18 -0500
commit     89ada67917f516212452443a56b9fd3b65b74dc7 (patch)
tree       00986fd653bf1973a70ae894f503b3b2352943b5 /kernel
parent     d61780c0d384939ef31c46b47442854d5def4623 (diff)
[PATCH] Use alloc_percpu to allocate workqueues locally
This patch makes the workqueues use alloc_percpu instead of an array. The
workqueues are placed on nodes local to each processor. The workqueue
structure can grow to a significant size on a system with lots of processors
if this patch is not applied. 64-bit architectures with all debugging
features enabled and configured for 512 processors will not be able to boot
without this patch.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
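For readers who have not used the per-CPU allocator, the sketch below shows the pattern the patch applies throughout kernel/workqueue.c: a statically sized NR_CPUS array embedded in the structure is replaced by a pointer filled in by alloc_percpu(), dereferenced with per_cpu_ptr(), and released with free_percpu(). This is only a minimal illustration assuming the <linux/percpu.h> interface visible in the diff; the example_queue structure and the example_* helpers are made-up names, not the real workqueue code.

/*
 * Minimal sketch of the allocation pattern used by the patch, assuming the
 * <linux/percpu.h> interface shown in the diff (alloc_percpu, per_cpu_ptr,
 * free_percpu).  "example_queue", "example_stats" and the example_* helpers
 * are illustrative names only, not the actual workqueue structures.
 */
#include <linux/percpu.h>
#include <linux/slab.h>

struct example_stats {
        int pending;
};

struct example_queue {
        /* Before: one slot per possible CPU embedded in the structure. */
        /* struct example_stats cpu_stats[NR_CPUS]; */

        /* After: per-CPU storage; each CPU's copy lives in node-local memory. */
        struct example_stats *cpu_stats;
        const char *name;
};

static struct example_queue *example_create(const char *name)
{
        struct example_queue *q = kzalloc(sizeof(*q), GFP_KERNEL);

        if (!q)
                return NULL;
        q->cpu_stats = alloc_percpu(struct example_stats);
        if (!q->cpu_stats) {
                kfree(q);
                return NULL;
        }
        q->name = name;
        return q;
}

static void example_mark_pending(struct example_queue *q, int cpu)
{
        /* per_cpu_ptr() replaces the old "q->cpu_stats + cpu" arithmetic. */
        per_cpu_ptr(q->cpu_stats, cpu)->pending++;
}

static void example_destroy(struct example_queue *q)
{
        free_percpu(q->cpu_stats);      /* pairs with alloc_percpu() */
        kfree(q);
}

The design point is that the containing structure shrinks to a single pointer, and each CPU's copy is allocated separately on that CPU's node, which is what keeps the workqueue structure small enough to allocate on the 512-processor configurations the commit message mentions.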
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  33
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 91bacb13a7e2..7cee222231bc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -12,6 +12,8 @@
  *   Andrew Morton <andrewm@uow.edu.au>
  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
  *   Theodore Ts'o <tytso@mit.edu>
+ *
+ * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
  */
 
 #include <linux/module.h>
@@ -57,7 +59,7 @@ struct cpu_workqueue_struct {
  * per-CPU workqueues:
  */
 struct workqueue_struct {
-        struct cpu_workqueue_struct cpu_wq[NR_CPUS];
+        struct cpu_workqueue_struct *cpu_wq;
         const char *name;
         struct list_head list;  /* Empty if single thread */
 };
@@ -102,7 +104,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
                 if (unlikely(is_single_threaded(wq)))
                         cpu = 0;
                 BUG_ON(!list_empty(&work->entry));
-                __queue_work(wq->cpu_wq + cpu, work);
+                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                 ret = 1;
         }
         put_cpu();
@@ -118,7 +120,7 @@ static void delayed_work_timer_fn(unsigned long __data)
         if (unlikely(is_single_threaded(wq)))
                 cpu = 0;
 
-        __queue_work(wq->cpu_wq + cpu, work);
+        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
@@ -265,13 +267,13 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 
         if (is_single_threaded(wq)) {
                 /* Always use cpu 0's area. */
-                flush_cpu_workqueue(wq->cpu_wq + 0);
+                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
         } else {
                 int cpu;
 
                 lock_cpu_hotplug();
                 for_each_online_cpu(cpu)
-                        flush_cpu_workqueue(wq->cpu_wq + cpu);
+                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
                 unlock_cpu_hotplug();
         }
 }
@@ -279,7 +281,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                    int cpu)
 {
-        struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
+        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
         struct task_struct *p;
 
         spin_lock_init(&cwq->lock);
@@ -312,6 +314,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
         if (!wq)
                 return NULL;
 
+        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
         wq->name = name;
         /* We don't need the distraction of CPUs appearing and vanishing. */
         lock_cpu_hotplug();
@@ -353,7 +356,7 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
         unsigned long flags;
         struct task_struct *p;
 
-        cwq = wq->cpu_wq + cpu;
+        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
         spin_lock_irqsave(&cwq->lock, flags);
         p = cwq->thread;
         cwq->thread = NULL;
@@ -380,6 +383,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                 spin_unlock(&workqueue_lock);
         }
         unlock_cpu_hotplug();
+        free_percpu(wq->cpu_wq);
         kfree(wq);
 }
 
@@ -458,7 +462,7 @@ int current_is_keventd(void)
 
         BUG_ON(!keventd_wq);
 
-        cwq = keventd_wq->cpu_wq + cpu;
+        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
         if (current == cwq->thread)
                 ret = 1;
 
@@ -470,7 +474,7 @@ int current_is_keventd(void)
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
-        struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
+        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
         LIST_HEAD(list);
         struct work_struct *work;
 
@@ -481,7 +485,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
                 printk("Taking work for %s\n", wq->name);
                 work = list_entry(list.next,struct work_struct,entry);
                 list_del(&work->entry);
-                __queue_work(wq->cpu_wq + smp_processor_id(), work);
+                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
         }
         spin_unlock_irq(&cwq->lock);
 }
@@ -508,15 +512,18 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
         case CPU_ONLINE:
                 /* Kick off worker threads. */
                 list_for_each_entry(wq, &workqueues, list) {
-                        kthread_bind(wq->cpu_wq[hotcpu].thread, hotcpu);
-                        wake_up_process(wq->cpu_wq[hotcpu].thread);
+                        struct cpu_workqueue_struct *cwq;
+
+                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
+                        kthread_bind(cwq->thread, hotcpu);
+                        wake_up_process(cwq->thread);
                 }
                 break;
 
         case CPU_UP_CANCELED:
                 list_for_each_entry(wq, &workqueues, list) {
                         /* Unbind so it can run. */
-                        kthread_bind(wq->cpu_wq[hotcpu].thread,
+                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                      smp_processor_id());
                         cleanup_workqueue_thread(wq, hotcpu);
                 }