author      Thomas Gleixner <tglx@linutronix.de>    2011-05-14 06:06:36 -0400
committer   Thomas Gleixner <tglx@linutronix.de>    2011-05-14 06:06:36 -0400
commit      a18f22a968de17b29f2310cdb7ba69163e65ec15 (patch)
tree        a7d56d88fad5e444d7661484109758a2f436129e /kernel/workqueue.c
parent      a1c57e0fec53defe745e64417eacdbd3618c3e66 (diff)
parent      798778b8653f64b7b2162ac70eca10367cff6ce8 (diff)
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts:
arch/ia64/kernel/cyclone.c
arch/mips/kernel/i8253.c
arch/x86/kernel/i8253.c
Reason: Resolve conflicts so further cleanups do not conflict further
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 26
1 file changed, 22 insertions, 4 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ee6578b578ad..e3378e8d3a5c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,10 +251,12 @@ struct workqueue_struct *system_wq __read_mostly;
 struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
 struct workqueue_struct *system_unbound_wq __read_mostly;
+struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
+EXPORT_SYMBOL_GPL(system_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
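
The two added lines declare and export the new system_freezable_wq brought in by this merge. As a hedged illustration (not part of this diff; my_work_fn, my_work and my_trigger are made-up names), a module could queue work on it so the item is not executed while tasks are frozen for suspend/hibernate:

#include <linux/workqueue.h>

/* Hypothetical example, not from kernel/workqueue.c. */
static void my_work_fn(struct work_struct *work)
{
	/* Runs in process context; execution is deferred while the wq is frozen. */
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_trigger(void)
{
	queue_work(system_freezable_wq, &my_work);
}
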
@@ -316,6 +318,11 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 
 static struct debug_obj_descr work_debug_descr;
 
+static void *work_debug_hint(void *addr)
+{
+	return ((struct work_struct *) addr)->func;
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
@@ -387,6 +394,7 @@ static int work_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr work_debug_descr = {
 	.name		= "work_struct",
+	.debug_hint	= work_debug_hint,
 	.fixup_init	= work_fixup_init,
 	.fixup_activate	= work_fixup_activate,
 	.fixup_free	= work_fixup_free,
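
work_debug_hint() together with the new .debug_hint member lets CONFIG_DEBUG_OBJECTS_WORK warnings report the work function rather than the raw work_struct address. The same pattern fits any debugobjects user; a minimal sketch with invented names (my_obj is not a real kernel type):

struct my_obj {
	void (*callback)(struct my_obj *obj);	/* hypothetical */
};

static void *my_obj_debug_hint(void *addr)
{
	/* Return something a human can resolve in a warning, typically a callback. */
	return ((struct my_obj *) addr)->callback;
}

static struct debug_obj_descr my_obj_debug_descr = {
	.name		= "my_obj",
	.debug_hint	= my_obj_debug_hint,
};
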
@@ -1283,8 +1291,14 @@ __acquires(&gcwq->lock)
 			return true;
 		spin_unlock_irq(&gcwq->lock);
 
-		/* CPU has come up inbetween, retry migration */
+		/*
+		 * We've raced with CPU hot[un]plug.  Give it a breather
+		 * and retry migration.  cond_resched() is required here;
+		 * otherwise, we might deadlock against cpu_stop trying to
+		 * bring down the CPU on non-preemptive kernel.
+		 */
 		cpu_relax();
+		cond_resched();
 	}
 }
 
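
The expanded comment carries the reasoning: on a non-preemptive kernel a tight retry loop never yields, so the cpu_stop thread tearing down the CPU can starve and hot-unplug deadlocks; cond_resched() inserts the needed scheduling point. Stripped to its skeleton (made_progress() is a hypothetical predicate, not from this file):

	while (!made_progress()) {	/* hypothetical condition */
		cpu_relax();		/* CPU-level spin hint */
		cond_resched();		/* explicit preemption point for !CONFIG_PREEMPT */
	}
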
@@ -1358,8 +1372,10 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 	worker->id = id;
 
 	if (!on_unbound_cpu)
-		worker->task = kthread_create(worker_thread, worker,
-					      "kworker/%u:%d", gcwq->cpu, id);
+		worker->task = kthread_create_on_node(worker_thread,
+						      worker,
+						      cpu_to_node(gcwq->cpu),
+						      "kworker/%u:%d", gcwq->cpu, id);
 	else
 		worker->task = kthread_create(worker_thread, worker,
 					      "kworker/u:%d", id);
@@ -3775,8 +3791,10 @@ static int __init init_workqueues(void)
 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
 					    WQ_UNBOUND_MAX_ACTIVE);
+	system_freezable_wq = alloc_workqueue("events_freezable",
+					      WQ_FREEZABLE, 0);
 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-	       !system_unbound_wq);
+	       !system_unbound_wq || !system_freezable_wq);
 	return 0;
 }
 early_initcall(init_workqueues);
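
Besides the shared system_freezable_wq created above, WQ_FREEZABLE can also be passed when a driver allocates its own queue; work queued there is held back while the freezer is active and released on thaw. A minimal sketch under invented "mydrv" naming, not part of this commit:

static struct workqueue_struct *mydrv_wq;

static int __init mydrv_init(void)
{
	mydrv_wq = alloc_workqueue("mydrv", WQ_FREEZABLE, 0);
	if (!mydrv_wq)
		return -ENOMEM;
	return 0;
}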