author		Len Brown <len.brown@intel.com>		2009-04-05 02:14:15 -0400
committer	Len Brown <len.brown@intel.com>		2009-04-05 02:14:15 -0400
commit		478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree		a7f7952099da60d33032aed6de9c0c56c9f8779e /kernel/workqueue.c
parent		8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent		6bb597507f9839b13498781e481f5458aea33620 (diff)
Merge branch 'linus' into release

Conflicts:
	arch/x86/kernel/cpu/cpufreq/longhaul.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--	kernel/workqueue.c	47
 1 file changed, 14 insertions(+), 33 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509b40d..32f8e0d2bf5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,8 +48,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	struct task_struct *thread;
-
-	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -262,13 +260,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
-	cwq->run_depth++;
-	if (cwq->run_depth > 3) {
-		/* morton gets to eat his hat */
-		printk("%s: recursion depth exceeded: %d\n",
-			__func__, cwq->run_depth);
-		dump_stack();
-	}
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
@@ -311,7 +302,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
-	cwq->run_depth--;
 	spin_unlock_irq(&cwq->lock);
 }
 
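[Note: the three hunks above remove the run_depth field together with its
increment, overflow check, and decrement. An illustrative sketch of the
recursion the counter used to detect, reconstructed from the deleted lines
(not part of the patch):

	run_workqueue(cwq)
	  work->func()				/* a queued work function */
	    flush_workqueue(wq)
	      flush_cpu_workqueue(cwq)		/* cwq->thread == current */
	        run_workqueue(cwq)		/* recursion: run_depth++ */

The flush_cpu_workqueue() hunk below removes that self-flush path, so the
depth counter no longer has a user.]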
@@ -368,29 +358,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	int active;
+	int active = 0;
+	struct wq_barrier barr;
 
-	if (cwq->thread == current) {
-		/*
-		 * Probably keventd trying to flush its own queue. So simply run
-		 * it by hand rather than deadlocking.
-		 */
-		run_workqueue(cwq);
-		active = 1;
-	} else {
-		struct wq_barrier barr;
+	WARN_ON(cwq->thread == current);
 
-		active = 0;
-		spin_lock_irq(&cwq->lock);
-		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-			insert_wq_barrier(cwq, &barr, &cwq->worklist);
-			active = 1;
-		}
-		spin_unlock_irq(&cwq->lock);
-
-		if (active)
-			wait_for_completion(&barr.done);
-	}
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
+		active = 1;
+	}
+	spin_unlock_irq(&cwq->lock);
+
+	if (active)
+		wait_for_completion(&barr.done);
 
 	return active;
 }
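[Note: flush_cpu_workqueue() now takes the barrier path unconditionally. For
reference, a sketch of the wq_barrier mechanism it relies on, paraphrased
from elsewhere in this era's kernel/workqueue.c (details may differ
slightly):

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	/* Wake whoever is sleeping in wait_for_completion(&barr->done). */
	complete(&barr->done);
}

insert_wq_barrier() queues barr->work on the cwq with wq_barrier_func() as
its handler; once the worker thread reaches the barrier, the flusher wakes
and knows all earlier work items have run. A work item that flushes its own
workqueue would now trip the WARN_ON() and then block on a barrier only its
own thread can run, i.e. deadlock; such callers are bugs to be fixed, not
serviced by hand as before.]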
@@ -416,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
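[Note: for_each_cpu_mask_nr() operated on a struct cpumask object, hence the
*cpu_map dereference; the new cpumask API iterates through the pointer
itself. Roughly how for_each_cpu() is defined in this era's
include/linux/cpumask.h (simplified; exact definition may differ):

#define for_each_cpu(cpu, mask)				\
	for ((cpu) = -1;				\
		(cpu) = cpumask_next((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

Each pass asks cpumask_next() for the next set bit in *mask, so callers pass
the const struct cpumask * directly. The two hunks below convert
wait_on_work() and destroy_workqueue() the same way.]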
@@ -547,7 +528,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -911,7 +892,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	cpu_maps_update_done();
 