Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	47
1 file changed, 14 insertions, 33 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e53ee18ef431..b6b966ce1451 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,8 +49,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	struct task_struct *thread;
-
-	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -269,13 +267,6 @@ DEFINE_TRACE(workqueue_execution);
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
-	cwq->run_depth++;
-	if (cwq->run_depth > 3) {
-		/* morton gets to eat his hat */
-		printk("%s: recursion depth exceeded: %d\n",
-			__func__, cwq->run_depth);
-		dump_stack();
-	}
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
@@ -318,7 +309,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
-	cwq->run_depth--;
 	spin_unlock_irq(&cwq->lock);
 }
 
@@ -375,29 +365,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	int active;
+	int active = 0;
+	struct wq_barrier barr;
 
-	if (cwq->thread == current) {
-		/*
-		 * Probably keventd trying to flush its own queue. So simply run
-		 * it by hand rather than deadlocking.
-		 */
-		run_workqueue(cwq);
-		active = 1;
-	} else {
-		struct wq_barrier barr;
+	WARN_ON(cwq->thread == current);
 
-		active = 0;
-		spin_lock_irq(&cwq->lock);
-		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-			insert_wq_barrier(cwq, &barr, &cwq->worklist);
-			active = 1;
-		}
-		spin_unlock_irq(&cwq->lock);
-
-		if (active)
-			wait_for_completion(&barr.done);
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
+		active = 1;
 	}
+	spin_unlock_irq(&cwq->lock);
+
+	if (active)
+		wait_for_completion(&barr.done);
 
 	return active;
 }
@@ -423,7 +404,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -554,7 +535,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -925,7 +906,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	cpu_maps_update_done();
 
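Note on flush_cpu_workqueue(): with the self-flush branch gone (a worker thread flushing its own queue now trips WARN_ON instead of calling run_workqueue() on itself), run_workqueue() can no longer recurse, which is why the run_depth field and its depth check above become dead code. Flushing now always goes through the wq_barrier path. The barrier definitions live earlier in kernel/workqueue.c and are outside this diff; a minimal sketch of the pattern, not necessarily the exact code in this tree:

	struct wq_barrier {
		struct work_struct	work;	/* queued on cwq->worklist like any other item */
		struct completion	done;	/* signalled when the barrier item runs */
	};

	static void wq_barrier_func(struct work_struct *work)
	{
		struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

		/* Runs in the worker thread: everything queued ahead of the
		 * barrier has already executed, so wake the flusher. */
		complete(&barr->done);
	}

flush_cpu_workqueue() queues such a barrier via insert_wq_barrier() and sleeps in wait_for_completion(&barr.done); since run_workqueue() services the worklist in FIFO order, the completion firing guarantees that every work item queued before the barrier has finished.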

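Note on the for_each_cpu() hunks: for_each_cpu_mask_nr(cpu, *cpu_map) took the cpumask itself, while for_each_cpu(cpu, cpu_map) takes a const struct cpumask *. This is part of the cpumask rework that keeps masks behind pointers so they can be allocated off-stack when NR_CPUS is large; both variants iterate only up to nr_cpu_ids, so the loop bounds are unchanged. A minimal usage sketch, where ping_mask() is a hypothetical helper and not something in this file:

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	/* Hypothetical helper, for illustration only. */
	static void ping_mask(const struct cpumask *mask)
	{
		int cpu;

		/* Walk every bit set in *mask; the mask is passed by
		 * pointer, so no cpumask copy is made at the call site. */
		for_each_cpu(cpu, mask)
			pr_info("cpu %d is set\n", cpu);
	}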