Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 41 +++++++++++------------------------------
 1 file changed, 11 insertions(+), 30 deletions(-)
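In short: the patch drops the run_depth recursion counter from struct cpu_workqueue_struct, since run_workqueue() is no longer re-entered, and removes the fallback in flush_cpu_workqueue() that ran the queue by hand when flushed from its own worker thread; that case is now flagged with WARN_ON(), and every flush goes through a wq_barrier.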
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9aedd9fd825..32f8e0d2bf5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,8 +48,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	struct task_struct *thread;
-
-	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -262,13 +260,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
-	cwq->run_depth++;
-	if (cwq->run_depth > 3) {
-		/* morton gets to eat his hat */
-		printk("%s: recursion depth exceeded: %d\n",
-			__func__, cwq->run_depth);
-		dump_stack();
-	}
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
@@ -311,7 +302,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
-	cwq->run_depth--;
 	spin_unlock_irq(&cwq->lock);
 }
 
@@ -368,29 +358,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	int active;
+	int active = 0;
+	struct wq_barrier barr;
 
-	if (cwq->thread == current) {
-		/*
-		 * Probably keventd trying to flush its own queue. So simply run
-		 * it by hand rather than deadlocking.
-		 */
-		run_workqueue(cwq);
-		active = 1;
-	} else {
-		struct wq_barrier barr;
+	WARN_ON(cwq->thread == current);
 
-		active = 0;
-		spin_lock_irq(&cwq->lock);
-		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-			insert_wq_barrier(cwq, &barr, &cwq->worklist);
-			active = 1;
-		}
-		spin_unlock_irq(&cwq->lock);
-
-		if (active)
-			wait_for_completion(&barr.done);
-	}
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
+		active = 1;
+	}
+	spin_unlock_irq(&cwq->lock);
+
+	if (active)
+		wait_for_completion(&barr.done);
 
 	return active;
 }
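For reference, here is the post-patch flush_cpu_workqueue() pieced together from the hunks above. This is a sketch only: struct wq_barrier, insert_wq_barrier(), and struct cpu_workqueue_struct are defined elsewhere in kernel/workqueue.c and are not reproduced here.

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	/*
	 * Flushing from the queue's own worker thread would deadlock
	 * waiting on barr.done; warn instead of (as before this patch)
	 * silently running the queue by hand.
	 */
	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		/* Queue a barrier work item behind everything pending... */
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	/* ...and wait outside the lock for the worker to complete it. */
	if (active)
		wait_for_completion(&barr.done);

	return active;
}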
