aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--kernel/workqueue.c41
1 file changed, 11 insertions(+), 30 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3003ecad08f4..b6b966ce1451 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,8 +49,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	struct task_struct *thread;
-
-	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -269,13 +267,6 @@ DEFINE_TRACE(workqueue_execution);
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
-	cwq->run_depth++;
-	if (cwq->run_depth > 3) {
-		/* morton gets to eat his hat */
-		printk("%s: recursion depth exceeded: %d\n",
-			__func__, cwq->run_depth);
-		dump_stack();
-	}
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
@@ -318,7 +309,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
-	cwq->run_depth--;
 	spin_unlock_irq(&cwq->lock);
 }
 
@@ -375,29 +365,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	int active;
+	int active = 0;
+	struct wq_barrier barr;
 
-	if (cwq->thread == current) {
-		/*
-		 * Probably keventd trying to flush its own queue. So simply run
-		 * it by hand rather than deadlocking.
-		 */
-		run_workqueue(cwq);
-		active = 1;
-	} else {
-		struct wq_barrier barr;
+	WARN_ON(cwq->thread == current);
 
-		active = 0;
-		spin_lock_irq(&cwq->lock);
-		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-			insert_wq_barrier(cwq, &barr, &cwq->worklist);
-			active = 1;
-		}
-		spin_unlock_irq(&cwq->lock);
-
-		if (active)
-			wait_for_completion(&barr.done);
-	}
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
+		active = 1;
+	}
+	spin_unlock_irq(&cwq->lock);
+
+	if (active)
+		wait_for_completion(&barr.done);
 
 	return active;
 }