path: root/kernel/workqueue.c
author		Oleg Nesterov <oleg@tv-sign.ru>	2007-05-09 05:33:54 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:51 -0400
commit		83c22520c51bf67529367e8237f95c03fe44e2da
tree		d9798e4f60551bdad0f7d8f1f1d5c69d7c4214c1 /kernel/workqueue.c
parent		edab2516a6c1752e8e5e3d55727cabf12346e5df
flush_cpu_workqueue: don't flush an empty ->worklist
Now that we have ->current_work we can avoid adding a barrier and waiting
for its completion when cwq's queue is empty.

Note: this change is also useful if we change flush_workqueue() to also
check the dead CPUs.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
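For orientation before the diff: a minimal sketch of the flush path the
patch arrives at, paraphrased from the change itself rather than quoted
verbatim from kernel/workqueue.c. The preempt_enable()/preempt_disable()
pair around the wait is omitted here for brevity.

	static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
	{
		struct wq_barrier barr;		/* barrier work + completion */
		int active = 0;

		spin_lock_irq(&cwq->lock);
		/* Queue a barrier only if work is pending or running. */
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);	/* at the tail */
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		/* An idle cwq got no barrier, so there is nothing to wait for. */
		if (active)
			wait_for_completion(&barr.done);
	}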
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5176d51bcc2a..5ecf4984e382 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -404,12 +404,15 @@ static void wq_barrier_func(struct work_struct *work)
 	complete(&barr->done);
 }
 
-static inline void init_wq_barrier(struct wq_barrier *barr)
+static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+					struct wq_barrier *barr, int tail)
 {
 	INIT_WORK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
 	init_completion(&barr->done);
+
+	insert_work(cwq, &barr->work, tail);
 }
 
 static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -428,13 +431,20 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		preempt_disable();
 	} else {
 		struct wq_barrier barr;
+		int active = 0;
 
-		init_wq_barrier(&barr);
-		__queue_work(cwq, &barr.work);
+		spin_lock_irq(&cwq->lock);
+		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+			insert_wq_barrier(cwq, &barr, 1);
+			active = 1;
+		}
+		spin_unlock_irq(&cwq->lock);
 
-		preempt_enable();	/* Can no longer touch *cwq */
-		wait_for_completion(&barr.done);
-		preempt_disable();
+		if (active) {
+			preempt_enable();
+			wait_for_completion(&barr.done);
+			preempt_disable();
+		}
 	}
 }
 
@@ -475,8 +485,7 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
 
 	spin_lock_irq(&cwq->lock);
 	if (unlikely(cwq->current_work == work)) {
-		init_wq_barrier(&barr);
-		insert_work(cwq, &barr.work, 0);
+		insert_wq_barrier(cwq, &barr, 0);
 		running = 1;
 	}
 	spin_unlock_irq(&cwq->lock);