author		Tejun Heo <tj@kernel.org>	2010-06-29 04:07:13 -0400
committer	Tejun Heo <tj@kernel.org>	2010-06-29 04:07:13 -0400
commit		18aa9effad4adb2c1efe123af4eb24fec9f59b30 (patch)
tree		76975b37ae0ef1ebf460aaab68359ed7a12fe37a /kernel
parent		7a22ad757ec75186ad43a5b4670fa7423ee8f480 (diff)
workqueue: implement WQ_NON_REENTRANT
With gcwq managing all the workers and work->data pointing to the last
gcwq it was on, non-reentrance can be easily implemented by checking
whether the work is still running on the previous gcwq on queueing.
Implement it.

Signed-off-by: Tejun Heo <tj@kernel.org>
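For context only (not part of the commit): a minimal usage sketch of the new
flag. It assumes the alloc_workqueue() interface the cmwq series converges on
around this time; the "my_wq" name and the init function are made up for
illustration.

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical example workqueue */

static int __init my_init(void)
{
	/*
	 * WQ_NON_REENTRANT: a given work item queued on this workqueue
	 * is never executed on two CPUs at the same time.
	 */
	my_wq = alloc_workqueue("my_wq", WQ_NON_REENTRANT, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}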
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c68277c204ab..bce1074bdec1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	/* determine gcwq to use */
+	/*
+	 * Determine gcwq to use.  SINGLE_CPU is inherently
+	 * NON_REENTRANT, so test it first.
+	 */
 	if (!(wq->flags & WQ_SINGLE_CPU)) {
-		/* just use the requested cpu for multicpu workqueues */
+		struct global_cwq *last_gcwq;
+
+		/*
+		 * It's multi cpu.  If @wq is non-reentrant and @work
+		 * was previously on a different cpu, it might still
+		 * be running there, in which case the work needs to
+		 * be queued on that cpu to guarantee non-reentrance.
+		 */
 		gcwq = get_gcwq(cpu);
-		spin_lock_irqsave(&gcwq->lock, flags);
+		if (wq->flags & WQ_NON_REENTRANT &&
+		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+			struct worker *worker;
+
+			spin_lock_irqsave(&last_gcwq->lock, flags);
+
+			worker = find_worker_executing_work(last_gcwq, work);
+
+			if (worker && worker->current_cwq->wq == wq)
+				gcwq = last_gcwq;
+			else {
+				/* meh... not running there, queue here */
+				spin_unlock_irqrestore(&last_gcwq->lock, flags);
+				spin_lock_irqsave(&gcwq->lock, flags);
+			}
+		} else
+			spin_lock_irqsave(&gcwq->lock, flags);
 	} else {
 		unsigned int req_cpu = cpu;
 
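To make the guarantee concrete, here is a hypothetical work function that
benefits from it: with WQ_NON_REENTRANT on the workqueue it is queued on, the
same work item is never run concurrently by workers on two different gcwqs, so
per-item state touched only by this handler needs no lock against a second
instance of itself. All names below are invented for the sketch.

static unsigned long my_event_count;	/* touched only from my_work_fn() */

static void my_work_fn(struct work_struct *work)
{
	/* safe unlocked: no second instance of this item can be running */
	my_event_count++;
}

static DECLARE_WORK(my_work, my_work_fn);

	/* queued from anywhere, possibly while a previous run is in flight */
	queue_work(my_wq, &my_work);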