 include/linux/workqueue.h |  1 +
 kernel/workqueue.c        | 32 +++++++++++++++++++++++++++++---
 2 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0a7814131e66..07cf5e5f91cb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -225,6 +225,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
 	WQ_FREEZEABLE		= 1 << 0, /* freeze during suspend */
 	WQ_SINGLE_CPU		= 1 << 1, /* only single cpu at a time */
+	WQ_NON_REENTRANT	= 1 << 2, /* guarantee non-reentrance */
 };
 
 extern struct workqueue_struct *
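
For context, a minimal usage sketch of the new flag, not part of this patch: a caller that needs the guarantee passes WQ_NON_REENTRANT at workqueue creation time. This assumes the alloc_workqueue() interface introduced by the same workqueue rework series (at the time of this patch the create_workqueue() family would be used instead); the queue name and work function below are hypothetical.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* ... do work ... */
}
static DECLARE_WORK(my_work, my_work_fn);

static struct workqueue_struct *my_wq;

static int __init my_module_init(void)
{
	/* hypothetical: request non-reentrant execution queue-wide */
	my_wq = alloc_workqueue("my_wq", WQ_NON_REENTRANT, 0);
	if (!my_wq)
		return -ENOMEM;

	/* the same work item will never run concurrently with itself */
	queue_work(my_wq, &my_work);
	return 0;
}
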
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c68277c204ab..bce1074bdec1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	/* determine gcwq to use */
+	/*
+	 * Determine gcwq to use.  SINGLE_CPU is inherently
+	 * NON_REENTRANT, so test it first.
+	 */
 	if (!(wq->flags & WQ_SINGLE_CPU)) {
-		/* just use the requested cpu for multicpu workqueues */
+		struct global_cwq *last_gcwq;
+
+		/*
+		 * It's multi cpu.  If @wq is non-reentrant and @work
+		 * was previously on a different cpu, it might still
+		 * be running there, in which case the work needs to
+		 * be queued on that cpu to guarantee non-reentrance.
+		 */
 		gcwq = get_gcwq(cpu);
-		spin_lock_irqsave(&gcwq->lock, flags);
+		if (wq->flags & WQ_NON_REENTRANT &&
+		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+			struct worker *worker;
+
+			spin_lock_irqsave(&last_gcwq->lock, flags);
+
+			worker = find_worker_executing_work(last_gcwq, work);
+
+			if (worker && worker->current_cwq->wq == wq)
+				gcwq = last_gcwq;
+			else {
+				/* meh... not running there, queue here */
+				spin_unlock_irqrestore(&last_gcwq->lock, flags);
+				spin_lock_irqsave(&gcwq->lock, flags);
+			}
+		} else
+			spin_lock_irqsave(&gcwq->lock, flags);
 	} else {
 		unsigned int req_cpu = cpu;
 
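
To illustrate what the new test in __queue_work() buys, here is a sketch (hypothetical names, not from this patch) of the reentrancy hazard it closes. On a multi-cpu workqueue without WQ_NON_REENTRANT, a work item re-queued from another cpu while still running may execute twice in parallel, so a handler like this would otherwise need its own locking:

#include <linux/workqueue.h>

static unsigned int hit_count;	/* deliberately unsynchronized */

static void my_counter_fn(struct work_struct *work)
{
	/*
	 * Safe only because WQ_NON_REENTRANT guarantees this function
	 * never runs concurrently with itself: __queue_work() above
	 * finds a still-running instance via get_work_gcwq() +
	 * find_worker_executing_work() and queues on that cpu instead.
	 */
	hit_count++;
}

Note the design choice in the else branch: last_gcwq->lock is dropped before gcwq->lock is taken, so the code never holds two gcwq locks at once and no lock ordering between per-cpu gcwqs has to be defined.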
