author    Tejun Heo <tj@kernel.org>  2010-06-29 04:07:15 -0400
committer Tejun Heo <tj@kernel.org>  2010-06-29 04:07:15 -0400
commit    fb0e7beb5c1b6fb4da786ba709d7138373d5fb22
tree      e222b2238ed691ced6eaeb47733f7e4c2b9a3c37  /kernel/workqueue.c
parent    649027d73a6309ac34dc2886362e662bd73456dc
workqueue: implement cpu intensive workqueue
This patch implements cpu intensive workqueues, which can be requested
with the WQ_CPU_INTENSIVE flag on creation.  Works queued to a cpu
intensive workqueue don't participate in concurrency management.  IOW,
they don't contribute to gcwq->nr_running and thus don't delay the
execution of other works.

Note that although cpu intensive works won't delay other works, they
can be delayed by other works.  Combine with WQ_HIGHPRI to avoid being
delayed by other works too.

As the name suggests, this is useful when using workqueue for cpu
intensive works.  Workers executing cpu intensive works are not
considered for workqueue concurrency management and are left for the
scheduler to manage.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
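For illustration, a minimal sketch of how a caller might use the new
flag, assuming the alloc_workqueue() interface introduced in the same
series; the my_wq/my_work_fn names are hypothetical, not part of this
patch:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* long-running, cpu bound computation goes here */
}

static int __init my_init(void)
{
	/*
	 * Works on this wq don't count toward gcwq->nr_running and
	 * thus never stall other works.  OR in WQ_HIGHPRI as well if
	 * these works must not be delayed by others either.
	 */
	my_wq = alloc_workqueue("my_wq", WQ_CPU_INTENSIVE, 0);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");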
Diffstat (limited to 'kernel/workqueue.c')

 kernel/workqueue.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5775717288d5..6fa847c5c5e9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -52,8 +52,10 @@ enum {
 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
+	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
 
-	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND,
+	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
+				  WORKER_CPU_INTENSIVE,
 
 	/* gcwq->trustee_state */
 	TRUSTEE_START		= 0,		/* start */
@@ -1641,6 +1643,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct global_cwq *gcwq = cwq->gcwq;
 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
+	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 	work_func_t f = work->func;
 	int work_color;
 	struct worker *collision;
@@ -1692,6 +1695,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 		gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
 	}
 
+	/*
+	 * CPU intensive works don't participate in concurrency
+	 * management.  They're the scheduler's responsibility.
+	 */
+	if (unlikely(cpu_intensive))
+		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
@@ -1713,6 +1723,10 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 
 	spin_lock_irq(&gcwq->lock);
 
+	/* clear cpu intensive status */
+	if (unlikely(cpu_intensive))
+		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+
 	/* we're done with it, release */
 	hlist_del_init(&worker->hentry);
 	worker->current_work = NULL;
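Why the wakeup argument matters: WORKER_CPU_INTENSIVE is folded into
WORKER_NOT_RUNNING above, so setting it drops the worker out of
gcwq->nr_running accounting.  A simplified sketch of what
worker_set_flags() does on that transition, paraphrased from this
series rather than quoted verbatim:

static inline void worker_set_flags(struct worker *worker,
				    unsigned int flags, bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;

	/*
	 * Transitioning into a NOT_RUNNING state (WORKER_CPU_INTENSIVE
	 * is one) removes the worker from nr_running so concurrency
	 * management stops counting it as running.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

		if (wakeup) {
			/*
			 * If this was the last running worker and works
			 * are still pending, wake an idle worker so the
			 * cpu intensive work doesn't stall the queue.
			 */
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

This is why process_one_work() passes true for wakeup when setting the
flag: if the cpu intensive work was the only runner, another worker is
woken so the rest of the queue keeps flowing.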