about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
-rw-r--r--	include/linux/workqueue.h	| 1
-rw-r--r--	kernel/workqueue.c	| 16
2 files changed, 16 insertions, 1 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 006dcf7e808a..3f36d37ac5ba 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -232,6 +232,7 @@ enum {
 	WQ_NON_REENTRANT	= 1 << 2, /* guarantee non-reentrance */
 	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
+	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5775717288d5..6fa847c5c5e9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -52,8 +52,10 @@ enum {
 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
+	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
 
-	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND,
+	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
+				  WORKER_CPU_INTENSIVE,
 
 	/* gcwq->trustee_state */
 	TRUSTEE_START		= 0,		/* start */
@@ -1641,6 +1643,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct global_cwq *gcwq = cwq->gcwq;
 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
+	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 	work_func_t f = work->func;
 	int work_color;
 	struct worker *collision;
@@ -1692,6 +1695,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 		gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
 	}
 
+	/*
+	 * CPU intensive works don't participate in concurrency
+	 * management.  They're the scheduler's responsibility.
+	 */
+	if (unlikely(cpu_intensive))
+		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
@@ -1713,6 +1723,10 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 
 	spin_lock_irq(&gcwq->lock);
 
+	/* clear cpu intensive status */
+	if (unlikely(cpu_intensive))
+		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+
 	/* we're done with it, release */
 	hlist_del_init(&worker->hentry);
 	worker->current_work = NULL;