author     Mike Galbraith <umgwanakikbuti@gmail.com>   2016-02-09 17:59:38 -0500
committer  Tejun Heo <tj@kernel.org>                   2016-02-09 17:59:38 -0500
commit     ef557180447fa9a7a0affd3abb21ecceb4b5e125 (patch)
tree       b55840888c855b2c605df724a3b647ee82543622 /kernel
parent     041bd12e272c53a35c54c13875839bcb98c999ce (diff)
workqueue: schedule WORK_CPU_UNBOUND work on wq_unbound_cpumask CPUs
WORK_CPU_UNBOUND work items queued to a bound workqueue always run
locally. This is a good thing normally, but not when the user has
asked us to keep unbound work away from certain CPUs. Round-robin
these to wq_unbound_cpumask CPUs instead, as perturbation avoidance
trumps performance.
tj: Cosmetic and comment changes. WARN_ON_ONCE() dropped for the case
where (wq_unbound_cpumask AND cpu_online_mask) is empty. If we want
that, it should be done when the config changes.
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
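
To make the round-robin policy concrete before the diff, here is a minimal
userspace model of the selection logic -- a sketch, not kernel code. The
bitmask "allowed" stands in for wq_unbound_cpumask AND cpu_online_mask, and a
single static "last" stands in for the per-CPU wq_rr_cpu_last; all names in
it are illustrative.

/*
 * Userspace model of the selection logic: prefer the local CPU when the
 * mask allows it, otherwise scan upward from the last pick and wrap.
 */
#include <stdio.h>

#define NR_CPUS 8

static int last = -1;	/* stands in for this CPU's wq_rr_cpu_last */

static int select_unbound_cpu(int cpu, unsigned int allowed)
{
	int new_cpu;

	if (allowed & (1u << cpu))	/* local CPU allowed: stay local */
		return cpu;
	if (!allowed)			/* empty mask: fall back to local */
		return cpu;

	/* like cpumask_next_and(): first allowed CPU above the last pick */
	for (new_cpu = last + 1; new_cpu < NR_CPUS; new_cpu++)
		if (allowed & (1u << new_cpu))
			goto found;
	/* like cpumask_first_and(): wrap around to the lowest allowed CPU */
	for (new_cpu = 0; new_cpu < NR_CPUS; new_cpu++)
		if (allowed & (1u << new_cpu))
			goto found;
	return cpu;
found:
	last = new_cpu;
	return new_cpu;
}

int main(void)
{
	unsigned int allowed = 0x0c;	/* only CPUs 2 and 3 allowed */
	int i;

	for (i = 0; i < 5; i++)		/* prints 2, 3, 2, 3, 2 */
		printf("queue from CPU 0 -> CPU %d\n",
		       select_unbound_cpu(0, allowed));
	return 0;
}

Queueing repeatedly from CPU 0 with only CPUs 2-3 allowed alternates
2, 3, 2, 3, ... -- exactly the perturbation-avoidance behavior the patch
is after.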
Diffstat (limited to 'kernel')
 kernel/workqueue.c | 34 ++++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5e63d3b719ae..054774605d2f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -301,7 +301,11 @@ static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
 
-static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */
+/* PL: allowable cpus for unbound wqs and work items */
+static cpumask_var_t wq_unbound_cpumask;
+
+/* CPU where unbound work was last round robin scheduled from this CPU */
+static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
@@ -1298,6 +1302,32 @@ static bool is_chained_work(struct workqueue_struct *wq)
 	return worker && worker->current_pwq->wq == wq;
 }
 
+/*
+ * When queueing an unbound work item to a wq, prefer local CPU if allowed
+ * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
+ * avoid perturbing sensitive tasks.
+ */
+static int wq_select_unbound_cpu(int cpu)
+{
+	int new_cpu;
+
+	if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+		return cpu;
+	if (cpumask_empty(wq_unbound_cpumask))
+		return cpu;
+
+	new_cpu = __this_cpu_read(wq_rr_cpu_last);
+	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
+	if (unlikely(new_cpu >= nr_cpu_ids)) {
+		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
+		if (unlikely(new_cpu >= nr_cpu_ids))
+			return cpu;
+	}
+	__this_cpu_write(wq_rr_cpu_last, new_cpu);
+
+	return new_cpu;
+}
+
 static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -1323,7 +1353,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		return;
 retry:
 	if (req_cpu == WORK_CPU_UNBOUND)
-		cpu = raw_smp_processor_id();
+		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
 
 	/* pwq which will be used unless @work is executing elsewhere */
 	if (!(wq->flags & WQ_UNBOUND))
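
For context on how the changed line is reached: queue_work() expands to
queue_work_on(WORK_CPU_UNBOUND, ...), so a plain queue_work() on a bound
workqueue such as system_wq now goes through wq_select_unbound_cpu(). A
hedged demo-module sketch follows; the demo_* names are hypothetical, while
queue_work(), system_wq, DECLARE_WORK() and flush_work() are the stock
workqueue API.

#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Report where the work item actually ran. */
static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/*
	 * queue_work() uses WORK_CPU_UNBOUND: before this patch the item
	 * always ran on the queueing CPU; after it, the CPU is picked from
	 * wq_unbound_cpumask when the local CPU is excluded.
	 */
	queue_work(system_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With the low-level mask restricted (it is exposed via
/sys/devices/virtual/workqueue/cpumask), the printed CPU should fall inside
that mask rather than on the queueing CPU.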