author     Daniel Jordan <daniel.m.jordan@oracle.com>   2019-09-05 21:40:27 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>     2019-09-13 07:15:40 -0400
commit     45d153c08bc73c8ced640dc20d8f2b749a6cb0d0 (patch)
tree       52e892c2a0ed227a9da6a081fd7b842bc69fa5b4 /kernel/padata.c
parent     cc491d8e6486c56e07e60d9992cd56f63dc9fd6c (diff)
padata: use separate workqueues for parallel and serial work
padata currently uses one per-CPU workqueue per instance for all work.
Prepare for running parallel jobs on an unbound workqueue by introducing
dedicated workqueues for parallel and serial work.
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'kernel/padata.c')
-rw-r--r--   kernel/padata.c   28
1 file changed, 18 insertions(+), 10 deletions(-)
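
Note: the diffstat above is limited to kernel/padata.c, so the matching change to include/linux/padata.h is not shown here. Judging from the fields this patch uses (pinst->parallel_wq and pinst->serial_wq in place of the old pinst->wq), struct padata_instance trades its single workqueue pointer for two dedicated ones; a rough sketch of just the affected members, with everything else omitted:

struct padata_instance {
        ...
        struct workqueue_struct *parallel_wq;   /* parallel jobs; replaces the single *wq */
        struct workqueue_struct *serial_wq;     /* serialization callbacks and reorder work */
        ...
};

Both workqueues are still allocated with WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE and a max_active of 1; per the commit message, the split prepares for later moving parallel jobs onto an unbound workqueue.
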
diff --git a/kernel/padata.c b/kernel/padata.c
index 8a362923c488..669f5d53d357 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -152,7 +152,7 @@ int padata_do_parallel(struct padata_instance *pinst,
         list_add_tail(&padata->list, &queue->parallel.list);
         spin_unlock(&queue->parallel.lock);
 
-        queue_work_on(target_cpu, pinst->wq, &queue->work);
+        queue_work_on(target_cpu, pinst->parallel_wq, &queue->work);
 
 out:
         rcu_read_unlock_bh();
@@ -261,7 +261,7 @@ static void padata_reorder(struct parallel_data *pd)
                 list_add_tail(&padata->list, &squeue->serial.list);
                 spin_unlock(&squeue->serial.lock);
 
-                queue_work_on(cb_cpu, pinst->wq, &squeue->work);
+                queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
         }
 
         spin_unlock_bh(&pd->lock);
@@ -278,7 +278,7 @@ static void padata_reorder(struct parallel_data *pd)
 
         next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
         if (!list_empty(&next_queue->reorder.list))
-                queue_work(pinst->wq, &pd->reorder_work);
+                queue_work(pinst->serial_wq, &pd->reorder_work);
 }
 
 static void invoke_padata_reorder(struct work_struct *work)
@@ -818,7 +818,8 @@ static void __padata_free(struct padata_instance *pinst)
         padata_free_pd(pinst->pd);
         free_cpumask_var(pinst->cpumask.pcpu);
         free_cpumask_var(pinst->cpumask.cbcpu);
-        destroy_workqueue(pinst->wq);
+        destroy_workqueue(pinst->serial_wq);
+        destroy_workqueue(pinst->parallel_wq);
         kfree(pinst);
 }
 
@@ -967,18 +968,23 @@ static struct padata_instance *padata_alloc(const char *name,
         if (!pinst)
                 goto err;
 
-        pinst->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
-                                    1, name);
-        if (!pinst->wq)
+        pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_MEM_RECLAIM |
+                                             WQ_CPU_INTENSIVE, 1, name);
+        if (!pinst->parallel_wq)
                 goto err_free_inst;
 
         get_online_cpus();
 
-        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+        pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
+                                           WQ_CPU_INTENSIVE, 1, name);
+        if (!pinst->serial_wq)
                 goto err_put_cpus;
+
+        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+                goto err_free_serial_wq;
         if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                 free_cpumask_var(pinst->cpumask.pcpu);
-                goto err_put_cpus;
+                goto err_free_serial_wq;
         }
         if (!padata_validate_cpumask(pinst, pcpumask) ||
             !padata_validate_cpumask(pinst, cbcpumask))
@@ -1010,9 +1016,11 @@ static struct padata_instance *padata_alloc(const char *name,
 err_free_masks:
         free_cpumask_var(pinst->cpumask.pcpu);
         free_cpumask_var(pinst->cpumask.cbcpu);
+err_free_serial_wq:
+        destroy_workqueue(pinst->serial_wq);
 err_put_cpus:
         put_online_cpus();
-        destroy_workqueue(pinst->wq);
+        destroy_workqueue(pinst->parallel_wq);
 err_free_inst:
         kfree(pinst);
 err: