author	Mathias Krause <minipli@googlemail.com>	2017-09-08 14:57:11 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-10-07 00:10:32 -0400
commit	350ef88e7e922354f82a931897ad4a4ce6c686ff (patch)
tree	1a4e182954c15959d059660ba124117c172fe5a3 /kernel/padata.c
parent	cf5868c8a22dc2854b96e9569064bb92365549ca (diff)
padata: ensure padata_do_serial() runs on the correct CPU
If the algorithm we're parallelizing is asynchronous we might change CPUs between padata_do_parallel() and padata_do_serial(). However, we don't expect this to happen as we need to enqueue the padata object into the per-cpu reorder queue we took it from, i.e. the same-cpu's parallel queue.

Ensure we're not switching CPUs for a given padata object by tracking the CPU within the padata object. If the serial callback gets called on the wrong CPU, defer invoking padata_reorder() via a kernel worker on the CPU we're expected to run on.

Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
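For context: tracking the CPU "within the padata object" relies on a cpu field in struct padata_priv. The corresponding change to include/linux/padata.h is not shown below because the diffstat is limited to kernel/padata.c; the following is only a sketch of what the structure looks like around this series, not part of this diff.

/* include/linux/padata.h -- sketch, not part of the hunks below */
struct padata_priv {
	struct list_head	list;
	struct parallel_data	*pd;
	int			cb_cpu;
	int			cpu;	/* CPU padata_do_parallel() ran on (new) */
	int			info;
	void			(*parallel)(struct padata_priv *padata);
	void			(*serial)(struct padata_priv *padata);
};

padata_do_parallel() records the target CPU here, and padata_do_serial() compares it against the CPU it is currently running on, as the diff below shows.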
Diffstat (limited to 'kernel/padata.c')
-rw-r--r--	kernel/padata.c	20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/kernel/padata.c b/kernel/padata.c
index b4066147bce4..f262c9a4e70a 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -131,6 +131,7 @@ int padata_do_parallel(struct padata_instance *pinst,
 	padata->cb_cpu = cb_cpu;
 
 	target_cpu = padata_cpu_hash(pd);
+	padata->cpu = target_cpu;
 	queue = per_cpu_ptr(pd->pqueue, target_cpu);
 
 	spin_lock(&queue->parallel.lock);
@@ -363,10 +364,21 @@ void padata_do_serial(struct padata_priv *padata)
 	int cpu;
 	struct padata_parallel_queue *pqueue;
 	struct parallel_data *pd;
+	int reorder_via_wq = 0;
 
 	pd = padata->pd;
 
 	cpu = get_cpu();
+
+	/* We need to run on the same CPU padata_do_parallel(.., padata, ..)
+	 * was called on -- or, at least, enqueue the padata object into the
+	 * correct per-cpu queue.
+	 */
+	if (cpu != padata->cpu) {
+		reorder_via_wq = 1;
+		cpu = padata->cpu;
+	}
+
 	pqueue = per_cpu_ptr(pd->pqueue, cpu);
 
 	spin_lock(&pqueue->reorder.lock);
@@ -376,7 +388,13 @@ void padata_do_serial(struct padata_priv *padata)
 
 	put_cpu();
 
-	padata_reorder(pd);
+	/* If we're running on the wrong CPU, call padata_reorder() via a
+	 * kernel worker.
+	 */
+	if (reorder_via_wq)
+		queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
+	else
+		padata_reorder(pd);
 }
 EXPORT_SYMBOL(padata_do_serial);
 
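The reorder_work item queued above, and the per-queue worker callback behind it, come from the parent commit (cf5868c8a22d, "padata: ensure the reorder timer callback runs on the correct CPU") and are not part of this diff. A rough sketch of the deferred path, with names taken from that commit and to be treated as assumptions here:

/* kernel/padata.c -- sketch of the worker introduced by the parent commit */
static void invoke_padata_reorder(struct work_struct *work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	local_bh_disable();
	pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
	pd = pqueue->pd;

	/* Runs on the CPU queue_work_on() targeted, i.e. padata->cpu. */
	padata_reorder(pd);
	local_bh_enable();
}

With this in place, padata_do_serial() either calls padata_reorder() directly when it is already on the CPU recorded in padata->cpu, or hands off to this worker pinned to that CPU via queue_work_on(), so reordering always happens against the per-cpu queue the object was originally enqueued on.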