author	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2017-11-30 07:39:27 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-12-11 06:29:54 -0500
commit	9abffc6f2efe46c3564c04312e52e07622d40e51 (patch)
tree	48e15a835828ad250c765564d29c2b09041ddf3d /crypto/mcryptd.c
parent	11edb555966ed2c66c533d17c604f9d7e580a829 (diff)
crypto: mcryptd - protect the per-CPU queue with a lock
mcryptd_enqueue_request() grabs the per-CPU queue struct and protects access to it with disabled preemption. Then it schedules a worker on the same CPU. The worker in mcryptd_queue_worker() guards access to the same per-CPU variable with disabled preemption.

If we take CPU hotplug into account, it is possible that between queue_work_on() and the actual invocation of the worker the CPU goes down and the worker is scheduled on _another_ CPU. There the preempt_disable() protection no longer works. The easiest fix is to add a spin_lock() to guard access to the list.

Another detail: mcryptd_queue_worker() does not process more than MCRYPTD_BATCH invocations in a row. If there are still items left, it invokes queue_work() to proceed with more later. I would suggest simply dropping that check, because it does not use a system workqueue and the workqueue is already marked as "CPU_INTENSIVE"; if preemption is required, the scheduler should do it. However, if queue_work() is used, the work item is marked as CPU-unbound. That means it will try to run on the local CPU but may run on another CPU as well, especially with CONFIG_DEBUG_WQ_FORCE_RR_CPU=y. Again, the preempt_disable() won't work here, but the lock which was introduced will help. In order to keep the work item on the local CPU (and avoid RR), I changed it to queue_work_on().

Cc: stable@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
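As a rough illustration of the pattern this change introduces (a hypothetical, simplified sketch, not the actual mcryptd code; the struct and function names below are made up for the example), each per-CPU queue carries its own spinlock: the producer takes it while enqueueing and notes the CPU under the lock, and the worker takes the same lock while dequeueing, so correctness no longer depends on which CPU the work item ends up running on:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <crypto/algapi.h>

/* Hypothetical per-CPU queue; mirrors the shape of mcryptd_cpu_queue. */
struct example_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
	spinlock_t q_lock;		/* protects 'queue' */
};

static int example_enqueue(struct example_cpu_queue __percpu *queues,
			   struct crypto_async_request *req,
			   struct workqueue_struct *wq)
{
	struct example_cpu_queue *q = raw_cpu_ptr(queues);
	int cpu, err;

	spin_lock(&q->q_lock);
	/* On !PREEMPT_RT, spin_lock() disables preemption, so the CPU id is stable here. */
	cpu = smp_processor_id();
	err = crypto_enqueue_request(&q->queue, req);
	spin_unlock(&q->q_lock);

	/* Pin the work item to the CPU whose queue was just used. */
	queue_work_on(cpu, wq, &q->work);
	return err;
}

static void example_worker(struct work_struct *work)
{
	struct example_cpu_queue *q =
		container_of(work, struct example_cpu_queue, work);
	struct crypto_async_request *req;

	/* Same lock as the producer; _bh because completions may run in BH context. */
	spin_lock_bh(&q->q_lock);
	req = crypto_dequeue_request(&q->queue);
	spin_unlock_bh(&q->q_lock);

	if (req)
		req->complete(req, 0);
}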
Diffstat (limited to 'crypto/mcryptd.c')
-rw-r--r--	crypto/mcryptd.c	23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 4e6472658852..eca04d3729b3 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
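For context on the last hunk: queue_work() queues the item as CPU-unbound, so the workqueue core is free to run it on another CPU (and does so deliberately with CONFIG_DEBUG_WQ_FORCE_RR_CPU=y), whereas queue_work_on(smp_processor_id(), ...) keeps the re-queued batch on the local CPU. From include/linux/workqueue.h of this kernel generation (paraphrased):

static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	/* CPU-unbound: the item may run on any CPU */
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}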