Diffstat (limited to 'crypto/mcryptd.c')
-rw-r--r--   crypto/mcryptd.c | 23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 4e6472658852..eca04d3729b3 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
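The new spin_lock_init() call assumes struct mcryptd_cpu_queue has gained a q_lock member; that field would be added in the matching header change (the definition lives in include/crypto/mcryptd.h, outside this file filter). A minimal sketch of the per-CPU queue layout this hunk initializes, showing only the members this file actually touches -- field order and includes are illustrative, not the authoritative definition:

/* Sketch of the per-CPU queue initialized above; the real definition is in
 * include/crypto/mcryptd.h.  Only fields dereferenced in crypto/mcryptd.c
 * (cpu_queue->queue, ->work, and the new ->q_lock) are shown. */
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <crypto/algapi.h>		/* struct crypto_queue */

struct mcryptd_cpu_queue {
	struct crypto_queue queue;	/* pending requests for this CPU */
	spinlock_t q_lock;		/* serializes enqueue vs. worker dequeue */
	struct work_struct work;	/* runs mcryptd_queue_worker() */
};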
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
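With this hunk, the enqueue path no longer relies on get_cpu()/put_cpu(): disabling preemption only excludes code running on the local CPU, while the per-queue spinlock also excludes the worker even if it ends up touching the same queue from another CPU. raw_cpu_ptr() is sufficient because exclusion now comes from the lock rather than from staying pinned to one CPU. For reference, a sketch of how the function reads with the hunk applied -- reassembled from the context lines above; the second and third parameters are inferred from the uses of 'request' and 'rctx' in the body and may not match the file exactly:

/* mcryptd_enqueue_request() after the change (sketch assembled from the
 * hunk above; parameter list partly inferred). */
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu_queue = raw_cpu_ptr(queue->cpu_queue);	/* no CPU pinning needed:
							 * the lock protects the queue */
	spin_lock(&cpu_queue->q_lock);
	cpu = smp_processor_id();
	rctx->tag.cpu = smp_processor_id();

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	spin_unlock(&cpu_queue->q_lock);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	return err;
}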
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
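Taken together, the last two hunks replace the local_bh_disable()/preempt_disable() pair with spin_lock_bh() on the same q_lock, so the dequeue keeps the bottom-half protection it had before but is now also excluded against an enqueue on a different CPU, and the final re-kick uses queue_work_on() with the current CPU to mirror the enqueue side. A sketch of the worker loop as it reads after both hunks -- the local declarations are inferred from the uses in the hunks, and the completion handling between the hunks is elided because it is not part of this diff:

/* mcryptd_queue_worker() dequeue loop after the change (sketch; elided
 * parts are unchanged by this patch and not shown in the diff). */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {

		spin_lock_bh(&cpu_queue->q_lock);	/* _bh variant keeps the BH
							 * protection local_bh_disable()
							 * used to provide */
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		spin_unlock_bh(&cpu_queue->q_lock);

		if (!req) {
			mcryptd_opportunistic_flush();
			/* ... (rest not shown in this diff) ... */
		}

		/* ... backlog/request completion handling elided ... */
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
}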