path: root/crypto/crypto_engine.c
author	Corentin LABBE <clabbe.montjoie@gmail.com>	2018-01-26 14:15:30 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2018-02-15 10:26:50 -0500
commit	218d1cc1860c45b77f6814b44f6f0ffb9e40a82f (patch)
tree	4900e135c45983208dd1fd8a0b6976aba328e909 /crypto/crypto_engine.c
parent	ce09a6c042c446fe86ce92fe7c673fc56470dd8f (diff)
crypto: engine - Permit to enqueue all async requests
The crypto engine could previously only enqueue hash and ablkcipher requests.
This patch permits it to enqueue any type of crypto_async_request.

Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
Tested-by: Fabien Dessenne <fabien.dessenne@st.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto/crypto_engine.c')
-rw-r--r--	crypto/crypto_engine.c	301
1 file changed, 164 insertions(+), 137 deletions(-)
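For context on how a driver is expected to consume the reworked API, the sketch below is illustrative only and not part of the patch; all foo_* names are hypothetical. The driver embeds struct crypto_engine_ctx as the first member of its transform context so the engine can reach the generic callbacks through crypto_tfm_ctx(), fills in the per-request ops, and hands requests to the engine with the per-type transfer helpers.

/* Illustrative driver-side sketch only, not part of this patch; foo_* is made up. */
#include <linux/kernel.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

static struct crypto_engine *foo_engine;	/* from crypto_engine_alloc_init() at probe */

struct foo_tfm_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first: the engine
						 * fetches it via crypto_tfm_ctx() */
	/* driver-private state follows */
};

static int foo_prepare_req(struct crypto_engine *engine, void *areq)
{
	/* e.g. DMA-map the buffers of the request behind 'areq' */
	return 0;
}

static int foo_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = container_of(areq,
				struct skcipher_request, base);

	/* kick the hardware; when completion is signalled the driver calls
	 * crypto_finalize_skcipher_request(engine, req, err)
	 */
	return 0;
}

static int foo_init_tfm(struct crypto_skcipher *tfm)
{
	struct foo_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.prepare_request = foo_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = foo_do_one_req;
	return 0;
}

static int foo_encrypt(struct skcipher_request *req)
{
	/* typically returns -EINPROGRESS or -EBUSY from the engine queue */
	return crypto_transfer_skcipher_request_to_engine(foo_engine, req);
}

The finalize helper both invokes the caller's completion callback and re-arms the request pump for the next queued request, mirroring crypto_finalize_request() introduced in the diff below.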
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 61e7c4e02fd2..992e8d8dcdd9 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -15,13 +15,50 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <crypto/engine.h>
-#include <crypto/internal/hash.h>
 #include <uapi/linux/sched/types.h>
 #include "internal.h"
 
 #define CRYPTO_ENGINE_MAX_QLEN 10
 
 /**
+ * crypto_finalize_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+static void crypto_finalize_request(struct crypto_engine *engine,
+				    struct crypto_async_request *req, int err)
+{
+	unsigned long flags;
+	bool finalize_cur_req = false;
+	int ret;
+	struct crypto_engine_ctx *enginectx;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+	if (engine->cur_req == req)
+		finalize_cur_req = true;
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	if (finalize_cur_req) {
+		enginectx = crypto_tfm_ctx(req->tfm);
+		if (engine->cur_req_prepared &&
+		    enginectx->op.unprepare_request) {
+			ret = enginectx->op.unprepare_request(engine, req);
+			if (ret)
+				dev_err(engine->dev, "failed to unprepare request\n");
+		}
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		engine->cur_req = NULL;
+		engine->cur_req_prepared = false;
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+	}
+
+	req->complete(req, err);
+
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
+}
+
+/**
  * crypto_pump_requests - dequeue one request from engine queue to process
  * @engine: the hardware engine
  * @in_kthread: true if we are in the context of the request pump thread
@@ -34,11 +71,10 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 				 bool in_kthread)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct ahash_request *hreq;
-	struct ablkcipher_request *breq;
 	unsigned long flags;
 	bool was_busy = false;
-	int ret, rtype;
+	int ret;
+	struct crypto_engine_ctx *enginectx;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
 
@@ -94,7 +130,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
 	/* Until here we get the request need to be encrypted successfully */
 	if (!was_busy && engine->prepare_crypt_hardware) {
 		ret = engine->prepare_crypt_hardware(engine);
@@ -104,57 +139,31 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		}
 	}
 
-	switch (rtype) {
-	case CRYPTO_ALG_TYPE_AHASH:
-		hreq = ahash_request_cast(engine->cur_req);
-		if (engine->prepare_hash_request) {
-			ret = engine->prepare_hash_request(engine, hreq);
-			if (ret) {
-				dev_err(engine->dev, "failed to prepare request: %d\n",
-					ret);
-				goto req_err;
-			}
-			engine->cur_req_prepared = true;
-		}
-		ret = engine->hash_one_request(engine, hreq);
-		if (ret) {
-			dev_err(engine->dev, "failed to hash one request from queue\n");
-			goto req_err;
-		}
-		return;
-	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		breq = ablkcipher_request_cast(engine->cur_req);
-		if (engine->prepare_cipher_request) {
-			ret = engine->prepare_cipher_request(engine, breq);
-			if (ret) {
-				dev_err(engine->dev, "failed to prepare request: %d\n",
-					ret);
-				goto req_err;
-			}
-			engine->cur_req_prepared = true;
-		}
-		ret = engine->cipher_one_request(engine, breq);
+	enginectx = crypto_tfm_ctx(async_req->tfm);
+
+	if (enginectx->op.prepare_request) {
+		ret = enginectx->op.prepare_request(engine, async_req);
 		if (ret) {
-			dev_err(engine->dev, "failed to cipher one request from queue\n");
+			dev_err(engine->dev, "failed to prepare request: %d\n",
+				ret);
 			goto req_err;
 		}
-		return;
-	default:
-		dev_err(engine->dev, "failed to prepare request of unknown type\n");
-		return;
+		engine->cur_req_prepared = true;
+	}
+	if (!enginectx->op.do_one_request) {
+		dev_err(engine->dev, "failed to do request\n");
+		ret = -EINVAL;
+		goto req_err;
 	}
+	ret = enginectx->op.do_one_request(engine, async_req);
+	if (ret) {
+		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
+		goto req_err;
+	}
+	return;
 
 req_err:
-	switch (rtype) {
-	case CRYPTO_ALG_TYPE_AHASH:
-		hreq = ahash_request_cast(engine->cur_req);
-		crypto_finalize_hash_request(engine, hreq, ret);
-		break;
-	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		breq = ablkcipher_request_cast(engine->cur_req);
-		crypto_finalize_cipher_request(engine, breq, ret);
-		break;
-	}
+	crypto_finalize_request(engine, async_req, ret);
 	return;
 
 out:
@@ -170,13 +179,12 @@ static void crypto_pump_work(struct kthread_work *work)
 }
 
 /**
- * crypto_transfer_cipher_request - transfer the new request into the
- * enginequeue
+ * crypto_transfer_request - transfer the new request into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_cipher_request(struct crypto_engine *engine,
-				   struct ablkcipher_request *req,
+static int crypto_transfer_request(struct crypto_engine *engine,
+				   struct crypto_async_request *req,
 				   bool need_pump)
 {
 	unsigned long flags;
@@ -189,7 +197,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
 		return -ESHUTDOWN;
 	}
 
-	ret = ablkcipher_enqueue_request(&engine->queue, req);
+	ret = crypto_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
 		kthread_queue_work(engine->kworker, &engine->pump_requests);
@@ -197,102 +205,131 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
 
 /**
- * crypto_transfer_cipher_request_to_engine - transfer one request to list
+ * crypto_transfer_request_to_engine - transfer one request to list
  * into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
-					     struct ablkcipher_request *req)
+static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+					     struct crypto_async_request *req)
 {
-	return crypto_transfer_cipher_request(engine, req, true);
+	return crypto_transfer_request(engine, req, true);
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
 
 /**
- * crypto_transfer_hash_request - transfer the new request into the
- * enginequeue
+ * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
+ * to list into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
+ * TODO: Remove this function when skcipher conversion is finished
  */
-int crypto_transfer_hash_request(struct crypto_engine *engine,
-				 struct ahash_request *req, bool need_pump)
+int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
+						 struct ablkcipher_request *req)
 {
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&engine->queue_lock, flags);
-
-	if (!engine->running) {
-		spin_unlock_irqrestore(&engine->queue_lock, flags);
-		return -ESHUTDOWN;
-	}
-
-	ret = ahash_enqueue_request(&engine->queue, req);
+	return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
 
-	if (!engine->busy && need_pump)
-		kthread_queue_work(engine->kworker, &engine->pump_requests);
+/**
+ * crypto_transfer_aead_request_to_engine - transfer one aead_request
+ * to list into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
+					   struct aead_request *req)
+{
+	return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
 
-	spin_unlock_irqrestore(&engine->queue_lock, flags);
-	return ret;
+/**
+ * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
+ * to list into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
+					       struct akcipher_request *req)
+{
+	return crypto_transfer_request_to_engine(engine, &req->base);
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
 
 /**
- * crypto_transfer_hash_request_to_engine - transfer one request to list
- * into the engine queue
+ * crypto_transfer_hash_request_to_engine - transfer one ahash_request
+ * to list into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
 int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
 					   struct ahash_request *req)
 {
-	return crypto_transfer_hash_request(engine, req, true);
+	return crypto_transfer_request_to_engine(engine, &req->base);
 }
 EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
 
 /**
- * crypto_finalize_cipher_request - finalize one request if the request is done
+ * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
+ * to list into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
+					       struct skcipher_request *req)
+{
+	return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
+
+/**
+ * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
+ * the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
+ * TODO: Remove this function when skcipher conversion is finished
  */
-void crypto_finalize_cipher_request(struct crypto_engine *engine,
+void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
 				    struct ablkcipher_request *req, int err)
 {
-	unsigned long flags;
-	bool finalize_cur_req = false;
-	int ret;
-
-	spin_lock_irqsave(&engine->queue_lock, flags);
-	if (engine->cur_req == &req->base)
-		finalize_cur_req = true;
-	spin_unlock_irqrestore(&engine->queue_lock, flags);
-
-	if (finalize_cur_req) {
-		if (engine->cur_req_prepared &&
-		    engine->unprepare_cipher_request) {
-			ret = engine->unprepare_cipher_request(engine, req);
-			if (ret)
-				dev_err(engine->dev, "failed to unprepare request\n");
-		}
-		spin_lock_irqsave(&engine->queue_lock, flags);
-		engine->cur_req = NULL;
-		engine->cur_req_prepared = false;
-		spin_unlock_irqrestore(&engine->queue_lock, flags);
-	}
+	return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
 
-	req->base.complete(&req->base, err);
+/**
+ * crypto_finalize_aead_request - finalize one aead_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_aead_request(struct crypto_engine *engine,
+				  struct aead_request *req, int err)
+{
+	return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
 
-	kthread_queue_work(engine->kworker, &engine->pump_requests);
+/**
+ * crypto_finalize_akcipher_request - finalize one akcipher_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_akcipher_request(struct crypto_engine *engine,
+				      struct akcipher_request *req, int err)
+{
+	return crypto_finalize_request(engine, &req->base, err);
 }
-EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
 
 /**
- * crypto_finalize_hash_request - finalize one request if the request is done
+ * crypto_finalize_hash_request - finalize one ahash_request if
+ * the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
@@ -300,35 +337,25 @@ EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
 void crypto_finalize_hash_request(struct crypto_engine *engine,
 				  struct ahash_request *req, int err)
 {
-	unsigned long flags;
-	bool finalize_cur_req = false;
-	int ret;
-
-	spin_lock_irqsave(&engine->queue_lock, flags);
-	if (engine->cur_req == &req->base)
-		finalize_cur_req = true;
-	spin_unlock_irqrestore(&engine->queue_lock, flags);
-
-	if (finalize_cur_req) {
-		if (engine->cur_req_prepared &&
-		    engine->unprepare_hash_request) {
-			ret = engine->unprepare_hash_request(engine, req);
-			if (ret)
-				dev_err(engine->dev, "failed to unprepare request\n");
-		}
-		spin_lock_irqsave(&engine->queue_lock, flags);
-		engine->cur_req = NULL;
-		engine->cur_req_prepared = false;
-		spin_unlock_irqrestore(&engine->queue_lock, flags);
-	}
-
-	req->base.complete(&req->base, err);
-
-	kthread_queue_work(engine->kworker, &engine->pump_requests);
+	return crypto_finalize_request(engine, &req->base, err);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
 /**
+ * crypto_finalize_skcipher_request - finalize one skcipher_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_skcipher_request(struct crypto_engine *engine,
+				      struct skcipher_request *req, int err)
+{
+	return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
+
+/**
  * crypto_engine_start - start the hardware engine
  * @engine: the hardware engine need to be started
  *