author     Baolin Wang <baolin.wang@linaro.org>      2016-01-26 07:25:39 -0500
committer  Herbert Xu <herbert@gondor.apana.org.au>  2016-02-01 09:27:02 -0500
commit     735d37b5424b27aa685276b8b90b7e57c4705ac1 (patch)
tree       93fa6b3f48a144f4209d02c4b1c0a69677093a6b /crypto/crypto_engine.c
parent     9f93a8a0ba91fa3babe76a428e6c24f4c39f125e (diff)
crypto: engine - Introduce the block request crypto engine framework
Currently, block cipher engines need to implement and maintain their own queue/thread for processing requests. Moreover, the existing helpers cover only the queue itself (crypto_enqueue_request() and crypto_dequeue_request()); they do not help with the mechanics of driving the hardware (things like running a request immediately, DMA-mapping it, or providing a thread to process the queue in), even though much of that code really shouldn't vary from device to device. This patch therefore provides a mechanism, which drivers can use, for pushing requests to the hardware as it becomes free. The framework is patterned on the SPI core, where the same approach has worked out well (https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/drivers/spi/spi.c?id=ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0).

Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
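For orientation, here is a minimal sketch of how a driver could hook into this framework. All my_* names and the platform-driver plumbing are hypothetical; the engine callbacks (prepare_request, crypt_one_request) and the crypto_engine_*/crypto_transfer_*/crypto_finalize_* calls are the ones introduced by this patch (declared in crypto/algapi.h at this point in time):

/*
 * Sketch only: my_* identifiers are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>

static struct crypto_engine *my_engine;

/* Per-request setup run by the pump thread, e.g. DMA-mapping buffers */
static int my_prepare_request(struct crypto_engine *engine,
                              struct ablkcipher_request *req)
{
        return 0;
}

/* Kick the hardware; completion is reported later from the IRQ handler */
static int my_crypt_one_request(struct crypto_engine *engine,
                                struct ablkcipher_request *req)
{
        return 0;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
        /* Hand the finished request back; this also re-kicks the pump */
        crypto_finalize_request(my_engine, my_engine->cur_req, 0);
        return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
        my_engine = crypto_engine_alloc_init(&pdev->dev, true);
        if (!my_engine)
                return -ENOMEM;

        my_engine->prepare_request = my_prepare_request;
        my_engine->crypt_one_request = my_crypt_one_request;

        return crypto_engine_start(my_engine);
}

/* The cipher's .encrypt handler only enqueues; the pump does the rest */
static int my_encrypt(struct ablkcipher_request *req)
{
        return crypto_transfer_request_to_engine(my_engine, req);
}

Because a single kthread worker pumps the queue, hardware access is naturally serialized: my_crypt_one_request() can just start the transfer and return, and the next request is only dispatched after crypto_finalize_request() is called.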
Diffstat (limited to 'crypto/crypto_engine.c')
-rw-r--r--   crypto/crypto_engine.c   355
1 file changed, 355 insertions(+), 0 deletions(-)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
new file mode 100644
index 000000000000..a55c82dd48ef
--- /dev/null
+++ b/crypto/crypto_engine.c
@@ -0,0 +1,355 @@
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

void crypto_finalize_request(struct crypto_engine *engine,
                             struct ablkcipher_request *req, int err);

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        struct ablkcipher_request *req;
        unsigned long flags;
        bool was_busy = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                queue_kthread_work(&engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        queue_kthread_work(&engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        pr_err("failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        req = ablkcipher_request_cast(async_req);

        engine->cur_req = req;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* We have successfully dequeued a request that now needs processing */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        pr_err("failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        if (engine->prepare_request) {
                ret = engine->prepare_request(engine, engine->cur_req);
                if (ret) {
                        pr_err("failed to prepare request: %d\n", ret);
                        goto req_err;
                }
                engine->cur_req_prepared = true;
        }

        ret = engine->crypt_one_request(engine, engine->cur_req);
        if (ret) {
                pr_err("failed to crypt one request from queue\n");
                goto req_err;
        }
        return;

req_err:
        crypto_finalize_request(engine, engine->cur_req, ret);
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, kick the pump worker after enqueueing the request
 */
int crypto_transfer_request(struct crypto_engine *engine,
                            struct ablkcipher_request *req, bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ablkcipher_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                queue_kthread_work(&engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue and kick off the pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                      struct ablkcipher_request *req)
{
        return crypto_transfer_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_request(struct crypto_engine *engine,
                             struct ablkcipher_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == req)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared && engine->unprepare_request) {
                        ret = engine->unprepare_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        queue_kthread_work(&engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait
         * for a while for the pump to drain the requests from the queue.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                pr_warn("could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);

        init_kthread_worker(&engine->kworker);
        engine->kworker_task = kthread_run(kthread_worker_fn,
                                           &engine->kworker, "%s",
                                           engine->name);
        if (IS_ERR(engine->kworker_task)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        init_kthread_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        flush_kthread_worker(&engine->kworker);
        kthread_stop(engine->kworker_task);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
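
For completeness, driver teardown would mirror the init sketch above, again with a hypothetical my_remove():

static int my_remove(struct platform_device *pdev)
{
        /*
         * crypto_engine_exit() first calls crypto_engine_stop(), which
         * waits up to 500 * 20ms = 10s for the queue to drain, then
         * flushes and stops the kthread worker. The engine itself was
         * devm-allocated, so it is freed along with the device.
         */
        return crypto_engine_exit(my_engine);
}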