author:    Petr Mladek <pmladek@suse.com>            2016-10-19 07:54:30 -0400
committer: Herbert Xu <herbert@gondor.apana.org.au>  2016-10-24 23:08:25 -0400
commit:    c4ca2b0b25814cd56665c1c8a7c6254d900a6f11
tree:      25977405aa442aa6211b0daef85b3630289dbdde /crypto/crypto_engine.c
parent:    103600ab966a2f02d8986bbfdf87b762b1c6a06d
crypto: engine - Handle the kthread worker using the new API
Use the new API to create and destroy the crypto engine kthread
worker. The API hides some implementation details.
In particular, kthread_create_worker() allocates and initializes
struct kthread_worker, starts the kthread the right way, and stores
the task_struct pointer in the worker structure.
kthread_destroy_worker() flushes all pending work items, stops the
kthread, and frees the structure.
This patch does not change the existing behavior except for
dynamically allocating struct kthread_worker and storing only a
pointer to it.
The change is compile-tested only because I did not find an easy way
to exercise the code. It should nevertheless be quite safe given the
nature of the change.
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
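For reference, here is the conversion pattern described in the message above,
sketched as a minimal kernel-style example. The my_worker_ctx structure and the
my_work_fn(), my_worker_setup() and my_worker_teardown() names are hypothetical
illustrations and are not part of this patch:

	#include <linux/kthread.h>
	#include <linux/err.h>

	struct my_worker_ctx {
		struct kthread_worker *kworker;	/* allocated by kthread_create_worker() */
		struct kthread_work work;
	};

	static void my_work_fn(struct kthread_work *work)
	{
		/* process one queued item; runs in the worker's kthread */
	}

	static int my_worker_setup(struct my_worker_ctx *ctx)
	{
		/* allocates the worker, then spawns and wakes its kthread */
		ctx->kworker = kthread_create_worker(0, "my-worker");
		if (IS_ERR(ctx->kworker))
			return PTR_ERR(ctx->kworker);

		kthread_init_work(&ctx->work, my_work_fn);
		kthread_queue_work(ctx->kworker, &ctx->work);
		return 0;
	}

	static void my_worker_teardown(struct my_worker_ctx *ctx)
	{
		/* flushes pending work, stops the kthread and frees the worker */
		kthread_destroy_worker(ctx->kworker);
	}

This replaces the old open-coded sequence of kthread_init_worker() plus
kthread_run(kthread_worker_fn, ...) at setup and kthread_flush_worker() plus
kthread_stop() at teardown, which is exactly the substitution the diff below
makes in crypto_engine_alloc_init() and crypto_engine_exit().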
Diffstat (limited to 'crypto/crypto_engine.c')
-rw-r--r--   crypto/crypto_engine.c | 26
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 6989ba0046df..f1bf3418d968 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 	/* If another context is idling then defer */
 	if (engine->idling) {
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 		goto out;
 	}
 
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 	/* Only do teardown in the thread */
 	if (!in_kthread) {
-		kthread_queue_work(&engine->kworker,
+		kthread_queue_work(engine->kworker,
 				   &engine->pump_requests);
 		goto out;
 	}
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
 	ret = ablkcipher_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
 	ret = ahash_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
 
 	req->base.complete(&req->base, err);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
 
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
 
 	req->base.complete(&req->base, err);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
 	engine->running = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	return 0;
 }
@@ -422,11 +422,8 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
 	spin_lock_init(&engine->queue_lock);
 
-	kthread_init_worker(&engine->kworker);
-	engine->kworker_task = kthread_run(kthread_worker_fn,
-					   &engine->kworker, "%s",
-					   engine->name);
-	if (IS_ERR(engine->kworker_task)) {
+	engine->kworker = kthread_create_worker(0, "%s", engine->name);
+	if (IS_ERR(engine->kworker)) {
 		dev_err(dev, "failed to create crypto request pump task\n");
 		return NULL;
 	}
@@ -434,7 +431,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 
 	if (engine->rt) {
 		dev_info(dev, "will run requests pump with realtime priority\n");
-		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
 	}
 
 	return engine;
@@ -455,8 +452,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
 	if (ret)
 		return ret;
 
-	kthread_flush_worker(&engine->kworker);
-	kthread_stop(engine->kworker_task);
+	kthread_destroy_worker(engine->kworker);
 
 	return 0;
 }
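As the commit message notes, struct crypto_engine now stores only a pointer to
the dynamically allocated worker. The matching field change lives in
include/crypto/engine.h and is outside this diffstat; a rough sketch of what it
looks like, with the surrounding fields elided:

	struct crypto_engine {
		/* ... */

		/*
		 * Before this patch the worker was embedded and its task
		 * was tracked separately:
		 *	struct kthread_worker	kworker;
		 *	struct task_struct	*kworker_task;
		 */
		struct kthread_worker	*kworker;
		struct kthread_work	pump_requests;

		/* ... */
	};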