| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-26 14:04:34 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-26 14:04:34 -0400 |
| commit | 562f477a54478002ddfbb5b85627c009ca41e71d (patch) | |
| tree | 52384cc554ae64cc7a26878d64d606f40fd703ce /crypto/cryptd.c | |
| parent | ada19a31a90b4f46c040c25ef4ef8ffc203c7fc6 (diff) | |
| parent | 949abe574739848b1e68271fbac86c3cb4506aad (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (29 commits)
crypto: sha512-s390 - Add missing block size
hwrng: timeriomem - Breaks an allyesconfig build on s390:
nlattr: Fix build error with NET off
crypto: testmgr - add zlib test
crypto: zlib - New zlib crypto module, using pcomp
crypto: testmgr - Add support for the pcomp interface
crypto: compress - Add pcomp interface
netlink: Move netlink attribute parsing support to lib
crypto: Fix dead links
hwrng: timeriomem - New driver
crypto: chainiv - Use kcrypto_wq instead of keventd_wq
crypto: cryptd - Per-CPU thread implementation based on kcrypto_wq
crypto: api - Use dedicated workqueue for crypto subsystem
crypto: testmgr - Test skciphers with no IVs
crypto: aead - Avoid infinite loop when nivaead fails selftest
crypto: skcipher - Avoid infinite loop when cipher fails selftest
crypto: api - Fix crypto_alloc_tfm/create_create_tfm return convention
crypto: api - crypto_alg_mod_lookup either tested or untested
crypto: amcc - Add crypt4xx driver
crypto: ansi_cprng - Add maintainer
...
Diffstat (limited to 'crypto/cryptd.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | crypto/cryptd.c | 237 |
1 files changed, 129 insertions, 108 deletions
```diff
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index d29e06b350ff..d14b22658d7a 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -12,30 +12,31 @@
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
 
-struct cryptd_state {
-	spinlock_t lock;
-	struct mutex mutex;
+struct cryptd_cpu_queue {
 	struct crypto_queue queue;
-	struct task_struct *task;
+	struct work_struct work;
+};
+
+struct cryptd_queue {
+	struct cryptd_cpu_queue *cpu_queue;
 };
 
 struct cryptd_instance_ctx {
 	struct crypto_spawn spawn;
-	struct cryptd_state *state;
+	struct cryptd_queue *queue;
 };
 
 struct cryptd_blkcipher_ctx {
@@ -54,11 +55,85 @@ struct cryptd_hash_request_ctx {
 	crypto_completion_t complete;
 };
 
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static void cryptd_queue_worker(struct work_struct *work);
+
+static int cryptd_init_queue(struct cryptd_queue *queue,
+			     unsigned int max_cpu_qlen)
+{
+	int cpu;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+	if (!queue->cpu_queue)
+		return -ENOMEM;
+	for_each_possible_cpu(cpu) {
+		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+	}
+	return 0;
+}
+
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+	int cpu;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	for_each_possible_cpu(cpu) {
+		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+		BUG_ON(cpu_queue->queue.qlen);
+	}
+	free_percpu(queue->cpu_queue);
+}
+
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+				  struct crypto_async_request *request)
+{
+	int cpu, err;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	cpu = get_cpu();
+	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+	err = crypto_enqueue_request(&cpu_queue->queue, request);
+	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+	put_cpu();
+
+	return err;
+}
+
+/* Called in workqueue context, do one real cryption work (via
+ * req->complete) and reschedule itself if there are more work to
+ * do. */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+	struct cryptd_cpu_queue *cpu_queue;
+	struct crypto_async_request *req, *backlog;
+
+	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+	/* Only handle one request at a time to avoid hogging crypto
+	 * workqueue. preempt_disable/enable is used to prevent
+	 * being preempted by cryptd_enqueue_request() */
+	preempt_disable();
+	backlog = crypto_get_backlog(&cpu_queue->queue);
+	req = crypto_dequeue_request(&cpu_queue->queue);
+	preempt_enable();
+
+	if (!req)
+		return;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+	req->complete(req, 0);
+
+	if (cpu_queue->queue.qlen)
+		queue_work(kcrypto_wq, &cpu_queue->work);
+}
+
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 {
 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-	return ictx->state;
+	return ictx->queue;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -130,19 +205,13 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 {
 	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct cryptd_state *state =
-		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
-	int err;
+	struct cryptd_queue *queue;
 
+	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
 	rctx->complete = req->base.complete;
 	req->base.complete = complete;
 
-	spin_lock_bh(&state->lock);
-	err = ablkcipher_enqueue_request(&state->queue, req);
-	spin_unlock_bh(&state->lock);
-
-	wake_up_process(state->task);
-	return err;
+	return cryptd_enqueue_request(queue, &req->base);
 }
 
 static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
@@ -176,21 +245,12 @@ static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct cryptd_state *state = cryptd_get_state(tfm);
-	int active;
-
-	mutex_lock(&state->mutex);
-	active = ablkcipher_tfm_in_queue(&state->queue,
-					 __crypto_ablkcipher_cast(tfm));
-	mutex_unlock(&state->mutex);
-
-	BUG_ON(active);
 
 	crypto_free_blkcipher(ctx->child);
 }
 
 static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
-						     struct cryptd_state *state)
+						     struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct cryptd_instance_ctx *ctx;
@@ -213,7 +273,7 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
 	if (err)
 		goto out_free_inst;
 
-	ctx->state = state;
+	ctx->queue = queue;
 
 	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 
@@ -231,7 +291,7 @@ out_free_inst:
 }
 
 static struct crypto_instance *cryptd_alloc_blkcipher(
-	struct rtattr **tb, struct cryptd_state *state)
+	struct rtattr **tb, struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
@@ -241,7 +301,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);
 
-	inst = cryptd_alloc_instance(alg, state);
+	inst = cryptd_alloc_instance(alg, queue);
 	if (IS_ERR(inst))
 		goto out_put_alg;
 
@@ -289,15 +349,6 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
 static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct cryptd_state *state = cryptd_get_state(tfm);
-	int active;
-
-	mutex_lock(&state->mutex);
-	active = ahash_tfm_in_queue(&state->queue,
-				    __crypto_ahash_cast(tfm));
-	mutex_unlock(&state->mutex);
-
-	BUG_ON(active);
 
 	crypto_free_hash(ctx->child);
 }
@@ -323,19 +374,13 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
 {
 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct cryptd_state *state =
-		cryptd_get_state(crypto_ahash_tfm(tfm));
-	int err;
+	struct cryptd_queue *queue =
+		cryptd_get_queue(crypto_ahash_tfm(tfm));
 
 	rctx->complete = req->base.complete;
 	req->base.complete = complete;
 
-	spin_lock_bh(&state->lock);
-	err = ahash_enqueue_request(&state->queue, req);
-	spin_unlock_bh(&state->lock);
-
-	wake_up_process(state->task);
-	return err;
+	return cryptd_enqueue_request(queue, &req->base);
 }
 
 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
@@ -468,7 +513,7 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
 }
 
 static struct crypto_instance *cryptd_alloc_hash(
-	struct rtattr **tb, struct cryptd_state *state)
+	struct rtattr **tb, struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
@@ -478,7 +523,7 @@ static struct crypto_instance *cryptd_alloc_hash(
 	if (IS_ERR(alg))
 		return ERR_PTR(PTR_ERR(alg));
 
-	inst = cryptd_alloc_instance(alg, state);
+	inst = cryptd_alloc_instance(alg, queue);
 	if (IS_ERR(inst))
 		goto out_put_alg;
 
@@ -502,7 +547,7 @@ out_put_alg:
 	return inst;
 }
 
-static struct cryptd_state state;
+static struct cryptd_queue queue;
 
 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 {
@@ -514,9 +559,9 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
-		return cryptd_alloc_blkcipher(tb, &state);
+		return cryptd_alloc_blkcipher(tb, &queue);
 	case CRYPTO_ALG_TYPE_DIGEST:
-		return cryptd_alloc_hash(tb, &state);
+		return cryptd_alloc_hash(tb, &queue);
 	}
 
 	return ERR_PTR(-EINVAL);
@@ -537,82 +582,58 @@ static struct crypto_template cryptd_tmpl = {
 	.module = THIS_MODULE,
 };
 
-static inline int cryptd_create_thread(struct cryptd_state *state,
-				       int (*fn)(void *data), const char *name)
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+						  u32 type, u32 mask)
 {
-	spin_lock_init(&state->lock);
-	mutex_init(&state->mutex);
-	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
-	state->task = kthread_run(fn, state, name);
-	if (IS_ERR(state->task))
-		return PTR_ERR(state->task);
+	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct crypto_ablkcipher *tfm;
+
+	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-EINVAL);
+	tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+	if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_ablkcipher(tfm);
+		return ERR_PTR(-EINVAL);
+	}
 
-	return 0;
+	return __cryptd_ablkcipher_cast(tfm);
}
+EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
 
-static inline void cryptd_stop_thread(struct cryptd_state *state)
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 {
-	BUG_ON(state->queue.qlen);
-	kthread_stop(state->task);
+	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+	return ctx->child;
 }
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
-static int cryptd_thread(void *data)
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-	struct cryptd_state *state = data;
-	int stop;
-
-	current->flags |= PF_NOFREEZE;
-
-	do {
-		struct crypto_async_request *req, *backlog;
-
-		mutex_lock(&state->mutex);
-		__set_current_state(TASK_INTERRUPTIBLE);
-
-		spin_lock_bh(&state->lock);
-		backlog = crypto_get_backlog(&state->queue);
-		req = crypto_dequeue_request(&state->queue);
-		spin_unlock_bh(&state->lock);
-
-		stop = kthread_should_stop();
-
-		if (stop || req) {
-			__set_current_state(TASK_RUNNING);
-			if (req) {
-				if (backlog)
-					backlog->complete(backlog,
-							  -EINPROGRESS);
-				req->complete(req, 0);
-			}
-		}
-
-		mutex_unlock(&state->mutex);
-
-		schedule();
-	} while (!stop);
-
-	return 0;
+	crypto_free_ablkcipher(&tfm->base);
 }
+EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
 static int __init cryptd_init(void)
 {
 	int err;
 
-	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
 	if (err)
 		return err;
 
 	err = crypto_register_template(&cryptd_tmpl);
 	if (err)
-		kthread_stop(state.task);
+		cryptd_fini_queue(&queue);
 
 	return err;
 }
 
 static void __exit cryptd_exit(void)
 {
-	cryptd_stop_thread(&state);
+	cryptd_fini_queue(&queue);
 	crypto_unregister_template(&cryptd_tmpl);
 }
 
```
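The tail of this diff exports a small public API for other kernel code: cryptd_alloc_ablkcipher() builds a "cryptd(...)" instance around an existing cipher, cryptd_ablkcipher_child() exposes the underlying synchronous transform, and cryptd_free_ablkcipher() releases it. The sketch below is a minimal, hypothetical consumer of that API, not code from this merge; the "cbc(aes)" algorithm string and all example_* identifiers are placeholders.

```c
/*
 * Hypothetical consumer of the cryptd helpers exported above -- not part of
 * this merge.  "cbc(aes)" and the example_* names are illustrative only.
 */
#include <crypto/cryptd.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>

static struct cryptd_ablkcipher *example_tfm;

static int __init example_init(void)
{
	struct crypto_blkcipher *child;

	/* Wrap the chosen cipher in cryptd: requests submitted to the
	 * returned tfm are queued on the per-CPU queues added by this
	 * patch and run asynchronously in kcrypto_wq. */
	example_tfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(example_tfm))
		return PTR_ERR(example_tfm);

	/* The synchronous child transform remains reachable, e.g. for
	 * key handling done directly by the caller. */
	child = cryptd_ablkcipher_child(example_tfm);
	(void)child;

	return 0;
}

static void __exit example_exit(void)
{
	cryptd_free_ablkcipher(example_tfm);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```

A real user would follow the same allocate/child/free pattern, submitting its ablkcipher requests against the asynchronous handle (the &example_tfm->base transform) so that the work is deferred to cryptd's per-CPU queues.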
