77 files changed, 3852 insertions(+), 1654 deletions(-)
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index fb2a1526f6ec..088b79c341ff 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -797,7 +797,8 @@ kernel crypto API | Caller
 include/linux/crypto.h and their definition can be seen below.
 The former function registers a single transformation, while
 the latter works on an array of transformation descriptions.
-The latter is useful when registering transformations in bulk.
+The latter is useful when registering transformations in bulk,
+for example when a driver implements multiple transformations.
 </para>

 <programlisting>
@@ -822,18 +823,31 @@ kernel crypto API | Caller
 </para>

 <para>
-The bulk registration / unregistration functions require
-that struct crypto_alg is an array of count size. These
-functions simply loop over that array and register /
-unregister each individual algorithm. If an error occurs,
-the loop is terminated at the offending algorithm definition.
-That means, the algorithms prior to the offending algorithm
-are successfully registered. Note, the caller has no way of
-knowing which cipher implementations have successfully
-registered. If this is important to know, the caller should
-loop through the different implementations using the single
-instance *_alg functions for each individual implementation.
+The bulk registration/unregistration functions
+register/unregister each transformation in the given array of
+length count. They handle errors as follows:
 </para>
+<itemizedlist>
+ <listitem>
+  <para>
+   crypto_register_algs() succeeds if and only if it
+   successfully registers all the given transformations. If an
+   error occurs partway through, then it rolls back successful
+   registrations before returning the error code. Note that if
+   a driver needs to handle registration errors for individual
+   transformations, then it will need to use the non-bulk
+   function crypto_register_alg() instead.
+  </para>
+ </listitem>
+ <listitem>
+  <para>
+   crypto_unregister_algs() tries to unregister all the given
+   transformations, continuing on error. It logs errors and
+   always returns zero.
+  </para>
+ </listitem>
+</itemizedlist>
+
 </sect1>

 <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
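
The all-or-nothing semantics of crypto_register_algs() make driver init paths trivial. A minimal sketch (not part of this patch; the array contents are placeholders) of a driver relying on it:

    static struct crypto_alg my_algs[2];	/* definitions filled in elsewhere */

    static int __init my_driver_init(void)
    {
    	/* Either every algorithm in the array registers, or none do. */
    	return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
    }

    static void __exit my_driver_exit(void)
    {
    	/* Never fails; individual errors are only logged. */
    	crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
    }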
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 1568cb5cd870..7546b3c02466 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -138,7 +138,7 @@ static struct shash_alg ghash_alg = {
 	.setkey			= ghash_setkey,
 	.descsize		= sizeof(struct ghash_desc_ctx),
 	.base			= {
-		.cra_name	= "ghash",
+		.cra_name	= "__ghash",
 		.cra_driver_name = "__driver-ghash-ce",
 		.cra_priority	= 0,
 		.cra_flags	= CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
@@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req)
 	}
 }

+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
+	desc->flags = req->base.flags;
+
+	return crypto_shash_import(desc, in);
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	return crypto_shash_export(desc, out);
+}
+
 static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 			      unsigned int keylen)
 {
@@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = {
 	.final			= ghash_async_final,
 	.setkey			= ghash_async_setkey,
 	.digest			= ghash_async_digest,
+	.import			= ghash_async_import,
+	.export			= ghash_async_export,
 	.halg.digestsize	= GHASH_DIGEST_SIZE,
+	.halg.statesize		= sizeof(struct ghash_desc_ctx),
 	.halg.base		= {
 		.cra_name	= "ghash",
 		.cra_driver_name = "ghash-ce",
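
The new .import/.export hooks and halg.statesize let callers snapshot a partial GHASH computation and resume it later. A minimal sketch of the round-trip (an illustration, not from this patch; the helper name and buffer size are assumptions):

    static int ghash_state_roundtrip(struct ahash_request *req,
    				     struct crypto_ahash *tfm)
    {
    	char state[512];	/* must hold crypto_ahash_statesize(tfm) bytes */
    	int err;

    	if (crypto_ahash_statesize(tfm) > sizeof(state))
    		return -EINVAL;

    	err = crypto_ahash_export(req, state);	/* snapshot partial state */
    	if (err)
    		return err;

    	return crypto_ahash_import(req, state);	/* resume from the snapshot */
    }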
diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S
index dcd01f3f0bb0..2468fade49cf 100644
--- a/arch/arm/crypto/sha1-armv7-neon.S
+++ b/arch/arm/crypto/sha1-armv7-neon.S
@@ -12,7 +12,6 @@
 #include <asm/assembler.h>

 .syntax unified
-.code 32
 .fpu neon

 .text
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index 125e16520061..82ddc9bdfeb1 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -7,6 +7,15 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>

+#ifdef __BIG_ENDIAN__
+#define LWZ(rt, d, ra)	\
+	lwz	rt,d(ra)
+#else
+#define LWZ(rt, d, ra)	\
+	li	rt,d;	\
+	lwbrx	rt,rt,ra
+#endif
+
 /*
  * We roll the registers for T, A, B, C, D, E around on each
  * iteration; T on iteration t is A on iteration t+1, and so on.
@@ -23,7 +32,7 @@
 #define W(t)	(((t)%16)+16)

 #define LOADW(t)			\
-	lwz	W(t),(t)*4(r4)
+	LWZ(W(t),(t)*4,r4)

 #define STEPD0_LOAD(t)			\
 	andc	r0,RD(t),RB(t);		\
@@ -33,7 +42,7 @@
 	add	r0,RE(t),r15;		\
 	add	RT(t),RT(t),r6;		\
 	add	r14,r0,W(t);		\
-	lwz	W((t)+4),((t)+4)*4(r4);	\
+	LWZ(W((t)+4),((t)+4)*4,r4);	\
 	rotlwi	RB(t),RB(t),30;		\
 	add	RT(t),RT(t),r14
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 68a5ceaa04c8..2d8466f9e49b 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -39,6 +39,37 @@ struct algif_hash_tfm {
 	bool has_key;
 };

+static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
+{
+	unsigned ds;
+
+	if (ctx->result)
+		return 0;
+
+	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+	if (!ctx->result)
+		return -ENOMEM;
+
+	memset(ctx->result, 0, ds);
+
+	return 0;
+}
+
+static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
+{
+	unsigned ds;
+
+	if (!ctx->result)
+		return;
+
+	ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+	sock_kzfree_s(sk, ctx->result, ds);
+	ctx->result = NULL;
+}
+
 static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			size_t ignored)
 {
@@ -54,6 +85,9 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,

 	lock_sock(sk);
 	if (!ctx->more) {
+		if ((msg->msg_flags & MSG_MORE))
+			hash_free_result(sk, ctx);
+
 		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
 						 &ctx->completion);
 		if (err)
@@ -90,6 +124,10 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,

 	ctx->more = msg->msg_flags & MSG_MORE;
 	if (!ctx->more) {
+		err = hash_alloc_result(sk, ctx);
+		if (err)
+			goto unlock;
+
 		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
@@ -116,6 +154,13 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 	sg_init_table(ctx->sgl.sg, 1);
 	sg_set_page(ctx->sgl.sg, page, size, offset);

+	if (!(flags & MSG_MORE)) {
+		err = hash_alloc_result(sk, ctx);
+		if (err)
+			goto unlock;
+	} else if (!ctx->more)
+		hash_free_result(sk, ctx);
+
 	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);

 	if (!(flags & MSG_MORE)) {
@@ -153,6 +198,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+	bool result;
 	int err;

 	if (len > ds)
@@ -161,17 +207,29 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 		msg->msg_flags |= MSG_TRUNC;

 	lock_sock(sk);
+	result = ctx->result;
+	err = hash_alloc_result(sk, ctx);
+	if (err)
+		goto unlock;
+
+	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+
 	if (ctx->more) {
 		ctx->more = 0;
-		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
 						 &ctx->completion);
 		if (err)
 			goto unlock;
+	} else if (!result) {
+		err = af_alg_wait_for_completion(
+				crypto_ahash_digest(&ctx->req),
+				&ctx->completion);
 	}

 	err = memcpy_to_msg(msg, ctx->result, len);

+	hash_free_result(sk, ctx);
+
 unlock:
 	release_sock(sk);

@@ -394,8 +452,7 @@ static void hash_sock_destruct(struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;

-	sock_kzfree_s(sk, ctx->result,
-		      crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+	hash_free_result(sk, ctx);
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
 }
@@ -407,20 +464,12 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
 	struct algif_hash_tfm *tfm = private;
 	struct crypto_ahash *hash = tfm->hash;
 	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
-	unsigned ds = crypto_ahash_digestsize(hash);

 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;

-	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
-	if (!ctx->result) {
-		sock_kfree_s(sk, ctx, len);
-		return -ENOMEM;
-	}
-
-	memset(ctx->result, 0, ds);
-
+	ctx->result = NULL;
 	ctx->len = len;
 	ctx->more = 0;
 	af_alg_init_completion(&ctx->completion);
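
For context, a userspace sketch of the AF_ALG hash interface that algif_hash serves (assuming a sha256 implementation is available); with this patch the digest buffer is only allocated once a final or digest operation actually needs it:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
    	struct sockaddr_alg sa = {
    		.salg_family = AF_ALG,
    		.salg_type   = "hash",
    		.salg_name   = "sha256",
    	};
    	unsigned char digest[32];
    	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
    	int opfd, i;

    	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
    	opfd = accept(tfmfd, NULL, 0);

    	send(opfd, "hello ", 6, MSG_MORE);	/* keeps ctx->more set */
    	send(opfd, "world", 5, 0);		/* allocates the result and finalizes */
    	read(opfd, digest, sizeof(digest));

    	for (i = 0; i < 32; i++)
    		printf("%02x", digest[i]);
    	printf("\n");
    	close(opfd);
    	close(tfmfd);
    	return 0;
    }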
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index c1229614c7e3..8e94e29dc6fc 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -107,10 +107,7 @@ static struct shash_alg alg = {

 static int __init crct10dif_mod_init(void)
 {
-	int ret;
-
-	ret = crypto_register_shash(&alg);
-	return ret;
+	return crypto_register_shash(&alg);
 }

 static void __exit crct10dif_mod_fini(void)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index a55c82dd48ef..bfb92ace2c91 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -14,13 +14,12 @@

 #include <linux/err.h>
 #include <linux/delay.h>
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
 #include "internal.h"

 #define CRYPTO_ENGINE_MAX_QLEN 10

-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err);
-
 /**
  * crypto_pump_requests - dequeue one request from engine queue to process
  * @engine: the hardware engine
@@ -34,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 				 bool in_kthread)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct ablkcipher_request *req;
+	struct ahash_request *hreq;
+	struct ablkcipher_request *breq;
 	unsigned long flags;
 	bool was_busy = false;
-	int ret;
+	int ret, rtype;

 	spin_lock_irqsave(&engine->queue_lock, flags);

@@ -82,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	if (!async_req)
 		goto out;

-	req = ablkcipher_request_cast(async_req);
-
-	engine->cur_req = req;
+	engine->cur_req = async_req;
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);

@@ -95,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,

 	spin_unlock_irqrestore(&engine->queue_lock, flags);

+	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
 	/* Until here we get the request need to be encrypted successfully */
 	if (!was_busy && engine->prepare_crypt_hardware) {
 		ret = engine->prepare_crypt_hardware(engine);
@@ -104,24 +103,55 @@
 		}
 	}

-	if (engine->prepare_request) {
-		ret = engine->prepare_request(engine, engine->cur_req);
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		if (engine->prepare_hash_request) {
+			ret = engine->prepare_hash_request(engine, hreq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->hash_one_request(engine, hreq);
 		if (ret) {
-			pr_err("failed to prepare request: %d\n", ret);
+			pr_err("failed to hash one request from queue\n");
 			goto req_err;
 		}
-		engine->cur_req_prepared = true;
-	}
-
-	ret = engine->crypt_one_request(engine, engine->cur_req);
-	if (ret) {
-		pr_err("failed to crypt one request from queue\n");
-		goto req_err;
+		return;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		if (engine->prepare_cipher_request) {
+			ret = engine->prepare_cipher_request(engine, breq);
+			if (ret) {
+				pr_err("failed to prepare request: %d\n", ret);
+				goto req_err;
+			}
+			engine->cur_req_prepared = true;
+		}
+		ret = engine->cipher_one_request(engine, breq);
+		if (ret) {
+			pr_err("failed to cipher one request from queue\n");
+			goto req_err;
+		}
+		return;
+	default:
+		pr_err("failed to prepare request of unknown type\n");
+		return;
 	}
-	return;

 req_err:
-	crypto_finalize_request(engine, engine->cur_req, ret);
+	switch (rtype) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		hreq = ahash_request_cast(engine->cur_req);
+		crypto_finalize_hash_request(engine, hreq, ret);
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		breq = ablkcipher_request_cast(engine->cur_req);
+		crypto_finalize_cipher_request(engine, breq, ret);
+		break;
+	}
 	return;

 out:
@@ -137,12 +167,14 @@ static void crypto_pump_work(struct kthread_work *work)
 }

 /**
- * crypto_transfer_request - transfer the new request into the engine queue
+ * crypto_transfer_cipher_request - transfer the new request into the
+ * enginequeue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_request(struct crypto_engine *engine,
-			    struct ablkcipher_request *req, bool need_pump)
+int crypto_transfer_cipher_request(struct crypto_engine *engine,
+				   struct ablkcipher_request *req,
+				   bool need_pump)
 {
 	unsigned long flags;
 	int ret;
@@ -162,46 +194,125 @@ int crypto_transfer_request(struct crypto_engine *engine,
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
+
+/**
+ * crypto_transfer_cipher_request_to_engine - transfer one request to list
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
+					     struct ablkcipher_request *req)
+{
+	return crypto_transfer_cipher_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
+
+/**
+ * crypto_transfer_hash_request - transfer the new request into the
+ * enginequeue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_hash_request(struct crypto_engine *engine,
+				 struct ahash_request *req, bool need_pump)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+
+	if (!engine->running) {
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	ret = ahash_enqueue_request(&engine->queue, req);
+
+	if (!engine->busy && need_pump)
+		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

 /**
- * crypto_transfer_request_to_engine - transfer one request to list into the
- * engine queue
+ * crypto_transfer_hash_request_to_engine - transfer one request to list
+ * into the engine queue
  * @engine: the hardware engine
  * @req: the request need to be listed into the engine queue
  */
-int crypto_transfer_request_to_engine(struct crypto_engine *engine,
-				      struct ablkcipher_request *req)
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+					   struct ahash_request *req)
 {
-	return crypto_transfer_request(engine, req, true);
+	return crypto_transfer_hash_request(engine, req, true);
 }
-EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

 /**
- * crypto_finalize_request - finalize one request if the request is done
+ * crypto_finalize_cipher_request - finalize one request if the request is done
  * @engine: the hardware engine
  * @req: the request need to be finalized
  * @err: error number
  */
-void crypto_finalize_request(struct crypto_engine *engine,
-			     struct ablkcipher_request *req, int err)
+void crypto_finalize_cipher_request(struct crypto_engine *engine,
+				    struct ablkcipher_request *req, int err)
 {
 	unsigned long flags;
 	bool finalize_cur_req = false;
 	int ret;

 	spin_lock_irqsave(&engine->queue_lock, flags);
-	if (engine->cur_req == req)
+	if (engine->cur_req == &req->base)
 		finalize_cur_req = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);

 	if (finalize_cur_req) {
-		if (engine->cur_req_prepared && engine->unprepare_request) {
-			ret = engine->unprepare_request(engine, req);
+		if (engine->cur_req_prepared &&
+		    engine->unprepare_cipher_request) {
+			ret = engine->unprepare_cipher_request(engine, req);
 			if (ret)
 				pr_err("failed to unprepare request\n");
 		}
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		engine->cur_req = NULL;
+		engine->cur_req_prepared = false;
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+	}
+
+	req->base.complete(&req->base, err);
+
+	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
+
+/**
+ * crypto_finalize_hash_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+				  struct ahash_request *req, int err)
+{
+	unsigned long flags;
+	bool finalize_cur_req = false;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+	if (engine->cur_req == &req->base)
+		finalize_cur_req = true;
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	if (finalize_cur_req) {
+		if (engine->cur_req_prepared &&
+		    engine->unprepare_hash_request) {
+			ret = engine->unprepare_hash_request(engine, req);
+			if (ret)
+				pr_err("failed to unprepare request\n");
+		}
 		spin_lock_irqsave(&engine->queue_lock, flags);
 		engine->cur_req = NULL;
 		engine->cur_req_prepared = false;
@@ -212,7 +323,7 @@ void crypto_finalize_request(struct crypto_engine *engine,

 	queue_kthread_work(&engine->kworker, &engine->pump_requests);
 }
-EXPORT_SYMBOL_GPL(crypto_finalize_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

 /**
  * crypto_engine_start - start the hardware engine
@@ -249,7 +360,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_start);
 int crypto_engine_stop(struct crypto_engine *engine)
 {
 	unsigned long flags;
-	unsigned limit = 500;
+	unsigned int limit = 500;
 	int ret = 0;

 	spin_lock_irqsave(&engine->queue_lock, flags);
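
A hedged sketch of how a driver now plugs into the split engine API (the my_* names are placeholders, not from this patch): hash and ablkcipher requests travel through separate prepare, one-request, and finalize hooks, and are queued with crypto_transfer_hash_request_to_engine() or crypto_transfer_cipher_request_to_engine():

    static int my_hash_one(struct crypto_engine *engine, struct ahash_request *req)
    {
    	int err = my_hw_run(req);	/* placeholder: kick off the hardware */

    	/* report completion back to the engine */
    	crypto_finalize_hash_request(engine, req, err);
    	return 0;
    }

    static int my_probe(struct platform_device *pdev)
    {
    	struct crypto_engine *engine = crypto_engine_alloc_init(&pdev->dev, true);

    	if (!engine)
    		return -ENOMEM;

    	engine->prepare_hash_request = my_prepare_hash;	/* optional */
    	engine->hash_one_request = my_hash_one;
    	engine->cipher_one_request = my_cipher_one;

    	return crypto_engine_start(engine);
    }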
diff --git a/crypto/drbg.c b/crypto/drbg.c
index f752da3a7c75..fb33f7d3b052 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1178,12 +1178,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
 		goto err;

 	drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
-	if (!drbg->Vbuf)
+	if (!drbg->Vbuf) {
+		ret = -ENOMEM;
 		goto fini;
+	}
 	drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
 	drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
-	if (!drbg->Cbuf)
+	if (!drbg->Cbuf) {
+		ret = -ENOMEM;
 		goto fini;
+	}
 	drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
 	/* scratchpad is only generated for CTR and Hash */
 	if (drbg->core->flags & DRBG_HMAC)
@@ -1199,8 +1203,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)

 	if (0 < sb_size) {
 		drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
-		if (!drbg->scratchpadbuf)
+		if (!drbg->scratchpadbuf) {
+			ret = -ENOMEM;
 			goto fini;
+		}
 		drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
 	}

@@ -1917,6 +1923,8 @@ static inline int __init drbg_healthcheck_sanity(void)
 		return -ENOMEM;

 	mutex_init(&drbg->drbg_mutex);
+	drbg->core = &drbg_cores[coreref];
+	drbg->reseed_threshold = drbg_max_requests(drbg);

 	/*
 	 * if the following tests fail, it is likely that there is a buffer
@@ -1926,12 +1934,6 @@
 	 * grave bug.
 	 */

-	/* get a valid instance of DRBG for following tests */
-	ret = drbg_instantiate(drbg, NULL, coreref, pr);
-	if (ret) {
-		rc = ret;
-		goto outbuf;
-	}
 	max_addtllen = drbg_max_addtl(drbg);
 	max_request_bytes = drbg_max_request_bytes(drbg);
 	drbg_string_fill(&addtl, buf, max_addtllen + 1);
@@ -1941,10 +1943,9 @@
 	/* overflow max_bits */
 	len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
 	BUG_ON(0 < len);
-	drbg_uninstantiate(drbg);

 	/* overflow max addtllen with personalization string */
-	ret = drbg_instantiate(drbg, &addtl, coreref, pr);
+	ret = drbg_seed(drbg, &addtl, false);
 	BUG_ON(0 == ret);
 	/* all tests passed */
 	rc = 0;
@@ -1952,9 +1953,7 @@
 	pr_devel("DRBG: Sanity tests for failure code paths successfully "
 		 "completed\n");

-	drbg_uninstantiate(drbg);
-outbuf:
-	kzfree(drbg);
+	kfree(drbg);
 	return rc;
 }

@@ -2006,7 +2005,7 @@ static int __init drbg_init(void)
 {
 	unsigned int i = 0; /* pointer to drbg_algs */
 	unsigned int j = 0; /* pointer to drbg_cores */
-	int ret = -EFAULT;
+	int ret;

 	ret = drbg_healthcheck_sanity();
 	if (ret)
@@ -2016,7 +2015,7 @@ static int __init drbg_init(void)
 		pr_info("DRBG: Cannot register all DRBG types"
 			"(slots needed: %zu, slots available: %zu)\n",
 			ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
-		return ret;
+		return -EFAULT;
 	}

 	/*
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 70a892e87ccb..f624ac98c94e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	struct crypto_skcipher *ctr = ctx->ctr;
 	struct {
 		be128 hash;
-		u8 iv[8];
+		u8 iv[16];

 		struct crypto_gcm_setkey_result result;

diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index bac70995e064..12ad3e3a84e3 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -14,24 +14,13 @@

 #include <crypto/algapi.h>
 #include <crypto/gf128mul.h>
+#include <crypto/ghash.h>
 #include <crypto/internal/hash.h>
 #include <linux/crypto.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>

-#define GHASH_BLOCK_SIZE	16
-#define GHASH_DIGEST_SIZE	16
-
-struct ghash_ctx {
-	struct gf128mul_4k *gf128;
-};
-
-struct ghash_desc_ctx {
-	u8 buffer[GHASH_BLOCK_SIZE];
-	u32 bytes;
-};
-
 static int ghash_init(struct shash_desc *desc)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 86fb59b109a9..94ee44acd465 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -612,12 +612,7 @@ EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

 int ahash_mcryptd_digest(struct ahash_request *desc)
 {
-	int err;
-
-	err = crypto_ahash_init(desc) ?:
-	      ahash_mcryptd_finup(desc);
-
-	return err;
+	return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
 }

 int ahash_mcryptd_update(struct ahash_request *desc)
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 4df6451e7543..0b66dc824606 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -35,8 +35,8 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
 		n_sz--;
 	}

-	/* In FIPS mode only allow key size 2K & 3K */
-	if (n_sz != 256 && n_sz != 384) {
+	/* In FIPS mode only allow key size 2K and higher */
+	if (n_sz < 256) {
 		pr_err("RSA: key size not allowed in FIPS mode\n");
 		return -EINVAL;
 	}
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5c9d5a5e7b65..62dffa0028ac 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -209,16 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
 	char *state;
 	struct ahash_request *req;
 	int statesize, ret = -EINVAL;
+	const char guard[] = { 0x00, 0xba, 0xad, 0x00 };

 	req = *preq;
 	statesize = crypto_ahash_statesize(
 			crypto_ahash_reqtfm(req));
-	state = kmalloc(statesize, GFP_KERNEL);
+	state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
 	if (!state) {
 		pr_err("alt: hash: Failed to alloc state for %s\n", algo);
 		goto out_nostate;
 	}
+	memcpy(state + statesize, guard, sizeof(guard));
 	ret = crypto_ahash_export(req, state);
+	WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
 	if (ret) {
 		pr_err("alt: hash: Failed to export() for %s\n", algo);
 		goto out;
@@ -665,7 +668,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 			memcpy(key, template[i].key, template[i].klen);

 		ret = crypto_aead_setkey(tfm, key, template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_aead_get_flags(tfm));
 			goto out;
@@ -770,7 +773,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 			memcpy(key, template[i].key, template[i].klen);

 		ret = crypto_aead_setkey(tfm, key, template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_aead_get_flags(tfm));
 			goto out;
@@ -1008,6 +1011,9 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
 		if (template[i].np)
 			continue;

+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		j++;

 		ret = -EINVAL;
@@ -1023,7 +1029,7 @@

 		ret = crypto_cipher_setkey(tfm, template[i].key,
 					   template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			printk(KERN_ERR "alg: cipher: setkey failed "
 			       "on test %d for %s: flags=%x\n", j,
 			       algo, crypto_cipher_get_flags(tfm));
@@ -1112,6 +1118,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 		if (template[i].np && !template[i].also_non_np)
 			continue;

+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		if (template[i].iv)
 			memcpy(iv, template[i].iv, ivsize);
 		else
@@ -1133,7 +1142,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,

 		ret = crypto_skcipher_setkey(tfm, template[i].key,
 					     template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
@@ -1198,6 +1207,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 		if (!template[i].np)
 			continue;

+		if (fips_enabled && template[i].fips_skip)
+			continue;
+
 		if (template[i].iv)
 			memcpy(iv, template[i].iv, ivsize);
 		else
@@ -1211,7 +1223,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,

 		ret = crypto_skcipher_setkey(tfm, template[i].key,
 					     template[i].klen);
-		if (!ret == template[i].fail) {
+		if (template[i].fail == !ret) {
 			pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
 			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
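
The guard-byte technique above generalizes to any export-style API: seed known bytes past the end of the buffer and warn if the callee disturbs them. A generic sketch (the helper name is a placeholder, not from this patch):

    static int checked_export(struct ahash_request *req, size_t n)
    {
    	static const unsigned char guard[4] = { 0x00, 0xba, 0xad, 0x00 };
    	unsigned char *buf = kmalloc(n + sizeof(guard), GFP_KERNEL);
    	int err;

    	if (!buf)
    		return -ENOMEM;

    	memcpy(buf + n, guard, sizeof(guard));
    	err = crypto_ahash_export(req, buf);	/* claims to write at most n bytes */
    	WARN_ON(memcmp(buf + n, guard, sizeof(guard)));	/* fires on overflow */

    	kfree(buf);
    	return err;
    }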
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index acb6bbff781a..e64a4ef9d8ca 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -59,6 +59,7 @@ struct hash_testvec {
  * @tap: How to distribute data in @np SGs
  * @also_non_np: if set to 1, the test will be also done without
  *               splitting data in @np SGs
+ * @fips_skip: Skip the test vector in FIPS mode
  */

 struct cipher_testvec {
@@ -75,6 +76,7 @@ struct cipher_testvec {
 	unsigned char klen;
 	unsigned short ilen;
 	unsigned short rlen;
+	bool fips_skip;
 };

 struct aead_testvec {
@@ -18224,6 +18226,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
 			  "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.klen	= 32,
+		.fips_skip = 1,
 		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.input	= "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -18566,6 +18569,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
 			  "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.klen	= 32,
+		.fips_skip = 1,
 		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.input	= "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
diff --git a/crypto/xor.c b/crypto/xor.c
index 35d6b3adf230..263af9fb45ea 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -24,6 +24,10 @@
 #include <linux/preempt.h>
 #include <asm/xor.h>

+#ifndef XOR_SELECT_TEMPLATE
+#define XOR_SELECT_TEMPLATE(x) (x)
+#endif
+
 /* The xor routines to use.  */
 static struct xor_block_template *active_template;

@@ -109,6 +113,15 @@ calibrate_xor_blocks(void)
 	void *b1, *b2;
 	struct xor_block_template *f, *fastest;

+	fastest = XOR_SELECT_TEMPLATE(NULL);
+
+	if (fastest) {
+		printk(KERN_INFO "xor: automatically using best "
+			"checksumming function   %-10s\n",
+			fastest->name);
+		goto out;
+	}
+
 	/*
 	 * Note: Since the memory is not actually used for _anything_ but to
 	 * test the XOR speed, we don't really want kmemcheck to warn about
@@ -126,36 +139,22 @@
 	 * all the possible functions, just test the best one
 	 */

-	fastest = NULL;
-
-#ifdef XOR_SELECT_TEMPLATE
-	fastest = XOR_SELECT_TEMPLATE(fastest);
-#endif
-
 #define xor_speed(templ)	do_xor_speed((templ), b1, b2)

-	if (fastest) {
-		printk(KERN_INFO "xor: automatically using best "
-				 "checksumming function:\n");
-		xor_speed(fastest);
-		goto out;
-	} else {
-		printk(KERN_INFO "xor: measuring software checksum speed\n");
-		XOR_TRY_TEMPLATES;
-		fastest = template_list;
-		for (f = fastest; f; f = f->next)
-			if (f->speed > fastest->speed)
-				fastest = f;
-	}
+	printk(KERN_INFO "xor: measuring software checksum speed\n");
+	XOR_TRY_TEMPLATES;
+	fastest = template_list;
+	for (f = fastest; f; f = f->next)
+		if (f->speed > fastest->speed)
+			fastest = f;

 	printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
 	       fastest->name, fastest->speed / 1000, fastest->speed % 1000);

 #undef xor_speed

-out:
 	free_pages((unsigned long)b1, 2);
-
+out:
 	active_template = fastest;
 	return 0;
 }
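
With the identity default in place, an architecture can skip the boot-time benchmark entirely by defining XOR_SELECT_TEMPLATE to return a non-NULL template. An illustrative override (the NEON names are assumptions, not from this patch):

    /* in an architecture's <asm/xor.h> */
    #define XOR_SELECT_TEMPLATE(FASTEST) \
    	(cpu_has_neon() ? &xor_block_neon : FASTEST)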
diff --git a/crypto/xts.c b/crypto/xts.c
index 26ba5833b994..305343f22a02 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  *
- * Based om ecb.c
+ * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8c0770bf8881..200dab5136a7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -410,6 +410,19 @@ config HW_RANDOM_MESON

 	  If unsure, say Y.

+config HW_RANDOM_CAVIUM
+	tristate "Cavium ThunderX Random Number Generator support"
+	depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Cavium SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cavium_rng.
+
+	  If unsure, say Y.
+
 endif # HW_RANDOM

 config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 04bb0b03356f..5f52b1e4e7be 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
 obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
 obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
+obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 48f6a83cdd61..4a99ac756f08 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -24,16 +24,18 @@
  * warranty of any kind, whether express or implied.
  */

-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/hw_random.h>
-#include <linux/delay.h>
-#include <asm/io.h>

+#define DRV_NAME "AMD768-HWRNG"

-#define PFX	KBUILD_MODNAME ": "
-
+#define RNGDATA		0x00
+#define RNGDONE		0x04
+#define PMBASE_OFFSET	0xF0
+#define PMBASE_SIZE	8

 /*
  * Data for PCI driver interface
@@ -50,72 +52,84 @@ static const struct pci_device_id pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, pci_tbl);

-static struct pci_dev *amd_pdev;
-
+struct amd768_priv {
+	void __iomem *iobase;
+	struct pci_dev *pcidev;
+};

-static int amd_rng_data_present(struct hwrng *rng, int wait)
+static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
 {
-	u32 pmbase = (u32)rng->priv;
-	int data, i;
-
-	for (i = 0; i < 20; i++) {
-		data = !!(inl(pmbase + 0xF4) & 1);
-		if (data || !wait)
-			break;
-		udelay(10);
+	u32 *data = buf;
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+	size_t read = 0;
+	/* We will wait at maximum one time per read */
+	int timeout = max / 4 + 1;
+
+	/*
+	 * RNG data is available when RNGDONE is set to 1
+	 * New random numbers are generated approximately 128 microseconds
+	 * after RNGDATA is read
+	 */
+	while (read < max) {
+		if (ioread32(priv->iobase + RNGDONE) == 0) {
+			if (wait) {
+				/* Delay given by datasheet */
+				usleep_range(128, 196);
+				if (timeout-- == 0)
+					return read;
+			} else {
+				return 0;
+			}
+		} else {
+			*data = ioread32(priv->iobase + RNGDATA);
+			data++;
+			read += 4;
+		}
 	}
-	return data;
-}

-static int amd_rng_data_read(struct hwrng *rng, u32 *data)
-{
-	u32 pmbase = (u32)rng->priv;
-
-	*data = inl(pmbase + 0xF0);
-
-	return 4;
+	return read;
 }

 static int amd_rng_init(struct hwrng *rng)
 {
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
 	u8 rnen;

-	pci_read_config_byte(amd_pdev, 0x40, &rnen);
-	rnen |= (1 << 7);	/* RNG on */
-	pci_write_config_byte(amd_pdev, 0x40, rnen);
+	pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+	rnen |= BIT(7);	/* RNG on */
+	pci_write_config_byte(priv->pcidev, 0x40, rnen);

-	pci_read_config_byte(amd_pdev, 0x41, &rnen);
-	rnen |= (1 << 7);	/* PMIO enable */
-	pci_write_config_byte(amd_pdev, 0x41, rnen);
+	pci_read_config_byte(priv->pcidev, 0x41, &rnen);
+	rnen |= BIT(7);	/* PMIO enable */
+	pci_write_config_byte(priv->pcidev, 0x41, rnen);

 	return 0;
 }

 static void amd_rng_cleanup(struct hwrng *rng)
 {
+	struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
 	u8 rnen;

-	pci_read_config_byte(amd_pdev, 0x40, &rnen);
-	rnen &= ~(1 << 7);	/* RNG off */
-	pci_write_config_byte(amd_pdev, 0x40, rnen);
+	pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+	rnen &= ~BIT(7);	/* RNG off */
+	pci_write_config_byte(priv->pcidev, 0x40, rnen);
 }

-
 static struct hwrng amd_rng = {
 	.name		= "amd",
 	.init		= amd_rng_init,
 	.cleanup	= amd_rng_cleanup,
-	.data_present	= amd_rng_data_present,
-	.data_read	= amd_rng_data_read,
+	.read		= amd_rng_read,
 };

-
 static int __init mod_init(void)
 {
 	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	u32 pmbase;
+	struct amd768_priv *priv;

 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
@@ -123,42 +137,44 @@ static int __init mod_init(void)
 			goto found;
 	}
 	/* Device not found. */
-	goto out;
+	return -ENODEV;

 found:
 	err = pci_read_config_dword(pdev, 0x58, &pmbase);
 	if (err)
-		goto out;
-	err = -EIO;
+		return err;
+
 	pmbase &= 0x0000FF00;
 	if (pmbase == 0)
-		goto out;
-	if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
-		dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
+		return -EIO;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
+				 PMBASE_SIZE, DRV_NAME)) {
+		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
| 138 | pmbase + 0xF0); | 158 | pmbase + 0xF0); |
| 139 | err = -EBUSY; | 159 | return -EBUSY; |
| 140 | goto out; | ||
| 141 | } | 160 | } |
| 142 | amd_rng.priv = (unsigned long)pmbase; | 161 | |
| 143 | amd_pdev = pdev; | 162 | priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, |
| 144 | 163 | PMBASE_SIZE); | |
| 145 | pr_info("AMD768 RNG detected\n"); | 164 | if (!priv->iobase) { |
| 146 | err = hwrng_register(&amd_rng); | 165 | pr_err(DRV_NAME ": Cannot map ioport\n"); |
| 147 | if (err) { | 166 | return -ENOMEM; |
| 148 | pr_err(PFX "RNG registering failed (%d)\n", | ||
| 149 | err); | ||
| 150 | release_region(pmbase + 0xF0, 8); | ||
| 151 | goto out; | ||
| 152 | } | 167 | } |
| 153 | out: | 168 | |
| 154 | return err; | 169 | amd_rng.priv = (unsigned long)priv; |
| 170 | priv->pcidev = pdev; | ||
| 171 | |||
| 172 | pr_info(DRV_NAME " detected\n"); | ||
| 173 | return devm_hwrng_register(&pdev->dev, &amd_rng); | ||
| 155 | } | 174 | } |
| 156 | 175 | ||
| 157 | static void __exit mod_exit(void) | 176 | static void __exit mod_exit(void) |
| 158 | { | 177 | { |
| 159 | u32 pmbase = (unsigned long)amd_rng.priv; | ||
| 160 | release_region(pmbase + 0xF0, 8); | ||
| 161 | hwrng_unregister(&amd_rng); | ||
| 162 | } | 178 | } |
| 163 | 179 | ||
| 164 | module_init(mod_init); | 180 | module_init(mod_init); |
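The amd-rng rewrite above moves from the legacy .data_present/.data_read pair to the single .read callback. The contract: fill up to max bytes of buf, block only when wait is true, and return the number of bytes produced (0 when nothing is available, a negative errno on hard failure). A minimal sketch of that shape, against a hypothetical device with a DATA word at +0x0 and a ready bit at +0x4; the register layout and delay values are illustrative, not AMD's:

#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>

struct demo_rng_priv {
	void __iomem *iobase;	/* DATA at +0x0, STATUS bit 0 at +0x4 */
};

static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct demo_rng_priv *priv = (struct demo_rng_priv *)rng->priv;
	u32 *data = buf;
	size_t read = 0;
	int retries = max / 4 + 1;	/* at most one wait per word */

	while (read + 4 <= max) {	/* the core guarantees max >= 32 */
		if (!(ioread32(priv->iobase + 0x4) & 1)) {
			if (!wait || retries-- == 0)
				break;
			usleep_range(128, 196);	/* device settle time */
			continue;
		}
		*data++ = ioread32(priv->iobase + 0x0);
		read += 4;
	}

	return read;	/* bytes produced; 0 means "no data yet" */
}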
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index af2149273fe0..574211a49549 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c | |||
| @@ -92,9 +92,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev) | |||
| 92 | bcm2835_rng_ops.priv = (unsigned long)rng_base; | 92 | bcm2835_rng_ops.priv = (unsigned long)rng_base; |
| 93 | 93 | ||
| 94 | rng_id = of_match_node(bcm2835_rng_of_match, np); | 94 | rng_id = of_match_node(bcm2835_rng_of_match, np); |
| 95 | if (!rng_id) | 95 | if (!rng_id) { |
| 96 | iounmap(rng_base); | ||
| 96 | return -EINVAL; | 97 | return -EINVAL; |
| 97 | 98 | } | |
| 98 | /* Check for rng init function, execute it */ | 99 | /* Check for rng init function, execute it */ |
| 99 | rng_setup = rng_id->data; | 100 | rng_setup = rng_id->data; |
| 100 | if (rng_setup) | 101 | if (rng_setup) |
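The bcm2835 hunk fixes a leak: rng_base is already ioremap()ed when the of_match_node() check runs, so the early -EINVAL return must unmap it. One way to keep new early exits from silently skipping cleanup is the goto-unwind idiom; a sketch with illustrative names, not the actual bcm2835 code:

#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-rng" },	/* illustrative */
	{ }
};

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int err;

	base = of_iomap(pdev->dev.of_node, 0);
	if (!base)
		return -ENXIO;		/* nothing acquired yet */

	if (!of_match_node(demo_of_match, pdev->dev.of_node)) {
		err = -EINVAL;
		goto err_unmap;		/* one place undoes the ioremap */
	}

	return 0;

err_unmap:
	iounmap(base);
	return err;
}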
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c new file mode 100644 index 000000000000..066ae0e78d63 --- /dev/null +++ b/drivers/char/hw_random/cavium-rng-vf.c | |||
| @@ -0,0 +1,99 @@ | |||
| 1 | /* | ||
| 2 | * Hardware Random Number Generator support for Cavium, Inc. | ||
| 3 | * Thunder processor family. | ||
| 4 | * | ||
| 5 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 6 | * License. See the file "COPYING" in the main directory of this archive | ||
| 7 | * for more details. | ||
| 8 | * | ||
| 9 | * Copyright (C) 2016 Cavium, Inc. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/hw_random.h> | ||
| 13 | #include <linux/io.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/pci.h> | ||
| 16 | #include <linux/pci_ids.h> | ||
| 17 | |||
| 18 | struct cavium_rng { | ||
| 19 | struct hwrng ops; | ||
| 20 | void __iomem *result; | ||
| 21 | }; | ||
| 22 | |||
| 23 | /* Read data from the RNG unit */ | ||
| 24 | static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) | ||
| 25 | { | ||
| 26 | struct cavium_rng *p = container_of(rng, struct cavium_rng, ops); | ||
| 27 | unsigned int size = max; | ||
| 28 | |||
| 29 | while (size >= 8) { | ||
| 30 | *((u64 *)dat) = readq(p->result); | ||
| 31 | size -= 8; | ||
| 32 | dat += 8; | ||
| 33 | } | ||
| 34 | while (size > 0) { | ||
| 35 | *((u8 *)dat) = readb(p->result); | ||
| 36 | size--; | ||
| 37 | dat++; | ||
| 38 | } | ||
| 39 | return max; | ||
| 40 | } | ||
| 41 | |||
| 42 | /* Map Cavium RNG to an HWRNG object */ | ||
| 43 | static int cavium_rng_probe_vf(struct pci_dev *pdev, | ||
| 44 | const struct pci_device_id *id) | ||
| 45 | { | ||
| 46 | struct cavium_rng *rng; | ||
| 47 | int ret; | ||
| 48 | |||
| 49 | rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); | ||
| 50 | if (!rng) | ||
| 51 | return -ENOMEM; | ||
| 52 | |||
| 53 | /* Map the RNG result */ | ||
| 54 | rng->result = pcim_iomap(pdev, 0, 0); | ||
| 55 | if (!rng->result) { | ||
| 56 | dev_err(&pdev->dev, "Error: iomap failed retrieving result.\n"); | ||
| 57 | return -ENOMEM; | ||
| 58 | } | ||
| 59 | |||
| 60 | rng->ops.name = "cavium rng"; | ||
| 61 | rng->ops.read = cavium_rng_read; | ||
| 62 | rng->ops.quality = 1000; | ||
| 63 | |||
| 64 | pci_set_drvdata(pdev, rng); | ||
| 65 | |||
| 66 | ret = hwrng_register(&rng->ops); | ||
| 67 | if (ret) { | ||
| 68 | dev_err(&pdev->dev, "Error registering device as HWRNG.\n"); | ||
| 69 | return ret; | ||
| 70 | } | ||
| 71 | |||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | |||
| 75 | /* Remove the VF */ | ||
| 76 | static void cavium_rng_remove_vf(struct pci_dev *pdev) | ||
| 77 | { | ||
| 78 | struct cavium_rng *rng; | ||
| 79 | |||
| 80 | rng = pci_get_drvdata(pdev); | ||
| 81 | hwrng_unregister(&rng->ops); | ||
| 82 | } | ||
| 83 | |||
| 84 | static const struct pci_device_id cavium_rng_vf_id_table[] = { | ||
| 85 | { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0}, | ||
| 86 | {0,}, | ||
| 87 | }; | ||
| 88 | MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table); | ||
| 89 | |||
| 90 | static struct pci_driver cavium_rng_vf_driver = { | ||
| 91 | .name = "cavium_rng_vf", | ||
| 92 | .id_table = cavium_rng_vf_id_table, | ||
| 93 | .probe = cavium_rng_probe_vf, | ||
| 94 | .remove = cavium_rng_remove_vf, | ||
| 95 | }; | ||
| 96 | module_pci_driver(cavium_rng_vf_driver); | ||
| 97 | |||
| 98 | MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>"); | ||
| 99 | MODULE_LICENSE("GPL"); | ||
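Once the VF probes, the hwrng core exposes it through /dev/hwrng, and since .quality is non-zero (1000 here credits roughly 1000 of every 1024 bits as entropy) the khwrngd thread will also feed the kernel's input pool. A quick userspace smoke test, as a sketch:

/* Userspace smoke test: pull 32 bytes from the active hwrng. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];
	int fd = open("/dev/hwrng", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n != (ssize_t)sizeof(buf)) {
		perror("read");
		close(fd);
		return 1;
	}
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	putchar('\n');
	close(fd);
	return 0;
}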
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c new file mode 100644 index 000000000000..a944e0a47f42 --- /dev/null +++ b/drivers/char/hw_random/cavium-rng.c | |||
| @@ -0,0 +1,94 @@ | |||
| 1 | /* | ||
| 2 | * Hardware Random Number Generator support for Cavium Inc. | ||
| 3 | * Thunder processor family. | ||
| 4 | * | ||
| 5 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 6 | * License. See the file "COPYING" in the main directory of this archive | ||
| 7 | * for more details. | ||
| 8 | * | ||
| 9 | * Copyright (C) 2016 Cavium, Inc. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/hw_random.h> | ||
| 13 | #include <linux/io.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/pci.h> | ||
| 16 | #include <linux/pci_ids.h> | ||
| 17 | |||
| 18 | #define THUNDERX_RNM_ENT_EN 0x1 | ||
| 19 | #define THUNDERX_RNM_RNG_EN 0x2 | ||
| 20 | |||
| 21 | struct cavium_rng_pf { | ||
| 22 | void __iomem *control_status; | ||
| 23 | }; | ||
| 24 | |||
| 25 | /* Enable the RNG hardware and activate the VF */ | ||
| 26 | static int cavium_rng_probe(struct pci_dev *pdev, | ||
| 27 | const struct pci_device_id *id) | ||
| 28 | { | ||
| 29 | struct cavium_rng_pf *rng; | ||
| 30 | int iov_err; | ||
| 31 | |||
| 32 | rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); | ||
| 33 | if (!rng) | ||
| 34 | return -ENOMEM; | ||
| 35 | |||
| 36 | /* Map the RNG control */ | ||
| 37 | rng->control_status = pcim_iomap(pdev, 0, 0); | ||
| 38 | if (!rng->control_status) { | ||
| 39 | dev_err(&pdev->dev, | ||
| 40 | "Error iomap failed retrieving control_status.\n"); | ||
| 41 | return -ENOMEM; | ||
| 42 | } | ||
| 43 | |||
| 44 | /* Enable the RNG hardware and entropy source */ | ||
| 45 | writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN, | ||
| 46 | rng->control_status); | ||
| 47 | |||
| 48 | pci_set_drvdata(pdev, rng); | ||
| 49 | |||
| 50 | /* Enable the Cavium RNG as a VF */ | ||
| 51 | iov_err = pci_enable_sriov(pdev, 1); | ||
| 52 | if (iov_err != 0) { | ||
| 53 | /* Disable the RNG hardware and entropy source */ | ||
| 54 | writeq(0, rng->control_status); | ||
| 55 | dev_err(&pdev->dev, | ||
| 56 | "Error initializing RNG virtual function,(%i).\n", | ||
| 57 | iov_err); | ||
| 58 | return iov_err; | ||
| 59 | } | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | /* Disable VF and RNG Hardware */ | ||
| 65 | static void cavium_rng_remove(struct pci_dev *pdev) | ||
| 66 | { | ||
| 67 | struct cavium_rng_pf *rng; | ||
| 68 | |||
| 69 | rng = pci_get_drvdata(pdev); | ||
| 70 | |||
| 71 | /* Remove the VF */ | ||
| 72 | pci_disable_sriov(pdev); | ||
| 73 | |||
| 74 | /* Disable the RNG hardware and entropy source */ | ||
| 75 | writeq(0, rng->control_status); | ||
| 76 | } | ||
| 77 | |||
| 78 | static const struct pci_device_id cavium_rng_pf_id_table[] = { | ||
| 79 | { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */ | ||
| 80 | {0,}, | ||
| 81 | }; | ||
| 82 | |||
| 83 | MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table); | ||
| 84 | |||
| 85 | static struct pci_driver cavium_rng_pf_driver = { | ||
| 86 | .name = "cavium_rng_pf", | ||
| 87 | .id_table = cavium_rng_pf_id_table, | ||
| 88 | .probe = cavium_rng_probe, | ||
| 89 | .remove = cavium_rng_remove, | ||
| 90 | }; | ||
| 91 | |||
| 92 | module_pci_driver(cavium_rng_pf_driver); | ||
| 93 | MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>"); | ||
| 94 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 9203f2d130c0..482794526e8c 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
| @@ -449,22 +449,6 @@ int hwrng_register(struct hwrng *rng) | |||
| 449 | goto out; | 449 | goto out; |
| 450 | 450 | ||
| 451 | mutex_lock(&rng_mutex); | 451 | mutex_lock(&rng_mutex); |
| 452 | |||
| 453 | /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ | ||
| 454 | err = -ENOMEM; | ||
| 455 | if (!rng_buffer) { | ||
| 456 | rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); | ||
| 457 | if (!rng_buffer) | ||
| 458 | goto out_unlock; | ||
| 459 | } | ||
| 460 | if (!rng_fillbuf) { | ||
| 461 | rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); | ||
| 462 | if (!rng_fillbuf) { | ||
| 463 | kfree(rng_buffer); | ||
| 464 | goto out_unlock; | ||
| 465 | } | ||
| 466 | } | ||
| 467 | |||
| 468 | /* Must not register two RNGs with the same name. */ | 452 | /* Must not register two RNGs with the same name. */ |
| 469 | err = -EEXIST; | 453 | err = -EEXIST; |
| 470 | list_for_each_entry(tmp, &rng_list, list) { | 454 | list_for_each_entry(tmp, &rng_list, list) { |
| @@ -573,7 +557,26 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister); | |||
| 573 | 557 | ||
| 574 | static int __init hwrng_modinit(void) | 558 | static int __init hwrng_modinit(void) |
| 575 | { | 559 | { |
| 576 | return register_miscdev(); | 560 | int ret = -ENOMEM; |
| 561 | |||
| 562 | /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ | ||
| 563 | rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); | ||
| 564 | if (!rng_buffer) | ||
| 565 | return -ENOMEM; | ||
| 566 | |||
| 567 | rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); | ||
| 568 | if (!rng_fillbuf) { | ||
| 569 | kfree(rng_buffer); | ||
| 570 | return -ENOMEM; | ||
| 571 | } | ||
| 572 | |||
| 573 | ret = register_miscdev(); | ||
| 574 | if (ret) { | ||
| 575 | kfree(rng_fillbuf); | ||
| 576 | kfree(rng_buffer); | ||
| 577 | } | ||
| 578 | |||
| 579 | return ret; | ||
| 577 | } | 580 | } |
| 578 | 581 | ||
| 579 | static void __exit hwrng_modexit(void) | 582 | static void __exit hwrng_modexit(void) |
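Moving the rng_buffer/rng_fillbuf allocation out of hwrng_register() and into hwrng_modinit() drops the lazy-allocation dance under rng_mutex, but it also means the init path has to unwind in reverse order when register_miscdev() fails, and hwrng_modexit() must free what init allocated (that hunk is not shown here). The pairing, sketched as a self-contained module with stand-in chardev helpers:

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/slab.h>

static u8 *demo_buf, *demo_fillbuf;

static int demo_register_chardev(void) { return 0; }	/* stand-in */
static void demo_unregister_chardev(void) { }		/* stand-in */

static int __init demo_modinit(void)
{
	int ret;

	demo_buf = kmalloc(SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES,
			   GFP_KERNEL);
	if (!demo_buf)
		return -ENOMEM;

	demo_fillbuf = kmalloc(SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES,
			       GFP_KERNEL);
	if (!demo_fillbuf) {
		ret = -ENOMEM;
		goto err_free_buf;
	}

	ret = demo_register_chardev();
	if (ret)
		goto err_free_fillbuf;

	return 0;

err_free_fillbuf:			/* reverse order of acquisition */
	kfree(demo_fillbuf);
err_free_buf:
	kfree(demo_buf);
	return ret;
}

static void __exit demo_modexit(void)
{
	demo_unregister_chardev();
	kfree(demo_fillbuf);
	kfree(demo_buf);
}

module_init(demo_modinit);
module_exit(demo_modexit);
MODULE_LICENSE("GPL");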
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 0d0579fe465e..e7a245942029 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c | |||
| @@ -24,15 +24,12 @@ | |||
| 24 | * warranty of any kind, whether express or implied. | 24 | * warranty of any kind, whether express or implied. |
| 25 | */ | 25 | */ |
| 26 | 26 | ||
| 27 | #include <linux/module.h> | 27 | #include <linux/delay.h> |
| 28 | #include <linux/hw_random.h> | ||
| 29 | #include <linux/io.h> | ||
| 28 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
| 31 | #include <linux/module.h> | ||
| 29 | #include <linux/pci.h> | 32 | #include <linux/pci.h> |
| 30 | #include <linux/hw_random.h> | ||
| 31 | #include <linux/delay.h> | ||
| 32 | #include <asm/io.h> | ||
| 33 | |||
| 34 | |||
| 35 | #define PFX KBUILD_MODNAME ": " | ||
| 36 | 33 | ||
| 37 | #define GEODE_RNG_DATA_REG 0x50 | 34 | #define GEODE_RNG_DATA_REG 0x50 |
| 38 | #define GEODE_RNG_STATUS_REG 0x54 | 35 | #define GEODE_RNG_STATUS_REG 0x54 |
| @@ -85,7 +82,6 @@ static struct hwrng geode_rng = { | |||
| 85 | 82 | ||
| 86 | static int __init mod_init(void) | 83 | static int __init mod_init(void) |
| 87 | { | 84 | { |
| 88 | int err = -ENODEV; | ||
| 89 | struct pci_dev *pdev = NULL; | 85 | struct pci_dev *pdev = NULL; |
| 90 | const struct pci_device_id *ent; | 86 | const struct pci_device_id *ent; |
| 91 | void __iomem *mem; | 87 | void __iomem *mem; |
| @@ -93,43 +89,27 @@ static int __init mod_init(void) | |||
| 93 | 89 | ||
| 94 | for_each_pci_dev(pdev) { | 90 | for_each_pci_dev(pdev) { |
| 95 | ent = pci_match_id(pci_tbl, pdev); | 91 | ent = pci_match_id(pci_tbl, pdev); |
| 96 | if (ent) | 92 | if (ent) { |
| 97 | goto found; | 93 | rng_base = pci_resource_start(pdev, 0); |
| 98 | } | 94 | if (rng_base == 0) |
| 99 | /* Device not found. */ | 95 | return -ENODEV; |
| 100 | goto out; | 96 | |
| 101 | 97 | mem = devm_ioremap(&pdev->dev, rng_base, 0x58); | |
| 102 | found: | 98 | if (!mem) |
| 103 | rng_base = pci_resource_start(pdev, 0); | 99 | return -ENOMEM; |
| 104 | if (rng_base == 0) | 100 | geode_rng.priv = (unsigned long)mem; |
| 105 | goto out; | 101 | |
| 106 | err = -ENOMEM; | 102 | pr_info("AMD Geode RNG detected\n"); |
| 107 | mem = ioremap(rng_base, 0x58); | 103 | return devm_hwrng_register(&pdev->dev, &geode_rng); |
| 108 | if (!mem) | 104 | } |
| 109 | goto out; | ||
| 110 | geode_rng.priv = (unsigned long)mem; | ||
| 111 | |||
| 112 | pr_info("AMD Geode RNG detected\n"); | ||
| 113 | err = hwrng_register(&geode_rng); | ||
| 114 | if (err) { | ||
| 115 | pr_err(PFX "RNG registering failed (%d)\n", | ||
| 116 | err); | ||
| 117 | goto err_unmap; | ||
| 118 | } | 105 | } |
| 119 | out: | ||
| 120 | return err; | ||
| 121 | 106 | ||
| 122 | err_unmap: | 107 | /* Device not found. */ |
| 123 | iounmap(mem); | 108 | return -ENODEV; |
| 124 | goto out; | ||
| 125 | } | 109 | } |
| 126 | 110 | ||
| 127 | static void __exit mod_exit(void) | 111 | static void __exit mod_exit(void) |
| 128 | { | 112 | { |
| 129 | void __iomem *mem = (void __iomem *)geode_rng.priv; | ||
| 130 | |||
| 131 | hwrng_unregister(&geode_rng); | ||
| 132 | iounmap(mem); | ||
| 133 | } | 113 | } |
| 134 | 114 | ||
| 135 | module_init(mod_init); | 115 | module_init(mod_init); |
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c index 0cfd81bcaeac..58bef39f7286 100644 --- a/drivers/char/hw_random/meson-rng.c +++ b/drivers/char/hw_random/meson-rng.c | |||
| @@ -76,9 +76,6 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) | |||
| 76 | struct meson_rng_data *data = | 76 | struct meson_rng_data *data = |
| 77 | container_of(rng, struct meson_rng_data, rng); | 77 | container_of(rng, struct meson_rng_data, rng); |
| 78 | 78 | ||
| 79 | if (max < sizeof(u32)) | ||
| 80 | return 0; | ||
| 81 | |||
| 82 | *(u32 *)buf = readl_relaxed(data->base + RNG_DATA); | 79 | *(u32 *)buf = readl_relaxed(data->base + RNG_DATA); |
| 83 | 80 | ||
| 84 | return sizeof(u32); | 81 | return sizeof(u32); |
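Dropping the max < sizeof(u32) guard is safe because the hwrng core never calls ->read() with fewer than rng_buffer_size() bytes, and that helper pins the floor at 32; drivers may therefore assume at least eight whole 32-bit words per call. The caller-side contract, paraphrased as a sketch (the real helper lives in drivers/char/hw_random/core.c):

#include <linux/cache.h>
#include <linux/types.h>

/* Paraphrase of the core's buffer sizing: never below 32 bytes. */
static inline size_t demo_rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}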
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 01d4be2c354b..f5c26a5f6875 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c | |||
| @@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev) | |||
| 385 | 385 | ||
| 386 | pm_runtime_enable(&pdev->dev); | 386 | pm_runtime_enable(&pdev->dev); |
| 387 | ret = pm_runtime_get_sync(&pdev->dev); | 387 | ret = pm_runtime_get_sync(&pdev->dev); |
| 388 | if (ret) { | 388 | if (ret < 0) { |
| 389 | dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); | 389 | dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); |
| 390 | pm_runtime_put_noidle(&pdev->dev); | 390 | pm_runtime_put_noidle(&pdev->dev); |
| 391 | goto err_ioremap; | 391 | goto err_ioremap; |
| @@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev) | |||
| 443 | int ret; | 443 | int ret; |
| 444 | 444 | ||
| 445 | ret = pm_runtime_get_sync(dev); | 445 | ret = pm_runtime_get_sync(dev); |
| 446 | if (ret) { | 446 | if (ret < 0) { |
| 447 | dev_err(dev, "Failed to runtime_get device: %d\n", ret); | 447 | dev_err(dev, "Failed to runtime_get device: %d\n", ret); |
| 448 | pm_runtime_put_noidle(dev); | 448 | pm_runtime_put_noidle(dev); |
| 449 | return ret; | 449 | return ret; |
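The omap-rng fix above hinges on pm_runtime_get_sync()'s return convention: 0 means the device was resumed, 1 means it was already active, and only negative values are errors, so `if (ret)` wrongly treated the already-active case as failure. Note also that the usage count is bumped even on failure, hence the put_noidle on the error path. The corrected pattern, sketched:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_resume_hw(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* 0 or 1 on success */

	if (ret < 0) {
		dev_err(dev, "runtime resume failed: %d\n", ret);
		pm_runtime_put_noidle(dev);	/* undo the count bump */
		return ret;
	}
	return 0;
}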
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index 8da14f1a1f56..37a58d78aab3 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c | |||
| @@ -71,12 +71,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count) | |||
| 71 | return 0; | 71 | return 0; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static int omap3_rom_rng_data_present(struct hwrng *rng, int wait) | 74 | static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w) |
| 75 | { | ||
| 76 | return 1; | ||
| 77 | } | ||
| 78 | |||
| 79 | static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data) | ||
| 80 | { | 75 | { |
| 81 | int r; | 76 | int r; |
| 82 | 77 | ||
| @@ -88,8 +83,7 @@ static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data) | |||
| 88 | 83 | ||
| 89 | static struct hwrng omap3_rom_rng_ops = { | 84 | static struct hwrng omap3_rom_rng_ops = { |
| 90 | .name = "omap3-rom", | 85 | .name = "omap3-rom", |
| 91 | .data_present = omap3_rom_rng_data_present, | 86 | .read = omap3_rom_rng_read, |
| 92 | .data_read = omap3_rom_rng_data_read, | ||
| 93 | }; | 87 | }; |
| 94 | 88 | ||
| 95 | static int omap3_rom_rng_probe(struct platform_device *pdev) | 89 | static int omap3_rom_rng_probe(struct platform_device *pdev) |
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index c19e23d22b36..545df485bcc4 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c | |||
| @@ -95,42 +95,20 @@ static struct hwrng pasemi_rng = { | |||
| 95 | .data_read = pasemi_rng_data_read, | 95 | .data_read = pasemi_rng_data_read, |
| 96 | }; | 96 | }; |
| 97 | 97 | ||
| 98 | static int rng_probe(struct platform_device *ofdev) | 98 | static int rng_probe(struct platform_device *pdev) |
| 99 | { | 99 | { |
| 100 | void __iomem *rng_regs; | 100 | void __iomem *rng_regs; |
| 101 | struct device_node *rng_np = ofdev->dev.of_node; | 101 | struct resource *res; |
| 102 | struct resource res; | ||
| 103 | int err = 0; | ||
| 104 | 102 | ||
| 105 | err = of_address_to_resource(rng_np, 0, &res); | 103 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 106 | if (err) | 104 | rng_regs = devm_ioremap_resource(&pdev->dev, res); |
| 107 | return -ENODEV; | 105 | if (IS_ERR(rng_regs)) |
| 108 | 106 | return PTR_ERR(rng_regs); | |
| 109 | rng_regs = ioremap(res.start, 0x100); | ||
| 110 | |||
| 111 | if (!rng_regs) | ||
| 112 | return -ENOMEM; | ||
| 113 | 107 | ||
| 114 | pasemi_rng.priv = (unsigned long)rng_regs; | 108 | pasemi_rng.priv = (unsigned long)rng_regs; |
| 115 | 109 | ||
| 116 | pr_info("Registering PA Semi RNG\n"); | 110 | pr_info("Registering PA Semi RNG\n"); |
| 117 | 111 | return devm_hwrng_register(&pdev->dev, &pasemi_rng); | |
| 118 | err = hwrng_register(&pasemi_rng); | ||
| 119 | |||
| 120 | if (err) | ||
| 121 | iounmap(rng_regs); | ||
| 122 | |||
| 123 | return err; | ||
| 124 | } | ||
| 125 | |||
| 126 | static int rng_remove(struct platform_device *dev) | ||
| 127 | { | ||
| 128 | void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv; | ||
| 129 | |||
| 130 | hwrng_unregister(&pasemi_rng); | ||
| 131 | iounmap(rng_regs); | ||
| 132 | |||
| 133 | return 0; | ||
| 134 | } | 112 | } |
| 135 | 113 | ||
| 136 | static const struct of_device_id rng_match[] = { | 114 | static const struct of_device_id rng_match[] = { |
| @@ -146,7 +124,6 @@ static struct platform_driver rng_driver = { | |||
| 146 | .of_match_table = rng_match, | 124 | .of_match_table = rng_match, |
| 147 | }, | 125 | }, |
| 148 | .probe = rng_probe, | 126 | .probe = rng_probe, |
| 149 | .remove = rng_remove, | ||
| 150 | }; | 127 | }; |
| 151 | 128 | ||
| 152 | module_platform_driver(rng_driver); | 129 | module_platform_driver(rng_driver); |
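Two simplifications stack in the pasemi conversion: devm_ioremap_resource() folds the request_mem_region()/ioremap() pair into one managed call, and devm_hwrng_register() ties unregistration to the device lifetime, leaving nothing for a remove() callback to do. One subtlety worth noting, sketched below: devm_ioremap_resource() reports failure through ERR_PTR(), not NULL (unlike plain devm_ioremap()).

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *demo_map_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Checks the resource, requests the region, then maps it. */
	return devm_ioremap_resource(&pdev->dev, res);
}

/* Caller pattern: IS_ERR()/PTR_ERR(), never a NULL test. */
static int demo_probe(struct platform_device *pdev)
{
	void __iomem *regs = demo_map_regs(pdev);

	if (IS_ERR(regs))
		return PTR_ERR(regs);
	return 0;
}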
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index 108897bea2d0..11dc9b7c09ce 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c | |||
| @@ -143,7 +143,6 @@ static struct platform_driver pic32_rng_driver = { | |||
| 143 | .remove = pic32_rng_remove, | 143 | .remove = pic32_rng_remove, |
| 144 | .driver = { | 144 | .driver = { |
| 145 | .name = "pic32-rng", | 145 | .name = "pic32-rng", |
| 146 | .owner = THIS_MODULE, | ||
| 147 | .of_match_table = of_match_ptr(pic32_rng_of_match), | 146 | .of_match_table = of_match_ptr(pic32_rng_of_match), |
| 148 | }, | 147 | }, |
| 149 | }; | 148 | }; |
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c index 1d35363d23c5..938ec10e733d 100644 --- a/drivers/char/hw_random/st-rng.c +++ b/drivers/char/hw_random/st-rng.c | |||
| @@ -54,9 +54,6 @@ static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) | |||
| 54 | u32 status; | 54 | u32 status; |
| 55 | int i; | 55 | int i; |
| 56 | 56 | ||
| 57 | if (max < sizeof(u16)) | ||
| 58 | return -EINVAL; | ||
| 59 | |||
| 60 | /* Wait until FIFO is full - max 4us */ | 57 | /* Wait until FIFO is full - max 4us */
| 61 | for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) { | 58 | for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) { |
| 62 | status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG); | 59 | status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG); |
| @@ -111,6 +108,7 @@ static int st_rng_probe(struct platform_device *pdev) | |||
| 111 | ret = hwrng_register(&ddata->ops); | 108 | ret = hwrng_register(&ddata->ops); |
| 112 | if (ret) { | 109 | if (ret) { |
| 113 | dev_err(&pdev->dev, "Failed to register HW RNG\n"); | 110 | dev_err(&pdev->dev, "Failed to register HW RNG\n"); |
| 111 | clk_disable_unprepare(clk); | ||
| 114 | return ret; | 112 | return ret; |
| 115 | } | 113 | } |
| 116 | 114 | ||
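The st-rng fix restores clock balance: probe enabled the clock with clk_prepare_enable() earlier, so the hwrng_register() failure path must call clk_disable_unprepare() before returning, or the clock stays on forever. The general acquire/undo ordering, sketched:

#include <linux/clk.h>
#include <linux/hw_random.h>

static int demo_enable_and_register(struct clk *clk, struct hwrng *ops)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	ret = hwrng_register(ops);
	if (ret)
		clk_disable_unprepare(clk);	/* undo the enable above */

	return ret;
}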
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c index a7b694913416..1093583b579c 100644 --- a/drivers/char/hw_random/tx4939-rng.c +++ b/drivers/char/hw_random/tx4939-rng.c | |||
| @@ -144,22 +144,13 @@ static int __init tx4939_rng_probe(struct platform_device *dev) | |||
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | platform_set_drvdata(dev, rngdev); | 146 | platform_set_drvdata(dev, rngdev); |
| 147 | return hwrng_register(&rngdev->rng); | 147 | return devm_hwrng_register(&dev->dev, &rngdev->rng); |
| 148 | } | ||
| 149 | |||
| 150 | static int __exit tx4939_rng_remove(struct platform_device *dev) | ||
| 151 | { | ||
| 152 | struct tx4939_rng *rngdev = platform_get_drvdata(dev); | ||
| 153 | |||
| 154 | hwrng_unregister(&rngdev->rng); | ||
| 155 | return 0; | ||
| 156 | } | 148 | } |
| 157 | 149 | ||
| 158 | static struct platform_driver tx4939_rng_driver = { | 150 | static struct platform_driver tx4939_rng_driver = { |
| 159 | .driver = { | 151 | .driver = { |
| 160 | .name = "tx4939-rng", | 152 | .name = "tx4939-rng", |
| 161 | }, | 153 | }, |
| 162 | .remove = tx4939_rng_remove, | ||
| 163 | }; | 154 | }; |
| 164 | 155 | ||
| 165 | module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe); | 156 | module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe); |
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 9b035b7d7f4f..4d2b81f2b223 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
| @@ -318,6 +318,9 @@ config CRYPTO_DEV_OMAP_AES | |||
| 318 | select CRYPTO_AES | 318 | select CRYPTO_AES |
| 319 | select CRYPTO_BLKCIPHER | 319 | select CRYPTO_BLKCIPHER |
| 320 | select CRYPTO_ENGINE | 320 | select CRYPTO_ENGINE |
| 321 | select CRYPTO_CBC | ||
| 322 | select CRYPTO_ECB | ||
| 323 | select CRYPTO_CTR | ||
| 321 | help | 324 | help |
| 322 | OMAP processors have AES module accelerator. Select this if you | 325 | OMAP processors have AES module accelerator. Select this if you |
| 323 | want to use the OMAP module for AES algorithms. | 326 | want to use the OMAP module for AES algorithms. |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b3044219772c..156aad167cd6 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -111,6 +111,42 @@ | |||
| 111 | #else | 111 | #else |
| 112 | #define debug(format, arg...) | 112 | #define debug(format, arg...) |
| 113 | #endif | 113 | #endif |
| 114 | |||
| 115 | #ifdef DEBUG | ||
| 116 | #include <linux/highmem.h> | ||
| 117 | |||
| 118 | static void dbg_dump_sg(const char *level, const char *prefix_str, | ||
| 119 | int prefix_type, int rowsize, int groupsize, | ||
| 120 | struct scatterlist *sg, size_t tlen, bool ascii, | ||
| 121 | bool may_sleep) | ||
| 122 | { | ||
| 123 | struct scatterlist *it; | ||
| 124 | void *it_page; | ||
| 125 | size_t len; | ||
| 126 | void *buf; | ||
| 127 | |||
| 128 | for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) { | ||
| 129 | /* | ||
| 130 | * make sure the scatterlist's page | ||
| 131 | * has a valid virtual memory mapping | ||
| 132 | */ | ||
| 133 | it_page = kmap_atomic(sg_page(it)); | ||
| 134 | if (unlikely(!it_page)) { | ||
| 135 | printk(KERN_ERR "dbg_dump_sg: kmap failed\n"); | ||
| 136 | return; | ||
| 137 | } | ||
| 138 | |||
| 139 | buf = it_page + it->offset; | ||
| 140 | len = min(tlen, it->length); | ||
| 141 | print_hex_dump(level, prefix_str, prefix_type, rowsize, | ||
| 142 | groupsize, buf, len, ascii); | ||
| 143 | tlen -= len; | ||
| 144 | |||
| 145 | kunmap_atomic(it_page); | ||
| 146 | } | ||
| 147 | } | ||
| 148 | #endif | ||
| 149 | |||
| 114 | static struct list_head alg_list; | 150 | static struct list_head alg_list; |
| 115 | 151 | ||
| 116 | struct caam_alg_entry { | 152 | struct caam_alg_entry { |
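dbg_dump_sg() carries a may_sleep flag because scatterlist pages can live in highmem and need a temporary kernel mapping: kmap_atomic() works in any context but forbids sleeping until kunmap_atomic(), while kmap() may sleep. This version of the helper does not consult the flag yet; a hedged sketch of how it could select the primitive (not the committed code):

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void *demo_map_sg_page(struct scatterlist *sg, bool may_sleep)
{
	/* kmap() can sleep; kmap_atomic() cannot, but works anywhere. */
	return may_sleep ? kmap(sg_page(sg)) : kmap_atomic(sg_page(sg));
}

static void demo_unmap_sg_page(struct scatterlist *sg, void *vaddr,
			       bool may_sleep)
{
	if (may_sleep)
		kunmap(sg_page(sg));
	else
		kunmap_atomic(vaddr);
}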
| @@ -227,8 +263,9 @@ static void append_key_aead(u32 *desc, struct caam_ctx *ctx, | |||
| 227 | if (is_rfc3686) { | 263 | if (is_rfc3686) { |
| 228 | nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len + | 264 | nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len + |
| 229 | enckeylen); | 265 | enckeylen); |
| 230 | append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | | 266 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, |
| 231 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | 267 | LDST_CLASS_IND_CCB | |
| 268 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 232 | append_move(desc, | 269 | append_move(desc, |
| 233 | MOVE_SRC_OUTFIFO | | 270 | MOVE_SRC_OUTFIFO | |
| 234 | MOVE_DEST_CLASS1CTX | | 271 | MOVE_DEST_CLASS1CTX | |
| @@ -500,11 +537,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 500 | 537 | ||
| 501 | /* Load Counter into CONTEXT1 reg */ | 538 | /* Load Counter into CONTEXT1 reg */ |
| 502 | if (is_rfc3686) | 539 | if (is_rfc3686) |
| 503 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | | 540 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | |
| 504 | LDST_CLASS_1_CCB | | 541 | LDST_SRCDST_BYTE_CONTEXT | |
| 505 | LDST_SRCDST_BYTE_CONTEXT | | 542 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << |
| 506 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | 543 | LDST_OFFSET_SHIFT)); |
| 507 | LDST_OFFSET_SHIFT)); | ||
| 508 | 544 | ||
| 509 | /* Class 1 operation */ | 545 | /* Class 1 operation */ |
| 510 | append_operation(desc, ctx->class1_alg_type | | 546 | append_operation(desc, ctx->class1_alg_type | |
| @@ -578,11 +614,10 @@ skip_enc: | |||
| 578 | 614 | ||
| 579 | /* Load Counter into CONTEXT1 reg */ | 615 | /* Load Counter into CONTEXT1 reg */ |
| 580 | if (is_rfc3686) | 616 | if (is_rfc3686) |
| 581 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | | 617 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | |
| 582 | LDST_CLASS_1_CCB | | 618 | LDST_SRCDST_BYTE_CONTEXT | |
| 583 | LDST_SRCDST_BYTE_CONTEXT | | 619 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << |
| 584 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | 620 | LDST_OFFSET_SHIFT)); |
| 585 | LDST_OFFSET_SHIFT)); | ||
| 586 | 621 | ||
| 587 | /* Choose operation */ | 622 | /* Choose operation */ |
| 588 | if (ctr_mode) | 623 | if (ctr_mode) |
| @@ -683,11 +718,10 @@ copy_iv: | |||
| 683 | 718 | ||
| 684 | /* Load Counter into CONTEXT1 reg */ | 719 | /* Load Counter into CONTEXT1 reg */ |
| 685 | if (is_rfc3686) | 720 | if (is_rfc3686) |
| 686 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | | 721 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | |
| 687 | LDST_CLASS_1_CCB | | 722 | LDST_SRCDST_BYTE_CONTEXT | |
| 688 | LDST_SRCDST_BYTE_CONTEXT | | 723 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << |
| 689 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | 724 | LDST_OFFSET_SHIFT)); |
| 690 | LDST_OFFSET_SHIFT)); | ||
| 691 | 725 | ||
| 692 | /* Class 1 operation */ | 726 | /* Class 1 operation */ |
| 693 | append_operation(desc, ctx->class1_alg_type | | 727 | append_operation(desc, ctx->class1_alg_type | |
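The append_load_imm_be32() conversions in these hunks clean up an endianness muddle: the old sites were inconsistent about how the 32-bit counter ends up big-endian in the CAAM context register (be32_to_cpu(1) in most places, a plain (u32)1 in another), whereas the new helper converts once, explicitly, with cpu_to_be32() and appends the result as raw bytes, which is correct on hosts of either endianness. A toy sketch of the idea; the real helper is a macro in drivers/crypto/caam/desc_constr.h, and the descriptor layout below is illustrative only:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Toy descriptor: word 0 counts the words appended so far. */
static inline void demo_append_word(u32 *desc, u32 word)
{
	desc[1 + desc[0]++] = word;
}

static inline void demo_append_load_imm_be32(u32 *desc, u32 immediate,
					     u32 options)
{
	/* One explicit swap: the immediate is big-endian from here on. */
	union { __be32 be; u32 raw; } data = { .be = cpu_to_be32(immediate) };

	demo_append_word(desc, options | sizeof(data));	/* command word */
	demo_append_word(desc, data.raw);		/* immediate data */
}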
| @@ -1478,7 +1512,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1478 | int ret = 0; | 1512 | int ret = 0; |
| 1479 | u32 *key_jump_cmd; | 1513 | u32 *key_jump_cmd; |
| 1480 | u32 *desc; | 1514 | u32 *desc; |
| 1481 | u32 *nonce; | 1515 | u8 *nonce; |
| 1482 | u32 geniv; | 1516 | u32 geniv; |
| 1483 | u32 ctx1_iv_off = 0; | 1517 | u32 ctx1_iv_off = 0; |
| 1484 | const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == | 1518 | const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == |
| @@ -1531,9 +1565,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1531 | 1565 | ||
| 1532 | /* Load nonce into CONTEXT1 reg */ | 1566 | /* Load nonce into CONTEXT1 reg */ |
| 1533 | if (is_rfc3686) { | 1567 | if (is_rfc3686) { |
| 1534 | nonce = (u32 *)(key + keylen); | 1568 | nonce = (u8 *)key + keylen; |
| 1535 | append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | | 1569 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, |
| 1536 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | 1570 | LDST_CLASS_IND_CCB | |
| 1571 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1537 | append_move(desc, MOVE_WAITCOMP | | 1572 | append_move(desc, MOVE_WAITCOMP | |
| 1538 | MOVE_SRC_OUTFIFO | | 1573 | MOVE_SRC_OUTFIFO | |
| 1539 | MOVE_DEST_CLASS1CTX | | 1574 | MOVE_DEST_CLASS1CTX | |
| @@ -1549,11 +1584,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1549 | 1584 | ||
| 1550 | /* Load counter into CONTEXT1 reg */ | 1585 | /* Load counter into CONTEXT1 reg */ |
| 1551 | if (is_rfc3686) | 1586 | if (is_rfc3686) |
| 1552 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | | 1587 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | |
| 1553 | LDST_CLASS_1_CCB | | 1588 | LDST_SRCDST_BYTE_CONTEXT | |
| 1554 | LDST_SRCDST_BYTE_CONTEXT | | 1589 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << |
| 1555 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | 1590 | LDST_OFFSET_SHIFT)); |
| 1556 | LDST_OFFSET_SHIFT)); | ||
| 1557 | 1591 | ||
| 1558 | /* Load operation */ | 1592 | /* Load operation */ |
| 1559 | append_operation(desc, ctx->class1_alg_type | | 1593 | append_operation(desc, ctx->class1_alg_type | |
| @@ -1590,9 +1624,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1590 | 1624 | ||
| 1591 | /* Load nonce into CONTEXT1 reg */ | 1625 | /* Load nonce into CONTEXT1 reg */ |
| 1592 | if (is_rfc3686) { | 1626 | if (is_rfc3686) { |
| 1593 | nonce = (u32 *)(key + keylen); | 1627 | nonce = (u8 *)key + keylen; |
| 1594 | append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | | 1628 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, |
| 1595 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | 1629 | LDST_CLASS_IND_CCB | |
| 1630 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1596 | append_move(desc, MOVE_WAITCOMP | | 1631 | append_move(desc, MOVE_WAITCOMP | |
| 1597 | MOVE_SRC_OUTFIFO | | 1632 | MOVE_SRC_OUTFIFO | |
| 1598 | MOVE_DEST_CLASS1CTX | | 1633 | MOVE_DEST_CLASS1CTX | |
| @@ -1608,11 +1643,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1608 | 1643 | ||
| 1609 | /* Load counter into CONTEXT1 reg */ | 1644 | /* Load counter into CONTEXT1 reg */ |
| 1610 | if (is_rfc3686) | 1645 | if (is_rfc3686) |
| 1611 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | | 1646 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | |
| 1612 | LDST_CLASS_1_CCB | | 1647 | LDST_SRCDST_BYTE_CONTEXT | |
| 1613 | LDST_SRCDST_BYTE_CONTEXT | | 1648 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << |
| 1614 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | 1649 | LDST_OFFSET_SHIFT)); |
| 1615 | LDST_OFFSET_SHIFT)); | ||
| 1616 | 1650 | ||
| 1617 | /* Choose operation */ | 1651 | /* Choose operation */ |
| 1618 | if (ctr_mode) | 1652 | if (ctr_mode) |
| @@ -1653,9 +1687,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1653 | 1687 | ||
| 1654 | /* Load Nonce into CONTEXT1 reg */ | 1688 | /* Load Nonce into CONTEXT1 reg */ |
| 1655 | if (is_rfc3686) { | 1689 | if (is_rfc3686) { |
| 1656 | nonce = (u32 *)(key + keylen); | 1690 | nonce = (u8 *)key + keylen; |
| 1657 | append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | | 1691 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, |
| 1658 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | 1692 | LDST_CLASS_IND_CCB | |
| 1693 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1659 | append_move(desc, MOVE_WAITCOMP | | 1694 | append_move(desc, MOVE_WAITCOMP | |
| 1660 | MOVE_SRC_OUTFIFO | | 1695 | MOVE_SRC_OUTFIFO | |
| 1661 | MOVE_DEST_CLASS1CTX | | 1696 | MOVE_DEST_CLASS1CTX | |
| @@ -1685,11 +1720,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1685 | 1720 | ||
| 1686 | /* Load Counter into CONTEXT1 reg */ | 1721 | /* Load Counter into CONTEXT1 reg */ |
| 1687 | if (is_rfc3686) | 1722 | if (is_rfc3686) |
| 1688 | append_load_imm_u32(desc, (u32)1, LDST_IMM | | 1723 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | |
| 1689 | LDST_CLASS_1_CCB | | 1724 | LDST_SRCDST_BYTE_CONTEXT | |
| 1690 | LDST_SRCDST_BYTE_CONTEXT | | 1725 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << |
| 1691 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | 1726 | LDST_OFFSET_SHIFT)); |
| 1692 | LDST_OFFSET_SHIFT)); | ||
| 1693 | 1727 | ||
| 1694 | if (ctx1_iv_off) | 1728 | if (ctx1_iv_off) |
| 1695 | append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | | 1729 | append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | |
| @@ -1995,9 +2029,9 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 1995 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", | 2029 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", |
| 1996 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 2030 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
| 1997 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 2031 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
| 1998 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", | 2032 | dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
| 1999 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 2033 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
| 2000 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); | 2034 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true); |
| 2001 | #endif | 2035 | #endif |
| 2002 | 2036 | ||
| 2003 | ablkcipher_unmap(jrdev, edesc, req); | 2037 | ablkcipher_unmap(jrdev, edesc, req); |
| @@ -2027,9 +2061,9 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 2027 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", | 2061 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", |
| 2028 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 2062 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
| 2029 | ivsize, 1); | 2063 | ivsize, 1); |
| 2030 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", | 2064 | dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
| 2031 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 2065 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
| 2032 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); | 2066 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true); |
| 2033 | #endif | 2067 | #endif |
| 2034 | 2068 | ||
| 2035 | ablkcipher_unmap(jrdev, edesc, req); | 2069 | ablkcipher_unmap(jrdev, edesc, req); |
| @@ -2184,12 +2218,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 2184 | int len, sec4_sg_index = 0; | 2218 | int len, sec4_sg_index = 0; |
| 2185 | 2219 | ||
| 2186 | #ifdef DEBUG | 2220 | #ifdef DEBUG |
| 2221 | bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 2222 | CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); | ||
| 2187 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", | 2223 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", |
| 2188 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 2224 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
| 2189 | ivsize, 1); | 2225 | ivsize, 1); |
| 2190 | print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", | 2226 | printk(KERN_ERR "asked=%d, nbytes=%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
| 2191 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 2227 | dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ", |
| 2192 | edesc->src_nents ? 100 : req->nbytes, 1); | 2228 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
| 2229 | edesc->src_nents ? 100 : req->nbytes, 1, may_sleep); | ||
| 2193 | #endif | 2230 | #endif |
| 2194 | 2231 | ||
| 2195 | len = desc_len(sh_desc); | 2232 | len = desc_len(sh_desc); |
| @@ -2241,12 +2278,14 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 2241 | int len, sec4_sg_index = 0; | 2278 | int len, sec4_sg_index = 0; |
| 2242 | 2279 | ||
| 2243 | #ifdef DEBUG | 2280 | #ifdef DEBUG |
| 2281 | bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 2282 | CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); | ||
| 2244 | print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", | 2283 | print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", |
| 2245 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 2284 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
| 2246 | ivsize, 1); | 2285 | ivsize, 1); |
| 2247 | print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ", | 2286 | dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", |
| 2248 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 2287 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
| 2249 | edesc->src_nents ? 100 : req->nbytes, 1); | 2288 | edesc->src_nents ? 100 : req->nbytes, 1, may_sleep); |
| 2250 | #endif | 2289 | #endif |
| 2251 | 2290 | ||
| 2252 | len = desc_len(sh_desc); | 2291 | len = desc_len(sh_desc); |
| @@ -2516,18 +2555,20 @@ static int aead_decrypt(struct aead_request *req) | |||
| 2516 | u32 *desc; | 2555 | u32 *desc; |
| 2517 | int ret = 0; | 2556 | int ret = 0; |
| 2518 | 2557 | ||
| 2558 | #ifdef DEBUG | ||
| 2559 | bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 2560 | CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); | ||
| 2561 | dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ", | ||
| 2562 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, | ||
| 2563 | req->assoclen + req->cryptlen, 1, may_sleep); | ||
| 2564 | #endif | ||
| 2565 | |||
| 2519 | /* allocate extended descriptor */ | 2566 | /* allocate extended descriptor */ |
| 2520 | edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, | 2567 | edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, |
| 2521 | &all_contig, false); | 2568 | &all_contig, false); |
| 2522 | if (IS_ERR(edesc)) | 2569 | if (IS_ERR(edesc)) |
| 2523 | return PTR_ERR(edesc); | 2570 | return PTR_ERR(edesc); |
| 2524 | 2571 | ||
| 2525 | #ifdef DEBUG | ||
| 2526 | print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", | ||
| 2527 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
| 2528 | req->assoclen + req->cryptlen, 1); | ||
| 2529 | #endif | ||
| 2530 | |||
| 2531 | /* Create and submit job descriptor*/ | 2572 | /* Create and submit job descriptor*/ |
| 2532 | init_authenc_job(req, edesc, all_contig, false); | 2573 | init_authenc_job(req, edesc, all_contig, false); |
| 2533 | #ifdef DEBUG | 2574 | #ifdef DEBUG |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 36365b3efdfd..660dc206969f 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -99,17 +99,17 @@ static struct list_head hash_list; | |||
| 99 | 99 | ||
| 100 | /* ahash per-session context */ | 100 | /* ahash per-session context */ |
| 101 | struct caam_hash_ctx { | 101 | struct caam_hash_ctx { |
| 102 | struct device *jrdev; | 102 | u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
| 103 | u32 sh_desc_update[DESC_HASH_MAX_USED_LEN]; | 103 | u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
| 104 | u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN]; | 104 | u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
| 105 | u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN]; | 105 | u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
| 106 | u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN]; | 106 | u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
| 107 | u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN]; | 107 | dma_addr_t sh_desc_update_dma ____cacheline_aligned; |
| 108 | dma_addr_t sh_desc_update_dma; | ||
| 109 | dma_addr_t sh_desc_update_first_dma; | 108 | dma_addr_t sh_desc_update_first_dma; |
| 110 | dma_addr_t sh_desc_fin_dma; | 109 | dma_addr_t sh_desc_fin_dma; |
| 111 | dma_addr_t sh_desc_digest_dma; | 110 | dma_addr_t sh_desc_digest_dma; |
| 112 | dma_addr_t sh_desc_finup_dma; | 111 | dma_addr_t sh_desc_finup_dma; |
| 112 | struct device *jrdev; | ||
| 113 | u32 alg_type; | 113 | u32 alg_type; |
| 114 | u32 alg_op; | 114 | u32 alg_op; |
| 115 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; | 115 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; |
| @@ -187,15 +187,6 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, | |||
| 187 | return buf_dma; | 187 | return buf_dma; |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | /* Map req->src and put it in link table */ | ||
| 191 | static inline void src_map_to_sec4_sg(struct device *jrdev, | ||
| 192 | struct scatterlist *src, int src_nents, | ||
| 193 | struct sec4_sg_entry *sec4_sg) | ||
| 194 | { | ||
| 195 | dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE); | ||
| 196 | sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0); | ||
| 197 | } | ||
| 198 | |||
| 199 | /* | 190 | /* |
| 200 | * Only put buffer in link table if it contains data, which is possible, | 191 | * Only put buffer in link table if it contains data, which is possible, |
| 201 | * since a buffer has previously been used, and needs to be unmapped, | 192 | * since a buffer has previously been used, and needs to be unmapped, |
| @@ -449,7 +440,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | |||
| 449 | u32 *desc; | 440 | u32 *desc; |
| 450 | struct split_key_result result; | 441 | struct split_key_result result; |
| 451 | dma_addr_t src_dma, dst_dma; | 442 | dma_addr_t src_dma, dst_dma; |
| 452 | int ret = 0; | 443 | int ret; |
| 453 | 444 | ||
| 454 | desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | 445 | desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); |
| 455 | if (!desc) { | 446 | if (!desc) { |
| @@ -526,7 +517,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
| 526 | struct device *jrdev = ctx->jrdev; | 517 | struct device *jrdev = ctx->jrdev; |
| 527 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); | 518 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); |
| 528 | int digestsize = crypto_ahash_digestsize(ahash); | 519 | int digestsize = crypto_ahash_digestsize(ahash); |
| 529 | int ret = 0; | 520 | int ret; |
| 530 | u8 *hashed_key = NULL; | 521 | u8 *hashed_key = NULL; |
| 531 | 522 | ||
| 532 | #ifdef DEBUG | 523 | #ifdef DEBUG |
| @@ -534,14 +525,15 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
| 534 | #endif | 525 | #endif |
| 535 | 526 | ||
| 536 | if (keylen > blocksize) { | 527 | if (keylen > blocksize) { |
| 537 | hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL | | 528 | hashed_key = kmalloc_array(digestsize, |
| 538 | GFP_DMA); | 529 | sizeof(*hashed_key), |
| 530 | GFP_KERNEL | GFP_DMA); | ||
| 539 | if (!hashed_key) | 531 | if (!hashed_key) |
| 540 | return -ENOMEM; | 532 | return -ENOMEM; |
| 541 | ret = hash_digest_key(ctx, key, &keylen, hashed_key, | 533 | ret = hash_digest_key(ctx, key, &keylen, hashed_key, |
| 542 | digestsize); | 534 | digestsize); |
| 543 | if (ret) | 535 | if (ret) |
| 544 | goto badkey; | 536 | goto bad_free_key; |
| 545 | key = hashed_key; | 537 | key = hashed_key; |
| 546 | } | 538 | } |
| 547 | 539 | ||
| @@ -559,14 +551,14 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
| 559 | 551 | ||
| 560 | ret = gen_split_hash_key(ctx, key, keylen); | 552 | ret = gen_split_hash_key(ctx, key, keylen); |
| 561 | if (ret) | 553 | if (ret) |
| 562 | goto badkey; | 554 | goto bad_free_key; |
| 563 | 555 | ||
| 564 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | 556 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, |
| 565 | DMA_TO_DEVICE); | 557 | DMA_TO_DEVICE); |
| 566 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | 558 | if (dma_mapping_error(jrdev, ctx->key_dma)) { |
| 567 | dev_err(jrdev, "unable to map key i/o memory\n"); | 559 | dev_err(jrdev, "unable to map key i/o memory\n"); |
| 568 | ret = -ENOMEM; | 560 | ret = -ENOMEM; |
| 569 | goto map_err; | 561 | goto error_free_key; |
| 570 | } | 562 | } |
| 571 | #ifdef DEBUG | 563 | #ifdef DEBUG |
| 572 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 564 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
| @@ -579,11 +571,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
| 579 | dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, | 571 | dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, |
| 580 | DMA_TO_DEVICE); | 572 | DMA_TO_DEVICE); |
| 581 | } | 573 | } |
| 582 | 574 | error_free_key: | |
| 583 | map_err: | ||
| 584 | kfree(hashed_key); | 575 | kfree(hashed_key); |
| 585 | return ret; | 576 | return ret; |
| 586 | badkey: | 577 | bad_free_key: |
| 587 | kfree(hashed_key); | 578 | kfree(hashed_key); |
| 588 | crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); | 579 | crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 589 | return -EINVAL; | 580 | return -EINVAL; |
| @@ -595,16 +586,16 @@ badkey: | |||
| 595 | * @sec4_sg_dma: physical mapped address of h/w link table | 586 | * @sec4_sg_dma: physical mapped address of h/w link table |
| 596 | * @src_nents: number of segments in input scatterlist | 587 | * @src_nents: number of segments in input scatterlist |
| 597 | * @sec4_sg_bytes: length of dma mapped sec4_sg space | 588 | * @sec4_sg_bytes: length of dma mapped sec4_sg space |
| 598 | * @sec4_sg: pointer to h/w link table | ||
| 599 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | 589 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
| 590 | * @sec4_sg: h/w link table | ||
| 600 | */ | 591 | */ |
| 601 | struct ahash_edesc { | 592 | struct ahash_edesc { |
| 602 | dma_addr_t dst_dma; | 593 | dma_addr_t dst_dma; |
| 603 | dma_addr_t sec4_sg_dma; | 594 | dma_addr_t sec4_sg_dma; |
| 604 | int src_nents; | 595 | int src_nents; |
| 605 | int sec4_sg_bytes; | 596 | int sec4_sg_bytes; |
| 606 | struct sec4_sg_entry *sec4_sg; | 597 | u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; |
| 607 | u32 hw_desc[0]; | 598 | struct sec4_sg_entry sec4_sg[0]; |
| 608 | }; | 599 | }; |
| 609 | 600 | ||
| 610 | static inline void ahash_unmap(struct device *dev, | 601 | static inline void ahash_unmap(struct device *dev, |
| @@ -774,6 +765,65 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
| 774 | req->base.complete(&req->base, err); | 765 | req->base.complete(&req->base, err); |
| 775 | } | 766 | } |
| 776 | 767 | ||
| 768 | /* | ||
| 769 | * Allocate an extended descriptor, which contains the hardware descriptor | ||
| 770 | * and space for a hardware scatter table containing sg_num entries. | ||
| 771 | */ | ||
| 772 | static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, | ||
| 773 | int sg_num, u32 *sh_desc, | ||
| 774 | dma_addr_t sh_desc_dma, | ||
| 775 | gfp_t flags) | ||
| 776 | { | ||
| 777 | struct ahash_edesc *edesc; | ||
| 778 | unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); | ||
| 779 | |||
| 780 | edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); | ||
| 781 | if (!edesc) { | ||
| 782 | dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); | ||
| 783 | return NULL; | ||
| 784 | } | ||
| 785 | |||
| 786 | init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), | ||
| 787 | HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 788 | |||
| 789 | return edesc; | ||
| 790 | } | ||
| 791 | |||
| 792 | static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, | ||
| 793 | struct ahash_edesc *edesc, | ||
| 794 | struct ahash_request *req, int nents, | ||
| 795 | unsigned int first_sg, | ||
| 796 | unsigned int first_bytes, size_t to_hash) | ||
| 797 | { | ||
| 798 | dma_addr_t src_dma; | ||
| 799 | u32 options; | ||
| 800 | |||
| 801 | if (nents > 1 || first_sg) { | ||
| 802 | struct sec4_sg_entry *sg = edesc->sec4_sg; | ||
| 803 | unsigned int sgsize = sizeof(*sg) * (first_sg + nents); | ||
| 804 | |||
| 805 | sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); | ||
| 806 | |||
| 807 | src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); | ||
| 808 | if (dma_mapping_error(ctx->jrdev, src_dma)) { | ||
| 809 | dev_err(ctx->jrdev, "unable to map S/G table\n"); | ||
| 810 | return -ENOMEM; | ||
| 811 | } | ||
| 812 | |||
| 813 | edesc->sec4_sg_bytes = sgsize; | ||
| 814 | edesc->sec4_sg_dma = src_dma; | ||
| 815 | options = LDST_SGF; | ||
| 816 | } else { | ||
| 817 | src_dma = sg_dma_address(req->src); | ||
| 818 | options = 0; | ||
| 819 | } | ||
| 820 | |||
| 821 | append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, | ||
| 822 | options); | ||
| 823 | |||
| 824 | return 0; | ||
| 825 | } | ||
| 826 | |||
| 777 | /* submit update job descriptor */ | 827 | /* submit update job descriptor */ |
| 778 | static int ahash_update_ctx(struct ahash_request *req) | 828 | static int ahash_update_ctx(struct ahash_request *req) |
| 779 | { | 829 | { |
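ahash_edesc_alloc() above leans on the reshaped struct ahash_edesc: hw_desc is now a fixed-size, cacheline-aligned array and sec4_sg a trailing variable-length array, so a single kzalloc() covers the job descriptor plus its link table, replacing the old hand-computed `(void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN` offset. The allocation shape, sketched generically (names and sizes illustrative):

#include <linux/slab.h>
#include <linux/types.h>

struct demo_entry {		/* stand-in for sec4_sg_entry */
	u64 ptr;
	u32 len;
	u32 flags;
};

struct demo_edesc {
	u32 hw_desc[16];	/* fixed-size descriptor, known at build time */
	struct demo_entry sg[0];	/* link table, sized per request */
};

static struct demo_edesc *demo_edesc_alloc(int sg_num, gfp_t flags)
{
	/* One allocation: header plus sg_num trailing table entries. */
	return kzalloc(sizeof(struct demo_edesc) +
		       sg_num * sizeof(struct demo_entry),
		       GFP_DMA | flags);
}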
| @@ -789,12 +839,10 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 789 | int *next_buflen = state->current_buf ? &state->buflen_0 : | 839 | int *next_buflen = state->current_buf ? &state->buflen_0 : |
| 790 | &state->buflen_1, last_buflen; | 840 | &state->buflen_1, last_buflen; |
| 791 | int in_len = *buflen + req->nbytes, to_hash; | 841 | int in_len = *buflen + req->nbytes, to_hash; |
| 792 | u32 *sh_desc = ctx->sh_desc_update, *desc; | 842 | u32 *desc; |
| 793 | dma_addr_t ptr = ctx->sh_desc_update_dma; | 843 | int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; |
| 794 | int src_nents, sec4_sg_bytes, sec4_sg_src_index; | ||
| 795 | struct ahash_edesc *edesc; | 844 | struct ahash_edesc *edesc; |
| 796 | int ret = 0; | 845 | int ret = 0; |
| 797 | int sh_len; | ||
| 798 | 846 | ||
| 799 | last_buflen = *next_buflen; | 847 | last_buflen = *next_buflen; |
| 800 | *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); | 848 | *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); |
| @@ -807,40 +855,51 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 807 | dev_err(jrdev, "Invalid number of src SG.\n"); | 855 | dev_err(jrdev, "Invalid number of src SG.\n"); |
| 808 | return src_nents; | 856 | return src_nents; |
| 809 | } | 857 | } |
| 858 | |||
| 859 | if (src_nents) { | ||
| 860 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | ||
| 861 | DMA_TO_DEVICE); | ||
| 862 | if (!mapped_nents) { | ||
| 863 | dev_err(jrdev, "unable to DMA map source\n"); | ||
| 864 | return -ENOMEM; | ||
| 865 | } | ||
| 866 | } else { | ||
| 867 | mapped_nents = 0; | ||
| 868 | } | ||
| 869 | |||
| 810 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); | 870 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); |
| 811 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | 871 | sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * |
| 812 | sizeof(struct sec4_sg_entry); | 872 | sizeof(struct sec4_sg_entry); |
| 813 | 873 | ||
| 814 | /* | 874 | /* |
| 815 | * allocate space for base edesc and hw desc commands, | 875 | * allocate space for base edesc and hw desc commands, |
| 816 | * link tables | 876 | * link tables |
| 817 | */ | 877 | */ |
| 818 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + | 878 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, |
| 819 | sec4_sg_bytes, GFP_DMA | flags); | 879 | ctx->sh_desc_update, |
| 880 | ctx->sh_desc_update_dma, flags); | ||
| 820 | if (!edesc) { | 881 | if (!edesc) { |
| 821 | dev_err(jrdev, | 882 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
| 822 | "could not allocate extended descriptor\n"); | ||
| 823 | return -ENOMEM; | 883 | return -ENOMEM; |
| 824 | } | 884 | } |
| 825 | 885 | ||
| 826 | edesc->src_nents = src_nents; | 886 | edesc->src_nents = src_nents; |
| 827 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 887 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
| 828 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 829 | DESC_JOB_IO_LEN; | ||
| 830 | 888 | ||
| 831 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | 889 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
| 832 | edesc->sec4_sg, DMA_BIDIRECTIONAL); | 890 | edesc->sec4_sg, DMA_BIDIRECTIONAL); |
| 833 | if (ret) | 891 | if (ret) |
| 834 | return ret; | 892 | goto unmap_ctx; |
| 835 | 893 | ||
| 836 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, | 894 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, |
| 837 | edesc->sec4_sg + 1, | 895 | edesc->sec4_sg + 1, |
| 838 | buf, state->buf_dma, | 896 | buf, state->buf_dma, |
| 839 | *buflen, last_buflen); | 897 | *buflen, last_buflen); |
| 840 | 898 | ||
| 841 | if (src_nents) { | 899 | if (mapped_nents) { |
| 842 | src_map_to_sec4_sg(jrdev, req->src, src_nents, | 900 | sg_to_sec4_sg_last(req->src, mapped_nents, |
| 843 | edesc->sec4_sg + sec4_sg_src_index); | 901 | edesc->sec4_sg + sec4_sg_src_index, |
| 902 | 0); | ||
| 844 | if (*next_buflen) | 903 | if (*next_buflen) |
| 845 | scatterwalk_map_and_copy(next_buf, req->src, | 904 | scatterwalk_map_and_copy(next_buf, req->src, |
| 846 | to_hash - *buflen, | 905 | to_hash - *buflen, |
| @@ -852,17 +911,15 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 852 | 911 | ||
| 853 | state->current_buf = !state->current_buf; | 912 | state->current_buf = !state->current_buf; |
| 854 | 913 | ||
| 855 | sh_len = desc_len(sh_desc); | ||
| 856 | desc = edesc->hw_desc; | 914 | desc = edesc->hw_desc; |
| 857 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
| 858 | HDR_REVERSE); | ||
| 859 | 915 | ||
| 860 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 916 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
| 861 | sec4_sg_bytes, | 917 | sec4_sg_bytes, |
| 862 | DMA_TO_DEVICE); | 918 | DMA_TO_DEVICE); |
| 863 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | 919 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { |
| 864 | dev_err(jrdev, "unable to map S/G table\n"); | 920 | dev_err(jrdev, "unable to map S/G table\n"); |
| 865 | return -ENOMEM; | 921 | ret = -ENOMEM; |
| 922 | goto unmap_ctx; | ||
| 866 | } | 923 | } |
| 867 | 924 | ||
| 868 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + | 925 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + |
| @@ -877,13 +934,10 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 877 | #endif | 934 | #endif |
| 878 | 935 | ||
| 879 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); | 936 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); |
| 880 | if (!ret) { | 937 | if (ret) |
| 881 | ret = -EINPROGRESS; | 938 | goto unmap_ctx; |
| 882 | } else { | 939 | |
| 883 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | 940 | ret = -EINPROGRESS; |
| 884 | DMA_BIDIRECTIONAL); | ||
| 885 | kfree(edesc); | ||
| 886 | } | ||
| 887 | } else if (*next_buflen) { | 941 | } else if (*next_buflen) { |
| 888 | scatterwalk_map_and_copy(buf + *buflen, req->src, 0, | 942 | scatterwalk_map_and_copy(buf + *buflen, req->src, 0, |
| 889 | req->nbytes, 0); | 943 | req->nbytes, 0); |
| @@ -899,6 +953,10 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 899 | #endif | 953 | #endif |
| 900 | 954 | ||
| 901 | return ret; | 955 | return ret; |
| 956 | unmap_ctx: | ||
| 957 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); | ||
| 958 | kfree(edesc); | ||
| 959 | return ret; | ||
| 902 | } | 960 | } |
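The rewritten tail of ahash_update_ctx() is the pattern the rest of the series repeats: every failure after allocation jumps to a single unmap_ctx label, so the early returns that used to leak the DMA-mapped context and the edesc are gone. Reduced to its shape (locals elided; build_and_map() is a stand-in for the S/G setup steps, not a real helper):

```c
	edesc = ahash_edesc_alloc(ctx, sg_entries, ctx->sh_desc_update,
				  ctx->sh_desc_update_dma, flags);
	if (!edesc) {
		/* only the source is mapped so far; undo just that */
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	ret = build_and_map(edesc);		/* stand-in for ctx/buf/src setup */
	if (ret)
		goto unmap_ctx;

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, ahash_done_bi, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;	/* queued: the completion callback owns edesc */

unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
```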
| 903 | 961 | ||
| 904 | static int ahash_final_ctx(struct ahash_request *req) | 962 | static int ahash_final_ctx(struct ahash_request *req) |
| @@ -913,38 +971,31 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 913 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | 971 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; |
| 914 | int last_buflen = state->current_buf ? state->buflen_0 : | 972 | int last_buflen = state->current_buf ? state->buflen_0 : |
| 915 | state->buflen_1; | 973 | state->buflen_1; |
| 916 | u32 *sh_desc = ctx->sh_desc_fin, *desc; | 974 | u32 *desc; |
| 917 | dma_addr_t ptr = ctx->sh_desc_fin_dma; | ||
| 918 | int sec4_sg_bytes, sec4_sg_src_index; | 975 | int sec4_sg_bytes, sec4_sg_src_index; |
| 919 | int digestsize = crypto_ahash_digestsize(ahash); | 976 | int digestsize = crypto_ahash_digestsize(ahash); |
| 920 | struct ahash_edesc *edesc; | 977 | struct ahash_edesc *edesc; |
| 921 | int ret = 0; | 978 | int ret; |
| 922 | int sh_len; | ||
| 923 | 979 | ||
| 924 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); | 980 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); |
| 925 | sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); | 981 | sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); |
| 926 | 982 | ||
| 927 | /* allocate space for base edesc and hw desc commands, link tables */ | 983 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 928 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, | 984 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, |
| 929 | GFP_DMA | flags); | 985 | ctx->sh_desc_fin, ctx->sh_desc_fin_dma, |
| 930 | if (!edesc) { | 986 | flags); |
| 931 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 987 | if (!edesc) |
| 932 | return -ENOMEM; | 988 | return -ENOMEM; |
| 933 | } | ||
| 934 | 989 | ||
| 935 | sh_len = desc_len(sh_desc); | ||
| 936 | desc = edesc->hw_desc; | 990 | desc = edesc->hw_desc; |
| 937 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 938 | 991 | ||
| 939 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 992 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
| 940 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 941 | DESC_JOB_IO_LEN; | ||
| 942 | edesc->src_nents = 0; | 993 | edesc->src_nents = 0; |
| 943 | 994 | ||
| 944 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | 995 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
| 945 | edesc->sec4_sg, DMA_TO_DEVICE); | 996 | edesc->sec4_sg, DMA_TO_DEVICE); |
| 946 | if (ret) | 997 | if (ret) |
| 947 | return ret; | 998 | goto unmap_ctx; |
| 948 | 999 | ||
| 949 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, | 1000 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, |
| 950 | buf, state->buf_dma, buflen, | 1001 | buf, state->buf_dma, buflen, |
| @@ -956,7 +1007,8 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 956 | sec4_sg_bytes, DMA_TO_DEVICE); | 1007 | sec4_sg_bytes, DMA_TO_DEVICE); |
| 957 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | 1008 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { |
| 958 | dev_err(jrdev, "unable to map S/G table\n"); | 1009 | dev_err(jrdev, "unable to map S/G table\n"); |
| 959 | return -ENOMEM; | 1010 | ret = -ENOMEM; |
| 1011 | goto unmap_ctx; | ||
| 960 | } | 1012 | } |
| 961 | 1013 | ||
| 962 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, | 1014 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, |
| @@ -966,7 +1018,8 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 966 | digestsize); | 1018 | digestsize); |
| 967 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | 1019 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { |
| 968 | dev_err(jrdev, "unable to map dst\n"); | 1020 | dev_err(jrdev, "unable to map dst\n"); |
| 969 | return -ENOMEM; | 1021 | ret = -ENOMEM; |
| 1022 | goto unmap_ctx; | ||
| 970 | } | 1023 | } |
| 971 | 1024 | ||
| 972 | #ifdef DEBUG | 1025 | #ifdef DEBUG |
| @@ -975,13 +1028,13 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 975 | #endif | 1028 | #endif |
| 976 | 1029 | ||
| 977 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | 1030 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); |
| 978 | if (!ret) { | 1031 | if (ret) |
| 979 | ret = -EINPROGRESS; | 1032 | goto unmap_ctx; |
| 980 | } else { | ||
| 981 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
| 982 | kfree(edesc); | ||
| 983 | } | ||
| 984 | 1033 | ||
| 1034 | return -EINPROGRESS; | ||
| 1035 | unmap_ctx: | ||
| 1036 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
| 1037 | kfree(edesc); | ||
| 985 | return ret; | 1038 | return ret; |
| 986 | } | 1039 | } |
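ahash_final_ctx() now has the same two-outcome tail: if caam_jr_enqueue() accepts the job, the function returns -EINPROGRESS and ownership of the extended descriptor passes to ahash_done_ctx_src(); only a queueing failure unwinds locally. The callback itself is outside this excerpt; the sketch below shows what it is assumed to do, with the status-to-errno translation simplified:

```c
static void done_ctx_src_sketch(struct device *jrdev, u32 *desc, u32 err,
				void *context)
{
	struct ahash_request *req = context;
	/* recover the edesc that embeds this job descriptor */
	struct ahash_edesc *edesc =
		container_of(desc, struct ahash_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);	/* decode and log HW status */

	/* the unmap/kfree skipped on the -EINPROGRESS path happen here */
	ahash_unmap_ctx(jrdev, edesc, req,
			crypto_ahash_digestsize(crypto_ahash_reqtfm(req)),
			DMA_FROM_DEVICE);
	kfree(edesc);

	req->base.complete(&req->base, err ? -EIO : 0);
}
```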
| 987 | 1040 | ||
| @@ -997,68 +1050,66 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
| 997 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | 1050 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; |
| 998 | int last_buflen = state->current_buf ? state->buflen_0 : | 1051 | int last_buflen = state->current_buf ? state->buflen_0 : |
| 999 | state->buflen_1; | 1052 | state->buflen_1; |
| 1000 | u32 *sh_desc = ctx->sh_desc_finup, *desc; | 1053 | u32 *desc; |
| 1001 | dma_addr_t ptr = ctx->sh_desc_finup_dma; | 1054 | int sec4_sg_src_index; |
| 1002 | int sec4_sg_bytes, sec4_sg_src_index; | 1055 | int src_nents, mapped_nents; |
| 1003 | int src_nents; | ||
| 1004 | int digestsize = crypto_ahash_digestsize(ahash); | 1056 | int digestsize = crypto_ahash_digestsize(ahash); |
| 1005 | struct ahash_edesc *edesc; | 1057 | struct ahash_edesc *edesc; |
| 1006 | int ret = 0; | 1058 | int ret; |
| 1007 | int sh_len; | ||
| 1008 | 1059 | ||
| 1009 | src_nents = sg_nents_for_len(req->src, req->nbytes); | 1060 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
| 1010 | if (src_nents < 0) { | 1061 | if (src_nents < 0) { |
| 1011 | dev_err(jrdev, "Invalid number of src SG.\n"); | 1062 | dev_err(jrdev, "Invalid number of src SG.\n"); |
| 1012 | return src_nents; | 1063 | return src_nents; |
| 1013 | } | 1064 | } |
| 1065 | |||
| 1066 | if (src_nents) { | ||
| 1067 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | ||
| 1068 | DMA_TO_DEVICE); | ||
| 1069 | if (!mapped_nents) { | ||
| 1070 | dev_err(jrdev, "unable to DMA map source\n"); | ||
| 1071 | return -ENOMEM; | ||
| 1072 | } | ||
| 1073 | } else { | ||
| 1074 | mapped_nents = 0; | ||
| 1075 | } | ||
| 1076 | |||
| 1014 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); | 1077 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); |
| 1015 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | ||
| 1016 | sizeof(struct sec4_sg_entry); | ||
| 1017 | 1078 | ||
| 1018 | /* allocate space for base edesc and hw desc commands, link tables */ | 1079 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1019 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, | 1080 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, |
| 1020 | GFP_DMA | flags); | 1081 | ctx->sh_desc_finup, ctx->sh_desc_finup_dma, |
| 1082 | flags); | ||
| 1021 | if (!edesc) { | 1083 | if (!edesc) { |
| 1022 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1084 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
| 1023 | return -ENOMEM; | 1085 | return -ENOMEM; |
| 1024 | } | 1086 | } |
| 1025 | 1087 | ||
| 1026 | sh_len = desc_len(sh_desc); | ||
| 1027 | desc = edesc->hw_desc; | 1088 | desc = edesc->hw_desc; |
| 1028 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 1029 | 1089 | ||
| 1030 | edesc->src_nents = src_nents; | 1090 | edesc->src_nents = src_nents; |
| 1031 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 1032 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1033 | DESC_JOB_IO_LEN; | ||
| 1034 | 1091 | ||
| 1035 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | 1092 | ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
| 1036 | edesc->sec4_sg, DMA_TO_DEVICE); | 1093 | edesc->sec4_sg, DMA_TO_DEVICE); |
| 1037 | if (ret) | 1094 | if (ret) |
| 1038 | return ret; | 1095 | goto unmap_ctx; |
| 1039 | 1096 | ||
| 1040 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, | 1097 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, |
| 1041 | buf, state->buf_dma, buflen, | 1098 | buf, state->buf_dma, buflen, |
| 1042 | last_buflen); | 1099 | last_buflen); |
| 1043 | 1100 | ||
| 1044 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + | 1101 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, |
| 1045 | sec4_sg_src_index); | 1102 | sec4_sg_src_index, ctx->ctx_len + buflen, |
| 1046 | 1103 | req->nbytes); | |
| 1047 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1104 | if (ret) |
| 1048 | sec4_sg_bytes, DMA_TO_DEVICE); | 1105 | goto unmap_ctx; |
| 1049 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
| 1050 | dev_err(jrdev, "unable to map S/G table\n"); | ||
| 1051 | return -ENOMEM; | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + | ||
| 1055 | buflen + req->nbytes, LDST_SGF); | ||
| 1056 | 1106 | ||
| 1057 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1107 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
| 1058 | digestsize); | 1108 | digestsize); |
| 1059 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | 1109 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { |
| 1060 | dev_err(jrdev, "unable to map dst\n"); | 1110 | dev_err(jrdev, "unable to map dst\n"); |
| 1061 | return -ENOMEM; | 1111 | ret = -ENOMEM; |
| 1112 | goto unmap_ctx; | ||
| 1062 | } | 1113 | } |
| 1063 | 1114 | ||
| 1064 | #ifdef DEBUG | 1115 | #ifdef DEBUG |
| @@ -1067,13 +1118,13 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
| 1067 | #endif | 1118 | #endif |
| 1068 | 1119 | ||
| 1069 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | 1120 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); |
| 1070 | if (!ret) { | 1121 | if (ret) |
| 1071 | ret = -EINPROGRESS; | 1122 | goto unmap_ctx; |
| 1072 | } else { | ||
| 1073 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
| 1074 | kfree(edesc); | ||
| 1075 | } | ||
| 1076 | 1123 | ||
| 1124 | return -EINPROGRESS; | ||
| 1125 | unmap_ctx: | ||
| 1126 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
| 1127 | kfree(edesc); | ||
| 1077 | return ret; | 1128 | return ret; |
| 1078 | } | 1129 | } |
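ahash_finup_ctx() shows why the series threads mapped_nents alongside src_nents: dma_map_sg() may coalesce scatterlist entries behind an IOMMU, so the count it returns is what must size the sec4 table, while dma_unmap_sg() must still be called with the original entry count. The DMA-API discipline in isolation (dev stands for the job-ring device):

```c
	int src_nents = sg_nents_for_len(req->src, req->nbytes);
	int mapped_nents = 0;

	if (src_nents < 0)
		return src_nents;		/* malformed scatterlist */
	if (src_nents) {
		mapped_nents = dma_map_sg(dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents)
			return -ENOMEM;		/* nothing mapped, nothing to undo */
	}

	/* size HW tables from mapped_nents: it can be < src_nents ... */

	/* ... but always unmap with the count passed to dma_map_sg() */
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
```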
| 1079 | 1130 | ||
| @@ -1084,60 +1135,56 @@ static int ahash_digest(struct ahash_request *req) | |||
| 1084 | struct device *jrdev = ctx->jrdev; | 1135 | struct device *jrdev = ctx->jrdev; |
| 1085 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1136 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
| 1086 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 1137 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
| 1087 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | 1138 | u32 *desc; |
| 1088 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | ||
| 1089 | int digestsize = crypto_ahash_digestsize(ahash); | 1139 | int digestsize = crypto_ahash_digestsize(ahash); |
| 1090 | int src_nents, sec4_sg_bytes; | 1140 | int src_nents, mapped_nents; |
| 1091 | dma_addr_t src_dma; | ||
| 1092 | struct ahash_edesc *edesc; | 1141 | struct ahash_edesc *edesc; |
| 1093 | int ret = 0; | 1142 | int ret; |
| 1094 | u32 options; | ||
| 1095 | int sh_len; | ||
| 1096 | 1143 | ||
| 1097 | src_nents = sg_count(req->src, req->nbytes); | 1144 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
| 1098 | if (src_nents < 0) { | 1145 | if (src_nents < 0) { |
| 1099 | dev_err(jrdev, "Invalid number of src SG.\n"); | 1146 | dev_err(jrdev, "Invalid number of src SG.\n"); |
| 1100 | return src_nents; | 1147 | return src_nents; |
| 1101 | } | 1148 | } |
| 1102 | dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); | 1149 | |
| 1103 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | 1150 | if (src_nents) { |
| 1151 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | ||
| 1152 | DMA_TO_DEVICE); | ||
| 1153 | if (!mapped_nents) { | ||
| 1154 | dev_err(jrdev, "unable to map source for DMA\n"); | ||
| 1155 | return -ENOMEM; | ||
| 1156 | } | ||
| 1157 | } else { | ||
| 1158 | mapped_nents = 0; | ||
| 1159 | } | ||
| 1104 | 1160 | ||
| 1105 | /* allocate space for base edesc and hw desc commands, link tables */ | 1161 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1106 | edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN, | 1162 | edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0, |
| 1107 | GFP_DMA | flags); | 1163 | ctx->sh_desc_digest, ctx->sh_desc_digest_dma, |
| 1164 | flags); | ||
| 1108 | if (!edesc) { | 1165 | if (!edesc) { |
| 1109 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1166 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
| 1110 | return -ENOMEM; | 1167 | return -ENOMEM; |
| 1111 | } | 1168 | } |
| 1112 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1113 | DESC_JOB_IO_LEN; | ||
| 1114 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 1115 | edesc->src_nents = src_nents; | ||
| 1116 | 1169 | ||
| 1117 | sh_len = desc_len(sh_desc); | 1170 | edesc->src_nents = src_nents; |
| 1118 | desc = edesc->hw_desc; | ||
| 1119 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 1120 | 1171 | ||
| 1121 | if (src_nents) { | 1172 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, |
| 1122 | sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); | 1173 | req->nbytes); |
| 1123 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1174 | if (ret) { |
| 1124 | sec4_sg_bytes, DMA_TO_DEVICE); | 1175 | ahash_unmap(jrdev, edesc, req, digestsize); |
| 1125 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | 1176 | kfree(edesc); |
| 1126 | dev_err(jrdev, "unable to map S/G table\n"); | 1177 | return ret; |
| 1127 | return -ENOMEM; | ||
| 1128 | } | ||
| 1129 | src_dma = edesc->sec4_sg_dma; | ||
| 1130 | options = LDST_SGF; | ||
| 1131 | } else { | ||
| 1132 | src_dma = sg_dma_address(req->src); | ||
| 1133 | options = 0; | ||
| 1134 | } | 1178 | } |
| 1135 | append_seq_in_ptr(desc, src_dma, req->nbytes, options); | 1179 | |
| 1180 | desc = edesc->hw_desc; | ||
| 1136 | 1181 | ||
| 1137 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1182 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
| 1138 | digestsize); | 1183 | digestsize); |
| 1139 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | 1184 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { |
| 1140 | dev_err(jrdev, "unable to map dst\n"); | 1185 | dev_err(jrdev, "unable to map dst\n"); |
| 1186 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
| 1187 | kfree(edesc); | ||
| 1141 | return -ENOMEM; | 1188 | return -ENOMEM; |
| 1142 | } | 1189 | } |
| 1143 | 1190 | ||
| @@ -1168,29 +1215,23 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
| 1168 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 1215 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
| 1169 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | 1216 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; |
| 1170 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | 1217 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; |
| 1171 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | 1218 | u32 *desc; |
| 1172 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | ||
| 1173 | int digestsize = crypto_ahash_digestsize(ahash); | 1219 | int digestsize = crypto_ahash_digestsize(ahash); |
| 1174 | struct ahash_edesc *edesc; | 1220 | struct ahash_edesc *edesc; |
| 1175 | int ret = 0; | 1221 | int ret; |
| 1176 | int sh_len; | ||
| 1177 | 1222 | ||
| 1178 | /* allocate space for base edesc and hw desc commands, link tables */ | 1223 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1179 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags); | 1224 | edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, |
| 1180 | if (!edesc) { | 1225 | ctx->sh_desc_digest_dma, flags); |
| 1181 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1226 | if (!edesc) |
| 1182 | return -ENOMEM; | 1227 | return -ENOMEM; |
| 1183 | } | ||
| 1184 | 1228 | ||
| 1185 | edesc->sec4_sg_bytes = 0; | ||
| 1186 | sh_len = desc_len(sh_desc); | ||
| 1187 | desc = edesc->hw_desc; | 1229 | desc = edesc->hw_desc; |
| 1188 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 1189 | 1230 | ||
| 1190 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | 1231 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); |
| 1191 | if (dma_mapping_error(jrdev, state->buf_dma)) { | 1232 | if (dma_mapping_error(jrdev, state->buf_dma)) { |
| 1192 | dev_err(jrdev, "unable to map src\n"); | 1233 | dev_err(jrdev, "unable to map src\n"); |
| 1193 | return -ENOMEM; | 1234 | goto unmap; |
| 1194 | } | 1235 | } |
| 1195 | 1236 | ||
| 1196 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); | 1237 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); |
| @@ -1199,7 +1240,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
| 1199 | digestsize); | 1240 | digestsize); |
| 1200 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | 1241 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { |
| 1201 | dev_err(jrdev, "unable to map dst\n"); | 1242 | dev_err(jrdev, "unable to map dst\n"); |
| 1202 | return -ENOMEM; | 1243 | goto unmap; |
| 1203 | } | 1244 | } |
| 1204 | edesc->src_nents = 0; | 1245 | edesc->src_nents = 0; |
| 1205 | 1246 | ||
| @@ -1217,6 +1258,11 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
| 1217 | } | 1258 | } |
| 1218 | 1259 | ||
| 1219 | return ret; | 1260 | return ret; |
| 1261 | unmap: | ||
| 1262 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
| 1263 | kfree(edesc); | ||
| 1264 | return -ENOMEM; | ||
| 1265 | |||
| 1220 | } | 1266 | } |
| 1221 | 1267 | ||
| 1222 | /* submit ahash update if it is the first job descriptor after update */ | 1268 | /* submit ahash update if it is the first job descriptor after update */ |

| @@ -1234,48 +1280,58 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1234 | int *next_buflen = state->current_buf ? &state->buflen_0 : | 1280 | int *next_buflen = state->current_buf ? &state->buflen_0 : |
| 1235 | &state->buflen_1; | 1281 | &state->buflen_1; |
| 1236 | int in_len = *buflen + req->nbytes, to_hash; | 1282 | int in_len = *buflen + req->nbytes, to_hash; |
| 1237 | int sec4_sg_bytes, src_nents; | 1283 | int sec4_sg_bytes, src_nents, mapped_nents; |
| 1238 | struct ahash_edesc *edesc; | 1284 | struct ahash_edesc *edesc; |
| 1239 | u32 *desc, *sh_desc = ctx->sh_desc_update_first; | 1285 | u32 *desc; |
| 1240 | dma_addr_t ptr = ctx->sh_desc_update_first_dma; | ||
| 1241 | int ret = 0; | 1286 | int ret = 0; |
| 1242 | int sh_len; | ||
| 1243 | 1287 | ||
| 1244 | *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); | 1288 | *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); |
| 1245 | to_hash = in_len - *next_buflen; | 1289 | to_hash = in_len - *next_buflen; |
| 1246 | 1290 | ||
| 1247 | if (to_hash) { | 1291 | if (to_hash) { |
| 1248 | src_nents = sg_nents_for_len(req->src, | 1292 | src_nents = sg_nents_for_len(req->src, |
| 1249 | req->nbytes - (*next_buflen)); | 1293 | req->nbytes - *next_buflen); |
| 1250 | if (src_nents < 0) { | 1294 | if (src_nents < 0) { |
| 1251 | dev_err(jrdev, "Invalid number of src SG.\n"); | 1295 | dev_err(jrdev, "Invalid number of src SG.\n"); |
| 1252 | return src_nents; | 1296 | return src_nents; |
| 1253 | } | 1297 | } |
| 1254 | sec4_sg_bytes = (1 + src_nents) * | 1298 | |
| 1299 | if (src_nents) { | ||
| 1300 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | ||
| 1301 | DMA_TO_DEVICE); | ||
| 1302 | if (!mapped_nents) { | ||
| 1303 | dev_err(jrdev, "unable to DMA map source\n"); | ||
| 1304 | return -ENOMEM; | ||
| 1305 | } | ||
| 1306 | } else { | ||
| 1307 | mapped_nents = 0; | ||
| 1308 | } | ||
| 1309 | |||
| 1310 | sec4_sg_bytes = (1 + mapped_nents) * | ||
| 1255 | sizeof(struct sec4_sg_entry); | 1311 | sizeof(struct sec4_sg_entry); |
| 1256 | 1312 | ||
| 1257 | /* | 1313 | /* |
| 1258 | * allocate space for base edesc and hw desc commands, | 1314 | * allocate space for base edesc and hw desc commands, |
| 1259 | * link tables | 1315 | * link tables |
| 1260 | */ | 1316 | */ |
| 1261 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + | 1317 | edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, |
| 1262 | sec4_sg_bytes, GFP_DMA | flags); | 1318 | ctx->sh_desc_update_first, |
| 1319 | ctx->sh_desc_update_first_dma, | ||
| 1320 | flags); | ||
| 1263 | if (!edesc) { | 1321 | if (!edesc) { |
| 1264 | dev_err(jrdev, | 1322 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
| 1265 | "could not allocate extended descriptor\n"); | ||
| 1266 | return -ENOMEM; | 1323 | return -ENOMEM; |
| 1267 | } | 1324 | } |
| 1268 | 1325 | ||
| 1269 | edesc->src_nents = src_nents; | 1326 | edesc->src_nents = src_nents; |
| 1270 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1327 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
| 1271 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1272 | DESC_JOB_IO_LEN; | ||
| 1273 | edesc->dst_dma = 0; | 1328 | edesc->dst_dma = 0; |
| 1274 | 1329 | ||
| 1275 | state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, | 1330 | state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, |
| 1276 | buf, *buflen); | 1331 | buf, *buflen); |
| 1277 | src_map_to_sec4_sg(jrdev, req->src, src_nents, | 1332 | sg_to_sec4_sg_last(req->src, mapped_nents, |
| 1278 | edesc->sec4_sg + 1); | 1333 | edesc->sec4_sg + 1, 0); |
| 1334 | |||
| 1279 | if (*next_buflen) { | 1335 | if (*next_buflen) { |
| 1280 | scatterwalk_map_and_copy(next_buf, req->src, | 1336 | scatterwalk_map_and_copy(next_buf, req->src, |
| 1281 | to_hash - *buflen, | 1337 | to_hash - *buflen, |
| @@ -1284,24 +1340,22 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1284 | 1340 | ||
| 1285 | state->current_buf = !state->current_buf; | 1341 | state->current_buf = !state->current_buf; |
| 1286 | 1342 | ||
| 1287 | sh_len = desc_len(sh_desc); | ||
| 1288 | desc = edesc->hw_desc; | 1343 | desc = edesc->hw_desc; |
| 1289 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
| 1290 | HDR_REVERSE); | ||
| 1291 | 1344 | ||
| 1292 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1345 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
| 1293 | sec4_sg_bytes, | 1346 | sec4_sg_bytes, |
| 1294 | DMA_TO_DEVICE); | 1347 | DMA_TO_DEVICE); |
| 1295 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | 1348 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { |
| 1296 | dev_err(jrdev, "unable to map S/G table\n"); | 1349 | dev_err(jrdev, "unable to map S/G table\n"); |
| 1297 | return -ENOMEM; | 1350 | ret = -ENOMEM; |
| 1351 | goto unmap_ctx; | ||
| 1298 | } | 1352 | } |
| 1299 | 1353 | ||
| 1300 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); | 1354 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); |
| 1301 | 1355 | ||
| 1302 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | 1356 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); |
| 1303 | if (ret) | 1357 | if (ret) |
| 1304 | return ret; | 1358 | goto unmap_ctx; |
| 1305 | 1359 | ||
| 1306 | #ifdef DEBUG | 1360 | #ifdef DEBUG |
| 1307 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1361 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
| @@ -1310,16 +1364,13 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1310 | #endif | 1364 | #endif |
| 1311 | 1365 | ||
| 1312 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); | 1366 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); |
| 1313 | if (!ret) { | 1367 | if (ret) |
| 1314 | ret = -EINPROGRESS; | 1368 | goto unmap_ctx; |
| 1315 | state->update = ahash_update_ctx; | 1369 | |
| 1316 | state->finup = ahash_finup_ctx; | 1370 | ret = -EINPROGRESS; |
| 1317 | state->final = ahash_final_ctx; | 1371 | state->update = ahash_update_ctx; |
| 1318 | } else { | 1372 | state->finup = ahash_finup_ctx; |
| 1319 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | 1373 | state->final = ahash_final_ctx; |
| 1320 | DMA_TO_DEVICE); | ||
| 1321 | kfree(edesc); | ||
| 1322 | } | ||
| 1323 | } else if (*next_buflen) { | 1374 | } else if (*next_buflen) { |
| 1324 | scatterwalk_map_and_copy(buf + *buflen, req->src, 0, | 1375 | scatterwalk_map_and_copy(buf + *buflen, req->src, 0, |
| 1325 | req->nbytes, 0); | 1376 | req->nbytes, 0); |
| @@ -1335,6 +1386,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1335 | #endif | 1386 | #endif |
| 1336 | 1387 | ||
| 1337 | return ret; | 1388 | return ret; |
| 1389 | unmap_ctx: | ||
| 1390 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); | ||
| 1391 | kfree(edesc); | ||
| 1392 | return ret; | ||
| 1338 | } | 1393 | } |
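The update paths hash only whole blocks and carry the remainder in one of two ping-pong buffers (buf_0/buf_1, flipped via state->current_buf); the `in_len & (blocksize - 1)` arithmetic above computes that carry. A standalone worked example (the values are illustrative, not from the patch):

```c
#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 64;           /* e.g. SHA-256 block size */
	unsigned int buflen = 10;              /* tail carried from last update */
	unsigned int nbytes = 100;             /* bytes in this request */
	unsigned int in_len = buflen + nbytes; /* 110 bytes available */

	/* the AND works because block sizes are powers of two */
	unsigned int next_buflen = in_len & (blocksize - 1); /* 110 % 64 = 46 */
	unsigned int to_hash = in_len - next_buflen;         /* 64 */

	printf("hash %u bytes now, carry %u to the next update\n",
	       to_hash, next_buflen);
	return 0;
}
```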
| 1339 | 1394 | ||
| 1340 | /* submit ahash finup if it is the first job descriptor after update */ | 1395 | /* submit ahash finup if it is the first job descriptor after update */ |
| @@ -1350,61 +1405,63 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
| 1350 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | 1405 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; |
| 1351 | int last_buflen = state->current_buf ? state->buflen_0 : | 1406 | int last_buflen = state->current_buf ? state->buflen_0 : |
| 1352 | state->buflen_1; | 1407 | state->buflen_1; |
| 1353 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | 1408 | u32 *desc; |
| 1354 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | 1409 | int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; |
| 1355 | int sec4_sg_bytes, sec4_sg_src_index, src_nents; | ||
| 1356 | int digestsize = crypto_ahash_digestsize(ahash); | 1410 | int digestsize = crypto_ahash_digestsize(ahash); |
| 1357 | struct ahash_edesc *edesc; | 1411 | struct ahash_edesc *edesc; |
| 1358 | int sh_len; | 1412 | int ret; |
| 1359 | int ret = 0; | ||
| 1360 | 1413 | ||
| 1361 | src_nents = sg_nents_for_len(req->src, req->nbytes); | 1414 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
| 1362 | if (src_nents < 0) { | 1415 | if (src_nents < 0) { |
| 1363 | dev_err(jrdev, "Invalid number of src SG.\n"); | 1416 | dev_err(jrdev, "Invalid number of src SG.\n"); |
| 1364 | return src_nents; | 1417 | return src_nents; |
| 1365 | } | 1418 | } |
| 1419 | |||
| 1420 | if (src_nents) { | ||
| 1421 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | ||
| 1422 | DMA_TO_DEVICE); | ||
| 1423 | if (!mapped_nents) { | ||
| 1424 | dev_err(jrdev, "unable to DMA map source\n"); | ||
| 1425 | return -ENOMEM; | ||
| 1426 | } | ||
| 1427 | } else { | ||
| 1428 | mapped_nents = 0; | ||
| 1429 | } | ||
| 1430 | |||
| 1366 | sec4_sg_src_index = 2; | 1431 | sec4_sg_src_index = 2; |
| 1367 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | 1432 | sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * |
| 1368 | sizeof(struct sec4_sg_entry); | 1433 | sizeof(struct sec4_sg_entry); |
| 1369 | 1434 | ||
| 1370 | /* allocate space for base edesc and hw desc commands, link tables */ | 1435 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1371 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, | 1436 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, |
| 1372 | GFP_DMA | flags); | 1437 | ctx->sh_desc_digest, ctx->sh_desc_digest_dma, |
| 1438 | flags); | ||
| 1373 | if (!edesc) { | 1439 | if (!edesc) { |
| 1374 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1440 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
| 1375 | return -ENOMEM; | 1441 | return -ENOMEM; |
| 1376 | } | 1442 | } |
| 1377 | 1443 | ||
| 1378 | sh_len = desc_len(sh_desc); | ||
| 1379 | desc = edesc->hw_desc; | 1444 | desc = edesc->hw_desc; |
| 1380 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 1381 | 1445 | ||
| 1382 | edesc->src_nents = src_nents; | 1446 | edesc->src_nents = src_nents; |
| 1383 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 1447 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
| 1384 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1385 | DESC_JOB_IO_LEN; | ||
| 1386 | 1448 | ||
| 1387 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, | 1449 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, |
| 1388 | state->buf_dma, buflen, | 1450 | state->buf_dma, buflen, |
| 1389 | last_buflen); | 1451 | last_buflen); |
| 1390 | 1452 | ||
| 1391 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1); | 1453 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, |
| 1392 | 1454 | req->nbytes); | |
| 1393 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1455 | if (ret) { |
| 1394 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
| 1395 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
| 1396 | dev_err(jrdev, "unable to map S/G table\n"); | 1456 | dev_err(jrdev, "unable to map S/G table\n"); |
| 1397 | return -ENOMEM; | 1457 | goto unmap; |
| 1398 | } | 1458 | } |
| 1399 | 1459 | ||
| 1400 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + | ||
| 1401 | req->nbytes, LDST_SGF); | ||
| 1402 | |||
| 1403 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1460 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
| 1404 | digestsize); | 1461 | digestsize); |
| 1405 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { | 1462 | if (dma_mapping_error(jrdev, edesc->dst_dma)) { |
| 1406 | dev_err(jrdev, "unable to map dst\n"); | 1463 | dev_err(jrdev, "unable to map dst\n"); |
| 1407 | return -ENOMEM; | 1464 | goto unmap; |
| 1408 | } | 1465 | } |
| 1409 | 1466 | ||
| 1410 | #ifdef DEBUG | 1467 | #ifdef DEBUG |
| @@ -1421,6 +1478,11 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
| 1421 | } | 1478 | } |
| 1422 | 1479 | ||
| 1423 | return ret; | 1480 | return ret; |
| 1481 | unmap: | ||
| 1482 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
| 1483 | kfree(edesc); | ||
| 1484 | return -ENOMEM; | ||
| 1485 | |||
| 1424 | } | 1486 | } |
| 1425 | 1487 | ||
| 1426 | /* submit first update job descriptor after init */ | 1488 | /* submit first update job descriptor after init */ |
| @@ -1436,78 +1498,65 @@ static int ahash_update_first(struct ahash_request *req) | |||
| 1436 | int *next_buflen = state->current_buf ? | 1498 | int *next_buflen = state->current_buf ? |
| 1437 | &state->buflen_1 : &state->buflen_0; | 1499 | &state->buflen_1 : &state->buflen_0; |
| 1438 | int to_hash; | 1500 | int to_hash; |
| 1439 | u32 *sh_desc = ctx->sh_desc_update_first, *desc; | 1501 | u32 *desc; |
| 1440 | dma_addr_t ptr = ctx->sh_desc_update_first_dma; | 1502 | int src_nents, mapped_nents; |
| 1441 | int sec4_sg_bytes, src_nents; | ||
| 1442 | dma_addr_t src_dma; | ||
| 1443 | u32 options; | ||
| 1444 | struct ahash_edesc *edesc; | 1503 | struct ahash_edesc *edesc; |
| 1445 | int ret = 0; | 1504 | int ret = 0; |
| 1446 | int sh_len; | ||
| 1447 | 1505 | ||
| 1448 | *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - | 1506 | *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - |
| 1449 | 1); | 1507 | 1); |
| 1450 | to_hash = req->nbytes - *next_buflen; | 1508 | to_hash = req->nbytes - *next_buflen; |
| 1451 | 1509 | ||
| 1452 | if (to_hash) { | 1510 | if (to_hash) { |
| 1453 | src_nents = sg_count(req->src, req->nbytes - (*next_buflen)); | 1511 | src_nents = sg_nents_for_len(req->src, |
| 1512 | req->nbytes - *next_buflen); | ||
| 1454 | if (src_nents < 0) { | 1513 | if (src_nents < 0) { |
| 1455 | dev_err(jrdev, "Invalid number of src SG.\n"); | 1514 | dev_err(jrdev, "Invalid number of src SG.\n"); |
| 1456 | return src_nents; | 1515 | return src_nents; |
| 1457 | } | 1516 | } |
| 1458 | dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); | 1517 | |
| 1459 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | 1518 | if (src_nents) { |
| 1519 | mapped_nents = dma_map_sg(jrdev, req->src, src_nents, | ||
| 1520 | DMA_TO_DEVICE); | ||
| 1521 | if (!mapped_nents) { | ||
| 1522 | dev_err(jrdev, "unable to map source for DMA\n"); | ||
| 1523 | return -ENOMEM; | ||
| 1524 | } | ||
| 1525 | } else { | ||
| 1526 | mapped_nents = 0; | ||
| 1527 | } | ||
| 1460 | 1528 | ||
| 1461 | /* | 1529 | /* |
| 1462 | * allocate space for base edesc and hw desc commands, | 1530 | * allocate space for base edesc and hw desc commands, |
| 1463 | * link tables | 1531 | * link tables |
| 1464 | */ | 1532 | */ |
| 1465 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + | 1533 | edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? |
| 1466 | sec4_sg_bytes, GFP_DMA | flags); | 1534 | mapped_nents : 0, |
| 1535 | ctx->sh_desc_update_first, | ||
| 1536 | ctx->sh_desc_update_first_dma, | ||
| 1537 | flags); | ||
| 1467 | if (!edesc) { | 1538 | if (!edesc) { |
| 1468 | dev_err(jrdev, | 1539 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
| 1469 | "could not allocate extended descriptor\n"); | ||
| 1470 | return -ENOMEM; | 1540 | return -ENOMEM; |
| 1471 | } | 1541 | } |
| 1472 | 1542 | ||
| 1473 | edesc->src_nents = src_nents; | 1543 | edesc->src_nents = src_nents; |
| 1474 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 1475 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1476 | DESC_JOB_IO_LEN; | ||
| 1477 | edesc->dst_dma = 0; | 1544 | edesc->dst_dma = 0; |
| 1478 | 1545 | ||
| 1479 | if (src_nents) { | 1546 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, |
| 1480 | sg_to_sec4_sg_last(req->src, src_nents, | 1547 | to_hash); |
| 1481 | edesc->sec4_sg, 0); | 1548 | if (ret) |
| 1482 | edesc->sec4_sg_dma = dma_map_single(jrdev, | 1549 | goto unmap_ctx; |
| 1483 | edesc->sec4_sg, | ||
| 1484 | sec4_sg_bytes, | ||
| 1485 | DMA_TO_DEVICE); | ||
| 1486 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
| 1487 | dev_err(jrdev, "unable to map S/G table\n"); | ||
| 1488 | return -ENOMEM; | ||
| 1489 | } | ||
| 1490 | src_dma = edesc->sec4_sg_dma; | ||
| 1491 | options = LDST_SGF; | ||
| 1492 | } else { | ||
| 1493 | src_dma = sg_dma_address(req->src); | ||
| 1494 | options = 0; | ||
| 1495 | } | ||
| 1496 | 1550 | ||
| 1497 | if (*next_buflen) | 1551 | if (*next_buflen) |
| 1498 | scatterwalk_map_and_copy(next_buf, req->src, to_hash, | 1552 | scatterwalk_map_and_copy(next_buf, req->src, to_hash, |
| 1499 | *next_buflen, 0); | 1553 | *next_buflen, 0); |
| 1500 | 1554 | ||
| 1501 | sh_len = desc_len(sh_desc); | ||
| 1502 | desc = edesc->hw_desc; | 1555 | desc = edesc->hw_desc; |
| 1503 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
| 1504 | HDR_REVERSE); | ||
| 1505 | |||
| 1506 | append_seq_in_ptr(desc, src_dma, to_hash, options); | ||
| 1507 | 1556 | ||
| 1508 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | 1557 | ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); |
| 1509 | if (ret) | 1558 | if (ret) |
| 1510 | return ret; | 1559 | goto unmap_ctx; |
| 1511 | 1560 | ||
| 1512 | #ifdef DEBUG | 1561 | #ifdef DEBUG |
| 1513 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1562 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
| @@ -1515,18 +1564,14 @@ static int ahash_update_first(struct ahash_request *req) | |||
| 1515 | desc_bytes(desc), 1); | 1564 | desc_bytes(desc), 1); |
| 1516 | #endif | 1565 | #endif |
| 1517 | 1566 | ||
| 1518 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, | 1567 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); |
| 1519 | req); | 1568 | if (ret) |
| 1520 | if (!ret) { | 1569 | goto unmap_ctx; |
| 1521 | ret = -EINPROGRESS; | 1570 | |
| 1522 | state->update = ahash_update_ctx; | 1571 | ret = -EINPROGRESS; |
| 1523 | state->finup = ahash_finup_ctx; | 1572 | state->update = ahash_update_ctx; |
| 1524 | state->final = ahash_final_ctx; | 1573 | state->finup = ahash_finup_ctx; |
| 1525 | } else { | 1574 | state->final = ahash_final_ctx; |
| 1526 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | ||
| 1527 | DMA_TO_DEVICE); | ||
| 1528 | kfree(edesc); | ||
| 1529 | } | ||
| 1530 | } else if (*next_buflen) { | 1575 | } else if (*next_buflen) { |
| 1531 | state->update = ahash_update_no_ctx; | 1576 | state->update = ahash_update_no_ctx; |
| 1532 | state->finup = ahash_finup_no_ctx; | 1577 | state->finup = ahash_finup_no_ctx; |
| @@ -1541,6 +1586,10 @@ static int ahash_update_first(struct ahash_request *req) | |||
| 1541 | #endif | 1586 | #endif |
| 1542 | 1587 | ||
| 1543 | return ret; | 1588 | return ret; |
| 1589 | unmap_ctx: | ||
| 1590 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); | ||
| 1591 | kfree(edesc); | ||
| 1592 | return ret; | ||
| 1544 | } | 1593 | } |
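ahash_update_first() also shows the driver's per-request state machine: the ahash entry points live in function pointers inside the request state, and the first successfully queued job promotes them from the no-context variants to the context-carrying ones (the buffered-only else branch stays on the _no_ctx set). Condensed, with the struct layout assumed from the assignments above:

```c
/* condensed view of the relevant caam_hash_state fields (assumed) */
struct caam_hash_state_sketch {
	int (*update)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	/* ping-pong buffers, running-context DMA handle, ... */
};

/* once the first job is on the ring, a running context exists in
 * hardware, so later calls must take the ctx-aware paths: */
state->update = ahash_update_ctx;
state->finup  = ahash_finup_ctx;
state->final  = ahash_final_ctx;
```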
| 1545 | 1594 | ||
| 1546 | static int ahash_finup_first(struct ahash_request *req) | 1595 | static int ahash_finup_first(struct ahash_request *req) |
| @@ -1799,7 +1848,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
| 1799 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, | 1848 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, |
| 1800 | HASH_MSG_LEN + 64, | 1849 | HASH_MSG_LEN + 64, |
| 1801 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; | 1850 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; |
| 1802 | int ret = 0; | ||
| 1803 | 1851 | ||
| 1804 | /* | 1852 | /* |
| 1805 | * Get a Job ring from Job Ring driver to ensure in-order | 1853 | * Get a Job ring from Job Ring driver to ensure in-order |
| @@ -1819,10 +1867,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
| 1819 | 1867 | ||
| 1820 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 1868 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| 1821 | sizeof(struct caam_hash_state)); | 1869 | sizeof(struct caam_hash_state)); |
| 1822 | 1870 | return ahash_set_sh_desc(ahash); | |
| 1823 | ret = ahash_set_sh_desc(ahash); | ||
| 1824 | |||
| 1825 | return ret; | ||
| 1826 | } | 1871 | } |
| 1827 | 1872 | ||
| 1828 | static void caam_hash_cra_exit(struct crypto_tfm *tfm) | 1873 | static void caam_hash_cra_exit(struct crypto_tfm *tfm) |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 0ec112ee5204..72ff19658985 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include "jr.h" | 14 | #include "jr.h" |
| 15 | #include "desc_constr.h" | 15 | #include "desc_constr.h" |
| 16 | #include "error.h" | 16 | #include "error.h" |
| 17 | #include "ctrl.h" | ||
| 17 | 18 | ||
| 18 | bool caam_little_end; | 19 | bool caam_little_end; |
| 19 | EXPORT_SYMBOL(caam_little_end); | 20 | EXPORT_SYMBOL(caam_little_end); |
| @@ -826,6 +827,8 @@ static int caam_probe(struct platform_device *pdev) | |||
| 826 | 827 | ||
| 827 | caam_remove: | 828 | caam_remove: |
| 828 | caam_remove(pdev); | 829 | caam_remove(pdev); |
| 830 | return ret; | ||
| 831 | |||
| 829 | iounmap_ctrl: | 832 | iounmap_ctrl: |
| 830 | iounmap(ctrl); | 833 | iounmap(ctrl); |
| 831 | disable_caam_emi_slow: | 834 | disable_caam_emi_slow: |
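The added `return ret` matters because caam_probe()'s labels form a progressive-teardown ladder: caam_remove() already undoes everything, so falling through into iounmap_ctrl and the labels below it would release the same resources a second time. The fixed shape, as a sketch:

```c
caam_remove:
	caam_remove(pdev);	/* full teardown already performed */
	return ret;		/* must not fall through ... */

iounmap_ctrl:
	iounmap(ctrl);		/* ... into partial-teardown steps that
				 * caam_remove() has already covered */
disable_caam_emi_slow:
	/* progressively earlier unwind steps continue below */
```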
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 26427c11ad87..513b6646bb36 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
| @@ -23,13 +23,7 @@ | |||
| 23 | #define SEC4_SG_OFFSET_MASK 0x00001fff | 23 | #define SEC4_SG_OFFSET_MASK 0x00001fff |
| 24 | 24 | ||
| 25 | struct sec4_sg_entry { | 25 | struct sec4_sg_entry { |
| 26 | #if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \ | ||
| 27 | defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX) | ||
| 28 | u32 rsvd1; | ||
| 29 | dma_addr_t ptr; | ||
| 30 | #else | ||
| 31 | u64 ptr; | 26 | u64 ptr; |
| 32 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */ | ||
| 33 | u32 len; | 27 | u32 len; |
| 34 | u32 bpid_offset; | 28 | u32 bpid_offset; |
| 35 | }; | 29 | }; |
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index d3869b95e7b1..a8cd8a78ec1f 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
| @@ -325,6 +325,23 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ | |||
| 325 | APPEND_CMD_RAW_IMM(load, LOAD, u32); | 325 | APPEND_CMD_RAW_IMM(load, LOAD, u32); |
| 326 | 326 | ||
| 327 | /* | 327 | /* |
| 328 | * ee - endianness | ||
| 329 | * size - size of immediate type in bytes | ||
| 330 | */ | ||
| 331 | #define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \ | ||
| 332 | static inline void append_##cmd##_imm_##ee##size(u32 *desc, \ | ||
| 333 | u##size immediate, \ | ||
| 334 | u32 options) \ | ||
| 335 | { \ | ||
| 336 | __##ee##size data = cpu_to_##ee##size(immediate); \ | ||
| 337 | PRINT_POS; \ | ||
| 338 | append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \ | ||
| 339 | append_data(desc, &data, sizeof(data)); \ | ||
| 340 | } | ||
| 341 | |||
| 342 | APPEND_CMD_RAW_IMM2(load, LOAD, be, 32); | ||
| 343 | |||
| 344 | /* | ||
| 328 | * Append math command. Only the last part of destination and source need to | 345 | * Append math command. Only the last part of destination and source need to |
| 329 | * be specified | 346 | * be specified |
| 330 | */ | 347 | */ |
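APPEND_CMD_RAW_IMM2 parameterizes the immediate appenders by endianness as well as size, so a descriptor immediate can be stored in a fixed byte order instead of CPU order. Expanding the one instantiation, APPEND_CMD_RAW_IMM2(load, LOAD, be, 32), by hand (PRINT_POS omitted):

```c
static inline void append_load_imm_be32(u32 *desc, u32 immediate,
					u32 options)
{
	/* fixed big-endian payload, unlike append_load_imm_u32(),
	 * which stores the value in CPU byte order */
	__be32 data = cpu_to_be32(immediate);

	append_cmd(desc, CMD_LOAD | IMMEDIATE | options | sizeof(data));
	append_data(desc, &data, sizeof(data));
}
```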
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index e2bcacc1a921..5d4c05074a5c 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
| @@ -41,7 +41,6 @@ struct caam_drv_private_jr { | |||
| 41 | struct device *dev; | 41 | struct device *dev; |
| 42 | int ridx; | 42 | int ridx; |
| 43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | 43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
| 44 | struct tasklet_struct irqtask; | ||
| 45 | int irq; /* One per queue */ | 44 | int irq; /* One per queue */ |
| 46 | 45 | ||
| 47 | /* Number of scatterlist crypt transforms active on the JobR */ | 46 | /* Number of scatterlist crypt transforms active on the JobR */ |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index a81f551ac222..757c27f9953d 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -73,8 +73,6 @@ static int caam_jr_shutdown(struct device *dev) | |||
| 73 | 73 | ||
| 74 | ret = caam_reset_hw_jr(dev); | 74 | ret = caam_reset_hw_jr(dev); |
| 75 | 75 | ||
| 76 | tasklet_kill(&jrp->irqtask); | ||
| 77 | |||
| 78 | /* Release interrupt */ | 76 | /* Release interrupt */ |
| 79 | free_irq(jrp->irq, dev); | 77 | free_irq(jrp->irq, dev); |
| 80 | 78 | ||
| @@ -130,7 +128,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | |||
| 130 | 128 | ||
| 131 | /* | 129 | /* |
| 132 | * Check the output ring for ready responses, kick | 130 | * Check the output ring for ready responses, kick |
| 133 | * tasklet if jobs done. | 131 | * the threaded irq if jobs done. |
| 134 | */ | 132 | */ |
| 135 | irqstate = rd_reg32(&jrp->rregs->jrintstatus); | 133 | irqstate = rd_reg32(&jrp->rregs->jrintstatus); |
| 136 | if (!irqstate) | 134 | if (!irqstate) |
| @@ -152,18 +150,13 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | |||
| 152 | /* Have valid interrupt at this point, just ACK and trigger */ | 150 | /* Have valid interrupt at this point, just ACK and trigger */ |
| 153 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); | 151 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); |
| 154 | 152 | ||
| 155 | preempt_disable(); | 153 | return IRQ_WAKE_THREAD; |
| 156 | tasklet_schedule(&jrp->irqtask); | ||
| 157 | preempt_enable(); | ||
| 158 | |||
| 159 | return IRQ_HANDLED; | ||
| 160 | } | 154 | } |
| 161 | 155 | ||
| 162 | /* Deferred service handler, run as interrupt-fired tasklet */ | 156 | static irqreturn_t caam_jr_threadirq(int irq, void *st_dev) |
| 163 | static void caam_jr_dequeue(unsigned long devarg) | ||
| 164 | { | 157 | { |
| 165 | int hw_idx, sw_idx, i, head, tail; | 158 | int hw_idx, sw_idx, i, head, tail; |
| 166 | struct device *dev = (struct device *)devarg; | 159 | struct device *dev = st_dev; |
| 167 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | 160 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
| 168 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); | 161 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); |
| 169 | u32 *userdesc, userstatus; | 162 | u32 *userdesc, userstatus; |
| @@ -237,6 +230,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
| 237 | 230 | ||
| 238 | /* reenable / unmask IRQs */ | 231 | /* reenable / unmask IRQs */ |
| 239 | clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); | 232 | clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); |
| 233 | |||
| 234 | return IRQ_HANDLED; | ||
| 240 | } | 235 | } |
| 241 | 236 | ||
| 242 | /** | 237 | /** |
| @@ -394,11 +389,10 @@ static int caam_jr_init(struct device *dev) | |||
| 394 | 389 | ||
| 395 | jrp = dev_get_drvdata(dev); | 390 | jrp = dev_get_drvdata(dev); |
| 396 | 391 | ||
| 397 | tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); | ||
| 398 | |||
| 399 | /* Connect job ring interrupt handler. */ | 392 | /* Connect job ring interrupt handler. */ |
| 400 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, | 393 | error = request_threaded_irq(jrp->irq, caam_jr_interrupt, |
| 401 | dev_name(dev), dev); | 394 | caam_jr_threadirq, IRQF_SHARED, |
| 395 | dev_name(dev), dev); | ||
| 402 | if (error) { | 396 | if (error) { |
| 403 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", | 397 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", |
| 404 | jrp->ridx, jrp->irq); | 398 | jrp->ridx, jrp->irq); |
| @@ -460,7 +454,6 @@ out_free_inpring: | |||
| 460 | out_free_irq: | 454 | out_free_irq: |
| 461 | free_irq(jrp->irq, dev); | 455 | free_irq(jrp->irq, dev); |
| 462 | out_kill_deq: | 456 | out_kill_deq: |
| 463 | tasklet_kill(&jrp->irqtask); | ||
| 464 | return error; | 457 | return error; |
| 465 | } | 458 | } |
| 466 | 459 | ||
| @@ -513,6 +506,7 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
| 513 | error = caam_jr_init(jrdev); /* now turn on hardware */ | 506 | error = caam_jr_init(jrdev); /* now turn on hardware */ |
| 514 | if (error) { | 507 | if (error) { |
| 515 | irq_dispose_mapping(jrpriv->irq); | 508 | irq_dispose_mapping(jrpriv->irq); |
| 509 | iounmap(ctrl); | ||
| 516 | return error; | 510 | return error; |
| 517 | } | 511 | } |
| 518 | 512 | ||
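The intern.h and jr.c hunks convert the tasklet bottom half into a threaded interrupt: the hard handler only checks, masks and ACKs the source, then returns IRQ_WAKE_THREAD, and the former caam_jr_dequeue() body runs as the IRQ thread in sleepable process context, without the preempt_disable()/tasklet_schedule() dance. The generic pattern, not the driver's code:

```c
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t hard_handler(int irq, void *dev_id)
{
	/* runs with interrupts off: verify the source, mask + ACK */
	return IRQ_WAKE_THREAD;		/* defer the rest to thread_fn */
}

static irqreturn_t thread_fn(int irq, void *dev_id)
{
	/* process context: drain the output ring, run completion
	 * callbacks (which may sleep), then unmask the interrupt */
	return IRQ_HANDLED;
}

static int connect_irq(struct device *dev, unsigned int irq)
{
	/* replaces the tasklet_init() + request_irq() pair */
	return request_threaded_irq(irq, hard_handler, thread_fn,
				    IRQF_SHARED, dev_name(dev), dev);
}
```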
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index b3c5016f6458..84d2f838a063 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
| @@ -196,6 +196,14 @@ static inline u64 rd_reg64(void __iomem *reg) | |||
| 196 | #define caam_dma_to_cpu(value) caam32_to_cpu(value) | 196 | #define caam_dma_to_cpu(value) caam32_to_cpu(value) |
| 197 | #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ | 197 | #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ |
| 198 | 198 | ||
| 199 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX | ||
| 200 | #define cpu_to_caam_dma64(value) \ | ||
| 201 | (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ | ||
| 202 | (u64)cpu_to_caam32(upper_32_bits(value))) | ||
| 203 | #else | ||
| 204 | #define cpu_to_caam_dma64(value) cpu_to_caam64(value) | ||
| 205 | #endif | ||
| 206 | |||
| 199 | /* | 207 | /* |
| 200 | * jr_outentry | 208 | * jr_outentry |
| 201 | * Represents each entry in a JobR output ring | 209 | * Represents each entry in a JobR output ring |
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index 19dc64fede0d..41cd5a356d05 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
| @@ -15,7 +15,7 @@ struct sec4_sg_entry; | |||
| 15 | static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, | 15 | static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, |
| 16 | dma_addr_t dma, u32 len, u16 offset) | 16 | dma_addr_t dma, u32 len, u16 offset) |
| 17 | { | 17 | { |
| 18 | sec4_sg_ptr->ptr = cpu_to_caam_dma(dma); | 18 | sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma); |
| 19 | sec4_sg_ptr->len = cpu_to_caam32(len); | 19 | sec4_sg_ptr->len = cpu_to_caam32(len); |
| 20 | sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK); | 20 | sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK); |
| 21 | #ifdef DEBUG | 21 | #ifdef DEBUG |
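Together, the desc.h, regs.h and sg_sw_sec4.h hunks replace the i.MX-specific sec4_sg_entry layout with a plain u64 pointer plus a conversion macro: on i.MX (little-endian CPU, big-endian CAAM, 32-bit dma_addr_t) cpu_to_caam_dma64() byte-swaps each 32-bit half and places the lower half in the high 32 bits, so the u64 as written to memory carries the big-endian encoding the device expects. A standalone worked example with a hypothetical bus address:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical bus address; little-endian CPU assumed */
	uint64_t value = 0x0000000112345678ULL;

	uint32_t lo = (uint32_t)value;          /* 0x12345678 */
	uint32_t hi = (uint32_t)(value >> 32);  /* 0x00000001 */

	/* cpu_to_caam32() is a byte swap in this configuration */
	uint32_t lo_sw = __builtin_bswap32(lo); /* 0x78563412 */
	uint32_t hi_sw = __builtin_bswap32(hi); /* 0x01000000 */

	/* lower half lands in the high 32 bits, as in cpu_to_caam_dma64() */
	uint64_t ptr = ((uint64_t)lo_sw << 32) | hi_sw;

	/* stored little-endian, the eight bytes read
	 * 00 00 00 01 12 34 56 78: the big-endian encoding of value */
	printf("ptr = 0x%016llx\n", (unsigned long long)ptr);
	return 0;
}
```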
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index ee4d2741b3ab..346ceb8f17bd 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile | |||
| @@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o | |||
| 2 | ccp-objs := ccp-dev.o \ | 2 | ccp-objs := ccp-dev.o \ |
| 3 | ccp-ops.o \ | 3 | ccp-ops.o \ |
| 4 | ccp-dev-v3.o \ | 4 | ccp-dev-v3.o \ |
| 5 | ccp-dev-v5.o \ | ||
| 5 | ccp-platform.o \ | 6 | ccp-platform.o \ |
| 6 | ccp-dmaengine.o | 7 | ccp-dmaengine.o |
| 7 | ccp-$(CONFIG_PCI) += ccp-pci.o | 8 | ccp-$(CONFIG_PCI) += ccp-pci.o |
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 8f36af62fe95..84a652be4274 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
| 5 | * | 5 | * |
| 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
| 7 | * Author: Gary R Hook <gary.hook@amd.com> | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
| @@ -134,7 +135,22 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, | |||
| 134 | rctx->cmd.engine = CCP_ENGINE_SHA; | 135 | rctx->cmd.engine = CCP_ENGINE_SHA; |
| 135 | rctx->cmd.u.sha.type = rctx->type; | 136 | rctx->cmd.u.sha.type = rctx->type; |
| 136 | rctx->cmd.u.sha.ctx = &rctx->ctx_sg; | 137 | rctx->cmd.u.sha.ctx = &rctx->ctx_sg; |
| 137 | rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); | 138 | |
| 139 | switch (rctx->type) { | ||
| 140 | case CCP_SHA_TYPE_1: | ||
| 141 | rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE; | ||
| 142 | break; | ||
| 143 | case CCP_SHA_TYPE_224: | ||
| 144 | rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE; | ||
| 145 | break; | ||
| 146 | case CCP_SHA_TYPE_256: | ||
| 147 | rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE; | ||
| 148 | break; | ||
| 149 | default: | ||
| 150 | /* Should never get here */ | ||
| 151 | break; | ||
| 152 | } | ||
| 153 | |||
| 138 | rctx->cmd.u.sha.src = sg; | 154 | rctx->cmd.u.sha.src = sg; |
| 139 | rctx->cmd.u.sha.src_len = rctx->hash_cnt; | 155 | rctx->cmd.u.sha.src_len = rctx->hash_cnt; |
| 140 | rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? | 156 | rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? |
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index d7a710347967..8d2dbacc6161 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
| 5 | * | 5 | * |
| 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
| 7 | * Author: Gary R Hook <gary.hook@amd.com> | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
| @@ -19,6 +20,61 @@ | |||
| 19 | 20 | ||
| 20 | #include "ccp-dev.h" | 21 | #include "ccp-dev.h" |
| 21 | 22 | ||
| 23 | static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count) | ||
| 24 | { | ||
| 25 | int start; | ||
| 26 | struct ccp_device *ccp = cmd_q->ccp; | ||
| 27 | |||
| 28 | for (;;) { | ||
| 29 | mutex_lock(&ccp->sb_mutex); | ||
| 30 | |||
| 31 | start = (u32)bitmap_find_next_zero_area(ccp->sb, | ||
| 32 | ccp->sb_count, | ||
| 33 | ccp->sb_start, | ||
| 34 | count, 0); | ||
| 35 | if (start <= ccp->sb_count) { | ||
| 36 | bitmap_set(ccp->sb, start, count); | ||
| 37 | |||
| 38 | mutex_unlock(&ccp->sb_mutex); | ||
| 39 | break; | ||
| 40 | } | ||
| 41 | |||
| 42 | ccp->sb_avail = 0; | ||
| 43 | |||
| 44 | mutex_unlock(&ccp->sb_mutex); | ||
| 45 | |||
| 46 | /* Wait for KSB entries to become available */ | ||
| 47 | if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) | ||
| 48 | return 0; | ||
| 49 | } | ||
| 50 | |||
| 51 | return KSB_START + start; | ||
| 52 | } | ||
| 53 | |||
| 54 | static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start, | ||
| 55 | unsigned int count) | ||
| 56 | { | ||
| 57 | struct ccp_device *ccp = cmd_q->ccp; | ||
| 58 | |||
| 59 | if (!start) | ||
| 60 | return; | ||
| 61 | |||
| 62 | mutex_lock(&ccp->sb_mutex); | ||
| 63 | |||
| 64 | bitmap_clear(ccp->sb, start - KSB_START, count); | ||
| 65 | |||
| 66 | ccp->sb_avail = 1; | ||
| 67 | |||
| 68 | mutex_unlock(&ccp->sb_mutex); | ||
| 69 | |||
| 70 | wake_up_interruptible_all(&ccp->sb_queue); | ||
| 71 | } | ||
| 72 | |||
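The pair above implements reserve-or-wait over a shared bitmap: the allocator claims `count` contiguous KSB slots under sb_mutex and sleeps on sb_queue when none fit; the free path sets sb_avail and wakes every sleeper so each one re-scans. The pattern in isolation, as a sketch with hypothetical names (not the driver's API):

    /* Returns a slot index, or -ERESTARTSYS if interrupted while
     * waiting for a free run of 'count' bits.
     */
    static int reserve_slots(unsigned long *map, unsigned int size,
                             unsigned int count, struct mutex *lock,
                             wait_queue_head_t *wq, unsigned int *avail)
    {
            unsigned int start;

            for (;;) {
                    mutex_lock(lock);
                    start = bitmap_find_next_zero_area(map, size, 0,
                                                       count, 0);
                    if (start < size) {
                            bitmap_set(map, start, count);
                            mutex_unlock(lock);
                            return start;
                    }
                    *avail = 0;                     /* mark exhausted */
                    mutex_unlock(lock);
                    if (wait_event_interruptible(*wq, *avail))
                            return -ERESTARTSYS;
            }
    }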
| 73 | static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q) | ||
| 74 | { | ||
| 75 | return CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); | ||
| 76 | } | ||
| 77 | |||
| 22 | static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) | 78 | static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) |
| 23 | { | 79 | { |
| 24 | struct ccp_cmd_queue *cmd_q = op->cmd_q; | 80 | struct ccp_cmd_queue *cmd_q = op->cmd_q; |
| @@ -68,6 +124,9 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) | |||
| 68 | /* On error delete all related jobs from the queue */ | 124 | /* On error delete all related jobs from the queue */ |
| 69 | cmd = (cmd_q->id << DEL_Q_ID_SHIFT) | 125 | cmd = (cmd_q->id << DEL_Q_ID_SHIFT) |
| 70 | | op->jobid; | 126 | | op->jobid; |
| 127 | if (cmd_q->cmd_error) | ||
| 128 | ccp_log_error(cmd_q->ccp, | ||
| 129 | cmd_q->cmd_error); | ||
| 71 | 130 | ||
| 72 | iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); | 131 | iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); |
| 73 | 132 | ||
| @@ -99,10 +158,10 @@ static int ccp_perform_aes(struct ccp_op *op) | |||
| 99 | | (op->u.aes.type << REQ1_AES_TYPE_SHIFT) | 158 | | (op->u.aes.type << REQ1_AES_TYPE_SHIFT) |
| 100 | | (op->u.aes.mode << REQ1_AES_MODE_SHIFT) | 159 | | (op->u.aes.mode << REQ1_AES_MODE_SHIFT) |
| 101 | | (op->u.aes.action << REQ1_AES_ACTION_SHIFT) | 160 | | (op->u.aes.action << REQ1_AES_ACTION_SHIFT) |
| 102 | | (op->ksb_key << REQ1_KEY_KSB_SHIFT); | 161 | | (op->sb_key << REQ1_KEY_KSB_SHIFT); |
| 103 | cr[1] = op->src.u.dma.length - 1; | 162 | cr[1] = op->src.u.dma.length - 1; |
| 104 | cr[2] = ccp_addr_lo(&op->src.u.dma); | 163 | cr[2] = ccp_addr_lo(&op->src.u.dma); |
| 105 | cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) | 164 | cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) |
| 106 | | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | 165 | | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) |
| 107 | | ccp_addr_hi(&op->src.u.dma); | 166 | | ccp_addr_hi(&op->src.u.dma); |
| 108 | cr[4] = ccp_addr_lo(&op->dst.u.dma); | 167 | cr[4] = ccp_addr_lo(&op->dst.u.dma); |
| @@ -129,10 +188,10 @@ static int ccp_perform_xts_aes(struct ccp_op *op) | |||
| 129 | cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT) | 188 | cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT) |
| 130 | | (op->u.xts.action << REQ1_AES_ACTION_SHIFT) | 189 | | (op->u.xts.action << REQ1_AES_ACTION_SHIFT) |
| 131 | | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT) | 190 | | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT) |
| 132 | | (op->ksb_key << REQ1_KEY_KSB_SHIFT); | 191 | | (op->sb_key << REQ1_KEY_KSB_SHIFT); |
| 133 | cr[1] = op->src.u.dma.length - 1; | 192 | cr[1] = op->src.u.dma.length - 1; |
| 134 | cr[2] = ccp_addr_lo(&op->src.u.dma); | 193 | cr[2] = ccp_addr_lo(&op->src.u.dma); |
| 135 | cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) | 194 | cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) |
| 136 | | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | 195 | | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) |
| 137 | | ccp_addr_hi(&op->src.u.dma); | 196 | | ccp_addr_hi(&op->src.u.dma); |
| 138 | cr[4] = ccp_addr_lo(&op->dst.u.dma); | 197 | cr[4] = ccp_addr_lo(&op->dst.u.dma); |
| @@ -158,7 +217,7 @@ static int ccp_perform_sha(struct ccp_op *op) | |||
| 158 | | REQ1_INIT; | 217 | | REQ1_INIT; |
| 159 | cr[1] = op->src.u.dma.length - 1; | 218 | cr[1] = op->src.u.dma.length - 1; |
| 160 | cr[2] = ccp_addr_lo(&op->src.u.dma); | 219 | cr[2] = ccp_addr_lo(&op->src.u.dma); |
| 161 | cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) | 220 | cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) |
| 162 | | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | 221 | | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) |
| 163 | | ccp_addr_hi(&op->src.u.dma); | 222 | | ccp_addr_hi(&op->src.u.dma); |
| 164 | 223 | ||
| @@ -181,11 +240,11 @@ static int ccp_perform_rsa(struct ccp_op *op) | |||
| 181 | /* Fill out the register contents for REQ1 through REQ6 */ | 240 | /* Fill out the register contents for REQ1 through REQ6 */ |
| 182 | cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT) | 241 | cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT) |
| 183 | | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT) | 242 | | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT) |
| 184 | | (op->ksb_key << REQ1_KEY_KSB_SHIFT) | 243 | | (op->sb_key << REQ1_KEY_KSB_SHIFT) |
| 185 | | REQ1_EOM; | 244 | | REQ1_EOM; |
| 186 | cr[1] = op->u.rsa.input_len - 1; | 245 | cr[1] = op->u.rsa.input_len - 1; |
| 187 | cr[2] = ccp_addr_lo(&op->src.u.dma); | 246 | cr[2] = ccp_addr_lo(&op->src.u.dma); |
| 188 | cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) | 247 | cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) |
| 189 | | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | 248 | | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) |
| 190 | | ccp_addr_hi(&op->src.u.dma); | 249 | | ccp_addr_hi(&op->src.u.dma); |
| 191 | cr[4] = ccp_addr_lo(&op->dst.u.dma); | 250 | cr[4] = ccp_addr_lo(&op->dst.u.dma); |
| @@ -215,10 +274,10 @@ static int ccp_perform_passthru(struct ccp_op *op) | |||
| 215 | | ccp_addr_hi(&op->src.u.dma); | 274 | | ccp_addr_hi(&op->src.u.dma); |
| 216 | 275 | ||
| 217 | if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) | 276 | if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) |
| 218 | cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT); | 277 | cr[3] |= (op->sb_key << REQ4_KSB_SHIFT); |
| 219 | } else { | 278 | } else { |
| 220 | cr[2] = op->src.u.ksb * CCP_KSB_BYTES; | 279 | cr[2] = op->src.u.sb * CCP_SB_BYTES; |
| 221 | cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT); | 280 | cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT); |
| 222 | } | 281 | } |
| 223 | 282 | ||
| 224 | if (op->dst.type == CCP_MEMTYPE_SYSTEM) { | 283 | if (op->dst.type == CCP_MEMTYPE_SYSTEM) { |
| @@ -226,8 +285,8 @@ static int ccp_perform_passthru(struct ccp_op *op) | |||
| 226 | cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | 285 | cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) |
| 227 | | ccp_addr_hi(&op->dst.u.dma); | 286 | | ccp_addr_hi(&op->dst.u.dma); |
| 228 | } else { | 287 | } else { |
| 229 | cr[4] = op->dst.u.ksb * CCP_KSB_BYTES; | 288 | cr[4] = op->dst.u.sb * CCP_SB_BYTES; |
| 230 | cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT); | 289 | cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT); |
| 231 | } | 290 | } |
| 232 | 291 | ||
| 233 | if (op->eom) | 292 | if (op->eom) |
| @@ -256,35 +315,6 @@ static int ccp_perform_ecc(struct ccp_op *op) | |||
| 256 | return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); | 315 | return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); |
| 257 | } | 316 | } |
| 258 | 317 | ||
| 259 | static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) | ||
| 260 | { | ||
| 261 | struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); | ||
| 262 | u32 trng_value; | ||
| 263 | int len = min_t(int, sizeof(trng_value), max); | ||
| 264 | |||
| 265 | /* | ||
| 266 | * Locking is provided by the caller so we can update device | ||
| 267 | * hwrng-related fields safely | ||
| 268 | */ | ||
| 269 | trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); | ||
| 270 | if (!trng_value) { | ||
| 271 | /* Zero is returned if no data is available or if a | ||
| 272 | * bad-entropy error is present. Assume an error if | ||
| 273 | * we exceed TRNG_RETRIES reads of zero. | ||
| 274 | */ | ||
| 275 | if (ccp->hwrng_retries++ > TRNG_RETRIES) | ||
| 276 | return -EIO; | ||
| 277 | |||
| 278 | return 0; | ||
| 279 | } | ||
| 280 | |||
| 281 | /* Reset the counter and save the rng value */ | ||
| 282 | ccp->hwrng_retries = 0; | ||
| 283 | memcpy(data, &trng_value, len); | ||
| 284 | |||
| 285 | return len; | ||
| 286 | } | ||
| 287 | |||
| 288 | static int ccp_init(struct ccp_device *ccp) | 318 | static int ccp_init(struct ccp_device *ccp) |
| 289 | { | 319 | { |
| 290 | struct device *dev = ccp->dev; | 320 | struct device *dev = ccp->dev; |
| @@ -321,9 +351,9 @@ static int ccp_init(struct ccp_device *ccp) | |||
| 321 | cmd_q->dma_pool = dma_pool; | 351 | cmd_q->dma_pool = dma_pool; |
| 322 | 352 | ||
| 323 | /* Reserve 2 KSB regions for the queue */ | 353 | /* Reserve 2 KSB regions for the queue */ |
| 324 | cmd_q->ksb_key = KSB_START + ccp->ksb_start++; | 354 | cmd_q->sb_key = KSB_START + ccp->sb_start++; |
| 325 | cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++; | 355 | cmd_q->sb_ctx = KSB_START + ccp->sb_start++; |
| 326 | ccp->ksb_count -= 2; | 356 | ccp->sb_count -= 2; |
| 327 | 357 | ||
| 328 | /* Preset some register values and masks that are queue | 358 | /* Preset some register values and masks that are queue |
| 329 | * number dependent | 359 | * number dependent |
| @@ -335,7 +365,7 @@ static int ccp_init(struct ccp_device *ccp) | |||
| 335 | cmd_q->int_ok = 1 << (i * 2); | 365 | cmd_q->int_ok = 1 << (i * 2); |
| 336 | cmd_q->int_err = 1 << ((i * 2) + 1); | 366 | cmd_q->int_err = 1 << ((i * 2) + 1); |
| 337 | 367 | ||
| 338 | cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); | 368 | cmd_q->free_slots = ccp_get_free_slots(cmd_q); |
| 339 | 369 | ||
| 340 | init_waitqueue_head(&cmd_q->int_queue); | 370 | init_waitqueue_head(&cmd_q->int_queue); |
| 341 | 371 | ||
| @@ -375,9 +405,10 @@ static int ccp_init(struct ccp_device *ccp) | |||
| 375 | } | 405 | } |
| 376 | 406 | ||
| 377 | /* Initialize the queues used to wait for KSB space and suspend */ | 407 | /* Initialize the queues used to wait for KSB space and suspend */ |
| 378 | init_waitqueue_head(&ccp->ksb_queue); | 408 | init_waitqueue_head(&ccp->sb_queue); |
| 379 | init_waitqueue_head(&ccp->suspend_queue); | 409 | init_waitqueue_head(&ccp->suspend_queue); |
| 380 | 410 | ||
| 411 | dev_dbg(dev, "Starting threads...\n"); | ||
| 381 | /* Create a kthread for each queue */ | 412 | /* Create a kthread for each queue */ |
| 382 | for (i = 0; i < ccp->cmd_q_count; i++) { | 413 | for (i = 0; i < ccp->cmd_q_count; i++) { |
| 383 | struct task_struct *kthread; | 414 | struct task_struct *kthread; |
| @@ -397,29 +428,26 @@ static int ccp_init(struct ccp_device *ccp) | |||
| 397 | wake_up_process(kthread); | 428 | wake_up_process(kthread); |
| 398 | } | 429 | } |
| 399 | 430 | ||
| 400 | /* Register the RNG */ | 431 | dev_dbg(dev, "Enabling interrupts...\n"); |
| 401 | ccp->hwrng.name = ccp->rngname; | 432 | /* Enable interrupts */ |
| 402 | ccp->hwrng.read = ccp_trng_read; | 433 | iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); |
| 403 | ret = hwrng_register(&ccp->hwrng); | 434 | |
| 404 | if (ret) { | 435 | dev_dbg(dev, "Registering device...\n"); |
| 405 | dev_err(dev, "error registering hwrng (%d)\n", ret); | 436 | ccp_add_device(ccp); |
| 437 | |||
| 438 | ret = ccp_register_rng(ccp); | ||
| 439 | if (ret) | ||
| 406 | goto e_kthread; | 440 | goto e_kthread; |
| 407 | } | ||
| 408 | 441 | ||
| 409 | /* Register the DMA engine support */ | 442 | /* Register the DMA engine support */ |
| 410 | ret = ccp_dmaengine_register(ccp); | 443 | ret = ccp_dmaengine_register(ccp); |
| 411 | if (ret) | 444 | if (ret) |
| 412 | goto e_hwrng; | 445 | goto e_hwrng; |
| 413 | 446 | ||
| 414 | ccp_add_device(ccp); | ||
| 415 | |||
| 416 | /* Enable interrupts */ | ||
| 417 | iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); | ||
| 418 | |||
| 419 | return 0; | 447 | return 0; |
| 420 | 448 | ||
| 421 | e_hwrng: | 449 | e_hwrng: |
| 422 | hwrng_unregister(&ccp->hwrng); | 450 | ccp_unregister_rng(ccp); |
| 423 | 451 | ||
| 424 | e_kthread: | 452 | e_kthread: |
| 425 | for (i = 0; i < ccp->cmd_q_count; i++) | 453 | for (i = 0; i < ccp->cmd_q_count; i++) |
| @@ -441,19 +469,14 @@ static void ccp_destroy(struct ccp_device *ccp) | |||
| 441 | struct ccp_cmd *cmd; | 469 | struct ccp_cmd *cmd; |
| 442 | unsigned int qim, i; | 470 | unsigned int qim, i; |
| 443 | 471 | ||
| 444 | /* Remove this device from the list of available units first */ | ||
| 445 | ccp_del_device(ccp); | ||
| 446 | |||
| 447 | /* Unregister the DMA engine */ | 472 | /* Unregister the DMA engine */ |
| 448 | ccp_dmaengine_unregister(ccp); | 473 | ccp_dmaengine_unregister(ccp); |
| 449 | 474 | ||
| 450 | /* Unregister the RNG */ | 475 | /* Unregister the RNG */ |
| 451 | hwrng_unregister(&ccp->hwrng); | 476 | ccp_unregister_rng(ccp); |
| 452 | 477 | ||
| 453 | /* Stop the queue kthreads */ | 478 | /* Remove this device from the list of available units */ |
| 454 | for (i = 0; i < ccp->cmd_q_count; i++) | 479 | ccp_del_device(ccp); |
| 455 | if (ccp->cmd_q[i].kthread) | ||
| 456 | kthread_stop(ccp->cmd_q[i].kthread); | ||
| 457 | 480 | ||
| 458 | /* Build queue interrupt mask (two interrupt masks per queue) */ | 481 | /* Build queue interrupt mask (two interrupt masks per queue) */ |
| 459 | qim = 0; | 482 | qim = 0; |
| @@ -472,6 +495,11 @@ static void ccp_destroy(struct ccp_device *ccp) | |||
| 472 | } | 495 | } |
| 473 | iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); | 496 | iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); |
| 474 | 497 | ||
| 498 | /* Stop the queue kthreads */ | ||
| 499 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
| 500 | if (ccp->cmd_q[i].kthread) | ||
| 501 | kthread_stop(ccp->cmd_q[i].kthread); | ||
| 502 | |||
| 475 | ccp->free_irq(ccp); | 503 | ccp->free_irq(ccp); |
| 476 | 504 | ||
| 477 | for (i = 0; i < ccp->cmd_q_count; i++) | 505 | for (i = 0; i < ccp->cmd_q_count; i++) |
| @@ -527,18 +555,24 @@ static irqreturn_t ccp_irq_handler(int irq, void *data) | |||
| 527 | } | 555 | } |
| 528 | 556 | ||
| 529 | static const struct ccp_actions ccp3_actions = { | 557 | static const struct ccp_actions ccp3_actions = { |
| 530 | .perform_aes = ccp_perform_aes, | 558 | .aes = ccp_perform_aes, |
| 531 | .perform_xts_aes = ccp_perform_xts_aes, | 559 | .xts_aes = ccp_perform_xts_aes, |
| 532 | .perform_sha = ccp_perform_sha, | 560 | .sha = ccp_perform_sha, |
| 533 | .perform_rsa = ccp_perform_rsa, | 561 | .rsa = ccp_perform_rsa, |
| 534 | .perform_passthru = ccp_perform_passthru, | 562 | .passthru = ccp_perform_passthru, |
| 535 | .perform_ecc = ccp_perform_ecc, | 563 | .ecc = ccp_perform_ecc, |
| 564 | .sballoc = ccp_alloc_ksb, | ||
| 565 | .sbfree = ccp_free_ksb, | ||
| 536 | .init = ccp_init, | 566 | .init = ccp_init, |
| 537 | .destroy = ccp_destroy, | 567 | .destroy = ccp_destroy, |
| 568 | .get_free_slots = ccp_get_free_slots, | ||
| 538 | .irqhandler = ccp_irq_handler, | 569 | .irqhandler = ccp_irq_handler, |
| 539 | }; | 570 | }; |
| 540 | 571 | ||
| 541 | struct ccp_vdata ccpv3 = { | 572 | const struct ccp_vdata ccpv3 = { |
| 542 | .version = CCP_VERSION(3, 0), | 573 | .version = CCP_VERSION(3, 0), |
| 574 | .setup = NULL, | ||
| 543 | .perform = &ccp3_actions, | 575 | .perform = &ccp3_actions, |
| 576 | .bar = 2, | ||
| 577 | .offset = 0x20000, | ||
| 544 | }; | 578 | }; |
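With the callbacks renamed (`.aes`, `.sha`, ...) and the new `.sballoc`/`.sbfree`/`.get_free_slots` hooks, struct ccp_actions is now a complete per-version ops table, and the const ccp_vdata additionally pins down the PCI BAR and register offset. Version-independent code can then dispatch without knowing the hardware generation — a sketch, with a hypothetical caller name:

    static int ccp_run_aes_op(struct ccp_op *op)
    {
            struct ccp_device *ccp = op->cmd_q->ccp;

            /* Resolved at probe time to the v3 or v5 routine */
            return ccp->vdata->perform->aes(op);
    }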
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c new file mode 100644 index 000000000000..faf3cb3ddce2 --- /dev/null +++ b/drivers/crypto/ccp/ccp-dev-v5.c | |||
| @@ -0,0 +1,1017 @@ | |||
| 1 | /* | ||
| 2 | * AMD Cryptographic Coprocessor (CCP) driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
| 5 | * | ||
| 6 | * Author: Gary R Hook <gary.hook@amd.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/module.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/pci.h> | ||
| 16 | #include <linux/kthread.h> | ||
| 17 | #include <linux/dma-mapping.h> | ||
| 18 | #include <linux/interrupt.h> | ||
| 19 | #include <linux/compiler.h> | ||
| 20 | #include <linux/ccp.h> | ||
| 21 | |||
| 22 | #include "ccp-dev.h" | ||
| 23 | |||
| 24 | static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) | ||
| 25 | { | ||
| 26 | struct ccp_device *ccp; | ||
| 27 | int start; | ||
| 28 | |||
| 29 | /* First look at the map for the queue */ | ||
| 30 | if (cmd_q->lsb >= 0) { | ||
| 31 | start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, | ||
| 32 | LSB_SIZE, | ||
| 33 | 0, count, 0); | ||
| 34 | if (start < LSB_SIZE) { | ||
| 35 | bitmap_set(cmd_q->lsbmap, start, count); | ||
| 36 | return start + cmd_q->lsb * LSB_SIZE; | ||
| 37 | } | ||
| 38 | } | ||
| 39 | |||
| 40 | /* No joy; try to get an entry from the shared blocks */ | ||
| 41 | ccp = cmd_q->ccp; | ||
| 42 | for (;;) { | ||
| 43 | mutex_lock(&ccp->sb_mutex); | ||
| 44 | |||
| 45 | start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, | ||
| 46 | MAX_LSB_CNT * LSB_SIZE, | ||
| 47 | 0, | ||
| 48 | count, 0); | ||
| 49 | if (start <= MAX_LSB_CNT * LSB_SIZE) { | ||
| 50 | bitmap_set(ccp->lsbmap, start, count); | ||
| 51 | |||
| 52 | mutex_unlock(&ccp->sb_mutex); | ||
| 53 | return start * LSB_ITEM_SIZE; | ||
| 54 | } | ||
| 55 | |||
| 56 | ccp->sb_avail = 0; | ||
| 57 | |||
| 58 | mutex_unlock(&ccp->sb_mutex); | ||
| 59 | |||
| 60 | /* Wait for LSB entries to become available */ | ||
| 61 | if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) | ||
| 62 | return 0; | ||
| 63 | } | ||
| 64 | } | ||
| 65 | |||
| 66 | static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, | ||
| 67 | unsigned int count) | ||
| 68 | { | ||
| 69 | int lsbno = start / LSB_SIZE; | ||
| 70 | |||
| 71 | if (!start) | ||
| 72 | return; | ||
| 73 | |||
| 74 | if (cmd_q->lsb == lsbno) { | ||
| 75 | /* An entry from the private LSB */ | ||
| 76 | bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count); | ||
| 77 | } else { | ||
| 78 | /* From the shared LSBs */ | ||
| 79 | struct ccp_device *ccp = cmd_q->ccp; | ||
| 80 | |||
| 81 | mutex_lock(&ccp->sb_mutex); | ||
| 82 | bitmap_clear(ccp->lsbmap, start, count); | ||
| 83 | ccp->sb_avail = 1; | ||
| 84 | mutex_unlock(&ccp->sb_mutex); | ||
| 85 | wake_up_interruptible_all(&ccp->sb_queue); | ||
| 86 | } | ||
| 87 | } | ||
| 88 | |||
| 89 | /* CCP version 5: Union to define the function field (cmd_reg1/dword0) */ | ||
| 90 | union ccp_function { | ||
| 91 | struct { | ||
| 92 | u16 size:7; | ||
| 93 | u16 encrypt:1; | ||
| 94 | u16 mode:5; | ||
| 95 | u16 type:2; | ||
| 96 | } aes; | ||
| 97 | struct { | ||
| 98 | u16 size:7; | ||
| 99 | u16 encrypt:1; | ||
| 100 | u16 rsvd:5; | ||
| 101 | u16 type:2; | ||
| 102 | } aes_xts; | ||
| 103 | struct { | ||
| 104 | u16 rsvd1:10; | ||
| 105 | u16 type:4; | ||
| 106 | u16 rsvd2:1; | ||
| 107 | } sha; | ||
| 108 | struct { | ||
| 109 | u16 mode:3; | ||
| 110 | u16 size:12; | ||
| 111 | } rsa; | ||
| 112 | struct { | ||
| 113 | u16 byteswap:2; | ||
| 114 | u16 bitwise:3; | ||
| 115 | u16 reflect:2; | ||
| 116 | u16 rsvd:8; | ||
| 117 | } pt; | ||
| 118 | struct { | ||
| 119 | u16 rsvd:13; | ||
| 120 | } zlib; | ||
| 121 | struct { | ||
| 122 | u16 size:10; | ||
| 123 | u16 type:2; | ||
| 124 | u16 mode:3; | ||
| 125 | } ecc; | ||
| 126 | u16 raw; | ||
| 127 | }; | ||
| 128 | |||
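The union gives each engine its own view of the 15-bit function field, while `raw` is the value actually written into dword 0 of the descriptor. Bitfield layout is compiler-defined, so this round-trips safely only because pack and unpack happen in the same build; on the LSB-first allocation used by GCC on x86-64, for instance, `sha.type` sits above the 10 reserved low bits:

    union ccp_function f;

    f.raw = 0;
    f.sha.type = 2;         /* illustrative type value */
    /* f.raw == 2 << 10 == 0x0800 on an LSB-first bitfield ABI;
     * bits 9:0 are the rsvd1 padding beneath it.
     */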
| 129 | #define CCP_AES_SIZE(p) ((p)->aes.size) | ||
| 130 | #define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt) | ||
| 131 | #define CCP_AES_MODE(p) ((p)->aes.mode) | ||
| 132 | #define CCP_AES_TYPE(p) ((p)->aes.type) | ||
| 133 | #define CCP_XTS_SIZE(p) ((p)->aes_xts.size) | ||
| 134 | #define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt) | ||
| 135 | #define CCP_SHA_TYPE(p) ((p)->sha.type) | ||
| 136 | #define CCP_RSA_SIZE(p) ((p)->rsa.size) | ||
| 137 | #define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) | ||
| 138 | #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) | ||
| 139 | #define CCP_ECC_MODE(p) ((p)->ecc.mode) | ||
| 140 | #define CCP_ECC_AFFINE(p) ((p)->ecc.one) | ||
| 141 | |||
| 142 | /* Word 0 */ | ||
| 143 | #define CCP5_CMD_DW0(p) ((p)->dw0) | ||
| 144 | #define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc) | ||
| 145 | #define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc) | ||
| 146 | #define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init) | ||
| 147 | #define CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom) | ||
| 148 | #define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function) | ||
| 149 | #define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine) | ||
| 150 | #define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot) | ||
| 151 | |||
| 152 | /* Word 1 */ | ||
| 153 | #define CCP5_CMD_DW1(p) ((p)->length) | ||
| 154 | #define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p)) | ||
| 155 | |||
| 156 | /* Word 2 */ | ||
| 157 | #define CCP5_CMD_DW2(p) ((p)->src_lo) | ||
| 158 | #define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p)) | ||
| 159 | |||
| 160 | /* Word 3 */ | ||
| 161 | #define CCP5_CMD_DW3(p) ((p)->dw3) | ||
| 162 | #define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem) | ||
| 163 | #define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi) | ||
| 164 | #define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) | ||
| 165 | #define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed) | ||
| 166 | |||
| 167 | /* Words 4/5 */ | ||
| 168 | #define CCP5_CMD_DW4(p) ((p)->dw4) | ||
| 169 | #define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo) | ||
| 170 | #define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi) | ||
| 171 | #define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p)) | ||
| 172 | #define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) | ||
| 173 | #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) | ||
| 174 | #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) | ||
| 175 | #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) | ||
| 176 | |||
| 177 | /* Words 6/7 */ | ||
| 178 | #define CCP5_CMD_DW6(p) ((p)->key_lo) | ||
| 179 | #define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p)) | ||
| 180 | #define CCP5_CMD_DW7(p) ((p)->dw7) | ||
| 181 | #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) | ||
| 182 | #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) | ||
| 183 | |||
| 184 | static inline u32 low_address(unsigned long addr) | ||
| 185 | { | ||
| 186 | return (u64)addr & 0x0ffffffff; | ||
| 187 | } | ||
| 188 | |||
| 189 | static inline u32 high_address(unsigned long addr) | ||
| 190 | { | ||
| 191 | return ((u64)addr >> 32) & 0x00000ffff; | ||
| 192 | } | ||
| 193 | |||
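These helpers split a DMA address into a full low dword and a 16-bit high part; the 0xffff-sized mask matches the 16 bits reserved for the high address in the queue control register (see `qcontrol |= (dma_addr_hi << 16)` in ccp5_init() below). A worked example for an assumed 48-bit address:

    /* addr = 0x0000804020101234
     * low_address(addr)  == 0x20101234    (bits 31:0)
     * high_address(addr) == 0x00008040    (bits 47:32, 16 bits)
     */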
| 194 | static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) | ||
| 195 | { | ||
| 196 | unsigned int head_idx, n; | ||
| 197 | u32 head_lo, queue_start; | ||
| 198 | |||
| 199 | queue_start = low_address(cmd_q->qdma_tail); | ||
| 200 | head_lo = ioread32(cmd_q->reg_head_lo); | ||
| 201 | head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); | ||
| 202 | |||
| 203 | n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; | ||
| 204 | |||
| 205 | return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ | ||
| 206 | } | ||
| 207 | |||
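The free-slot computation treats the descriptor area as a ring: hardware consumes at the head register, software produces at qidx, and one slot is deliberately sacrificed so a full ring is distinguishable from an empty one. Worked through with an assumed COMMANDS_PER_QUEUE of 32:

    /* head_idx = 5, qidx = 30:
     *   n = (5 + 32 - 30 - 1) % 32 = 6 free slots
     * head_idx == qidx (empty ring):
     *   n = (32 - 1) % 32 = 31, i.e. capacity minus the one
     *   reserved slot.
     */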
| 208 | static int ccp5_do_cmd(struct ccp5_desc *desc, | ||
| 209 | struct ccp_cmd_queue *cmd_q) | ||
| 210 | { | ||
| 211 | u32 *mP; | ||
| 212 | __le32 *dP; | ||
| 213 | u32 tail; | ||
| 214 | int i; | ||
| 215 | int ret = 0; | ||
| 216 | |||
| 217 | if (CCP5_CMD_SOC(desc)) { | ||
| 218 | CCP5_CMD_IOC(desc) = 1; | ||
| 219 | CCP5_CMD_SOC(desc) = 0; | ||
| 220 | } | ||
| 221 | mutex_lock(&cmd_q->q_mutex); | ||
| 222 | |||
| 223 | mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; | ||
| 224 | dP = (__le32 *) desc; | ||
| 225 | for (i = 0; i < 8; i++) | ||
| 226 | mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ | ||
| 227 | |||
| 228 | cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; | ||
| 229 | |||
| 230 | /* The data used by this command must be flushed to memory */ | ||
| 231 | wmb(); | ||
| 232 | |||
| 233 | /* Write the new tail address back to the queue register */ | ||
| 234 | tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); | ||
| 235 | iowrite32(tail, cmd_q->reg_tail_lo); | ||
| 236 | |||
| 237 | /* Turn the queue back on using our cached control register */ | ||
| 238 | iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); | ||
| 239 | mutex_unlock(&cmd_q->q_mutex); | ||
| 240 | |||
| 241 | if (CCP5_CMD_IOC(desc)) { | ||
| 242 | /* Wait for the job to complete */ | ||
| 243 | ret = wait_event_interruptible(cmd_q->int_queue, | ||
| 244 | cmd_q->int_rcvd); | ||
| 245 | if (ret || cmd_q->cmd_error) { | ||
| 246 | if (cmd_q->cmd_error) | ||
| 247 | ccp_log_error(cmd_q->ccp, | ||
| 248 | cmd_q->cmd_error); | ||
| 249 | /* A version 5 device doesn't use Job IDs... */ | ||
| 250 | if (!ret) | ||
| 251 | ret = -EIO; | ||
| 252 | } | ||
| 253 | cmd_q->int_rcvd = 0; | ||
| 254 | } | ||
| 255 | |||
| 256 | return ret; | ||
| 257 | } | ||
| 258 | |||
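The ordering that matters in ccp5_do_cmd() is descriptor-then-doorbell: the eight dwords are copied into the ring slot, wmb() forces them out to memory, and only then does the tail write expose the slot to the engine, so the device can never fetch a half-written descriptor. Stripped to its essentials (sketch, hypothetical name):

    static void ring_publish(struct ccp_cmd_queue *cmd_q,
                             const struct ccp5_desc *desc)
    {
            cmd_q->qbase[cmd_q->qidx] = *desc;      /* 1. fill the slot */
            cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
            wmb();                                  /* 2. desc before tail */
            iowrite32(low_address(cmd_q->qdma_tail +
                                  cmd_q->qidx * Q_DESC_SIZE),
                      cmd_q->reg_tail_lo);          /* 3. hand to hardware */
    }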
| 259 | static int ccp5_perform_aes(struct ccp_op *op) | ||
| 260 | { | ||
| 261 | struct ccp5_desc desc; | ||
| 262 | union ccp_function function; | ||
| 263 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; | ||
| 264 | |||
| 265 | /* Zero out all the fields of the command desc */ | ||
| 266 | memset(&desc, 0, Q_DESC_SIZE); | ||
| 267 | |||
| 268 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES; | ||
| 269 | |||
| 270 | CCP5_CMD_SOC(&desc) = op->soc; | ||
| 271 | CCP5_CMD_IOC(&desc) = 1; | ||
| 272 | CCP5_CMD_INIT(&desc) = op->init; | ||
| 273 | CCP5_CMD_EOM(&desc) = op->eom; | ||
| 274 | CCP5_CMD_PROT(&desc) = 0; | ||
| 275 | |||
| 276 | function.raw = 0; | ||
| 277 | CCP_AES_ENCRYPT(&function) = op->u.aes.action; | ||
| 278 | CCP_AES_MODE(&function) = op->u.aes.mode; | ||
| 279 | CCP_AES_TYPE(&function) = op->u.aes.type; | ||
| 280 | if (op->u.aes.mode == CCP_AES_MODE_CFB) | ||
| 281 | CCP_AES_SIZE(&function) = 0x7f; | ||
| 282 | |||
| 283 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
| 284 | |||
| 285 | CCP5_CMD_LEN(&desc) = op->src.u.dma.length; | ||
| 286 | |||
| 287 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
| 288 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
| 289 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 290 | |||
| 291 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
| 292 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
| 293 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 294 | |||
| 295 | CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); | ||
| 296 | CCP5_CMD_KEY_HI(&desc) = 0; | ||
| 297 | CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; | ||
| 298 | CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; | ||
| 299 | |||
| 300 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
| 301 | } | ||
| 302 | |||
| 303 | static int ccp5_perform_xts_aes(struct ccp_op *op) | ||
| 304 | { | ||
| 305 | struct ccp5_desc desc; | ||
| 306 | union ccp_function function; | ||
| 307 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; | ||
| 308 | |||
| 309 | /* Zero out all the fields of the command desc */ | ||
| 310 | memset(&desc, 0, Q_DESC_SIZE); | ||
| 311 | |||
| 312 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128; | ||
| 313 | |||
| 314 | CCP5_CMD_SOC(&desc) = op->soc; | ||
| 315 | CCP5_CMD_IOC(&desc) = 1; | ||
| 316 | CCP5_CMD_INIT(&desc) = op->init; | ||
| 317 | CCP5_CMD_EOM(&desc) = op->eom; | ||
| 318 | CCP5_CMD_PROT(&desc) = 0; | ||
| 319 | |||
| 320 | function.raw = 0; | ||
| 321 | CCP_XTS_ENCRYPT(&function) = op->u.xts.action; | ||
| 322 | CCP_XTS_SIZE(&function) = op->u.xts.unit_size; | ||
| 323 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
| 324 | |||
| 325 | CCP5_CMD_LEN(&desc) = op->src.u.dma.length; | ||
| 326 | |||
| 327 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
| 328 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
| 329 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 330 | |||
| 331 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
| 332 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
| 333 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 334 | |||
| 335 | CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); | ||
| 336 | CCP5_CMD_KEY_HI(&desc) = 0; | ||
| 337 | CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; | ||
| 338 | CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; | ||
| 339 | |||
| 340 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
| 341 | } | ||
| 342 | |||
| 343 | static int ccp5_perform_sha(struct ccp_op *op) | ||
| 344 | { | ||
| 345 | struct ccp5_desc desc; | ||
| 346 | union ccp_function function; | ||
| 347 | |||
| 348 | /* Zero out all the fields of the command desc */ | ||
| 349 | memset(&desc, 0, Q_DESC_SIZE); | ||
| 350 | |||
| 351 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA; | ||
| 352 | |||
| 353 | CCP5_CMD_SOC(&desc) = op->soc; | ||
| 354 | CCP5_CMD_IOC(&desc) = 1; | ||
| 355 | CCP5_CMD_INIT(&desc) = 1; | ||
| 356 | CCP5_CMD_EOM(&desc) = op->eom; | ||
| 357 | CCP5_CMD_PROT(&desc) = 0; | ||
| 358 | |||
| 359 | function.raw = 0; | ||
| 360 | CCP_SHA_TYPE(&function) = op->u.sha.type; | ||
| 361 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
| 362 | |||
| 363 | CCP5_CMD_LEN(&desc) = op->src.u.dma.length; | ||
| 364 | |||
| 365 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
| 366 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
| 367 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 368 | |||
| 369 | CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; | ||
| 370 | |||
| 371 | if (op->eom) { | ||
| 372 | CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits); | ||
| 373 | CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits); | ||
| 374 | } else { | ||
| 375 | CCP5_CMD_SHA_LO(&desc) = 0; | ||
| 376 | CCP5_CMD_SHA_HI(&desc) = 0; | ||
| 377 | } | ||
| 378 | |||
| 379 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
| 380 | } | ||
| 381 | |||
| 382 | static int ccp5_perform_rsa(struct ccp_op *op) | ||
| 383 | { | ||
| 384 | struct ccp5_desc desc; | ||
| 385 | union ccp_function function; | ||
| 386 | |||
| 387 | /* Zero out all the fields of the command desc */ | ||
| 388 | memset(&desc, 0, Q_DESC_SIZE); | ||
| 389 | |||
| 390 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA; | ||
| 391 | |||
| 392 | CCP5_CMD_SOC(&desc) = op->soc; | ||
| 393 | CCP5_CMD_IOC(&desc) = 1; | ||
| 394 | CCP5_CMD_INIT(&desc) = 0; | ||
| 395 | CCP5_CMD_EOM(&desc) = 1; | ||
| 396 | CCP5_CMD_PROT(&desc) = 0; | ||
| 397 | |||
| 398 | function.raw = 0; | ||
| 399 | CCP_RSA_SIZE(&function) = op->u.rsa.mod_size; | ||
| 400 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
| 401 | |||
| 402 | CCP5_CMD_LEN(&desc) = op->u.rsa.input_len; | ||
| 403 | |||
| 404 | /* Source is from external memory */ | ||
| 405 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
| 406 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
| 407 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 408 | |||
| 409 | /* Destination is in external memory */ | ||
| 410 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
| 411 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
| 412 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 413 | |||
| 414 | /* Key (Exponent) is in external memory */ | ||
| 415 | CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma); | ||
| 416 | CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma); | ||
| 417 | CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 418 | |||
| 419 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
| 420 | } | ||
| 421 | |||
| 422 | static int ccp5_perform_passthru(struct ccp_op *op) | ||
| 423 | { | ||
| 424 | struct ccp5_desc desc; | ||
| 425 | union ccp_function function; | ||
| 426 | struct ccp_dma_info *saddr = &op->src.u.dma; | ||
| 427 | struct ccp_dma_info *daddr = &op->dst.u.dma; | ||
| 428 | |||
| 429 | memset(&desc, 0, Q_DESC_SIZE); | ||
| 430 | |||
| 431 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; | ||
| 432 | |||
| 433 | CCP5_CMD_SOC(&desc) = 0; | ||
| 434 | CCP5_CMD_IOC(&desc) = 1; | ||
| 435 | CCP5_CMD_INIT(&desc) = 0; | ||
| 436 | CCP5_CMD_EOM(&desc) = op->eom; | ||
| 437 | CCP5_CMD_PROT(&desc) = 0; | ||
| 438 | |||
| 439 | function.raw = 0; | ||
| 440 | CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap; | ||
| 441 | CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod; | ||
| 442 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
| 443 | |||
| 444 | /* Take the length from whichever side is in system memory */ | ||
| 445 | if (op->src.type == CCP_MEMTYPE_SYSTEM) | ||
| 446 | CCP5_CMD_LEN(&desc) = saddr->length; | ||
| 447 | else | ||
| 448 | CCP5_CMD_LEN(&desc) = daddr->length; | ||
| 449 | |||
| 450 | if (op->src.type == CCP_MEMTYPE_SYSTEM) { | ||
| 451 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
| 452 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
| 453 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 454 | |||
| 455 | if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) | ||
| 456 | CCP5_CMD_LSB_ID(&desc) = op->sb_key; | ||
| 457 | } else { | ||
| 458 | u32 key_addr = op->src.u.sb * CCP_SB_BYTES; | ||
| 459 | |||
| 460 | CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr); | ||
| 461 | CCP5_CMD_SRC_HI(&desc) = 0; | ||
| 462 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB; | ||
| 463 | } | ||
| 464 | |||
| 465 | if (op->dst.type == CCP_MEMTYPE_SYSTEM) { | ||
| 466 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
| 467 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
| 468 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 469 | } else { | ||
| 470 | u32 key_addr = op->dst.u.sb * CCP_SB_BYTES; | ||
| 471 | |||
| 472 | CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr); | ||
| 473 | CCP5_CMD_DST_HI(&desc) = 0; | ||
| 474 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB; | ||
| 475 | } | ||
| 476 | |||
| 477 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
| 478 | } | ||
| 479 | |||
| 480 | static int ccp5_perform_ecc(struct ccp_op *op) | ||
| 481 | { | ||
| 482 | struct ccp5_desc desc; | ||
| 483 | union ccp_function function; | ||
| 484 | |||
| 485 | /* Zero out all the fields of the command desc */ | ||
| 486 | memset(&desc, 0, Q_DESC_SIZE); | ||
| 487 | |||
| 488 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC; | ||
| 489 | |||
| 490 | CCP5_CMD_SOC(&desc) = 0; | ||
| 491 | CCP5_CMD_IOC(&desc) = 1; | ||
| 492 | CCP5_CMD_INIT(&desc) = 0; | ||
| 493 | CCP5_CMD_EOM(&desc) = 1; | ||
| 494 | CCP5_CMD_PROT(&desc) = 0; | ||
| 495 | |||
| 496 | function.raw = 0; | ||
| 497 | function.ecc.mode = op->u.ecc.function; | ||
| 498 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
| 499 | |||
| 500 | CCP5_CMD_LEN(&desc) = op->src.u.dma.length; | ||
| 501 | |||
| 502 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
| 503 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
| 504 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 505 | |||
| 506 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
| 507 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
| 508 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
| 509 | |||
| 510 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
| 511 | } | ||
| 512 | |||
| 513 | static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) | ||
| 514 | { | ||
| 515 | int q_mask = 1 << cmd_q->id; | ||
| 516 | int queues = 0; | ||
| 517 | int j; | ||
| 518 | |||
| 519 | /* Build a bit mask to know which LSBs this queue has access to. | ||
| 520 | * Don't bother with segment 0 as it has special privileges. | ||
| 521 | */ | ||
| 522 | for (j = 1; j < MAX_LSB_CNT; j++) { | ||
| 523 | if (status & q_mask) | ||
| 524 | bitmap_set(cmd_q->lsbmask, j, 1); | ||
| 525 | status >>= LSB_REGION_WIDTH; | ||
| 526 | } | ||
| 527 | queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); | ||
| 528 | dev_info(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", | ||
| 529 | cmd_q->id, queues); | ||
| 530 | |||
| 531 | return queues ? 0 : -EINVAL; | ||
| 532 | } | ||
| 533 | |||
| 534 | |||
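ccp_find_lsb_regions() consumes the 64-bit mask LSB_REGION_WIDTH bits at a time; within each region's field, bit `id` says whether queue `id` may use that region, and region 0 is skipped as privileged. Judging by the masks programmed in ccp5other_config() below (30 low bits plus 10 high bits), the field width is 5 bits across 8 regions, so for queue 2:

    /* q_mask = 1 << 2. Each iteration tests bit 2 of the current
     * low-order 5-bit field, records a hit in lsbmask, then shifts
     * the next region's field into place:
     *
     *   status = ... 00100 00100  ->  two fields grant queue 2
     *   bitmap_weight(lsbmask, MAX_LSB_CNT) == 2 usable regions
     */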
| 535 | static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, | ||
| 536 | int lsb_cnt, int n_lsbs, | ||
| 537 | unsigned long *lsb_pub) | ||
| 538 | { | ||
| 539 | DECLARE_BITMAP(qlsb, MAX_LSB_CNT); | ||
| 540 | int bitno; | ||
| 541 | int qlsb_wgt; | ||
| 542 | int i; | ||
| 543 | |||
| 544 | /* For each queue: | ||
| 545 | * If the count of potential LSBs available to a queue matches the | ||
| 546 | * ordinal given to us in lsb_cnt: | ||
| 547 | * Copy the mask of possible LSBs for this queue into "qlsb"; | ||
| 548 | * For each bit in qlsb, see if the corresponding bit in the | ||
| 549 | * aggregation mask is set; if so, we have a match. | ||
| 550 | * If we have a match, clear the bit in the aggregation to | ||
| 551 | * mark it as no longer available. | ||
| 552 | * If there is no match, clear the bit in qlsb and keep looking. | ||
| 553 | */ | ||
| 554 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 555 | struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; | ||
| 556 | |||
| 557 | qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); | ||
| 558 | |||
| 559 | if (qlsb_wgt == lsb_cnt) { | ||
| 560 | bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT); | ||
| 561 | |||
| 562 | bitno = find_first_bit(qlsb, MAX_LSB_CNT); | ||
| 563 | while (bitno < MAX_LSB_CNT) { | ||
| 564 | if (test_bit(bitno, lsb_pub)) { | ||
| 565 | /* We found an available LSB | ||
| 566 | * that this queue can access | ||
| 567 | */ | ||
| 568 | cmd_q->lsb = bitno; | ||
| 569 | bitmap_clear(lsb_pub, bitno, 1); | ||
| 570 | dev_info(ccp->dev, | ||
| 571 | "Queue %d gets LSB %d\n", | ||
| 572 | i, bitno); | ||
| 573 | break; | ||
| 574 | } | ||
| 575 | bitmap_clear(qlsb, bitno, 1); | ||
| 576 | bitno = find_first_bit(qlsb, MAX_LSB_CNT); | ||
| 577 | } | ||
| 578 | if (bitno >= MAX_LSB_CNT) | ||
| 579 | return -EINVAL; | ||
| 580 | n_lsbs--; | ||
| 581 | } | ||
| 582 | } | ||
| 583 | return n_lsbs; | ||
| 584 | } | ||
| 585 | |||
| 586 | /* For each queue, from the most- to least-constrained: | ||
| 587 | * find an LSB that can be assigned to the queue. If there are N queues that | ||
| 588 | * can only use M LSBs, where N > M, fail; otherwise, every queue will get a | ||
| 589 | * dedicated LSB. Remaining LSB regions become a shared resource. | ||
| 590 | * If we have fewer LSBs than queues, all LSB regions become shared resources. | ||
| 591 | */ | ||
| 592 | static int ccp_assign_lsbs(struct ccp_device *ccp) | ||
| 593 | { | ||
| 594 | DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT); | ||
| 595 | DECLARE_BITMAP(qlsb, MAX_LSB_CNT); | ||
| 596 | int n_lsbs = 0; | ||
| 597 | int bitno; | ||
| 598 | int i, lsb_cnt; | ||
| 599 | int rc = 0; | ||
| 600 | |||
| 601 | bitmap_zero(lsb_pub, MAX_LSB_CNT); | ||
| 602 | |||
| 603 | /* Create an aggregate bitmap to get a total count of available LSBs */ | ||
| 604 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
| 605 | bitmap_or(lsb_pub, | ||
| 606 | lsb_pub, ccp->cmd_q[i].lsbmask, | ||
| 607 | MAX_LSB_CNT); | ||
| 608 | |||
| 609 | n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT); | ||
| 610 | |||
| 611 | if (n_lsbs >= ccp->cmd_q_count) { | ||
| 612 | /* We have enough LSBs to give every queue a private LSB. | ||
| 613 | * Brute force search to start with the queues that are more | ||
| 614 | * constrained in LSB choice. When an LSB is privately | ||
| 615 | * assigned, it is removed from the public mask. | ||
| 616 | * This is an ugly N squared algorithm with some optimization. | ||
| 617 | */ | ||
| 618 | for (lsb_cnt = 1; | ||
| 619 | n_lsbs && (lsb_cnt <= MAX_LSB_CNT); | ||
| 620 | lsb_cnt++) { | ||
| 621 | rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, | ||
| 622 | lsb_pub); | ||
| 623 | if (rc < 0) | ||
| 624 | return -EINVAL; | ||
| 625 | n_lsbs = rc; | ||
| 626 | } | ||
| 627 | } | ||
| 628 | |||
| 629 | rc = 0; | ||
| 630 | /* What's left of the LSBs, according to the public mask, now become | ||
| 631 | * shared. Any zero bits in the lsb_pub mask represent an LSB region | ||
| 632 | * that can't be used as a shared resource, so mark the LSB slots for | ||
| 633 | * them as "in use". | ||
| 634 | */ | ||
| 635 | bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT); | ||
| 636 | |||
| 637 | bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); | ||
| 638 | while (bitno < MAX_LSB_CNT) { | ||
| 639 | bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); | ||
| 640 | bitmap_set(qlsb, bitno, 1); | ||
| 641 | bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); | ||
| 642 | } | ||
| 643 | |||
| 644 | return rc; | ||
| 645 | } | ||
| 646 | |||
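The ascending-weight ordering in ccp_assign_lsbs() is what keeps a flexible queue from stealing a constrained queue's only option. A worked example with two queues and public LSBs {1, 2}:

    /*   Q0 may use {1, 2}   (weight 2)
     *   Q1 may use {1}      (weight 1)
     *
     * Constrained-first: the lsb_cnt == 1 pass gives Q1 -> LSB 1
     * (cleared from lsb_pub); the lsb_cnt == 2 pass gives
     * Q0 -> LSB 2. Visiting queues in index order instead,
     * find_first_bit() would hand LSB 1 to Q0 and leave Q1 with
     * nothing it can use.
     */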
| 647 | static int ccp5_init(struct ccp_device *ccp) | ||
| 648 | { | ||
| 649 | struct device *dev = ccp->dev; | ||
| 650 | struct ccp_cmd_queue *cmd_q; | ||
| 651 | struct dma_pool *dma_pool; | ||
| 652 | char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; | ||
| 653 | unsigned int qmr, qim, i; | ||
| 654 | u64 status; | ||
| 655 | u32 status_lo, status_hi; | ||
| 656 | int ret; | ||
| 657 | |||
| 658 | /* Find available queues */ | ||
| 659 | qim = 0; | ||
| 660 | qmr = ioread32(ccp->io_regs + Q_MASK_REG); | ||
| 661 | for (i = 0; i < MAX_HW_QUEUES; i++) { | ||
| 662 | |||
| 663 | if (!(qmr & (1 << i))) | ||
| 664 | continue; | ||
| 665 | |||
| 666 | /* Allocate a dma pool for this queue */ | ||
| 667 | snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", | ||
| 668 | ccp->name, i); | ||
| 669 | dma_pool = dma_pool_create(dma_pool_name, dev, | ||
| 670 | CCP_DMAPOOL_MAX_SIZE, | ||
| 671 | CCP_DMAPOOL_ALIGN, 0); | ||
| 672 | if (!dma_pool) { | ||
| 673 | dev_err(dev, "unable to allocate dma pool\n"); | ||
| 674 | ret = -ENOMEM; | ||
| 675 | goto e_pool; | ||
| 676 | } | ||
| 676 | |||
| 677 | cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; | ||
| 678 | ccp->cmd_q_count++; | ||
| 679 | |||
| 680 | cmd_q->ccp = ccp; | ||
| 681 | cmd_q->id = i; | ||
| 682 | cmd_q->dma_pool = dma_pool; | ||
| 683 | mutex_init(&cmd_q->q_mutex); | ||
| 684 | |||
| 685 | /* Page alignment satisfies our needs for N <= 128 */ | ||
| 686 | BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); | ||
| 687 | cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); | ||
| 688 | cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, | ||
| 689 | &cmd_q->qbase_dma, | ||
| 690 | GFP_KERNEL); | ||
| 691 | if (!cmd_q->qbase) { | ||
| 692 | dev_err(dev, "unable to allocate command queue\n"); | ||
| 693 | ret = -ENOMEM; | ||
| 694 | goto e_pool; | ||
| 695 | } | ||
| 696 | |||
| 697 | cmd_q->qidx = 0; | ||
| 698 | /* Preset some register values and masks that are queue | ||
| 699 | * number dependent | ||
| 700 | */ | ||
| 701 | cmd_q->reg_control = ccp->io_regs + | ||
| 702 | CMD5_Q_STATUS_INCR * (i + 1); | ||
| 703 | cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE; | ||
| 704 | cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE; | ||
| 705 | cmd_q->reg_int_enable = cmd_q->reg_control + | ||
| 706 | CMD5_Q_INT_ENABLE_BASE; | ||
| 707 | cmd_q->reg_interrupt_status = cmd_q->reg_control + | ||
| 708 | CMD5_Q_INTERRUPT_STATUS_BASE; | ||
| 709 | cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE; | ||
| 710 | cmd_q->reg_int_status = cmd_q->reg_control + | ||
| 711 | CMD5_Q_INT_STATUS_BASE; | ||
| 712 | cmd_q->reg_dma_status = cmd_q->reg_control + | ||
| 713 | CMD5_Q_DMA_STATUS_BASE; | ||
| 714 | cmd_q->reg_dma_read_status = cmd_q->reg_control + | ||
| 715 | CMD5_Q_DMA_READ_STATUS_BASE; | ||
| 716 | cmd_q->reg_dma_write_status = cmd_q->reg_control + | ||
| 717 | CMD5_Q_DMA_WRITE_STATUS_BASE; | ||
| 718 | |||
| 719 | init_waitqueue_head(&cmd_q->int_queue); | ||
| 720 | |||
| 721 | dev_dbg(dev, "queue #%u available\n", i); | ||
| 722 | } | ||
| 723 | if (ccp->cmd_q_count == 0) { | ||
| 724 | dev_notice(dev, "no command queues available\n"); | ||
| 725 | ret = -EIO; | ||
| 726 | goto e_pool; | ||
| 727 | } | ||
| 728 | dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); | ||
| 729 | |||
| 730 | /* Turn off the queues and disable interrupts until ready */ | ||
| 731 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 732 | cmd_q = &ccp->cmd_q[i]; | ||
| 733 | |||
| 734 | cmd_q->qcontrol = 0; /* Start with nothing */ | ||
| 735 | iowrite32(cmd_q->qcontrol, cmd_q->reg_control); | ||
| 736 | |||
| 737 | /* Disable the interrupts */ | ||
| 738 | iowrite32(0x00, cmd_q->reg_int_enable); | ||
| 739 | ioread32(cmd_q->reg_int_status); | ||
| 740 | ioread32(cmd_q->reg_status); | ||
| 741 | |||
| 742 | /* Clear the interrupts */ | ||
| 743 | iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status); | ||
| 744 | } | ||
| 745 | |||
| 746 | dev_dbg(dev, "Requesting an IRQ...\n"); | ||
| 747 | /* Request an irq */ | ||
| 748 | ret = ccp->get_irq(ccp); | ||
| 749 | if (ret) { | ||
| 750 | dev_err(dev, "unable to allocate an IRQ\n"); | ||
| 751 | goto e_pool; | ||
| 752 | } | ||
| 753 | |||
| 754 | /* Initialize the queue used to suspend */ | ||
| 755 | init_waitqueue_head(&ccp->suspend_queue); | ||
| 756 | |||
| 757 | dev_dbg(dev, "Loading LSB map...\n"); | ||
| 758 | /* Copy the private LSB mask to the public registers */ | ||
| 759 | status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); | ||
| 760 | status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); | ||
| 761 | iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); | ||
| 762 | iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); | ||
| 763 | status = ((u64)status_hi<<30) | (u64)status_lo; | ||
| 764 | |||
| 765 | dev_dbg(dev, "Configuring virtual queues...\n"); | ||
| 766 | /* Configure size of each virtual queue accessible to host */ | ||
| 767 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 768 | u32 dma_addr_lo; | ||
| 769 | u32 dma_addr_hi; | ||
| 770 | |||
| 771 | cmd_q = &ccp->cmd_q[i]; | ||
| 772 | |||
| 773 | cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); | ||
| 774 | cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; | ||
| 775 | |||
| 776 | cmd_q->qdma_tail = cmd_q->qbase_dma; | ||
| 777 | dma_addr_lo = low_address(cmd_q->qdma_tail); | ||
| 778 | iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo); | ||
| 779 | iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo); | ||
| 780 | |||
| 781 | dma_addr_hi = high_address(cmd_q->qdma_tail); | ||
| 782 | cmd_q->qcontrol |= (dma_addr_hi << 16); | ||
| 783 | iowrite32(cmd_q->qcontrol, cmd_q->reg_control); | ||
| 784 | |||
| 785 | /* Find the LSB regions accessible to the queue */ | ||
| 786 | ccp_find_lsb_regions(cmd_q, status); | ||
| 787 | cmd_q->lsb = -1; /* Unassigned value */ | ||
| 788 | } | ||
| 789 | |||
| 790 | dev_dbg(dev, "Assigning LSBs...\n"); | ||
| 791 | ret = ccp_assign_lsbs(ccp); | ||
| 792 | if (ret) { | ||
| 793 | dev_err(dev, "Unable to assign LSBs (%d)\n", ret); | ||
| 794 | goto e_irq; | ||
| 795 | } | ||
| 796 | |||
| 797 | /* Optimization: pre-allocate LSB slots for each queue */ | ||
| 798 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 799 | ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); | ||
| 800 | ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); | ||
| 801 | } | ||
| 802 | |||
| 803 | dev_dbg(dev, "Starting threads...\n"); | ||
| 804 | /* Create a kthread for each queue */ | ||
| 805 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 806 | struct task_struct *kthread; | ||
| 807 | |||
| 808 | cmd_q = &ccp->cmd_q[i]; | ||
| 809 | |||
| 810 | kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, | ||
| 811 | "%s-q%u", ccp->name, cmd_q->id); | ||
| 812 | if (IS_ERR(kthread)) { | ||
| 813 | dev_err(dev, "error creating queue thread (%ld)\n", | ||
| 814 | PTR_ERR(kthread)); | ||
| 815 | ret = PTR_ERR(kthread); | ||
| 816 | goto e_kthread; | ||
| 817 | } | ||
| 818 | |||
| 819 | cmd_q->kthread = kthread; | ||
| 820 | wake_up_process(kthread); | ||
| 821 | } | ||
| 822 | |||
| 823 | dev_dbg(dev, "Enabling interrupts...\n"); | ||
| 824 | /* Enable interrupts */ | ||
| 825 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 826 | cmd_q = &ccp->cmd_q[i]; | ||
| 827 | iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable); | ||
| 828 | } | ||
| 829 | |||
| 830 | dev_dbg(dev, "Registering device...\n"); | ||
| 831 | /* Put this on the unit list to make it available */ | ||
| 832 | ccp_add_device(ccp); | ||
| 833 | |||
| 834 | ret = ccp_register_rng(ccp); | ||
| 835 | if (ret) | ||
| 836 | goto e_kthread; | ||
| 837 | |||
| 838 | /* Register the DMA engine support */ | ||
| 839 | ret = ccp_dmaengine_register(ccp); | ||
| 840 | if (ret) | ||
| 841 | goto e_hwrng; | ||
| 842 | |||
| 843 | return 0; | ||
| 844 | |||
| 845 | e_hwrng: | ||
| 846 | ccp_unregister_rng(ccp); | ||
| 847 | |||
| 848 | e_kthread: | ||
| 849 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
| 850 | if (ccp->cmd_q[i].kthread) | ||
| 851 | kthread_stop(ccp->cmd_q[i].kthread); | ||
| 852 | |||
| 853 | e_irq: | ||
| 854 | ccp->free_irq(ccp); | ||
| 855 | |||
| 856 | e_pool: | ||
| 857 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
| 858 | dma_pool_destroy(ccp->cmd_q[i].dma_pool); | ||
| 859 | |||
| 860 | return ret; | ||
| 861 | } | ||
| 862 | |||
| 863 | static void ccp5_destroy(struct ccp_device *ccp) | ||
| 864 | { | ||
| 865 | struct device *dev = ccp->dev; | ||
| 866 | struct ccp_cmd_queue *cmd_q; | ||
| 867 | struct ccp_cmd *cmd; | ||
| 868 | unsigned int i; | ||
| 869 | |||
| 870 | /* Unregister the DMA engine */ | ||
| 871 | ccp_dmaengine_unregister(ccp); | ||
| 872 | |||
| 873 | /* Unregister the RNG */ | ||
| 874 | ccp_unregister_rng(ccp); | ||
| 875 | |||
| 876 | /* Remove this device from the list of available units first */ | ||
| 877 | ccp_del_device(ccp); | ||
| 878 | |||
| 879 | /* Disable and clear interrupts */ | ||
| 880 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 881 | cmd_q = &ccp->cmd_q[i]; | ||
| 882 | |||
| 883 | /* Turn off the run bit */ | ||
| 884 | iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); | ||
| 885 | |||
| 886 | /* Disable the interrupts */ | ||
| 887 | iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status); | ||
| 888 | |||
| 889 | /* Clear the interrupt status */ | ||
| 890 | iowrite32(0x00, cmd_q->reg_int_enable); | ||
| 891 | ioread32(cmd_q->reg_int_status); | ||
| 892 | ioread32(cmd_q->reg_status); | ||
| 893 | } | ||
| 894 | |||
| 895 | /* Stop the queue kthreads */ | ||
| 896 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
| 897 | if (ccp->cmd_q[i].kthread) | ||
| 898 | kthread_stop(ccp->cmd_q[i].kthread); | ||
| 899 | |||
| 900 | ccp->free_irq(ccp); | ||
| 901 | |||
| 902 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 903 | cmd_q = &ccp->cmd_q[i]; | ||
| 904 | dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, | ||
| 905 | cmd_q->qbase_dma); | ||
| 906 | } | ||
| 907 | |||
| 908 | /* Flush the cmd and backlog queue */ | ||
| 909 | while (!list_empty(&ccp->cmd)) { | ||
| 910 | /* Invoke the callback directly with an error code */ | ||
| 911 | cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); | ||
| 912 | list_del(&cmd->entry); | ||
| 913 | cmd->callback(cmd->data, -ENODEV); | ||
| 914 | } | ||
| 915 | while (!list_empty(&ccp->backlog)) { | ||
| 916 | /* Invoke the callback directly with an error code */ | ||
| 917 | cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); | ||
| 918 | list_del(&cmd->entry); | ||
| 919 | cmd->callback(cmd->data, -ENODEV); | ||
| 920 | } | ||
| 921 | } | ||
| 922 | |||
| 923 | static irqreturn_t ccp5_irq_handler(int irq, void *data) | ||
| 924 | { | ||
| 925 | struct device *dev = data; | ||
| 926 | struct ccp_device *ccp = dev_get_drvdata(dev); | ||
| 927 | u32 status; | ||
| 928 | unsigned int i; | ||
| 929 | |||
| 930 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
| 931 | struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; | ||
| 932 | |||
| 933 | status = ioread32(cmd_q->reg_interrupt_status); | ||
| 934 | |||
| 935 | if (status) { | ||
| 936 | cmd_q->int_status = status; | ||
| 937 | cmd_q->q_status = ioread32(cmd_q->reg_status); | ||
| 938 | cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); | ||
| 939 | |||
| 940 | /* On error, only save the first error value */ | ||
| 941 | if ((status & INT_ERROR) && !cmd_q->cmd_error) | ||
| 942 | cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); | ||
| 943 | |||
| 944 | cmd_q->int_rcvd = 1; | ||
| 945 | |||
| 946 | /* Acknowledge the interrupt and wake the kthread */ | ||
| 947 | iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status); | ||
| 948 | wake_up_interruptible(&cmd_q->int_queue); | ||
| 949 | } | ||
| 950 | } | ||
| 951 | |||
| 952 | return IRQ_HANDLED; | ||
| 953 | } | ||
| 954 | |||
| 955 | static void ccp5_config(struct ccp_device *ccp) | ||
| 956 | { | ||
| 957 | /* Public side */ | ||
| 958 | iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); | ||
| 959 | } | ||
| 960 | |||
| 961 | static void ccp5other_config(struct ccp_device *ccp) | ||
| 962 | { | ||
| 963 | int i; | ||
| 964 | u32 rnd; | ||
| 965 | |||
| 966 | /* We own all of the queues on the NTB CCP */ | ||
| 967 | |||
| 968 | iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); | ||
| 969 | iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); | ||
| 970 | for (i = 0; i < 12; i++) { | ||
| 971 | rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); | ||
| 972 | iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); | ||
| 973 | } | ||
| 974 | |||
| 975 | iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET); | ||
| 976 | iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET); | ||
| 977 | iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET); | ||
| 978 | |||
| 979 | iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); | ||
| 980 | iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); | ||
| 981 | |||
| 982 | iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET); | ||
| 983 | |||
| 984 | ccp5_config(ccp); | ||
| 985 | } | ||
| 986 | |||
| 987 | /* Version 5 adds some function, but is essentially the same as v3 */ | ||
| 988 | static const struct ccp_actions ccp5_actions = { | ||
| 989 | .aes = ccp5_perform_aes, | ||
| 990 | .xts_aes = ccp5_perform_xts_aes, | ||
| 991 | .sha = ccp5_perform_sha, | ||
| 992 | .rsa = ccp5_perform_rsa, | ||
| 993 | .passthru = ccp5_perform_passthru, | ||
| 994 | .ecc = ccp5_perform_ecc, | ||
| 995 | .sballoc = ccp_lsb_alloc, | ||
| 996 | .sbfree = ccp_lsb_free, | ||
| 997 | .init = ccp5_init, | ||
| 998 | .destroy = ccp5_destroy, | ||
| 999 | .get_free_slots = ccp5_get_free_slots, | ||
| 1000 | .irqhandler = ccp5_irq_handler, | ||
| 1001 | }; | ||
| 1002 | |||
| 1003 | const struct ccp_vdata ccpv5a = { | ||
| 1004 | .version = CCP_VERSION(5, 0), | ||
| 1005 | .setup = ccp5_config, | ||
| 1006 | .perform = &ccp5_actions, | ||
| 1007 | .bar = 2, | ||
| 1008 | .offset = 0x0, | ||
| 1009 | }; | ||
| 1010 | |||
| 1011 | const struct ccp_vdata ccpv5b = { | ||
| 1012 | .version = CCP_VERSION(5, 0), | ||
| 1013 | .setup = ccp5other_config, | ||
| 1014 | .perform = &ccp5_actions, | ||
| 1015 | .bar = 2, | ||
| 1016 | .offset = 0x0, | ||
| 1017 | }; | ||
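ccpv5a and ccpv5b share the actions table and differ only in the .setup hook: the "other" (5b) variant additionally programs the TRNG, the queue masks and the private LSB masks before falling through to ccp5_config(). Since ccpv3 leaves .setup NULL, probe code is presumably expected to call it conditionally — a sketch:

    if (ccp->vdata->setup)
            ccp->vdata->setup(ccp);

    ret = ccp->vdata->perform->init(ccp);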
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 87b9f2bfa623..cafa633aae10 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
| 5 | * | 5 | * |
| 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
| 7 | * Author: Gary R Hook <gary.hook@amd.com> | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
| @@ -39,6 +40,59 @@ struct ccp_tasklet_data { | |||
| 39 | struct ccp_cmd *cmd; | 40 | struct ccp_cmd *cmd; |
| 40 | }; | 41 | }; |
| 41 | 42 | ||
| 43 | /* Human-readable error strings */ | ||
| 44 | char *ccp_error_codes[] = { | ||
| 45 | "", | ||
| 46 | "ERR 01: ILLEGAL_ENGINE", | ||
| 47 | "ERR 02: ILLEGAL_KEY_ID", | ||
| 48 | "ERR 03: ILLEGAL_FUNCTION_TYPE", | ||
| 49 | "ERR 04: ILLEGAL_FUNCTION_MODE", | ||
| 50 | "ERR 05: ILLEGAL_FUNCTION_ENCRYPT", | ||
| 51 | "ERR 06: ILLEGAL_FUNCTION_SIZE", | ||
| 52 | "ERR 07: Zlib_MISSING_INIT_EOM", | ||
| 53 | "ERR 08: ILLEGAL_FUNCTION_RSVD", | ||
| 54 | "ERR 09: ILLEGAL_BUFFER_LENGTH", | ||
| 55 | "ERR 10: VLSB_FAULT", | ||
| 56 | "ERR 11: ILLEGAL_MEM_ADDR", | ||
| 57 | "ERR 12: ILLEGAL_MEM_SEL", | ||
| 58 | "ERR 13: ILLEGAL_CONTEXT_ID", | ||
| 59 | "ERR 14: ILLEGAL_KEY_ADDR", | ||
| 60 | "ERR 15: 0xF Reserved", | ||
| 61 | "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE", | ||
| 62 | "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE", | ||
| 63 | "ERR 18: CMD_TIMEOUT", | ||
| 64 | "ERR 19: IDMA0_AXI_SLVERR", | ||
| 65 | "ERR 20: IDMA0_AXI_DECERR", | ||
| 66 | "ERR 21: 0x15 Reserved", | ||
| 67 | "ERR 22: IDMA1_AXI_SLAVE_FAULT", | ||
| 68 | "ERR 23: IDMA1_AIXI_DECERR", | ||
| 69 | "ERR 24: 0x18 Reserved", | ||
| 70 | "ERR 25: ZLIBVHB_AXI_SLVERR", | ||
| 71 | "ERR 26: ZLIBVHB_AXI_DECERR", | ||
| 72 | "ERR 27: 0x1B Reserved", | ||
| 73 | "ERR 27: ZLIB_UNEXPECTED_EOM", | ||
| 74 | "ERR 27: ZLIB_EXTRA_DATA", | ||
| 75 | "ERR 30: ZLIB_BTYPE", | ||
| 76 | "ERR 31: ZLIB_UNDEFINED_SYMBOL", | ||
| 77 | "ERR 32: ZLIB_UNDEFINED_DISTANCE_S", | ||
| 78 | "ERR 33: ZLIB_CODE_LENGTH_SYMBOL", | ||
| 79 | "ERR 34: ZLIB _VHB_ILLEGAL_FETCH", | ||
| 80 | "ERR 35: ZLIB_UNCOMPRESSED_LEN", | ||
| 81 | "ERR 36: ZLIB_LIMIT_REACHED", | ||
| 82 | "ERR 37: ZLIB_CHECKSUM_MISMATCH0", | ||
| 83 | "ERR 38: ODMA0_AXI_SLVERR", | ||
| 84 | "ERR 39: ODMA0_AXI_DECERR", | ||
| 85 | "ERR 40: 0x28 Reserved", | ||
| 86 | "ERR 41: ODMA1_AXI_SLVERR", | ||
| 87 | "ERR 42: ODMA1_AXI_DECERR", | ||
| 88 | "ERR 43: LSB_PARITY_ERR", | ||
| 89 | }; | ||
| 90 | |||
| 91 | void ccp_log_error(struct ccp_device *d, int e) | ||
| 92 | { | ||
| 93 | dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e); | ||
| 94 | } | ||
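A hedged usage sketch: a queue's error bits can be extracted with CMD_Q_ERROR() (defined in ccp-dev.h) and fed to the logger; the status read below is illustrative, not code from this patch:

	u32 status = ioread32(cmd_q->reg_status);

	if (CMD_Q_ERROR(status))
		ccp_log_error(cmd_q->ccp, CMD_Q_ERROR(status));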
| 95 | |||
| 42 | /* List of CCPs, CCP count, read-write access lock, and access functions | 96 | /* List of CCPs, CCP count, read-write access lock, and access functions |
| 43 | * | 97 | * |
| 44 | * Lock structure: get ccp_unit_lock for reading whenever we need to | 98 | * Lock structure: get ccp_unit_lock for reading whenever we need to |
| @@ -58,7 +112,7 @@ static struct ccp_device *ccp_rr; | |||
| 58 | 112 | ||
| 59 | /* Ever-increasing value to produce unique unit numbers */ | 113 | /* Ever-increasing value to produce unique unit numbers */ |
| 60 | static atomic_t ccp_unit_ordinal; | 114 | static atomic_t ccp_unit_ordinal; |
| 61 | unsigned int ccp_increment_unit_ordinal(void) | 115 | static unsigned int ccp_increment_unit_ordinal(void) |
| 62 | { | 116 | { |
| 63 | return atomic_inc_return(&ccp_unit_ordinal); | 117 | return atomic_inc_return(&ccp_unit_ordinal); |
| 64 | } | 118 | } |
| @@ -118,6 +172,29 @@ void ccp_del_device(struct ccp_device *ccp) | |||
| 118 | write_unlock_irqrestore(&ccp_unit_lock, flags); | 172 | write_unlock_irqrestore(&ccp_unit_lock, flags); |
| 119 | } | 173 | } |
| 120 | 174 | ||
| 175 | |||
| 176 | |||
| 177 | int ccp_register_rng(struct ccp_device *ccp) | ||
| 178 | { | ||
| 179 | int ret = 0; | ||
| 180 | |||
| 181 | dev_dbg(ccp->dev, "Registering RNG...\n"); | ||
| 182 | /* Register an RNG */ | ||
| 183 | ccp->hwrng.name = ccp->rngname; | ||
| 184 | ccp->hwrng.read = ccp_trng_read; | ||
| 185 | ret = hwrng_register(&ccp->hwrng); | ||
| 186 | if (ret) | ||
| 187 | dev_err(ccp->dev, "error registering hwrng (%d)\n", ret); | ||
| 188 | |||
| 189 | return ret; | ||
| 190 | } | ||
| 191 | |||
| 192 | void ccp_unregister_rng(struct ccp_device *ccp) | ||
| 193 | { | ||
| 194 | if (ccp->hwrng.name) | ||
| 195 | hwrng_unregister(&ccp->hwrng); | ||
| 196 | } | ||
| 197 | |||
| 121 | static struct ccp_device *ccp_get_device(void) | 198 | static struct ccp_device *ccp_get_device(void) |
| 122 | { | 199 | { |
| 123 | unsigned long flags; | 200 | unsigned long flags; |
| @@ -397,9 +474,9 @@ struct ccp_device *ccp_alloc_struct(struct device *dev) | |||
| 397 | 474 | ||
| 398 | spin_lock_init(&ccp->cmd_lock); | 475 | spin_lock_init(&ccp->cmd_lock); |
| 399 | mutex_init(&ccp->req_mutex); | 476 | mutex_init(&ccp->req_mutex); |
| 400 | mutex_init(&ccp->ksb_mutex); | 477 | mutex_init(&ccp->sb_mutex); |
| 401 | ccp->ksb_count = KSB_COUNT; | 478 | ccp->sb_count = KSB_COUNT; |
| 402 | ccp->ksb_start = 0; | 479 | ccp->sb_start = 0; |
| 403 | 480 | ||
| 404 | ccp->ord = ccp_increment_unit_ordinal(); | 481 | ccp->ord = ccp_increment_unit_ordinal(); |
| 405 | snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); | 482 | snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); |
| @@ -408,6 +485,34 @@ struct ccp_device *ccp_alloc_struct(struct device *dev) | |||
| 408 | return ccp; | 485 | return ccp; |
| 409 | } | 486 | } |
| 410 | 487 | ||
| 488 | int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) | ||
| 489 | { | ||
| 490 | struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); | ||
| 491 | u32 trng_value; | ||
| 492 | int len = min_t(int, sizeof(trng_value), max); | ||
| 493 | |||
| 494 | /* Locking is provided by the caller so we can update device | ||
| 495 | * hwrng-related fields safely | ||
| 496 | */ | ||
| 497 | trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); | ||
| 498 | if (!trng_value) { | ||
| 499 | /* Zero is returned if no data is available or if a | ||
| 500 | * bad-entropy error is present. Assume an error if | ||
| 501 | * we exceed TRNG_RETRIES reads of zero. | ||
| 502 | */ | ||
| 503 | if (ccp->hwrng_retries++ > TRNG_RETRIES) | ||
| 504 | return -EIO; | ||
| 505 | |||
| 506 | return 0; | ||
| 507 | } | ||
| 508 | |||
| 509 | /* Reset the counter and save the rng value */ | ||
| 510 | ccp->hwrng_retries = 0; | ||
| 511 | memcpy(data, &trng_value, len); | ||
| 512 | |||
| 513 | return len; | ||
| 514 | } | ||
| 515 | |||
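The "locking is provided by the caller" comment refers to the hwrng core, which serializes readers before calling into the driver; roughly (a simplified sketch of the drivers/char/hw_random core, not code from this patch):

	mutex_lock(&reading_mutex);			/* one reader at a time */
	bytes = rng->read(rng, buffer, size, wait);	/* invokes ccp_trng_read() */
	mutex_unlock(&reading_mutex);

so ccp->hwrng_retries needs no additional protection, and a consumer sees only a bounded run of zero-length reads before -EIO is returned.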
| 411 | #ifdef CONFIG_PM | 516 | #ifdef CONFIG_PM |
| 412 | bool ccp_queues_suspended(struct ccp_device *ccp) | 517 | bool ccp_queues_suspended(struct ccp_device *ccp) |
| 413 | { | 518 | { |
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index bd41ffceff82..da5f4a678083 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
| 5 | * | 5 | * |
| 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
| 7 | * Author: Gary R Hook <gary.hook@amd.com> | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
| @@ -60,7 +61,69 @@ | |||
| 60 | #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) | 61 | #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) |
| 61 | #define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f) | 62 | #define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f) |
| 62 | 63 | ||
| 63 | /****** REQ0 Related Values ******/ | 64 | /* ------------------------ CCP Version 5 Specifics ------------------------ */ |
| 65 | #define CMD5_QUEUE_MASK_OFFSET 0x00 | ||
| 66 | #define CMD5_QUEUE_PRIO_OFFSET 0x04 | ||
| 67 | #define CMD5_REQID_CONFIG_OFFSET 0x08 | ||
| 68 | #define CMD5_CMD_TIMEOUT_OFFSET 0x10 | ||
| 69 | #define LSB_PUBLIC_MASK_LO_OFFSET 0x18 | ||
| 70 | #define LSB_PUBLIC_MASK_HI_OFFSET 0x1C | ||
| 71 | #define LSB_PRIVATE_MASK_LO_OFFSET 0x20 | ||
| 72 | #define LSB_PRIVATE_MASK_HI_OFFSET 0x24 | ||
| 73 | |||
| 74 | #define CMD5_Q_CONTROL_BASE 0x0000 | ||
| 75 | #define CMD5_Q_TAIL_LO_BASE 0x0004 | ||
| 76 | #define CMD5_Q_HEAD_LO_BASE 0x0008 | ||
| 77 | #define CMD5_Q_INT_ENABLE_BASE 0x000C | ||
| 78 | #define CMD5_Q_INTERRUPT_STATUS_BASE 0x0010 | ||
| 79 | |||
| 80 | #define CMD5_Q_STATUS_BASE 0x0100 | ||
| 81 | #define CMD5_Q_INT_STATUS_BASE 0x0104 | ||
| 82 | #define CMD5_Q_DMA_STATUS_BASE 0x0108 | ||
| 83 | #define CMD5_Q_DMA_READ_STATUS_BASE 0x010C | ||
| 84 | #define CMD5_Q_DMA_WRITE_STATUS_BASE 0x0110 | ||
| 85 | #define CMD5_Q_ABORT_BASE 0x0114 | ||
| 86 | #define CMD5_Q_AX_CACHE_BASE 0x0118 | ||
| 87 | |||
| 88 | #define CMD5_CONFIG_0_OFFSET 0x6000 | ||
| 89 | #define CMD5_TRNG_CTL_OFFSET 0x6008 | ||
| 90 | #define CMD5_AES_MASK_OFFSET 0x6010 | ||
| 91 | #define CMD5_CLK_GATE_CTL_OFFSET 0x603C | ||
| 92 | |||
| 93 | /* Address offset between two virtual queue registers */ | ||
| 94 | #define CMD5_Q_STATUS_INCR 0x1000 | ||
| 95 | |||
| 96 | /* Bit masks */ | ||
| 97 | #define CMD5_Q_RUN 0x1 | ||
| 98 | #define CMD5_Q_HALT 0x2 | ||
| 99 | #define CMD5_Q_MEM_LOCATION 0x4 | ||
| 100 | #define CMD5_Q_SIZE 0x1F | ||
| 101 | #define CMD5_Q_SHIFT 3 | ||
| 102 | #define COMMANDS_PER_QUEUE 16 | ||
| 103 | #define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ | ||
| 104 | CMD5_Q_SIZE) | ||
| 105 | #define Q_PTR_MASK ((2 << (QUEUE_SIZE_VAL + 5)) - 1) | ||
| 106 | #define Q_DESC_SIZE sizeof(struct ccp5_desc) | ||
| 107 | #define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) | ||
| 108 | |||
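Working the ring macros through with the values above, as a reading aid: ffs(COMMANDS_PER_QUEUE) = ffs(16) = 5, so QUEUE_SIZE_VAL = 3; a ring of 16 descriptors of Q_DESC_SIZE = 32 bytes spans Q_SIZE(Q_DESC_SIZE) = 512 bytes; and Q_PTR_MASK = (2 << 8) - 1 = 0x1FF masks a byte offset within that ring. CMD5_Q_STATUS_INCR (0x1000) is the stride between per-queue register blocks, so queue n's control register would be reached at roughly io_regs + (n + 1) * CMD5_Q_STATUS_INCR + CMD5_Q_CONTROL_BASE (the exact base used for queue 0 is an assumption about the init code, which this hunk does not show).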
| 109 | #define INT_COMPLETION 0x1 | ||
| 110 | #define INT_ERROR 0x2 | ||
| 111 | #define INT_QUEUE_STOPPED 0x4 | ||
| 112 | #define ALL_INTERRUPTS (INT_COMPLETION| \ | ||
| 113 | INT_ERROR| \ | ||
| 114 | INT_QUEUE_STOPPED) | ||
| 115 | |||
| 116 | #define LSB_REGION_WIDTH 5 | ||
| 117 | #define MAX_LSB_CNT 8 | ||
| 118 | |||
| 119 | #define LSB_SIZE 16 | ||
| 120 | #define LSB_ITEM_SIZE 32 | ||
| 121 | #define PLSB_MAP_SIZE (LSB_SIZE) | ||
| 122 | #define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE) | ||
| 123 | |||
| 124 | #define LSB_ENTRY_NUMBER(LSB_ADDR) ((LSB_ADDR) / LSB_ITEM_SIZE) | ||
| 125 | |||
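Putting the LSB constants together: each of the MAX_LSB_CNT (8) regions holds LSB_SIZE (16) entries of LSB_ITEM_SIZE (32) bytes, i.e. 512 bytes per region; LSB_ENTRY_NUMBER() turns a byte address within LSB space into an entry index, e.g. LSB_ENTRY_NUMBER(0x60) == 3; and PLSB_MAP_SIZE/SLSB_MAP_SIZE size the allocation bitmaps for one private region (16 entries) and for all shareable regions (8 * 16 = 128 entries) respectively.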
| 126 | /* ------------------------ CCP Version 3 Specifics ------------------------ */ | ||
| 64 | #define REQ0_WAIT_FOR_WRITE 0x00000004 | 127 | #define REQ0_WAIT_FOR_WRITE 0x00000004 |
| 65 | #define REQ0_INT_ON_COMPLETE 0x00000002 | 128 | #define REQ0_INT_ON_COMPLETE 0x00000002 |
| 66 | #define REQ0_STOP_ON_COMPLETE 0x00000001 | 129 | #define REQ0_STOP_ON_COMPLETE 0x00000001 |
| @@ -110,29 +173,30 @@ | |||
| 110 | #define KSB_START 77 | 173 | #define KSB_START 77 |
| 111 | #define KSB_END 127 | 174 | #define KSB_END 127 |
| 112 | #define KSB_COUNT (KSB_END - KSB_START + 1) | 175 | #define KSB_COUNT (KSB_END - KSB_START + 1) |
| 113 | #define CCP_KSB_BITS 256 | 176 | #define CCP_SB_BITS 256 |
| 114 | #define CCP_KSB_BYTES 32 | ||
| 115 | 177 | ||
| 116 | #define CCP_JOBID_MASK 0x0000003f | 178 | #define CCP_JOBID_MASK 0x0000003f |
| 117 | 179 | ||
| 180 | /* ------------------------ General CCP Defines ------------------------ */ | ||
| 181 | |||
| 118 | #define CCP_DMAPOOL_MAX_SIZE 64 | 182 | #define CCP_DMAPOOL_MAX_SIZE 64 |
| 119 | #define CCP_DMAPOOL_ALIGN BIT(5) | 183 | #define CCP_DMAPOOL_ALIGN BIT(5) |
| 120 | 184 | ||
| 121 | #define CCP_REVERSE_BUF_SIZE 64 | 185 | #define CCP_REVERSE_BUF_SIZE 64 |
| 122 | 186 | ||
| 123 | #define CCP_AES_KEY_KSB_COUNT 1 | 187 | #define CCP_AES_KEY_SB_COUNT 1 |
| 124 | #define CCP_AES_CTX_KSB_COUNT 1 | 188 | #define CCP_AES_CTX_SB_COUNT 1 |
| 125 | 189 | ||
| 126 | #define CCP_XTS_AES_KEY_KSB_COUNT 1 | 190 | #define CCP_XTS_AES_KEY_SB_COUNT 1 |
| 127 | #define CCP_XTS_AES_CTX_KSB_COUNT 1 | 191 | #define CCP_XTS_AES_CTX_SB_COUNT 1 |
| 128 | 192 | ||
| 129 | #define CCP_SHA_KSB_COUNT 1 | 193 | #define CCP_SHA_SB_COUNT 1 |
| 130 | 194 | ||
| 131 | #define CCP_RSA_MAX_WIDTH 4096 | 195 | #define CCP_RSA_MAX_WIDTH 4096 |
| 132 | 196 | ||
| 133 | #define CCP_PASSTHRU_BLOCKSIZE 256 | 197 | #define CCP_PASSTHRU_BLOCKSIZE 256 |
| 134 | #define CCP_PASSTHRU_MASKSIZE 32 | 198 | #define CCP_PASSTHRU_MASKSIZE 32 |
| 135 | #define CCP_PASSTHRU_KSB_COUNT 1 | 199 | #define CCP_PASSTHRU_SB_COUNT 1 |
| 136 | 200 | ||
| 137 | #define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ | 201 | #define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ |
| 138 | #define CCP_ECC_MAX_OPERANDS 6 | 202 | #define CCP_ECC_MAX_OPERANDS 6 |
| @@ -144,31 +208,12 @@ | |||
| 144 | #define CCP_ECC_RESULT_OFFSET 60 | 208 | #define CCP_ECC_RESULT_OFFSET 60 |
| 145 | #define CCP_ECC_RESULT_SUCCESS 0x0001 | 209 | #define CCP_ECC_RESULT_SUCCESS 0x0001 |
| 146 | 210 | ||
| 147 | struct ccp_op; | 211 | #define CCP_SB_BYTES 32 |
| 148 | |||
| 149 | /* Structure for computation functions that are device-specific */ | ||
| 150 | struct ccp_actions { | ||
| 151 | int (*perform_aes)(struct ccp_op *); | ||
| 152 | int (*perform_xts_aes)(struct ccp_op *); | ||
| 153 | int (*perform_sha)(struct ccp_op *); | ||
| 154 | int (*perform_rsa)(struct ccp_op *); | ||
| 155 | int (*perform_passthru)(struct ccp_op *); | ||
| 156 | int (*perform_ecc)(struct ccp_op *); | ||
| 157 | int (*init)(struct ccp_device *); | ||
| 158 | void (*destroy)(struct ccp_device *); | ||
| 159 | irqreturn_t (*irqhandler)(int, void *); | ||
| 160 | }; | ||
| 161 | |||
| 162 | /* Structure to hold CCP version-specific values */ | ||
| 163 | struct ccp_vdata { | ||
| 164 | unsigned int version; | ||
| 165 | const struct ccp_actions *perform; | ||
| 166 | }; | ||
| 167 | |||
| 168 | extern struct ccp_vdata ccpv3; | ||
| 169 | 212 | ||
| 213 | struct ccp_op; | ||
| 170 | struct ccp_device; | 214 | struct ccp_device; |
| 171 | struct ccp_cmd; | 215 | struct ccp_cmd; |
| 216 | struct ccp_fns; | ||
| 172 | 217 | ||
| 173 | struct ccp_dma_cmd { | 218 | struct ccp_dma_cmd { |
| 174 | struct list_head entry; | 219 | struct list_head entry; |
| @@ -212,9 +257,29 @@ struct ccp_cmd_queue { | |||
| 212 | /* Queue dma pool */ | 257 | /* Queue dma pool */ |
| 213 | struct dma_pool *dma_pool; | 258 | struct dma_pool *dma_pool; |
| 214 | 259 | ||
| 215 | /* Queue reserved KSB regions */ | 260 | /* Queue base address (not necessarily aligned) */
| 216 | u32 ksb_key; | 261 | struct ccp5_desc *qbase; |
| 217 | u32 ksb_ctx; | 262 | |
| 263 | /* Aligned queue start address (per requirement) */ | ||
| 264 | struct mutex q_mutex ____cacheline_aligned; | ||
| 265 | unsigned int qidx; | ||
| 266 | |||
| 267 | /* Version 5 has different requirements for queue memory */ | ||
| 268 | unsigned int qsize; | ||
| 269 | dma_addr_t qbase_dma; | ||
| 270 | dma_addr_t qdma_tail; | ||
| 271 | |||
| 272 | /* Per-queue reserved storage block(s) */ | ||
| 273 | u32 sb_key; | ||
| 274 | u32 sb_ctx; | ||
| 275 | |||
| 276 | /* Bitmap of LSBs that can be accessed by this queue */ | ||
| 277 | DECLARE_BITMAP(lsbmask, MAX_LSB_CNT); | ||
| 278 | /* Private LSB that is assigned to this queue, or -1 if none. | ||
| 279 | * lsbmap tracks allocations within the private LSB; unused otherwise | ||
| 280 | */ | ||
| 281 | unsigned int lsb; | ||
| 282 | DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE); | ||
| 218 | 283 | ||
| 219 | /* Queue processing thread */ | 284 | /* Queue processing thread */ |
| 220 | struct task_struct *kthread; | 285 | struct task_struct *kthread; |
| @@ -229,8 +294,17 @@ struct ccp_cmd_queue { | |||
| 229 | u32 int_err; | 294 | u32 int_err; |
| 230 | 295 | ||
| 231 | /* Register addresses for queue */ | 296 | /* Register addresses for queue */ |
| 297 | void __iomem *reg_control; | ||
| 298 | void __iomem *reg_tail_lo; | ||
| 299 | void __iomem *reg_head_lo; | ||
| 300 | void __iomem *reg_int_enable; | ||
| 301 | void __iomem *reg_interrupt_status; | ||
| 232 | void __iomem *reg_status; | 302 | void __iomem *reg_status; |
| 233 | void __iomem *reg_int_status; | 303 | void __iomem *reg_int_status; |
| 304 | void __iomem *reg_dma_status; | ||
| 305 | void __iomem *reg_dma_read_status; | ||
| 306 | void __iomem *reg_dma_write_status; | ||
| 307 | u32 qcontrol; /* Cached control register */ | ||
| 234 | 308 | ||
| 235 | /* Status values from job */ | 309 | /* Status values from job */ |
| 236 | u32 int_status; | 310 | u32 int_status; |
| @@ -253,16 +327,14 @@ struct ccp_device { | |||
| 253 | 327 | ||
| 254 | struct device *dev; | 328 | struct device *dev; |
| 255 | 329 | ||
| 256 | /* | 330 | /* Bus specific device information |
| 257 | * Bus specific device information | ||
| 258 | */ | 331 | */ |
| 259 | void *dev_specific; | 332 | void *dev_specific; |
| 260 | int (*get_irq)(struct ccp_device *ccp); | 333 | int (*get_irq)(struct ccp_device *ccp); |
| 261 | void (*free_irq)(struct ccp_device *ccp); | 334 | void (*free_irq)(struct ccp_device *ccp); |
| 262 | unsigned int irq; | 335 | unsigned int irq; |
| 263 | 336 | ||
| 264 | /* | 337 | /* I/O area used for device communication. The register mapping |
| 265 | * I/O area used for device communication. The register mapping | ||
| 266 | * starts at an offset into the mapped bar. | 338 | * starts at an offset into the mapped bar. |
| 267 | * The CMD_REQx registers and the Delete_Cmd_Queue_Job register | 339 | * The CMD_REQx registers and the Delete_Cmd_Queue_Job register |
| 268 | * need to be protected while a command queue thread is accessing | 340 | * need to be protected while a command queue thread is accessing |
| @@ -272,8 +344,7 @@ struct ccp_device { | |||
| 272 | void __iomem *io_map; | 344 | void __iomem *io_map; |
| 273 | void __iomem *io_regs; | 345 | void __iomem *io_regs; |
| 274 | 346 | ||
| 275 | /* | 347 | /* Master lists that all cmds are queued on. Because there can be |
| 276 | * Master lists that all cmds are queued on. Because there can be | ||
| 277 | * more than one CCP command queue that can process a cmd a separate | 348 | * more than one CCP command queue that can process a cmd a separate |
| 278 | * backlog list is needed so that the backlog completion call | 349 | * backlog list is needed so that the backlog completion call
| 279 | * completes before the cmd is available for execution. | 350 | * completes before the cmd is available for execution. |
| @@ -283,47 +354,54 @@ struct ccp_device { | |||
| 283 | struct list_head cmd; | 354 | struct list_head cmd; |
| 284 | struct list_head backlog; | 355 | struct list_head backlog; |
| 285 | 356 | ||
| 286 | /* | 357 | /* The command queues. These represent the queues available on the |
| 287 | * The command queues. These represent the queues available on the | ||
| 288 | * CCP that are available for processing cmds | 358 | * CCP that are available for processing cmds |
| 289 | */ | 359 | */ |
| 290 | struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; | 360 | struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; |
| 291 | unsigned int cmd_q_count; | 361 | unsigned int cmd_q_count; |
| 292 | 362 | ||
| 293 | /* | 363 | /* Support for the CCP True RNG |
| 294 | * Support for the CCP True RNG | ||
| 295 | */ | 364 | */ |
| 296 | struct hwrng hwrng; | 365 | struct hwrng hwrng; |
| 297 | unsigned int hwrng_retries; | 366 | unsigned int hwrng_retries; |
| 298 | 367 | ||
| 299 | /* | 368 | /* Support for the CCP DMA capabilities |
| 300 | * Support for the CCP DMA capabilities | ||
| 301 | */ | 369 | */ |
| 302 | struct dma_device dma_dev; | 370 | struct dma_device dma_dev; |
| 303 | struct ccp_dma_chan *ccp_dma_chan; | 371 | struct ccp_dma_chan *ccp_dma_chan; |
| 304 | struct kmem_cache *dma_cmd_cache; | 372 | struct kmem_cache *dma_cmd_cache; |
| 305 | struct kmem_cache *dma_desc_cache; | 373 | struct kmem_cache *dma_desc_cache; |
| 306 | 374 | ||
| 307 | /* | 375 | /* A counter used to generate job-ids for cmds submitted to the CCP |
| 308 | * A counter used to generate job-ids for cmds submitted to the CCP | ||
| 309 | */ | 376 | */ |
| 310 | atomic_t current_id ____cacheline_aligned; | 377 | atomic_t current_id ____cacheline_aligned; |
| 311 | 378 | ||
| 312 | /* | 379 | /* The v3 CCP uses key storage blocks (SB) to maintain context for |
| 313 | * The CCP uses key storage blocks (KSB) to maintain context for certain | 380 | * certain operations. To prevent multiple cmds from using the same |
| 314 | * operations. To prevent multiple cmds from using the same KSB range | 381 | * SB range a command queue reserves an SB range for the duration of |
| 315 | * a command queue reserves a KSB range for the duration of the cmd. | 382 | * the cmd. Each queue will, however, reserve 2 SB blocks for
| 316 | * Each queue, will however, reserve 2 KSB blocks for operations that | 383 | * operations that only require single SB entries (e.g. AES context/iv
| 317 | * only require single KSB entries (eg. AES context/iv and key) in order | 384 | * and key) in order to avoid allocation contention. This will reserve |
| 318 | * to avoid allocation contention. This will reserve at most 10 KSB | 385 | * at most 10 SB entries, leaving 40 SB entries available for dynamic |
| 319 | * entries, leaving 40 KSB entries available for dynamic allocation. | 386 | * allocation. |
| 387 | * | ||
| 388 | * The v5 CCP Local Storage Block (LSB) is broken up into 8 | ||
| 389 | * memory ranges, each of which can be enabled for access by one | ||
| 390 | * or more queues. Device initialization takes this into account, | ||
| 391 | * and attempts to assign one region for exclusive use by each | ||
| 392 | * available queue; the rest are then aggregated as "public" use. | ||
| 393 | * If there are fewer regions than queues, all regions are shared | ||
| 394 | * amongst all queues. | ||
| 320 | */ | 395 | */ |
| 321 | struct mutex ksb_mutex ____cacheline_aligned; | 396 | struct mutex sb_mutex ____cacheline_aligned; |
| 322 | DECLARE_BITMAP(ksb, KSB_COUNT); | 397 | DECLARE_BITMAP(sb, KSB_COUNT); |
| 323 | wait_queue_head_t ksb_queue; | 398 | wait_queue_head_t sb_queue; |
| 324 | unsigned int ksb_avail; | 399 | unsigned int sb_avail; |
| 325 | unsigned int ksb_count; | 400 | unsigned int sb_count; |
| 326 | u32 ksb_start; | 401 | u32 sb_start; |
| 402 | |||
| 403 | /* Bitmap of shared LSBs, if any */ | ||
| 404 | DECLARE_BITMAP(lsbmap, SLSB_MAP_SIZE); | ||
| 327 | 405 | ||
| 328 | /* Suspend support */ | 406 | /* Suspend support */ |
| 329 | unsigned int suspending; | 407 | unsigned int suspending; |
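The private-then-public LSB policy described in the struct comment above could look roughly like this (an illustrative sketch assuming the ccp-dev.h definitions; ccp_assign_lsbs_demo is a made-up name and the real v5 init code may differ in detail):

	static void ccp_assign_lsbs_demo(struct ccp_device *ccp)
	{
		DECLARE_BITMAP(claimed, MAX_LSB_CNT);
		unsigned int i, q;

		bitmap_zero(claimed, MAX_LSB_CNT);

		for (q = 0; q < ccp->cmd_q_count; q++) {
			struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[q];

			cmd_q->lsb = (unsigned int)-1;	/* none yet */
			for_each_set_bit(i, cmd_q->lsbmask, MAX_LSB_CNT) {
				if (!test_bit(i, claimed)) {
					/* First unclaimed region this queue
					 * can reach becomes its private LSB.
					 */
					set_bit(i, claimed);
					cmd_q->lsb = i;
					break;
				}
			}
		}
		/* Regions never claimed here would be aggregated into
		 * ccp->lsbmap as shared/"public" storage.
		 */
	}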
| @@ -335,10 +413,11 @@ struct ccp_device { | |||
| 335 | 413 | ||
| 336 | enum ccp_memtype { | 414 | enum ccp_memtype { |
| 337 | CCP_MEMTYPE_SYSTEM = 0, | 415 | CCP_MEMTYPE_SYSTEM = 0, |
| 338 | CCP_MEMTYPE_KSB, | 416 | CCP_MEMTYPE_SB, |
| 339 | CCP_MEMTYPE_LOCAL, | 417 | CCP_MEMTYPE_LOCAL, |
| 340 | CCP_MEMTYPE__LAST, | 418 | CCP_MEMTYPE__LAST, |
| 341 | }; | 419 | }; |
| 420 | #define CCP_MEMTYPE_LSB CCP_MEMTYPE_SB | ||
| 342 | 421 | ||
| 343 | struct ccp_dma_info { | 422 | struct ccp_dma_info { |
| 344 | dma_addr_t address; | 423 | dma_addr_t address; |
| @@ -379,7 +458,7 @@ struct ccp_mem { | |||
| 379 | enum ccp_memtype type; | 458 | enum ccp_memtype type; |
| 380 | union { | 459 | union { |
| 381 | struct ccp_dma_info dma; | 460 | struct ccp_dma_info dma; |
| 382 | u32 ksb; | 461 | u32 sb; |
| 383 | } u; | 462 | } u; |
| 384 | }; | 463 | }; |
| 385 | 464 | ||
| @@ -419,13 +498,14 @@ struct ccp_op { | |||
| 419 | u32 jobid; | 498 | u32 jobid; |
| 420 | u32 ioc; | 499 | u32 ioc; |
| 421 | u32 soc; | 500 | u32 soc; |
| 422 | u32 ksb_key; | 501 | u32 sb_key; |
| 423 | u32 ksb_ctx; | 502 | u32 sb_ctx; |
| 424 | u32 init; | 503 | u32 init; |
| 425 | u32 eom; | 504 | u32 eom; |
| 426 | 505 | ||
| 427 | struct ccp_mem src; | 506 | struct ccp_mem src; |
| 428 | struct ccp_mem dst; | 507 | struct ccp_mem dst; |
| 508 | struct ccp_mem exp; | ||
| 429 | 509 | ||
| 430 | union { | 510 | union { |
| 431 | struct ccp_aes_op aes; | 511 | struct ccp_aes_op aes; |
| @@ -435,6 +515,7 @@ struct ccp_op { | |||
| 435 | struct ccp_passthru_op passthru; | 515 | struct ccp_passthru_op passthru; |
| 436 | struct ccp_ecc_op ecc; | 516 | struct ccp_ecc_op ecc; |
| 437 | } u; | 517 | } u; |
| 518 | struct ccp_mem key; | ||
| 438 | }; | 519 | }; |
| 439 | 520 | ||
| 440 | static inline u32 ccp_addr_lo(struct ccp_dma_info *info) | 521 | static inline u32 ccp_addr_lo(struct ccp_dma_info *info) |
| @@ -447,6 +528,70 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info) | |||
| 447 | return upper_32_bits(info->address + info->offset) & 0x0000ffff; | 528 | return upper_32_bits(info->address + info->offset) & 0x0000ffff; |
| 448 | } | 529 | } |
| 449 | 530 | ||
| 531 | /** | ||
| 532 | * descriptor for version 5 CCP commands | ||
| 533 | * 8 32-bit words: | ||
| 534 | * word 0: function; engine; control bits | ||
| 535 | * word 1: length of source data | ||
| 536 | * word 2: low 32 bits of source pointer | ||
| 537 | * word 3: upper 16 bits of source pointer; source memory type | ||
| 538 | * word 4: low 32 bits of destination pointer | ||
| 539 | * word 5: upper 16 bits of destination pointer; destination memory type | ||
| 540 | * word 6: low 32 bits of key pointer | ||
| 541 | * word 7: upper 16 bits of key pointer; key memory type | ||
| 542 | */ | ||
| 543 | struct dword0 { | ||
| 544 | __le32 soc:1; | ||
| 545 | __le32 ioc:1; | ||
| 546 | __le32 rsvd1:1; | ||
| 547 | __le32 init:1; | ||
| 548 | __le32 eom:1; /* AES/SHA only */ | ||
| 549 | __le32 function:15; | ||
| 550 | __le32 engine:4; | ||
| 551 | __le32 prot:1; | ||
| 552 | __le32 rsvd2:7; | ||
| 553 | }; | ||
| 554 | |||
| 555 | struct dword3 { | ||
| 556 | __le32 src_hi:16; | ||
| 557 | __le32 src_mem:2; | ||
| 558 | __le32 lsb_cxt_id:8; | ||
| 559 | __le32 rsvd1:5; | ||
| 560 | __le32 fixed:1; | ||
| 561 | }; | ||
| 562 | |||
| 563 | union dword4 { | ||
| 564 | __le32 dst_lo; /* NON-SHA */ | ||
| 565 | __le32 sha_len_lo; /* SHA */ | ||
| 566 | }; | ||
| 567 | |||
| 568 | union dword5 { | ||
| 569 | struct { | ||
| 570 | __le32 dst_hi:16; | ||
| 571 | __le32 dst_mem:2; | ||
| 572 | __le32 rsvd1:13; | ||
| 573 | __le32 fixed:1; | ||
| 574 | } fields; | ||
| 575 | __le32 sha_len_hi; | ||
| 576 | }; | ||
| 577 | |||
| 578 | struct dword7 { | ||
| 579 | __le32 key_hi:16; | ||
| 580 | __le32 key_mem:2; | ||
| 581 | __le32 rsvd1:14; | ||
| 582 | }; | ||
| 583 | |||
| 584 | struct ccp5_desc { | ||
| 585 | struct dword0 dw0; | ||
| 586 | __le32 length; | ||
| 587 | __le32 src_lo; | ||
| 588 | struct dword3 dw3; | ||
| 589 | union dword4 dw4; | ||
| 590 | union dword5 dw5; | ||
| 591 | __le32 key_lo; | ||
| 592 | struct dword7 dw7; | ||
| 593 | }; | ||
| 594 | |||
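To make the eight-word layout concrete, a hedged sketch of filling a descriptor for a system-memory-to-system-memory operation (ccp5_fill_desc_demo is a made-up helper; the engine/function encodings that belong in dw0 are omitted because this patch does not define them):

	static void ccp5_fill_desc_demo(struct ccp5_desc *desc, struct ccp_op *op)
	{
		memset(desc, 0, Q_DESC_SIZE);

		desc->dw0.soc = op->soc;	/* stop on completion */
		desc->dw0.ioc = op->ioc;	/* interrupt on completion */
		desc->dw0.init = op->init;
		desc->dw0.eom = op->eom;	/* AES/SHA only */

		desc->length = op->src.u.dma.length;

		desc->src_lo = ccp_addr_lo(&op->src.u.dma);
		desc->dw3.src_hi = ccp_addr_hi(&op->src.u.dma);
		/* Assumes the memtype enum matches the hardware encoding */
		desc->dw3.src_mem = CCP_MEMTYPE_SYSTEM;

		desc->dw4.dst_lo = ccp_addr_lo(&op->dst.u.dma);
		desc->dw5.fields.dst_hi = ccp_addr_hi(&op->dst.u.dma);
		desc->dw5.fields.dst_mem = CCP_MEMTYPE_SYSTEM;
	}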
| 450 | int ccp_pci_init(void); | 595 | int ccp_pci_init(void); |
| 451 | void ccp_pci_exit(void); | 596 | void ccp_pci_exit(void); |
| 452 | 597 | ||
| @@ -456,13 +601,48 @@ void ccp_platform_exit(void); | |||
| 456 | void ccp_add_device(struct ccp_device *ccp); | 601 | void ccp_add_device(struct ccp_device *ccp); |
| 457 | void ccp_del_device(struct ccp_device *ccp); | 602 | void ccp_del_device(struct ccp_device *ccp); |
| 458 | 603 | ||
| 604 | extern void ccp_log_error(struct ccp_device *, int); | ||
| 605 | |||
| 459 | struct ccp_device *ccp_alloc_struct(struct device *dev); | 606 | struct ccp_device *ccp_alloc_struct(struct device *dev); |
| 460 | bool ccp_queues_suspended(struct ccp_device *ccp); | 607 | bool ccp_queues_suspended(struct ccp_device *ccp); |
| 461 | int ccp_cmd_queue_thread(void *data); | 608 | int ccp_cmd_queue_thread(void *data); |
| 609 | int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait); | ||
| 462 | 610 | ||
| 463 | int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); | 611 | int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); |
| 464 | 612 | ||
| 613 | int ccp_register_rng(struct ccp_device *ccp); | ||
| 614 | void ccp_unregister_rng(struct ccp_device *ccp); | ||
| 465 | int ccp_dmaengine_register(struct ccp_device *ccp); | 615 | int ccp_dmaengine_register(struct ccp_device *ccp); |
| 466 | void ccp_dmaengine_unregister(struct ccp_device *ccp); | 616 | void ccp_dmaengine_unregister(struct ccp_device *ccp); |
| 467 | 617 | ||
| 618 | /* Structure for computation functions that are device-specific */ | ||
| 619 | struct ccp_actions { | ||
| 620 | int (*aes)(struct ccp_op *); | ||
| 621 | int (*xts_aes)(struct ccp_op *); | ||
| 622 | int (*sha)(struct ccp_op *); | ||
| 623 | int (*rsa)(struct ccp_op *); | ||
| 624 | int (*passthru)(struct ccp_op *); | ||
| 625 | int (*ecc)(struct ccp_op *); | ||
| 626 | u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); | ||
| 627 | void (*sbfree)(struct ccp_cmd_queue *, unsigned int, | ||
| 628 | unsigned int); | ||
| 629 | unsigned int (*get_free_slots)(struct ccp_cmd_queue *); | ||
| 630 | int (*init)(struct ccp_device *); | ||
| 631 | void (*destroy)(struct ccp_device *); | ||
| 632 | irqreturn_t (*irqhandler)(int, void *); | ||
| 633 | }; | ||
| 634 | |||
| 635 | /* Structure to hold CCP version-specific values */ | ||
| 636 | struct ccp_vdata { | ||
| 637 | const unsigned int version; | ||
| 638 | void (*setup)(struct ccp_device *); | ||
| 639 | const struct ccp_actions *perform; | ||
| 640 | const unsigned int bar; | ||
| 641 | const unsigned int offset; | ||
| 642 | }; | ||
| 643 | |||
| 644 | extern const struct ccp_vdata ccpv3; | ||
| 645 | extern const struct ccp_vdata ccpv5a; | ||
| 646 | extern const struct ccp_vdata ccpv5b; | ||
| 647 | |||
| 468 | #endif | 648 | #endif |
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 94f77b0f9ae7..6553912804f7 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c | |||
| @@ -299,12 +299,10 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan, | |||
| 299 | { | 299 | { |
| 300 | struct ccp_dma_desc *desc; | 300 | struct ccp_dma_desc *desc; |
| 301 | 301 | ||
| 302 | desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT); | 302 | desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT); |
| 303 | if (!desc) | 303 | if (!desc) |
| 304 | return NULL; | 304 | return NULL; |
| 305 | 305 | ||
| 306 | memset(desc, 0, sizeof(*desc)); | ||
| 307 | |||
| 308 | dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan); | 306 | dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan); |
| 309 | desc->tx_desc.flags = flags; | 307 | desc->tx_desc.flags = flags; |
| 310 | desc->tx_desc.tx_submit = ccp_tx_submit; | 308 | desc->tx_desc.tx_submit = ccp_tx_submit; |
| @@ -650,8 +648,11 @@ int ccp_dmaengine_register(struct ccp_device *ccp) | |||
| 650 | dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, | 648 | dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, |
| 651 | "%s-dmaengine-desc-cache", | 649 | "%s-dmaengine-desc-cache", |
| 652 | ccp->name); | 650 | ccp->name); |
| 653 | if (!dma_cmd_cache_name) | 651 | if (!dma_desc_cache_name) { |
| 654 | return -ENOMEM; | 652 | ret = -ENOMEM; |
| 653 | goto err_cache; | ||
| 654 | } | ||
| 655 | |||
| 655 | ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name, | 656 | ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name, |
| 656 | sizeof(struct ccp_dma_desc), | 657 | sizeof(struct ccp_dma_desc), |
| 657 | sizeof(void *), | 658 | sizeof(void *), |
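The error-path change above matters because by this point the command cache has already been created: the old code tested dma_cmd_cache_name (a copy/paste slip) and returned without cleanup, leaking ccp->dma_cmd_cache. A sketch of what the err_cache target presumably does (the label name comes from the hunk; its body is an assumption):

	err_cache:
		kmem_cache_destroy(ccp->dma_cmd_cache);

		return ret;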
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index ffa2891035ac..50fae4442801 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
| 5 | * | 5 | * |
| 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
| 7 | * Author: Gary R Hook <gary.hook@amd.com> | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
| @@ -20,72 +21,28 @@ | |||
| 20 | #include "ccp-dev.h" | 21 | #include "ccp-dev.h" |
| 21 | 22 | ||
| 22 | /* SHA initial context values */ | 23 | /* SHA initial context values */ |
| 23 | static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 24 | static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = { |
| 24 | cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), | 25 | cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), |
| 25 | cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), | 26 | cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), |
| 26 | cpu_to_be32(SHA1_H4), 0, 0, 0, | 27 | cpu_to_be32(SHA1_H4), |
| 27 | }; | 28 | }; |
| 28 | 29 | ||
| 29 | static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 30 | static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { |
| 30 | cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), | 31 | cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), |
| 31 | cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), | 32 | cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), |
| 32 | cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), | 33 | cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), |
| 33 | cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), | 34 | cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), |
| 34 | }; | 35 | }; |
| 35 | 36 | ||
| 36 | static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 37 | static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { |
| 37 | cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), | 38 | cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), |
| 38 | cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), | 39 | cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), |
| 39 | cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), | 40 | cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), |
| 40 | cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), | 41 | cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), |
| 41 | }; | 42 | }; |
| 42 | 43 | ||
| 43 | static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count) | 44 | #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ |
| 44 | { | 45 | ccp_gen_jobid(ccp) : 0) |
| 45 | int start; | ||
| 46 | |||
| 47 | for (;;) { | ||
| 48 | mutex_lock(&ccp->ksb_mutex); | ||
| 49 | |||
| 50 | start = (u32)bitmap_find_next_zero_area(ccp->ksb, | ||
| 51 | ccp->ksb_count, | ||
| 52 | ccp->ksb_start, | ||
| 53 | count, 0); | ||
| 54 | if (start <= ccp->ksb_count) { | ||
| 55 | bitmap_set(ccp->ksb, start, count); | ||
| 56 | |||
| 57 | mutex_unlock(&ccp->ksb_mutex); | ||
| 58 | break; | ||
| 59 | } | ||
| 60 | |||
| 61 | ccp->ksb_avail = 0; | ||
| 62 | |||
| 63 | mutex_unlock(&ccp->ksb_mutex); | ||
| 64 | |||
| 65 | /* Wait for KSB entries to become available */ | ||
| 66 | if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail)) | ||
| 67 | return 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | return KSB_START + start; | ||
| 71 | } | ||
| 72 | |||
| 73 | static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start, | ||
| 74 | unsigned int count) | ||
| 75 | { | ||
| 76 | if (!start) | ||
| 77 | return; | ||
| 78 | |||
| 79 | mutex_lock(&ccp->ksb_mutex); | ||
| 80 | |||
| 81 | bitmap_clear(ccp->ksb, start - KSB_START, count); | ||
| 82 | |||
| 83 | ccp->ksb_avail = 1; | ||
| 84 | |||
| 85 | mutex_unlock(&ccp->ksb_mutex); | ||
| 86 | |||
| 87 | wake_up_interruptible_all(&ccp->ksb_queue); | ||
| 88 | } | ||
| 89 | 46 | ||
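With these helpers removed from the common code, storage-block allocation goes through the per-version function table instead, e.g. (shape taken from the ccp_actions declaration added to ccp-dev.h; the variable names are illustrative):

	u32 start = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
	...
	cmd_q->ccp->vdata->perform->sbfree(cmd_q, start, sb_count);

This lets v3 keep its global KSB bitmap behavior while v5 allocates from a per-queue LSB.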
| 90 | static u32 ccp_gen_jobid(struct ccp_device *ccp) | 47 | static u32 ccp_gen_jobid(struct ccp_device *ccp) |
| 91 | { | 48 | { |
| @@ -231,7 +188,7 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, | |||
| 231 | unsigned int len, unsigned int se_len, | 188 | unsigned int len, unsigned int se_len, |
| 232 | bool sign_extend) | 189 | bool sign_extend) |
| 233 | { | 190 | { |
| 234 | unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; | 191 | unsigned int nbytes, sg_offset, dm_offset, sb_len, i; |
| 235 | u8 buffer[CCP_REVERSE_BUF_SIZE]; | 192 | u8 buffer[CCP_REVERSE_BUF_SIZE]; |
| 236 | 193 | ||
| 237 | if (WARN_ON(se_len > sizeof(buffer))) | 194 | if (WARN_ON(se_len > sizeof(buffer))) |
| @@ -241,21 +198,21 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, | |||
| 241 | dm_offset = 0; | 198 | dm_offset = 0; |
| 242 | nbytes = len; | 199 | nbytes = len; |
| 243 | while (nbytes) { | 200 | while (nbytes) { |
| 244 | ksb_len = min_t(unsigned int, nbytes, se_len); | 201 | sb_len = min_t(unsigned int, nbytes, se_len); |
| 245 | sg_offset -= ksb_len; | 202 | sg_offset -= sb_len; |
| 246 | 203 | ||
| 247 | scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0); | 204 | scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0); |
| 248 | for (i = 0; i < ksb_len; i++) | 205 | for (i = 0; i < sb_len; i++) |
| 249 | wa->address[dm_offset + i] = buffer[ksb_len - i - 1]; | 206 | wa->address[dm_offset + i] = buffer[sb_len - i - 1]; |
| 250 | 207 | ||
| 251 | dm_offset += ksb_len; | 208 | dm_offset += sb_len; |
| 252 | nbytes -= ksb_len; | 209 | nbytes -= sb_len; |
| 253 | 210 | ||
| 254 | if ((ksb_len != se_len) && sign_extend) { | 211 | if ((sb_len != se_len) && sign_extend) { |
| 255 | /* Must sign-extend to nearest sign-extend length */ | 212 | /* Must sign-extend to nearest sign-extend length */ |
| 256 | if (wa->address[dm_offset - 1] & 0x80) | 213 | if (wa->address[dm_offset - 1] & 0x80) |
| 257 | memset(wa->address + dm_offset, 0xff, | 214 | memset(wa->address + dm_offset, 0xff, |
| 258 | se_len - ksb_len); | 215 | se_len - sb_len); |
| 259 | } | 216 | } |
| 260 | } | 217 | } |
| 261 | 218 | ||
| @@ -266,22 +223,22 @@ static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, | |||
| 266 | struct scatterlist *sg, | 223 | struct scatterlist *sg, |
| 267 | unsigned int len) | 224 | unsigned int len) |
| 268 | { | 225 | { |
| 269 | unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; | 226 | unsigned int nbytes, sg_offset, dm_offset, sb_len, i; |
| 270 | u8 buffer[CCP_REVERSE_BUF_SIZE]; | 227 | u8 buffer[CCP_REVERSE_BUF_SIZE]; |
| 271 | 228 | ||
| 272 | sg_offset = 0; | 229 | sg_offset = 0; |
| 273 | dm_offset = len; | 230 | dm_offset = len; |
| 274 | nbytes = len; | 231 | nbytes = len; |
| 275 | while (nbytes) { | 232 | while (nbytes) { |
| 276 | ksb_len = min_t(unsigned int, nbytes, sizeof(buffer)); | 233 | sb_len = min_t(unsigned int, nbytes, sizeof(buffer)); |
| 277 | dm_offset -= ksb_len; | 234 | dm_offset -= sb_len; |
| 278 | 235 | ||
| 279 | for (i = 0; i < ksb_len; i++) | 236 | for (i = 0; i < sb_len; i++) |
| 280 | buffer[ksb_len - i - 1] = wa->address[dm_offset + i]; | 237 | buffer[sb_len - i - 1] = wa->address[dm_offset + i]; |
| 281 | scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1); | 238 | scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1); |
| 282 | 239 | ||
| 283 | sg_offset += ksb_len; | 240 | sg_offset += sb_len; |
| 284 | nbytes -= ksb_len; | 241 | nbytes -= sb_len; |
| 285 | } | 242 | } |
| 286 | } | 243 | } |
| 287 | 244 | ||
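A worked example of the reversal: feeding ccp_reverse_set_dm_area() a 3-byte big-endian operand {0x80, 0x12, 0x34} with se_len = 4 and sign_extend = true produces wa->address[] = {0x34, 0x12, 0x80, 0xff}: least significant byte first, padded with 0xff because the high bit of the most significant byte was set. ccp_reverse_get_dm_area() performs the inverse copy back out to the scatterlist.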
| @@ -449,9 +406,9 @@ static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, | |||
| 449 | } | 406 | } |
| 450 | } | 407 | } |
| 451 | 408 | ||
| 452 | static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, | 409 | static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q, |
| 453 | struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, | 410 | struct ccp_dm_workarea *wa, u32 jobid, u32 sb, |
| 454 | u32 byte_swap, bool from) | 411 | u32 byte_swap, bool from) |
| 455 | { | 412 | { |
| 456 | struct ccp_op op; | 413 | struct ccp_op op; |
| 457 | 414 | ||
| @@ -463,8 +420,8 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, | |||
| 463 | 420 | ||
| 464 | if (from) { | 421 | if (from) { |
| 465 | op.soc = 1; | 422 | op.soc = 1; |
| 466 | op.src.type = CCP_MEMTYPE_KSB; | 423 | op.src.type = CCP_MEMTYPE_SB; |
| 467 | op.src.u.ksb = ksb; | 424 | op.src.u.sb = sb; |
| 468 | op.dst.type = CCP_MEMTYPE_SYSTEM; | 425 | op.dst.type = CCP_MEMTYPE_SYSTEM; |
| 469 | op.dst.u.dma.address = wa->dma.address; | 426 | op.dst.u.dma.address = wa->dma.address; |
| 470 | op.dst.u.dma.length = wa->length; | 427 | op.dst.u.dma.length = wa->length; |
| @@ -472,27 +429,27 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, | |||
| 472 | op.src.type = CCP_MEMTYPE_SYSTEM; | 429 | op.src.type = CCP_MEMTYPE_SYSTEM; |
| 473 | op.src.u.dma.address = wa->dma.address; | 430 | op.src.u.dma.address = wa->dma.address; |
| 474 | op.src.u.dma.length = wa->length; | 431 | op.src.u.dma.length = wa->length; |
| 475 | op.dst.type = CCP_MEMTYPE_KSB; | 432 | op.dst.type = CCP_MEMTYPE_SB; |
| 476 | op.dst.u.ksb = ksb; | 433 | op.dst.u.sb = sb; |
| 477 | } | 434 | } |
| 478 | 435 | ||
| 479 | op.u.passthru.byte_swap = byte_swap; | 436 | op.u.passthru.byte_swap = byte_swap; |
| 480 | 437 | ||
| 481 | return cmd_q->ccp->vdata->perform->perform_passthru(&op); | 438 | return cmd_q->ccp->vdata->perform->passthru(&op); |
| 482 | } | 439 | } |
| 483 | 440 | ||
| 484 | static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q, | 441 | static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q, |
| 485 | struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, | 442 | struct ccp_dm_workarea *wa, u32 jobid, u32 sb, |
| 486 | u32 byte_swap) | 443 | u32 byte_swap) |
| 487 | { | 444 | { |
| 488 | return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false); | 445 | return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false); |
| 489 | } | 446 | } |
| 490 | 447 | ||
| 491 | static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q, | 448 | static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, |
| 492 | struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, | 449 | struct ccp_dm_workarea *wa, u32 jobid, u32 sb, |
| 493 | u32 byte_swap) | 450 | u32 byte_swap) |
| 494 | { | 451 | { |
| 495 | return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true); | 452 | return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); |
| 496 | } | 453 | } |
| 497 | 454 | ||
| 498 | static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, | 455 | static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, |
| @@ -527,54 +484,54 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 527 | return -EINVAL; | 484 | return -EINVAL; |
| 528 | } | 485 | } |
| 529 | 486 | ||
| 530 | BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1); | 487 | BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); |
| 531 | BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1); | 488 | BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); |
| 532 | 489 | ||
| 533 | ret = -EIO; | 490 | ret = -EIO; |
| 534 | memset(&op, 0, sizeof(op)); | 491 | memset(&op, 0, sizeof(op)); |
| 535 | op.cmd_q = cmd_q; | 492 | op.cmd_q = cmd_q; |
| 536 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 493 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
| 537 | op.ksb_key = cmd_q->ksb_key; | 494 | op.sb_key = cmd_q->sb_key; |
| 538 | op.ksb_ctx = cmd_q->ksb_ctx; | 495 | op.sb_ctx = cmd_q->sb_ctx; |
| 539 | op.init = 1; | 496 | op.init = 1; |
| 540 | op.u.aes.type = aes->type; | 497 | op.u.aes.type = aes->type; |
| 541 | op.u.aes.mode = aes->mode; | 498 | op.u.aes.mode = aes->mode; |
| 542 | op.u.aes.action = aes->action; | 499 | op.u.aes.action = aes->action; |
| 543 | 500 | ||
| 544 | /* All supported key sizes fit in a single (32-byte) KSB entry | 501 | /* All supported key sizes fit in a single (32-byte) SB entry |
| 545 | * and must be in little endian format. Use the 256-bit byte | 502 | * and must be in little endian format. Use the 256-bit byte |
| 546 | * swap passthru option to convert from big endian to little | 503 | * swap passthru option to convert from big endian to little |
| 547 | * endian. | 504 | * endian. |
| 548 | */ | 505 | */ |
| 549 | ret = ccp_init_dm_workarea(&key, cmd_q, | 506 | ret = ccp_init_dm_workarea(&key, cmd_q, |
| 550 | CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, | 507 | CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, |
| 551 | DMA_TO_DEVICE); | 508 | DMA_TO_DEVICE); |
| 552 | if (ret) | 509 | if (ret) |
| 553 | return ret; | 510 | return ret; |
| 554 | 511 | ||
| 555 | dm_offset = CCP_KSB_BYTES - aes->key_len; | 512 | dm_offset = CCP_SB_BYTES - aes->key_len; |
| 556 | ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); | 513 | ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); |
| 557 | ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, | 514 | ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, |
| 558 | CCP_PASSTHRU_BYTESWAP_256BIT); | 515 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 559 | if (ret) { | 516 | if (ret) { |
| 560 | cmd->engine_error = cmd_q->cmd_error; | 517 | cmd->engine_error = cmd_q->cmd_error; |
| 561 | goto e_key; | 518 | goto e_key; |
| 562 | } | 519 | } |
| 563 | 520 | ||
| 564 | /* The AES context fits in a single (32-byte) KSB entry and | 521 | /* The AES context fits in a single (32-byte) SB entry and |
| 565 | * must be in little endian format. Use the 256-bit byte swap | 522 | * must be in little endian format. Use the 256-bit byte swap |
| 566 | * passthru option to convert from big endian to little endian. | 523 | * passthru option to convert from big endian to little endian. |
| 567 | */ | 524 | */ |
| 568 | ret = ccp_init_dm_workarea(&ctx, cmd_q, | 525 | ret = ccp_init_dm_workarea(&ctx, cmd_q, |
| 569 | CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, | 526 | CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, |
| 570 | DMA_BIDIRECTIONAL); | 527 | DMA_BIDIRECTIONAL); |
| 571 | if (ret) | 528 | if (ret) |
| 572 | goto e_key; | 529 | goto e_key; |
| 573 | 530 | ||
| 574 | dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; | 531 | dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; |
| 575 | ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); | 532 | ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); |
| 576 | ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 533 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 577 | CCP_PASSTHRU_BYTESWAP_256BIT); | 534 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 578 | if (ret) { | 535 | if (ret) { |
| 579 | cmd->engine_error = cmd_q->cmd_error; | 536 | cmd->engine_error = cmd_q->cmd_error; |
| 580 | goto e_ctx; | 537 | goto e_ctx; |
| @@ -592,9 +549,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 592 | op.eom = 1; | 549 | op.eom = 1; |
| 593 | 550 | ||
| 594 | /* Push the K1/K2 key to the CCP now */ | 551 | /* Push the K1/K2 key to the CCP now */ |
| 595 | ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, | 552 | ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, |
| 596 | op.ksb_ctx, | 553 | op.sb_ctx, |
| 597 | CCP_PASSTHRU_BYTESWAP_256BIT); | 554 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 598 | if (ret) { | 555 | if (ret) { |
| 599 | cmd->engine_error = cmd_q->cmd_error; | 556 | cmd->engine_error = cmd_q->cmd_error; |
| 600 | goto e_src; | 557 | goto e_src; |
| @@ -602,15 +559,15 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 602 | 559 | ||
| 603 | ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, | 560 | ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, |
| 604 | aes->cmac_key_len); | 561 | aes->cmac_key_len); |
| 605 | ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 562 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 606 | CCP_PASSTHRU_BYTESWAP_256BIT); | 563 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 607 | if (ret) { | 564 | if (ret) { |
| 608 | cmd->engine_error = cmd_q->cmd_error; | 565 | cmd->engine_error = cmd_q->cmd_error; |
| 609 | goto e_src; | 566 | goto e_src; |
| 610 | } | 567 | } |
| 611 | } | 568 | } |
| 612 | 569 | ||
| 613 | ret = cmd_q->ccp->vdata->perform->perform_aes(&op); | 570 | ret = cmd_q->ccp->vdata->perform->aes(&op); |
| 614 | if (ret) { | 571 | if (ret) { |
| 615 | cmd->engine_error = cmd_q->cmd_error; | 572 | cmd->engine_error = cmd_q->cmd_error; |
| 616 | goto e_src; | 573 | goto e_src; |
| @@ -622,15 +579,15 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 622 | /* Retrieve the AES context - convert from LE to BE using | 579 | /* Retrieve the AES context - convert from LE to BE using |
| 623 | * 32-byte (256-bit) byteswapping | 580 | * 32-byte (256-bit) byteswapping |
| 624 | */ | 581 | */ |
| 625 | ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 582 | ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 626 | CCP_PASSTHRU_BYTESWAP_256BIT); | 583 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 627 | if (ret) { | 584 | if (ret) { |
| 628 | cmd->engine_error = cmd_q->cmd_error; | 585 | cmd->engine_error = cmd_q->cmd_error; |
| 629 | goto e_src; | 586 | goto e_src; |
| 630 | } | 587 | } |
| 631 | 588 | ||
| 632 | /* ...but we only need AES_BLOCK_SIZE bytes */ | 589 | /* ...but we only need AES_BLOCK_SIZE bytes */ |
| 633 | dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; | 590 | dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; |
| 634 | ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); | 591 | ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); |
| 635 | 592 | ||
| 636 | e_src: | 593 | e_src: |
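To make the 256-bit byteswap comments in this hunk concrete: a 16-byte AES-128 key is placed at dm_offset = CCP_SB_BYTES - 16 = 16, i.e. bytes 16..31 of the 32-byte work area, in big-endian order. The 256-bit byteswap applied by the passthru copy then reverses all 32 bytes, so the key lands in bytes 0..15 in little-endian order, which is the layout the engine expects, with the remaining bytes acting as padding.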
| @@ -680,56 +637,56 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 680 | return -EINVAL; | 637 | return -EINVAL; |
| 681 | } | 638 | } |
| 682 | 639 | ||
| 683 | BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1); | 640 | BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); |
| 684 | BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1); | 641 | BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); |
| 685 | 642 | ||
| 686 | ret = -EIO; | 643 | ret = -EIO; |
| 687 | memset(&op, 0, sizeof(op)); | 644 | memset(&op, 0, sizeof(op)); |
| 688 | op.cmd_q = cmd_q; | 645 | op.cmd_q = cmd_q; |
| 689 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 646 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
| 690 | op.ksb_key = cmd_q->ksb_key; | 647 | op.sb_key = cmd_q->sb_key; |
| 691 | op.ksb_ctx = cmd_q->ksb_ctx; | 648 | op.sb_ctx = cmd_q->sb_ctx; |
| 692 | op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; | 649 | op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; |
| 693 | op.u.aes.type = aes->type; | 650 | op.u.aes.type = aes->type; |
| 694 | op.u.aes.mode = aes->mode; | 651 | op.u.aes.mode = aes->mode; |
| 695 | op.u.aes.action = aes->action; | 652 | op.u.aes.action = aes->action; |
| 696 | 653 | ||
| 697 | /* All supported key sizes fit in a single (32-byte) KSB entry | 654 | /* All supported key sizes fit in a single (32-byte) SB entry |
| 698 | * and must be in little endian format. Use the 256-bit byte | 655 | * and must be in little endian format. Use the 256-bit byte |
| 699 | * swap passthru option to convert from big endian to little | 656 | * swap passthru option to convert from big endian to little |
| 700 | * endian. | 657 | * endian. |
| 701 | */ | 658 | */ |
| 702 | ret = ccp_init_dm_workarea(&key, cmd_q, | 659 | ret = ccp_init_dm_workarea(&key, cmd_q, |
| 703 | CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, | 660 | CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, |
| 704 | DMA_TO_DEVICE); | 661 | DMA_TO_DEVICE); |
| 705 | if (ret) | 662 | if (ret) |
| 706 | return ret; | 663 | return ret; |
| 707 | 664 | ||
| 708 | dm_offset = CCP_KSB_BYTES - aes->key_len; | 665 | dm_offset = CCP_SB_BYTES - aes->key_len; |
| 709 | ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); | 666 | ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); |
| 710 | ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, | 667 | ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, |
| 711 | CCP_PASSTHRU_BYTESWAP_256BIT); | 668 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 712 | if (ret) { | 669 | if (ret) { |
| 713 | cmd->engine_error = cmd_q->cmd_error; | 670 | cmd->engine_error = cmd_q->cmd_error; |
| 714 | goto e_key; | 671 | goto e_key; |
| 715 | } | 672 | } |
| 716 | 673 | ||
| 717 | /* The AES context fits in a single (32-byte) KSB entry and | 674 | /* The AES context fits in a single (32-byte) SB entry and |
| 718 | * must be in little endian format. Use the 256-bit byte swap | 675 | * must be in little endian format. Use the 256-bit byte swap |
| 719 | * passthru option to convert from big endian to little endian. | 676 | * passthru option to convert from big endian to little endian. |
| 720 | */ | 677 | */ |
| 721 | ret = ccp_init_dm_workarea(&ctx, cmd_q, | 678 | ret = ccp_init_dm_workarea(&ctx, cmd_q, |
| 722 | CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, | 679 | CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, |
| 723 | DMA_BIDIRECTIONAL); | 680 | DMA_BIDIRECTIONAL); |
| 724 | if (ret) | 681 | if (ret) |
| 725 | goto e_key; | 682 | goto e_key; |
| 726 | 683 | ||
| 727 | if (aes->mode != CCP_AES_MODE_ECB) { | 684 | if (aes->mode != CCP_AES_MODE_ECB) { |
| 728 | /* Load the AES context - conver to LE */ | 685 | /* Load the AES context - convert to LE */ |
| 729 | dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; | 686 | dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; |
| 730 | ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); | 687 | ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); |
| 731 | ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 688 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 732 | CCP_PASSTHRU_BYTESWAP_256BIT); | 689 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 733 | if (ret) { | 690 | if (ret) { |
| 734 | cmd->engine_error = cmd_q->cmd_error; | 691 | cmd->engine_error = cmd_q->cmd_error; |
| 735 | goto e_ctx; | 692 | goto e_ctx; |
| @@ -772,7 +729,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 772 | op.soc = 1; | 729 | op.soc = 1; |
| 773 | } | 730 | } |
| 774 | 731 | ||
| 775 | ret = cmd_q->ccp->vdata->perform->perform_aes(&op); | 732 | ret = cmd_q->ccp->vdata->perform->aes(&op); |
| 776 | if (ret) { | 733 | if (ret) { |
| 777 | cmd->engine_error = cmd_q->cmd_error; | 734 | cmd->engine_error = cmd_q->cmd_error; |
| 778 | goto e_dst; | 735 | goto e_dst; |
| @@ -785,15 +742,15 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 785 | /* Retrieve the AES context - convert from LE to BE using | 742 | /* Retrieve the AES context - convert from LE to BE using |
| 786 | * 32-byte (256-bit) byteswapping | 743 | * 32-byte (256-bit) byteswapping |
| 787 | */ | 744 | */ |
| 788 | ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 745 | ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 789 | CCP_PASSTHRU_BYTESWAP_256BIT); | 746 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 790 | if (ret) { | 747 | if (ret) { |
| 791 | cmd->engine_error = cmd_q->cmd_error; | 748 | cmd->engine_error = cmd_q->cmd_error; |
| 792 | goto e_dst; | 749 | goto e_dst; |
| 793 | } | 750 | } |
| 794 | 751 | ||
| 795 | /* ...but we only need AES_BLOCK_SIZE bytes */ | 752 | /* ...but we only need AES_BLOCK_SIZE bytes */ |
| 796 | dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; | 753 | dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; |
| 797 | ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); | 754 | ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); |
| 798 | } | 755 | } |
| 799 | 756 | ||
| @@ -857,53 +814,53 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 857 | if (!xts->key || !xts->iv || !xts->src || !xts->dst) | 814 | if (!xts->key || !xts->iv || !xts->src || !xts->dst) |
| 858 | return -EINVAL; | 815 | return -EINVAL; |
| 859 | 816 | ||
| 860 | BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1); | 817 | BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); |
| 861 | BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1); | 818 | BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); |
| 862 | 819 | ||
| 863 | ret = -EIO; | 820 | ret = -EIO; |
| 864 | memset(&op, 0, sizeof(op)); | 821 | memset(&op, 0, sizeof(op)); |
| 865 | op.cmd_q = cmd_q; | 822 | op.cmd_q = cmd_q; |
| 866 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 823 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
| 867 | op.ksb_key = cmd_q->ksb_key; | 824 | op.sb_key = cmd_q->sb_key; |
| 868 | op.ksb_ctx = cmd_q->ksb_ctx; | 825 | op.sb_ctx = cmd_q->sb_ctx; |
| 869 | op.init = 1; | 826 | op.init = 1; |
| 870 | op.u.xts.action = xts->action; | 827 | op.u.xts.action = xts->action; |
| 871 | op.u.xts.unit_size = xts->unit_size; | 828 | op.u.xts.unit_size = xts->unit_size; |
| 872 | 829 | ||
| 873 | /* All supported key sizes fit in a single (32-byte) KSB entry | 830 | /* All supported key sizes fit in a single (32-byte) SB entry |
| 874 | * and must be in little endian format. Use the 256-bit byte | 831 | * and must be in little endian format. Use the 256-bit byte |
| 875 | * swap passthru option to convert from big endian to little | 832 | * swap passthru option to convert from big endian to little |
| 876 | * endian. | 833 | * endian. |
| 877 | */ | 834 | */ |
| 878 | ret = ccp_init_dm_workarea(&key, cmd_q, | 835 | ret = ccp_init_dm_workarea(&key, cmd_q, |
| 879 | CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, | 836 | CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES, |
| 880 | DMA_TO_DEVICE); | 837 | DMA_TO_DEVICE); |
| 881 | if (ret) | 838 | if (ret) |
| 882 | return ret; | 839 | return ret; |
| 883 | 840 | ||
| 884 | dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128; | 841 | dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; |
| 885 | ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); | 842 | ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); |
| 886 | ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len); | 843 | ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len); |
| 887 | ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, | 844 | ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, |
| 888 | CCP_PASSTHRU_BYTESWAP_256BIT); | 845 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 889 | if (ret) { | 846 | if (ret) { |
| 890 | cmd->engine_error = cmd_q->cmd_error; | 847 | cmd->engine_error = cmd_q->cmd_error; |
| 891 | goto e_key; | 848 | goto e_key; |
| 892 | } | 849 | } |
| 893 | 850 | ||
| 894 | /* The AES context fits in a single (32-byte) KSB entry and | 851 | /* The AES context fits in a single (32-byte) SB entry and |
| 895 | * for XTS is already in little endian format so no byte swapping | 852 | * for XTS is already in little endian format so no byte swapping |
| 896 | * is needed. | 853 | * is needed. |
| 897 | */ | 854 | */ |
| 898 | ret = ccp_init_dm_workarea(&ctx, cmd_q, | 855 | ret = ccp_init_dm_workarea(&ctx, cmd_q, |
| 899 | CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, | 856 | CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES, |
| 900 | DMA_BIDIRECTIONAL); | 857 | DMA_BIDIRECTIONAL); |
| 901 | if (ret) | 858 | if (ret) |
| 902 | goto e_key; | 859 | goto e_key; |
| 903 | 860 | ||
| 904 | ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); | 861 | ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); |
| 905 | ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 862 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 906 | CCP_PASSTHRU_BYTESWAP_NOOP); | 863 | CCP_PASSTHRU_BYTESWAP_NOOP); |
| 907 | if (ret) { | 864 | if (ret) { |
| 908 | cmd->engine_error = cmd_q->cmd_error; | 865 | cmd->engine_error = cmd_q->cmd_error; |
| 909 | goto e_ctx; | 866 | goto e_ctx; |
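The two ccp_set_dm_area() calls in the hunk above load the XTS key pair into a single 32-byte storage-block entry with the halves exchanged: the cipher key lands in the upper half of the entry and the tweak key in the lower half. A minimal sketch of that layout, assuming xts->key carries the two 128-bit halves back to back; the buffer names here are illustrative, not the driver's:

#include <stdint.h>
#include <string.h>

#define SB_BYTES	32
#define AES_KEYSIZE_128	16

/* Lay out the caller's key1 || key2 as key2 | key1 inside one 32-byte
 * SB entry, mirroring the pair of ccp_set_dm_area() calls above.
 */
static void sb_load_xts_key(uint8_t sb[SB_BYTES], const uint8_t *xts_key)
{
	size_t dm_offset = SB_BYTES - AES_KEYSIZE_128;

	memcpy(sb + dm_offset, xts_key, AES_KEYSIZE_128);  /* key1 -> high half */
	memcpy(sb, xts_key + dm_offset, AES_KEYSIZE_128);  /* key2 -> low half  */
}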
| @@ -937,7 +894,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 937 | if (!src.sg_wa.bytes_left) | 894 | if (!src.sg_wa.bytes_left) |
| 938 | op.eom = 1; | 895 | op.eom = 1; |
| 939 | 896 | ||
| 940 | ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op); | 897 | ret = cmd_q->ccp->vdata->perform->xts_aes(&op); |
| 941 | if (ret) { | 898 | if (ret) { |
| 942 | cmd->engine_error = cmd_q->cmd_error; | 899 | cmd->engine_error = cmd_q->cmd_error; |
| 943 | goto e_dst; | 900 | goto e_dst; |
| @@ -949,15 +906,15 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 949 | /* Retrieve the AES context - convert from LE to BE using | 906 | /* Retrieve the AES context - convert from LE to BE using |
| 950 | * 32-byte (256-bit) byteswapping | 907 | * 32-byte (256-bit) byteswapping |
| 951 | */ | 908 | */ |
| 952 | ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 909 | ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 953 | CCP_PASSTHRU_BYTESWAP_256BIT); | 910 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 954 | if (ret) { | 911 | if (ret) { |
| 955 | cmd->engine_error = cmd_q->cmd_error; | 912 | cmd->engine_error = cmd_q->cmd_error; |
| 956 | goto e_dst; | 913 | goto e_dst; |
| 957 | } | 914 | } |
| 958 | 915 | ||
| 959 | /* ...but we only need AES_BLOCK_SIZE bytes */ | 916 | /* ...but we only need AES_BLOCK_SIZE bytes */ |
| 960 | dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; | 917 | dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; |
| 961 | ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); | 918 | ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); |
| 962 | 919 | ||
| 963 | e_dst: | 920 | e_dst: |
| @@ -982,163 +939,227 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 982 | struct ccp_dm_workarea ctx; | 939 | struct ccp_dm_workarea ctx; |
| 983 | struct ccp_data src; | 940 | struct ccp_data src; |
| 984 | struct ccp_op op; | 941 | struct ccp_op op; |
| 942 | unsigned int ioffset, ooffset; | ||
| 943 | unsigned int digest_size; | ||
| 944 | int sb_count; | ||
| 945 | const void *init; | ||
| 946 | u64 block_size; | ||
| 947 | int ctx_size; | ||
| 985 | int ret; | 948 | int ret; |
| 986 | 949 | ||
| 987 | if (sha->ctx_len != CCP_SHA_CTXSIZE) | 950 | switch (sha->type) { |
| 951 | case CCP_SHA_TYPE_1: | ||
| 952 | if (sha->ctx_len < SHA1_DIGEST_SIZE) | ||
| 953 | return -EINVAL; | ||
| 954 | block_size = SHA1_BLOCK_SIZE; | ||
| 955 | break; | ||
| 956 | case CCP_SHA_TYPE_224: | ||
| 957 | if (sha->ctx_len < SHA224_DIGEST_SIZE) | ||
| 958 | return -EINVAL; | ||
| 959 | block_size = SHA224_BLOCK_SIZE; | ||
| 960 | break; | ||
| 961 | case CCP_SHA_TYPE_256: | ||
| 962 | if (sha->ctx_len < SHA256_DIGEST_SIZE) | ||
| 963 | return -EINVAL; | ||
| 964 | block_size = SHA256_BLOCK_SIZE; | ||
| 965 | break; | ||
| 966 | default: | ||
| 988 | return -EINVAL; | 967 | return -EINVAL; |
| 968 | } | ||
| 989 | 969 | ||
| 990 | if (!sha->ctx) | 970 | if (!sha->ctx) |
| 991 | return -EINVAL; | 971 | return -EINVAL; |
| 992 | 972 | ||
| 993 | if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1))) | 973 | if (!sha->final && (sha->src_len & (block_size - 1))) |
| 994 | return -EINVAL; | 974 | return -EINVAL; |
| 995 | 975 | ||
| 996 | if (!sha->src_len) { | 976 | /* The version 3 device can't handle zero-length input */ |
| 997 | const u8 *sha_zero; | 977 | if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { |
| 998 | 978 | ||
| 999 | /* Not final, just return */ | 979 | if (!sha->src_len) { |
| 1000 | if (!sha->final) | 980 | unsigned int digest_len; |
| 1001 | return 0; | 981 | const u8 *sha_zero; |
| 1002 | 982 | ||
| 1003 | /* CCP can't do a zero length sha operation so the caller | 983 | /* Not final, just return */ |
| 1004 | * must buffer the data. | 984 | if (!sha->final) |
| 1005 | */ | 985 | return 0; |
| 1006 | if (sha->msg_bits) | ||
| 1007 | return -EINVAL; | ||
| 1008 | 986 | ||
| 1009 | /* The CCP cannot perform zero-length sha operations so the | 987 | /* CCP can't do a zero length sha operation so the |
| 1010 | * caller is required to buffer data for the final operation. | 988 | * caller must buffer the data. |
| 1011 | * However, a sha operation for a message with a total length | 989 | */ |
| 1012 | * of zero is valid so known values are required to supply | 990 | if (sha->msg_bits) |
| 1013 | * the result. | 991 | return -EINVAL; |
| 1014 | */ | 992 | |
| 1015 | switch (sha->type) { | 993 | /* The CCP cannot perform zero-length sha operations |
| 1016 | case CCP_SHA_TYPE_1: | 994 | * so the caller is required to buffer data for the |
| 1017 | sha_zero = sha1_zero_message_hash; | 995 | * final operation. However, a sha operation for a |
| 1018 | break; | 996 | * message with a total length of zero is valid so |
| 1019 | case CCP_SHA_TYPE_224: | 997 | * known values are required to supply the result. |
| 1020 | sha_zero = sha224_zero_message_hash; | 998 | */ |
| 1021 | break; | 999 | switch (sha->type) { |
| 1022 | case CCP_SHA_TYPE_256: | 1000 | case CCP_SHA_TYPE_1: |
| 1023 | sha_zero = sha256_zero_message_hash; | 1001 | sha_zero = sha1_zero_message_hash; |
| 1024 | break; | 1002 | digest_len = SHA1_DIGEST_SIZE; |
| 1025 | default: | 1003 | break; |
| 1026 | return -EINVAL; | 1004 | case CCP_SHA_TYPE_224: |
| 1027 | } | 1005 | sha_zero = sha224_zero_message_hash; |
| 1006 | digest_len = SHA224_DIGEST_SIZE; | ||
| 1007 | break; | ||
| 1008 | case CCP_SHA_TYPE_256: | ||
| 1009 | sha_zero = sha256_zero_message_hash; | ||
| 1010 | digest_len = SHA256_DIGEST_SIZE; | ||
| 1011 | break; | ||
| 1012 | default: | ||
| 1013 | return -EINVAL; | ||
| 1014 | } | ||
| 1028 | 1015 | ||
| 1029 | scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, | 1016 | scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, |
| 1030 | sha->ctx_len, 1); | 1017 | digest_len, 1); |
| 1031 | 1018 | ||
| 1032 | return 0; | 1019 | return 0; |
| 1020 | } | ||
| 1033 | } | 1021 | } |
| 1034 | 1022 | ||
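The zero-length branch above works because the engine cannot hash an empty buffer, yet the digest of the empty message is a fixed, well-known constant per algorithm, so the driver copies a precomputed value instead of touching the hardware. A standalone sketch of the same idea for SHA-1 (the constant is the published SHA-1 of the empty string; error handling is simplified):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static const uint8_t sha1_zero_message[20] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55,
	0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09,
};

/* Final update with no buffered data: hand back SHA1("") directly. */
static int sha1_final_empty(uint8_t *digest, size_t len)
{
	if (len < sizeof(sha1_zero_message))
		return -1;
	memcpy(digest, sha1_zero_message, sizeof(sha1_zero_message));
	return 0;
}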
| 1035 | if (!sha->src) | 1023 | /* Set variables used throughout */ |
| 1036 | return -EINVAL; | 1024 | switch (sha->type) { |
| 1025 | case CCP_SHA_TYPE_1: | ||
| 1026 | digest_size = SHA1_DIGEST_SIZE; | ||
| 1027 | init = (void *) ccp_sha1_init; | ||
| 1028 | ctx_size = SHA1_DIGEST_SIZE; | ||
| 1029 | sb_count = 1; | ||
| 1030 | if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) | ||
| 1031 | ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; | ||
| 1032 | else | ||
| 1033 | ooffset = ioffset = 0; | ||
| 1034 | break; | ||
| 1035 | case CCP_SHA_TYPE_224: | ||
| 1036 | digest_size = SHA224_DIGEST_SIZE; | ||
| 1037 | init = (void *) ccp_sha224_init; | ||
| 1038 | ctx_size = SHA256_DIGEST_SIZE; | ||
| 1039 | sb_count = 1; | ||
| 1040 | ioffset = 0; | ||
| 1041 | if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) | ||
| 1042 | ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; | ||
| 1043 | else | ||
| 1044 | ooffset = 0; | ||
| 1045 | break; | ||
| 1046 | case CCP_SHA_TYPE_256: | ||
| 1047 | digest_size = SHA256_DIGEST_SIZE; | ||
| 1048 | init = (void *) ccp_sha256_init; | ||
| 1049 | ctx_size = SHA256_DIGEST_SIZE; | ||
| 1050 | sb_count = 1; | ||
| 1051 | ooffset = ioffset = 0; | ||
| 1052 | break; | ||
| 1053 | default: | ||
| 1054 | ret = -EINVAL; | ||
| 1055 | goto e_data; | ||
| 1056 | } | ||
| 1037 | 1057 | ||
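The switch just above fixes four per-type parameters in one place: digest size, initial state, context size, and where the state sits inside the 32-byte SB entry (right-aligned for SHA-1/SHA-224 on parts newer than v3, offset zero on v3 and for SHA-256). A sketch of how ioffset and ooffset are then used when seeding and reading back the context; the helper names and simplifications are mine, not the driver's:

#include <stdint.h>
#include <string.h>

#define SB_BYTES 32

/* Seed the context entry at the input offset chosen per SHA type. */
static void ctx_seed(uint8_t ctx[SB_BYTES], unsigned int ioffset,
		     const void *init, size_t ctx_size)
{
	memcpy(ctx + ioffset, init, ctx_size);
}

/* Read the finished digest back from the output offset. */
static void ctx_read_digest(uint8_t *out, const uint8_t ctx[SB_BYTES],
			    unsigned int ooffset, size_t digest_size)
{
	memcpy(out, ctx + ooffset, digest_size);
}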
| 1038 | BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1); | 1058 | /* For zero-length plaintext the src pointer is ignored; |
| 1059 | * otherwise both parts must be valid | ||
| 1060 | */ | ||
| 1061 | if (sha->src_len && !sha->src) | ||
| 1062 | return -EINVAL; | ||
| 1039 | 1063 | ||
| 1040 | memset(&op, 0, sizeof(op)); | 1064 | memset(&op, 0, sizeof(op)); |
| 1041 | op.cmd_q = cmd_q; | 1065 | op.cmd_q = cmd_q; |
| 1042 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1066 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
| 1043 | op.ksb_ctx = cmd_q->ksb_ctx; | 1067 | op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ |
| 1044 | op.u.sha.type = sha->type; | 1068 | op.u.sha.type = sha->type; |
| 1045 | op.u.sha.msg_bits = sha->msg_bits; | 1069 | op.u.sha.msg_bits = sha->msg_bits; |
| 1046 | 1070 | ||
| 1047 | /* The SHA context fits in a single (32-byte) KSB entry and | 1071 | ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, |
| 1048 | * must be in little endian format. Use the 256-bit byte swap | ||
| 1049 | * passthru option to convert from big endian to little endian. | ||
| 1050 | */ | ||
| 1051 | ret = ccp_init_dm_workarea(&ctx, cmd_q, | ||
| 1052 | CCP_SHA_KSB_COUNT * CCP_KSB_BYTES, | ||
| 1053 | DMA_BIDIRECTIONAL); | 1072 | DMA_BIDIRECTIONAL); |
| 1054 | if (ret) | 1073 | if (ret) |
| 1055 | return ret; | 1074 | return ret; |
| 1056 | |||
| 1057 | if (sha->first) { | 1075 | if (sha->first) { |
| 1058 | const __be32 *init; | ||
| 1059 | |||
| 1060 | switch (sha->type) { | 1076 | switch (sha->type) { |
| 1061 | case CCP_SHA_TYPE_1: | 1077 | case CCP_SHA_TYPE_1: |
| 1062 | init = ccp_sha1_init; | ||
| 1063 | break; | ||
| 1064 | case CCP_SHA_TYPE_224: | 1078 | case CCP_SHA_TYPE_224: |
| 1065 | init = ccp_sha224_init; | ||
| 1066 | break; | ||
| 1067 | case CCP_SHA_TYPE_256: | 1079 | case CCP_SHA_TYPE_256: |
| 1068 | init = ccp_sha256_init; | 1080 | memcpy(ctx.address + ioffset, init, ctx_size); |
| 1069 | break; | 1081 | break; |
| 1070 | default: | 1082 | default: |
| 1071 | ret = -EINVAL; | 1083 | ret = -EINVAL; |
| 1072 | goto e_ctx; | 1084 | goto e_ctx; |
| 1073 | } | 1085 | } |
| 1074 | memcpy(ctx.address, init, CCP_SHA_CTXSIZE); | ||
| 1075 | } else { | 1086 | } else { |
| 1076 | ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); | 1087 | /* Restore the context */ |
| 1088 | ccp_set_dm_area(&ctx, 0, sha->ctx, 0, | ||
| 1089 | sb_count * CCP_SB_BYTES); | ||
| 1077 | } | 1090 | } |
| 1078 | 1091 | ||
| 1079 | ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 1092 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 1080 | CCP_PASSTHRU_BYTESWAP_256BIT); | 1093 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 1081 | if (ret) { | 1094 | if (ret) { |
| 1082 | cmd->engine_error = cmd_q->cmd_error; | 1095 | cmd->engine_error = cmd_q->cmd_error; |
| 1083 | goto e_ctx; | 1096 | goto e_ctx; |
| 1084 | } | 1097 | } |
| 1085 | 1098 | ||
| 1086 | /* Send data to the CCP SHA engine */ | 1099 | if (sha->src) { |
| 1087 | ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, | 1100 | /* Send data to the CCP SHA engine; block_size is set above */ |
| 1088 | CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE); | 1101 | ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, |
| 1089 | if (ret) | 1102 | block_size, DMA_TO_DEVICE); |
| 1090 | goto e_ctx; | 1103 | if (ret) |
| 1104 | goto e_ctx; | ||
| 1091 | 1105 | ||
| 1092 | while (src.sg_wa.bytes_left) { | 1106 | while (src.sg_wa.bytes_left) { |
| 1093 | ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false); | 1107 | ccp_prepare_data(&src, NULL, &op, block_size, false); |
| 1094 | if (sha->final && !src.sg_wa.bytes_left) | 1108 | if (sha->final && !src.sg_wa.bytes_left) |
| 1095 | op.eom = 1; | 1109 | op.eom = 1; |
| 1110 | |||
| 1111 | ret = cmd_q->ccp->vdata->perform->sha(&op); | ||
| 1112 | if (ret) { | ||
| 1113 | cmd->engine_error = cmd_q->cmd_error; | ||
| 1114 | goto e_data; | ||
| 1115 | } | ||
| 1096 | 1116 | ||
| 1097 | ret = cmd_q->ccp->vdata->perform->perform_sha(&op); | 1117 | ccp_process_data(&src, NULL, &op); |
| 1118 | } | ||
| 1119 | } else { | ||
| 1120 | op.eom = 1; | ||
| 1121 | ret = cmd_q->ccp->vdata->perform->sha(&op); | ||
| 1098 | if (ret) { | 1122 | if (ret) { |
| 1099 | cmd->engine_error = cmd_q->cmd_error; | 1123 | cmd->engine_error = cmd_q->cmd_error; |
| 1100 | goto e_data; | 1124 | goto e_data; |
| 1101 | } | 1125 | } |
| 1102 | |||
| 1103 | ccp_process_data(&src, NULL, &op); | ||
| 1104 | } | 1126 | } |
| 1105 | 1127 | ||
| 1106 | /* Retrieve the SHA context - convert from LE to BE using | 1128 | /* Retrieve the SHA context - convert from LE to BE using |
| 1107 | * 32-byte (256-bit) byteswapping to BE | 1129 | * 32-byte (256-bit) byteswapping to BE |
| 1108 | */ | 1130 | */ |
| 1109 | ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 1131 | ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
| 1110 | CCP_PASSTHRU_BYTESWAP_256BIT); | 1132 | CCP_PASSTHRU_BYTESWAP_256BIT); |
| 1111 | if (ret) { | 1133 | if (ret) { |
| 1112 | cmd->engine_error = cmd_q->cmd_error; | 1134 | cmd->engine_error = cmd_q->cmd_error; |
| 1113 | goto e_data; | 1135 | goto e_data; |
| 1114 | } | 1136 | } |
| 1115 | 1137 | ||
| 1116 | ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); | 1138 | if (sha->final) { |
| 1117 | 1139 | /* Finishing up, so get the digest */ | |
| 1118 | if (sha->final && sha->opad) { | ||
| 1119 | /* HMAC operation, recursively perform final SHA */ | ||
| 1120 | struct ccp_cmd hmac_cmd; | ||
| 1121 | struct scatterlist sg; | ||
| 1122 | u64 block_size, digest_size; | ||
| 1123 | u8 *hmac_buf; | ||
| 1124 | |||
| 1125 | switch (sha->type) { | 1140 | switch (sha->type) { |
| 1126 | case CCP_SHA_TYPE_1: | 1141 | case CCP_SHA_TYPE_1: |
| 1127 | block_size = SHA1_BLOCK_SIZE; | ||
| 1128 | digest_size = SHA1_DIGEST_SIZE; | ||
| 1129 | break; | ||
| 1130 | case CCP_SHA_TYPE_224: | 1142 | case CCP_SHA_TYPE_224: |
| 1131 | block_size = SHA224_BLOCK_SIZE; | ||
| 1132 | digest_size = SHA224_DIGEST_SIZE; | ||
| 1133 | break; | ||
| 1134 | case CCP_SHA_TYPE_256: | 1143 | case CCP_SHA_TYPE_256: |
| 1135 | block_size = SHA256_BLOCK_SIZE; | 1144 | ccp_get_dm_area(&ctx, ooffset, |
| 1136 | digest_size = SHA256_DIGEST_SIZE; | 1145 | sha->ctx, 0, |
| 1146 | digest_size); | ||
| 1137 | break; | 1147 | break; |
| 1138 | default: | 1148 | default: |
| 1139 | ret = -EINVAL; | 1149 | ret = -EINVAL; |
| 1140 | goto e_data; | 1150 | goto e_ctx; |
| 1141 | } | 1151 | } |
| 1152 | } else { | ||
| 1153 | /* Stash the context */ | ||
| 1154 | ccp_get_dm_area(&ctx, 0, sha->ctx, 0, | ||
| 1155 | sb_count * CCP_SB_BYTES); | ||
| 1156 | } | ||
| 1157 | |||
| 1158 | if (sha->final && sha->opad) { | ||
| 1159 | /* HMAC operation, recursively perform final SHA */ | ||
| 1160 | struct ccp_cmd hmac_cmd; | ||
| 1161 | struct scatterlist sg; | ||
| 1162 | u8 *hmac_buf; | ||
| 1142 | 1163 | ||
| 1143 | if (sha->opad_len != block_size) { | 1164 | if (sha->opad_len != block_size) { |
| 1144 | ret = -EINVAL; | 1165 | ret = -EINVAL; |
| @@ -1153,7 +1174,18 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1153 | sg_init_one(&sg, hmac_buf, block_size + digest_size); | 1174 | sg_init_one(&sg, hmac_buf, block_size + digest_size); |
| 1154 | 1175 | ||
| 1155 | scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); | 1176 | scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); |
| 1156 | memcpy(hmac_buf + block_size, ctx.address, digest_size); | 1177 | switch (sha->type) { |
| 1178 | case CCP_SHA_TYPE_1: | ||
| 1179 | case CCP_SHA_TYPE_224: | ||
| 1180 | case CCP_SHA_TYPE_256: | ||
| 1181 | memcpy(hmac_buf + block_size, | ||
| 1182 | ctx.address + ooffset, | ||
| 1183 | digest_size); | ||
| 1184 | break; | ||
| 1185 | default: | ||
| 1186 | ret = -EINVAL; | ||
| 1187 | goto e_ctx; | ||
| 1188 | } | ||
| 1157 | 1189 | ||
| 1158 | memset(&hmac_cmd, 0, sizeof(hmac_cmd)); | 1190 | memset(&hmac_cmd, 0, sizeof(hmac_cmd)); |
| 1159 | hmac_cmd.engine = CCP_ENGINE_SHA; | 1191 | hmac_cmd.engine = CCP_ENGINE_SHA; |
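The HMAC branch above builds the outer-hash input by hand — opad (one block) followed by the inner digest just read back at ooffset — and then submits it as a second, recursive SHA command. A self-contained sketch of that buffer construction, using the SHA-1 block and digest sizes; the helper name is illustrative:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE  64	/* SHA1_BLOCK_SIZE  */
#define DIGEST_SIZE 20	/* SHA1_DIGEST_SIZE */

/* Build opad || inner_digest, the message hashed by the outer HMAC
 * pass. The caller frees the buffer; NULL on allocation failure.
 */
static uint8_t *hmac_outer_msg(const uint8_t opad[BLOCK_SIZE],
			       const uint8_t *inner_digest)
{
	uint8_t *buf = malloc(BLOCK_SIZE + DIGEST_SIZE);

	if (!buf)
		return NULL;
	memcpy(buf, opad, BLOCK_SIZE);
	memcpy(buf + BLOCK_SIZE, inner_digest, DIGEST_SIZE);
	return buf;
}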
| @@ -1176,7 +1208,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1176 | } | 1208 | } |
| 1177 | 1209 | ||
| 1178 | e_data: | 1210 | e_data: |
| 1179 | ccp_free_data(&src, cmd_q); | 1211 | if (sha->src) |
| 1212 | ccp_free_data(&src, cmd_q); | ||
| 1180 | 1213 | ||
| 1181 | e_ctx: | 1214 | e_ctx: |
| 1182 | ccp_dm_free(&ctx); | 1215 | ccp_dm_free(&ctx); |
| @@ -1190,7 +1223,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1190 | struct ccp_dm_workarea exp, src; | 1223 | struct ccp_dm_workarea exp, src; |
| 1191 | struct ccp_data dst; | 1224 | struct ccp_data dst; |
| 1192 | struct ccp_op op; | 1225 | struct ccp_op op; |
| 1193 | unsigned int ksb_count, i_len, o_len; | 1226 | unsigned int sb_count, i_len, o_len; |
| 1194 | int ret; | 1227 | int ret; |
| 1195 | 1228 | ||
| 1196 | if (rsa->key_size > CCP_RSA_MAX_WIDTH) | 1229 | if (rsa->key_size > CCP_RSA_MAX_WIDTH) |
| @@ -1208,16 +1241,17 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1208 | o_len = ((rsa->key_size + 255) / 256) * 32; | 1241 | o_len = ((rsa->key_size + 255) / 256) * 32; |
| 1209 | i_len = o_len * 2; | 1242 | i_len = o_len * 2; |
| 1210 | 1243 | ||
| 1211 | ksb_count = o_len / CCP_KSB_BYTES; | 1244 | sb_count = o_len / CCP_SB_BYTES; |
| 1212 | 1245 | ||
| 1213 | memset(&op, 0, sizeof(op)); | 1246 | memset(&op, 0, sizeof(op)); |
| 1214 | op.cmd_q = cmd_q; | 1247 | op.cmd_q = cmd_q; |
| 1215 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1248 | op.jobid = ccp_gen_jobid(cmd_q->ccp); |
| 1216 | op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count); | 1249 | op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count); |
| 1217 | if (!op.ksb_key) | 1250 | |
| 1251 | if (!op.sb_key) | ||
| 1218 | return -EIO; | 1252 | return -EIO; |
| 1219 | 1253 | ||
| 1220 | /* The RSA exponent may span multiple (32-byte) KSB entries and must | 1254 | /* The RSA exponent may span multiple (32-byte) SB entries and must |
| 1221 | * be in little endian format. Reverse copy each 32-byte chunk | 1255 | * be in little endian format. Reverse copy each 32-byte chunk |
| 1222 | * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk) | 1256 | * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk) |
| 1223 | * and each byte within that chunk and do not perform any byte swap | 1257 | * and each byte within that chunk and do not perform any byte swap |
| @@ -1225,14 +1259,14 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1225 | */ | 1259 | */ |
| 1226 | ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); | 1260 | ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); |
| 1227 | if (ret) | 1261 | if (ret) |
| 1228 | goto e_ksb; | 1262 | goto e_sb; |
| 1229 | 1263 | ||
| 1230 | ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, | 1264 | ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, |
| 1231 | CCP_KSB_BYTES, false); | 1265 | CCP_SB_BYTES, false); |
| 1232 | if (ret) | 1266 | if (ret) |
| 1233 | goto e_exp; | 1267 | goto e_exp; |
| 1234 | ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, | 1268 | ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, |
| 1235 | CCP_PASSTHRU_BYTESWAP_NOOP); | 1269 | CCP_PASSTHRU_BYTESWAP_NOOP); |
| 1236 | if (ret) { | 1270 | if (ret) { |
| 1237 | cmd->engine_error = cmd_q->cmd_error; | 1271 | cmd->engine_error = cmd_q->cmd_error; |
| 1238 | goto e_exp; | 1272 | goto e_exp; |
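The comment in this hunk describes the exponent layout precisely: reversing the order of the 32-byte chunks and the bytes within each chunk is the same as reversing the whole buffer, which turns the big-endian input into the little-endian value the engine expects without using the byteswap passthru. A sketch of that reverse copy under the stated assumptions (dst_len is a multiple of 32 and at least src_len; the tail is zero-padded; names are illustrative):

#include <stdint.h>
#include <string.h>

/* Reverse-copy a big-endian value into a little-endian work area. */
static void reverse_set(uint8_t *dst, size_t dst_len,
			const uint8_t *src, size_t src_len)
{
	size_t i;

	memset(dst, 0, dst_len);
	for (i = 0; i < src_len; i++)
		dst[i] = src[src_len - 1 - i];
}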
| @@ -1247,12 +1281,12 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1247 | goto e_exp; | 1281 | goto e_exp; |
| 1248 | 1282 | ||
| 1249 | ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, | 1283 | ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, |
| 1250 | CCP_KSB_BYTES, false); | 1284 | CCP_SB_BYTES, false); |
| 1251 | if (ret) | 1285 | if (ret) |
| 1252 | goto e_src; | 1286 | goto e_src; |
| 1253 | src.address += o_len; /* Adjust the address for the copy operation */ | 1287 | src.address += o_len; /* Adjust the address for the copy operation */ |
| 1254 | ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, | 1288 | ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, |
| 1255 | CCP_KSB_BYTES, false); | 1289 | CCP_SB_BYTES, false); |
| 1256 | if (ret) | 1290 | if (ret) |
| 1257 | goto e_src; | 1291 | goto e_src; |
| 1258 | src.address -= o_len; /* Reset the address to original value */ | 1292 | src.address -= o_len; /* Reset the address to original value */ |
| @@ -1274,7 +1308,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1274 | op.u.rsa.mod_size = rsa->key_size; | 1308 | op.u.rsa.mod_size = rsa->key_size; |
| 1275 | op.u.rsa.input_len = i_len; | 1309 | op.u.rsa.input_len = i_len; |
| 1276 | 1310 | ||
| 1277 | ret = cmd_q->ccp->vdata->perform->perform_rsa(&op); | 1311 | ret = cmd_q->ccp->vdata->perform->rsa(&op); |
| 1278 | if (ret) { | 1312 | if (ret) { |
| 1279 | cmd->engine_error = cmd_q->cmd_error; | 1313 | cmd->engine_error = cmd_q->cmd_error; |
| 1280 | goto e_dst; | 1314 | goto e_dst; |
| @@ -1291,8 +1325,8 @@ e_src: | |||
| 1291 | e_exp: | 1325 | e_exp: |
| 1292 | ccp_dm_free(&exp); | 1326 | ccp_dm_free(&exp); |
| 1293 | 1327 | ||
| 1294 | e_ksb: | 1328 | e_sb: |
| 1295 | ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count); | 1329 | cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); |
| 1296 | 1330 | ||
| 1297 | return ret; | 1331 | return ret; |
| 1298 | } | 1332 | } |
| @@ -1306,7 +1340,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 1306 | struct ccp_op op; | 1340 | struct ccp_op op; |
| 1307 | bool in_place = false; | 1341 | bool in_place = false; |
| 1308 | unsigned int i; | 1342 | unsigned int i; |
| 1309 | int ret; | 1343 | int ret = 0; |
| 1310 | 1344 | ||
| 1311 | if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) | 1345 | if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) |
| 1312 | return -EINVAL; | 1346 | return -EINVAL; |
| @@ -1321,26 +1355,26 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 1321 | return -EINVAL; | 1355 | return -EINVAL; |
| 1322 | } | 1356 | } |
| 1323 | 1357 | ||
| 1324 | BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1); | 1358 | BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); |
| 1325 | 1359 | ||
| 1326 | memset(&op, 0, sizeof(op)); | 1360 | memset(&op, 0, sizeof(op)); |
| 1327 | op.cmd_q = cmd_q; | 1361 | op.cmd_q = cmd_q; |
| 1328 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1362 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
| 1329 | 1363 | ||
| 1330 | if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { | 1364 | if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { |
| 1331 | /* Load the mask */ | 1365 | /* Load the mask */ |
| 1332 | op.ksb_key = cmd_q->ksb_key; | 1366 | op.sb_key = cmd_q->sb_key; |
| 1333 | 1367 | ||
| 1334 | ret = ccp_init_dm_workarea(&mask, cmd_q, | 1368 | ret = ccp_init_dm_workarea(&mask, cmd_q, |
| 1335 | CCP_PASSTHRU_KSB_COUNT * | 1369 | CCP_PASSTHRU_SB_COUNT * |
| 1336 | CCP_KSB_BYTES, | 1370 | CCP_SB_BYTES, |
| 1337 | DMA_TO_DEVICE); | 1371 | DMA_TO_DEVICE); |
| 1338 | if (ret) | 1372 | if (ret) |
| 1339 | return ret; | 1373 | return ret; |
| 1340 | 1374 | ||
| 1341 | ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); | 1375 | ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); |
| 1342 | ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key, | 1376 | ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, |
| 1343 | CCP_PASSTHRU_BYTESWAP_NOOP); | 1377 | CCP_PASSTHRU_BYTESWAP_NOOP); |
| 1344 | if (ret) { | 1378 | if (ret) { |
| 1345 | cmd->engine_error = cmd_q->cmd_error; | 1379 | cmd->engine_error = cmd_q->cmd_error; |
| 1346 | goto e_mask; | 1380 | goto e_mask; |
| @@ -1399,7 +1433,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 1399 | op.dst.u.dma.offset = dst.sg_wa.sg_used; | 1433 | op.dst.u.dma.offset = dst.sg_wa.sg_used; |
| 1400 | op.dst.u.dma.length = op.src.u.dma.length; | 1434 | op.dst.u.dma.length = op.src.u.dma.length; |
| 1401 | 1435 | ||
| 1402 | ret = cmd_q->ccp->vdata->perform->perform_passthru(&op); | 1436 | ret = cmd_q->ccp->vdata->perform->passthru(&op); |
| 1403 | if (ret) { | 1437 | if (ret) { |
| 1404 | cmd->engine_error = cmd_q->cmd_error; | 1438 | cmd->engine_error = cmd_q->cmd_error; |
| 1405 | goto e_dst; | 1439 | goto e_dst; |
| @@ -1448,7 +1482,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 1448 | return -EINVAL; | 1482 | return -EINVAL; |
| 1449 | } | 1483 | } |
| 1450 | 1484 | ||
| 1451 | BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1); | 1485 | BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); |
| 1452 | 1486 | ||
| 1453 | memset(&op, 0, sizeof(op)); | 1487 | memset(&op, 0, sizeof(op)); |
| 1454 | op.cmd_q = cmd_q; | 1488 | op.cmd_q = cmd_q; |
| @@ -1456,13 +1490,13 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 1456 | 1490 | ||
| 1457 | if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { | 1491 | if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { |
| 1458 | /* Load the mask */ | 1492 | /* Load the mask */ |
| 1459 | op.ksb_key = cmd_q->ksb_key; | 1493 | op.sb_key = cmd_q->sb_key; |
| 1460 | 1494 | ||
| 1461 | mask.length = pt->mask_len; | 1495 | mask.length = pt->mask_len; |
| 1462 | mask.dma.address = pt->mask; | 1496 | mask.dma.address = pt->mask; |
| 1463 | mask.dma.length = pt->mask_len; | 1497 | mask.dma.length = pt->mask_len; |
| 1464 | 1498 | ||
| 1465 | ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key, | 1499 | ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, |
| 1466 | CCP_PASSTHRU_BYTESWAP_NOOP); | 1500 | CCP_PASSTHRU_BYTESWAP_NOOP); |
| 1467 | if (ret) { | 1501 | if (ret) { |
| 1468 | cmd->engine_error = cmd_q->cmd_error; | 1502 | cmd->engine_error = cmd_q->cmd_error; |
| @@ -1484,7 +1518,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, | |||
| 1484 | op.dst.u.dma.offset = 0; | 1518 | op.dst.u.dma.offset = 0; |
| 1485 | op.dst.u.dma.length = pt->src_len; | 1519 | op.dst.u.dma.length = pt->src_len; |
| 1486 | 1520 | ||
| 1487 | ret = cmd_q->ccp->vdata->perform->perform_passthru(&op); | 1521 | ret = cmd_q->ccp->vdata->perform->passthru(&op); |
| 1488 | if (ret) | 1522 | if (ret) |
| 1489 | cmd->engine_error = cmd_q->cmd_error; | 1523 | cmd->engine_error = cmd_q->cmd_error; |
| 1490 | 1524 | ||
| @@ -1514,7 +1548,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1514 | 1548 | ||
| 1515 | memset(&op, 0, sizeof(op)); | 1549 | memset(&op, 0, sizeof(op)); |
| 1516 | op.cmd_q = cmd_q; | 1550 | op.cmd_q = cmd_q; |
| 1517 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1551 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
| 1518 | 1552 | ||
| 1519 | /* Concatenate the modulus and the operands. Both the modulus and | 1553 | /* Concatenate the modulus and the operands. Both the modulus and |
| 1520 | * the operands must be in little endian format. Since the input | 1554 | * the operands must be in little endian format. Since the input |
| @@ -1575,7 +1609,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1575 | 1609 | ||
| 1576 | op.u.ecc.function = cmd->u.ecc.function; | 1610 | op.u.ecc.function = cmd->u.ecc.function; |
| 1577 | 1611 | ||
| 1578 | ret = cmd_q->ccp->vdata->perform->perform_ecc(&op); | 1612 | ret = cmd_q->ccp->vdata->perform->ecc(&op); |
| 1579 | if (ret) { | 1613 | if (ret) { |
| 1580 | cmd->engine_error = cmd_q->cmd_error; | 1614 | cmd->engine_error = cmd_q->cmd_error; |
| 1581 | goto e_dst; | 1615 | goto e_dst; |
| @@ -1639,7 +1673,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1639 | 1673 | ||
| 1640 | memset(&op, 0, sizeof(op)); | 1674 | memset(&op, 0, sizeof(op)); |
| 1641 | op.cmd_q = cmd_q; | 1675 | op.cmd_q = cmd_q; |
| 1642 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1676 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
| 1643 | 1677 | ||
| 1644 | /* Concatenate the modulus and the operands. Both the modulus and | 1678 | /* Concatenate the modulus and the operands. Both the modulus and |
| 1645 | * the operands must be in little endian format. Since the input | 1679 | * the operands must be in little endian format. Since the input |
| @@ -1677,7 +1711,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1677 | goto e_src; | 1711 | goto e_src; |
| 1678 | src.address += CCP_ECC_OPERAND_SIZE; | 1712 | src.address += CCP_ECC_OPERAND_SIZE; |
| 1679 | 1713 | ||
| 1680 | /* Set the first point Z coordianate to 1 */ | 1714 | /* Set the first point Z coordinate to 1 */ |
| 1681 | *src.address = 0x01; | 1715 | *src.address = 0x01; |
| 1682 | src.address += CCP_ECC_OPERAND_SIZE; | 1716 | src.address += CCP_ECC_OPERAND_SIZE; |
| 1683 | 1717 | ||
| @@ -1696,7 +1730,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1696 | goto e_src; | 1730 | goto e_src; |
| 1697 | src.address += CCP_ECC_OPERAND_SIZE; | 1731 | src.address += CCP_ECC_OPERAND_SIZE; |
| 1698 | 1732 | ||
| 1699 | /* Set the second point Z coordianate to 1 */ | 1733 | /* Set the second point Z coordinate to 1 */ |
| 1700 | *src.address = 0x01; | 1734 | *src.address = 0x01; |
| 1701 | src.address += CCP_ECC_OPERAND_SIZE; | 1735 | src.address += CCP_ECC_OPERAND_SIZE; |
| 1702 | } else { | 1736 | } else { |
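Setting each point's Z coordinate to 1 converts the caller's affine (x, y) inputs into the projective form the engine computes in: the affine point (x, y) corresponds to the projective point (x, y, 1). A sketch of the operand packing, with a fixed operand width standing in for CCP_ECC_OPERAND_SIZE and little-endian operands assumed, as in the hunk above:

#include <stdint.h>
#include <string.h>

#define ECC_OPERAND_SIZE 32	/* stand-in for CCP_ECC_OPERAND_SIZE */

/* Append one affine point as projective (x, y, 1); returns the
 * advanced write pointer.
 */
static uint8_t *pack_point(uint8_t *dst,
			   const uint8_t x[ECC_OPERAND_SIZE],
			   const uint8_t y[ECC_OPERAND_SIZE])
{
	memcpy(dst, x, ECC_OPERAND_SIZE);
	dst += ECC_OPERAND_SIZE;
	memcpy(dst, y, ECC_OPERAND_SIZE);
	dst += ECC_OPERAND_SIZE;
	memset(dst, 0, ECC_OPERAND_SIZE);
	dst[0] = 0x01;		/* Z = 1, little endian */
	dst += ECC_OPERAND_SIZE;
	return dst;
}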
| @@ -1739,7 +1773,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1739 | 1773 | ||
| 1740 | op.u.ecc.function = cmd->u.ecc.function; | 1774 | op.u.ecc.function = cmd->u.ecc.function; |
| 1741 | 1775 | ||
| 1742 | ret = cmd_q->ccp->vdata->perform->perform_ecc(&op); | 1776 | ret = cmd_q->ccp->vdata->perform->ecc(&op); |
| 1743 | if (ret) { | 1777 | if (ret) { |
| 1744 | cmd->engine_error = cmd_q->cmd_error; | 1778 | cmd->engine_error = cmd_q->cmd_error; |
| 1745 | goto e_dst; | 1779 | goto e_dst; |
| @@ -1810,7 +1844,7 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
| 1810 | cmd->engine_error = 0; | 1844 | cmd->engine_error = 0; |
| 1811 | cmd_q->cmd_error = 0; | 1845 | cmd_q->cmd_error = 0; |
| 1812 | cmd_q->int_rcvd = 0; | 1846 | cmd_q->int_rcvd = 0; |
| 1813 | cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); | 1847 | cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q); |
| 1814 | 1848 | ||
| 1815 | switch (cmd->engine) { | 1849 | switch (cmd->engine) { |
| 1816 | case CCP_ENGINE_AES: | 1850 | case CCP_ENGINE_AES: |
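The pattern running through all of these hunks is the same: direct calls (perform_xts_aes, perform_sha, ...) and raw register reads (CMD_Q_DEPTH on reg_status) are replaced by hooks on per-version data, so v3 and v5 devices can supply different implementations behind one command layer. A sketch of the shape of that dispatch; these are hypothetical, trimmed-down definitions with approximated types, not the real ones in ccp-dev.h:

/* Hypothetical shapes; the real structs carry more members. */
struct ccp_op;
struct ccp_cmd_queue;
struct ccp_device;

struct ccp_actions {
	int (*aes)(struct ccp_op *op);
	int (*xts_aes)(struct ccp_op *op);
	int (*sha)(struct ccp_op *op);
	int (*rsa)(struct ccp_op *op);
	int (*passthru)(struct ccp_op *op);
	int (*ecc)(struct ccp_op *op);
	unsigned int (*sballoc)(struct ccp_cmd_queue *cmd_q,
				unsigned int count);
	void (*sbfree)(struct ccp_cmd_queue *cmd_q, unsigned int start,
		       unsigned int count);
	unsigned int (*get_free_slots)(struct ccp_cmd_queue *cmd_q);
};

struct ccp_vdata {
	unsigned int version;
	void (*setup)(struct ccp_device *ccp);	/* optional, see ccp-pci.c */
	const struct ccp_actions *perform;
	unsigned int bar;	/* PCI BAR holding the registers */
	unsigned int offset;	/* register block offset in that BAR */
};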
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 0bf262e36b6b..28a9996c1085 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
| 5 | * | 5 | * |
| 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
| 7 | * Author: Gary R Hook <gary.hook@amd.com> | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
| @@ -25,9 +26,6 @@ | |||
| 25 | 26 | ||
| 26 | #include "ccp-dev.h" | 27 | #include "ccp-dev.h" |
| 27 | 28 | ||
| 28 | #define IO_BAR 2 | ||
| 29 | #define IO_OFFSET 0x20000 | ||
| 30 | |||
| 31 | #define MSIX_VECTORS 2 | 29 | #define MSIX_VECTORS 2 |
| 32 | 30 | ||
| 33 | struct ccp_msix { | 31 | struct ccp_msix { |
| @@ -143,10 +141,11 @@ static void ccp_free_irqs(struct ccp_device *ccp) | |||
| 143 | free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, | 141 | free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, |
| 144 | dev); | 142 | dev); |
| 145 | pci_disable_msix(pdev); | 143 | pci_disable_msix(pdev); |
| 146 | } else { | 144 | } else if (ccp->irq) { |
| 147 | free_irq(ccp->irq, dev); | 145 | free_irq(ccp->irq, dev); |
| 148 | pci_disable_msi(pdev); | 146 | pci_disable_msi(pdev); |
| 149 | } | 147 | } |
| 148 | ccp->irq = 0; | ||
| 150 | } | 149 | } |
| 151 | 150 | ||
| 152 | static int ccp_find_mmio_area(struct ccp_device *ccp) | 151 | static int ccp_find_mmio_area(struct ccp_device *ccp) |
| @@ -156,10 +155,11 @@ static int ccp_find_mmio_area(struct ccp_device *ccp) | |||
| 156 | resource_size_t io_len; | 155 | resource_size_t io_len; |
| 157 | unsigned long io_flags; | 156 | unsigned long io_flags; |
| 158 | 157 | ||
| 159 | io_flags = pci_resource_flags(pdev, IO_BAR); | 158 | io_flags = pci_resource_flags(pdev, ccp->vdata->bar); |
| 160 | io_len = pci_resource_len(pdev, IO_BAR); | 159 | io_len = pci_resource_len(pdev, ccp->vdata->bar); |
| 161 | if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) | 160 | if ((io_flags & IORESOURCE_MEM) && |
| 162 | return IO_BAR; | 161 | (io_len >= (ccp->vdata->offset + 0x800))) |
| 162 | return ccp->vdata->bar; | ||
| 163 | 163 | ||
| 164 | return -EIO; | 164 | return -EIO; |
| 165 | } | 165 | } |
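The BAR check above now derives both the BAR index and the register offset from the version data rather than fixed constants; the test itself is unchanged in spirit: the resource must be memory-mapped and large enough to hold the 0x800-byte register block at that offset. A reduced sketch of the check, with simplified stand-in types and flag names:

#include <stddef.h>

#define RES_MEM 0x1	/* stand-in for IORESOURCE_MEM */

/* Return the BAR index if it can hold the register block, else -1
 * (the driver returns -EIO).
 */
static int find_mmio_bar(unsigned long flags, size_t len,
			 unsigned int bar, size_t offset)
{
	if ((flags & RES_MEM) && len >= offset + 0x800)
		return (int)bar;
	return -1;
}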
| @@ -216,7 +216,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 216 | dev_err(dev, "pci_iomap failed\n"); | 216 | dev_err(dev, "pci_iomap failed\n"); |
| 217 | goto e_device; | 217 | goto e_device; |
| 218 | } | 218 | } |
| 219 | ccp->io_regs = ccp->io_map + IO_OFFSET; | 219 | ccp->io_regs = ccp->io_map + ccp->vdata->offset; |
| 220 | 220 | ||
| 221 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); | 221 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); |
| 222 | if (ret) { | 222 | if (ret) { |
| @@ -230,6 +230,9 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 230 | 230 | ||
| 231 | dev_set_drvdata(dev, ccp); | 231 | dev_set_drvdata(dev, ccp); |
| 232 | 232 | ||
| 233 | if (ccp->vdata->setup) | ||
| 234 | ccp->vdata->setup(ccp); | ||
| 235 | |||
| 233 | ret = ccp->vdata->perform->init(ccp); | 236 | ret = ccp->vdata->perform->init(ccp); |
| 234 | if (ret) | 237 | if (ret) |
| 235 | goto e_iomap; | 238 | goto e_iomap; |
| @@ -322,6 +325,8 @@ static int ccp_pci_resume(struct pci_dev *pdev) | |||
| 322 | 325 | ||
| 323 | static const struct pci_device_id ccp_pci_table[] = { | 326 | static const struct pci_device_id ccp_pci_table[] = { |
| 324 | { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, | 327 | { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, |
| 328 | { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a }, | ||
| 329 | { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b }, | ||
| 325 | /* Last entry must be zero */ | 330 | /* Last entry must be zero */ |
| 326 | { 0, } | 331 | { 0, } |
| 327 | }; | 332 | }; |
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index eee2c7e6c299..e09d4055b19e 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c | |||
| @@ -636,20 +636,12 @@ struct hifn_request_context { | |||
| 636 | 636 | ||
| 637 | static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg) | 637 | static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg) |
| 638 | { | 638 | { |
| 639 | u32 ret; | 639 | return readl(dev->bar[0] + reg); |
| 640 | |||
| 641 | ret = readl(dev->bar[0] + reg); | ||
| 642 | |||
| 643 | return ret; | ||
| 644 | } | 640 | } |
| 645 | 641 | ||
| 646 | static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg) | 642 | static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg) |
| 647 | { | 643 | { |
| 648 | u32 ret; | 644 | return readl(dev->bar[1] + reg); |
| 649 | |||
| 650 | ret = readl(dev->bar[1] + reg); | ||
| 651 | |||
| 652 | return ret; | ||
| 653 | } | 645 | } |
| 654 | 646 | ||
| 655 | static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) | 647 | static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) |
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 68e8aa90fe01..a2e77b87485b 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c | |||
| @@ -71,6 +71,7 @@ | |||
| 71 | #define DRIVER_FLAGS_MD5 BIT(21) | 71 | #define DRIVER_FLAGS_MD5 BIT(21) |
| 72 | 72 | ||
| 73 | #define IMG_HASH_QUEUE_LENGTH 20 | 73 | #define IMG_HASH_QUEUE_LENGTH 20 |
| 74 | #define IMG_HASH_DMA_BURST 4 | ||
| 74 | #define IMG_HASH_DMA_THRESHOLD 64 | 75 | #define IMG_HASH_DMA_THRESHOLD 64 |
| 75 | 76 | ||
| 76 | #ifdef __LITTLE_ENDIAN | 77 | #ifdef __LITTLE_ENDIAN |
| @@ -102,8 +103,10 @@ struct img_hash_request_ctx { | |||
| 102 | unsigned long op; | 103 | unsigned long op; |
| 103 | 104 | ||
| 104 | size_t bufcnt; | 105 | size_t bufcnt; |
| 105 | u8 buffer[0] __aligned(sizeof(u32)); | ||
| 106 | struct ahash_request fallback_req; | 106 | struct ahash_request fallback_req; |
| 107 | |||
| 108 | /* Zero length buffer must remain last member of struct */ | ||
| 109 | u8 buffer[0] __aligned(sizeof(u32)); | ||
| 107 | }; | 110 | }; |
| 108 | 111 | ||
| 109 | struct img_hash_ctx { | 112 | struct img_hash_ctx { |
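Moving buffer[0] to the end of img_hash_request_ctx matters because a zero-length array is a variable-length tail: the context is allocated with extra bytes past the struct (the fallback request size plus the DMA threshold, per the reqsize change later in this patch) and indexed through buffer, so any member placed after it would be overwritten. The allocation pattern in miniature, with a trimmed-down struct:

#include <stdint.h>
#include <stdlib.h>

struct req_ctx {
	size_t bufcnt;
	/* Zero length buffer must remain last member of struct */
	uint8_t buffer[];	/* C99 flexible array member */
};

/* Allocate the context plus `extra` trailing buffer bytes. */
static struct req_ctx *req_ctx_alloc(size_t extra)
{
	return calloc(1, sizeof(struct req_ctx) + extra);
}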
| @@ -340,7 +343,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) | |||
| 340 | dma_conf.direction = DMA_MEM_TO_DEV; | 343 | dma_conf.direction = DMA_MEM_TO_DEV; |
| 341 | dma_conf.dst_addr = hdev->bus_addr; | 344 | dma_conf.dst_addr = hdev->bus_addr; |
| 342 | dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 345 | dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
| 343 | dma_conf.dst_maxburst = 16; | 346 | dma_conf.dst_maxburst = IMG_HASH_DMA_BURST; |
| 344 | dma_conf.device_fc = false; | 347 | dma_conf.device_fc = false; |
| 345 | 348 | ||
| 346 | err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); | 349 | err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); |
| @@ -361,7 +364,7 @@ static void img_hash_dma_task(unsigned long d) | |||
| 361 | size_t nbytes, bleft, wsend, len, tbc; | 364 | size_t nbytes, bleft, wsend, len, tbc; |
| 362 | struct scatterlist tsg; | 365 | struct scatterlist tsg; |
| 363 | 366 | ||
| 364 | if (!ctx->sg) | 367 | if (!hdev->req || !ctx->sg) |
| 365 | return; | 368 | return; |
| 366 | 369 | ||
| 367 | addr = sg_virt(ctx->sg); | 370 | addr = sg_virt(ctx->sg); |
| @@ -587,6 +590,32 @@ static int img_hash_finup(struct ahash_request *req) | |||
| 587 | return crypto_ahash_finup(&rctx->fallback_req); | 590 | return crypto_ahash_finup(&rctx->fallback_req); |
| 588 | } | 591 | } |
| 589 | 592 | ||
| 593 | static int img_hash_import(struct ahash_request *req, const void *in) | ||
| 594 | { | ||
| 595 | struct img_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
| 596 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
| 597 | struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
| 598 | |||
| 599 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); | ||
| 600 | rctx->fallback_req.base.flags = req->base.flags | ||
| 601 | & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 602 | |||
| 603 | return crypto_ahash_import(&rctx->fallback_req, in); | ||
| 604 | } | ||
| 605 | |||
| 606 | static int img_hash_export(struct ahash_request *req, void *out) | ||
| 607 | { | ||
| 608 | struct img_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
| 609 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
| 610 | struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
| 611 | |||
| 612 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); | ||
| 613 | rctx->fallback_req.base.flags = req->base.flags | ||
| 614 | & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 615 | |||
| 616 | return crypto_ahash_export(&rctx->fallback_req, out); | ||
| 617 | } | ||
| 618 | |||
| 590 | static int img_hash_digest(struct ahash_request *req) | 619 | static int img_hash_digest(struct ahash_request *req) |
| 591 | { | 620 | { |
| 592 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 621 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
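The new export/import hooks let callers of the ahash API checkpoint a partial hash and resume it later; since img-hash keeps partial state in its software fallback anyway, both hooks simply retarget the request context at the fallback transform and delegate. A sketch of how a caller uses the pair — these are the standard kernel crypto calls, with error handling trimmed and the wrapper name mine:

#include <crypto/hash.h>

/* Save the partial state of one request and resume it on another.
 * `state` must be at least crypto_ahash_statesize() bytes.
 */
static int checkpoint_and_resume(struct ahash_request *req,
				 struct ahash_request *req2,
				 void *state)
{
	int ret;

	ret = crypto_ahash_export(req, state);	  /* stash partial state */
	if (ret)
		return ret;
	return crypto_ahash_import(req2, state);  /* pick up where req left off */
}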
| @@ -643,10 +672,9 @@ static int img_hash_digest(struct ahash_request *req) | |||
| 643 | return err; | 672 | return err; |
| 644 | } | 673 | } |
| 645 | 674 | ||
| 646 | static int img_hash_cra_init(struct crypto_tfm *tfm) | 675 | static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name) |
| 647 | { | 676 | { |
| 648 | struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 677 | struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
| 649 | const char *alg_name = crypto_tfm_alg_name(tfm); | ||
| 650 | int err = -ENOMEM; | 678 | int err = -ENOMEM; |
| 651 | 679 | ||
| 652 | ctx->fallback = crypto_alloc_ahash(alg_name, 0, | 680 | ctx->fallback = crypto_alloc_ahash(alg_name, 0, |
| @@ -658,6 +686,7 @@ static int img_hash_cra_init(struct crypto_tfm *tfm) | |||
| 658 | } | 686 | } |
| 659 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 687 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| 660 | sizeof(struct img_hash_request_ctx) + | 688 | sizeof(struct img_hash_request_ctx) + |
| 689 | crypto_ahash_reqsize(ctx->fallback) + | ||
| 661 | IMG_HASH_DMA_THRESHOLD); | 690 | IMG_HASH_DMA_THRESHOLD); |
| 662 | 691 | ||
| 663 | return 0; | 692 | return 0; |
| @@ -666,6 +695,26 @@ err: | |||
| 666 | return err; | 695 | return err; |
| 667 | } | 696 | } |
| 668 | 697 | ||
| 698 | static int img_hash_cra_md5_init(struct crypto_tfm *tfm) | ||
| 699 | { | ||
| 700 | return img_hash_cra_init(tfm, "md5-generic"); | ||
| 701 | } | ||
| 702 | |||
| 703 | static int img_hash_cra_sha1_init(struct crypto_tfm *tfm) | ||
| 704 | { | ||
| 705 | return img_hash_cra_init(tfm, "sha1-generic"); | ||
| 706 | } | ||
| 707 | |||
| 708 | static int img_hash_cra_sha224_init(struct crypto_tfm *tfm) | ||
| 709 | { | ||
| 710 | return img_hash_cra_init(tfm, "sha224-generic"); | ||
| 711 | } | ||
| 712 | |||
| 713 | static int img_hash_cra_sha256_init(struct crypto_tfm *tfm) | ||
| 714 | { | ||
| 715 | return img_hash_cra_init(tfm, "sha256-generic"); | ||
| 716 | } | ||
| 717 | |||
| 669 | static void img_hash_cra_exit(struct crypto_tfm *tfm) | 718 | static void img_hash_cra_exit(struct crypto_tfm *tfm) |
| 670 | { | 719 | { |
| 671 | struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm); | 720 | struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm); |
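Because .cra_init has a fixed single-argument signature, the per-algorithm fallback name can't be passed to img_hash_cra_init() directly; the patch threads it through thin wrappers instead, one per algorithm, each pinning a specific generic implementation. The same binding pattern in miniature, with hypothetical names and the body stubbed out:

struct crypto_tfm;	/* opaque here */

/* Shared init that takes the fallback name as a parameter... */
static int my_hash_init(struct crypto_tfm *tfm, const char *alg_name)
{
	/* allocate the named software fallback, size the request ctx */
	return 0;
}

/* ...bound per algorithm by wrappers matching the fixed signature. */
static int my_hash_sha1_init(struct crypto_tfm *tfm)
{
	return my_hash_init(tfm, "sha1-generic");
}

static int my_hash_sha256_init(struct crypto_tfm *tfm)
{
	return my_hash_init(tfm, "sha256-generic");
}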
| @@ -711,9 +760,12 @@ static struct ahash_alg img_algs[] = { | |||
| 711 | .update = img_hash_update, | 760 | .update = img_hash_update, |
| 712 | .final = img_hash_final, | 761 | .final = img_hash_final, |
| 713 | .finup = img_hash_finup, | 762 | .finup = img_hash_finup, |
| 763 | .export = img_hash_export, | ||
| 764 | .import = img_hash_import, | ||
| 714 | .digest = img_hash_digest, | 765 | .digest = img_hash_digest, |
| 715 | .halg = { | 766 | .halg = { |
| 716 | .digestsize = MD5_DIGEST_SIZE, | 767 | .digestsize = MD5_DIGEST_SIZE, |
| 768 | .statesize = sizeof(struct md5_state), | ||
| 717 | .base = { | 769 | .base = { |
| 718 | .cra_name = "md5", | 770 | .cra_name = "md5", |
| 719 | .cra_driver_name = "img-md5", | 771 | .cra_driver_name = "img-md5", |
| @@ -723,7 +775,7 @@ static struct ahash_alg img_algs[] = { | |||
| 723 | CRYPTO_ALG_NEED_FALLBACK, | 775 | CRYPTO_ALG_NEED_FALLBACK, |
| 724 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | 776 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, |
| 725 | .cra_ctxsize = sizeof(struct img_hash_ctx), | 777 | .cra_ctxsize = sizeof(struct img_hash_ctx), |
| 726 | .cra_init = img_hash_cra_init, | 778 | .cra_init = img_hash_cra_md5_init, |
| 727 | .cra_exit = img_hash_cra_exit, | 779 | .cra_exit = img_hash_cra_exit, |
| 728 | .cra_module = THIS_MODULE, | 780 | .cra_module = THIS_MODULE, |
| 729 | } | 781 | } |
| @@ -734,9 +786,12 @@ static struct ahash_alg img_algs[] = { | |||
| 734 | .update = img_hash_update, | 786 | .update = img_hash_update, |
| 735 | .final = img_hash_final, | 787 | .final = img_hash_final, |
| 736 | .finup = img_hash_finup, | 788 | .finup = img_hash_finup, |
| 789 | .export = img_hash_export, | ||
| 790 | .import = img_hash_import, | ||
| 737 | .digest = img_hash_digest, | 791 | .digest = img_hash_digest, |
| 738 | .halg = { | 792 | .halg = { |
| 739 | .digestsize = SHA1_DIGEST_SIZE, | 793 | .digestsize = SHA1_DIGEST_SIZE, |
| 794 | .statesize = sizeof(struct sha1_state), | ||
| 740 | .base = { | 795 | .base = { |
| 741 | .cra_name = "sha1", | 796 | .cra_name = "sha1", |
| 742 | .cra_driver_name = "img-sha1", | 797 | .cra_driver_name = "img-sha1", |
| @@ -746,7 +801,7 @@ static struct ahash_alg img_algs[] = { | |||
| 746 | CRYPTO_ALG_NEED_FALLBACK, | 801 | CRYPTO_ALG_NEED_FALLBACK, |
| 747 | .cra_blocksize = SHA1_BLOCK_SIZE, | 802 | .cra_blocksize = SHA1_BLOCK_SIZE, |
| 748 | .cra_ctxsize = sizeof(struct img_hash_ctx), | 803 | .cra_ctxsize = sizeof(struct img_hash_ctx), |
| 749 | .cra_init = img_hash_cra_init, | 804 | .cra_init = img_hash_cra_sha1_init, |
| 750 | .cra_exit = img_hash_cra_exit, | 805 | .cra_exit = img_hash_cra_exit, |
| 751 | .cra_module = THIS_MODULE, | 806 | .cra_module = THIS_MODULE, |
| 752 | } | 807 | } |
| @@ -757,9 +812,12 @@ static struct ahash_alg img_algs[] = { | |||
| 757 | .update = img_hash_update, | 812 | .update = img_hash_update, |
| 758 | .final = img_hash_final, | 813 | .final = img_hash_final, |
| 759 | .finup = img_hash_finup, | 814 | .finup = img_hash_finup, |
| 815 | .export = img_hash_export, | ||
| 816 | .import = img_hash_import, | ||
| 760 | .digest = img_hash_digest, | 817 | .digest = img_hash_digest, |
| 761 | .halg = { | 818 | .halg = { |
| 762 | .digestsize = SHA224_DIGEST_SIZE, | 819 | .digestsize = SHA224_DIGEST_SIZE, |
| 820 | .statesize = sizeof(struct sha256_state), | ||
| 763 | .base = { | 821 | .base = { |
| 764 | .cra_name = "sha224", | 822 | .cra_name = "sha224", |
| 765 | .cra_driver_name = "img-sha224", | 823 | .cra_driver_name = "img-sha224", |
| @@ -769,7 +827,7 @@ static struct ahash_alg img_algs[] = { | |||
| 769 | CRYPTO_ALG_NEED_FALLBACK, | 827 | CRYPTO_ALG_NEED_FALLBACK, |
| 770 | .cra_blocksize = SHA224_BLOCK_SIZE, | 828 | .cra_blocksize = SHA224_BLOCK_SIZE, |
| 771 | .cra_ctxsize = sizeof(struct img_hash_ctx), | 829 | .cra_ctxsize = sizeof(struct img_hash_ctx), |
| 772 | .cra_init = img_hash_cra_init, | 830 | .cra_init = img_hash_cra_sha224_init, |
| 773 | .cra_exit = img_hash_cra_exit, | 831 | .cra_exit = img_hash_cra_exit, |
| 774 | .cra_module = THIS_MODULE, | 832 | .cra_module = THIS_MODULE, |
| 775 | } | 833 | } |
| @@ -780,9 +838,12 @@ static struct ahash_alg img_algs[] = { | |||
| 780 | .update = img_hash_update, | 838 | .update = img_hash_update, |
| 781 | .final = img_hash_final, | 839 | .final = img_hash_final, |
| 782 | .finup = img_hash_finup, | 840 | .finup = img_hash_finup, |
| 841 | .export = img_hash_export, | ||
| 842 | .import = img_hash_import, | ||
| 783 | .digest = img_hash_digest, | 843 | .digest = img_hash_digest, |
| 784 | .halg = { | 844 | .halg = { |
| 785 | .digestsize = SHA256_DIGEST_SIZE, | 845 | .digestsize = SHA256_DIGEST_SIZE, |
| 846 | .statesize = sizeof(struct sha256_state), | ||
| 786 | .base = { | 847 | .base = { |
| 787 | .cra_name = "sha256", | 848 | .cra_name = "sha256", |
| 788 | .cra_driver_name = "img-sha256", | 849 | .cra_driver_name = "img-sha256", |
| @@ -792,7 +853,7 @@ static struct ahash_alg img_algs[] = { | |||
| 792 | CRYPTO_ALG_NEED_FALLBACK, | 853 | CRYPTO_ALG_NEED_FALLBACK, |
| 793 | .cra_blocksize = SHA256_BLOCK_SIZE, | 854 | .cra_blocksize = SHA256_BLOCK_SIZE, |
| 794 | .cra_ctxsize = sizeof(struct img_hash_ctx), | 855 | .cra_ctxsize = sizeof(struct img_hash_ctx), |
| 795 | .cra_init = img_hash_cra_init, | 856 | .cra_init = img_hash_cra_sha256_init, |
| 796 | .cra_exit = img_hash_cra_exit, | 857 | .cra_exit = img_hash_cra_exit, |
| 797 | .cra_module = THIS_MODULE, | 858 | .cra_module = THIS_MODULE, |
| 798 | } | 859 | } |
| @@ -971,7 +1032,7 @@ static int img_hash_probe(struct platform_device *pdev) | |||
| 971 | err = img_register_algs(hdev); | 1032 | err = img_register_algs(hdev); |
| 972 | if (err) | 1033 | if (err) |
| 973 | goto err_algs; | 1034 | goto err_algs; |
| 974 | dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n"); | 1035 | dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n"); |
| 975 | 1036 | ||
| 976 | return 0; | 1037 | return 0; |
| 977 | 1038 | ||
| @@ -1013,11 +1074,38 @@ static int img_hash_remove(struct platform_device *pdev) | |||
| 1013 | return 0; | 1074 | return 0; |
| 1014 | } | 1075 | } |
| 1015 | 1076 | ||
| 1077 | #ifdef CONFIG_PM_SLEEP | ||
| 1078 | static int img_hash_suspend(struct device *dev) | ||
| 1079 | { | ||
| 1080 | struct img_hash_dev *hdev = dev_get_drvdata(dev); | ||
| 1081 | |||
| 1082 | clk_disable_unprepare(hdev->hash_clk); | ||
| 1083 | clk_disable_unprepare(hdev->sys_clk); | ||
| 1084 | |||
| 1085 | return 0; | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | static int img_hash_resume(struct device *dev) | ||
| 1089 | { | ||
| 1090 | struct img_hash_dev *hdev = dev_get_drvdata(dev); | ||
| 1091 | |||
| 1092 | clk_prepare_enable(hdev->hash_clk); | ||
| 1093 | clk_prepare_enable(hdev->sys_clk); | ||
| 1094 | |||
| 1095 | return 0; | ||
| 1096 | } | ||
| 1097 | #endif /* CONFIG_PM_SLEEP */ | ||
| 1098 | |||
| 1099 | static const struct dev_pm_ops img_hash_pm_ops = { | ||
| 1100 | SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume) | ||
| 1101 | }; | ||
| 1102 | |||
| 1016 | static struct platform_driver img_hash_driver = { | 1103 | static struct platform_driver img_hash_driver = { |
| 1017 | .probe = img_hash_probe, | 1104 | .probe = img_hash_probe, |
| 1018 | .remove = img_hash_remove, | 1105 | .remove = img_hash_remove, |
| 1019 | .driver = { | 1106 | .driver = { |
| 1020 | .name = "img-hash-accelerator", | 1107 | .name = "img-hash-accelerator", |
| 1108 | .pm = &img_hash_pm_ops, | ||
| 1021 | .of_match_table = of_match_ptr(img_hash_match), | 1109 | .of_match_table = of_match_ptr(img_hash_match), |
| 1022 | } | 1110 | } |
| 1023 | }; | 1111 | }; |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 2296934455fc..7868765a70c5 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
| @@ -447,9 +447,8 @@ static int init_ixp_crypto(struct device *dev) | |||
| 447 | 447 | ||
| 448 | if (!npe_running(npe_c)) { | 448 | if (!npe_running(npe_c)) { |
| 449 | ret = npe_load_firmware(npe_c, npe_name(npe_c), dev); | 449 | ret = npe_load_firmware(npe_c, npe_name(npe_c), dev); |
| 450 | if (ret) { | 450 | if (ret) |
| 451 | return ret; | 451 | goto npe_release; |
| 452 | } | ||
| 453 | if (npe_recv_message(npe_c, msg, "STATUS_MSG")) | 452 | if (npe_recv_message(npe_c, msg, "STATUS_MSG")) |
| 454 | goto npe_error; | 453 | goto npe_error; |
| 455 | } else { | 454 | } else { |
| @@ -473,7 +472,8 @@ static int init_ixp_crypto(struct device *dev) | |||
| 473 | default: | 472 | default: |
| 474 | printk(KERN_ERR "Firmware of %s lacks crypto support\n", | 473 | printk(KERN_ERR "Firmware of %s lacks crypto support\n", |
| 475 | npe_name(npe_c)); | 474 | npe_name(npe_c)); |
| 476 | return -ENODEV; | 475 | ret = -ENODEV; |
| 476 | goto npe_release; | ||
| 477 | } | 477 | } |
| 478 | /* buffer_pool will also be used to sometimes store the hmac, | 478 | /* buffer_pool will also be used to sometimes store the hmac, |
| 479 | * so assure it is large enough | 479 | * so assure it is large enough |
| @@ -512,6 +512,7 @@ npe_error: | |||
| 512 | err: | 512 | err: |
| 513 | dma_pool_destroy(ctx_pool); | 513 | dma_pool_destroy(ctx_pool); |
| 514 | dma_pool_destroy(buffer_pool); | 514 | dma_pool_destroy(buffer_pool); |
| 515 | npe_release: | ||
| 515 | npe_release(npe_c); | 516 | npe_release(npe_c); |
| 516 | return ret; | 517 | return ret; |
| 517 | } | 518 | } |
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index d64af8625d7e..37dadb2a4feb 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c | |||
| @@ -166,6 +166,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv) | |||
| 166 | if (!req) | 166 | if (!req) |
| 167 | break; | 167 | break; |
| 168 | 168 | ||
| 169 | ctx = crypto_tfm_ctx(req->tfm); | ||
| 169 | mv_cesa_complete_req(ctx, req, 0); | 170 | mv_cesa_complete_req(ctx, req, 0); |
| 170 | } | 171 | } |
| 171 | } | 172 | } |
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 82e0f4e6eb1c..9f284682c091 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c | |||
| @@ -374,7 +374,7 @@ static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { | |||
| 374 | .complete = mv_cesa_ahash_complete, | 374 | .complete = mv_cesa_ahash_complete, |
| 375 | }; | 375 | }; |
| 376 | 376 | ||
| 377 | static int mv_cesa_ahash_init(struct ahash_request *req, | 377 | static void mv_cesa_ahash_init(struct ahash_request *req, |
| 378 | struct mv_cesa_op_ctx *tmpl, bool algo_le) | 378 | struct mv_cesa_op_ctx *tmpl, bool algo_le) |
| 379 | { | 379 | { |
| 380 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | 380 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); |
| @@ -390,8 +390,6 @@ static int mv_cesa_ahash_init(struct ahash_request *req, | |||
| 390 | creq->op_tmpl = *tmpl; | 390 | creq->op_tmpl = *tmpl; |
| 391 | creq->len = 0; | 391 | creq->len = 0; |
| 392 | creq->algo_le = algo_le; | 392 | creq->algo_le = algo_le; |
| 393 | |||
| 394 | return 0; | ||
| 395 | } | 393 | } |
| 396 | 394 | ||
| 397 | static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) | 395 | static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) |
| @@ -405,15 +403,16 @@ static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) | |||
| 405 | return 0; | 403 | return 0; |
| 406 | } | 404 | } |
| 407 | 405 | ||
| 408 | static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) | 406 | static bool mv_cesa_ahash_cache_req(struct ahash_request *req) |
| 409 | { | 407 | { |
| 410 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | 408 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); |
| 409 | bool cached = false; | ||
| 411 | 410 | ||
| 412 | if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) { | 411 | if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) { |
| 413 | *cached = true; | 412 | cached = true; |
| 414 | 413 | ||
| 415 | if (!req->nbytes) | 414 | if (!req->nbytes) |
| 416 | return 0; | 415 | return cached; |
| 417 | 416 | ||
| 418 | sg_pcopy_to_buffer(req->src, creq->src_nents, | 417 | sg_pcopy_to_buffer(req->src, creq->src_nents, |
| 419 | creq->cache + creq->cache_ptr, | 418 | creq->cache + creq->cache_ptr, |
| @@ -422,7 +421,7 @@ static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) | |||
| 422 | creq->cache_ptr += req->nbytes; | 421 | creq->cache_ptr += req->nbytes; |
| 423 | } | 422 | } |
| 424 | 423 | ||
| 425 | return 0; | 424 | return cached; |
| 426 | } | 425 | } |
| 427 | 426 | ||
| 428 | static struct mv_cesa_op_ctx * | 427 | static struct mv_cesa_op_ctx * |
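mv_cesa_ahash_cache_req() now answers a yes/no question, so returning bool is the honest type: if the cached bytes plus the new request still fit below one hash block and this isn't the final request, the data is absorbed into the cache and no engine operation is queued. The decision in miniature, with simplified types and CESA_MAX_HASH_BLOCK_SIZE approximated as 64:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define MAX_HASH_BLOCK_SIZE 64

struct cache_state {
	unsigned char cache[MAX_HASH_BLOCK_SIZE];
	size_t cache_ptr;
};

/* Return true when the update was absorbed into the cache. */
static bool cache_req(struct cache_state *st, const void *data,
		      size_t nbytes, bool last_req)
{
	if (st->cache_ptr + nbytes >= MAX_HASH_BLOCK_SIZE || last_req)
		return false;
	if (nbytes)
		memcpy(st->cache + st->cache_ptr, data, nbytes);
	st->cache_ptr += nbytes;
	return true;
}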
| @@ -455,7 +454,6 @@ mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain, | |||
| 455 | 454 | ||
| 456 | static int | 455 | static int |
| 457 | mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, | 456 | mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, |
| 458 | struct mv_cesa_ahash_dma_iter *dma_iter, | ||
| 459 | struct mv_cesa_ahash_req *creq, | 457 | struct mv_cesa_ahash_req *creq, |
| 460 | gfp_t flags) | 458 | gfp_t flags) |
| 461 | { | 459 | { |
| @@ -586,7 +584,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) | |||
| 586 | * Add the cache (left-over data from a previous block) first. | 584 | * Add the cache (left-over data from a previous block) first. |
| 587 | * This will never overflow the SRAM size. | 585 | * This will never overflow the SRAM size. |
| 588 | */ | 586 | */ |
| 589 | ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags); | 587 | ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags); |
| 590 | if (ret) | 588 | if (ret) |
| 591 | goto err_free_tdma; | 589 | goto err_free_tdma; |
| 592 | 590 | ||
| @@ -668,7 +666,6 @@ err: | |||
| 668 | static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) | 666 | static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) |
| 669 | { | 667 | { |
| 670 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | 668 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); |
| 671 | int ret; | ||
| 672 | 669 | ||
| 673 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); | 670 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); |
| 674 | if (creq->src_nents < 0) { | 671 | if (creq->src_nents < 0) { |
| @@ -676,17 +673,15 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) | |||
| 676 | return creq->src_nents; | 673 | return creq->src_nents; |
| 677 | } | 674 | } |
| 678 | 675 | ||
| 679 | ret = mv_cesa_ahash_cache_req(req, cached); | 676 | *cached = mv_cesa_ahash_cache_req(req); |
| 680 | if (ret) | ||
| 681 | return ret; | ||
| 682 | 677 | ||
| 683 | if (*cached) | 678 | if (*cached) |
| 684 | return 0; | 679 | return 0; |
| 685 | 680 | ||
| 686 | if (cesa_dev->caps->has_tdma) | 681 | if (cesa_dev->caps->has_tdma) |
| 687 | ret = mv_cesa_ahash_dma_req_init(req); | 682 | return mv_cesa_ahash_dma_req_init(req); |
| 688 | 683 | else | |
| 689 | return ret; | 684 | return 0; |
| 690 | } | 685 | } |
| 691 | 686 | ||
| 692 | static int mv_cesa_ahash_queue_req(struct ahash_request *req) | 687 | static int mv_cesa_ahash_queue_req(struct ahash_request *req) |
| @@ -805,13 +800,14 @@ static int mv_cesa_md5_init(struct ahash_request *req) | |||
| 805 | struct mv_cesa_op_ctx tmpl = { }; | 800 | struct mv_cesa_op_ctx tmpl = { }; |
| 806 | 801 | ||
| 807 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5); | 802 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5); |
| 803 | |||
| 804 | mv_cesa_ahash_init(req, &tmpl, true); | ||
| 805 | |||
| 808 | creq->state[0] = MD5_H0; | 806 | creq->state[0] = MD5_H0; |
| 809 | creq->state[1] = MD5_H1; | 807 | creq->state[1] = MD5_H1; |
| 810 | creq->state[2] = MD5_H2; | 808 | creq->state[2] = MD5_H2; |
| 811 | creq->state[3] = MD5_H3; | 809 | creq->state[3] = MD5_H3; |
| 812 | 810 | ||
| 813 | mv_cesa_ahash_init(req, &tmpl, true); | ||
| 814 | |||
| 815 | return 0; | 811 | return 0; |
| 816 | } | 812 | } |
| 817 | 813 | ||
| @@ -873,14 +869,15 @@ static int mv_cesa_sha1_init(struct ahash_request *req) | |||
| 873 | struct mv_cesa_op_ctx tmpl = { }; | 869 | struct mv_cesa_op_ctx tmpl = { }; |
| 874 | 870 | ||
| 875 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); | 871 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); |
| 872 | |||
| 873 | mv_cesa_ahash_init(req, &tmpl, false); | ||
| 874 | |||
| 876 | creq->state[0] = SHA1_H0; | 875 | creq->state[0] = SHA1_H0; |
| 877 | creq->state[1] = SHA1_H1; | 876 | creq->state[1] = SHA1_H1; |
| 878 | creq->state[2] = SHA1_H2; | 877 | creq->state[2] = SHA1_H2; |
| 879 | creq->state[3] = SHA1_H3; | 878 | creq->state[3] = SHA1_H3; |
| 880 | creq->state[4] = SHA1_H4; | 879 | creq->state[4] = SHA1_H4; |
| 881 | 880 | ||
| 882 | mv_cesa_ahash_init(req, &tmpl, false); | ||
| 883 | |||
| 884 | return 0; | 881 | return 0; |
| 885 | } | 882 | } |
| 886 | 883 | ||
| @@ -942,6 +939,9 @@ static int mv_cesa_sha256_init(struct ahash_request *req) | |||
| 942 | struct mv_cesa_op_ctx tmpl = { }; | 939 | struct mv_cesa_op_ctx tmpl = { }; |
| 943 | 940 | ||
| 944 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); | 941 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); |
| 942 | |||
| 943 | mv_cesa_ahash_init(req, &tmpl, false); | ||
| 944 | |||
| 945 | creq->state[0] = SHA256_H0; | 945 | creq->state[0] = SHA256_H0; |
| 946 | creq->state[1] = SHA256_H1; | 946 | creq->state[1] = SHA256_H1; |
| 947 | creq->state[2] = SHA256_H2; | 947 | creq->state[2] = SHA256_H2; |
| @@ -951,8 +951,6 @@ static int mv_cesa_sha256_init(struct ahash_request *req) | |||
| 951 | creq->state[6] = SHA256_H6; | 951 | creq->state[6] = SHA256_H6; |
| 952 | creq->state[7] = SHA256_H7; | 952 | creq->state[7] = SHA256_H7; |
| 953 | 953 | ||
| 954 | mv_cesa_ahash_init(req, &tmpl, false); | ||
| 955 | |||
| 956 | return 0; | 954 | return 0; |
| 957 | } | 955 | } |
| 958 | 956 | ||
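All three init hunks above move the mv_cesa_ahash_init() call ahead of the creq->state[] assignments. The reordering only makes sense if the helper resets the whole request context before applying the template (a memset that is not visible in this excerpt), which would have wiped initial-state words written beforehand. A sketch of the safe ordering, with MD5 as the example:

    static int mv_cesa_md5_init(struct ahash_request *req)
    {
            struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
            struct mv_cesa_op_ctx tmpl = { };

            mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

            /* Reset the request context first... */
            mv_cesa_ahash_init(req, &tmpl, true);

            /* ...then program the algorithm's initial state on top of it. */
            creq->state[0] = MD5_H0;
            creq->state[1] = MD5_H1;
            creq->state[2] = MD5_H2;
            creq->state[3] = MD5_H3;

            return 0;
    }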
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c index 86a065bcc187..9fd7a5fbaa1b 100644 --- a/drivers/crypto/marvell/tdma.c +++ b/drivers/crypto/marvell/tdma.c | |||
| @@ -261,6 +261,7 @@ struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, | |||
| 261 | tdma->op = op; | 261 | tdma->op = op; |
| 262 | tdma->byte_cnt = cpu_to_le32(size | BIT(31)); | 262 | tdma->byte_cnt = cpu_to_le32(size | BIT(31)); |
| 263 | tdma->src = cpu_to_le32(dma_handle); | 263 | tdma->src = cpu_to_le32(dma_handle); |
| 264 | tdma->dst = CESA_SA_CFG_SRAM_OFFSET; | ||
| 264 | tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; | 265 | tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; |
| 265 | 266 | ||
| 266 | return op; | 267 | return op; |
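The one-liner above fills in the destination field of the operation descriptor, which was previously left uninitialized even though the flags declare that the destination lives in SRAM. The intended pairing, spelled out (constants as in the driver):

    tdma->src = cpu_to_le32(dma_handle);    /* op context source in DRAM  */
    tdma->dst = CESA_SA_CFG_SRAM_OFFSET;    /* config area offset in SRAM */
    tdma->flags = CESA_TDMA_DST_IN_SRAM |   /* dst is an SRAM offset      */
                  CESA_TDMA_OP;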
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index e6b658faef63..104e9ce9400a 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
| @@ -1091,11 +1091,8 @@ static int mv_probe(struct platform_device *pdev) | |||
| 1091 | 1091 | ||
| 1092 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; | 1092 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; |
| 1093 | 1093 | ||
| 1094 | if (pdev->dev.of_node) | 1094 | irq = platform_get_irq(pdev, 0); |
| 1095 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | 1095 | if (irq < 0) { |
| 1096 | else | ||
| 1097 | irq = platform_get_irq(pdev, 0); | ||
| 1098 | if (irq < 0 || irq == NO_IRQ) { | ||
| 1099 | ret = irq; | 1096 | ret = irq; |
| 1100 | goto err; | 1097 | goto err; |
| 1101 | } | 1098 | } |
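The simplification above leans on platform_get_irq() resolving the interrupt from the device-tree node itself when one is present, which makes the explicit irq_of_parse_and_map() branch redundant; the legacy NO_IRQ comparison goes with it, since platform_get_irq() reports failure as a negative errno. The resulting idiom:

    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
            ret = irq;              /* e.g. -ENXIO or -EPROBE_DEFER */
            goto err;
    }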
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c index ff383ef83871..ee4be1b0d30b 100644 --- a/drivers/crypto/mxc-scc.c +++ b/drivers/crypto/mxc-scc.c | |||
| @@ -668,7 +668,9 @@ static int mxc_scc_probe(struct platform_device *pdev) | |||
| 668 | return PTR_ERR(scc->clk); | 668 | return PTR_ERR(scc->clk); |
| 669 | } | 669 | } |
| 670 | 670 | ||
| 671 | clk_prepare_enable(scc->clk); | 671 | ret = clk_prepare_enable(scc->clk); |
| 672 | if (ret) | ||
| 673 | return ret; | ||
| 672 | 674 | ||
| 673 | /* clear error status register */ | 675 | /* clear error status register */ |
| 674 | writel(0x0, scc->base + SCC_SCM_ERROR_STATUS); | 676 | writel(0x0, scc->base + SCC_SCM_ERROR_STATUS); |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 4ab53a604312..fe32dd95ae4f 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
| @@ -35,7 +35,8 @@ | |||
| 35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
| 36 | #include <crypto/scatterwalk.h> | 36 | #include <crypto/scatterwalk.h> |
| 37 | #include <crypto/aes.h> | 37 | #include <crypto/aes.h> |
| 38 | #include <crypto/algapi.h> | 38 | #include <crypto/engine.h> |
| 39 | #include <crypto/internal/skcipher.h> | ||
| 39 | 40 | ||
| 40 | #define DST_MAXBURST 4 | 41 | #define DST_MAXBURST 4 |
| 41 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) | 42 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) |
| @@ -85,6 +86,8 @@ | |||
| 85 | #define AES_REG_IRQ_DATA_OUT BIT(2) | 86 | #define AES_REG_IRQ_DATA_OUT BIT(2) |
| 86 | #define DEFAULT_TIMEOUT (5*HZ) | 87 | #define DEFAULT_TIMEOUT (5*HZ) |
| 87 | 88 | ||
| 89 | #define DEFAULT_AUTOSUSPEND_DELAY 1000 | ||
| 90 | |||
| 88 | #define FLAGS_MODE_MASK 0x000f | 91 | #define FLAGS_MODE_MASK 0x000f |
| 89 | #define FLAGS_ENCRYPT BIT(0) | 92 | #define FLAGS_ENCRYPT BIT(0) |
| 90 | #define FLAGS_CBC BIT(1) | 93 | #define FLAGS_CBC BIT(1) |
| @@ -103,6 +106,7 @@ struct omap_aes_ctx { | |||
| 103 | int keylen; | 106 | int keylen; |
| 104 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | 107 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; |
| 105 | unsigned long flags; | 108 | unsigned long flags; |
| 109 | struct crypto_skcipher *fallback; | ||
| 106 | }; | 110 | }; |
| 107 | 111 | ||
| 108 | struct omap_aes_reqctx { | 112 | struct omap_aes_reqctx { |
| @@ -238,11 +242,19 @@ static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset, | |||
| 238 | 242 | ||
| 239 | static int omap_aes_hw_init(struct omap_aes_dev *dd) | 243 | static int omap_aes_hw_init(struct omap_aes_dev *dd) |
| 240 | { | 244 | { |
| 245 | int err; | ||
| 246 | |||
| 241 | if (!(dd->flags & FLAGS_INIT)) { | 247 | if (!(dd->flags & FLAGS_INIT)) { |
| 242 | dd->flags |= FLAGS_INIT; | 248 | dd->flags |= FLAGS_INIT; |
| 243 | dd->err = 0; | 249 | dd->err = 0; |
| 244 | } | 250 | } |
| 245 | 251 | ||
| 252 | err = pm_runtime_get_sync(dd->dev); | ||
| 253 | if (err < 0) { | ||
| 254 | dev_err(dd->dev, "failed to get sync: %d\n", err); | ||
| 255 | return err; | ||
| 256 | } | ||
| 257 | |||
| 246 | return 0; | 258 | return 0; |
| 247 | } | 259 | } |
| 248 | 260 | ||
| @@ -319,20 +331,12 @@ static void omap_aes_dma_stop(struct omap_aes_dev *dd) | |||
| 319 | 331 | ||
| 320 | static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) | 332 | static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) |
| 321 | { | 333 | { |
| 322 | struct omap_aes_dev *dd = NULL, *tmp; | 334 | struct omap_aes_dev *dd; |
| 323 | 335 | ||
| 324 | spin_lock_bh(&list_lock); | 336 | spin_lock_bh(&list_lock); |
| 325 | if (!ctx->dd) { | 337 | dd = list_first_entry(&dev_list, struct omap_aes_dev, list); |
| 326 | list_for_each_entry(tmp, &dev_list, list) { | 338 | list_move_tail(&dd->list, &dev_list); |
| 327 | /* FIXME: take fist available aes core */ | 339 | ctx->dd = dd; |
| 328 | dd = tmp; | ||
| 329 | break; | ||
| 330 | } | ||
| 331 | ctx->dd = dd; | ||
| 332 | } else { | ||
| 333 | /* already found before */ | ||
| 334 | dd = ctx->dd; | ||
| 335 | } | ||
| 336 | spin_unlock_bh(&list_lock); | 340 | spin_unlock_bh(&list_lock); |
| 337 | 341 | ||
| 338 | return dd; | 342 | return dd; |
| @@ -519,7 +523,10 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) | |||
| 519 | 523 | ||
| 520 | pr_debug("err: %d\n", err); | 524 | pr_debug("err: %d\n", err); |
| 521 | 525 | ||
| 522 | crypto_finalize_request(dd->engine, req, err); | 526 | crypto_finalize_cipher_request(dd->engine, req, err); |
| 527 | |||
| 528 | pm_runtime_mark_last_busy(dd->dev); | ||
| 529 | pm_runtime_put_autosuspend(dd->dev); | ||
| 523 | } | 530 | } |
| 524 | 531 | ||
| 525 | static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | 532 | static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) |
| @@ -592,7 +599,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, | |||
| 592 | struct ablkcipher_request *req) | 599 | struct ablkcipher_request *req) |
| 593 | { | 600 | { |
| 594 | if (req) | 601 | if (req) |
| 595 | return crypto_transfer_request_to_engine(dd->engine, req); | 602 | return crypto_transfer_cipher_request_to_engine(dd->engine, req); |
| 596 | 603 | ||
| 597 | return 0; | 604 | return 0; |
| 598 | } | 605 | } |
| @@ -602,7 +609,7 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, | |||
| 602 | { | 609 | { |
| 603 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( | 610 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( |
| 604 | crypto_ablkcipher_reqtfm(req)); | 611 | crypto_ablkcipher_reqtfm(req)); |
| 605 | struct omap_aes_dev *dd = omap_aes_find_dev(ctx); | 612 | struct omap_aes_dev *dd = ctx->dd; |
| 606 | struct omap_aes_reqctx *rctx; | 613 | struct omap_aes_reqctx *rctx; |
| 607 | 614 | ||
| 608 | if (!dd) | 615 | if (!dd) |
| @@ -648,7 +655,7 @@ static int omap_aes_crypt_req(struct crypto_engine *engine, | |||
| 648 | { | 655 | { |
| 649 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( | 656 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( |
| 650 | crypto_ablkcipher_reqtfm(req)); | 657 | crypto_ablkcipher_reqtfm(req)); |
| 651 | struct omap_aes_dev *dd = omap_aes_find_dev(ctx); | 658 | struct omap_aes_dev *dd = ctx->dd; |
| 652 | 659 | ||
| 653 | if (!dd) | 660 | if (!dd) |
| 654 | return -ENODEV; | 661 | return -ENODEV; |
| @@ -696,11 +703,29 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | |||
| 696 | crypto_ablkcipher_reqtfm(req)); | 703 | crypto_ablkcipher_reqtfm(req)); |
| 697 | struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); | 704 | struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); |
| 698 | struct omap_aes_dev *dd; | 705 | struct omap_aes_dev *dd; |
| 706 | int ret; | ||
| 699 | 707 | ||
| 700 | pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, | 708 | pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, |
| 701 | !!(mode & FLAGS_ENCRYPT), | 709 | !!(mode & FLAGS_ENCRYPT), |
| 702 | !!(mode & FLAGS_CBC)); | 710 | !!(mode & FLAGS_CBC)); |
| 703 | 711 | ||
| 712 | if (req->nbytes < 200) { | ||
| 713 | SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); | ||
| 714 | |||
| 715 | skcipher_request_set_tfm(subreq, ctx->fallback); | ||
| 716 | skcipher_request_set_callback(subreq, req->base.flags, NULL, | ||
| 717 | NULL); | ||
| 718 | skcipher_request_set_crypt(subreq, req->src, req->dst, | ||
| 719 | req->nbytes, req->info); | ||
| 720 | |||
| 721 | if (mode & FLAGS_ENCRYPT) | ||
| 722 | ret = crypto_skcipher_encrypt(subreq); | ||
| 723 | else | ||
| 724 | ret = crypto_skcipher_decrypt(subreq); | ||
| 725 | |||
| 726 | skcipher_request_zero(subreq); | ||
| 727 | return ret; | ||
| 728 | } | ||
| 704 | dd = omap_aes_find_dev(ctx); | 729 | dd = omap_aes_find_dev(ctx); |
| 705 | if (!dd) | 730 | if (!dd) |
| 706 | return -ENODEV; | 731 | return -ENODEV; |
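For requests under the 200-byte threshold the hardware setup cost outweighs any speedup, so the hunk above routes them to a software skcipher instead. A minimal sketch of this on-stack fallback pattern (ctx->fallback is the transform allocated in cra_init further down):

    SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

    skcipher_request_set_tfm(subreq, ctx->fallback);
    skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
    skcipher_request_set_crypt(subreq, req->src, req->dst,
                               req->nbytes, req->info);

    ret = (mode & FLAGS_ENCRYPT) ? crypto_skcipher_encrypt(subreq)
                                 : crypto_skcipher_decrypt(subreq);

    skcipher_request_zero(subreq);  /* wipe request state off the stack */
    return ret;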
| @@ -716,6 +741,7 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
| 716 | unsigned int keylen) | 741 | unsigned int keylen) |
| 717 | { | 742 | { |
| 718 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 743 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
| 744 | int ret; | ||
| 719 | 745 | ||
| 720 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | 746 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && |
| 721 | keylen != AES_KEYSIZE_256) | 747 | keylen != AES_KEYSIZE_256) |
| @@ -726,6 +752,14 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
| 726 | memcpy(ctx->key, key, keylen); | 752 | memcpy(ctx->key, key, keylen); |
| 727 | ctx->keylen = keylen; | 753 | ctx->keylen = keylen; |
| 728 | 754 | ||
| 755 | crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); | ||
| 756 | crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & | ||
| 757 | CRYPTO_TFM_REQ_MASK); | ||
| 758 | |||
| 759 | ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); | ||
| 760 | if (ret) | ||
| 761 | return ret; | ||
| 762 | |||
| 729 | return 0; | 763 | return 0; |
| 730 | } | 764 | } |
| 731 | 765 | ||
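The key has to reach the fallback transform as well, and the caller's request flags (the CRYPTO_TFM_REQ_MASK bits) are mirrored onto it first so that the fallback honours the same constraints. With the error propagation fixed as above, the tail of setkey reads:

    crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
    crypto_skcipher_set_flags(ctx->fallback,
                              tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

    /* A fallback setkey failure must be reported to the caller. */
    ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
    if (ret)
            return ret;

    return 0;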
| @@ -761,22 +795,16 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req) | |||
| 761 | 795 | ||
| 762 | static int omap_aes_cra_init(struct crypto_tfm *tfm) | 796 | static int omap_aes_cra_init(struct crypto_tfm *tfm) |
| 763 | { | 797 | { |
| 764 | struct omap_aes_dev *dd = NULL; | 798 | const char *name = crypto_tfm_alg_name(tfm); |
| 765 | int err; | 799 | const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; |
| 800 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 801 | struct crypto_skcipher *blk; | ||
| 766 | 802 | ||
| 767 | /* Find AES device, currently picks the first device */ | 803 | blk = crypto_alloc_skcipher(name, 0, flags); |
| 768 | spin_lock_bh(&list_lock); | 804 | if (IS_ERR(blk)) |
| 769 | list_for_each_entry(dd, &dev_list, list) { | 805 | return PTR_ERR(blk); |
| 770 | break; | ||
| 771 | } | ||
| 772 | spin_unlock_bh(&list_lock); | ||
| 773 | 806 | ||
| 774 | err = pm_runtime_get_sync(dd->dev); | 807 | ctx->fallback = blk; |
| 775 | if (err < 0) { | ||
| 776 | dev_err(dd->dev, "%s: failed to get_sync(%d)\n", | ||
| 777 | __func__, err); | ||
| 778 | return err; | ||
| 779 | } | ||
| 780 | 808 | ||
| 781 | tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); | 809 | tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); |
| 782 | 810 | ||
| @@ -785,16 +813,12 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm) | |||
| 785 | 813 | ||
| 786 | static void omap_aes_cra_exit(struct crypto_tfm *tfm) | 814 | static void omap_aes_cra_exit(struct crypto_tfm *tfm) |
| 787 | { | 815 | { |
| 788 | struct omap_aes_dev *dd = NULL; | 816 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
| 789 | 817 | ||
| 790 | /* Find AES device, currently picks the first device */ | 818 | if (ctx->fallback) |
| 791 | spin_lock_bh(&list_lock); | 819 | crypto_free_skcipher(ctx->fallback); |
| 792 | list_for_each_entry(dd, &dev_list, list) { | ||
| 793 | break; | ||
| 794 | } | ||
| 795 | spin_unlock_bh(&list_lock); | ||
| 796 | 820 | ||
| 797 | pm_runtime_put_sync(dd->dev); | 821 | ctx->fallback = NULL; |
| 798 | } | 822 | } |
| 799 | 823 | ||
| 800 | /* ********************** ALGS ************************************ */ | 824 | /* ********************** ALGS ************************************ */ |
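The fallback transform is allocated once per tfm in cra_init() and released in cra_exit(). Passing CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK as the mask asks the crypto API for a synchronous implementation that does not itself require a fallback, which also prevents this driver from being handed back its own algorithm. In sketch form:

    const char *name = crypto_tfm_alg_name(tfm);    /* e.g. "cbc(aes)" */
    const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
    struct crypto_skcipher *blk;

    /* Used as the mask: select a sync cipher with no fallback of its own. */
    blk = crypto_alloc_skcipher(name, 0, flags);
    if (IS_ERR(blk))
            return PTR_ERR(blk);

    ctx->fallback = blk;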
| @@ -806,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = { | |||
| 806 | .cra_priority = 300, | 830 | .cra_priority = 300, |
| 807 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 831 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 808 | CRYPTO_ALG_KERN_DRIVER_ONLY | | 832 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
| 809 | CRYPTO_ALG_ASYNC, | 833 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
| 810 | .cra_blocksize = AES_BLOCK_SIZE, | 834 | .cra_blocksize = AES_BLOCK_SIZE, |
| 811 | .cra_ctxsize = sizeof(struct omap_aes_ctx), | 835 | .cra_ctxsize = sizeof(struct omap_aes_ctx), |
| 812 | .cra_alignmask = 0, | 836 | .cra_alignmask = 0, |
| @@ -828,7 +852,7 @@ static struct crypto_alg algs_ecb_cbc[] = { | |||
| 828 | .cra_priority = 300, | 852 | .cra_priority = 300, |
| 829 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 853 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 830 | CRYPTO_ALG_KERN_DRIVER_ONLY | | 854 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
| 831 | CRYPTO_ALG_ASYNC, | 855 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
| 832 | .cra_blocksize = AES_BLOCK_SIZE, | 856 | .cra_blocksize = AES_BLOCK_SIZE, |
| 833 | .cra_ctxsize = sizeof(struct omap_aes_ctx), | 857 | .cra_ctxsize = sizeof(struct omap_aes_ctx), |
| 834 | .cra_alignmask = 0, | 858 | .cra_alignmask = 0, |
| @@ -854,7 +878,7 @@ static struct crypto_alg algs_ctr[] = { | |||
| 854 | .cra_priority = 300, | 878 | .cra_priority = 300, |
| 855 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 879 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 856 | CRYPTO_ALG_KERN_DRIVER_ONLY | | 880 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
| 857 | CRYPTO_ALG_ASYNC, | 881 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
| 858 | .cra_blocksize = AES_BLOCK_SIZE, | 882 | .cra_blocksize = AES_BLOCK_SIZE, |
| 859 | .cra_ctxsize = sizeof(struct omap_aes_ctx), | 883 | .cra_ctxsize = sizeof(struct omap_aes_ctx), |
| 860 | .cra_alignmask = 0, | 884 | .cra_alignmask = 0, |
| @@ -1140,6 +1164,9 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
| 1140 | } | 1164 | } |
| 1141 | dd->phys_base = res.start; | 1165 | dd->phys_base = res.start; |
| 1142 | 1166 | ||
| 1167 | pm_runtime_use_autosuspend(dev); | ||
| 1168 | pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); | ||
| 1169 | |||
| 1143 | pm_runtime_enable(dev); | 1170 | pm_runtime_enable(dev); |
| 1144 | err = pm_runtime_get_sync(dev); | 1171 | err = pm_runtime_get_sync(dev); |
| 1145 | if (err < 0) { | 1172 | if (err < 0) { |
| @@ -1186,6 +1213,19 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
| 1186 | list_add_tail(&dd->list, &dev_list); | 1213 | list_add_tail(&dd->list, &dev_list); |
| 1187 | spin_unlock(&list_lock); | 1214 | spin_unlock(&list_lock); |
| 1188 | 1215 | ||
| 1216 | /* Initialize crypto engine */ | ||
| 1217 | dd->engine = crypto_engine_alloc_init(dev, 1); | ||
| 1218 | if (!dd->engine) { | ||
| 1219 | err = -ENOMEM; | ||
| 1220 | goto err_engine; | ||
| 1221 | } | ||
| 1222 | |||
| 1223 | dd->engine->prepare_cipher_request = omap_aes_prepare_req; | ||
| 1224 | dd->engine->cipher_one_request = omap_aes_crypt_req; | ||
| 1225 | err = crypto_engine_start(dd->engine); | ||
| 1226 | if (err) | ||
| 1227 | goto err_engine; | ||
| 1228 | |||
| 1189 | for (i = 0; i < dd->pdata->algs_info_size; i++) { | 1229 | for (i = 0; i < dd->pdata->algs_info_size; i++) { |
| 1190 | if (!dd->pdata->algs_info[i].registered) { | 1230 | if (!dd->pdata->algs_info[i].registered) { |
| 1191 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { | 1231 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { |
| @@ -1203,26 +1243,17 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
| 1203 | } | 1243 | } |
| 1204 | } | 1244 | } |
| 1205 | 1245 | ||
| 1206 | /* Initialize crypto engine */ | ||
| 1207 | dd->engine = crypto_engine_alloc_init(dev, 1); | ||
| 1208 | if (!dd->engine) | ||
| 1209 | goto err_algs; | ||
| 1210 | |||
| 1211 | dd->engine->prepare_request = omap_aes_prepare_req; | ||
| 1212 | dd->engine->crypt_one_request = omap_aes_crypt_req; | ||
| 1213 | err = crypto_engine_start(dd->engine); | ||
| 1214 | if (err) | ||
| 1215 | goto err_engine; | ||
| 1216 | |||
| 1217 | return 0; | 1246 | return 0; |
| 1218 | err_engine: | ||
| 1219 | crypto_engine_exit(dd->engine); | ||
| 1220 | err_algs: | 1247 | err_algs: |
| 1221 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) | 1248 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) |
| 1222 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) | 1249 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
| 1223 | crypto_unregister_alg( | 1250 | crypto_unregister_alg( |
| 1224 | &dd->pdata->algs_info[i].algs_list[j]); | 1251 | &dd->pdata->algs_info[i].algs_list[j]); |
| 1225 | 1252 | ||
| 1253 | err_engine: | ||
| 1254 | if (dd->engine) | ||
| 1255 | crypto_engine_exit(dd->engine); | ||
| 1256 | |||
| 1226 | omap_aes_dma_cleanup(dd); | 1257 | omap_aes_dma_cleanup(dd); |
| 1227 | err_irq: | 1258 | err_irq: |
| 1228 | tasklet_kill(&dd->done_task); | 1259 | tasklet_kill(&dd->done_task); |
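Starting the engine before crypto_register_alg() closes the window in which a freshly registered algorithm could receive a request while dd->engine is still NULL. The error unwinding is reordered to match: algorithms are unregistered first, then the engine is torn down, with a NULL check because err_engine can now be reached before the engine was allocated. The probe tail in skeleton form:

    /* Engine first: nothing may be registered before it can run requests. */
    dd->engine = crypto_engine_alloc_init(dev, 1);
    if (!dd->engine) {
            err = -ENOMEM;
            goto err_engine;
    }
    dd->engine->prepare_cipher_request = omap_aes_prepare_req;
    dd->engine->cipher_one_request = omap_aes_crypt_req;
    err = crypto_engine_start(dd->engine);
    if (err)
            goto err_engine;

    /* ...register algorithms, goto err_algs on failure... */
    return 0;

    err_algs:
            /* unregister whatever was registered */
    err_engine:
            if (dd->engine)
                    crypto_engine_exit(dd->engine);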
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 5691434ffb2d..a6f65532fd16 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <crypto/scatterwalk.h> | 39 | #include <crypto/scatterwalk.h> |
| 40 | #include <crypto/des.h> | 40 | #include <crypto/des.h> |
| 41 | #include <crypto/algapi.h> | 41 | #include <crypto/algapi.h> |
| 42 | #include <crypto/engine.h> | ||
| 42 | 43 | ||
| 43 | #define DST_MAXBURST 2 | 44 | #define DST_MAXBURST 2 |
| 44 | 45 | ||
| @@ -506,7 +507,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err) | |||
| 506 | pr_debug("err: %d\n", err); | 507 | pr_debug("err: %d\n", err); |
| 507 | 508 | ||
| 508 | pm_runtime_put(dd->dev); | 509 | pm_runtime_put(dd->dev); |
| 509 | crypto_finalize_request(dd->engine, req, err); | 510 | crypto_finalize_cipher_request(dd->engine, req, err); |
| 510 | } | 511 | } |
| 511 | 512 | ||
| 512 | static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) | 513 | static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) |
| @@ -574,7 +575,7 @@ static int omap_des_handle_queue(struct omap_des_dev *dd, | |||
| 574 | struct ablkcipher_request *req) | 575 | struct ablkcipher_request *req) |
| 575 | { | 576 | { |
| 576 | if (req) | 577 | if (req) |
| 577 | return crypto_transfer_request_to_engine(dd->engine, req); | 578 | return crypto_transfer_cipher_request_to_engine(dd->engine, req); |
| 578 | 579 | ||
| 579 | return 0; | 580 | return 0; |
| 580 | } | 581 | } |
| @@ -1078,6 +1079,19 @@ static int omap_des_probe(struct platform_device *pdev) | |||
| 1078 | list_add_tail(&dd->list, &dev_list); | 1079 | list_add_tail(&dd->list, &dev_list); |
| 1079 | spin_unlock(&list_lock); | 1080 | spin_unlock(&list_lock); |
| 1080 | 1081 | ||
| 1082 | /* Initialize des crypto engine */ | ||
| 1083 | dd->engine = crypto_engine_alloc_init(dev, 1); | ||
| 1084 | if (!dd->engine) { | ||
| 1085 | err = -ENOMEM; | ||
| 1086 | goto err_engine; | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | dd->engine->prepare_cipher_request = omap_des_prepare_req; | ||
| 1090 | dd->engine->cipher_one_request = omap_des_crypt_req; | ||
| 1091 | err = crypto_engine_start(dd->engine); | ||
| 1092 | if (err) | ||
| 1093 | goto err_engine; | ||
| 1094 | |||
| 1081 | for (i = 0; i < dd->pdata->algs_info_size; i++) { | 1095 | for (i = 0; i < dd->pdata->algs_info_size; i++) { |
| 1082 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { | 1096 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { |
| 1083 | algp = &dd->pdata->algs_info[i].algs_list[j]; | 1097 | algp = &dd->pdata->algs_info[i].algs_list[j]; |
| @@ -1093,27 +1107,18 @@ static int omap_des_probe(struct platform_device *pdev) | |||
| 1093 | } | 1107 | } |
| 1094 | } | 1108 | } |
| 1095 | 1109 | ||
| 1096 | /* Initialize des crypto engine */ | ||
| 1097 | dd->engine = crypto_engine_alloc_init(dev, 1); | ||
| 1098 | if (!dd->engine) | ||
| 1099 | goto err_algs; | ||
| 1100 | |||
| 1101 | dd->engine->prepare_request = omap_des_prepare_req; | ||
| 1102 | dd->engine->crypt_one_request = omap_des_crypt_req; | ||
| 1103 | err = crypto_engine_start(dd->engine); | ||
| 1104 | if (err) | ||
| 1105 | goto err_engine; | ||
| 1106 | |||
| 1107 | return 0; | 1110 | return 0; |
| 1108 | 1111 | ||
| 1109 | err_engine: | ||
| 1110 | crypto_engine_exit(dd->engine); | ||
| 1111 | err_algs: | 1112 | err_algs: |
| 1112 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) | 1113 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) |
| 1113 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) | 1114 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
| 1114 | crypto_unregister_alg( | 1115 | crypto_unregister_alg( |
| 1115 | &dd->pdata->algs_info[i].algs_list[j]); | 1116 | &dd->pdata->algs_info[i].algs_list[j]); |
| 1116 | 1117 | ||
| 1118 | err_engine: | ||
| 1119 | if (dd->engine) | ||
| 1120 | crypto_engine_exit(dd->engine); | ||
| 1121 | |||
| 1117 | omap_des_dma_cleanup(dd); | 1122 | omap_des_dma_cleanup(dd); |
| 1118 | err_irq: | 1123 | err_irq: |
| 1119 | tasklet_kill(&dd->done_task); | 1124 | tasklet_kill(&dd->done_task); |
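omap-des gets the same two changes: the move to the cipher-specific crypto_engine hooks and the engine-before-registration probe ordering. The renamed enqueue path, as visible in the hunks above:

    /* Enqueue: hand the ablkcipher request over to the engine. */
    static int omap_des_handle_queue(struct omap_des_dev *dd,
                                     struct ablkcipher_request *req)
    {
            if (req)
                    return crypto_transfer_cipher_request_to_engine(dd->engine,
                                                                    req);
            return 0;
    }

Completion is reported back the same way, through crypto_finalize_cipher_request(dd->engine, req, err) in omap_des_finish_req().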
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 7fe4eef12fe2..d0b16e5e4ee5 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
| @@ -112,9 +112,10 @@ | |||
| 112 | #define FLAGS_DMA_READY 6 | 112 | #define FLAGS_DMA_READY 6 |
| 113 | #define FLAGS_AUTO_XOR 7 | 113 | #define FLAGS_AUTO_XOR 7 |
| 114 | #define FLAGS_BE32_SHA1 8 | 114 | #define FLAGS_BE32_SHA1 8 |
| 115 | #define FLAGS_SGS_COPIED 9 | ||
| 116 | #define FLAGS_SGS_ALLOCED 10 | ||
| 115 | /* context flags */ | 117 | /* context flags */ |
| 116 | #define FLAGS_FINUP 16 | 118 | #define FLAGS_FINUP 16 |
| 117 | #define FLAGS_SG 17 | ||
| 118 | 119 | ||
| 119 | #define FLAGS_MODE_SHIFT 18 | 120 | #define FLAGS_MODE_SHIFT 18 |
| 120 | #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT) | 121 | #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT) |
| @@ -134,7 +135,8 @@ | |||
| 134 | #define OMAP_ALIGN_MASK (sizeof(u32)-1) | 135 | #define OMAP_ALIGN_MASK (sizeof(u32)-1) |
| 135 | #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) | 136 | #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) |
| 136 | 137 | ||
| 137 | #define BUFLEN PAGE_SIZE | 138 | #define BUFLEN SHA512_BLOCK_SIZE |
| 139 | #define OMAP_SHA_DMA_THRESHOLD 256 | ||
| 138 | 140 | ||
| 139 | struct omap_sham_dev; | 141 | struct omap_sham_dev; |
| 140 | 142 | ||
| @@ -147,12 +149,12 @@ struct omap_sham_reqctx { | |||
| 147 | size_t digcnt; | 149 | size_t digcnt; |
| 148 | size_t bufcnt; | 150 | size_t bufcnt; |
| 149 | size_t buflen; | 151 | size_t buflen; |
| 150 | dma_addr_t dma_addr; | ||
| 151 | 152 | ||
| 152 | /* walk state */ | 153 | /* walk state */ |
| 153 | struct scatterlist *sg; | 154 | struct scatterlist *sg; |
| 154 | struct scatterlist sgl; | 155 | struct scatterlist sgl[2]; |
| 155 | unsigned int offset; /* offset in current sg */ | 156 | int offset; /* offset in current sg */ |
| 157 | int sg_len; | ||
| 156 | unsigned int total; /* total request */ | 158 | unsigned int total; /* total request */ |
| 157 | 159 | ||
| 158 | u8 buffer[0] OMAP_ALIGNED; | 160 | u8 buffer[0] OMAP_ALIGNED; |
| @@ -223,6 +225,7 @@ struct omap_sham_dev { | |||
| 223 | struct dma_chan *dma_lch; | 225 | struct dma_chan *dma_lch; |
| 224 | struct tasklet_struct done_task; | 226 | struct tasklet_struct done_task; |
| 225 | u8 polling_mode; | 227 | u8 polling_mode; |
| 228 | u8 xmit_buf[BUFLEN]; | ||
| 226 | 229 | ||
| 227 | unsigned long flags; | 230 | unsigned long flags; |
| 228 | struct crypto_queue queue; | 231 | struct crypto_queue queue; |
| @@ -510,12 +513,14 @@ static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) | |||
| 510 | SHA_REG_IRQSTATUS_INPUT_RDY); | 513 | SHA_REG_IRQSTATUS_INPUT_RDY); |
| 511 | } | 514 | } |
| 512 | 515 | ||
| 513 | static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | 516 | static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length, |
| 514 | size_t length, int final) | 517 | int final) |
| 515 | { | 518 | { |
| 516 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 519 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
| 517 | int count, len32, bs32, offset = 0; | 520 | int count, len32, bs32, offset = 0; |
| 518 | const u32 *buffer = (const u32 *)buf; | 521 | const u32 *buffer; |
| 522 | int mlen; | ||
| 523 | struct sg_mapping_iter mi; | ||
| 519 | 524 | ||
| 520 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", | 525 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", |
| 521 | ctx->digcnt, length, final); | 526 | ctx->digcnt, length, final); |
| @@ -525,6 +530,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
| 525 | 530 | ||
| 526 | /* should be non-zero before next lines to disable clocks later */ | 531 | /* should be non-zero before next lines to disable clocks later */ |
| 527 | ctx->digcnt += length; | 532 | ctx->digcnt += length; |
| 533 | ctx->total -= length; | ||
| 528 | 534 | ||
| 529 | if (final) | 535 | if (final) |
| 530 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ | 536 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ |
| @@ -534,16 +540,35 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
| 534 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | 540 | len32 = DIV_ROUND_UP(length, sizeof(u32)); |
| 535 | bs32 = get_block_size(ctx) / sizeof(u32); | 541 | bs32 = get_block_size(ctx) / sizeof(u32); |
| 536 | 542 | ||
| 543 | sg_miter_start(&mi, ctx->sg, ctx->sg_len, | ||
| 544 | SG_MITER_FROM_SG | SG_MITER_ATOMIC); | ||
| 545 | |||
| 546 | mlen = 0; | ||
| 547 | |||
| 537 | while (len32) { | 548 | while (len32) { |
| 538 | if (dd->pdata->poll_irq(dd)) | 549 | if (dd->pdata->poll_irq(dd)) |
| 539 | return -ETIMEDOUT; | 550 | return -ETIMEDOUT; |
| 540 | 551 | ||
| 541 | for (count = 0; count < min(len32, bs32); count++, offset++) | 552 | for (count = 0; count < min(len32, bs32); count++, offset++) { |
| 553 | if (!mlen) { | ||
| 554 | sg_miter_next(&mi); | ||
| 555 | mlen = mi.length; | ||
| 556 | if (!mlen) { | ||
| 557 | pr_err("sg miter failure.\n"); | ||
| 558 | return -EINVAL; | ||
| 559 | } | ||
| 560 | offset = 0; | ||
| 561 | buffer = mi.addr; | ||
| 562 | } | ||
| 542 | omap_sham_write(dd, SHA_REG_DIN(dd, count), | 563 | omap_sham_write(dd, SHA_REG_DIN(dd, count), |
| 543 | buffer[offset]); | 564 | buffer[offset]); |
| 565 | mlen -= 4; | ||
| 566 | } | ||
| 544 | len32 -= min(len32, bs32); | 567 | len32 -= min(len32, bs32); |
| 545 | } | 568 | } |
| 546 | 569 | ||
| 570 | sg_miter_stop(&mi); | ||
| 571 | |||
| 547 | return -EINPROGRESS; | 572 | return -EINPROGRESS; |
| 548 | } | 573 | } |
| 549 | 574 | ||
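With the flat-buffer path gone, PIO now walks the request's scatterlist directly; sg_miter is the standard helper for that and is usable from atomic context with SG_MITER_ATOMIC. A simplified sketch of the word-feeding loop (sham_pio_xmit is a hypothetical name, and the per-block poll_irq() handshake from the real code is omitted):

    /* Feed 'length' bytes (a multiple of 4) from an SG list into DIN. */
    static int sham_pio_xmit(struct omap_sham_dev *dd, struct scatterlist *sg,
                             int sg_len, size_t length)
    {
            struct sg_mapping_iter mi;
            const u32 *buf = NULL;
            size_t mlen = 0, off = 0, i;

            sg_miter_start(&mi, sg, sg_len,
                           SG_MITER_FROM_SG | SG_MITER_ATOMIC);

            for (i = 0; i < length / 4; i++) {
                    if (!mlen) {            /* chunk used up: map the next */
                            if (!sg_miter_next(&mi)) {
                                    sg_miter_stop(&mi);
                                    return -EINVAL; /* SG shorter than 'length' */
                            }
                            buf = mi.addr;
                            mlen = mi.length;
                            off = 0;
                    }
                    omap_sham_write(dd, SHA_REG_DIN(dd, 0), buf[off++]);
                    mlen -= 4;              /* one 32-bit word consumed */
            }

            sg_miter_stop(&mi);
            return 0;
    }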
| @@ -555,22 +580,27 @@ static void omap_sham_dma_callback(void *param) | |||
| 555 | tasklet_schedule(&dd->done_task); | 580 | tasklet_schedule(&dd->done_task); |
| 556 | } | 581 | } |
| 557 | 582 | ||
| 558 | static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | 583 | static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length, |
| 559 | size_t length, int final, int is_sg) | 584 | int final) |
| 560 | { | 585 | { |
| 561 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 586 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
| 562 | struct dma_async_tx_descriptor *tx; | 587 | struct dma_async_tx_descriptor *tx; |
| 563 | struct dma_slave_config cfg; | 588 | struct dma_slave_config cfg; |
| 564 | int len32, ret, dma_min = get_block_size(ctx); | 589 | int ret; |
| 565 | 590 | ||
| 566 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", | 591 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", |
| 567 | ctx->digcnt, length, final); | 592 | ctx->digcnt, length, final); |
| 568 | 593 | ||
| 594 | if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) { | ||
| 595 | dev_err(dd->dev, "dma_map_sg error\n"); | ||
| 596 | return -EINVAL; | ||
| 597 | } | ||
| 598 | |||
| 569 | memset(&cfg, 0, sizeof(cfg)); | 599 | memset(&cfg, 0, sizeof(cfg)); |
| 570 | 600 | ||
| 571 | cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); | 601 | cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); |
| 572 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 602 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
| 573 | cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES; | 603 | cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES; |
| 574 | 604 | ||
| 575 | ret = dmaengine_slave_config(dd->dma_lch, &cfg); | 605 | ret = dmaengine_slave_config(dd->dma_lch, &cfg); |
| 576 | if (ret) { | 606 | if (ret) { |
| @@ -578,30 +608,12 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
| 578 | return ret; | 608 | return ret; |
| 579 | } | 609 | } |
| 580 | 610 | ||
| 581 | len32 = DIV_ROUND_UP(length, dma_min) * dma_min; | 611 | tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len, |
| 582 | 612 | DMA_MEM_TO_DEV, | |
| 583 | if (is_sg) { | 613 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
| 584 | /* | ||
| 585 | * The SG entry passed in may not have the 'length' member | ||
| 586 | * set correctly so use a local SG entry (sgl) with the | ||
| 587 | * proper value for 'length' instead. If this is not done, | ||
| 588 | * the dmaengine may try to DMA the incorrect amount of data. | ||
| 589 | */ | ||
| 590 | sg_init_table(&ctx->sgl, 1); | ||
| 591 | sg_assign_page(&ctx->sgl, sg_page(ctx->sg)); | ||
| 592 | ctx->sgl.offset = ctx->sg->offset; | ||
| 593 | sg_dma_len(&ctx->sgl) = len32; | ||
| 594 | sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg); | ||
| 595 | |||
| 596 | tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1, | ||
| 597 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 598 | } else { | ||
| 599 | tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32, | ||
| 600 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 601 | } | ||
| 602 | 614 | ||
| 603 | if (!tx) { | 615 | if (!tx) { |
| 604 | dev_err(dd->dev, "prep_slave_sg/single() failed\n"); | 616 | dev_err(dd->dev, "prep_slave_sg failed\n"); |
| 605 | return -EINVAL; | 617 | return -EINVAL; |
| 606 | } | 618 | } |
| 607 | 619 | ||
| @@ -611,6 +623,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
| 611 | dd->pdata->write_ctrl(dd, length, final, 1); | 623 | dd->pdata->write_ctrl(dd, length, final, 1); |
| 612 | 624 | ||
| 613 | ctx->digcnt += length; | 625 | ctx->digcnt += length; |
| 626 | ctx->total -= length; | ||
| 614 | 627 | ||
| 615 | if (final) | 628 | if (final) |
| 616 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ | 629 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ |
| @@ -625,189 +638,257 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
| 625 | return -EINPROGRESS; | 638 | return -EINPROGRESS; |
| 626 | } | 639 | } |
| 627 | 640 | ||
| 628 | static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, | 641 | static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx, |
| 629 | const u8 *data, size_t length) | 642 | struct scatterlist *sg, int bs, int new_len) |
| 630 | { | 643 | { |
| 631 | size_t count = min(length, ctx->buflen - ctx->bufcnt); | 644 | int n = sg_nents(sg); |
| 645 | struct scatterlist *tmp; | ||
| 646 | int offset = ctx->offset; | ||
| 632 | 647 | ||
| 633 | count = min(count, ctx->total); | 648 | if (ctx->bufcnt) |
| 634 | if (count <= 0) | 649 | n++; |
| 635 | return 0; | ||
| 636 | memcpy(ctx->buffer + ctx->bufcnt, data, count); | ||
| 637 | ctx->bufcnt += count; | ||
| 638 | 650 | ||
| 639 | return count; | 651 | ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); |
| 640 | } | 652 | if (!ctx->sg) |
| 653 | return -ENOMEM; | ||
| 641 | 654 | ||
| 642 | static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) | 655 | sg_init_table(ctx->sg, n); |
| 643 | { | ||
| 644 | size_t count; | ||
| 645 | const u8 *vaddr; | ||
| 646 | 656 | ||
| 647 | while (ctx->sg) { | 657 | tmp = ctx->sg; |
| 648 | vaddr = kmap_atomic(sg_page(ctx->sg)); | ||
| 649 | vaddr += ctx->sg->offset; | ||
| 650 | 658 | ||
| 651 | count = omap_sham_append_buffer(ctx, | 659 | ctx->sg_len = 0; |
| 652 | vaddr + ctx->offset, | ||
| 653 | ctx->sg->length - ctx->offset); | ||
| 654 | 660 | ||
| 655 | kunmap_atomic((void *)vaddr); | 661 | if (ctx->bufcnt) { |
| 662 | sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); | ||
| 663 | tmp = sg_next(tmp); | ||
| 664 | ctx->sg_len++; | ||
| 665 | } | ||
| 656 | 666 | ||
| 657 | if (!count) | 667 | while (sg && new_len) { |
| 658 | break; | 668 | int len = sg->length - offset; |
| 659 | ctx->offset += count; | 669 | |
| 660 | ctx->total -= count; | 670 | if (offset) { |
| 661 | if (ctx->offset == ctx->sg->length) { | 671 | offset -= sg->length; |
| 662 | ctx->sg = sg_next(ctx->sg); | 672 | if (offset < 0) |
| 663 | if (ctx->sg) | 673 | offset = 0; |
| 664 | ctx->offset = 0; | 674 | } |
| 665 | else | 675 | |
| 666 | ctx->total = 0; | 676 | if (new_len < len) |
| 677 | len = new_len; | ||
| 678 | |||
| 679 | if (len > 0) { | ||
| 680 | new_len -= len; | ||
| 681 | sg_set_page(tmp, sg_page(sg), len, sg->offset); | ||
| 682 | if (new_len <= 0) | ||
| 683 | sg_mark_end(tmp); | ||
| 684 | tmp = sg_next(tmp); | ||
| 685 | ctx->sg_len++; | ||
| 667 | } | 686 | } |
| 687 | |||
| 688 | sg = sg_next(sg); | ||
| 668 | } | 689 | } |
| 669 | 690 | ||
| 691 | set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags); | ||
| 692 | |||
| 693 | ctx->bufcnt = 0; | ||
| 694 | |||
| 670 | return 0; | 695 | return 0; |
| 671 | } | 696 | } |
| 672 | 697 | ||
| 673 | static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, | 698 | static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx, |
| 674 | struct omap_sham_reqctx *ctx, | 699 | struct scatterlist *sg, int bs, int new_len) |
| 675 | size_t length, int final) | ||
| 676 | { | 700 | { |
| 677 | int ret; | 701 | int pages; |
| 702 | void *buf; | ||
| 703 | int len; | ||
| 678 | 704 | ||
| 679 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, | 705 | len = new_len + ctx->bufcnt; |
| 680 | DMA_TO_DEVICE); | 706 | |
| 681 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | 707 | pages = get_order(ctx->total); |
| 682 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); | 708 | |
| 683 | return -EINVAL; | 709 | buf = (void *)__get_free_pages(GFP_ATOMIC, pages); |
| 710 | if (!buf) { | ||
| 711 | pr_err("Couldn't allocate pages for unaligned cases.\n"); | ||
| 712 | return -ENOMEM; | ||
| 684 | } | 713 | } |
| 685 | 714 | ||
| 686 | ctx->flags &= ~BIT(FLAGS_SG); | 715 | if (ctx->bufcnt) |
| 716 | memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); | ||
| 687 | 717 | ||
| 688 | ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0); | 718 | scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset, |
| 689 | if (ret != -EINPROGRESS) | 719 | ctx->total - ctx->bufcnt, 0); |
| 690 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, | 720 | sg_init_table(ctx->sgl, 1); |
| 691 | DMA_TO_DEVICE); | 721 | sg_set_buf(ctx->sgl, buf, len); |
| 722 | ctx->sg = ctx->sgl; | ||
| 723 | set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags); | ||
| 724 | ctx->sg_len = 1; | ||
| 725 | ctx->bufcnt = 0; | ||
| 726 | ctx->offset = 0; | ||
| 692 | 727 | ||
| 693 | return ret; | 728 | return 0; |
| 694 | } | 729 | } |
| 695 | 730 | ||
| 696 | static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | 731 | static int omap_sham_align_sgs(struct scatterlist *sg, |
| 732 | int nbytes, int bs, bool final, | ||
| 733 | struct omap_sham_reqctx *rctx) | ||
| 697 | { | 734 | { |
| 698 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 735 | int n = 0; |
| 699 | unsigned int final; | 736 | bool aligned = true; |
| 700 | size_t count; | 737 | bool list_ok = true; |
| 738 | struct scatterlist *sg_tmp = sg; | ||
| 739 | int new_len; | ||
| 740 | int offset = rctx->offset; | ||
| 701 | 741 | ||
| 702 | omap_sham_append_sg(ctx); | 742 | if (!sg || !sg->length || !nbytes) |
| 743 | return 0; | ||
| 744 | |||
| 745 | new_len = nbytes; | ||
| 703 | 746 | ||
| 704 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; | 747 | if (offset) |
| 748 | list_ok = false; | ||
| 705 | 749 | ||
| 706 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", | 750 | if (final) |
| 707 | ctx->bufcnt, ctx->digcnt, final); | 751 | new_len = DIV_ROUND_UP(new_len, bs) * bs; |
| 752 | else | ||
| 753 | new_len = new_len / bs * bs; | ||
| 708 | 754 | ||
| 709 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { | 755 | while (nbytes > 0 && sg_tmp) { |
| 710 | count = ctx->bufcnt; | 756 | n++; |
| 711 | ctx->bufcnt = 0; | 757 | |
| 712 | return omap_sham_xmit_dma_map(dd, ctx, count, final); | 758 | if (offset < sg_tmp->length) { |
| 759 | if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) { | ||
| 760 | aligned = false; | ||
| 761 | break; | ||
| 762 | } | ||
| 763 | |||
| 764 | if (!IS_ALIGNED(sg_tmp->length - offset, bs)) { | ||
| 765 | aligned = false; | ||
| 766 | break; | ||
| 767 | } | ||
| 768 | } | ||
| 769 | |||
| 770 | if (offset) { | ||
| 771 | offset -= sg_tmp->length; | ||
| 772 | if (offset < 0) { | ||
| 773 | nbytes += offset; | ||
| 774 | offset = 0; | ||
| 775 | } | ||
| 776 | } else { | ||
| 777 | nbytes -= sg_tmp->length; | ||
| 778 | } | ||
| 779 | |||
| 780 | sg_tmp = sg_next(sg_tmp); | ||
| 781 | |||
| 782 | if (nbytes < 0) { | ||
| 783 | list_ok = false; | ||
| 784 | break; | ||
| 785 | } | ||
| 713 | } | 786 | } |
| 714 | 787 | ||
| 788 | if (!aligned) | ||
| 789 | return omap_sham_copy_sgs(rctx, sg, bs, new_len); | ||
| 790 | else if (!list_ok) | ||
| 791 | return omap_sham_copy_sg_lists(rctx, sg, bs, new_len); | ||
| 792 | |||
| 793 | rctx->sg_len = n; | ||
| 794 | rctx->sg = sg; | ||
| 795 | |||
| 715 | return 0; | 796 | return 0; |
| 716 | } | 797 | } |
| 717 | 798 | ||
| 718 | /* Start address alignment */ | 799 | static int omap_sham_prepare_request(struct ahash_request *req, bool update) |
| 719 | #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) | ||
| 720 | /* SHA1 block size alignment */ | ||
| 721 | #define SG_SA(sg, bs) (IS_ALIGNED(sg->length, bs)) | ||
| 722 | |||
| 723 | static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | ||
| 724 | { | 800 | { |
| 725 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 801 | struct omap_sham_reqctx *rctx = ahash_request_ctx(req); |
| 726 | unsigned int length, final, tail; | 802 | int bs; |
| 727 | struct scatterlist *sg; | 803 | int ret; |
| 728 | int ret, bs; | 804 | int nbytes; |
| 805 | bool final = rctx->flags & BIT(FLAGS_FINUP); | ||
| 806 | int xmit_len, hash_later; | ||
| 729 | 807 | ||
| 730 | if (!ctx->total) | 808 | if (!req) |
| 731 | return 0; | 809 | return 0; |
| 732 | 810 | ||
| 733 | if (ctx->bufcnt || ctx->offset) | 811 | bs = get_block_size(rctx); |
| 734 | return omap_sham_update_dma_slow(dd); | ||
| 735 | |||
| 736 | /* | ||
| 737 | * Don't use the sg interface when the transfer size is less | ||
| 738 | * than the number of elements in a DMA frame. Otherwise, | ||
| 739 | * the dmaengine infrastructure will calculate that it needs | ||
| 740 | * to transfer 0 frames which ultimately fails. | ||
| 741 | */ | ||
| 742 | if (ctx->total < get_block_size(ctx)) | ||
| 743 | return omap_sham_update_dma_slow(dd); | ||
| 744 | |||
| 745 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", | ||
| 746 | ctx->digcnt, ctx->bufcnt, ctx->total); | ||
| 747 | 812 | ||
| 748 | sg = ctx->sg; | 813 | if (update) |
| 749 | bs = get_block_size(ctx); | 814 | nbytes = req->nbytes; |
| 815 | else | ||
| 816 | nbytes = 0; | ||
| 750 | 817 | ||
| 751 | if (!SG_AA(sg)) | 818 | rctx->total = nbytes + rctx->bufcnt; |
| 752 | return omap_sham_update_dma_slow(dd); | ||
| 753 | 819 | ||
| 754 | if (!sg_is_last(sg) && !SG_SA(sg, bs)) | 820 | if (!rctx->total) |
| 755 | /* size is not BLOCK_SIZE aligned */ | 821 | return 0; |
| 756 | return omap_sham_update_dma_slow(dd); | ||
| 757 | 822 | ||
| 758 | length = min(ctx->total, sg->length); | 823 | if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) { |
| 824 | int len = bs - rctx->bufcnt % bs; | ||
| 759 | 825 | ||
| 760 | if (sg_is_last(sg)) { | 826 | if (len > nbytes) |
| 761 | if (!(ctx->flags & BIT(FLAGS_FINUP))) { | 827 | len = nbytes; |
| 762 | /* not last sg must be BLOCK_SIZE aligned */ | 828 | scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src, |
| 763 | tail = length & (bs - 1); | 829 | 0, len, 0); |
| 764 | /* without finup() we need one block to close hash */ | 830 | rctx->bufcnt += len; |
| 765 | if (!tail) | 831 | nbytes -= len; |
| 766 | tail = bs; | 832 | rctx->offset = len; |
| 767 | length -= tail; | ||
| 768 | } | ||
| 769 | } | 833 | } |
| 770 | 834 | ||
| 771 | if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { | 835 | if (rctx->bufcnt) |
| 772 | dev_err(dd->dev, "dma_map_sg error\n"); | 836 | memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt); |
| 773 | return -EINVAL; | ||
| 774 | } | ||
| 775 | 837 | ||
| 776 | ctx->flags |= BIT(FLAGS_SG); | 838 | ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx); |
| 839 | if (ret) | ||
| 840 | return ret; | ||
| 777 | 841 | ||
| 778 | ctx->total -= length; | 842 | xmit_len = rctx->total; |
| 779 | ctx->offset = length; /* offset where to start slow */ | ||
| 780 | 843 | ||
| 781 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; | 844 | if (!IS_ALIGNED(xmit_len, bs)) { |
| 845 | if (final) | ||
| 846 | xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs; | ||
| 847 | else | ||
| 848 | xmit_len = xmit_len / bs * bs; | ||
| 849 | } | ||
| 782 | 850 | ||
| 783 | ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1); | 851 | hash_later = rctx->total - xmit_len; |
| 784 | if (ret != -EINPROGRESS) | 852 | if (hash_later < 0) |
| 785 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | 853 | hash_later = 0; |
| 786 | 854 | ||
| 787 | return ret; | 855 | if (rctx->bufcnt && nbytes) { |
| 788 | } | 856 | /* have data from previous operation and current */ |
| 857 | sg_init_table(rctx->sgl, 2); | ||
| 858 | sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt); | ||
| 789 | 859 | ||
| 790 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) | 860 | sg_chain(rctx->sgl, 2, req->src); |
| 791 | { | ||
| 792 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
| 793 | int bufcnt, final; | ||
| 794 | 861 | ||
| 795 | if (!ctx->total) | 862 | rctx->sg = rctx->sgl; |
| 796 | return 0; | ||
| 797 | 863 | ||
| 798 | omap_sham_append_sg(ctx); | 864 | rctx->sg_len++; |
| 865 | } else if (rctx->bufcnt) { | ||
| 866 | /* have buffered data only */ | ||
| 867 | sg_init_table(rctx->sgl, 1); | ||
| 868 | sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len); | ||
| 799 | 869 | ||
| 800 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; | 870 | rctx->sg = rctx->sgl; |
| 801 | 871 | ||
| 802 | dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n", | 872 | rctx->sg_len = 1; |
| 803 | ctx->bufcnt, ctx->digcnt, final); | 873 | } |
| 804 | 874 | ||
| 805 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { | 875 | if (hash_later) { |
| 806 | bufcnt = ctx->bufcnt; | 876 | if (req->nbytes) { |
| 807 | ctx->bufcnt = 0; | 877 | scatterwalk_map_and_copy(rctx->buffer, req->src, |
| 808 | return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final); | 878 | req->nbytes - hash_later, |
| 879 | hash_later, 0); | ||
| 880 | } else { | ||
| 881 | memcpy(rctx->buffer, rctx->buffer + xmit_len, | ||
| 882 | hash_later); | ||
| 883 | } | ||
| 884 | rctx->bufcnt = hash_later; | ||
| 885 | } else { | ||
| 886 | rctx->bufcnt = 0; | ||
| 809 | } | 887 | } |
| 810 | 888 | ||
| 889 | if (!final) | ||
| 890 | rctx->total = xmit_len; | ||
| 891 | |||
| 811 | return 0; | 892 | return 0; |
| 812 | } | 893 | } |
| 813 | 894 | ||
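The heart of the rework is omap_sham_prepare_request(): whatever is not a whole block stays behind in rctx->buffer for the next call, and only block-aligned data is submitted, except for the final chunk, which is padded up to a block boundary. The rounding arithmetic, annotated (bs is the block size):

    /* xmit_len: submitted to the hardware now; hash_later: carried over. */
    xmit_len = rctx->total;
    if (!IS_ALIGNED(xmit_len, bs)) {
            if (final)
                    xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs; /* round up   */
            else
                    xmit_len = xmit_len / bs * bs;              /* round down */
    }

    hash_later = rctx->total - xmit_len;  /* negative only when rounding up */
    if (hash_later < 0)
            hash_later = 0;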
| @@ -815,18 +896,9 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | |||
| 815 | { | 896 | { |
| 816 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 897 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
| 817 | 898 | ||
| 899 | dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); | ||
| 818 | 900 | ||
| 819 | if (ctx->flags & BIT(FLAGS_SG)) { | 901 | clear_bit(FLAGS_DMA_ACTIVE, &dd->flags); |
| 820 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | ||
| 821 | if (ctx->sg->length == ctx->offset) { | ||
| 822 | ctx->sg = sg_next(ctx->sg); | ||
| 823 | if (ctx->sg) | ||
| 824 | ctx->offset = 0; | ||
| 825 | } | ||
| 826 | } else { | ||
| 827 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, | ||
| 828 | DMA_TO_DEVICE); | ||
| 829 | } | ||
| 830 | 902 | ||
| 831 | return 0; | 903 | return 0; |
| 832 | } | 904 | } |
| @@ -887,6 +959,8 @@ static int omap_sham_init(struct ahash_request *req) | |||
| 887 | 959 | ||
| 888 | ctx->bufcnt = 0; | 960 | ctx->bufcnt = 0; |
| 889 | ctx->digcnt = 0; | 961 | ctx->digcnt = 0; |
| 962 | ctx->total = 0; | ||
| 963 | ctx->offset = 0; | ||
| 890 | ctx->buflen = BUFLEN; | 964 | ctx->buflen = BUFLEN; |
| 891 | 965 | ||
| 892 | if (tctx->flags & BIT(FLAGS_HMAC)) { | 966 | if (tctx->flags & BIT(FLAGS_HMAC)) { |
| @@ -909,14 +983,19 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) | |||
| 909 | struct ahash_request *req = dd->req; | 983 | struct ahash_request *req = dd->req; |
| 910 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 984 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
| 911 | int err; | 985 | int err; |
| 986 | bool final = ctx->flags & BIT(FLAGS_FINUP); | ||
| 912 | 987 | ||
| 913 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", | 988 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", |
| 914 | ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); | 989 | ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); |
| 915 | 990 | ||
| 991 | if (ctx->total < get_block_size(ctx) || | ||
| 992 | ctx->total < OMAP_SHA_DMA_THRESHOLD) | ||
| 993 | ctx->flags |= BIT(FLAGS_CPU); | ||
| 994 | |||
| 916 | if (ctx->flags & BIT(FLAGS_CPU)) | 995 | if (ctx->flags & BIT(FLAGS_CPU)) |
| 917 | err = omap_sham_update_cpu(dd); | 996 | err = omap_sham_xmit_cpu(dd, ctx->total, final); |
| 918 | else | 997 | else |
| 919 | err = omap_sham_update_dma_start(dd); | 998 | err = omap_sham_xmit_dma(dd, ctx->total, final); |
| 920 | 999 | ||
| 921 | /* wait for dma completion before can take more data */ | 1000 | /* wait for dma completion before can take more data */ |
| 922 | dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); | 1001 | dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); |
| @@ -930,7 +1009,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) | |||
| 930 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 1009 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
| 931 | int err = 0, use_dma = 1; | 1010 | int err = 0, use_dma = 1; |
| 932 | 1011 | ||
| 933 | if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode) | 1012 | if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode) |
| 934 | /* | 1013 | /* |
| 935 | * faster to handle last block with cpu or | 1014 | * faster to handle last block with cpu or |
| 936 | * use cpu when dma is not present. | 1015 | * use cpu when dma is not present. |
| @@ -938,9 +1017,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) | |||
| 938 | use_dma = 0; | 1017 | use_dma = 0; |
| 939 | 1018 | ||
| 940 | if (use_dma) | 1019 | if (use_dma) |
| 941 | err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); | 1020 | err = omap_sham_xmit_dma(dd, ctx->total, 1); |
| 942 | else | 1021 | else |
| 943 | err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); | 1022 | err = omap_sham_xmit_cpu(dd, ctx->total, 1); |
| 944 | 1023 | ||
| 945 | ctx->bufcnt = 0; | 1024 | ctx->bufcnt = 0; |
| 946 | 1025 | ||
| @@ -988,6 +1067,17 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) | |||
| 988 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 1067 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
| 989 | struct omap_sham_dev *dd = ctx->dd; | 1068 | struct omap_sham_dev *dd = ctx->dd; |
| 990 | 1069 | ||
| 1070 | if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) | ||
| 1071 | free_pages((unsigned long)sg_virt(ctx->sg), | ||
| 1072 | get_order(ctx->sg->length)); | ||
| 1073 | |||
| 1074 | if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) | ||
| 1075 | kfree(ctx->sg); | ||
| 1076 | |||
| 1077 | ctx->sg = NULL; | ||
| 1078 | |||
| 1079 | dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED)); | ||
| 1080 | |||
| 991 | if (!err) { | 1081 | if (!err) { |
| 992 | dd->pdata->copy_hash(req, 1); | 1082 | dd->pdata->copy_hash(req, 1); |
| 993 | if (test_bit(FLAGS_FINAL, &dd->flags)) | 1083 | if (test_bit(FLAGS_FINAL, &dd->flags)) |
| @@ -1005,9 +1095,6 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) | |||
| 1005 | 1095 | ||
| 1006 | if (req->base.complete) | 1096 | if (req->base.complete) |
| 1007 | req->base.complete(&req->base, err); | 1097 | req->base.complete(&req->base, err); |
| 1008 | |||
| 1009 | /* handle new request */ | ||
| 1010 | tasklet_schedule(&dd->done_task); | ||
| 1011 | } | 1098 | } |
| 1012 | 1099 | ||
| 1013 | static int omap_sham_handle_queue(struct omap_sham_dev *dd, | 1100 | static int omap_sham_handle_queue(struct omap_sham_dev *dd, |
| @@ -1018,6 +1105,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
| 1018 | unsigned long flags; | 1105 | unsigned long flags; |
| 1019 | int err = 0, ret = 0; | 1106 | int err = 0, ret = 0; |
| 1020 | 1107 | ||
| 1108 | retry: | ||
| 1021 | spin_lock_irqsave(&dd->lock, flags); | 1109 | spin_lock_irqsave(&dd->lock, flags); |
| 1022 | if (req) | 1110 | if (req) |
| 1023 | ret = ahash_enqueue_request(&dd->queue, req); | 1111 | ret = ahash_enqueue_request(&dd->queue, req); |
| @@ -1041,6 +1129,10 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
| 1041 | dd->req = req; | 1129 | dd->req = req; |
| 1042 | ctx = ahash_request_ctx(req); | 1130 | ctx = ahash_request_ctx(req); |
| 1043 | 1131 | ||
| 1132 | err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE); | ||
| 1133 | if (err) | ||
| 1134 | goto err1; | ||
| 1135 | |||
| 1044 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | 1136 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", |
| 1045 | ctx->op, req->nbytes); | 1137 | ctx->op, req->nbytes); |
| 1046 | 1138 | ||
| @@ -1061,11 +1153,19 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
| 1061 | err = omap_sham_final_req(dd); | 1153 | err = omap_sham_final_req(dd); |
| 1062 | } | 1154 | } |
| 1063 | err1: | 1155 | err1: |
| 1064 | if (err != -EINPROGRESS) | 1156 | dev_dbg(dd->dev, "exit, err: %d\n", err); |
| 1157 | |||
| 1158 | if (err != -EINPROGRESS) { | ||
| 1065 | /* done_task will not finish it, so do it here */ | 1159 | /* done_task will not finish it, so do it here */ |
| 1066 | omap_sham_finish_req(req, err); | 1160 | omap_sham_finish_req(req, err); |
| 1161 | req = NULL; | ||
| 1067 | 1162 | ||
| 1068 | dev_dbg(dd->dev, "exit, err: %d\n", err); | 1163 | /* |
| 1164 | * Execute next request immediately if there is anything | ||
| 1165 | * in queue. | ||
| 1166 | */ | ||
| 1167 | goto retry; | ||
| 1168 | } | ||
| 1069 | 1169 | ||
| 1070 | return ret; | 1170 | return ret; |
| 1071 | } | 1171 | } |
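finish_req no longer reschedules done_task just to pull in the next request; instead, handle_queue loops back whenever a request completed synchronously, draining the queue without an extra tasklet bounce. The control flow in skeleton form (process() is a placeholder for the update/final dispatch above):

    retry:
            spin_lock_irqsave(&dd->lock, flags);
            /* ...enqueue 'req' if given; bail out if busy or queue empty... */
            spin_unlock_irqrestore(&dd->lock, flags);

            err = process(dd->req);
            if (err != -EINPROGRESS) {
                    /* done_task will not finish it, so do it here */
                    omap_sham_finish_req(dd->req, err);
                    req = NULL;
                    goto retry;     /* run the next queued request now */
            }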
| @@ -1085,34 +1185,15 @@ static int omap_sham_update(struct ahash_request *req) | |||
| 1085 | { | 1185 | { |
| 1086 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 1186 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
| 1087 | struct omap_sham_dev *dd = ctx->dd; | 1187 | struct omap_sham_dev *dd = ctx->dd; |
| 1088 | int bs = get_block_size(ctx); | ||
| 1089 | 1188 | ||
| 1090 | if (!req->nbytes) | 1189 | if (!req->nbytes) |
| 1091 | return 0; | 1190 | return 0; |
| 1092 | 1191 | ||
| 1093 | ctx->total = req->nbytes; | 1192 | if (ctx->total + req->nbytes < ctx->buflen) { |
| 1094 | ctx->sg = req->src; | 1193 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, |
| 1095 | ctx->offset = 0; | 1194 | 0, req->nbytes, 0); |
| 1096 | 1195 | ctx->bufcnt += req->nbytes; | |
| 1097 | if (ctx->flags & BIT(FLAGS_FINUP)) { | 1196 | ctx->total += req->nbytes; |
| 1098 | if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) { | ||
| 1099 | /* | ||
| 1100 | * OMAP HW accel works only with buffers >= 9 | ||
| 1101 | * will switch to bypass in final() | ||
| 1102 | * final has the same request and data | ||
| 1103 | */ | ||
| 1104 | omap_sham_append_sg(ctx); | ||
| 1105 | return 0; | ||
| 1106 | } else if ((ctx->bufcnt + ctx->total <= bs) || | ||
| 1107 | dd->polling_mode) { | ||
| 1108 | /* | ||
| 1109 | * faster to use CPU for short transfers or | ||
| 1110 | * use cpu when dma is not present. | ||
| 1111 | */ | ||
| 1112 | ctx->flags |= BIT(FLAGS_CPU); | ||
| 1113 | } | ||
| 1114 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { | ||
| 1115 | omap_sham_append_sg(ctx); | ||
| 1116 | return 0; | 1197 | return 0; |
| 1117 | } | 1198 | } |
| 1118 | 1199 | ||
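The rewritten update path makes small updates cheap: while the incoming bytes still fit in ctx->buffer, they are linearized out of the scatterlist and the call returns 0, deferring all hardware work until final()/finup() supplies the rest. An illustrative caller using the standard ahash API (allocation checks and error handling omitted; "sha256" stands in for any algorithm this driver backs):

    struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
    struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
    struct scatterlist sg;
    u8 chunk[16], digest[32];

    sg_init_one(&sg, chunk, sizeof(chunk));
    ahash_request_set_crypt(req, &sg, digest, sizeof(chunk));
    crypto_ahash_init(req);
    crypto_ahash_update(req);   /* 16 bytes: buffered, returns 0    */
    crypto_ahash_update(req);   /* still below buflen, still no HW  */
    crypto_ahash_final(req);    /* accelerator (or fallback) runs here */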
| @@ -1137,9 +1218,20 @@ static int omap_sham_final_shash(struct ahash_request *req) | |||
| 1137 | { | 1218 | { |
| 1138 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | 1219 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); |
| 1139 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 1220 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
| 1221 | int offset = 0; | ||
| 1222 | |||
| 1223 | /* | ||
| 1224 | * If we are running HMAC on hardware with limited support, skip | ||
| 1225 | * the ipad at the beginning of the buffer when falling back to | ||
| 1226 | * the software algorithm. | ||
| 1227 | */ | ||
| 1228 | if (test_bit(FLAGS_HMAC, &ctx->flags) && | ||
| 1229 | !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags)) | ||
| 1230 | offset = get_block_size(ctx); | ||
| 1140 | 1231 | ||
| 1141 | return omap_sham_shash_digest(tctx->fallback, req->base.flags, | 1232 | return omap_sham_shash_digest(tctx->fallback, req->base.flags, |
| 1142 | ctx->buffer, ctx->bufcnt, req->result); | 1233 | ctx->buffer + offset, |
| 1234 | ctx->bufcnt - offset, req->result); | ||
| 1143 | } | 1235 | } |
| 1144 | 1236 | ||
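The offset in omap_sham_final_shash() compensates for a buffer-layout detail: on hardware without automatic HMAC key XOR (FLAGS_AUTO_XOR clear), the driver keeps the ipad block at the front of ctx->buffer for the hardware's benefit, and a software fallback must not see it. The assumed layout, sketched:

    /*
     * ctx->buffer for HMAC on hardware lacking auto key-XOR:
     *   [0 .. bs-1]       ipad block (key XOR 0x36, repeated)
     *   [bs .. bufcnt-1]  message bytes
     * The fallback computes the full HMAC itself, so hand it only
     * the message bytes:
     */
    offset = get_block_size(ctx);
    return omap_sham_shash_digest(tctx->fallback, req->base.flags,
                                  ctx->buffer + offset,
                                  ctx->bufcnt - offset, req->result);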
| 1145 | static int omap_sham_final(struct ahash_request *req) | 1237 | static int omap_sham_final(struct ahash_request *req) |
| @@ -1154,10 +1246,11 @@ static int omap_sham_final(struct ahash_request *req) | |||
| 1154 | /* | 1246 | /* |
| 1155 | * OMAP HW accel works only with buffers >= 9. | 1247 | * OMAP HW accel works only with buffers >= 9. |
| 1156 | * HMAC is always >= 9 because ipad == block size. | 1248 | * HMAC is always >= 9 because ipad == block size. |
| 1157 | * If buffersize is less than 240, we use fallback SW encoding, | 1249 | * If buffersize is less than DMA_THRESHOLD, we use fallback |
| 1158 | * as using DMA + HW in this case doesn't provide any benefit. | 1250 | * SW encoding, as using DMA + HW in this case doesn't provide |
| 1251 | * any benefit. | ||
| 1159 | */ | 1252 | */ |
| 1160 | if ((ctx->digcnt + ctx->bufcnt) < 240) | 1253 | if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD) |
| 1161 | return omap_sham_final_shash(req); | 1254 | return omap_sham_final_shash(req); |
| 1162 | else if (ctx->bufcnt) | 1255 | else if (ctx->bufcnt) |
| 1163 | return omap_sham_enqueue(req, OP_FINAL); | 1256 | return omap_sham_enqueue(req, OP_FINAL); |
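The new condition in omap_sham_final() is a correctness tightening, not just a rename: the software fallback is only valid while the accelerator has consumed nothing (ctx->digcnt == 0), because once a partial digest lives in hardware state the software cannot reproduce it. Restated with explanatory comments (a reading of the code above, not extra driver logic):

    if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD)
            return omap_sham_final_shash(req);       /* all data is still in
                                                        ctx->buffer: pure SW */
    else if (ctx->bufcnt)
            return omap_sham_enqueue(req, OP_FINAL); /* HW holds partial
                                                        state, HW finishes */
    /* else: every byte already hashed; only the digest copy remains */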
| @@ -1323,6 +1416,25 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm) | |||
| 1323 | } | 1416 | } |
| 1324 | } | 1417 | } |
| 1325 | 1418 | ||
| 1419 | static int omap_sham_export(struct ahash_request *req, void *out) | ||
| 1420 | { | ||
| 1421 | struct omap_sham_reqctx *rctx = ahash_request_ctx(req); | ||
| 1422 | |||
| 1423 | memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt); | ||
| 1424 | |||
| 1425 | return 0; | ||
| 1426 | } | ||
| 1427 | |||
| 1428 | static int omap_sham_import(struct ahash_request *req, const void *in) | ||
| 1429 | { | ||
| 1430 | struct omap_sham_reqctx *rctx = ahash_request_ctx(req); | ||
| 1431 | const struct omap_sham_reqctx *ctx_in = in; | ||
| 1432 | |||
| 1433 | memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt); | ||
| 1434 | |||
| 1435 | return 0; | ||
| 1436 | } | ||
| 1437 | |||
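The export/import pair is what makes the partial hash state visible to the rest of the kernel (algif_hash, cryptd and similar users): export() serializes the request context plus any buffered, not-yet-hashed bytes, and import() restores them into a fresh request. A hedged usage sketch with the generic ahash API; the state buffer is sized against crypto_ahash_statesize(), which the probe code further below sets to sizeof(struct omap_sham_reqctx) + BUFLEN:

    u8 state[1024];                  /* illustrative; check the real size */

    if (crypto_ahash_statesize(tfm) > sizeof(state))
            return -EINVAL;
    crypto_ahash_init(req);
    crypto_ahash_update(req);        /* hash the first chunk            */
    crypto_ahash_export(req, state); /* snapshot reqctx + buffer        */
    crypto_ahash_import(req2, state);/* resume on another request       */
    crypto_ahash_final(req2);        /* digest matches a one-shot hash  */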
| 1326 | static struct ahash_alg algs_sha1_md5[] = { | 1438 | static struct ahash_alg algs_sha1_md5[] = { |
| 1327 | { | 1439 | { |
| 1328 | .init = omap_sham_init, | 1440 | .init = omap_sham_init, |
| @@ -1341,7 +1453,7 @@ static struct ahash_alg algs_sha1_md5[] = { | |||
| 1341 | CRYPTO_ALG_NEED_FALLBACK, | 1453 | CRYPTO_ALG_NEED_FALLBACK, |
| 1342 | .cra_blocksize = SHA1_BLOCK_SIZE, | 1454 | .cra_blocksize = SHA1_BLOCK_SIZE, |
| 1343 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | 1455 | .cra_ctxsize = sizeof(struct omap_sham_ctx), |
| 1344 | .cra_alignmask = 0, | 1456 | .cra_alignmask = OMAP_ALIGN_MASK, |
| 1345 | .cra_module = THIS_MODULE, | 1457 | .cra_module = THIS_MODULE, |
| 1346 | .cra_init = omap_sham_cra_init, | 1458 | .cra_init = omap_sham_cra_init, |
| 1347 | .cra_exit = omap_sham_cra_exit, | 1459 | .cra_exit = omap_sham_cra_exit, |
| @@ -1440,7 +1552,7 @@ static struct ahash_alg algs_sha224_sha256[] = { | |||
| 1440 | CRYPTO_ALG_NEED_FALLBACK, | 1552 | CRYPTO_ALG_NEED_FALLBACK, |
| 1441 | .cra_blocksize = SHA224_BLOCK_SIZE, | 1553 | .cra_blocksize = SHA224_BLOCK_SIZE, |
| 1442 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | 1554 | .cra_ctxsize = sizeof(struct omap_sham_ctx), |
| 1443 | .cra_alignmask = 0, | 1555 | .cra_alignmask = OMAP_ALIGN_MASK, |
| 1444 | .cra_module = THIS_MODULE, | 1556 | .cra_module = THIS_MODULE, |
| 1445 | .cra_init = omap_sham_cra_init, | 1557 | .cra_init = omap_sham_cra_init, |
| 1446 | .cra_exit = omap_sham_cra_exit, | 1558 | .cra_exit = omap_sham_cra_exit, |
| @@ -1462,7 +1574,7 @@ static struct ahash_alg algs_sha224_sha256[] = { | |||
| 1462 | CRYPTO_ALG_NEED_FALLBACK, | 1574 | CRYPTO_ALG_NEED_FALLBACK, |
| 1463 | .cra_blocksize = SHA256_BLOCK_SIZE, | 1575 | .cra_blocksize = SHA256_BLOCK_SIZE, |
| 1464 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | 1576 | .cra_ctxsize = sizeof(struct omap_sham_ctx), |
| 1465 | .cra_alignmask = 0, | 1577 | .cra_alignmask = OMAP_ALIGN_MASK, |
| 1466 | .cra_module = THIS_MODULE, | 1578 | .cra_module = THIS_MODULE, |
| 1467 | .cra_init = omap_sham_cra_init, | 1579 | .cra_init = omap_sham_cra_init, |
| 1468 | .cra_exit = omap_sham_cra_exit, | 1580 | .cra_exit = omap_sham_cra_exit, |
| @@ -1535,7 +1647,7 @@ static struct ahash_alg algs_sha384_sha512[] = { | |||
| 1535 | CRYPTO_ALG_NEED_FALLBACK, | 1647 | CRYPTO_ALG_NEED_FALLBACK, |
| 1536 | .cra_blocksize = SHA384_BLOCK_SIZE, | 1648 | .cra_blocksize = SHA384_BLOCK_SIZE, |
| 1537 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | 1649 | .cra_ctxsize = sizeof(struct omap_sham_ctx), |
| 1538 | .cra_alignmask = 0, | 1650 | .cra_alignmask = OMAP_ALIGN_MASK, |
| 1539 | .cra_module = THIS_MODULE, | 1651 | .cra_module = THIS_MODULE, |
| 1540 | .cra_init = omap_sham_cra_init, | 1652 | .cra_init = omap_sham_cra_init, |
| 1541 | .cra_exit = omap_sham_cra_exit, | 1653 | .cra_exit = omap_sham_cra_exit, |
| @@ -1557,7 +1669,7 @@ static struct ahash_alg algs_sha384_sha512[] = { | |||
| 1557 | CRYPTO_ALG_NEED_FALLBACK, | 1669 | CRYPTO_ALG_NEED_FALLBACK, |
| 1558 | .cra_blocksize = SHA512_BLOCK_SIZE, | 1670 | .cra_blocksize = SHA512_BLOCK_SIZE, |
| 1559 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | 1671 | .cra_ctxsize = sizeof(struct omap_sham_ctx), |
| 1560 | .cra_alignmask = 0, | 1672 | .cra_alignmask = OMAP_ALIGN_MASK, |
| 1561 | .cra_module = THIS_MODULE, | 1673 | .cra_module = THIS_MODULE, |
| 1562 | .cra_init = omap_sham_cra_init, | 1674 | .cra_init = omap_sham_cra_init, |
| 1563 | .cra_exit = omap_sham_cra_exit, | 1675 | .cra_exit = omap_sham_cra_exit, |
| @@ -1624,12 +1736,8 @@ static void omap_sham_done_task(unsigned long data) | |||
| 1624 | } | 1736 | } |
| 1625 | 1737 | ||
| 1626 | if (test_bit(FLAGS_CPU, &dd->flags)) { | 1738 | if (test_bit(FLAGS_CPU, &dd->flags)) { |
| 1627 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { | 1739 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) |
| 1628 | /* hash or semi-hash ready */ | 1740 | goto finish; |
| 1629 | err = omap_sham_update_cpu(dd); | ||
| 1630 | if (err != -EINPROGRESS) | ||
| 1631 | goto finish; | ||
| 1632 | } | ||
| 1633 | } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { | 1741 | } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { |
| 1634 | if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { | 1742 | if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { |
| 1635 | omap_sham_update_dma_stop(dd); | 1743 | omap_sham_update_dma_stop(dd); |
| @@ -1641,8 +1749,6 @@ static void omap_sham_done_task(unsigned long data) | |||
| 1641 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { | 1749 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { |
| 1642 | /* hash or semi-hash ready */ | 1750 | /* hash or semi-hash ready */ |
| 1643 | clear_bit(FLAGS_DMA_READY, &dd->flags); | 1751 | clear_bit(FLAGS_DMA_READY, &dd->flags); |
| 1644 | err = omap_sham_update_dma_start(dd); | ||
| 1645 | if (err != -EINPROGRESS) | ||
| 1646 | goto finish; | 1752 | goto finish; |
| 1647 | } | 1753 | } |
| 1648 | } | 1754 | } |
| @@ -1653,6 +1759,10 @@ finish: | |||
| 1653 | dev_dbg(dd->dev, "update done: err: %d\n", err); | 1759 | dev_dbg(dd->dev, "update done: err: %d\n", err); |
| 1654 | /* finish current request */ | 1760 | /* finish current request */ |
| 1655 | omap_sham_finish_req(dd->req, err); | 1761 | omap_sham_finish_req(dd->req, err); |
| 1762 | |||
| 1763 | /* If we are not busy, process next req */ | ||
| 1764 | if (!test_bit(FLAGS_BUSY, &dd->flags)) | ||
| 1765 | omap_sham_handle_queue(dd, NULL); | ||
| 1656 | } | 1766 | } |
| 1657 | 1767 | ||
| 1658 | static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd) | 1768 | static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd) |
| @@ -1977,8 +2087,14 @@ static int omap_sham_probe(struct platform_device *pdev) | |||
| 1977 | 2087 | ||
| 1978 | for (i = 0; i < dd->pdata->algs_info_size; i++) { | 2088 | for (i = 0; i < dd->pdata->algs_info_size; i++) { |
| 1979 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { | 2089 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { |
| 1980 | err = crypto_register_ahash( | 2090 | struct ahash_alg *alg; |
| 1981 | &dd->pdata->algs_info[i].algs_list[j]); | 2091 | |
| 2092 | alg = &dd->pdata->algs_info[i].algs_list[j]; | ||
| 2093 | alg->export = omap_sham_export; | ||
| 2094 | alg->import = omap_sham_import; | ||
| 2095 | alg->halg.statesize = sizeof(struct omap_sham_reqctx) + | ||
| 2096 | BUFLEN; | ||
| 2097 | err = crypto_register_ahash(alg); | ||
| 1982 | if (err) | 2098 | if (err) |
| 1983 | goto err_algs; | 2099 | goto err_algs; |
| 1984 | 2100 | ||
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h index 2f2681d3458a..afc9a0a86747 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h | |||
| @@ -55,7 +55,7 @@ | |||
| 55 | #define ADF_C3XXX_MAX_ACCELERATORS 3 | 55 | #define ADF_C3XXX_MAX_ACCELERATORS 3 |
| 56 | #define ADF_C3XXX_MAX_ACCELENGINES 6 | 56 | #define ADF_C3XXX_MAX_ACCELENGINES 6 |
| 57 | #define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16 | 57 | #define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16 |
| 58 | #define ADF_C3XXX_ACCELERATORS_MASK 0x3 | 58 | #define ADF_C3XXX_ACCELERATORS_MASK 0x7 |
| 59 | #define ADF_C3XXX_ACCELENGINES_MASK 0x3F | 59 | #define ADF_C3XXX_ACCELENGINES_MASK 0x3F |
| 60 | #define ADF_C3XXX_ETR_MAX_BANKS 16 | 60 | #define ADF_C3XXX_ETR_MAX_BANKS 16 |
| 61 | #define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) | 61 | #define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) |
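The C3XXX mask change is simple bit arithmetic: the part exposes up to ADF_C3XXX_MAX_ACCELERATORS == 3 accelerators, so the presence mask needs the three low bits; 0x3 silently hid the third accelerator. The general form, shown for illustration only:

    /* A presence mask for n units is the n lowest bits set. */
    #define UNIT_MASK(n)    ((1U << (n)) - 1)
    /*
     * UNIT_MASK(3) == 0x07 - matches the corrected ACCELERATORS_MASK
     * UNIT_MASK(6) == 0x3F - matches ACCELENGINES_MASK (6 engines)
     */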
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index ce7c4626c983..3744b22f0c46 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c | |||
| @@ -146,6 +146,7 @@ struct adf_admin_comms { | |||
| 146 | dma_addr_t phy_addr; | 146 | dma_addr_t phy_addr; |
| 147 | dma_addr_t const_tbl_addr; | 147 | dma_addr_t const_tbl_addr; |
| 148 | void *virt_addr; | 148 | void *virt_addr; |
| 149 | void *virt_tbl_addr; | ||
| 149 | void __iomem *mailbox_addr; | 150 | void __iomem *mailbox_addr; |
| 150 | struct mutex lock; /* protects adf_admin_comms struct */ | 151 | struct mutex lock; /* protects adf_admin_comms struct */ |
| 151 | }; | 152 | }; |
| @@ -251,17 +252,19 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) | |||
| 251 | return -ENOMEM; | 252 | return -ENOMEM; |
| 252 | } | 253 | } |
| 253 | 254 | ||
| 254 | admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev), | 255 | admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), |
| 255 | (void *) const_tab, 1024, | 256 | PAGE_SIZE, |
| 256 | DMA_TO_DEVICE); | 257 | &admin->const_tbl_addr, |
| 257 | 258 | GFP_KERNEL); | |
| 258 | if (unlikely(dma_mapping_error(&GET_DEV(accel_dev), | 259 | if (!admin->virt_tbl_addr) { |
| 259 | admin->const_tbl_addr))) { | 260 | dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); |
| 260 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | 261 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, |
| 261 | admin->virt_addr, admin->phy_addr); | 262 | admin->virt_addr, admin->phy_addr); |
| 262 | kfree(admin); | 263 | kfree(admin); |
| 263 | return -ENOMEM; | 264 | return -ENOMEM; |
| 264 | } | 265 | } |
| 266 | |||
| 267 | memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab)); | ||
| 265 | reg_val = (u64)admin->phy_addr; | 268 | reg_val = (u64)admin->phy_addr; |
| 266 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); | 269 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); |
| 267 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); | 270 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); |
| @@ -282,9 +285,10 @@ void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) | |||
| 282 | if (admin->virt_addr) | 285 | if (admin->virt_addr) |
| 283 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | 286 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, |
| 284 | admin->virt_addr, admin->phy_addr); | 287 | admin->virt_addr, admin->phy_addr); |
| 288 | if (admin->virt_tbl_addr) | ||
| 289 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
| 290 | admin->virt_tbl_addr, admin->const_tbl_addr); | ||
| 285 | 291 | ||
| 286 | dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024, | ||
| 287 | DMA_TO_DEVICE); | ||
| 288 | mutex_destroy(&admin->lock); | 292 | mutex_destroy(&admin->lock); |
| 289 | kfree(admin); | 293 | kfree(admin); |
| 290 | accel_dev->admin = NULL; | 294 | accel_dev->admin = NULL; |
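The adf_admin change removes a misuse of the streaming DMA API: const_tab is a static table in kernel rodata, and dma_map_single() over static read-only data is not a supported pattern. The replacement is the standard idiom of allocating a DMA-coherent buffer and copying the table into it; a hedged sketch with a placeholder table:

    static const u8 const_tab[1024] = { 0 };   /* device constants, elided */
    void *virt;
    dma_addr_t dma;

    virt = dma_zalloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
    if (!virt)
            return -ENOMEM;
    memcpy(virt, const_tab, sizeof(const_tab));
    /* hand "dma" to the device; on teardown: */
    dma_free_coherent(dev, PAGE_SIZE, virt, dma);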
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 9b961b37a282..e2454d90d949 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c | |||
| @@ -967,10 +967,6 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) | |||
| 967 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | 967 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; |
| 968 | unsigned int ae; | 968 | unsigned int ae; |
| 969 | 969 | ||
| 970 | obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), | ||
| 971 | GFP_KERNEL); | ||
| 972 | if (!obj_handle->uword_buf) | ||
| 973 | return -ENOMEM; | ||
| 974 | obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; | 970 | obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; |
| 975 | obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) | 971 | obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) |
| 976 | obj_handle->obj_hdr->file_buff; | 972 | obj_handle->obj_hdr->file_buff; |
| @@ -982,6 +978,10 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) | |||
| 982 | pr_err("QAT: UOF incompatible\n"); | 978 | pr_err("QAT: UOF incompatible\n"); |
| 983 | return -EINVAL; | 979 | return -EINVAL; |
| 984 | } | 980 | } |
| 981 | obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), | ||
| 982 | GFP_KERNEL); | ||
| 983 | if (!obj_handle->uword_buf) | ||
| 984 | return -ENOMEM; | ||
| 985 | obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; | 985 | obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; |
| 986 | if (!obj_handle->obj_hdr->file_buff || | 986 | if (!obj_handle->obj_hdr->file_buff || |
| 987 | !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, | 987 | !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, |
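Moving the kcalloc() below the compatibility checks in qat_uclo_parse_uof_obj() closes a memory leak: previously an incompatible UOF image returned -EINVAL with uword_buf already allocated and nothing to free it. The usual discipline, validate first and allocate last, in a generic sketch (header_is_compatible() is a placeholder):

    if (!header_is_compatible(hdr))         /* cheap validation first   */
            return -EINVAL;
    buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), GFP_KERNEL);
    if (!buf)                               /* allocate last, so every  */
            return -ENOMEM;                 /* earlier return is clean  */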
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index af508258d2ea..d0f80c6241f9 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c | |||
| @@ -304,11 +304,9 @@ static int rk_crypto_probe(struct platform_device *pdev) | |||
| 304 | usleep_range(10, 20); | 304 | usleep_range(10, 20); |
| 305 | reset_control_deassert(crypto_info->rst); | 305 | reset_control_deassert(crypto_info->rst); |
| 306 | 306 | ||
| 307 | err = devm_add_action(dev, rk_crypto_action, crypto_info); | 307 | err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info); |
| 308 | if (err) { | 308 | if (err) |
| 309 | reset_control_assert(crypto_info->rst); | ||
| 310 | goto err_crypto; | 309 | goto err_crypto; |
| 311 | } | ||
| 312 | 310 | ||
| 313 | spin_lock_init(&crypto_info->lock); | 311 | spin_lock_init(&crypto_info->lock); |
| 314 | 312 | ||
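devm_add_action_or_reset() has exactly the contract the removed lines implemented by hand: it behaves like devm_add_action(), except that when registering the cleanup action fails it invokes the action itself before returning the error. The resulting caller shape:

    err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
    if (err)
            goto err_crypto;   /* rk_crypto_action() already ran, so the
                                  reset line was asserted for us */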
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 3830d7c4e138..90efd10d57a1 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | |||
| @@ -29,7 +29,8 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) | |||
| 29 | u32 tx_cnt = 0; | 29 | u32 tx_cnt = 0; |
| 30 | u32 spaces; | 30 | u32 spaces; |
| 31 | u32 v; | 31 | u32 v; |
| 32 | int i, err = 0; | 32 | int err = 0; |
| 33 | unsigned int i; | ||
| 33 | unsigned int ileft = areq->nbytes; | 34 | unsigned int ileft = areq->nbytes; |
| 34 | unsigned int oleft = areq->nbytes; | 35 | unsigned int oleft = areq->nbytes; |
| 35 | unsigned int todo; | 36 | unsigned int todo; |
| @@ -139,7 +140,8 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
| 139 | u32 tx_cnt = 0; | 140 | u32 tx_cnt = 0; |
| 140 | u32 v; | 141 | u32 v; |
| 141 | u32 spaces; | 142 | u32 spaces; |
| 142 | int i, err = 0; | 143 | int err = 0; |
| 144 | unsigned int i; | ||
| 143 | unsigned int ileft = areq->nbytes; | 145 | unsigned int ileft = areq->nbytes; |
| 144 | unsigned int oleft = areq->nbytes; | 146 | unsigned int oleft = areq->nbytes; |
| 145 | unsigned int todo; | 147 | unsigned int todo; |
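The sun4i-ss counter change is a type-correctness fix: i only indexes buffers and is compared against unsigned byte counts such as ileft and oleft, so declaring it int draws -Wsign-compare warnings and forces implicit conversions. A stand-alone illustration of the hazard (not driver code):

    unsigned int len = 8, sum = 0;
    int i;                          /* the old, signed declaration      */

    for (i = 0; i < len; i++)       /* i converts to unsigned here, so  */
            sum += i;               /* -Wsign-compare flags the test    */
    /* declaring i as unsigned int keeps the comparison well-typed */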
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 107cd2a41cae..3ac6c6c4ad18 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c | |||
| @@ -172,45 +172,45 @@ static struct sun4i_ss_alg_template ss_algs[] = { | |||
| 172 | }, | 172 | }, |
| 173 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 173 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
| 174 | .alg.crypto = { | 174 | .alg.crypto = { |
| 175 | .cra_name = "cbc(des3_ede)", | 175 | .cra_name = "cbc(des3_ede)", |
| 176 | .cra_driver_name = "cbc-des3-sun4i-ss", | 176 | .cra_driver_name = "cbc-des3-sun4i-ss", |
| 177 | .cra_priority = 300, | 177 | .cra_priority = 300, |
| 178 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 178 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 179 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | 179 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, |
| 180 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | 180 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), |
| 181 | .cra_module = THIS_MODULE, | 181 | .cra_module = THIS_MODULE, |
| 182 | .cra_alignmask = 3, | 182 | .cra_alignmask = 3, |
| 183 | .cra_type = &crypto_ablkcipher_type, | 183 | .cra_type = &crypto_ablkcipher_type, |
| 184 | .cra_init = sun4i_ss_cipher_init, | 184 | .cra_init = sun4i_ss_cipher_init, |
| 185 | .cra_u.ablkcipher = { | 185 | .cra_u.ablkcipher = { |
| 186 | .min_keysize = DES3_EDE_KEY_SIZE, | 186 | .min_keysize = DES3_EDE_KEY_SIZE, |
| 187 | .max_keysize = DES3_EDE_KEY_SIZE, | 187 | .max_keysize = DES3_EDE_KEY_SIZE, |
| 188 | .ivsize = DES3_EDE_BLOCK_SIZE, | 188 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 189 | .setkey = sun4i_ss_des3_setkey, | 189 | .setkey = sun4i_ss_des3_setkey, |
| 190 | .encrypt = sun4i_ss_cbc_des3_encrypt, | 190 | .encrypt = sun4i_ss_cbc_des3_encrypt, |
| 191 | .decrypt = sun4i_ss_cbc_des3_decrypt, | 191 | .decrypt = sun4i_ss_cbc_des3_decrypt, |
| 192 | } | 192 | } |
| 193 | } | 193 | } |
| 194 | }, | 194 | }, |
| 195 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 195 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
| 196 | .alg.crypto = { | 196 | .alg.crypto = { |
| 197 | .cra_name = "ecb(des3_ede)", | 197 | .cra_name = "ecb(des3_ede)", |
| 198 | .cra_driver_name = "ecb-des3-sun4i-ss", | 198 | .cra_driver_name = "ecb-des3-sun4i-ss", |
| 199 | .cra_priority = 300, | 199 | .cra_priority = 300, |
| 200 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 200 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 201 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | 201 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, |
| 202 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | 202 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), |
| 203 | .cra_module = THIS_MODULE, | 203 | .cra_module = THIS_MODULE, |
| 204 | .cra_alignmask = 3, | 204 | .cra_alignmask = 3, |
| 205 | .cra_type = &crypto_ablkcipher_type, | 205 | .cra_type = &crypto_ablkcipher_type, |
| 206 | .cra_init = sun4i_ss_cipher_init, | 206 | .cra_init = sun4i_ss_cipher_init, |
| 207 | .cra_u.ablkcipher = { | 207 | .cra_u.ablkcipher = { |
| 208 | .min_keysize = DES3_EDE_KEY_SIZE, | 208 | .min_keysize = DES3_EDE_KEY_SIZE, |
| 209 | .max_keysize = DES3_EDE_KEY_SIZE, | 209 | .max_keysize = DES3_EDE_KEY_SIZE, |
| 210 | .ivsize = DES3_EDE_BLOCK_SIZE, | 210 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 211 | .setkey = sun4i_ss_des3_setkey, | 211 | .setkey = sun4i_ss_des3_setkey, |
| 212 | .encrypt = sun4i_ss_ecb_des3_encrypt, | 212 | .encrypt = sun4i_ss_ecb_des3_encrypt, |
| 213 | .decrypt = sun4i_ss_ecb_des3_decrypt, | 213 | .decrypt = sun4i_ss_ecb_des3_decrypt, |
| 214 | } | 214 | } |
| 215 | } | 215 | } |
| 216 | }, | 216 | }, |
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index ff8031498809..0de2f62d51ff 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c | |||
| @@ -20,6 +20,15 @@ | |||
| 20 | 20 | ||
| 21 | int sun4i_hash_crainit(struct crypto_tfm *tfm) | 21 | int sun4i_hash_crainit(struct crypto_tfm *tfm) |
| 22 | { | 22 | { |
| 23 | struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); | ||
| 24 | struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); | ||
| 25 | struct sun4i_ss_alg_template *algt; | ||
| 26 | |||
| 27 | memset(op, 0, sizeof(struct sun4i_tfm_ctx)); | ||
| 28 | |||
| 29 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); | ||
| 30 | op->ss = algt->ss; | ||
| 31 | |||
| 23 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 32 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| 24 | sizeof(struct sun4i_req_ctx)); | 33 | sizeof(struct sun4i_req_ctx)); |
| 25 | return 0; | 34 | return 0; |
| @@ -32,13 +41,10 @@ int sun4i_hash_init(struct ahash_request *areq) | |||
| 32 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | 41 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
| 33 | struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); | 42 | struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); |
| 34 | struct sun4i_ss_alg_template *algt; | 43 | struct sun4i_ss_alg_template *algt; |
| 35 | struct sun4i_ss_ctx *ss; | ||
| 36 | 44 | ||
| 37 | memset(op, 0, sizeof(struct sun4i_req_ctx)); | 45 | memset(op, 0, sizeof(struct sun4i_req_ctx)); |
| 38 | 46 | ||
| 39 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); | 47 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); |
| 40 | ss = algt->ss; | ||
| 41 | op->ss = algt->ss; | ||
| 42 | op->mode = algt->mode; | 48 | op->mode = algt->mode; |
| 43 | 49 | ||
| 44 | return 0; | 50 | return 0; |
| @@ -129,6 +135,9 @@ int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) | |||
| 129 | return 0; | 135 | return 0; |
| 130 | } | 136 | } |
| 131 | 137 | ||
| 138 | #define SS_HASH_UPDATE 1 | ||
| 139 | #define SS_HASH_FINAL 2 | ||
| 140 | |||
| 132 | /* | 141 | /* |
| 133 | * sun4i_hash_update: update hash engine | 142 | * sun4i_hash_update: update hash engine |
| 134 | * | 143 | * |
| @@ -156,7 +165,7 @@ int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) | |||
| 156 | * write remaining data in op->buf | 165 | * write remaining data in op->buf |
| 157 | * final state op->len=56 | 166 | * final state op->len=56 |
| 158 | */ | 167 | */ |
| 159 | int sun4i_hash_update(struct ahash_request *areq) | 168 | static int sun4i_hash(struct ahash_request *areq) |
| 160 | { | 169 | { |
| 161 | u32 v, ivmode = 0; | 170 | u32 v, ivmode = 0; |
| 162 | unsigned int i = 0; | 171 | unsigned int i = 0; |
| @@ -167,8 +176,9 @@ int sun4i_hash_update(struct ahash_request *areq) | |||
| 167 | */ | 176 | */ |
| 168 | 177 | ||
| 169 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | 178 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); |
| 170 | struct sun4i_ss_ctx *ss = op->ss; | ||
| 171 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | 179 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
| 180 | struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); | ||
| 181 | struct sun4i_ss_ctx *ss = tfmctx->ss; | ||
| 172 | unsigned int in_i = 0; /* advancement in the current SG */ | 182 | unsigned int in_i = 0; /* advancement in the current SG */ |
| 173 | unsigned int end; | 183 | unsigned int end; |
| 174 | /* | 184 | /* |
| @@ -180,22 +190,30 @@ int sun4i_hash_update(struct ahash_request *areq) | |||
| 180 | u32 spaces, rx_cnt = SS_RX_DEFAULT; | 190 | u32 spaces, rx_cnt = SS_RX_DEFAULT; |
| 181 | size_t copied = 0; | 191 | size_t copied = 0; |
| 182 | struct sg_mapping_iter mi; | 192 | struct sg_mapping_iter mi; |
| 193 | unsigned int j = 0; | ||
| 194 | int zeros; | ||
| 195 | unsigned int index, padlen; | ||
| 196 | __be64 bits; | ||
| 197 | u32 bf[32]; | ||
| 198 | u32 wb = 0; | ||
| 199 | unsigned int nwait, nbw = 0; | ||
| 200 | struct scatterlist *in_sg = areq->src; | ||
| 183 | 201 | ||
| 184 | dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", | 202 | dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", |
| 185 | __func__, crypto_tfm_alg_name(areq->base.tfm), | 203 | __func__, crypto_tfm_alg_name(areq->base.tfm), |
| 186 | op->byte_count, areq->nbytes, op->mode, | 204 | op->byte_count, areq->nbytes, op->mode, |
| 187 | op->len, op->hash[0]); | 205 | op->len, op->hash[0]); |
| 188 | 206 | ||
| 189 | if (areq->nbytes == 0) | 207 | if (unlikely(areq->nbytes == 0) && (op->flags & SS_HASH_FINAL) == 0) |
| 190 | return 0; | 208 | return 0; |
| 191 | 209 | ||
| 192 | /* protect against overflow */ | 210 | /* protect against overflow */ |
| 193 | if (areq->nbytes > UINT_MAX - op->len) { | 211 | if (unlikely(areq->nbytes > UINT_MAX - op->len)) { |
| 194 | dev_err(ss->dev, "Cannot process too large request\n"); | 212 | dev_err(ss->dev, "Cannot process too large request\n"); |
| 195 | return -EINVAL; | 213 | return -EINVAL; |
| 196 | } | 214 | } |
| 197 | 215 | ||
| 198 | if (op->len + areq->nbytes < 64) { | 216 | if (op->len + areq->nbytes < 64 && (op->flags & SS_HASH_FINAL) == 0) { |
| 199 | /* linearize data to op->buf */ | 217 | /* linearize data to op->buf */ |
| 200 | copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | 218 | copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
| 201 | op->buf + op->len, areq->nbytes, 0); | 219 | op->buf + op->len, areq->nbytes, 0); |
| @@ -203,14 +221,6 @@ int sun4i_hash_update(struct ahash_request *areq) | |||
| 203 | return 0; | 221 | return 0; |
| 204 | } | 222 | } |
| 205 | 223 | ||
| 206 | end = ((areq->nbytes + op->len) / 64) * 64 - op->len; | ||
| 207 | |||
| 208 | if (end > areq->nbytes || areq->nbytes - end > 63) { | ||
| 209 | dev_err(ss->dev, "ERROR: Bound error %u %u\n", | ||
| 210 | end, areq->nbytes); | ||
| 211 | return -EINVAL; | ||
| 212 | } | ||
| 213 | |||
| 214 | spin_lock_bh(&ss->slock); | 224 | spin_lock_bh(&ss->slock); |
| 215 | 225 | ||
| 216 | /* | 226 | /* |
| @@ -225,6 +235,34 @@ int sun4i_hash_update(struct ahash_request *areq) | |||
| 225 | /* Enable the device */ | 235 | /* Enable the device */ |
| 226 | writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); | 236 | writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); |
| 227 | 237 | ||
| 238 | if ((op->flags & SS_HASH_UPDATE) == 0) | ||
| 239 | goto hash_final; | ||
| 240 | |||
| 241 | /* start of handling data */ | ||
| 242 | if ((op->flags & SS_HASH_FINAL) == 0) { | ||
| 243 | end = ((areq->nbytes + op->len) / 64) * 64 - op->len; | ||
| 244 | |||
| 245 | if (end > areq->nbytes || areq->nbytes - end > 63) { | ||
| 246 | dev_err(ss->dev, "ERROR: Bound error %u %u\n", | ||
| 247 | end, areq->nbytes); | ||
| 248 | err = -EINVAL; | ||
| 249 | goto release_ss; | ||
| 250 | } | ||
| 251 | } else { | ||
| 252 | /* Since we have the final flag, we can go up to modulo 4 */ | ||
| 253 | end = ((areq->nbytes + op->len) / 4) * 4 - op->len; | ||
| 254 | } | ||
| 255 | |||
| 256 | /* TODO if SGlen % 4 and op->len == 0 then DMA */ | ||
| 257 | i = 1; | ||
| 258 | while (in_sg && i == 1) { | ||
| 259 | if ((in_sg->length % 4) != 0) | ||
| 260 | i = 0; | ||
| 261 | in_sg = sg_next(in_sg); | ||
| 262 | } | ||
| 263 | if (i == 1 && op->len == 0) | ||
| 264 | dev_dbg(ss->dev, "We can DMA\n"); | ||
| 265 | |||
| 228 | i = 0; | 266 | i = 0; |
| 229 | sg_miter_start(&mi, areq->src, sg_nents(areq->src), | 267 | sg_miter_start(&mi, areq->src, sg_nents(areq->src), |
| 230 | SG_MITER_FROM_SG | SG_MITER_ATOMIC); | 268 | SG_MITER_FROM_SG | SG_MITER_ATOMIC); |
| @@ -285,7 +323,11 @@ int sun4i_hash_update(struct ahash_request *areq) | |||
| 285 | } | 323 | } |
| 286 | } | 324 | } |
| 287 | } while (i < end); | 325 | } while (i < end); |
| 288 | /* final linear */ | 326 | |
| 327 | /* | ||
| 328 | * Now we have written to the device all that we can, | ||
| 329 | * store the remaining bytes in op->buf | ||
| 330 | */ | ||
| 289 | if ((areq->nbytes - i) < 64) { | 331 | if ((areq->nbytes - i) < 64) { |
| 290 | while (i < areq->nbytes && in_i < mi.length && op->len < 64) { | 332 | while (i < areq->nbytes && in_i < mi.length && op->len < 64) { |
| 291 | /* how many bytes we can read from current SG */ | 333 | /* how many bytes we can read from current SG */ |
| @@ -304,13 +346,21 @@ int sun4i_hash_update(struct ahash_request *areq) | |||
| 304 | 346 | ||
| 305 | sg_miter_stop(&mi); | 347 | sg_miter_stop(&mi); |
| 306 | 348 | ||
| 349 | /* | ||
| 350 | * End of data processing. | ||
| 351 | * Now, if we have the final flag, go to the finalize part; | ||
| 352 | * if not, store the partial hash. | ||
| 353 | */ | ||
| 354 | if ((op->flags & SS_HASH_FINAL) > 0) | ||
| 355 | goto hash_final; | ||
| 356 | |||
| 307 | writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); | 357 | writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); |
| 308 | i = 0; | 358 | i = 0; |
| 309 | do { | 359 | do { |
| 310 | v = readl(ss->base + SS_CTL); | 360 | v = readl(ss->base + SS_CTL); |
| 311 | i++; | 361 | i++; |
| 312 | } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); | 362 | } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); |
| 313 | if (i >= SS_TIMEOUT) { | 363 | if (unlikely(i >= SS_TIMEOUT)) { |
| 314 | dev_err_ratelimited(ss->dev, | 364 | dev_err_ratelimited(ss->dev, |
| 315 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", | 365 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", |
| 316 | i, SS_TIMEOUT, v, areq->nbytes); | 366 | i, SS_TIMEOUT, v, areq->nbytes); |
| @@ -318,56 +368,24 @@ int sun4i_hash_update(struct ahash_request *areq) | |||
| 318 | goto release_ss; | 368 | goto release_ss; |
| 319 | } | 369 | } |
| 320 | 370 | ||
| 321 | /* get the partial hash only if something was written */ | ||
| 322 | for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) | 371 | for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) |
| 323 | op->hash[i] = readl(ss->base + SS_MD0 + i * 4); | 372 | op->hash[i] = readl(ss->base + SS_MD0 + i * 4); |
| 324 | 373 | ||
| 325 | release_ss: | 374 | goto release_ss; |
| 326 | writel(0, ss->base + SS_CTL); | ||
| 327 | spin_unlock_bh(&ss->slock); | ||
| 328 | return err; | ||
| 329 | } | ||
| 330 | 375 | ||
| 331 | /* | 376 | /* |
| 332 | * sun4i_hash_final: finalize hashing operation | 377 | * hash_final: finalize hashing operation |
| 333 | * | 378 | * |
| 334 | * If we have some remaining bytes, we write them. | 379 | * If we have some remaining bytes, we write them. |
| 335 | * Then ask the SS to finalize the hashing operation | 380 | * Then ask the SS to finalize the hashing operation |
| 336 | * | 381 | * |
| 337 | * I do not check RX FIFO size in this function since the size is 32 | 382 | * I do not check RX FIFO size in this function since the size is 32 |
| 338 | * after each enabling and this function never writes more than 32 words. | 383 | * after each enabling and this function never writes more than 32 words. |
| 384 | * If we come from the update part, we cannot have more than | ||
| 385 | * 3 remaining bytes to write and SS is fast enough to not care about it. | ||
| 339 | */ | 386 | */ |
| 340 | int sun4i_hash_final(struct ahash_request *areq) | ||
| 341 | { | ||
| 342 | u32 v, ivmode = 0; | ||
| 343 | unsigned int i; | ||
| 344 | unsigned int j = 0; | ||
| 345 | int zeros, err = 0; | ||
| 346 | unsigned int index, padlen; | ||
| 347 | __be64 bits; | ||
| 348 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 349 | struct sun4i_ss_ctx *ss = op->ss; | ||
| 350 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
| 351 | u32 bf[32]; | ||
| 352 | u32 wb = 0; | ||
| 353 | unsigned int nwait, nbw = 0; | ||
| 354 | |||
| 355 | dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x", | ||
| 356 | __func__, op->byte_count, areq->nbytes, op->mode, | ||
| 357 | op->len, op->hash[0]); | ||
| 358 | 387 | ||
| 359 | spin_lock_bh(&ss->slock); | 388 | hash_final: |
| 360 | |||
| 361 | /* | ||
| 362 | * if we have already written something, | ||
| 363 | * restore the partial hash state | ||
| 364 | */ | ||
| 365 | if (op->byte_count > 0) { | ||
| 366 | ivmode = SS_IV_ARBITRARY; | ||
| 367 | for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) | ||
| 368 | writel(op->hash[i], ss->base + SS_IV0 + i * 4); | ||
| 369 | } | ||
| 370 | writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); | ||
| 371 | 389 | ||
| 372 | /* write the remaining words of the wait buffer */ | 390 | /* write the remaining words of the wait buffer */ |
| 373 | if (op->len > 0) { | 391 | if (op->len > 0) { |
| @@ -428,7 +446,7 @@ int sun4i_hash_final(struct ahash_request *areq) | |||
| 428 | 446 | ||
| 429 | /* | 447 | /* |
| 430 | * Wait for SS to finish the hash. | 448 | * Wait for SS to finish the hash. |
| 431 | * The timeout could happen only in case of bad overcloking | 449 | * The timeout could happen only in case of bad overclocking |
| 432 | * or driver bug. | 450 | * or driver bug. |
| 433 | */ | 451 | */ |
| 434 | i = 0; | 452 | i = 0; |
| @@ -436,7 +454,7 @@ int sun4i_hash_final(struct ahash_request *areq) | |||
| 436 | v = readl(ss->base + SS_CTL); | 454 | v = readl(ss->base + SS_CTL); |
| 437 | i++; | 455 | i++; |
| 438 | } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); | 456 | } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); |
| 439 | if (i >= SS_TIMEOUT) { | 457 | if (unlikely(i >= SS_TIMEOUT)) { |
| 440 | dev_err_ratelimited(ss->dev, | 458 | dev_err_ratelimited(ss->dev, |
| 441 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", | 459 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", |
| 442 | i, SS_TIMEOUT, v, areq->nbytes); | 460 | i, SS_TIMEOUT, v, areq->nbytes); |
| @@ -463,30 +481,41 @@ release_ss: | |||
| 463 | return err; | 481 | return err; |
| 464 | } | 482 | } |
| 465 | 483 | ||
| 484 | int sun4i_hash_final(struct ahash_request *areq) | ||
| 485 | { | ||
| 486 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 487 | |||
| 488 | op->flags = SS_HASH_FINAL; | ||
| 489 | return sun4i_hash(areq); | ||
| 490 | } | ||
| 491 | |||
| 492 | int sun4i_hash_update(struct ahash_request *areq) | ||
| 493 | { | ||
| 494 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 495 | |||
| 496 | op->flags = SS_HASH_UPDATE; | ||
| 497 | return sun4i_hash(areq); | ||
| 498 | } | ||
| 499 | |||
| 466 | /* sun4i_hash_finup: finalize hashing operation after an update */ | 500 | /* sun4i_hash_finup: finalize hashing operation after an update */ |
| 467 | int sun4i_hash_finup(struct ahash_request *areq) | 501 | int sun4i_hash_finup(struct ahash_request *areq) |
| 468 | { | 502 | { |
| 469 | int err; | 503 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); |
| 470 | |||
| 471 | err = sun4i_hash_update(areq); | ||
| 472 | if (err != 0) | ||
| 473 | return err; | ||
| 474 | 504 | ||
| 475 | return sun4i_hash_final(areq); | 505 | op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; |
| 506 | return sun4i_hash(areq); | ||
| 476 | } | 507 | } |
| 477 | 508 | ||
| 478 | /* combo of init/update/final functions */ | 509 | /* combo of init/update/final functions */ |
| 479 | int sun4i_hash_digest(struct ahash_request *areq) | 510 | int sun4i_hash_digest(struct ahash_request *areq) |
| 480 | { | 511 | { |
| 481 | int err; | 512 | int err; |
| 513 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 482 | 514 | ||
| 483 | err = sun4i_hash_init(areq); | 515 | err = sun4i_hash_init(areq); |
| 484 | if (err != 0) | 516 | if (err != 0) |
| 485 | return err; | 517 | return err; |
| 486 | 518 | ||
| 487 | err = sun4i_hash_update(areq); | 519 | op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; |
| 488 | if (err != 0) | 520 | return sun4i_hash(areq); |
| 489 | return err; | ||
| 490 | |||
| 491 | return sun4i_hash_final(areq); | ||
| 492 | } | 521 | } |
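After this refactor the four ahash entry points differ only in which request flags they set before delegating to the shared sun4i_hash() worker; in particular finup() and digest() now take the device spinlock once instead of twice (once for update, once for final). The dispatch, summarized from the hunks above:

    /*
     * update : op->flags = SS_HASH_UPDATE                 (feed data)
     * final  : op->flags = SS_HASH_FINAL                  (pad and digest)
     * finup  : op->flags = SS_HASH_UPDATE | SS_HASH_FINAL (both, one lock)
     * digest : sun4i_hash_init(), then the same flags as finup
     */
    op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
    return sun4i_hash(areq);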
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index 8e9c05f6e4d4..f04c0f8cf026 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h | |||
| @@ -163,7 +163,7 @@ struct sun4i_req_ctx { | |||
| 163 | u32 hash[5]; /* for storing SS_IVx register */ | 163 | u32 hash[5]; /* for storing SS_IVx register */ |
| 164 | char buf[64]; | 164 | char buf[64]; |
| 165 | unsigned int len; | 165 | unsigned int len; |
| 166 | struct sun4i_ss_ctx *ss; | 166 | int flags; |
| 167 | }; | 167 | }; |
| 168 | 168 | ||
| 169 | int sun4i_hash_crainit(struct crypto_tfm *tfm); | 169 | int sun4i_hash_crainit(struct crypto_tfm *tfm); |
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index a83ead109d5f..c3d524ea6998 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config CRYPTO_DEV_VMX_ENCRYPT | 1 | config CRYPTO_DEV_VMX_ENCRYPT |
| 2 | tristate "Encryption acceleration support on P8 CPU" | 2 | tristate "Encryption acceleration support on P8 CPU" |
| 3 | depends on CRYPTO_DEV_VMX | 3 | depends on CRYPTO_DEV_VMX |
| 4 | select CRYPTO_GHASH | ||
| 4 | default m | 5 | default m |
| 5 | help | 6 | help |
| 6 | Support for VMX cryptographic acceleration instructions on Power8 CPU. | 7 | Support for VMX cryptographic acceleration instructions on Power8 CPU. |
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 6c999cb01b80..27a94a119009 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c | |||
| @@ -26,16 +26,13 @@ | |||
| 26 | #include <linux/hardirq.h> | 26 | #include <linux/hardirq.h> |
| 27 | #include <asm/switch_to.h> | 27 | #include <asm/switch_to.h> |
| 28 | #include <crypto/aes.h> | 28 | #include <crypto/aes.h> |
| 29 | #include <crypto/ghash.h> | ||
| 29 | #include <crypto/scatterwalk.h> | 30 | #include <crypto/scatterwalk.h> |
| 30 | #include <crypto/internal/hash.h> | 31 | #include <crypto/internal/hash.h> |
| 31 | #include <crypto/b128ops.h> | 32 | #include <crypto/b128ops.h> |
| 32 | 33 | ||
| 33 | #define IN_INTERRUPT in_interrupt() | 34 | #define IN_INTERRUPT in_interrupt() |
| 34 | 35 | ||
| 35 | #define GHASH_BLOCK_SIZE (16) | ||
| 36 | #define GHASH_DIGEST_SIZE (16) | ||
| 37 | #define GHASH_KEY_LEN (16) | ||
| 38 | |||
| 39 | void gcm_init_p8(u128 htable[16], const u64 Xi[2]); | 36 | void gcm_init_p8(u128 htable[16], const u64 Xi[2]); |
| 40 | void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); | 37 | void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); |
| 41 | void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], | 38 | void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], |
| @@ -55,16 +52,11 @@ struct p8_ghash_desc_ctx { | |||
| 55 | 52 | ||
| 56 | static int p8_ghash_init_tfm(struct crypto_tfm *tfm) | 53 | static int p8_ghash_init_tfm(struct crypto_tfm *tfm) |
| 57 | { | 54 | { |
| 58 | const char *alg; | 55 | const char *alg = "ghash-generic"; |
| 59 | struct crypto_shash *fallback; | 56 | struct crypto_shash *fallback; |
| 60 | struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); | 57 | struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); |
| 61 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); | 58 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); |
| 62 | 59 | ||
| 63 | if (!(alg = crypto_tfm_alg_name(tfm))) { | ||
| 64 | printk(KERN_ERR "Failed to get algorithm name.\n"); | ||
| 65 | return -ENOENT; | ||
| 66 | } | ||
| 67 | |||
| 68 | fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); | 60 | fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); |
| 69 | if (IS_ERR(fallback)) { | 61 | if (IS_ERR(fallback)) { |
| 70 | printk(KERN_ERR | 62 | printk(KERN_ERR |
| @@ -78,10 +70,18 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm) | |||
| 78 | crypto_shash_set_flags(fallback, | 70 | crypto_shash_set_flags(fallback, |
| 79 | crypto_shash_get_flags((struct crypto_shash | 71 | crypto_shash_get_flags((struct crypto_shash |
| 80 | *) tfm)); | 72 | *) tfm)); |
| 81 | ctx->fallback = fallback; | ||
| 82 | 73 | ||
| 83 | shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) | 74 | /* Check if the descsize defined in the algorithm is still enough. */ |
| 84 | + crypto_shash_descsize(fallback); | 75 | if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) |
| 76 | + crypto_shash_descsize(fallback)) { | ||
| 77 | printk(KERN_ERR | ||
| 78 | "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", | ||
| 79 | alg, | ||
| 80 | shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), | ||
| 81 | crypto_shash_descsize(fallback)); | ||
| 82 | return -EINVAL; | ||
| 83 | } | ||
| 84 | ctx->fallback = fallback; | ||
| 85 | 85 | ||
| 86 | return 0; | 86 | return 0; |
| 87 | } | 87 | } |
| @@ -113,7 +113,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, | |||
| 113 | { | 113 | { |
| 114 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); | 114 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); |
| 115 | 115 | ||
| 116 | if (keylen != GHASH_KEY_LEN) | 116 | if (keylen != GHASH_BLOCK_SIZE) |
| 117 | return -EINVAL; | 117 | return -EINVAL; |
| 118 | 118 | ||
| 119 | preempt_disable(); | 119 | preempt_disable(); |
| @@ -211,7 +211,8 @@ struct shash_alg p8_ghash_alg = { | |||
| 211 | .update = p8_ghash_update, | 211 | .update = p8_ghash_update, |
| 212 | .final = p8_ghash_final, | 212 | .final = p8_ghash_final, |
| 213 | .setkey = p8_ghash_setkey, | 213 | .setkey = p8_ghash_setkey, |
| 214 | .descsize = sizeof(struct p8_ghash_desc_ctx), | 214 | .descsize = sizeof(struct p8_ghash_desc_ctx) |
| 215 | + sizeof(struct ghash_desc_ctx), | ||
| 215 | .base = { | 216 | .base = { |
| 216 | .cra_name = "ghash", | 217 | .cra_name = "ghash", |
| 217 | .cra_driver_name = "p8_ghash", | 218 | .cra_driver_name = "p8_ghash", |
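The p8_ghash changes enforce a crypto-core rule: .descsize is read when the shash is instantiated and sizes every descriptor the core hands out, so growing shash_tfm->descsize from the init hook, as the old code did, was too late to matter. The driver now declares the worst case up front (its own context plus the generic fallback's struct ghash_desc_ctx from the new <crypto/ghash.h>), pins the fallback to "ghash-generic" (hence the Kconfig select CRYPTO_GHASH), and init merely verifies the reservation:

    /* Init-time sanity check, as in the new code path above. */
    if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) +
                              crypto_shash_descsize(fallback))
            return -EINVAL;   /* the fallback's desc ctx would overflow
                                 the space our .descsize reserved */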
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index cffc1c095519..c232729f5b1b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -834,6 +834,17 @@ static void quirk_amd_ioapic(struct pci_dev *dev) | |||
| 834 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); | 834 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); |
| 835 | #endif /* CONFIG_X86_IO_APIC */ | 835 | #endif /* CONFIG_X86_IO_APIC */ |
| 836 | 836 | ||
| 837 | #if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS) | ||
| 838 | |||
| 839 | static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev) | ||
| 840 | { | ||
| 841 | /* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */ | ||
| 842 | if (dev->subsystem_device == 0xa118) | ||
| 843 | dev->sriov->link = dev->devfn; | ||
| 844 | } | ||
| 845 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link); | ||
| 846 | #endif | ||
| 847 | |||
| 837 | /* | 848 | /* |
| 838 | * Some settings of MMRBC can lead to data corruption so block changes. | 849 | * Some settings of MMRBC can lead to data corruption so block changes. |
| 839 | * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide | 850 | * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide |
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 8637cdfe382a..404e9558e879 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
| 16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
| 17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 18 | #include <linux/kthread.h> | ||
| 19 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
| 20 | 19 | ||
| 21 | struct crypto_aead; | 20 | struct crypto_aead; |
| @@ -129,75 +128,6 @@ struct ablkcipher_walk { | |||
| 129 | unsigned int blocksize; | 128 | unsigned int blocksize; |
| 130 | }; | 129 | }; |
| 131 | 130 | ||
| 132 | #define ENGINE_NAME_LEN 30 | ||
| 133 | /* | ||
| 134 | * struct crypto_engine - crypto hardware engine | ||
| 135 | * @name: the engine name | ||
| 136 | * @idling: the engine is entering idle state | ||
| 137 | * @busy: request pump is busy | ||
| 138 | * @running: the engine is running | ||
| 139 | * @cur_req_prepared: current request is prepared | ||
| 140 | * @list: link with the global crypto engine list | ||
| 141 | * @queue_lock: spinlock to synchronise access to request queue | ||
| 142 | * @queue: the crypto queue of the engine | ||
| 143 | * @rt: whether this queue is set to run as a realtime task | ||
| 144 | * @prepare_crypt_hardware: a request will soon arrive from the queue | ||
| 145 | * so the subsystem requests the driver to prepare the hardware | ||
| 146 | * by issuing this call | ||
| 147 | * @unprepare_crypt_hardware: there are currently no more requests on the | ||
| 148 | * queue so the subsystem notifies the driver that it may relax the | ||
| 149 | * hardware by issuing this call | ||
| 150 | * @prepare_request: do any preparation, if needed, before handling the current request | ||
| 151 | * @unprepare_request: undo any work done by prepare_request() | ||
| 152 | * @crypt_one_request: perform encryption for the current request | ||
| 153 | * @kworker: thread struct for request pump | ||
| 154 | * @kworker_task: pointer to task for request pump kworker thread | ||
| 155 | * @pump_requests: work struct for scheduling work to the request pump | ||
| 156 | * @priv_data: the engine private data | ||
| 157 | * @cur_req: the request currently being processed | ||
| 158 | */ | ||
| 159 | struct crypto_engine { | ||
| 160 | char name[ENGINE_NAME_LEN]; | ||
| 161 | bool idling; | ||
| 162 | bool busy; | ||
| 163 | bool running; | ||
| 164 | bool cur_req_prepared; | ||
| 165 | |||
| 166 | struct list_head list; | ||
| 167 | spinlock_t queue_lock; | ||
| 168 | struct crypto_queue queue; | ||
| 169 | |||
| 170 | bool rt; | ||
| 171 | |||
| 172 | int (*prepare_crypt_hardware)(struct crypto_engine *engine); | ||
| 173 | int (*unprepare_crypt_hardware)(struct crypto_engine *engine); | ||
| 174 | |||
| 175 | int (*prepare_request)(struct crypto_engine *engine, | ||
| 176 | struct ablkcipher_request *req); | ||
| 177 | int (*unprepare_request)(struct crypto_engine *engine, | ||
| 178 | struct ablkcipher_request *req); | ||
| 179 | int (*crypt_one_request)(struct crypto_engine *engine, | ||
| 180 | struct ablkcipher_request *req); | ||
| 181 | |||
| 182 | struct kthread_worker kworker; | ||
| 183 | struct task_struct *kworker_task; | ||
| 184 | struct kthread_work pump_requests; | ||
| 185 | |||
| 186 | void *priv_data; | ||
| 187 | struct ablkcipher_request *cur_req; | ||
| 188 | }; | ||
| 189 | |||
| 190 | int crypto_transfer_request(struct crypto_engine *engine, | ||
| 191 | struct ablkcipher_request *req, bool need_pump); | ||
| 192 | int crypto_transfer_request_to_engine(struct crypto_engine *engine, | ||
| 193 | struct ablkcipher_request *req); | ||
| 194 | void crypto_finalize_request(struct crypto_engine *engine, | ||
| 195 | struct ablkcipher_request *req, int err); | ||
| 196 | int crypto_engine_start(struct crypto_engine *engine); | ||
| 197 | int crypto_engine_stop(struct crypto_engine *engine); | ||
| 198 | struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); | ||
| 199 | int crypto_engine_exit(struct crypto_engine *engine); | ||
| 200 | |||
| 201 | extern const struct crypto_type crypto_ablkcipher_type; | 131 | extern const struct crypto_type crypto_ablkcipher_type; |
| 202 | extern const struct crypto_type crypto_blkcipher_type; | 132 | extern const struct crypto_type crypto_blkcipher_type; |
| 203 | 133 | ||
diff --git a/include/crypto/engine.h b/include/crypto/engine.h new file mode 100644 index 000000000000..04eb5c77addd --- /dev/null +++ b/include/crypto/engine.h | |||
| @@ -0,0 +1,107 @@ | |||
| 1 | /* | ||
| 2 | * Crypto engine API | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the Free | ||
| 8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 9 | * any later version. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | #ifndef _CRYPTO_ENGINE_H | ||
| 13 | #define _CRYPTO_ENGINE_H | ||
| 14 | |||
| 15 | #include <linux/crypto.h> | ||
| 16 | #include <linux/list.h> | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/kthread.h> | ||
| 19 | #include <crypto/algapi.h> | ||
| 20 | #include <crypto/hash.h> | ||
| 21 | |||
| 22 | #define ENGINE_NAME_LEN 30 | ||
| 23 | /* | ||
| 24 | * struct crypto_engine - crypto hardware engine | ||
| 25 | * @name: the engine name | ||
| 26 | * @idling: the engine is entering idle state | ||
| 27 | * @busy: request pump is busy | ||
| 28 | * @running: the engine is running | ||
| 29 | * @cur_req_prepared: current request is prepared | ||
| 30 | * @list: link with the global crypto engine list | ||
| 31 | * @queue_lock: spinlock to synchronise access to request queue | ||
| 32 | * @queue: the crypto queue of the engine | ||
| 33 | * @rt: whether this queue is set to run as a realtime task | ||
| 34 | * @prepare_crypt_hardware: a request will soon arrive from the queue | ||
| 35 | * so the subsystem requests the driver to prepare the hardware | ||
| 36 | * by issuing this call | ||
| 37 | * @unprepare_crypt_hardware: there are currently no more requests on the | ||
| 38 | * queue so the subsystem notifies the driver that it may relax the | ||
| 39 | * hardware by issuing this call | ||
| 40 | * @prepare_cipher_request: do any preparation, if needed, before handling the current request | ||
| 41 | * @unprepare_cipher_request: undo any work done by prepare_cipher_request() | ||
| 42 | * @cipher_one_request: perform encryption for the current request | ||
| 43 | * @prepare_hash_request: do any preparation, if needed, before handling the current request | ||
| 44 | * @unprepare_hash_request: undo any work done by prepare_hash_request() | ||
| 45 | * @hash_one_request: perform hashing for the current request | ||
| 46 | * @kworker: thread struct for request pump | ||
| 47 | * @kworker_task: pointer to task for request pump kworker thread | ||
| 48 | * @pump_requests: work struct for scheduling work to the request pump | ||
| 49 | * @priv_data: the engine private data | ||
| 50 | * @cur_req: the request currently being processed | ||
| 51 | */ | ||
| 52 | struct crypto_engine { | ||
| 53 | char name[ENGINE_NAME_LEN]; | ||
| 54 | bool idling; | ||
| 55 | bool busy; | ||
| 56 | bool running; | ||
| 57 | bool cur_req_prepared; | ||
| 58 | |||
| 59 | struct list_head list; | ||
| 60 | spinlock_t queue_lock; | ||
| 61 | struct crypto_queue queue; | ||
| 62 | |||
| 63 | bool rt; | ||
| 64 | |||
| 65 | int (*prepare_crypt_hardware)(struct crypto_engine *engine); | ||
| 66 | int (*unprepare_crypt_hardware)(struct crypto_engine *engine); | ||
| 67 | |||
| 68 | int (*prepare_cipher_request)(struct crypto_engine *engine, | ||
| 69 | struct ablkcipher_request *req); | ||
| 70 | int (*unprepare_cipher_request)(struct crypto_engine *engine, | ||
| 71 | struct ablkcipher_request *req); | ||
| 72 | int (*prepare_hash_request)(struct crypto_engine *engine, | ||
| 73 | struct ahash_request *req); | ||
| 74 | int (*unprepare_hash_request)(struct crypto_engine *engine, | ||
| 75 | struct ahash_request *req); | ||
| 76 | int (*cipher_one_request)(struct crypto_engine *engine, | ||
| 77 | struct ablkcipher_request *req); | ||
| 78 | int (*hash_one_request)(struct crypto_engine *engine, | ||
| 79 | struct ahash_request *req); | ||
| 80 | |||
| 81 | struct kthread_worker kworker; | ||
| 82 | struct task_struct *kworker_task; | ||
| 83 | struct kthread_work pump_requests; | ||
| 84 | |||
| 85 | void *priv_data; | ||
| 86 | struct crypto_async_request *cur_req; | ||
| 87 | }; | ||
| 88 | |||
| 89 | int crypto_transfer_cipher_request(struct crypto_engine *engine, | ||
| 90 | struct ablkcipher_request *req, | ||
| 91 | bool need_pump); | ||
| 92 | int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine, | ||
| 93 | struct ablkcipher_request *req); | ||
| 94 | int crypto_transfer_hash_request(struct crypto_engine *engine, | ||
| 95 | struct ahash_request *req, bool need_pump); | ||
| 96 | int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, | ||
| 97 | struct ahash_request *req); | ||
| 98 | void crypto_finalize_cipher_request(struct crypto_engine *engine, | ||
| 99 | struct ablkcipher_request *req, int err); | ||
| 100 | void crypto_finalize_hash_request(struct crypto_engine *engine, | ||
| 101 | struct ahash_request *req, int err); | ||
| 102 | int crypto_engine_start(struct crypto_engine *engine); | ||
| 103 | int crypto_engine_stop(struct crypto_engine *engine); | ||
| 104 | struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); | ||
| 105 | int crypto_engine_exit(struct crypto_engine *engine); | ||
| 106 | |||
| 107 | #endif /* _CRYPTO_ENGINE_H */ | ||
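Relocating the engine into its own header accompanies the functional split: the request pump can now carry both ablkcipher and ahash work, each type with its own transfer and finalize entry points. A hedged sketch of how a driver would adopt the API; the my_* hooks are placeholders for driver callbacks:

    #include <crypto/engine.h>

    engine = crypto_engine_alloc_init(dev, true);   /* true: realtime pump */
    if (!engine)
            return -ENOMEM;
    engine->cipher_one_request = my_cipher_one_request;
    engine->hash_one_request = my_hash_one_request;
    err = crypto_engine_start(engine);

    /* submit path of an ahash operation: */
    err = crypto_transfer_hash_request_to_engine(engine, req);

    /* completion handler (irq or tasklet): */
    crypto_finalize_hash_request(engine, req, err);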
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h new file mode 100644 index 000000000000..2a61c9bbab8f --- /dev/null +++ b/include/crypto/ghash.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | /* | ||
| 2 | * Common values for GHASH algorithms | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef __CRYPTO_GHASH_H__ | ||
| 6 | #define __CRYPTO_GHASH_H__ | ||
| 7 | |||
| 8 | #include <linux/types.h> | ||
| 9 | #include <crypto/gf128mul.h> | ||
| 10 | |||
| 11 | #define GHASH_BLOCK_SIZE 16 | ||
| 12 | #define GHASH_DIGEST_SIZE 16 | ||
| 13 | |||
| 14 | struct ghash_ctx { | ||
| 15 | struct gf128mul_4k *gf128; | ||
| 16 | }; | ||
| 17 | |||
| 18 | struct ghash_desc_ctx { | ||
| 19 | u8 buffer[GHASH_BLOCK_SIZE]; | ||
| 20 | u32 bytes; | ||
| 21 | }; | ||
| 22 | |||
| 23 | #endif | ||
diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 7c2bb27c067c..a7653339fedb 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h | |||
| @@ -238,9 +238,6 @@ struct ccp_xts_aes_engine { | |||
| 238 | }; | 238 | }; |
| 239 | 239 | ||
| 240 | /***** SHA engine *****/ | 240 | /***** SHA engine *****/ |
| 241 | #define CCP_SHA_BLOCKSIZE SHA256_BLOCK_SIZE | ||
| 242 | #define CCP_SHA_CTXSIZE SHA256_DIGEST_SIZE | ||
| 243 | |||
| 244 | /** | 241 | /** |
| 245 | * ccp_sha_type - type of SHA operation | 242 | * ccp_sha_type - type of SHA operation |
| 246 | * | 243 | * |
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 4f7d8f4b1e9a..34a0dc18f327 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h | |||
| @@ -29,7 +29,9 @@ | |||
| 29 | * Returns the number of lower random bytes in "data". | 29 | * Returns the number of lower random bytes in "data". |
| 30 | * Must not be NULL. *OBSOLETE* | 30 | * Must not be NULL. *OBSOLETE* |
| 31 | * @read: New API. drivers can fill up to max bytes of data | 31 | * @read: New API. drivers can fill up to max bytes of data |
| 32 | * into the buffer. The buffer is aligned for any type. | 32 | * into the buffer. The buffer is aligned for any type |
| 33 | * and max is guaranteed to be >= that alignment | ||
| 34 | * (either 4 or 8 depending on architecture). | ||
| 33 | * @priv: Private data, for use by the RNG driver. | 35 | * @priv: Private data, for use by the RNG driver. |
| 34 | * @quality: Estimation of true entropy in RNG's bitstream | 36 | * @quality: Estimation of true entropy in RNG's bitstream |
| 35 | * (per mill). | 37 | * (per mill). |
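The strengthened hw_random contract lets drivers drop per-call guards: data is aligned for any type and max is never smaller than that alignment, so a whole-word store is always safe on the first pass. A hedged driver sketch; read_hw_word() is a placeholder for the device access:

    static int my_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
    {
            u32 *buf = data;                /* alignment guaranteed by core */
            size_t n = 0;

            while (max >= sizeof(u32)) {    /* holds at least once */
                    *buf++ = read_hw_word();
                    max -= sizeof(u32);
                    n += sizeof(u32);
            }
            return n;                       /* bytes actually produced */
    }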
