 113 files changed, 7343 insertions(+), 1604 deletions(-)
diff --git a/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt b/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt
new file mode 100644
index 000000000000..096df34b11c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt
@@ -0,0 +1,29 @@
+Rockchip Electronics And Security Accelerator
+
+Required properties:
+- compatible: Should be "rockchip,rk3288-crypto"
+- reg: Base physical address of the engine and length of memory mapped
+       region
+- interrupts: Interrupt number
+- clocks: Reference to the clocks about crypto
+- clock-names: "aclk" used to clock data
+	       "hclk" used to clock data
+	       "sclk" used to clock crypto accelerator
+	       "apb_pclk" used to clock dma
+- resets: Must contain an entry for each entry in reset-names.
+	  See ../reset/reset.txt for details.
+- reset-names: Must include the name "crypto-rst".
+
+Examples:
+
+	crypto: crypto-controller@ff8a0000 {
+		compatible = "rockchip,rk3288-crypto";
+		reg = <0xff8a0000 0x4000>;
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
+			 <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
+		clock-names = "aclk", "hclk", "sclk", "apb_pclk";
+		resets = <&cru SRST_CRYPTO>;
+		reset-names = "crypto-rst";
+		status = "okay";
+	};
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 9f8402b35115..27e588f6c72e 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -164,6 +164,7 @@ struct coprocessor_request_block {
 #define ICSWX_INITIATED	(0x8)
 #define ICSWX_BUSY	(0x4)
 #define ICSWX_REJECTED	(0x2)
+#define ICSWX_XERS0	(0x1)	/* undefined or set from XERSO. */
 
 static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
 {
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 440df0c7a2ee..a69321a77783 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req)
 	}
 }
 
+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	ghash_async_init(req);
+	memcpy(dctx, in, sizeof(*dctx));
+	return 0;
+
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	memcpy(out, dctx, sizeof(*dctx));
+	return 0;
+
+}
+
 static int ghash_async_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = {
 	.final = ghash_async_final,
 	.setkey = ghash_async_setkey,
 	.digest = ghash_async_digest,
+	.export = ghash_async_export,
+	.import = ghash_async_import,
 	.halg = {
 		.digestsize = GHASH_DIGEST_SIZE,
+		.statesize = sizeof(struct ghash_desc_ctx),
 		.base = {
 			.cra_name = "ghash",
 			.cra_driver_name = "ghash-clmulni",
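With .export/.import and a real statesize wired up, callers can checkpoint a partial hash computation and resume it later. A minimal sketch of that round trip through the generic ahash API (error handling elided; the "ghash" allocation and GFP flags are illustrative):

	/* Hedged sketch: save and restore a partial ahash state. */
	struct crypto_ahash *tfm = crypto_alloc_ahash("ghash", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

	/* ... crypto_ahash_init()/crypto_ahash_update() as usual ... */
	crypto_ahash_export(req, state);	/* snapshot the partial state */
	crypto_ahash_import(req, state);	/* resume from the snapshot */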
diff --git a/crypto/Makefile b/crypto/Makefile
index f7aba923458d..2acdbbd30475 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
 rsa_generic-y += rsaprivkey-asn1.o
 rsa_generic-y += rsa.o
 rsa_generic-y += rsa_helper.o
+rsa_generic-y += rsa-pkcs1pad.o
 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
 
 cryptomgr-y := algboss.o testmgr.o
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 120ec042ec9e..def301ed1288 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -21,6 +21,7 @@
 #include <linux/cryptouser.h>
 #include <net/netlink.h>
 #include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
 #include "internal.h"
 
 #ifdef CONFIG_NET
@@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static void crypto_akcipher_free_instance(struct crypto_instance *inst)
+{
+	struct akcipher_instance *akcipher = akcipher_instance(inst);
+
+	akcipher->free(akcipher);
+}
+
 static const struct crypto_type crypto_akcipher_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_akcipher_init_tfm,
+	.free = crypto_akcipher_free_instance,
 #ifdef CONFIG_PROC_FS
 	.show = crypto_akcipher_show,
 #endif
@@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = {
 	.tfmsize = offsetof(struct crypto_akcipher, base),
 };
 
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
+			 u32 type, u32 mask)
+{
+	spawn->base.frontend = &crypto_akcipher_type;
+	return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
+
 struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
 					      u32 mask)
 {
@@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
 
-int crypto_register_akcipher(struct akcipher_alg *alg)
+static void akcipher_prepare_alg(struct akcipher_alg *alg)
 {
 	struct crypto_alg *base = &alg->base;
 
 	base->cra_type = &crypto_akcipher_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+}
+
+int crypto_register_akcipher(struct akcipher_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	akcipher_prepare_alg(alg);
 	return crypto_register_alg(base);
 }
 EXPORT_SYMBOL_GPL(crypto_register_akcipher);
@@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
 
+int akcipher_register_instance(struct crypto_template *tmpl,
+			       struct akcipher_instance *inst)
+{
+	akcipher_prepare_alg(&inst->alg);
+	return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(akcipher_register_instance);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Generic public key cipher type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 59bf491fe3d8..7be76aa31579 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
 {
 	struct crypto_spawn *spawn, *n;
 
-	if (list_empty(stack))
+	spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
+	if (!spawn)
 		return NULL;
 
-	spawn = list_first_entry(stack, struct crypto_spawn, list);
-	n = list_entry(spawn->list.next, struct crypto_spawn, list);
+	n = list_next_entry(spawn, list);
 
 	if (spawn->alg && &n->list != stack && !n->alg)
 		n->alg = (n->list.next == stack) ? alg :
-			 &list_entry(n->list.next, struct crypto_spawn,
-				     list)->inst->alg;
+			 &list_next_entry(n, list)->inst->alg;
 
 	list_move(&spawn->list, secondary_spawns);
 
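The hunk above swaps a list_empty() check plus a fetch for the combined helpers. A sketch of the same idiom on a hypothetical list type ('struct item' and 'queue' are illustrative, not from the patch):

	/* Fetch-or-NULL instead of list_empty() + list_first_entry(). */
	struct item {
		struct list_head list;
	};
	struct item *first, *second;

	first = list_first_entry_or_null(&queue, struct item, list);
	if (!first)
		return NULL;				/* empty list handled in one call */
	second = list_next_entry(first, list);		/* successor of 'first' */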
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 6d4d4569447e..4c93b8a4e81e 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	}
 
 	while (size) {
-		unsigned long len = size;
+		size_t len = size;
 		struct scatterlist *sg = NULL;
 
 		/* use the existing memory in an allocated page */
@@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 		/* allocate a new page */
 		len = min_t(unsigned long, size, aead_sndbuf(sk));
 		while (len) {
-			int plen = 0;
+			size_t plen = 0;
 
 			if (sgl->cur >= ALG_MAX_PAGES) {
 				aead_put_sgl(sk);
@@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 			}
 
 			sg = sgl->sg + sgl->cur;
-			plen = min_t(int, len, PAGE_SIZE);
+			plen = min_t(size_t, len, PAGE_SIZE);
 
 			sg_assign_page(sg, alloc_page(GFP_KERNEL));
 			err = -ENOMEM;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 634b4d1ab681..5c756b30e79b 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -40,7 +40,7 @@ struct skcipher_ctx {
 	struct af_alg_completion completion;
 
 	atomic_t inflight;
-	unsigned used;
+	size_t used;
 
 	unsigned int len;
 	bool more;
@@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
 	return 0;
 }
 
-static void skcipher_pull_sgl(struct sock *sk, int used, int put)
+static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
@@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
 		sg = sgl->sg;
 
 		for (i = 0; i < sgl->cur; i++) {
-			int plen = min_t(int, used, sg[i].length);
+			size_t plen = min_t(size_t, used, sg[i].length);
 
 			if (!sg_page(sg + i))
 				continue;
@@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 	while (size) {
 		struct scatterlist *sg;
 		unsigned long len = size;
-		int plen;
+		size_t plen;
 
 		if (ctx->merge) {
 			sgl = list_entry(ctx->tsgl.prev,
@@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 		sg_unmark_end(sg + sgl->cur);
 		do {
 			i = sgl->cur;
-			plen = min_t(int, len, PAGE_SIZE);
+			plen = min_t(size_t, len, PAGE_SIZE);
 
 			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
 			err = -ENOMEM;
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 9441240f7d2a..004d5fc8e56b 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -13,7 +13,7 @@
 
 #define pr_fmt(fmt) "SIG: "fmt
 #include <keys/asymmetric-subtype.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/err.h>
 #include <crypto/public_key.h>
 #include "asymmetric_keys.h"
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 99c3cce01290..7b6b935cef23 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req)
 	struct scatterlist *src, *dst;
 	int err;
 
+	if (rctx->cryptlen == 0)
+		goto skip;
+
 	chacha_iv(creq->iv, req, 1);
 
 	sg_init_table(rctx->src, 2);
@@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req)
 	if (err)
 		return err;
 
+skip:
 	return poly_verify_tag(req);
 }
 
@@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req)
 	struct scatterlist *src, *dst;
 	int err;
 
+	if (req->cryptlen == 0)
+		goto skip;
+
 	chacha_iv(creq->iv, req, 1);
 
 	sg_init_table(rctx->src, 2);
@@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req)
 	if (err)
 		return err;
 
+skip:
 	return poly_genkey(req);
 }
 
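The two skip paths cover the authentication-only case, where a request carries associated data but no plaintext, so the ChaCha20 pass is bypassed and only the Poly1305 tag is produced or checked. A hedged sketch of how a caller would issue such a request ('sg', 'iv' and 'assoclen' are illustrative):

	struct crypto_aead *tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

	aead_request_set_ad(req, assoclen);		/* AAD only ... */
	aead_request_set_crypt(req, sg, sg, 0, iv);	/* ... cryptlen == 0 */
	crypto_aead_encrypt(req);			/* output is just the tag */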
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index c81861b1350b..7921251cdb13 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	inst->alg.halg.base.cra_flags = type;
 
 	inst->alg.halg.digestsize = salg->digestsize;
+	inst->alg.halg.statesize = salg->statesize;
 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
 
 	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@@ -887,8 +888,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 		return ERR_PTR(-EINVAL);
-	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+	type = crypto_skcipher_type(type);
 	mask &= ~CRYPTO_ALG_TYPE_MASK;
 	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
 	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
diff --git a/crypto/drbg.c b/crypto/drbg.c
index a7c23146b87f..ab6ef1d08568 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -626,7 +626,7 @@ out:
 	return len;
 }
 
-static struct drbg_state_ops drbg_ctr_ops = {
+static const struct drbg_state_ops drbg_ctr_ops = {
 	.update = drbg_ctr_update,
 	.generate = drbg_ctr_generate,
 	.crypto_init = drbg_init_sym_kernel,
@@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
 	return len;
 }
 
-static struct drbg_state_ops drbg_hmac_ops = {
+static const struct drbg_state_ops drbg_hmac_ops = {
 	.update = drbg_hmac_update,
 	.generate = drbg_hmac_generate,
 	.crypto_init = drbg_init_hash_kernel,
@@ -1032,7 +1032,7 @@ out:
  * scratchpad usage: as update and generate are used isolated, both
  * can use the scratchpad
  */
-static struct drbg_state_ops drbg_hash_ops = {
+static const struct drbg_state_ops drbg_hash_ops = {
 	.update = drbg_hash_update,
 	.generate = drbg_hash_generate,
 	.crypto_init = drbg_init_hash_kernel,
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index fe5b495a434d..f78d4fc4e38a 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void)
 	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
 	while (single_task_running()) {
 		mutex_lock(&flist->lock);
-		if (list_empty(&flist->list)) {
-			mutex_unlock(&flist->lock);
-			return;
-		}
-		cstate = list_entry(flist->list.next,
+		cstate = list_first_entry_or_null(&flist->list,
 				struct mcryptd_alg_cstate, flush_list);
-		if (!cstate->flusher_engaged) {
+		if (!cstate || !cstate->flusher_engaged) {
 			mutex_unlock(&flist->lock);
 			return;
 		}
diff --git a/crypto/md5.c b/crypto/md5.c
index 33d17e9a8702..2355a7c25c45 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -24,6 +24,12 @@
 #include <linux/cryptohash.h>
 #include <asm/byteorder.h>
 
+const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
+	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+EXPORT_SYMBOL_GPL(md5_zero_message_hash);
+
 /* XXX: this stuff can be optimized */
 static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
 {
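Exporting the digest of the empty message lets hardware drivers short-circuit zero-length requests, which some engines cannot process. A hedged sketch of that pattern in a hypothetical driver ('my_req_ctx' and 'my_engine_finish' are assumptions, not from this patch):

	/* Hypothetical driver final(): return the precomputed constant
	 * for the empty message instead of touching the hardware. */
	static int hypothetical_md5_final(struct ahash_request *req, u8 *out)
	{
		struct my_req_ctx *ctx = ahash_request_ctx(req);

		if (ctx->total == 0) {
			memcpy(out, md5_zero_message_hash, MD5_DIGEST_SIZE);
			return 0;
		}
		return my_engine_finish(req, out);	/* normal hardware path */
	}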
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
new file mode 100644
index 000000000000..50f5c97e1087
--- /dev/null
+++ b/crypto/rsa-pkcs1pad.c
@@ -0,0 +1,628 @@
+/*
+ * RSA padding templates.
+ *
+ * Copyright (c) 2015  Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+struct pkcs1pad_ctx {
+	struct crypto_akcipher *child;
+
+	unsigned int key_size;
+};
+
+struct pkcs1pad_request {
+	struct akcipher_request child_req;
+
+	struct scatterlist in_sg[3], out_sg[2];
+	uint8_t *in_buf, *out_buf;
+};
+
+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+		unsigned int keylen)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	int err, size;
+
+	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+
+	if (!err) {
+		/* Find out new modulus size from rsa implementation */
+		size = crypto_akcipher_maxsize(ctx->child);
+
+		ctx->key_size = size > 0 ? size : 0;
+		if (size <= 0)
+			err = size;
+	}
+
+	return err;
+}
+
+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+		unsigned int keylen)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	int err, size;
+
+	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+
+	if (!err) {
+		/* Find out new modulus size from rsa implementation */
+		size = crypto_akcipher_maxsize(ctx->child);
+
+		ctx->key_size = size > 0 ? size : 0;
+		if (size <= 0)
+			err = size;
+	}
+
+	return err;
+}
+
+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	/*
+	 * The maximum destination buffer size for the encrypt/sign operations
+	 * will be the same as for RSA, even though it's smaller for
+	 * decrypt/verify.
+	 */
+
+	return ctx->key_size ?: -EINVAL;
+}
+
+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
+		struct scatterlist *next)
+{
+	int nsegs = next ? 1 : 0;
+
+	if (offset_in_page(buf) + len <= PAGE_SIZE) {
+		nsegs += 1;
+		sg_init_table(sg, nsegs);
+		sg_set_buf(sg, buf, len);
+	} else {
+		nsegs += 2;
+		sg_init_table(sg, nsegs);
+		sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
+		sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
+				offset_in_page(buf) + len - PAGE_SIZE);
+	}
+
+	if (next)
+		sg_chain(sg, nsegs, next);
+}
+
+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
+	size_t chunk_len, pad_left;
+	struct sg_mapping_iter miter;
+
+	if (!err) {
+		if (pad_len) {
+			sg_miter_start(&miter, req->dst,
+					sg_nents_for_len(req->dst, pad_len),
+					SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+			pad_left = pad_len;
+			while (pad_left) {
+				sg_miter_next(&miter);
+
+				chunk_len = min(miter.length, pad_left);
+				memset(miter.addr, 0, chunk_len);
+				pad_left -= chunk_len;
+			}
+
+			sg_miter_stop(&miter);
+		}
+
+		sg_pcopy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, ctx->key_size),
+				req_ctx->out_buf, req_ctx->child_req.dst_len,
+				pad_len);
+	}
+	req->dst_len = ctx->key_size;
+
+	kfree(req_ctx->in_buf);
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_encrypt_sign_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req,
+			pkcs1pad_encrypt_sign_complete(req, err));
+}
+
+static int pkcs1pad_encrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+	unsigned int i, ps_end;
+
+	if (!ctx->key_size)
+		return -EINVAL;
+
+	if (req->src_len > ctx->key_size - 11)
+		return -EOVERFLOW;
+
+	if (req->dst_len < ctx->key_size) {
+		req->dst_len = ctx->key_size;
+		return -EOVERFLOW;
+	}
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/*
+	 * Replace both input and output to add the padding in the input and
+	 * the potential missing leading zeros in the output.
+	 */
+	req_ctx->child_req.src = req_ctx->in_sg;
+	req_ctx->child_req.src_len = ctx->key_size - 1;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size;
+
+	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->in_buf)
+		return -ENOMEM;
+
+	ps_end = ctx->key_size - req->src_len - 2;
+	req_ctx->in_buf[0] = 0x02;
+	for (i = 1; i < ps_end; i++)
+		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
+	req_ctx->in_buf[ps_end] = 0x00;
+
+	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+			ctx->key_size - 1 - req->src_len, req->src);
+
+	req_ctx->out_buf = kmalloc(ctx->key_size,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf) {
+		kfree(req_ctx->in_buf);
+		return -ENOMEM;
+	}
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_encrypt_sign_complete_cb, req);
+
+	err = crypto_akcipher_encrypt(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_encrypt_sign_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	unsigned int pos;
+
+	if (err == -EOVERFLOW)
+		/* Decrypted value had no leading 0 byte */
+		err = -EINVAL;
+
+	if (err)
+		goto done;
+
+	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (req_ctx->out_buf[0] != 0x02) {
+		err = -EINVAL;
+		goto done;
+	}
+	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+		if (req_ctx->out_buf[pos] == 0x00)
+			break;
+	if (pos < 9 || pos == req_ctx->child_req.dst_len) {
+		err = -EINVAL;
+		goto done;
+	}
+	pos++;
+
+	if (req->dst_len < req_ctx->child_req.dst_len - pos)
+		err = -EOVERFLOW;
+	req->dst_len = req_ctx->child_req.dst_len - pos;
+
+	if (!err)
+		sg_copy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, req->dst_len),
+				req_ctx->out_buf + pos, req->dst_len);
+
+done:
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_decrypt_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+}
+
+static int pkcs1pad_decrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+
+	if (!ctx->key_size || req->src_len != ctx->key_size)
+		return -EINVAL;
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/* Reuse input buffer, output to a new buffer */
+	req_ctx->child_req.src = req->src;
+	req_ctx->child_req.src_len = req->src_len;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf)
+		return -ENOMEM;
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size - 1, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_decrypt_complete_cb, req);
+
+	err = crypto_akcipher_decrypt(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_decrypt_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_sign(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+	unsigned int ps_end;
+
+	if (!ctx->key_size)
+		return -EINVAL;
+
+	if (req->src_len > ctx->key_size - 11)
+		return -EOVERFLOW;
+
+	if (req->dst_len < ctx->key_size) {
+		req->dst_len = ctx->key_size;
+		return -EOVERFLOW;
+	}
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/*
+	 * Replace both input and output to add the padding in the input and
+	 * the potential missing leading zeros in the output.
+	 */
+	req_ctx->child_req.src = req_ctx->in_sg;
+	req_ctx->child_req.src_len = ctx->key_size - 1;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size;
+
+	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->in_buf)
+		return -ENOMEM;
+
+	ps_end = ctx->key_size - req->src_len - 2;
+	req_ctx->in_buf[0] = 0x01;
+	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
+	req_ctx->in_buf[ps_end] = 0x00;
+
+	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+			ctx->key_size - 1 - req->src_len, req->src);
+
+	req_ctx->out_buf = kmalloc(ctx->key_size,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf) {
+		kfree(req_ctx->in_buf);
+		return -ENOMEM;
+	}
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_encrypt_sign_complete_cb, req);
+
+	err = crypto_akcipher_sign(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_encrypt_sign_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	unsigned int pos;
+
+	if (err == -EOVERFLOW)
+		/* Decrypted value had no leading 0 byte */
+		err = -EINVAL;
+
+	if (err)
+		goto done;
+
+	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (req_ctx->out_buf[0] != 0x01) {
+		err = -EINVAL;
+		goto done;
+	}
+	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+		if (req_ctx->out_buf[pos] != 0xff)
+			break;
+	if (pos < 9 || pos == req_ctx->child_req.dst_len ||
+			req_ctx->out_buf[pos] != 0x00) {
+		err = -EINVAL;
+		goto done;
+	}
+	pos++;
+
+	if (req->dst_len < req_ctx->child_req.dst_len - pos)
+		err = -EOVERFLOW;
+	req->dst_len = req_ctx->child_req.dst_len - pos;
+
+	if (!err)
+		sg_copy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, req->dst_len),
+				req_ctx->out_buf + pos, req->dst_len);
+
+done:
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_verify_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
+}
+
+/*
+ * The verify operation is here for completeness similar to the verification
+ * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
+ * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
+ * retrieve the DigestInfo from a signature, instead the user is expected
+ * to call the sign operation to generate the expected signature and compare
+ * signatures instead of the message-digests.
+ */
+static int pkcs1pad_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+
+	if (!ctx->key_size || req->src_len != ctx->key_size)
+		return -EINVAL;
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/* Reuse input buffer, output to a new buffer */
+	req_ctx->child_req.src = req->src;
+	req_ctx->child_req.src_len = req->src_len;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf)
+		return -ENOMEM;
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size - 1, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_verify_complete_cb, req);
+
+	err = crypto_akcipher_verify(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_verify_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct crypto_akcipher *child_tfm;
+
+	child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
+	if (IS_ERR(child_tfm))
+		return PTR_ERR(child_tfm);
+
+	ctx->child = child_tfm;
+
+	return 0;
+}
+
+static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	crypto_free_akcipher(ctx->child);
+}
+
+static void pkcs1pad_free(struct akcipher_instance *inst)
+{
+	struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);
+
+	crypto_drop_akcipher(spawn);
+
+	kfree(inst);
+}
+
+static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct crypto_attr_type *algt;
+	struct akcipher_instance *inst;
+	struct crypto_akcipher_spawn *spawn;
+	struct akcipher_alg *rsa_alg;
+	const char *rsa_alg_name;
+	int err;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
+		return -EINVAL;
+
+	rsa_alg_name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(rsa_alg_name))
+		return PTR_ERR(rsa_alg_name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	spawn = akcipher_instance_ctx(inst);
+	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
+	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
+			crypto_requires_sync(algt->type, algt->mask));
+	if (err)
+		goto out_free_inst;
+
+	rsa_alg = crypto_spawn_akcipher_alg(spawn);
+
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name,
+				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+				rsa_alg->base.cra_name) >=
+			CRYPTO_MAX_ALG_NAME ||
+			snprintf(inst->alg.base.cra_driver_name,
+				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+				rsa_alg->base.cra_driver_name) >=
+			CRYPTO_MAX_ALG_NAME)
+		goto out_drop_alg;
+
+	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
+	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
+
+	inst->alg.init = pkcs1pad_init_tfm;
+	inst->alg.exit = pkcs1pad_exit_tfm;
+
+	inst->alg.encrypt = pkcs1pad_encrypt;
+	inst->alg.decrypt = pkcs1pad_decrypt;
+	inst->alg.sign = pkcs1pad_sign;
+	inst->alg.verify = pkcs1pad_verify;
+	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
+	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
+	inst->alg.max_size = pkcs1pad_get_max_size;
+	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
+
+	inst->free = pkcs1pad_free;
+
+	err = akcipher_register_instance(tmpl, inst);
+	if (err)
+		goto out_drop_alg;
+
+	return 0;
+
+out_drop_alg:
+	crypto_drop_akcipher(spawn);
+out_free_inst:
+	kfree(inst);
+	return err;
+}
+
+struct crypto_template rsa_pkcs1pad_tmpl = {
+	.name = "pkcs1pad",
+	.create = pkcs1pad_create,
+	.module = THIS_MODULE,
+};
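For context, a hedged sketch of how a user would drive the new template: in this version pkcs1pad takes only the inner akcipher name, so the instance is reached as "pkcs1pad(rsa)" ('der_key', the scatterlists and error handling are illustrative):

	struct crypto_akcipher *tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);

	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
	akcipher_request_set_crypt(req, src_sg, dst_sg, msg_len,
				   crypto_akcipher_maxsize(tfm));
	crypto_akcipher_encrypt(req);	/* pads per RFC 2313, then runs "rsa" */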
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 1093e041db03..77d737f52147 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -13,6 +13,7 @@
 #include <crypto/internal/rsa.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/akcipher.h>
+#include <crypto/algapi.h>
 
 /*
  * RSAEP function [RFC3447 sec 5.1.1]
@@ -91,12 +92,6 @@ static int rsa_enc(struct akcipher_request *req)
 		goto err_free_c;
 	}
 
-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_c;
-	}
-
 	ret = -ENOMEM;
 	m = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!m)
@@ -136,12 +131,6 @@ static int rsa_dec(struct akcipher_request *req)
 		goto err_free_m;
 	}
 
-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_m;
-	}
-
 	ret = -ENOMEM;
 	c = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!c)
@@ -180,12 +169,6 @@ static int rsa_sign(struct akcipher_request *req)
 		goto err_free_s;
 	}
 
-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_s;
-	}
-
 	ret = -ENOMEM;
 	m = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!m)
@@ -225,12 +208,6 @@ static int rsa_verify(struct akcipher_request *req)
 		goto err_free_m;
 	}
 
-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_m;
-	}
-
 	ret = -ENOMEM;
 	s = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!s) {
@@ -339,11 +316,24 @@ static struct akcipher_alg rsa = {
 
 static int rsa_init(void)
 {
-	return crypto_register_akcipher(&rsa);
+	int err;
+
+	err = crypto_register_akcipher(&rsa);
+	if (err)
+		return err;
+
+	err = crypto_register_template(&rsa_pkcs1pad_tmpl);
+	if (err) {
+		crypto_unregister_akcipher(&rsa);
+		return err;
+	}
+
+	return 0;
 }
 
 static void rsa_exit(void)
 {
+	crypto_unregister_template(&rsa_pkcs1pad_tmpl);
 	crypto_unregister_akcipher(&rsa);
 }
 
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 39e3acc438d9..6877cbb9105f 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -26,6 +26,13 @@
 #include <crypto/sha1_base.h>
 #include <asm/byteorder.h>
 
+const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
+	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+	0xaf, 0xd8, 0x07, 0x09
+};
+EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
+
 static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
 				  int blocks)
 {
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 78431163ed3c..8f9c47e1a96e 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -27,6 +27,22 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+	0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
 static inline u32 Ch(u32 x, u32 y, u32 z)
 {
 	return z ^ (x & (y ^ z));
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 46a4a757d478..270bc4b82bd9 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
 				NULL, 0, 16, 16, aead_speed_template_20);
 		test_aead_speed("gcm(aes)", ENCRYPT, sec,
-				NULL, 0, 16, 8, aead_speed_template_20);
+				NULL, 0, 16, 8, speed_template_16_24_32);
 		break;
 
 	case 212:
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 6f497aa1b276..9203f2d130c0 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 			goto out;
 		}
 
-		mutex_lock(&reading_mutex);
+		if (mutex_lock_interruptible(&reading_mutex)) {
+			err = -ERESTARTSYS;
+			goto out_put;
+		}
 		if (!data_avail) {
 			bytes_read = rng_get_data(rng, rng_buffer,
 						  rng_buffer_size(),
@@ -288,6 +291,7 @@ out:
 
 out_unlock_reading:
 	mutex_unlock(&reading_mutex);
+out_put:
 	put_rng(rng);
 	goto out;
 }
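The interruptible lock means a reader blocked on /dev/hwrng can be killed instead of hanging in uninterruptible sleep. The general idiom, sketched with an illustrative lock name:

	/* In a user-triggered path, let a signal abort the wait. */
	if (mutex_lock_interruptible(&some_lock))
		return -ERESTARTSYS;	/* signal arrived while sleeping */
	/* ... critical section ... */
	mutex_unlock(&some_lock);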
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index a405cdcd8dd2..8da14f1a1f56 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -17,7 +17,7 @@
 #include <linux/init.h>
 #include <linux/random.h>
 #include <linux/hw_random.h>
-#include <linux/timer.h>
+#include <linux/workqueue.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
@@ -29,11 +29,11 @@
 /* param1: ptr, param2: count, param3: flag */
 static u32 (*omap3_rom_rng_call)(u32, u32, u32);
 
-static struct timer_list idle_timer;
+static struct delayed_work idle_work;
 static int rng_idle;
 static struct clk *rng_clk;
 
-static void omap3_rom_rng_idle(unsigned long data)
+static void omap3_rom_rng_idle(struct work_struct *work)
 {
 	int r;
 
@@ -51,7 +51,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
 	u32 r;
 	u32 ptr;
 
-	del_timer_sync(&idle_timer);
+	cancel_delayed_work_sync(&idle_work);
 	if (rng_idle) {
 		clk_prepare_enable(rng_clk);
 		r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
@@ -65,7 +65,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
 
 	ptr = virt_to_phys(buf);
 	r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
-	mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
+	schedule_delayed_work(&idle_work, msecs_to_jiffies(500));
 	if (r != 0)
 		return -EINVAL;
 	return 0;
@@ -102,7 +102,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
+	INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle);
 	rng_clk = devm_clk_get(&pdev->dev, "ick");
 	if (IS_ERR(rng_clk)) {
 		pr_err("unable to get RNG clock\n");
@@ -118,6 +118,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
 
 static int omap3_rom_rng_remove(struct platform_device *pdev)
 {
+	cancel_delayed_work_sync(&idle_work);
 	hwrng_unregister(&omap3_rom_rng_ops);
 	clk_disable_unprepare(rng_clk);
 	return 0;
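Timer callbacks run in atomic context, but this idle handler calls clk_disable_unprepare(), which can sleep; delayed work runs in process context, where that is legal. The conversion pattern in general form ('my_work', 'my_idle' and 'clk' are illustrative):

	static struct clk *clk;			/* illustrative */
	static struct delayed_work my_work;

	static void my_idle(struct work_struct *work)
	{
		clk_disable_unprepare(clk);	/* may sleep: fine in a work item */
	}

	static int my_probe(void)
	{
		INIT_DELAYED_WORK(&my_work, my_idle);
		schedule_delayed_work(&my_work, msecs_to_jiffies(500));
		return 0;
	}

	static void my_remove(void)
	{
		cancel_delayed_work_sync(&my_work);	/* flush before teardown */
	}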
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 2569e043317e..3dd69df9c970 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -194,6 +194,9 @@ config CRYPTO_DEV_NIAGARA2 | |||
194 | select CRYPTO_DES | 194 | select CRYPTO_DES |
195 | select CRYPTO_BLKCIPHER | 195 | select CRYPTO_BLKCIPHER |
196 | select CRYPTO_HASH | 196 | select CRYPTO_HASH |
197 | select CRYPTO_MD5 | ||
198 | select CRYPTO_SHA1 | ||
199 | select CRYPTO_SHA256 | ||
197 | depends on SPARC64 | 200 | depends on SPARC64 |
198 | help | 201 | help |
199 | Each core of a Niagara2 processor contains a Stream | 202 | Each core of a Niagara2 processor contains a Stream |
@@ -378,10 +381,10 @@ config CRYPTO_DEV_BFIN_CRC | |||
378 | 381 | ||
379 | config CRYPTO_DEV_ATMEL_AES | 382 | config CRYPTO_DEV_ATMEL_AES |
380 | tristate "Support for Atmel AES hw accelerator" | 383 | tristate "Support for Atmel AES hw accelerator" |
381 | depends on ARCH_AT91 | 384 | depends on AT_XDMAC || AT_HDMAC || COMPILE_TEST |
382 | select CRYPTO_AES | 385 | select CRYPTO_AES |
386 | select CRYPTO_AEAD | ||
383 | select CRYPTO_BLKCIPHER | 387 | select CRYPTO_BLKCIPHER |
384 | select AT_HDMAC | ||
385 | help | 388 | help |
386 | Some Atmel processors have AES hw accelerator. | 389 | Some Atmel processors have AES hw accelerator. |
387 | Select this if you want to use the Atmel module for | 390 | Select this if you want to use the Atmel module for |
@@ -498,4 +501,15 @@ config CRYPTO_DEV_SUN4I_SS | |||
498 | To compile this driver as a module, choose M here: the module | 501 | To compile this driver as a module, choose M here: the module |
499 | will be called sun4i-ss. | 502 | will be called sun4i-ss. |
500 | 503 | ||
504 | config CRYPTO_DEV_ROCKCHIP | ||
505 | tristate "Rockchip's Cryptographic Engine driver" | ||
506 | depends on OF && ARCH_ROCKCHIP | ||
507 | select CRYPTO_AES | ||
508 | select CRYPTO_DES | ||
509 | select CRYPTO_BLKCIPHER | ||
510 | |||
511 | help | ||
512 | This driver interfaces with the Rockchip hardware crypto accelerator. ||
513 | It supports the ECB and CBC chaining modes with the AES, DES and DES3_EDE ciphers. ||
514 | |||
501 | endif # CRYPTO_HW | 515 | endif # CRYPTO_HW |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index c3ced6fbd1b8..713de9d11148 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -29,3 +29,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ | |||
29 | obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ | 29 | obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ |
30 | obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ | 30 | obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ |
31 | obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ | 31 | obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ |
32 | obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ | ||
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 58a630e55d5d..62134c8a2260 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -781,6 +781,10 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, | |||
781 | 781 | ||
782 | /* figure how many gd is needed */ | 782 | /* figure how many gd is needed */ |
783 | num_gd = sg_nents_for_len(src, datalen); | 783 | num_gd = sg_nents_for_len(src, datalen); |
784 | if ((int)num_gd < 0) { | ||
785 | dev_err(dev->core_dev->device, "Invalid number of src SG.\n"); | ||
786 | return -EINVAL; | ||
787 | } | ||
784 | if (num_gd == 1) | 788 | if (num_gd == 1) |
785 | num_gd = 0; | 789 | num_gd = 0; |
786 | 790 | ||
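sg_nents_for_len() returns the number of scatterlist entries needed to cover the given length, or a negative errno when the list is too short, so a caller keeping the result in an unsigned variable has to cast before testing, which is what the crypto4xx hunk above does with num_gd. A small, hedged illustration of the idiom (the helper name is hypothetical):

	#include <linux/errno.h>
	#include <linux/scatterlist.h>

	static int count_entries_for_len(struct scatterlist *sg, size_t len)
	{
		u32 nents = sg_nents_for_len(sg, len);	/* like num_gd above */

		if ((int)nents < 0)	/* negative errno stored in a u32 */
			return -EINVAL;

		return nents;
	}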
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h index 2786bb1a5aa0..6c2951bb70b1 100644 --- a/drivers/crypto/atmel-aes-regs.h +++ b/drivers/crypto/atmel-aes-regs.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #define AES_MR 0x04 | 9 | #define AES_MR 0x04 |
10 | #define AES_MR_CYPHER_DEC (0 << 0) | 10 | #define AES_MR_CYPHER_DEC (0 << 0) |
11 | #define AES_MR_CYPHER_ENC (1 << 0) | 11 | #define AES_MR_CYPHER_ENC (1 << 0) |
12 | #define AES_MR_GTAGEN (1 << 1) | ||
12 | #define AES_MR_DUALBUFF (1 << 3) | 13 | #define AES_MR_DUALBUFF (1 << 3) |
13 | #define AES_MR_PROCDLY_MASK (0xF << 4) | 14 | #define AES_MR_PROCDLY_MASK (0xF << 4) |
14 | #define AES_MR_PROCDLY_OFFSET 4 | 15 | #define AES_MR_PROCDLY_OFFSET 4 |
@@ -26,6 +27,7 @@ | |||
26 | #define AES_MR_OPMOD_OFB (0x2 << 12) | 27 | #define AES_MR_OPMOD_OFB (0x2 << 12) |
27 | #define AES_MR_OPMOD_CFB (0x3 << 12) | 28 | #define AES_MR_OPMOD_CFB (0x3 << 12) |
28 | #define AES_MR_OPMOD_CTR (0x4 << 12) | 29 | #define AES_MR_OPMOD_CTR (0x4 << 12) |
30 | #define AES_MR_OPMOD_GCM (0x5 << 12) | ||
29 | #define AES_MR_LOD (0x1 << 15) | 31 | #define AES_MR_LOD (0x1 << 15) |
30 | #define AES_MR_CFBS_MASK (0x7 << 16) | 32 | #define AES_MR_CFBS_MASK (0x7 << 16) |
31 | #define AES_MR_CFBS_128b (0x0 << 16) | 33 | #define AES_MR_CFBS_128b (0x0 << 16) |
@@ -44,6 +46,7 @@ | |||
44 | #define AES_ISR 0x1C | 46 | #define AES_ISR 0x1C |
45 | #define AES_INT_DATARDY (1 << 0) | 47 | #define AES_INT_DATARDY (1 << 0) |
46 | #define AES_INT_URAD (1 << 8) | 48 | #define AES_INT_URAD (1 << 8) |
49 | #define AES_INT_TAGRDY (1 << 16) | ||
47 | #define AES_ISR_URAT_MASK (0xF << 12) | 50 | #define AES_ISR_URAT_MASK (0xF << 12) |
48 | #define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12) | 51 | #define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12) |
49 | #define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12) | 52 | #define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12) |
@@ -57,6 +60,13 @@ | |||
57 | #define AES_ODATAR(x) (0x50 + ((x) * 0x04)) | 60 | #define AES_ODATAR(x) (0x50 + ((x) * 0x04)) |
58 | #define AES_IVR(x) (0x60 + ((x) * 0x04)) | 61 | #define AES_IVR(x) (0x60 + ((x) * 0x04)) |
59 | 62 | ||
63 | #define AES_AADLENR 0x70 | ||
64 | #define AES_CLENR 0x74 | ||
65 | #define AES_GHASHR(x) (0x78 + ((x) * 0x04)) | ||
66 | #define AES_TAGR(x) (0x88 + ((x) * 0x04)) | ||
67 | #define AES_CTRR 0x98 | ||
68 | #define AES_GCMHR(x) (0x9c + ((x) * 0x04)) | ||
69 | |||
60 | #define AES_HW_VERSION 0xFC | 70 | #define AES_HW_VERSION 0xFC |
61 | 71 | ||
62 | #endif /* __ATMEL_AES_REGS_H__ */ | 72 | #endif /* __ATMEL_AES_REGS_H__ */ |
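The registers added here back the GCM support introduced in atmel-aes.c below: AES_AADLENR and AES_CLENR take the associated-data and ciphertext lengths, while AES_GHASHR(0..3) and AES_TAGR(0..3) expose the 128-bit GHASH state and authentication tag as four consecutive 32-bit words each. Reading such a quad is just a strided loop; the driver below wraps it as atmel_aes_read_block(), but a standalone sketch (helper name hypothetical) would be:

	/* Read a 128-bit value from four consecutive 32-bit registers,
	 * e.g. base = AES_TAGR(0) or AES_GHASHR(0). */
	static void read_quad(struct atmel_aes_dev *dd, u32 base, u32 val[4])
	{
		int i;

		for (i = 0; i < 4; i++)
			val[i] = atmel_aes_read(dd, base + i * 4);
	}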
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index fb16d812c8f5..5621612ee921 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c | |||
@@ -33,68 +33,118 @@ | |||
33 | #include <linux/of_device.h> | 33 | #include <linux/of_device.h> |
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/crypto.h> | 35 | #include <linux/crypto.h> |
36 | #include <linux/cryptohash.h> | ||
37 | #include <crypto/scatterwalk.h> | 36 | #include <crypto/scatterwalk.h> |
38 | #include <crypto/algapi.h> | 37 | #include <crypto/algapi.h> |
39 | #include <crypto/aes.h> | 38 | #include <crypto/aes.h> |
40 | #include <crypto/hash.h> | 39 | #include <crypto/internal/aead.h> |
41 | #include <crypto/internal/hash.h> | ||
42 | #include <linux/platform_data/crypto-atmel.h> | 40 | #include <linux/platform_data/crypto-atmel.h> |
43 | #include <dt-bindings/dma/at91.h> | 41 | #include <dt-bindings/dma/at91.h> |
44 | #include "atmel-aes-regs.h" | 42 | #include "atmel-aes-regs.h" |
45 | 43 | ||
44 | #define ATMEL_AES_PRIORITY 300 | ||
45 | |||
46 | #define ATMEL_AES_BUFFER_ORDER 2 | ||
47 | #define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER) | ||
48 | |||
46 | #define CFB8_BLOCK_SIZE 1 | 49 | #define CFB8_BLOCK_SIZE 1 |
47 | #define CFB16_BLOCK_SIZE 2 | 50 | #define CFB16_BLOCK_SIZE 2 |
48 | #define CFB32_BLOCK_SIZE 4 | 51 | #define CFB32_BLOCK_SIZE 4 |
49 | #define CFB64_BLOCK_SIZE 8 | 52 | #define CFB64_BLOCK_SIZE 8 |
50 | 53 | ||
54 | #define SIZE_IN_WORDS(x) ((x) >> 2) | ||
55 | |||
51 | /* AES flags */ | 56 | /* AES flags */ |
52 | #define AES_FLAGS_MODE_MASK 0x03ff | 57 | /* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */ |
53 | #define AES_FLAGS_ENCRYPT BIT(0) | 58 | #define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC |
54 | #define AES_FLAGS_CBC BIT(1) | 59 | #define AES_FLAGS_GTAGEN AES_MR_GTAGEN |
55 | #define AES_FLAGS_CFB BIT(2) | 60 | #define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK) |
56 | #define AES_FLAGS_CFB8 BIT(3) | 61 | #define AES_FLAGS_ECB AES_MR_OPMOD_ECB |
57 | #define AES_FLAGS_CFB16 BIT(4) | 62 | #define AES_FLAGS_CBC AES_MR_OPMOD_CBC |
58 | #define AES_FLAGS_CFB32 BIT(5) | 63 | #define AES_FLAGS_OFB AES_MR_OPMOD_OFB |
59 | #define AES_FLAGS_CFB64 BIT(6) | 64 | #define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b) |
60 | #define AES_FLAGS_CFB128 BIT(7) | 65 | #define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b) |
61 | #define AES_FLAGS_OFB BIT(8) | 66 | #define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b) |
62 | #define AES_FLAGS_CTR BIT(9) | 67 | #define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b) |
63 | 68 | #define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b) | |
64 | #define AES_FLAGS_INIT BIT(16) | 69 | #define AES_FLAGS_CTR AES_MR_OPMOD_CTR |
65 | #define AES_FLAGS_DMA BIT(17) | 70 | #define AES_FLAGS_GCM AES_MR_OPMOD_GCM |
66 | #define AES_FLAGS_BUSY BIT(18) | 71 | |
67 | #define AES_FLAGS_FAST BIT(19) | 72 | #define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \ |
73 | AES_FLAGS_ENCRYPT | \ | ||
74 | AES_FLAGS_GTAGEN) | ||
75 | |||
76 | #define AES_FLAGS_INIT BIT(2) | ||
77 | #define AES_FLAGS_BUSY BIT(3) | ||
78 | #define AES_FLAGS_DUMP_REG BIT(4) | ||
79 | |||
80 | #define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY) | ||
68 | 81 | ||
69 | #define ATMEL_AES_QUEUE_LENGTH 50 | 82 | #define ATMEL_AES_QUEUE_LENGTH 50 |
70 | 83 | ||
71 | #define ATMEL_AES_DMA_THRESHOLD 16 | 84 | #define ATMEL_AES_DMA_THRESHOLD 256 |
72 | 85 | ||
73 | 86 | ||
74 | struct atmel_aes_caps { | 87 | struct atmel_aes_caps { |
75 | bool has_dualbuff; | 88 | bool has_dualbuff; |
76 | bool has_cfb64; | 89 | bool has_cfb64; |
77 | u32 max_burst_size; | 90 | bool has_ctr32; |
91 | bool has_gcm; | ||
92 | u32 max_burst_size; | ||
78 | }; | 93 | }; |
79 | 94 | ||
80 | struct atmel_aes_dev; | 95 | struct atmel_aes_dev; |
81 | 96 | ||
97 | |||
98 | typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *); | ||
99 | |||
100 | |||
101 | struct atmel_aes_base_ctx { | ||
102 | struct atmel_aes_dev *dd; | ||
103 | atmel_aes_fn_t start; | ||
104 | int keylen; | ||
105 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | ||
106 | u16 block_size; | ||
107 | }; | ||
108 | |||
82 | struct atmel_aes_ctx { | 109 | struct atmel_aes_ctx { |
83 | struct atmel_aes_dev *dd; | 110 | struct atmel_aes_base_ctx base; |
111 | }; | ||
112 | |||
113 | struct atmel_aes_ctr_ctx { | ||
114 | struct atmel_aes_base_ctx base; | ||
115 | |||
116 | u32 iv[AES_BLOCK_SIZE / sizeof(u32)]; | ||
117 | size_t offset; | ||
118 | struct scatterlist src[2]; | ||
119 | struct scatterlist dst[2]; | ||
120 | }; | ||
84 | 121 | ||
85 | int keylen; | 122 | struct atmel_aes_gcm_ctx { |
86 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | 123 | struct atmel_aes_base_ctx base; |
87 | 124 | ||
88 | u16 block_size; | 125 | struct scatterlist src[2]; |
126 | struct scatterlist dst[2]; | ||
127 | |||
128 | u32 j0[AES_BLOCK_SIZE / sizeof(u32)]; | ||
129 | u32 tag[AES_BLOCK_SIZE / sizeof(u32)]; | ||
130 | u32 ghash[AES_BLOCK_SIZE / sizeof(u32)]; | ||
131 | size_t textlen; | ||
132 | |||
133 | const u32 *ghash_in; | ||
134 | u32 *ghash_out; | ||
135 | atmel_aes_fn_t ghash_resume; | ||
89 | }; | 136 | }; |
90 | 137 | ||
91 | struct atmel_aes_reqctx { | 138 | struct atmel_aes_reqctx { |
92 | unsigned long mode; | 139 | unsigned long mode; |
93 | }; | 140 | }; |
94 | 141 | ||
95 | struct atmel_aes_dma { | 142 | struct atmel_aes_dma { |
96 | struct dma_chan *chan; | 143 | struct dma_chan *chan; |
97 | struct dma_slave_config dma_conf; | 144 | struct scatterlist *sg; |
145 | int nents; | ||
146 | unsigned int remainder; | ||
147 | unsigned int sg_len; | ||
98 | }; | 148 | }; |
99 | 149 | ||
100 | struct atmel_aes_dev { | 150 | struct atmel_aes_dev { |
@@ -102,13 +152,18 @@ struct atmel_aes_dev { | |||
102 | unsigned long phys_base; | 152 | unsigned long phys_base; |
103 | void __iomem *io_base; | 153 | void __iomem *io_base; |
104 | 154 | ||
105 | struct atmel_aes_ctx *ctx; | 155 | struct crypto_async_request *areq; |
156 | struct atmel_aes_base_ctx *ctx; | ||
157 | |||
158 | bool is_async; | ||
159 | atmel_aes_fn_t resume; | ||
160 | atmel_aes_fn_t cpu_transfer_complete; | ||
161 | |||
106 | struct device *dev; | 162 | struct device *dev; |
107 | struct clk *iclk; | 163 | struct clk *iclk; |
108 | int irq; | 164 | int irq; |
109 | 165 | ||
110 | unsigned long flags; | 166 | unsigned long flags; |
111 | int err; | ||
112 | 167 | ||
113 | spinlock_t lock; | 168 | spinlock_t lock; |
114 | struct crypto_queue queue; | 169 | struct crypto_queue queue; |
@@ -116,33 +171,21 @@ struct atmel_aes_dev { | |||
116 | struct tasklet_struct done_task; | 171 | struct tasklet_struct done_task; |
117 | struct tasklet_struct queue_task; | 172 | struct tasklet_struct queue_task; |
118 | 173 | ||
119 | struct ablkcipher_request *req; | 174 | size_t total; |
120 | size_t total; | 175 | size_t datalen; |
176 | u32 *data; | ||
121 | 177 | ||
122 | struct scatterlist *in_sg; | 178 | struct atmel_aes_dma src; |
123 | unsigned int nb_in_sg; | 179 | struct atmel_aes_dma dst; |
124 | size_t in_offset; | ||
125 | struct scatterlist *out_sg; | ||
126 | unsigned int nb_out_sg; | ||
127 | size_t out_offset; | ||
128 | 180 | ||
129 | size_t bufcnt; | 181 | size_t buflen; |
130 | size_t buflen; | 182 | void *buf; |
131 | size_t dma_size; | 183 | struct scatterlist aligned_sg; |
132 | 184 | struct scatterlist *real_dst; | |
133 | void *buf_in; | ||
134 | int dma_in; | ||
135 | dma_addr_t dma_addr_in; | ||
136 | struct atmel_aes_dma dma_lch_in; | ||
137 | |||
138 | void *buf_out; | ||
139 | int dma_out; | ||
140 | dma_addr_t dma_addr_out; | ||
141 | struct atmel_aes_dma dma_lch_out; | ||
142 | 185 | ||
143 | struct atmel_aes_caps caps; | 186 | struct atmel_aes_caps caps; |
144 | 187 | ||
145 | u32 hw_version; | 188 | u32 hw_version; |
146 | }; | 189 | }; |
147 | 190 | ||
148 | struct atmel_aes_drv { | 191 | struct atmel_aes_drv { |
@@ -155,71 +198,128 @@ static struct atmel_aes_drv atmel_aes = { | |||
155 | .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), | 198 | .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), |
156 | }; | 199 | }; |
157 | 200 | ||
158 | static int atmel_aes_sg_length(struct ablkcipher_request *req, | 201 | #ifdef VERBOSE_DEBUG |
159 | struct scatterlist *sg) | 202 | static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz) |
160 | { | 203 | { |
161 | unsigned int total = req->nbytes; | 204 | switch (offset) { |
162 | int sg_nb; | 205 | case AES_CR: |
163 | unsigned int len; | 206 | return "CR"; |
164 | struct scatterlist *sg_list; | 207 | |
165 | 208 | case AES_MR: | |
166 | sg_nb = 0; | 209 | return "MR"; |
167 | sg_list = sg; | 210 | |
168 | total = req->nbytes; | 211 | case AES_ISR: |
212 | return "ISR"; | ||
213 | |||
214 | case AES_IMR: | ||
215 | return "IMR"; | ||
216 | |||
217 | case AES_IER: | ||
218 | return "IER"; | ||
219 | |||
220 | case AES_IDR: | ||
221 | return "IDR"; | ||
222 | |||
223 | case AES_KEYWR(0): | ||
224 | case AES_KEYWR(1): | ||
225 | case AES_KEYWR(2): | ||
226 | case AES_KEYWR(3): | ||
227 | case AES_KEYWR(4): | ||
228 | case AES_KEYWR(5): | ||
229 | case AES_KEYWR(6): | ||
230 | case AES_KEYWR(7): | ||
231 | snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2); | ||
232 | break; | ||
169 | 233 | ||
170 | while (total) { | 234 | case AES_IDATAR(0): |
171 | len = min(sg_list->length, total); | 235 | case AES_IDATAR(1): |
236 | case AES_IDATAR(2): | ||
237 | case AES_IDATAR(3): | ||
238 | snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2); | ||
239 | break; | ||
172 | 240 | ||
173 | sg_nb++; | 241 | case AES_ODATAR(0): |
174 | total -= len; | 242 | case AES_ODATAR(1): |
243 | case AES_ODATAR(2): | ||
244 | case AES_ODATAR(3): | ||
245 | snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2); | ||
246 | break; | ||
175 | 247 | ||
176 | sg_list = sg_next(sg_list); | 248 | case AES_IVR(0): |
177 | if (!sg_list) | 249 | case AES_IVR(1): |
178 | total = 0; | 250 | case AES_IVR(2): |
179 | } | 251 | case AES_IVR(3): |
252 | snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2); | ||
253 | break; | ||
180 | 254 | ||
181 | return sg_nb; | 255 | case AES_AADLENR: |
182 | } | 256 | return "AADLENR"; |
183 | 257 | ||
184 | static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset, | 258 | case AES_CLENR: |
185 | void *buf, size_t buflen, size_t total, int out) | 259 | return "CLENR"; |
186 | { | ||
187 | unsigned int count, off = 0; | ||
188 | 260 | ||
189 | while (buflen && total) { | 261 | case AES_GHASHR(0): |
190 | count = min((*sg)->length - *offset, total); | 262 | case AES_GHASHR(1): |
191 | count = min(count, buflen); | 263 | case AES_GHASHR(2): |
264 | case AES_GHASHR(3): | ||
265 | snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2); | ||
266 | break; | ||
192 | 267 | ||
193 | if (!count) | 268 | case AES_TAGR(0): |
194 | return off; | 269 | case AES_TAGR(1): |
270 | case AES_TAGR(2): | ||
271 | case AES_TAGR(3): | ||
272 | snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2); | ||
273 | break; | ||
195 | 274 | ||
196 | scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out); | 275 | case AES_CTRR: |
276 | return "CTRR"; | ||
197 | 277 | ||
198 | off += count; | 278 | case AES_GCMHR(0): |
199 | buflen -= count; | 279 | case AES_GCMHR(1): |
200 | *offset += count; | 280 | case AES_GCMHR(2): |
201 | total -= count; | 281 | case AES_GCMHR(3): |
282 | snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2); | ||
202 | | 283 | break; ||
203 | if (*offset == (*sg)->length) { | 284 | default: |
204 | *sg = sg_next(*sg); | 285 | snprintf(tmp, sz, "0x%02x", offset); |
205 | if (*sg) | 286 | break; |
206 | *offset = 0; | ||
207 | else | ||
208 | total = 0; | ||
209 | } | ||
210 | } | 287 | } |
211 | 288 | ||
212 | return off; | 289 | return tmp; |
213 | } | 290 | } |
291 | #endif /* VERBOSE_DEBUG */ | ||
292 | |||
293 | /* Shared functions */ | ||
214 | 294 | ||
215 | static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) | 295 | static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) |
216 | { | 296 | { |
217 | return readl_relaxed(dd->io_base + offset); | 297 | u32 value = readl_relaxed(dd->io_base + offset); |
298 | |||
299 | #ifdef VERBOSE_DEBUG | ||
300 | if (dd->flags & AES_FLAGS_DUMP_REG) { | ||
301 | char tmp[16]; | ||
302 | |||
303 | dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, | ||
304 | atmel_aes_reg_name(offset, tmp, sizeof(tmp))); | ||
305 | } | ||
306 | #endif /* VERBOSE_DEBUG */ | ||
307 | |||
308 | return value; | ||
218 | } | 309 | } |
219 | 310 | ||
220 | static inline void atmel_aes_write(struct atmel_aes_dev *dd, | 311 | static inline void atmel_aes_write(struct atmel_aes_dev *dd, |
221 | u32 offset, u32 value) | 312 | u32 offset, u32 value) |
222 | { | 313 | { |
314 | #ifdef VERBOSE_DEBUG | ||
315 | if (dd->flags & AES_FLAGS_DUMP_REG) { | ||
316 | char tmp[16]; | ||
317 | |||
318 | dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, | ||
319 | atmel_aes_reg_name(offset, tmp)); | ||
320 | } | ||
321 | #endif /* VERBOSE_DEBUG */ | ||
322 | |||
223 | writel_relaxed(value, dd->io_base + offset); | 323 | writel_relaxed(value, dd->io_base + offset); |
224 | } | 324 | } |
225 | 325 | ||
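The register tracing above costs nothing unless two gates are open: VERBOSE_DEBUG must be defined at compile time (otherwise dev_vdbg() compiles out and atmel_aes_reg_name() is not built at all) and the AES_FLAGS_DUMP_REG flag must be set at run time. A hedged sketch of scoping the tracing to a single access (helper hypothetical):

	/* Requires "#define VERBOSE_DEBUG" before the #includes above. */
	static void trace_mode_register(struct atmel_aes_dev *dd)
	{
		dd->flags |= AES_FLAGS_DUMP_REG;
		(void)atmel_aes_read(dd, AES_MR);  /* logs: read 0x... from MR */
		dd->flags &= ~AES_FLAGS_DUMP_REG;
	}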
@@ -231,13 +331,50 @@ static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset, | |||
231 | } | 331 | } |
232 | 332 | ||
233 | static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, | 333 | static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, |
234 | u32 *value, int count) | 334 | const u32 *value, int count) |
235 | { | 335 | { |
236 | for (; count--; value++, offset += 4) | 336 | for (; count--; value++, offset += 4) |
237 | atmel_aes_write(dd, offset, *value); | 337 | atmel_aes_write(dd, offset, *value); |
238 | } | 338 | } |
239 | 339 | ||
240 | static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx) | 340 | static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset, |
341 | u32 *value) | ||
342 | { | ||
343 | atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE)); | ||
344 | } | ||
345 | |||
346 | static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset, | ||
347 | const u32 *value) | ||
348 | { | ||
349 | atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE)); | ||
350 | } | ||
351 | |||
352 | static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd, | ||
353 | atmel_aes_fn_t resume) | ||
354 | { | ||
355 | u32 isr = atmel_aes_read(dd, AES_ISR); | ||
356 | |||
357 | if (unlikely(isr & AES_INT_DATARDY)) | ||
358 | return resume(dd); | ||
359 | |||
360 | dd->resume = resume; | ||
361 | atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); | ||
362 | return -EINPROGRESS; | ||
363 | } | ||
364 | |||
365 | static inline size_t atmel_aes_padlen(size_t len, size_t block_size) | ||
366 | { | ||
367 | len &= block_size - 1; | ||
368 | return len ? block_size - len : 0; | ||
369 | } | ||
370 | |||
371 | static inline struct aead_request * | ||
372 | aead_request_cast(struct crypto_async_request *req) | ||
373 | { | ||
374 | return container_of(req, struct aead_request, base); | ||
375 | } | ||
376 | |||
377 | static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx) | ||
241 | { | 378 | { |
242 | struct atmel_aes_dev *aes_dd = NULL; | 379 | struct atmel_aes_dev *aes_dd = NULL; |
243 | struct atmel_aes_dev *tmp; | 380 | struct atmel_aes_dev *tmp; |
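atmel_aes_wait_for_data_ready() above encodes the driver's new asynchronous shape: if the hardware already raised DATARDY the next step runs immediately, otherwise the step is parked in dd->resume, the interrupt is unmasked and -EINPROGRESS propagates up; the interrupt path later calls dd->resume to continue. Stripped of the hardware, the state machine looks like this (a sketch with hypothetical names):

	#include <linux/errno.h>
	#include <linux/types.h>

	struct machine;
	typedef int (*step_fn_t)(struct machine *);

	struct machine {
		step_fn_t resume;	/* next step, run from IRQ later */
		bool ready;		/* stands in for ISR & DATARDY */
	};

	static int wait_or_defer(struct machine *m, step_fn_t resume)
	{
		if (m->ready)
			return resume(m);	/* fast path: run now */

		m->resume = resume;		/* slow path: park */
		/* the real driver also unmasks DATARDY here */
		return -EINPROGRESS;
	}

	/* irq/tasklet side then does: if (m->resume) m->resume(m); */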
@@ -270,7 +407,6 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd) | |||
270 | atmel_aes_write(dd, AES_CR, AES_CR_SWRST); | 407 | atmel_aes_write(dd, AES_CR, AES_CR_SWRST); |
271 | atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); | 408 | atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); |
272 | dd->flags |= AES_FLAGS_INIT; | 409 | dd->flags |= AES_FLAGS_INIT; |
273 | dd->err = 0; | ||
274 | } | 410 | } |
275 | 411 | ||
276 | return 0; | 412 | return 0; |
@@ -281,552 +417,643 @@ static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd) | |||
281 | return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff; | 417 | return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff; |
282 | } | 418 | } |
283 | 419 | ||
284 | static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd) | 420 | static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd) |
285 | { | 421 | { |
286 | atmel_aes_hw_init(dd); | 422 | int err; |
423 | |||
424 | err = atmel_aes_hw_init(dd); | ||
425 | if (err) | ||
426 | return err; | ||
287 | 427 | ||
288 | dd->hw_version = atmel_aes_get_version(dd); | 428 | dd->hw_version = atmel_aes_get_version(dd); |
289 | 429 | ||
290 | dev_info(dd->dev, | 430 | dev_info(dd->dev, "version: 0x%x\n", dd->hw_version); |
291 | "version: 0x%x\n", dd->hw_version); | ||
292 | 431 | ||
293 | clk_disable_unprepare(dd->iclk); | 432 | clk_disable_unprepare(dd->iclk); |
433 | return 0; | ||
294 | } | 434 | } |
295 | 435 | ||
296 | static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) | 436 | static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd, |
437 | const struct atmel_aes_reqctx *rctx) | ||
297 | { | 438 | { |
298 | struct ablkcipher_request *req = dd->req; | 439 | /* Clear all but persistent flags and set request flags. */ |
440 | dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode; | ||
441 | } | ||
299 | 442 | ||
443 | static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd) | ||
444 | { | ||
445 | return (dd->flags & AES_FLAGS_ENCRYPT); | ||
446 | } | ||
447 | |||
448 | static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) | ||
449 | { | ||
300 | clk_disable_unprepare(dd->iclk); | 450 | clk_disable_unprepare(dd->iclk); |
301 | dd->flags &= ~AES_FLAGS_BUSY; | 451 | dd->flags &= ~AES_FLAGS_BUSY; |
302 | 452 | ||
303 | req->base.complete(&req->base, err); | 453 | if (dd->is_async) |
304 | } | 454 | dd->areq->complete(dd->areq, err); |
305 | 455 | ||
306 | static void atmel_aes_dma_callback(void *data) | 456 | tasklet_schedule(&dd->queue_task); |
307 | { | ||
308 | struct atmel_aes_dev *dd = data; | ||
309 | 457 | ||
310 | /* dma_lch_out - completed */ | 458 | return err; |
311 | tasklet_schedule(&dd->done_task); | ||
312 | } | 459 | } |
313 | 460 | ||
314 | static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, | 461 | static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, |
315 | dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length) | 462 | const u32 *iv) |
316 | { | 463 | { |
317 | struct scatterlist sg[2]; | 464 | u32 valmr = 0; |
318 | struct dma_async_tx_descriptor *in_desc, *out_desc; | ||
319 | 465 | ||
320 | dd->dma_size = length; | 466 | /* MR register must be set before IV registers */ |
467 | if (dd->ctx->keylen == AES_KEYSIZE_128) | ||
468 | valmr |= AES_MR_KEYSIZE_128; | ||
469 | else if (dd->ctx->keylen == AES_KEYSIZE_192) | ||
470 | valmr |= AES_MR_KEYSIZE_192; | ||
471 | else | ||
472 | valmr |= AES_MR_KEYSIZE_256; | ||
321 | 473 | ||
322 | dma_sync_single_for_device(dd->dev, dma_addr_in, length, | 474 | valmr |= dd->flags & AES_FLAGS_MODE_MASK; |
323 | DMA_TO_DEVICE); | ||
324 | dma_sync_single_for_device(dd->dev, dma_addr_out, length, | ||
325 | DMA_FROM_DEVICE); | ||
326 | 475 | ||
327 | if (dd->flags & AES_FLAGS_CFB8) { | 476 | if (use_dma) { |
328 | dd->dma_lch_in.dma_conf.dst_addr_width = | 477 | valmr |= AES_MR_SMOD_IDATAR0; |
329 | DMA_SLAVE_BUSWIDTH_1_BYTE; | 478 | if (dd->caps.has_dualbuff) |
330 | dd->dma_lch_out.dma_conf.src_addr_width = | 479 | valmr |= AES_MR_DUALBUFF; |
331 | DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
332 | } else if (dd->flags & AES_FLAGS_CFB16) { | ||
333 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
334 | DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
335 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
336 | DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
337 | } else { | 480 | } else { |
338 | dd->dma_lch_in.dma_conf.dst_addr_width = | 481 | valmr |= AES_MR_SMOD_AUTO; |
339 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
340 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
341 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
342 | } | 482 | } |
343 | 483 | ||
344 | if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 | | 484 | atmel_aes_write(dd, AES_MR, valmr); |
345 | AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) { | ||
346 | dd->dma_lch_in.dma_conf.src_maxburst = 1; | ||
347 | dd->dma_lch_in.dma_conf.dst_maxburst = 1; | ||
348 | dd->dma_lch_out.dma_conf.src_maxburst = 1; | ||
349 | dd->dma_lch_out.dma_conf.dst_maxburst = 1; | ||
350 | } else { | ||
351 | dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; | ||
352 | dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; | ||
353 | dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; | ||
354 | dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; | ||
355 | } | ||
356 | 485 | ||
357 | dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); | 486 | atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, |
358 | dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); | 487 | SIZE_IN_WORDS(dd->ctx->keylen)); |
359 | 488 | ||
360 | dd->flags |= AES_FLAGS_DMA; | 489 | if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) |
490 | atmel_aes_write_block(dd, AES_IVR(0), iv); | ||
491 | } | ||
361 | 492 | ||
362 | sg_init_table(&sg[0], 1); | ||
363 | sg_dma_address(&sg[0]) = dma_addr_in; | ||
364 | sg_dma_len(&sg[0]) = length; | ||
365 | 493 | ||
366 | sg_init_table(&sg[1], 1); | 494 | /* CPU transfer */ |
367 | sg_dma_address(&sg[1]) = dma_addr_out; | ||
368 | sg_dma_len(&sg[1]) = length; | ||
369 | 495 | ||
370 | in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0], | 496 | static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd) |
371 | 1, DMA_MEM_TO_DEV, | 497 | { |
372 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 498 | int err = 0; |
373 | if (!in_desc) | 499 | u32 isr; |
374 | return -EINVAL; | ||
375 | 500 | ||
376 | out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1], | 501 | for (;;) { |
377 | 1, DMA_DEV_TO_MEM, | 502 | atmel_aes_read_block(dd, AES_ODATAR(0), dd->data); |
378 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 503 | dd->data += 4; |
379 | if (!out_desc) | 504 | dd->datalen -= AES_BLOCK_SIZE; |
380 | return -EINVAL; | ||
381 | 505 | ||
382 | out_desc->callback = atmel_aes_dma_callback; | 506 | if (dd->datalen < AES_BLOCK_SIZE) |
383 | out_desc->callback_param = dd; | 507 | break; |
384 | 508 | ||
385 | dmaengine_submit(out_desc); | 509 | atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); |
386 | dma_async_issue_pending(dd->dma_lch_out.chan); | ||
387 | 510 | ||
388 | dmaengine_submit(in_desc); | 511 | isr = atmel_aes_read(dd, AES_ISR); |
389 | dma_async_issue_pending(dd->dma_lch_in.chan); | 512 | if (!(isr & AES_INT_DATARDY)) { |
513 | dd->resume = atmel_aes_cpu_transfer; | ||
514 | atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); | ||
515 | return -EINPROGRESS; | ||
516 | } | ||
517 | } | ||
390 | 518 | ||
391 | return 0; | 519 | if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst), |
520 | dd->buf, dd->total)) | ||
521 | err = -EINVAL; | ||
522 | |||
523 | if (err) | ||
524 | return atmel_aes_complete(dd, err); | ||
525 | |||
526 | return dd->cpu_transfer_complete(dd); | ||
392 | } | 527 | } |
393 | 528 | ||
394 | static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) | 529 | static int atmel_aes_cpu_start(struct atmel_aes_dev *dd, |
530 | struct scatterlist *src, | ||
531 | struct scatterlist *dst, | ||
532 | size_t len, | ||
533 | atmel_aes_fn_t resume) | ||
395 | { | 534 | { |
396 | dd->flags &= ~AES_FLAGS_DMA; | 535 | size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE); |
397 | 536 | ||
398 | dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, | 537 | if (unlikely(len == 0)) |
399 | dd->dma_size, DMA_TO_DEVICE); | ||
400 | dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, | ||
401 | dd->dma_size, DMA_FROM_DEVICE); | ||
402 | |||
403 | /* use cache buffers */ | ||
404 | dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg); | ||
405 | if (!dd->nb_in_sg) | ||
406 | return -EINVAL; | 538 | return -EINVAL; |
407 | 539 | ||
408 | dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); | 540 | sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); |
409 | if (!dd->nb_out_sg) | ||
410 | return -EINVAL; | ||
411 | 541 | ||
412 | dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg, | 542 | dd->total = len; |
413 | dd->buf_in, dd->total); | 543 | dd->real_dst = dst; |
544 | dd->cpu_transfer_complete = resume; | ||
545 | dd->datalen = len + padlen; | ||
546 | dd->data = (u32 *)dd->buf; | ||
547 | atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); | ||
548 | return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer); | ||
549 | } | ||
414 | 550 | ||
415 | if (!dd->bufcnt) | ||
416 | return -EINVAL; | ||
417 | 551 | ||
418 | dd->total -= dd->bufcnt; | 552 | /* DMA transfer */ |
419 | 553 | ||
420 | atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); | 554 | static void atmel_aes_dma_callback(void *data); |
421 | atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in, | ||
422 | dd->bufcnt >> 2); | ||
423 | 555 | ||
424 | return 0; | 556 | static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd, |
557 | struct scatterlist *sg, | ||
558 | size_t len, | ||
559 | struct atmel_aes_dma *dma) | ||
560 | { | ||
561 | int nents; | ||
562 | |||
563 | if (!IS_ALIGNED(len, dd->ctx->block_size)) | ||
564 | return false; | ||
565 | |||
566 | for (nents = 0; sg; sg = sg_next(sg), ++nents) { | ||
567 | if (!IS_ALIGNED(sg->offset, sizeof(u32))) | ||
568 | return false; | ||
569 | |||
570 | if (len <= sg->length) { | ||
571 | if (!IS_ALIGNED(len, dd->ctx->block_size)) | ||
572 | return false; | ||
573 | |||
574 | dma->nents = nents + 1; ||
575 | dma->remainder = sg->length - len; | ||
576 | sg->length = len; | ||
577 | return true; | ||
578 | } | ||
579 | |||
580 | if (!IS_ALIGNED(sg->length, dd->ctx->block_size)) | ||
581 | return false; | ||
582 | |||
583 | len -= sg->length; | ||
584 | } | ||
585 | |||
586 | return false; | ||
425 | } | 587 | } |
426 | 588 | ||
427 | static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) | 589 | static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma) |
428 | { | 590 | { |
429 | int err, fast = 0, in, out; | 591 | struct scatterlist *sg = dma->sg; |
430 | size_t count; | 592 | int nents = dma->nents; |
431 | dma_addr_t addr_in, addr_out; | ||
432 | 593 | ||
433 | if ((!dd->in_offset) && (!dd->out_offset)) { | 594 | if (!dma->remainder) |
434 | /* check for alignment */ | 595 | return; |
435 | in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) && | ||
436 | IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size); | ||
437 | out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) && | ||
438 | IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size); | ||
439 | fast = in && out; | ||
440 | 596 | ||
441 | if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg)) | 597 | while (--nents > 0 && sg) |
442 | fast = 0; | 598 | sg = sg_next(sg); |
443 | } | ||
444 | 599 | ||
600 | if (!sg) | ||
601 | return; | ||
445 | 602 | ||
446 | if (fast) { | 603 | sg->length += dma->remainder; |
447 | count = min(dd->total, sg_dma_len(dd->in_sg)); | 604 | } |
448 | count = min(count, sg_dma_len(dd->out_sg)); | ||
449 | 605 | ||
450 | err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | 606 | static int atmel_aes_map(struct atmel_aes_dev *dd, |
451 | if (!err) { | 607 | struct scatterlist *src, |
452 | dev_err(dd->dev, "dma_map_sg() error\n"); | 608 | struct scatterlist *dst, |
453 | return -EINVAL; | 609 | size_t len) |
454 | } | 610 | { |
611 | bool src_aligned, dst_aligned; | ||
612 | size_t padlen; | ||
613 | |||
614 | dd->total = len; | ||
615 | dd->src.sg = src; | ||
616 | dd->dst.sg = dst; | ||
617 | dd->real_dst = dst; | ||
455 | 618 | ||
456 | err = dma_map_sg(dd->dev, dd->out_sg, 1, | 619 | src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src); |
457 | DMA_FROM_DEVICE); | 620 | if (src == dst) |
458 | if (!err) { | 621 | dst_aligned = src_aligned; |
459 | dev_err(dd->dev, "dma_map_sg() error\n"); | 622 | else |
460 | dma_unmap_sg(dd->dev, dd->in_sg, 1, | 623 | dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst); |
461 | DMA_TO_DEVICE); | 624 | if (!src_aligned || !dst_aligned) { |
462 | return -EINVAL; | 625 | padlen = atmel_aes_padlen(len, dd->ctx->block_size); |
626 | |||
627 | if (dd->buflen < len + padlen) | ||
628 | return -ENOMEM; | ||
629 | |||
630 | if (!src_aligned) { | ||
631 | sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); | ||
632 | dd->src.sg = &dd->aligned_sg; | ||
633 | dd->src.nents = 1; | ||
634 | dd->src.remainder = 0; | ||
463 | } | 635 | } |
464 | 636 | ||
465 | addr_in = sg_dma_address(dd->in_sg); | 637 | if (!dst_aligned) { |
466 | addr_out = sg_dma_address(dd->out_sg); | 638 | dd->dst.sg = &dd->aligned_sg; |
639 | dd->dst.nents = 1; | ||
640 | dd->dst.remainder = 0; | ||
641 | } | ||
467 | 642 | ||
468 | dd->flags |= AES_FLAGS_FAST; | 643 | sg_init_table(&dd->aligned_sg, 1); |
644 | sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen); | ||
645 | } | ||
469 | 646 | ||
647 | if (dd->src.sg == dd->dst.sg) { | ||
648 | dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents, | ||
649 | DMA_BIDIRECTIONAL); | ||
650 | dd->dst.sg_len = dd->src.sg_len; | ||
651 | if (!dd->src.sg_len) | ||
652 | return -EFAULT; | ||
470 | } else { | 653 | } else { |
471 | dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, | 654 | dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents, |
472 | dd->dma_size, DMA_TO_DEVICE); | 655 | DMA_TO_DEVICE); |
656 | if (!dd->src.sg_len) | ||
657 | return -EFAULT; | ||
658 | |||
659 | dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents, | ||
660 | DMA_FROM_DEVICE); | ||
661 | if (!dd->dst.sg_len) { | ||
662 | dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, | ||
663 | DMA_TO_DEVICE); | ||
664 | return -EFAULT; | ||
665 | } | ||
666 | } | ||
473 | 667 | ||
474 | /* use cache buffers */ | 668 | return 0; |
475 | count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset, | 669 | } |
476 | dd->buf_in, dd->buflen, dd->total, 0); | ||
477 | 670 | ||
478 | addr_in = dd->dma_addr_in; | 671 | static void atmel_aes_unmap(struct atmel_aes_dev *dd) |
479 | addr_out = dd->dma_addr_out; | 672 | { |
673 | if (dd->src.sg == dd->dst.sg) { | ||
674 | dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, | ||
675 | DMA_BIDIRECTIONAL); | ||
480 | 676 | ||
481 | dd->flags &= ~AES_FLAGS_FAST; | 677 | if (dd->src.sg != &dd->aligned_sg) |
482 | } | 678 | atmel_aes_restore_sg(&dd->src); |
679 | } else { | ||
680 | dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents, | ||
681 | DMA_FROM_DEVICE); | ||
483 | 682 | ||
484 | dd->total -= count; | 683 | if (dd->dst.sg != &dd->aligned_sg) |
684 | atmel_aes_restore_sg(&dd->dst); | ||
485 | 685 | ||
486 | err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count); | 686 | dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, |
687 | DMA_TO_DEVICE); | ||
487 | 688 | ||
488 | if (err && (dd->flags & AES_FLAGS_FAST)) { | 689 | if (dd->src.sg != &dd->aligned_sg) |
489 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | 690 | atmel_aes_restore_sg(&dd->src); |
490 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); | ||
491 | } | 691 | } |
492 | 692 | ||
493 | return err; | 693 | if (dd->dst.sg == &dd->aligned_sg) |
694 | sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst), | ||
695 | dd->buf, dd->total); | ||
494 | } | 696 | } |
495 | 697 | ||
496 | static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd) | 698 | static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd, |
699 | enum dma_slave_buswidth addr_width, | ||
700 | enum dma_transfer_direction dir, | ||
701 | u32 maxburst) | ||
497 | { | 702 | { |
703 | struct dma_async_tx_descriptor *desc; | ||
704 | struct dma_slave_config config; | ||
705 | dma_async_tx_callback callback; | ||
706 | struct atmel_aes_dma *dma; | ||
498 | int err; | 707 | int err; |
499 | u32 valcr = 0, valmr = 0; | ||
500 | 708 | ||
501 | err = atmel_aes_hw_init(dd); | 709 | memset(&config, 0, sizeof(config)); |
710 | config.direction = dir; | ||
711 | config.src_addr_width = addr_width; | ||
712 | config.dst_addr_width = addr_width; | ||
713 | config.src_maxburst = maxburst; | ||
714 | config.dst_maxburst = maxburst; | ||
715 | |||
716 | switch (dir) { | ||
717 | case DMA_MEM_TO_DEV: | ||
718 | dma = &dd->src; | ||
719 | callback = NULL; | ||
720 | config.dst_addr = dd->phys_base + AES_IDATAR(0); | ||
721 | break; | ||
722 | |||
723 | case DMA_DEV_TO_MEM: | ||
724 | dma = &dd->dst; | ||
725 | callback = atmel_aes_dma_callback; | ||
726 | config.src_addr = dd->phys_base + AES_ODATAR(0); | ||
727 | break; | ||
502 | 728 | ||
729 | default: | ||
730 | return -EINVAL; | ||
731 | } | ||
732 | |||
733 | err = dmaengine_slave_config(dma->chan, &config); | ||
503 | if (err) | 734 | if (err) |
504 | return err; | 735 | return err; |
505 | 736 | ||
506 | /* MR register must be set before IV registers */ | 737 | desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir, |
507 | if (dd->ctx->keylen == AES_KEYSIZE_128) | 738 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
508 | valmr |= AES_MR_KEYSIZE_128; | 739 | if (!desc) |
509 | else if (dd->ctx->keylen == AES_KEYSIZE_192) | 740 | return -ENOMEM; |
510 | valmr |= AES_MR_KEYSIZE_192; | ||
511 | else | ||
512 | valmr |= AES_MR_KEYSIZE_256; | ||
513 | 741 | ||
514 | if (dd->flags & AES_FLAGS_CBC) { | 742 | desc->callback = callback; |
515 | valmr |= AES_MR_OPMOD_CBC; | 743 | desc->callback_param = dd; |
516 | } else if (dd->flags & AES_FLAGS_CFB) { | 744 | dmaengine_submit(desc); |
517 | valmr |= AES_MR_OPMOD_CFB; | 745 | dma_async_issue_pending(dma->chan); |
518 | if (dd->flags & AES_FLAGS_CFB8) | ||
519 | valmr |= AES_MR_CFBS_8b; | ||
520 | else if (dd->flags & AES_FLAGS_CFB16) | ||
521 | valmr |= AES_MR_CFBS_16b; | ||
522 | else if (dd->flags & AES_FLAGS_CFB32) | ||
523 | valmr |= AES_MR_CFBS_32b; | ||
524 | else if (dd->flags & AES_FLAGS_CFB64) | ||
525 | valmr |= AES_MR_CFBS_64b; | ||
526 | else if (dd->flags & AES_FLAGS_CFB128) | ||
527 | valmr |= AES_MR_CFBS_128b; | ||
528 | } else if (dd->flags & AES_FLAGS_OFB) { | ||
529 | valmr |= AES_MR_OPMOD_OFB; | ||
530 | } else if (dd->flags & AES_FLAGS_CTR) { | ||
531 | valmr |= AES_MR_OPMOD_CTR; | ||
532 | } else { | ||
533 | valmr |= AES_MR_OPMOD_ECB; | ||
534 | } | ||
535 | 746 | ||
536 | if (dd->flags & AES_FLAGS_ENCRYPT) | 747 | return 0; |
537 | valmr |= AES_MR_CYPHER_ENC; | 748 | } |
538 | 749 | ||
539 | if (dd->total > ATMEL_AES_DMA_THRESHOLD) { | 750 | static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd, |
540 | valmr |= AES_MR_SMOD_IDATAR0; | 751 | enum dma_transfer_direction dir) |
541 | if (dd->caps.has_dualbuff) | 752 | { |
542 | valmr |= AES_MR_DUALBUFF; | 753 | struct atmel_aes_dma *dma; |
543 | } else { | 754 | |
544 | valmr |= AES_MR_SMOD_AUTO; | 755 | switch (dir) { |
756 | case DMA_MEM_TO_DEV: | ||
757 | dma = &dd->src; | ||
758 | break; | ||
759 | |||
760 | case DMA_DEV_TO_MEM: | ||
761 | dma = &dd->dst; | ||
762 | break; | ||
763 | |||
764 | default: | ||
765 | return; | ||
545 | } | 766 | } |
546 | 767 | ||
547 | atmel_aes_write(dd, AES_CR, valcr); | 768 | dmaengine_terminate_all(dma->chan); |
548 | atmel_aes_write(dd, AES_MR, valmr); | 769 | } |
549 | 770 | ||
550 | atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, | 771 | static int atmel_aes_dma_start(struct atmel_aes_dev *dd, |
551 | dd->ctx->keylen >> 2); | 772 | struct scatterlist *src, |
773 | struct scatterlist *dst, | ||
774 | size_t len, | ||
775 | atmel_aes_fn_t resume) | ||
776 | { | ||
777 | enum dma_slave_buswidth addr_width; | ||
778 | u32 maxburst; | ||
779 | int err; | ||
780 | |||
781 | switch (dd->ctx->block_size) { | ||
782 | case CFB8_BLOCK_SIZE: | ||
783 | addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
784 | maxburst = 1; | ||
785 | break; | ||
786 | |||
787 | case CFB16_BLOCK_SIZE: | ||
788 | addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
789 | maxburst = 1; | ||
790 | break; | ||
791 | |||
792 | case CFB32_BLOCK_SIZE: | ||
793 | case CFB64_BLOCK_SIZE: | ||
794 | addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
795 | maxburst = 1; | ||
796 | break; | ||
797 | |||
798 | case AES_BLOCK_SIZE: | ||
799 | addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
800 | maxburst = dd->caps.max_burst_size; | ||
801 | break; | ||
552 | 802 | ||
553 | if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) || | 803 | default: |
554 | (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) && | 804 | err = -EINVAL; |
555 | dd->req->info) { | 805 | goto exit; |
556 | atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4); | ||
557 | } | 806 | } |
558 | 807 | ||
559 | return 0; | 808 | err = atmel_aes_map(dd, src, dst, len); |
809 | if (err) | ||
810 | goto exit; | ||
811 | |||
812 | dd->resume = resume; | ||
813 | |||
814 | /* Set output DMA transfer first */ | ||
815 | err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM, | ||
816 | maxburst); | ||
817 | if (err) | ||
818 | goto unmap; | ||
819 | |||
820 | /* Then set input DMA transfer */ | ||
821 | err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV, | ||
822 | maxburst); | ||
823 | if (err) | ||
824 | goto output_transfer_stop; | ||
825 | |||
826 | return -EINPROGRESS; | ||
827 | |||
828 | output_transfer_stop: | ||
829 | atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM); | ||
830 | unmap: | ||
831 | atmel_aes_unmap(dd); | ||
832 | exit: | ||
833 | return atmel_aes_complete(dd, err); | ||
834 | } | ||
835 | |||
836 | static void atmel_aes_dma_stop(struct atmel_aes_dev *dd) | ||
837 | { | ||
838 | atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV); | ||
839 | atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM); | ||
840 | atmel_aes_unmap(dd); | ||
841 | } | ||
842 | |||
843 | static void atmel_aes_dma_callback(void *data) | ||
844 | { | ||
845 | struct atmel_aes_dev *dd = data; | ||
846 | |||
847 | atmel_aes_dma_stop(dd); | ||
848 | dd->is_async = true; | ||
849 | (void)dd->resume(dd); | ||
560 | } | 850 | } |
561 | 851 | ||
562 | static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, | 852 | static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, |
563 | struct ablkcipher_request *req) | 853 | struct crypto_async_request *new_areq) |
564 | { | 854 | { |
565 | struct crypto_async_request *async_req, *backlog; | 855 | struct crypto_async_request *areq, *backlog; |
566 | struct atmel_aes_ctx *ctx; | 856 | struct atmel_aes_base_ctx *ctx; |
567 | struct atmel_aes_reqctx *rctx; | ||
568 | unsigned long flags; | 857 | unsigned long flags; |
569 | int err, ret = 0; | 858 | int err, ret = 0; |
570 | 859 | ||
571 | spin_lock_irqsave(&dd->lock, flags); | 860 | spin_lock_irqsave(&dd->lock, flags); |
572 | if (req) | 861 | if (new_areq) |
573 | ret = ablkcipher_enqueue_request(&dd->queue, req); | 862 | ret = crypto_enqueue_request(&dd->queue, new_areq); |
574 | if (dd->flags & AES_FLAGS_BUSY) { | 863 | if (dd->flags & AES_FLAGS_BUSY) { |
575 | spin_unlock_irqrestore(&dd->lock, flags); | 864 | spin_unlock_irqrestore(&dd->lock, flags); |
576 | return ret; | 865 | return ret; |
577 | } | 866 | } |
578 | backlog = crypto_get_backlog(&dd->queue); | 867 | backlog = crypto_get_backlog(&dd->queue); |
579 | async_req = crypto_dequeue_request(&dd->queue); | 868 | areq = crypto_dequeue_request(&dd->queue); |
580 | if (async_req) | 869 | if (areq) |
581 | dd->flags |= AES_FLAGS_BUSY; | 870 | dd->flags |= AES_FLAGS_BUSY; |
582 | spin_unlock_irqrestore(&dd->lock, flags); | 871 | spin_unlock_irqrestore(&dd->lock, flags); |
583 | 872 | ||
584 | if (!async_req) | 873 | if (!areq) |
585 | return ret; | 874 | return ret; |
586 | 875 | ||
587 | if (backlog) | 876 | if (backlog) |
588 | backlog->complete(backlog, -EINPROGRESS); | 877 | backlog->complete(backlog, -EINPROGRESS); |
589 | 878 | ||
590 | req = ablkcipher_request_cast(async_req); | 879 | ctx = crypto_tfm_ctx(areq->tfm); |
591 | 880 | ||
592 | /* assign new request to device */ | 881 | dd->areq = areq; |
593 | dd->req = req; | ||
594 | dd->total = req->nbytes; | ||
595 | dd->in_offset = 0; | ||
596 | dd->in_sg = req->src; | ||
597 | dd->out_offset = 0; | ||
598 | dd->out_sg = req->dst; | ||
599 | |||
600 | rctx = ablkcipher_request_ctx(req); | ||
601 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
602 | rctx->mode &= AES_FLAGS_MODE_MASK; | ||
603 | dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode; | ||
604 | dd->ctx = ctx; | 882 | dd->ctx = ctx; |
605 | ctx->dd = dd; | 883 | dd->is_async = (areq != new_areq); |
606 | 884 | ||
607 | err = atmel_aes_write_ctrl(dd); | 885 | err = ctx->start(dd); |
608 | if (!err) { | 886 | return (dd->is_async) ? ret : err; |
609 | if (dd->total > ATMEL_AES_DMA_THRESHOLD) | ||
610 | err = atmel_aes_crypt_dma_start(dd); | ||
611 | else | ||
612 | err = atmel_aes_crypt_cpu_start(dd); | ||
613 | } | ||
614 | if (err) { | ||
615 | /* aes_task will not finish it, so do it here */ | ||
616 | atmel_aes_finish_req(dd, err); | ||
617 | tasklet_schedule(&dd->queue_task); | ||
618 | } | ||
619 | |||
620 | return ret; | ||
621 | } | 887 | } |
622 | 888 | ||
623 | static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) | ||
624 | { | ||
625 | int err = -EINVAL; | ||
626 | size_t count; | ||
627 | 889 | ||
628 | if (dd->flags & AES_FLAGS_DMA) { | 890 | /* AES async block ciphers */ |
629 | err = 0; | ||
630 | if (dd->flags & AES_FLAGS_FAST) { | ||
631 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
632 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
633 | } else { | ||
634 | dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, | ||
635 | dd->dma_size, DMA_FROM_DEVICE); | ||
636 | |||
637 | /* copy data */ | ||
638 | count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset, | ||
639 | dd->buf_out, dd->buflen, dd->dma_size, 1); | ||
640 | if (count != dd->dma_size) { | ||
641 | err = -EINVAL; | ||
642 | pr_err("not all data converted: %u\n", count); | ||
643 | } | ||
644 | } | ||
645 | } | ||
646 | 891 | ||
647 | return err; | 892 | static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd) |
893 | { | ||
894 | return atmel_aes_complete(dd, 0); | ||
648 | } | 895 | } |
649 | 896 | ||
650 | 897 | static int atmel_aes_start(struct atmel_aes_dev *dd) | |
651 | static int atmel_aes_buff_init(struct atmel_aes_dev *dd) | ||
652 | { | 898 | { |
653 | int err = -ENOMEM; | 899 | struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); |
654 | 900 | struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); | |
655 | dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0); | 901 | bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD || |
656 | dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0); | 902 | dd->ctx->block_size != AES_BLOCK_SIZE); |
657 | dd->buflen = PAGE_SIZE; | 903 | int err; |
658 | dd->buflen &= ~(AES_BLOCK_SIZE - 1); | ||
659 | |||
660 | if (!dd->buf_in || !dd->buf_out) { | ||
661 | dev_err(dd->dev, "unable to alloc pages.\n"); | ||
662 | goto err_alloc; | ||
663 | } | ||
664 | 904 | ||
665 | /* MAP here */ | 905 | atmel_aes_set_mode(dd, rctx); |
666 | dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, | ||
667 | dd->buflen, DMA_TO_DEVICE); | ||
668 | if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { | ||
669 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
670 | err = -EINVAL; | ||
671 | goto err_map_in; | ||
672 | } | ||
673 | 906 | ||
674 | dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, | 907 | err = atmel_aes_hw_init(dd); |
675 | dd->buflen, DMA_FROM_DEVICE); | 908 | if (err) |
676 | if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { | 909 | return atmel_aes_complete(dd, err); |
677 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
678 | err = -EINVAL; | ||
679 | goto err_map_out; | ||
680 | } | ||
681 | 910 | ||
682 | return 0; | 911 | atmel_aes_write_ctrl(dd, use_dma, req->info); |
912 | if (use_dma) | ||
913 | return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes, | ||
914 | atmel_aes_transfer_complete); | ||
683 | 915 | ||
684 | err_map_out: | 916 | return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes, |
685 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, | 917 | atmel_aes_transfer_complete); |
686 | DMA_TO_DEVICE); | ||
687 | err_map_in: | ||
688 | err_alloc: | ||
689 | free_page((unsigned long)dd->buf_out); | ||
690 | free_page((unsigned long)dd->buf_in); | ||
691 | if (err) | ||
692 | pr_err("error: %d\n", err); | ||
693 | return err; | ||
694 | } | 918 | } |
695 | 919 | ||
696 | static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) | 920 | static inline struct atmel_aes_ctr_ctx * |
921 | atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx) | ||
697 | { | 922 | { |
698 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | 923 | return container_of(ctx, struct atmel_aes_ctr_ctx, base); |
699 | DMA_FROM_DEVICE); | ||
700 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, | ||
701 | DMA_TO_DEVICE); | ||
702 | free_page((unsigned long)dd->buf_out); | ||
703 | free_page((unsigned long)dd->buf_in); | ||
704 | } | 924 | } |
705 | 925 | ||
706 | static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | 926 | static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd) |
707 | { | 927 | { |
708 | struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx( | 928 | struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); |
709 | crypto_ablkcipher_reqtfm(req)); | 929 | struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); |
710 | struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); | 930 | struct scatterlist *src, *dst; |
711 | struct atmel_aes_dev *dd; | 931 | u32 ctr, blocks; |
712 | 932 | size_t datalen; | |
713 | if (mode & AES_FLAGS_CFB8) { | 933 | bool use_dma, fragmented = false; |
714 | if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { | 934 | |
715 | pr_err("request size is not exact amount of CFB8 blocks\n"); | 935 | /* Check for transfer completion. */ |
716 | return -EINVAL; | 936 | ctx->offset += dd->total; |
717 | } | 937 | if (ctx->offset >= req->nbytes) |
718 | ctx->block_size = CFB8_BLOCK_SIZE; | 938 | return atmel_aes_transfer_complete(dd); |
719 | } else if (mode & AES_FLAGS_CFB16) { | 939 | |
720 | if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { | 940 | /* Compute data length. */ |
721 | pr_err("request size is not exact amount of CFB16 blocks\n"); | 941 | datalen = req->nbytes - ctx->offset; |
722 | return -EINVAL; | 942 | blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); |
723 | } | 943 | ctr = be32_to_cpu(ctx->iv[3]); |
724 | ctx->block_size = CFB16_BLOCK_SIZE; | 944 | if (dd->caps.has_ctr32) { |
725 | } else if (mode & AES_FLAGS_CFB32) { | 945 | /* Check 32bit counter overflow. */ |
726 | if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { | 946 | u32 start = ctr; |
727 | pr_err("request size is not exact amount of CFB32 blocks\n"); | 947 | u32 end = start + blocks - 1; |
728 | return -EINVAL; | 948 | |
729 | } | 949 | if (end < start) { |
730 | ctx->block_size = CFB32_BLOCK_SIZE; | 950 | ctr |= 0xffffffff; |
731 | } else if (mode & AES_FLAGS_CFB64) { | 951 | datalen = AES_BLOCK_SIZE * -start; |
732 | if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) { | 952 | fragmented = true; |
733 | pr_err("request size is not exact amount of CFB64 blocks\n"); | ||
734 | return -EINVAL; | ||
735 | } | 953 | } |
736 | ctx->block_size = CFB64_BLOCK_SIZE; | ||
737 | } else { | 954 | } else { |
738 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { | 955 | /* Check 16bit counter overflow. */ |
739 | pr_err("request size is not exact amount of AES blocks\n"); | 956 | u16 start = ctr & 0xffff; |
740 | return -EINVAL; | 957 | u16 end = start + (u16)blocks - 1; |
958 | |||
959 | if (blocks >> 16 || end < start) { | ||
960 | ctr |= 0xffff; | ||
961 | datalen = AES_BLOCK_SIZE * (0x10000 - start); ||
962 | fragmented = true; | ||
741 | } | 963 | } |
742 | ctx->block_size = AES_BLOCK_SIZE; | 964 | } |
965 | use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD); | ||
966 | |||
967 | /* Jump to offset. */ | ||
968 | src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset); | ||
969 | dst = ((req->src == req->dst) ? src : | ||
970 | scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset)); | ||
971 | |||
972 | /* Configure hardware. */ | ||
973 | atmel_aes_write_ctrl(dd, use_dma, ctx->iv); | ||
974 | if (unlikely(fragmented)) { | ||
975 | /* | ||
976 | * Increment the counter manually to cope with the hardware | ||
977 | * counter overflow. | ||
978 | */ | ||
979 | ctx->iv[3] = cpu_to_be32(ctr); | ||
980 | crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE); | ||
743 | } | 981 | } |
744 | 982 | ||
745 | dd = atmel_aes_find_dev(ctx); | 983 | if (use_dma) |
746 | if (!dd) | 984 | return atmel_aes_dma_start(dd, src, dst, datalen, |
747 | return -ENODEV; | 985 | atmel_aes_ctr_transfer); |
748 | |||
749 | rctx->mode = mode; | ||
750 | 986 | ||
751 | return atmel_aes_handle_queue(dd, req); | 987 | return atmel_aes_cpu_start(dd, src, dst, datalen, |
988 | atmel_aes_ctr_transfer); | ||
752 | } | 989 | } |
753 | 990 | ||
754 | static bool atmel_aes_filter(struct dma_chan *chan, void *slave) | 991 | static int atmel_aes_ctr_start(struct atmel_aes_dev *dd) |
755 | { | 992 | { |
756 | struct at_dma_slave *sl = slave; | 993 | struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); |
994 | struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); | ||
995 | struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
996 | int err; | ||
757 | 997 | ||
758 | if (sl && sl->dma_dev == chan->device->dev) { | 998 | atmel_aes_set_mode(dd, rctx); |
759 | chan->private = sl; | 999 | |
760 | return true; | 1000 | err = atmel_aes_hw_init(dd); |
761 | } else { | 1001 | if (err) |
762 | return false; | 1002 | return atmel_aes_complete(dd, err); |
763 | } | 1003 | |
1004 | memcpy(ctx->iv, req->info, AES_BLOCK_SIZE); | ||
1005 | ctx->offset = 0; | ||
1006 | dd->total = 0; | ||
1007 | return atmel_aes_ctr_transfer(dd); | ||
764 | } | 1008 | } |
765 | 1009 | ||
766 | static int atmel_aes_dma_init(struct atmel_aes_dev *dd, | 1010 | static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) |
767 | struct crypto_platform_data *pdata) | ||
768 | { | 1011 | { |
769 | int err = -ENOMEM; | 1012 | struct atmel_aes_base_ctx *ctx; |
770 | dma_cap_mask_t mask; | 1013 | struct atmel_aes_reqctx *rctx; |
1014 | struct atmel_aes_dev *dd; | ||
771 | 1015 | ||
772 | dma_cap_zero(mask); | 1016 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); |
773 | dma_cap_set(DMA_SLAVE, mask); | 1017 | switch (mode & AES_FLAGS_OPMODE_MASK) { |
1018 | case AES_FLAGS_CFB8: | ||
1019 | ctx->block_size = CFB8_BLOCK_SIZE; | ||
1020 | break; | ||
774 | 1021 | ||
775 | /* Try to grab 2 DMA channels */ | 1022 | case AES_FLAGS_CFB16: |
776 | dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask, | 1023 | ctx->block_size = CFB16_BLOCK_SIZE; |
777 | atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); | 1024 | break; |
778 | if (!dd->dma_lch_in.chan) | ||
779 | goto err_dma_in; | ||
780 | 1025 | ||
781 | dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; | 1026 | case AES_FLAGS_CFB32: |
782 | dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + | 1027 | ctx->block_size = CFB32_BLOCK_SIZE; |
783 | AES_IDATAR(0); | 1028 | break; |
784 | dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; | ||
785 | dd->dma_lch_in.dma_conf.src_addr_width = | ||
786 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
787 | dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; | ||
788 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
789 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
790 | dd->dma_lch_in.dma_conf.device_fc = false; | ||
791 | |||
792 | dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask, | ||
793 | atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx"); | ||
794 | if (!dd->dma_lch_out.chan) | ||
795 | goto err_dma_out; | ||
796 | 1029 | ||
797 | dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; | 1030 | case AES_FLAGS_CFB64: |
798 | dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + | 1031 | ctx->block_size = CFB64_BLOCK_SIZE; |
799 | AES_ODATAR(0); | 1032 | break; |
800 | dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; | ||
801 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
802 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
803 | dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; | ||
804 | dd->dma_lch_out.dma_conf.dst_addr_width = | ||
805 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
806 | dd->dma_lch_out.dma_conf.device_fc = false; | ||
807 | 1033 | ||
808 | return 0; | 1034 | default: |
1035 | ctx->block_size = AES_BLOCK_SIZE; | ||
1036 | break; | ||
1037 | } | ||
809 | 1038 | ||
810 | err_dma_out: | 1039 | dd = atmel_aes_find_dev(ctx); |
811 | dma_release_channel(dd->dma_lch_in.chan); | 1040 | if (!dd) |
812 | err_dma_in: | 1041 | return -ENODEV; |
813 | dev_warn(dd->dev, "no DMA channel available\n"); | ||
814 | return err; | ||
815 | } | ||
816 | 1042 | ||
817 | static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) | 1043 | rctx = ablkcipher_request_ctx(req); |
818 | { | 1044 | rctx->mode = mode; |
819 | dma_release_channel(dd->dma_lch_in.chan); | 1045 | |
820 | dma_release_channel(dd->dma_lch_out.chan); | 1046 | return atmel_aes_handle_queue(dd, &req->base); |
821 | } | 1047 | } |
822 | 1048 | ||
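
The switch in atmel_aes_crypt() above derives the transfer block size from the CFB sub-mode; all other op-modes use the full AES block. The same mapping as a compact standalone sketch (enum names hypothetical):

/* CFB variants operate on sub-block segments; everything else uses the
 * full 16-byte AES block. */
enum aes_opmode { ECB, CBC, OFB, CFB128, CFB64, CFB32, CFB16, CFB8, CTR };

static unsigned int opmode_block_size(enum aes_opmode mode)
{
	switch (mode) {
	case CFB8:  return 1;
	case CFB16: return 2;
	case CFB32: return 4;
	case CFB64: return 8;
	default:    return 16; /* AES_BLOCK_SIZE */
	}
}
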
823 | static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 1049 | static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, |
824 | unsigned int keylen) | 1050 | unsigned int keylen) |
825 | { | 1051 | { |
826 | struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 1052 | struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
827 | 1053 | ||
828 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | 1054 | if (keylen != AES_KEYSIZE_128 && |
829 | keylen != AES_KEYSIZE_256) { | 1055 | keylen != AES_KEYSIZE_192 && |
1056 | keylen != AES_KEYSIZE_256) { | ||
830 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 1057 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
831 | return -EINVAL; | 1058 | return -EINVAL; |
832 | } | 1059 | } |
@@ -839,115 +1066,110 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
839 | 1066 | ||
840 | static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req) | 1067 | static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req) |
841 | { | 1068 | { |
842 | return atmel_aes_crypt(req, | 1069 | return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT); |
843 | AES_FLAGS_ENCRYPT); | ||
844 | } | 1070 | } |
845 | 1071 | ||
846 | static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) | 1072 | static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) |
847 | { | 1073 | { |
848 | return atmel_aes_crypt(req, | 1074 | return atmel_aes_crypt(req, AES_FLAGS_ECB); |
849 | 0); | ||
850 | } | 1075 | } |
851 | 1076 | ||
852 | static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) | 1077 | static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) |
853 | { | 1078 | { |
854 | return atmel_aes_crypt(req, | 1079 | return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT); |
855 | AES_FLAGS_ENCRYPT | AES_FLAGS_CBC); | ||
856 | } | 1080 | } |
857 | 1081 | ||
858 | static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req) | 1082 | static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req) |
859 | { | 1083 | { |
860 | return atmel_aes_crypt(req, | 1084 | return atmel_aes_crypt(req, AES_FLAGS_CBC); |
861 | AES_FLAGS_CBC); | ||
862 | } | 1085 | } |
863 | 1086 | ||
864 | static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req) | 1087 | static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req) |
865 | { | 1088 | { |
866 | return atmel_aes_crypt(req, | 1089 | return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT); |
867 | AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); | ||
868 | } | 1090 | } |
869 | 1091 | ||
870 | static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req) | 1092 | static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req) |
871 | { | 1093 | { |
872 | return atmel_aes_crypt(req, | 1094 | return atmel_aes_crypt(req, AES_FLAGS_OFB); |
873 | AES_FLAGS_OFB); | ||
874 | } | 1095 | } |
875 | 1096 | ||
876 | static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req) | 1097 | static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req) |
877 | { | 1098 | { |
878 | return atmel_aes_crypt(req, | 1099 | return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT); |
879 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128); | ||
880 | } | 1100 | } |
881 | 1101 | ||
882 | static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req) | 1102 | static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req) |
883 | { | 1103 | { |
884 | return atmel_aes_crypt(req, | 1104 | return atmel_aes_crypt(req, AES_FLAGS_CFB128); |
885 | AES_FLAGS_CFB | AES_FLAGS_CFB128); | ||
886 | } | 1105 | } |
887 | 1106 | ||
888 | static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req) | 1107 | static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req) |
889 | { | 1108 | { |
890 | return atmel_aes_crypt(req, | 1109 | return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT); |
891 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64); | ||
892 | } | 1110 | } |
893 | 1111 | ||
894 | static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req) | 1112 | static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req) |
895 | { | 1113 | { |
896 | return atmel_aes_crypt(req, | 1114 | return atmel_aes_crypt(req, AES_FLAGS_CFB64); |
897 | AES_FLAGS_CFB | AES_FLAGS_CFB64); | ||
898 | } | 1115 | } |
899 | 1116 | ||
900 | static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req) | 1117 | static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req) |
901 | { | 1118 | { |
902 | return atmel_aes_crypt(req, | 1119 | return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT); |
903 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32); | ||
904 | } | 1120 | } |
905 | 1121 | ||
906 | static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req) | 1122 | static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req) |
907 | { | 1123 | { |
908 | return atmel_aes_crypt(req, | 1124 | return atmel_aes_crypt(req, AES_FLAGS_CFB32); |
909 | AES_FLAGS_CFB | AES_FLAGS_CFB32); | ||
910 | } | 1125 | } |
911 | 1126 | ||
912 | static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req) | 1127 | static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req) |
913 | { | 1128 | { |
914 | return atmel_aes_crypt(req, | 1129 | return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT); |
915 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16); | ||
916 | } | 1130 | } |
917 | 1131 | ||
918 | static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req) | 1132 | static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req) |
919 | { | 1133 | { |
920 | return atmel_aes_crypt(req, | 1134 | return atmel_aes_crypt(req, AES_FLAGS_CFB16); |
921 | AES_FLAGS_CFB | AES_FLAGS_CFB16); | ||
922 | } | 1135 | } |
923 | 1136 | ||
924 | static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req) | 1137 | static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req) |
925 | { | 1138 | { |
926 | return atmel_aes_crypt(req, | 1139 | return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT); |
927 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8); | ||
928 | } | 1140 | } |
929 | 1141 | ||
930 | static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req) | 1142 | static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req) |
931 | { | 1143 | { |
932 | return atmel_aes_crypt(req, | 1144 | return atmel_aes_crypt(req, AES_FLAGS_CFB8); |
933 | AES_FLAGS_CFB | AES_FLAGS_CFB8); | ||
934 | } | 1145 | } |
935 | 1146 | ||
936 | static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req) | 1147 | static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req) |
937 | { | 1148 | { |
938 | return atmel_aes_crypt(req, | 1149 | return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT); |
939 | AES_FLAGS_ENCRYPT | AES_FLAGS_CTR); | ||
940 | } | 1150 | } |
941 | 1151 | ||
942 | static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req) | 1152 | static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req) |
943 | { | 1153 | { |
944 | return atmel_aes_crypt(req, | 1154 | return atmel_aes_crypt(req, AES_FLAGS_CTR); |
945 | AES_FLAGS_CTR); | ||
946 | } | 1155 | } |
947 | 1156 | ||
948 | static int atmel_aes_cra_init(struct crypto_tfm *tfm) | 1157 | static int atmel_aes_cra_init(struct crypto_tfm *tfm) |
949 | { | 1158 | { |
1159 | struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1160 | |||
1161 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); | ||
1162 | ctx->base.start = atmel_aes_start; | ||
1163 | |||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
1167 | static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm) | ||
1168 | { | ||
1169 | struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1170 | |||
950 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); | 1171 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); |
1172 | ctx->base.start = atmel_aes_ctr_start; | ||
951 | 1173 | ||
952 | return 0; | 1174 | return 0; |
953 | } | 1175 | } |
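
The two cra_init variants above differ only in the start handler they install in the shared base context; the queue handler later dispatches through that pointer without knowing the mode. A minimal sketch of the pattern, with hypothetical, heavily simplified types:

struct aes_dev;                          /* opaque device state */
typedef int (*aes_start_fn)(struct aes_dev *dd);

struct aes_base_ctx {
	aes_start_fn start;              /* mode-specific entry point */
};

struct aes_ctr_ctx {
	struct aes_base_ctx base;        /* must stay first: cast target */
	unsigned char iv[16];
};

/* The queue handler only ever sees the base context. */
static int handle_queue(struct aes_dev *dd, struct aes_base_ctx *ctx)
{
	return ctx->start(dd);           /* atmel_aes_start or atmel_aes_ctr_start */
}
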
@@ -960,7 +1182,7 @@ static struct crypto_alg aes_algs[] = { | |||
960 | { | 1182 | { |
961 | .cra_name = "ecb(aes)", | 1183 | .cra_name = "ecb(aes)", |
962 | .cra_driver_name = "atmel-ecb-aes", | 1184 | .cra_driver_name = "atmel-ecb-aes", |
963 | .cra_priority = 100, | 1185 | .cra_priority = ATMEL_AES_PRIORITY, |
964 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1186 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
965 | .cra_blocksize = AES_BLOCK_SIZE, | 1187 | .cra_blocksize = AES_BLOCK_SIZE, |
966 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1188 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), |
@@ -980,7 +1202,7 @@ static struct crypto_alg aes_algs[] = { | |||
980 | { | 1202 | { |
981 | .cra_name = "cbc(aes)", | 1203 | .cra_name = "cbc(aes)", |
982 | .cra_driver_name = "atmel-cbc-aes", | 1204 | .cra_driver_name = "atmel-cbc-aes", |
983 | .cra_priority = 100, | 1205 | .cra_priority = ATMEL_AES_PRIORITY, |
984 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1206 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
985 | .cra_blocksize = AES_BLOCK_SIZE, | 1207 | .cra_blocksize = AES_BLOCK_SIZE, |
986 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1208 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), |
@@ -1001,7 +1223,7 @@ static struct crypto_alg aes_algs[] = { | |||
1001 | { | 1223 | { |
1002 | .cra_name = "ofb(aes)", | 1224 | .cra_name = "ofb(aes)", |
1003 | .cra_driver_name = "atmel-ofb-aes", | 1225 | .cra_driver_name = "atmel-ofb-aes", |
1004 | .cra_priority = 100, | 1226 | .cra_priority = ATMEL_AES_PRIORITY, |
1005 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1227 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1006 | .cra_blocksize = AES_BLOCK_SIZE, | 1228 | .cra_blocksize = AES_BLOCK_SIZE, |
1007 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1229 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), |
@@ -1022,7 +1244,7 @@ static struct crypto_alg aes_algs[] = { | |||
1022 | { | 1244 | { |
1023 | .cra_name = "cfb(aes)", | 1245 | .cra_name = "cfb(aes)", |
1024 | .cra_driver_name = "atmel-cfb-aes", | 1246 | .cra_driver_name = "atmel-cfb-aes", |
1025 | .cra_priority = 100, | 1247 | .cra_priority = ATMEL_AES_PRIORITY, |
1026 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1248 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1027 | .cra_blocksize = AES_BLOCK_SIZE, | 1249 | .cra_blocksize = AES_BLOCK_SIZE, |
1028 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1250 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), |
@@ -1043,7 +1265,7 @@ static struct crypto_alg aes_algs[] = { | |||
1043 | { | 1265 | { |
1044 | .cra_name = "cfb32(aes)", | 1266 | .cra_name = "cfb32(aes)", |
1045 | .cra_driver_name = "atmel-cfb32-aes", | 1267 | .cra_driver_name = "atmel-cfb32-aes", |
1046 | .cra_priority = 100, | 1268 | .cra_priority = ATMEL_AES_PRIORITY, |
1047 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1269 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1048 | .cra_blocksize = CFB32_BLOCK_SIZE, | 1270 | .cra_blocksize = CFB32_BLOCK_SIZE, |
1049 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1271 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), |
@@ -1064,7 +1286,7 @@ static struct crypto_alg aes_algs[] = { | |||
1064 | { | 1286 | { |
1065 | .cra_name = "cfb16(aes)", | 1287 | .cra_name = "cfb16(aes)", |
1066 | .cra_driver_name = "atmel-cfb16-aes", | 1288 | .cra_driver_name = "atmel-cfb16-aes", |
1067 | .cra_priority = 100, | 1289 | .cra_priority = ATMEL_AES_PRIORITY, |
1068 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1290 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1069 | .cra_blocksize = CFB16_BLOCK_SIZE, | 1291 | .cra_blocksize = CFB16_BLOCK_SIZE, |
1070 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1292 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), |
@@ -1085,7 +1307,7 @@ static struct crypto_alg aes_algs[] = { | |||
1085 | { | 1307 | { |
1086 | .cra_name = "cfb8(aes)", | 1308 | .cra_name = "cfb8(aes)", |
1087 | .cra_driver_name = "atmel-cfb8-aes", | 1309 | .cra_driver_name = "atmel-cfb8-aes", |
1088 | .cra_priority = 100, | 1310 | .cra_priority = ATMEL_AES_PRIORITY, |
1089 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1311 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1090 | .cra_blocksize = CFB8_BLOCK_SIZE, | 1312 | .cra_blocksize = CFB8_BLOCK_SIZE, |
1091 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1313 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), |
@@ -1106,14 +1328,14 @@ static struct crypto_alg aes_algs[] = { | |||
1106 | { | 1328 | { |
1107 | .cra_name = "ctr(aes)", | 1329 | .cra_name = "ctr(aes)", |
1108 | .cra_driver_name = "atmel-ctr-aes", | 1330 | .cra_driver_name = "atmel-ctr-aes", |
1109 | .cra_priority = 100, | 1331 | .cra_priority = ATMEL_AES_PRIORITY, |
1110 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1332 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1111 | .cra_blocksize = AES_BLOCK_SIZE, | 1333 | .cra_blocksize = 1, |
1112 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1334 | .cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx), |
1113 | .cra_alignmask = 0xf, | 1335 | .cra_alignmask = 0xf, |
1114 | .cra_type = &crypto_ablkcipher_type, | 1336 | .cra_type = &crypto_ablkcipher_type, |
1115 | .cra_module = THIS_MODULE, | 1337 | .cra_module = THIS_MODULE, |
1116 | .cra_init = atmel_aes_cra_init, | 1338 | .cra_init = atmel_aes_ctr_cra_init, |
1117 | .cra_exit = atmel_aes_cra_exit, | 1339 | .cra_exit = atmel_aes_cra_exit, |
1118 | .cra_u.ablkcipher = { | 1340 | .cra_u.ablkcipher = { |
1119 | .min_keysize = AES_MIN_KEY_SIZE, | 1341 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -1129,7 +1351,7 @@ static struct crypto_alg aes_algs[] = { | |||
1129 | static struct crypto_alg aes_cfb64_alg = { | 1351 | static struct crypto_alg aes_cfb64_alg = { |
1130 | .cra_name = "cfb64(aes)", | 1352 | .cra_name = "cfb64(aes)", |
1131 | .cra_driver_name = "atmel-cfb64-aes", | 1353 | .cra_driver_name = "atmel-cfb64-aes", |
1132 | .cra_priority = 100, | 1354 | .cra_priority = ATMEL_AES_PRIORITY, |
1133 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1355 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1134 | .cra_blocksize = CFB64_BLOCK_SIZE, | 1356 | .cra_blocksize = CFB64_BLOCK_SIZE, |
1135 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | 1357 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), |
@@ -1148,53 +1370,496 @@ static struct crypto_alg aes_cfb64_alg = { | |||
1148 | } | 1370 | } |
1149 | }; | 1371 | }; |
1150 | 1372 | ||
1151 | static void atmel_aes_queue_task(unsigned long data) | 1373 | |
1374 | /* gcm aead functions */ | ||
1375 | |||
1376 | static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd, | ||
1377 | const u32 *data, size_t datalen, | ||
1378 | const u32 *ghash_in, u32 *ghash_out, | ||
1379 | atmel_aes_fn_t resume); | ||
1380 | static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd); | ||
1381 | static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd); | ||
1382 | |||
1383 | static int atmel_aes_gcm_start(struct atmel_aes_dev *dd); | ||
1384 | static int atmel_aes_gcm_process(struct atmel_aes_dev *dd); | ||
1385 | static int atmel_aes_gcm_length(struct atmel_aes_dev *dd); | ||
1386 | static int atmel_aes_gcm_data(struct atmel_aes_dev *dd); | ||
1387 | static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd); | ||
1388 | static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd); | ||
1389 | static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd); | ||
1390 | |||
1391 | static inline struct atmel_aes_gcm_ctx * | ||
1392 | atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx) | ||
1152 | { | 1393 | { |
1153 | struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; | 1394 | return container_of(ctx, struct atmel_aes_gcm_ctx, base); |
1395 | } | ||
1154 | 1396 | ||
1155 | atmel_aes_handle_queue(dd, NULL); | 1397 | static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd, |
1398 | const u32 *data, size_t datalen, | ||
1399 | const u32 *ghash_in, u32 *ghash_out, | ||
1400 | atmel_aes_fn_t resume) | ||
1401 | { | ||
1402 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); | ||
1403 | |||
1404 | dd->data = (u32 *)data; | ||
1405 | dd->datalen = datalen; | ||
1406 | ctx->ghash_in = ghash_in; | ||
1407 | ctx->ghash_out = ghash_out; | ||
1408 | ctx->ghash_resume = resume; | ||
1409 | |||
1410 | atmel_aes_write_ctrl(dd, false, NULL); | ||
1411 | return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init); | ||
1156 | } | 1412 | } |
1157 | 1413 | ||
1158 | static void atmel_aes_done_task(unsigned long data) | 1414 | static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd) |
1159 | { | 1415 | { |
1160 | struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data; | 1416 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); |
1417 | |||
1418 | /* Set the data length. */ | ||
1419 | atmel_aes_write(dd, AES_AADLENR, dd->total); | ||
1420 | atmel_aes_write(dd, AES_CLENR, 0); | ||
1421 | |||
1422 | /* If needed, overwrite the GCM Intermediate Hash Word Registers */ | ||
1423 | if (ctx->ghash_in) | ||
1424 | atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in); | ||
1425 | |||
1426 | return atmel_aes_gcm_ghash_finalize(dd); | ||
1427 | } | ||
1428 | |||
1429 | static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd) | ||
1430 | { | ||
1431 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); | ||
1432 | u32 isr; | ||
1433 | |||
1434 | /* Write data into the Input Data Registers. */ | ||
1435 | while (dd->datalen > 0) { | ||
1436 | atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); | ||
1437 | dd->data += 4; | ||
1438 | dd->datalen -= AES_BLOCK_SIZE; | ||
1439 | |||
1440 | isr = atmel_aes_read(dd, AES_ISR); | ||
1441 | if (!(isr & AES_INT_DATARDY)) { | ||
1442 | dd->resume = atmel_aes_gcm_ghash_finalize; | ||
1443 | atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); | ||
1444 | return -EINPROGRESS; | ||
1445 | } | ||
1446 | } | ||
1447 | |||
1448 | /* Read the computed hash from GHASHRx. */ | ||
1449 | atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out); | ||
1450 | |||
1451 | return ctx->ghash_resume(dd); | ||
1452 | } | ||
1453 | |||
1454 | |||
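
atmel_aes_gcm_start() below pads the IV material to a block boundary with atmel_aes_padlen(). The computation is the usual power-of-two mask trick; a standalone sketch:

#include <stddef.h>

/* Bytes needed to round len up to a multiple of block_size (a power of two). */
static size_t aes_padlen(size_t len, size_t block_size)
{
	len &= block_size - 1;
	return len ? block_size - len : 0;
}
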
1455 | static int atmel_aes_gcm_start(struct atmel_aes_dev *dd) | ||
1456 | { | ||
1457 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); | ||
1458 | struct aead_request *req = aead_request_cast(dd->areq); | ||
1459 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
1460 | struct atmel_aes_reqctx *rctx = aead_request_ctx(req); | ||
1461 | size_t ivsize = crypto_aead_ivsize(tfm); | ||
1462 | size_t datalen, padlen; | ||
1463 | const void *iv = req->iv; | ||
1464 | u8 *data = dd->buf; | ||
1161 | int err; | 1465 | int err; |
1162 | 1466 | ||
1163 | if (!(dd->flags & AES_FLAGS_DMA)) { | 1467 | atmel_aes_set_mode(dd, rctx); |
1164 | atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out, | ||
1165 | dd->bufcnt >> 2); | ||
1166 | 1468 | ||
1167 | if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg, | 1469 | err = atmel_aes_hw_init(dd); |
1168 | dd->buf_out, dd->bufcnt)) | 1470 | if (err) |
1169 | err = 0; | 1471 | return atmel_aes_complete(dd, err); |
1170 | else | 1472 | |
1171 | err = -EINVAL; | 1473 | if (likely(ivsize == 12)) { |
1474 | memcpy(ctx->j0, iv, ivsize); | ||
1475 | ctx->j0[3] = cpu_to_be32(1); | ||
1476 | return atmel_aes_gcm_process(dd); | ||
1477 | } | ||
1478 | |||
1479 | padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE); | ||
1480 | datalen = ivsize + padlen + AES_BLOCK_SIZE; | ||
1481 | if (datalen > dd->buflen) | ||
1482 | return atmel_aes_complete(dd, -EINVAL); | ||
1483 | |||
1484 | memcpy(data, iv, ivsize); | ||
1485 | memset(data + ivsize, 0, padlen + sizeof(u64)); | ||
1486 | ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8); | ||
1487 | |||
1488 | return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen, | ||
1489 | NULL, ctx->j0, atmel_aes_gcm_process); | ||
1490 | } | ||
1172 | 1491 | ||
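
For the common 96-bit IV, GCM's initial counter block J0 is simply IV || 0x00000001 (NIST SP 800-38D); only longer IVs need the GHASH path taken above. A standalone sketch of the fast case:

#include <stdint.h>
#include <string.h>

/* J0 for a 12-byte IV: the IV followed by a big-endian 32-bit value of 1. */
static void gcm_j0_from_96bit_iv(uint8_t j0[16], const uint8_t iv[12])
{
	memcpy(j0, iv, 12);
	j0[12] = 0x00;
	j0[13] = 0x00;
	j0[14] = 0x00;
	j0[15] = 0x01;
}
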
1173 | goto cpu_end; | 1492 | static int atmel_aes_gcm_process(struct atmel_aes_dev *dd) |
1493 | { | ||
1494 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); | ||
1495 | struct aead_request *req = aead_request_cast(dd->areq); | ||
1496 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
1497 | bool enc = atmel_aes_is_encrypt(dd); | ||
1498 | u32 authsize; | ||
1499 | |||
1500 | /* Compute text length. */ | ||
1501 | authsize = crypto_aead_authsize(tfm); | ||
1502 | ctx->textlen = req->cryptlen - (enc ? 0 : authsize); | ||
1503 | |||
1504 | /* | ||
1505 | * According to tcrypt test suite, the GCM Automatic Tag Generation | ||
1506 | * fails when both the message and its associated data are empty. | ||
1507 | */ | ||
1508 | if (likely(req->assoclen != 0 || ctx->textlen != 0)) | ||
1509 | dd->flags |= AES_FLAGS_GTAGEN; | ||
1510 | |||
1511 | atmel_aes_write_ctrl(dd, false, NULL); | ||
1512 | return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length); | ||
1513 | } | ||
1514 | |||
1515 | static int atmel_aes_gcm_length(struct atmel_aes_dev *dd) | ||
1516 | { | ||
1517 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); | ||
1518 | struct aead_request *req = aead_request_cast(dd->areq); | ||
1519 | u32 j0_lsw, *j0 = ctx->j0; | ||
1520 | size_t padlen; | ||
1521 | |||
1522 | /* Write incr32(J0) into IV. */ | ||
1523 | j0_lsw = j0[3]; | ||
1524 | j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1); | ||
1525 | atmel_aes_write_block(dd, AES_IVR(0), j0); | ||
1526 | j0[3] = j0_lsw; | ||
1527 | |||
1528 | /* Set aad and text lengths. */ | ||
1529 | atmel_aes_write(dd, AES_AADLENR, req->assoclen); | ||
1530 | atmel_aes_write(dd, AES_CLENR, ctx->textlen); | ||
1531 | |||
1532 | /* Check whether AAD are present. */ | ||
1533 | if (unlikely(req->assoclen == 0)) { | ||
1534 | dd->datalen = 0; | ||
1535 | return atmel_aes_gcm_data(dd); | ||
1174 | } | 1536 | } |
1175 | 1537 | ||
1176 | err = atmel_aes_crypt_dma_stop(dd); | 1538 | /* Copy assoc data and add padding. */ |
1539 | padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE); | ||
1540 | if (unlikely(req->assoclen + padlen > dd->buflen)) | ||
1541 | return atmel_aes_complete(dd, -EINVAL); | ||
1542 | sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen); | ||
1177 | 1543 | ||
1178 | err = dd->err ? : err; | 1544 | /* Write assoc data into the Input Data register. */ |
1545 | dd->data = (u32 *)dd->buf; | ||
1546 | dd->datalen = req->assoclen + padlen; | ||
1547 | return atmel_aes_gcm_data(dd); | ||
1548 | } | ||
1179 | 1549 | ||
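
The IV written to the hardware above is incr32(J0): only the last 32 bits of the counter block are incremented, modulo 2^32, leaving the upper 96 bits untouched — which is why the code saves and restores j0[3]. Sketch:

#include <stdint.h>

/* incr32(): bump the big-endian low 32-bit word of a counter block. */
static void gcm_incr32(uint8_t block[16])
{
	int i;

	for (i = 15; i >= 12; i--)
		if (++block[i] != 0)
			break;
}
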
1180 | if (dd->total && !err) { | 1550 | static int atmel_aes_gcm_data(struct atmel_aes_dev *dd) |
1181 | if (dd->flags & AES_FLAGS_FAST) { | 1551 | { |
1182 | dd->in_sg = sg_next(dd->in_sg); | 1552 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); |
1183 | dd->out_sg = sg_next(dd->out_sg); | 1553 | struct aead_request *req = aead_request_cast(dd->areq); |
1184 | if (!dd->in_sg || !dd->out_sg) | 1554 | bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD); |
1185 | err = -EINVAL; | 1555 | struct scatterlist *src, *dst; |
1556 | u32 isr, mr; | ||
1557 | |||
1558 | /* Write AAD first. */ | ||
1559 | while (dd->datalen > 0) { | ||
1560 | atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); | ||
1561 | dd->data += 4; | ||
1562 | dd->datalen -= AES_BLOCK_SIZE; | ||
1563 | |||
1564 | isr = atmel_aes_read(dd, AES_ISR); | ||
1565 | if (!(isr & AES_INT_DATARDY)) { | ||
1566 | dd->resume = atmel_aes_gcm_data; | ||
1567 | atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); | ||
1568 | return -EINPROGRESS; | ||
1186 | } | 1569 | } |
1187 | if (!err) | ||
1188 | err = atmel_aes_crypt_dma_start(dd); | ||
1189 | if (!err) | ||
1190 | return; /* DMA started. Not finishing. */ | ||
1191 | } | 1570 | } |
1192 | 1571 | ||
1193 | cpu_end: | 1572 | /* GMAC only. */ |
1194 | atmel_aes_finish_req(dd, err); | 1573 | if (unlikely(ctx->textlen == 0)) |
1574 | return atmel_aes_gcm_tag_init(dd); | ||
1575 | |||
1576 | /* Prepare src and dst scatter lists to transfer cipher/plain texts */ | ||
1577 | src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen); | ||
1578 | dst = ((req->src == req->dst) ? src : | ||
1579 | scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen)); | ||
1580 | |||
1581 | if (use_dma) { | ||
1582 | /* Update the Mode Register for DMA transfers. */ | ||
1583 | mr = atmel_aes_read(dd, AES_MR); | ||
1584 | mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF); | ||
1585 | mr |= AES_MR_SMOD_IDATAR0; | ||
1586 | if (dd->caps.has_dualbuff) | ||
1587 | mr |= AES_MR_DUALBUFF; | ||
1588 | atmel_aes_write(dd, AES_MR, mr); | ||
1589 | |||
1590 | return atmel_aes_dma_start(dd, src, dst, ctx->textlen, | ||
1591 | atmel_aes_gcm_tag_init); | ||
1592 | } | ||
1593 | |||
1594 | return atmel_aes_cpu_start(dd, src, dst, ctx->textlen, | ||
1595 | atmel_aes_gcm_tag_init); | ||
1596 | } | ||
1597 | |||
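
Whether the payload above goes through DMA or the CPU path is a pure size decision: short payloads never amortize the DMA setup cost. A trivial sketch (the 256-byte figure is illustrative only; the actual ATMEL_AES_DMA_THRESHOLD value is not shown in this hunk):

#include <stddef.h>

#define DMA_THRESHOLD 256 /* hypothetical cut-over point, in bytes */

/* Programmed I/O for short payloads, DMA once setup cost amortizes. */
static int use_dma_for(size_t textlen)
{
	return textlen >= DMA_THRESHOLD;
}
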
1598 | static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd) | ||
1599 | { | ||
1600 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); | ||
1601 | struct aead_request *req = aead_request_cast(dd->areq); | ||
1602 | u64 *data = dd->buf; | ||
1603 | |||
1604 | if (likely(dd->flags & AES_FLAGS_GTAGEN)) { | ||
1605 | if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) { | ||
1606 | dd->resume = atmel_aes_gcm_tag_init; | ||
1607 | atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY); | ||
1608 | return -EINPROGRESS; | ||
1609 | } | ||
1610 | |||
1611 | return atmel_aes_gcm_finalize(dd); | ||
1612 | } | ||
1613 | |||
1614 | /* Read the GCM Intermediate Hash Word Registers. */ | ||
1615 | atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash); | ||
1616 | |||
1617 | data[0] = cpu_to_be64(req->assoclen * 8); | ||
1618 | data[1] = cpu_to_be64(ctx->textlen * 8); | ||
1619 | |||
1620 | return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE, | ||
1621 | ctx->ghash, ctx->ghash, atmel_aes_gcm_tag); | ||
1622 | } | ||
1623 | |||
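
The two u64 values written above form GCM's final GHASH block: the bit lengths of the AAD and of the ciphertext, each as a 64-bit big-endian integer. A standalone sketch of that block's layout:

#include <stdint.h>

/* len(A) || len(C), both in bits, big-endian, packed into one 16-byte block. */
static void gcm_length_block(uint8_t block[16], uint64_t aadlen, uint64_t textlen)
{
	uint64_t abits = aadlen * 8, cbits = textlen * 8;
	int i;

	for (i = 0; i < 8; i++) {
		block[i]     = (uint8_t)(abits >> (56 - 8 * i));
		block[8 + i] = (uint8_t)(cbits >> (56 - 8 * i));
	}
}
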
1624 | static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd) | ||
1625 | { | ||
1626 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); | ||
1627 | unsigned long flags; | ||
1628 | |||
1629 | /* | ||
1630 | * Change mode to CTR to complete the tag generation. | ||
1631 | * Use J0 as Initialization Vector. | ||
1632 | */ | ||
1633 | flags = dd->flags; | ||
1634 | dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN); | ||
1635 | dd->flags |= AES_FLAGS_CTR; | ||
1636 | atmel_aes_write_ctrl(dd, false, ctx->j0); | ||
1637 | dd->flags = flags; | ||
1638 | |||
1639 | atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash); | ||
1640 | return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize); | ||
1641 | } | ||
1642 | |||
1643 | static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd) | ||
1644 | { | ||
1645 | struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); | ||
1646 | struct aead_request *req = aead_request_cast(dd->areq); | ||
1647 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
1648 | bool enc = atmel_aes_is_encrypt(dd); | ||
1649 | u32 offset, authsize, itag[4], *otag = ctx->tag; | ||
1650 | int err; | ||
1651 | |||
1652 | /* Read the computed tag. */ | ||
1653 | if (likely(dd->flags & AES_FLAGS_GTAGEN)) | ||
1654 | atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag); | ||
1655 | else | ||
1656 | atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag); | ||
1657 | |||
1658 | offset = req->assoclen + ctx->textlen; | ||
1659 | authsize = crypto_aead_authsize(tfm); | ||
1660 | if (enc) { | ||
1661 | scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1); | ||
1662 | err = 0; | ||
1663 | } else { | ||
1664 | scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0); | ||
1665 | err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0; | ||
1666 | } | ||
1667 | |||
1668 | return atmel_aes_complete(dd, err); | ||
1669 | } | ||
1670 | |||
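
The decrypt path above compares tags with crypto_memneq() rather than memcmp(): the comparison must not exit early, or the time taken leaks how many leading tag bytes matched. A sketch of the accumulate-then-test idiom:

#include <stddef.h>
#include <stdint.h>

/* Returns nonzero if a and b differ; runtime is independent of where. */
static int tag_memneq(const uint8_t *a, const uint8_t *b, size_t len)
{
	uint8_t diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];

	return diff != 0;
}
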
1671 | static int atmel_aes_gcm_crypt(struct aead_request *req, | ||
1672 | unsigned long mode) | ||
1673 | { | ||
1674 | struct atmel_aes_base_ctx *ctx; | ||
1675 | struct atmel_aes_reqctx *rctx; | ||
1676 | struct atmel_aes_dev *dd; | ||
1677 | |||
1678 | ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
1679 | ctx->block_size = AES_BLOCK_SIZE; | ||
1680 | |||
1681 | dd = atmel_aes_find_dev(ctx); | ||
1682 | if (!dd) | ||
1683 | return -ENODEV; | ||
1684 | |||
1685 | rctx = aead_request_ctx(req); | ||
1686 | rctx->mode = AES_FLAGS_GCM | mode; | ||
1687 | |||
1688 | return atmel_aes_handle_queue(dd, &req->base); | ||
1689 | } | ||
1690 | |||
1691 | static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, | ||
1692 | unsigned int keylen) | ||
1693 | { | ||
1694 | struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm); | ||
1695 | |||
1696 | if (keylen != AES_KEYSIZE_256 && | ||
1697 | keylen != AES_KEYSIZE_192 && | ||
1698 | keylen != AES_KEYSIZE_128) { | ||
1699 | crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1700 | return -EINVAL; | ||
1701 | } | ||
1702 | |||
1703 | memcpy(ctx->key, key, keylen); | ||
1704 | ctx->keylen = keylen; | ||
1705 | |||
1706 | return 0; | ||
1707 | } | ||
1708 | |||
1709 | static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm, | ||
1710 | unsigned int authsize) | ||
1711 | { | ||
1712 | /* Same as crypto_gcm_authsize() from crypto/gcm.c */ | ||
1713 | switch (authsize) { | ||
1714 | case 4: | ||
1715 | case 8: | ||
1716 | case 12: | ||
1717 | case 13: | ||
1718 | case 14: | ||
1719 | case 15: | ||
1720 | case 16: | ||
1721 | break; | ||
1722 | default: | ||
1723 | return -EINVAL; | ||
1724 | } | ||
1725 | |||
1726 | return 0; | ||
1727 | } | ||
1728 | |||
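
The switch above admits exactly the tag lengths NIST SP 800-38D allows for GCM: 96 to 128 bits in 8-bit steps, plus the truncated 64- and 32-bit tags. The equivalent predicate, as a sketch:

/* Valid GCM authentication tag sizes in bytes: 4, 8, and 12 through 16. */
static int gcm_authsize_ok(unsigned int authsize)
{
	return authsize == 4 || authsize == 8 ||
	       (authsize >= 12 && authsize <= 16);
}
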
1729 | static int atmel_aes_gcm_encrypt(struct aead_request *req) | ||
1730 | { | ||
1731 | return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT); | ||
1732 | } | ||
1733 | |||
1734 | static int atmel_aes_gcm_decrypt(struct aead_request *req) | ||
1735 | { | ||
1736 | return atmel_aes_gcm_crypt(req, 0); | ||
1737 | } | ||
1738 | |||
1739 | static int atmel_aes_gcm_init(struct crypto_aead *tfm) | ||
1740 | { | ||
1741 | struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); | ||
1742 | |||
1743 | crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); | ||
1744 | ctx->base.start = atmel_aes_gcm_start; | ||
1745 | |||
1746 | return 0; | ||
1747 | } | ||
1748 | |||
1749 | static void atmel_aes_gcm_exit(struct crypto_aead *tfm) | ||
1750 | { | ||
1751 | |||
1752 | } | ||
1753 | |||
1754 | static struct aead_alg aes_gcm_alg = { | ||
1755 | .setkey = atmel_aes_gcm_setkey, | ||
1756 | .setauthsize = atmel_aes_gcm_setauthsize, | ||
1757 | .encrypt = atmel_aes_gcm_encrypt, | ||
1758 | .decrypt = atmel_aes_gcm_decrypt, | ||
1759 | .init = atmel_aes_gcm_init, | ||
1760 | .exit = atmel_aes_gcm_exit, | ||
1761 | .ivsize = 12, | ||
1762 | .maxauthsize = AES_BLOCK_SIZE, | ||
1763 | |||
1764 | .base = { | ||
1765 | .cra_name = "gcm(aes)", | ||
1766 | .cra_driver_name = "atmel-gcm-aes", | ||
1767 | .cra_priority = ATMEL_AES_PRIORITY, | ||
1768 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
1769 | .cra_blocksize = 1, | ||
1770 | .cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx), | ||
1771 | .cra_alignmask = 0xf, | ||
1772 | .cra_module = THIS_MODULE, | ||
1773 | }, | ||
1774 | }; | ||
1775 | |||
1776 | |||
1777 | /* Probe functions */ | ||
1778 | |||
1779 | static int atmel_aes_buff_init(struct atmel_aes_dev *dd) | ||
1780 | { | ||
1781 | dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER); | ||
1782 | dd->buflen = ATMEL_AES_BUFFER_SIZE; | ||
1783 | dd->buflen &= ~(AES_BLOCK_SIZE - 1); | ||
1784 | |||
1785 | if (!dd->buf) { | ||
1786 | dev_err(dd->dev, "unable to alloc pages.\n"); | ||
1787 | return -ENOMEM; | ||
1788 | } | ||
1789 | |||
1790 | return 0; | ||
1791 | } | ||
1792 | |||
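
atmel_aes_buff_init() above masks the buffer length down to a whole number of AES blocks; the mask works because the block size is a power of two. Sketch:

#include <stddef.h>

#define AES_BLOCK 16

/* Largest multiple of the AES block size not exceeding len. */
static size_t round_down_to_block(size_t len)
{
	return len & ~(size_t)(AES_BLOCK - 1);
}
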
1793 | static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) | ||
1794 | { | ||
1795 | free_page((unsigned long)dd->buf); | ||
1796 | } | ||
1797 | |||
1798 | static bool atmel_aes_filter(struct dma_chan *chan, void *slave) | ||
1799 | { | ||
1800 | struct at_dma_slave *sl = slave; | ||
1801 | |||
1802 | if (sl && sl->dma_dev == chan->device->dev) { | ||
1803 | chan->private = sl; | ||
1804 | return true; | ||
1805 | } else { | ||
1806 | return false; | ||
1807 | } | ||
1808 | } | ||
1809 | |||
1810 | static int atmel_aes_dma_init(struct atmel_aes_dev *dd, | ||
1811 | struct crypto_platform_data *pdata) | ||
1812 | { | ||
1813 | struct at_dma_slave *slave; | ||
1814 | int err = -ENOMEM; | ||
1815 | dma_cap_mask_t mask; | ||
1816 | |||
1817 | dma_cap_zero(mask); | ||
1818 | dma_cap_set(DMA_SLAVE, mask); | ||
1819 | |||
1820 | /* Try to grab 2 DMA channels */ | ||
1821 | slave = &pdata->dma_slave->rxdata; | ||
1822 | dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, | ||
1823 | slave, dd->dev, "tx"); | ||
1824 | if (!dd->src.chan) | ||
1825 | goto err_dma_in; | ||
1826 | |||
1827 | slave = &pdata->dma_slave->txdata; | ||
1828 | dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, | ||
1829 | slave, dd->dev, "rx"); | ||
1830 | if (!dd->dst.chan) | ||
1831 | goto err_dma_out; | ||
1832 | |||
1833 | return 0; | ||
1834 | |||
1835 | err_dma_out: | ||
1836 | dma_release_channel(dd->src.chan); | ||
1837 | err_dma_in: | ||
1838 | dev_warn(dd->dev, "no DMA channel available\n"); | ||
1839 | return err; | ||
1840 | } | ||
1841 | |||
1842 | static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) | ||
1843 | { | ||
1844 | dma_release_channel(dd->dst.chan); | ||
1845 | dma_release_channel(dd->src.chan); | ||
1846 | } | ||
1847 | |||
1848 | static void atmel_aes_queue_task(unsigned long data) | ||
1849 | { | ||
1850 | struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; | ||
1851 | |||
1195 | atmel_aes_handle_queue(dd, NULL); | 1852 | atmel_aes_handle_queue(dd, NULL); |
1196 | } | 1853 | } |
1197 | 1854 | ||
1855 | static void atmel_aes_done_task(unsigned long data) | ||
1856 | { | ||
1857 | struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; | ||
1858 | |||
1859 | dd->is_async = true; | ||
1860 | (void)dd->resume(dd); | ||
1861 | } | ||
1862 | |||
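
The reworked done task above no longer contains mode-specific logic: every processing step either finishes synchronously or records its continuation in dd->resume, enables an interrupt, and returns -EINPROGRESS; the tasklet then re-enters at the recorded step. A minimal sketch of the pattern (hypothetical, heavily simplified types):

#include <stdbool.h>

struct dev;
typedef int (*resume_fn)(struct dev *);

struct dev {
	resume_fn resume;   /* continuation armed before returning -EINPROGRESS */
	bool is_async;      /* tells completion code a callback is required */
};

/* Interrupt bottom half: resume the state machine where it parked. */
static void done_task(struct dev *dd)
{
	dd->is_async = true;
	(void)dd->resume(dd);
}
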
1198 | static irqreturn_t atmel_aes_irq(int irq, void *dev_id) | 1863 | static irqreturn_t atmel_aes_irq(int irq, void *dev_id) |
1199 | { | 1864 | { |
1200 | struct atmel_aes_dev *aes_dd = dev_id; | 1865 | struct atmel_aes_dev *aes_dd = dev_id; |
@@ -1217,10 +1882,14 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) | |||
1217 | { | 1882 | { |
1218 | int i; | 1883 | int i; |
1219 | 1884 | ||
1220 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) | 1885 | if (dd->caps.has_gcm) |
1221 | crypto_unregister_alg(&aes_algs[i]); | 1886 | crypto_unregister_aead(&aes_gcm_alg); |
1887 | |||
1222 | if (dd->caps.has_cfb64) | 1888 | if (dd->caps.has_cfb64) |
1223 | crypto_unregister_alg(&aes_cfb64_alg); | 1889 | crypto_unregister_alg(&aes_cfb64_alg); |
1890 | |||
1891 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) | ||
1892 | crypto_unregister_alg(&aes_algs[i]); | ||
1224 | } | 1893 | } |
1225 | 1894 | ||
1226 | static int atmel_aes_register_algs(struct atmel_aes_dev *dd) | 1895 | static int atmel_aes_register_algs(struct atmel_aes_dev *dd) |
@@ -1239,8 +1908,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) | |||
1239 | goto err_aes_cfb64_alg; | 1908 | goto err_aes_cfb64_alg; |
1240 | } | 1909 | } |
1241 | 1910 | ||
1911 | if (dd->caps.has_gcm) { | ||
1912 | err = crypto_register_aead(&aes_gcm_alg); | ||
1913 | if (err) | ||
1914 | goto err_aes_gcm_alg; | ||
1915 | } | ||
1916 | |||
1242 | return 0; | 1917 | return 0; |
1243 | 1918 | ||
1919 | err_aes_gcm_alg: | ||
1920 | crypto_unregister_alg(&aes_cfb64_alg); | ||
1244 | err_aes_cfb64_alg: | 1921 | err_aes_cfb64_alg: |
1245 | i = ARRAY_SIZE(aes_algs); | 1922 | i = ARRAY_SIZE(aes_algs); |
1246 | err_aes_algs: | 1923 | err_aes_algs: |
@@ -1254,13 +1931,24 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) | |||
1254 | { | 1931 | { |
1255 | dd->caps.has_dualbuff = 0; | 1932 | dd->caps.has_dualbuff = 0; |
1256 | dd->caps.has_cfb64 = 0; | 1933 | dd->caps.has_cfb64 = 0; |
1934 | dd->caps.has_ctr32 = 0; | ||
1935 | dd->caps.has_gcm = 0; | ||
1257 | dd->caps.max_burst_size = 1; | 1936 | dd->caps.max_burst_size = 1; |
1258 | 1937 | ||
1259 | /* keep only major version number */ | 1938 | /* keep only major version number */ |
1260 | switch (dd->hw_version & 0xff0) { | 1939 | switch (dd->hw_version & 0xff0) { |
1940 | case 0x500: | ||
1941 | dd->caps.has_dualbuff = 1; | ||
1942 | dd->caps.has_cfb64 = 1; | ||
1943 | dd->caps.has_ctr32 = 1; | ||
1944 | dd->caps.has_gcm = 1; | ||
1945 | dd->caps.max_burst_size = 4; | ||
1946 | break; | ||
1261 | case 0x200: | 1947 | case 0x200: |
1262 | dd->caps.has_dualbuff = 1; | 1948 | dd->caps.has_dualbuff = 1; |
1263 | dd->caps.has_cfb64 = 1; | 1949 | dd->caps.has_cfb64 = 1; |
1950 | dd->caps.has_ctr32 = 1; | ||
1951 | dd->caps.has_gcm = 1; | ||
1264 | dd->caps.max_burst_size = 4; | 1952 | dd->caps.max_burst_size = 4; |
1265 | break; | 1953 | break; |
1266 | case 0x130: | 1954 | case 0x130: |
@@ -1402,7 +2090,9 @@ static int atmel_aes_probe(struct platform_device *pdev) | |||
1402 | goto res_err; | 2090 | goto res_err; |
1403 | } | 2091 | } |
1404 | 2092 | ||
1405 | atmel_aes_hw_version_init(aes_dd); | 2093 | err = atmel_aes_hw_version_init(aes_dd); |
2094 | if (err) | ||
2095 | goto res_err; | ||
1406 | 2096 | ||
1407 | atmel_aes_get_cap(aes_dd); | 2097 | atmel_aes_get_cap(aes_dd); |
1408 | 2098 | ||
@@ -1423,8 +2113,8 @@ static int atmel_aes_probe(struct platform_device *pdev) | |||
1423 | goto err_algs; | 2113 | goto err_algs; |
1424 | 2114 | ||
1425 | dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n", | 2115 | dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n", |
1426 | dma_chan_name(aes_dd->dma_lch_in.chan), | 2116 | dma_chan_name(aes_dd->src.chan), |
1427 | dma_chan_name(aes_dd->dma_lch_out.chan)); | 2117 | dma_chan_name(aes_dd->dst.chan)); |
1428 | 2118 | ||
1429 | return 0; | 2119 | return 0; |
1430 | 2120 | ||
@@ -1462,6 +2152,7 @@ static int atmel_aes_remove(struct platform_device *pdev) | |||
1462 | tasklet_kill(&aes_dd->queue_task); | 2152 | tasklet_kill(&aes_dd->queue_task); |
1463 | 2153 | ||
1464 | atmel_aes_dma_cleanup(aes_dd); | 2154 | atmel_aes_dma_cleanup(aes_dd); |
2155 | atmel_aes_buff_cleanup(aes_dd); | ||
1465 | 2156 | ||
1466 | return 0; | 2157 | return 0; |
1467 | } | 2158 | } |
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 660d8c06540b..20de861aa0ea 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c | |||
@@ -755,7 +755,6 @@ static int atmel_sha_finish(struct ahash_request *req) | |||
755 | { | 755 | { |
756 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | 756 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); |
757 | struct atmel_sha_dev *dd = ctx->dd; | 757 | struct atmel_sha_dev *dd = ctx->dd; |
758 | int err = 0; | ||
759 | 758 | ||
760 | if (ctx->digcnt[0] || ctx->digcnt[1]) | 759 | if (ctx->digcnt[0] || ctx->digcnt[1]) |
761 | atmel_sha_copy_ready_hash(req); | 760 | atmel_sha_copy_ready_hash(req); |
@@ -763,7 +762,7 @@ static int atmel_sha_finish(struct ahash_request *req) | |||
763 | dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], | 762 | dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], |
764 | ctx->digcnt[0], ctx->bufcnt); | 763 | ctx->digcnt[0], ctx->bufcnt); |
765 | 764 | ||
766 | return err; | 765 | return 0; |
767 | } | 766 | } |
768 | 767 | ||
769 | static void atmel_sha_finish_req(struct ahash_request *req, int err) | 768 | static void atmel_sha_finish_req(struct ahash_request *req, int err) |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 49106ea42887..5845d4a08797 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -803,6 +803,10 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
803 | if (to_hash) { | 803 | if (to_hash) { |
804 | src_nents = sg_nents_for_len(req->src, | 804 | src_nents = sg_nents_for_len(req->src, |
805 | req->nbytes - (*next_buflen)); | 805 | req->nbytes - (*next_buflen)); |
806 | if (src_nents < 0) { | ||
807 | dev_err(jrdev, "Invalid number of src SG.\n"); | ||
808 | return src_nents; | ||
809 | } | ||
806 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); | 810 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); |
807 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | 811 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * |
808 | sizeof(struct sec4_sg_entry); | 812 | sizeof(struct sec4_sg_entry); |
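
sg_nents_for_len() returns a negative errno when the scatterlist cannot cover the requested length; the checks added in this patch propagate that error instead of feeding a negative count into the descriptor-size arithmetic. A standalone analogue of the helper and its contract (simplified types, not the kernel implementation):

#include <errno.h>
#include <stddef.h>

struct sg_ent { size_t length; struct sg_ent *next; };

/* Count entries needed to cover len, or -EINVAL if the list is too short. */
static int nents_for_len(const struct sg_ent *sg, size_t len)
{
	int nents = 0;

	for (; sg && len; sg = sg->next) {
		nents++;
		if (sg->length >= len)
			return nents;
		len -= sg->length;
	}
	return len ? -EINVAL : nents;
}
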
@@ -1002,6 +1006,10 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
1002 | int sh_len; | 1006 | int sh_len; |
1003 | 1007 | ||
1004 | src_nents = sg_nents_for_len(req->src, req->nbytes); | 1008 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
1009 | if (src_nents < 0) { | ||
1010 | dev_err(jrdev, "Invalid number of src SG.\n"); | ||
1011 | return src_nents; | ||
1012 | } | ||
1005 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); | 1013 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); |
1006 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | 1014 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * |
1007 | sizeof(struct sec4_sg_entry); | 1015 | sizeof(struct sec4_sg_entry); |
@@ -1086,6 +1094,10 @@ static int ahash_digest(struct ahash_request *req) | |||
1086 | int sh_len; | 1094 | int sh_len; |
1087 | 1095 | ||
1088 | src_nents = sg_count(req->src, req->nbytes); | 1096 | src_nents = sg_count(req->src, req->nbytes); |
1097 | if (src_nents < 0) { | ||
1098 | dev_err(jrdev, "Invalid number of src SG.\n"); | ||
1099 | return src_nents; | ||
1100 | } | ||
1089 | dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); | 1101 | dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); |
1090 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | 1102 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); |
1091 | 1103 | ||
@@ -1234,6 +1246,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1234 | if (to_hash) { | 1246 | if (to_hash) { |
1235 | src_nents = sg_nents_for_len(req->src, | 1247 | src_nents = sg_nents_for_len(req->src, |
1236 | req->nbytes - (*next_buflen)); | 1248 | req->nbytes - (*next_buflen)); |
1249 | if (src_nents < 0) { | ||
1250 | dev_err(jrdev, "Invalid number of src SG.\n"); | ||
1251 | return src_nents; | ||
1252 | } | ||
1237 | sec4_sg_bytes = (1 + src_nents) * | 1253 | sec4_sg_bytes = (1 + src_nents) * |
1238 | sizeof(struct sec4_sg_entry); | 1254 | sizeof(struct sec4_sg_entry); |
1239 | 1255 | ||
@@ -1342,6 +1358,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
1342 | int ret = 0; | 1358 | int ret = 0; |
1343 | 1359 | ||
1344 | src_nents = sg_nents_for_len(req->src, req->nbytes); | 1360 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
1361 | if (src_nents < 0) { | ||
1362 | dev_err(jrdev, "Invalid number of src SG.\n"); | ||
1363 | return src_nents; | ||
1364 | } | ||
1345 | sec4_sg_src_index = 2; | 1365 | sec4_sg_src_index = 2; |
1346 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | 1366 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * |
1347 | sizeof(struct sec4_sg_entry); | 1367 | sizeof(struct sec4_sg_entry); |
@@ -1430,6 +1450,10 @@ static int ahash_update_first(struct ahash_request *req) | |||
1430 | 1450 | ||
1431 | if (to_hash) { | 1451 | if (to_hash) { |
1432 | src_nents = sg_count(req->src, req->nbytes - (*next_buflen)); | 1452 | src_nents = sg_count(req->src, req->nbytes - (*next_buflen)); |
1453 | if (src_nents < 0) { | ||
1454 | dev_err(jrdev, "Invalid number of src SG.\n"); | ||
1455 | return src_nents; | ||
1456 | } | ||
1433 | dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); | 1457 | dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); |
1434 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | 1458 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); |
1435 | 1459 | ||
@@ -1572,7 +1596,7 @@ static int ahash_export(struct ahash_request *req, void *out) | |||
1572 | len = state->buflen_1; | 1596 | len = state->buflen_1; |
1573 | } else { | 1597 | } else { |
1574 | buf = state->buf_0; | 1598 | buf = state->buf_0; |
1575 | len = state->buflen_1; | 1599 | len = state->buflen_0; |
1576 | } | 1600 | } |
1577 | 1601 | ||
1578 | memcpy(export->buf, buf, len); | 1602 | memcpy(export->buf, buf, len); |
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 3cd8481065f8..6e37845abf8f 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig | |||
@@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD | |||
3 | depends on CRYPTO_DEV_CCP | 3 | depends on CRYPTO_DEV_CCP |
4 | default m | 4 | default m |
5 | select HW_RANDOM | 5 | select HW_RANDOM |
6 | select CRYPTO_SHA1 | ||
7 | select CRYPTO_SHA256 | ||
6 | help | 8 | help |
7 | Provides the interface to use the AMD Cryptographic Coprocessor | 9 | Provides the interface to use the AMD Cryptographic Coprocessor |
8 | which can be used to offload encryption operations such as SHA, | 10 | which can be used to offload encryption operations such as SHA, |
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c6e883b296a9..6613aee79b87 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c | |||
@@ -152,32 +152,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | |||
152 | cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), | 152 | cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), |
153 | }; | 153 | }; |
154 | 154 | ||
155 | /* The CCP cannot perform zero-length sha operations so the caller | ||
156 | * is required to buffer data for the final operation. However, a | ||
157 | * sha operation for a message with a total length of zero is valid | ||
158 | * so known values are required to supply the result. | ||
159 | */ | ||
160 | static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = { | ||
161 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, | ||
162 | 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, | ||
163 | 0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00, | ||
164 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
165 | }; | ||
166 | |||
167 | static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = { | ||
168 | 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, | ||
169 | 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, | ||
170 | 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, | ||
171 | 0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00, | ||
172 | }; | ||
173 | |||
174 | static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = { | ||
175 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, | ||
176 | 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, | ||
177 | 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, | ||
178 | 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, | ||
179 | }; | ||
180 | |||
181 | static u32 ccp_addr_lo(struct ccp_dma_info *info) | 155 | static u32 ccp_addr_lo(struct ccp_dma_info *info) |
182 | { | 156 | { |
183 | return lower_32_bits(info->address + info->offset); | 157 | return lower_32_bits(info->address + info->offset); |
@@ -1391,18 +1365,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1391 | if (sha->msg_bits) | 1365 | if (sha->msg_bits) |
1392 | return -EINVAL; | 1366 | return -EINVAL; |
1393 | 1367 | ||
1394 | /* A sha operation for a message with a total length of zero, | 1368 | /* The CCP cannot perform zero-length sha operations so the |
1395 | * return known result. | 1369 | * caller is required to buffer data for the final operation. |
1370 | * However, a sha operation for a message with a total length | ||
1371 | * of zero is valid so known values are required to supply | ||
1372 | * the result. | ||
1396 | */ | 1373 | */ |
1397 | switch (sha->type) { | 1374 | switch (sha->type) { |
1398 | case CCP_SHA_TYPE_1: | 1375 | case CCP_SHA_TYPE_1: |
1399 | sha_zero = ccp_sha1_zero; | 1376 | sha_zero = sha1_zero_message_hash; |
1400 | break; | 1377 | break; |
1401 | case CCP_SHA_TYPE_224: | 1378 | case CCP_SHA_TYPE_224: |
1402 | sha_zero = ccp_sha224_zero; | 1379 | sha_zero = sha224_zero_message_hash; |
1403 | break; | 1380 | break; |
1404 | case CCP_SHA_TYPE_256: | 1381 | case CCP_SHA_TYPE_256: |
1405 | sha_zero = ccp_sha256_zero; | 1382 | sha_zero = sha256_zero_message_hash; |
1406 | break; | 1383 | break; |
1407 | default: | 1384 | default: |
1408 | return -EINVAL; | 1385 | return -EINVAL; |
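
The sha*_zero_message_hash symbols substituted above are shared library copies of the empty-message digests this file previously carried privately (hence the new CRYPTO_SHA1/CRYPTO_SHA256 selects in Kconfig). For reference, SHA-256 of the empty string is the fixed value below, matching the ccp_sha256_zero table removed earlier in this patch:

/* SHA-256("") — the digest returned for a zero-length message. */
static const unsigned char sha256_empty_digest[32] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};
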
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 6ade02f04f91..7690467c42f8 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c | |||
@@ -44,7 +44,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp) | |||
44 | { | 44 | { |
45 | struct ccp_pci *ccp_pci = ccp->dev_specific; | 45 | struct ccp_pci *ccp_pci = ccp->dev_specific; |
46 | struct device *dev = ccp->dev; | 46 | struct device *dev = ccp->dev; |
47 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | 47 | struct pci_dev *pdev = to_pci_dev(dev); |
48 | struct msix_entry msix_entry[MSIX_VECTORS]; | 48 | struct msix_entry msix_entry[MSIX_VECTORS]; |
49 | unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; | 49 | unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; |
50 | int v, ret; | 50 | int v, ret; |
@@ -86,7 +86,7 @@ e_irq: | |||
86 | static int ccp_get_msi_irq(struct ccp_device *ccp) | 86 | static int ccp_get_msi_irq(struct ccp_device *ccp) |
87 | { | 87 | { |
88 | struct device *dev = ccp->dev; | 88 | struct device *dev = ccp->dev; |
89 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | 89 | struct pci_dev *pdev = to_pci_dev(dev); |
90 | int ret; | 90 | int ret; |
91 | 91 | ||
92 | ret = pci_enable_msi(pdev); | 92 | ret = pci_enable_msi(pdev); |
@@ -133,7 +133,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) | |||
133 | { | 133 | { |
134 | struct ccp_pci *ccp_pci = ccp->dev_specific; | 134 | struct ccp_pci *ccp_pci = ccp->dev_specific; |
135 | struct device *dev = ccp->dev; | 135 | struct device *dev = ccp->dev; |
136 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | 136 | struct pci_dev *pdev = to_pci_dev(dev); |
137 | 137 | ||
138 | if (ccp_pci->msix_count) { | 138 | if (ccp_pci->msix_count) { |
139 | while (ccp_pci->msix_count--) | 139 | while (ccp_pci->msix_count--) |
@@ -149,7 +149,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) | |||
149 | static int ccp_find_mmio_area(struct ccp_device *ccp) | 149 | static int ccp_find_mmio_area(struct ccp_device *ccp) |
150 | { | 150 | { |
151 | struct device *dev = ccp->dev; | 151 | struct device *dev = ccp->dev; |
152 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | 152 | struct pci_dev *pdev = to_pci_dev(dev); |
153 | resource_size_t io_len; | 153 | resource_size_t io_len; |
154 | unsigned long io_flags; | 154 | unsigned long io_flags; |
155 | 155 | ||
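
to_pci_dev() above (and to_platform_device() in the next file) are type-safe wrappers around the open-coded container_of() they replace: given a pointer to the struct device embedded in a larger structure, they recover the enclosing structure. A standalone sketch of the mechanism:

#include <stddef.h>

struct device { int id; };

struct pci_dev {
	int vendor;
	struct device dev;   /* embedded member */
};

/* Recover the structure that contains ptr as its 'member' field. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct pci_dev *my_to_pci_dev(struct device *d)
{
	return container_of(d, struct pci_dev, dev);
}
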
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index 01b50cb4c982..66dd7c9d08c3 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c | |||
@@ -35,8 +35,7 @@ struct ccp_platform { | |||
35 | static int ccp_get_irq(struct ccp_device *ccp) | 35 | static int ccp_get_irq(struct ccp_device *ccp) |
36 | { | 36 | { |
37 | struct device *dev = ccp->dev; | 37 | struct device *dev = ccp->dev; |
38 | struct platform_device *pdev = container_of(dev, | 38 | struct platform_device *pdev = to_platform_device(dev); |
39 | struct platform_device, dev); | ||
40 | int ret; | 39 | int ret; |
41 | 40 | ||
42 | ret = platform_get_irq(pdev, 0); | 41 | ret = platform_get_irq(pdev, 0); |
@@ -78,8 +77,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) | |||
78 | static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) | 77 | static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) |
79 | { | 78 | { |
80 | struct device *dev = ccp->dev; | 79 | struct device *dev = ccp->dev; |
81 | struct platform_device *pdev = container_of(dev, | 80 | struct platform_device *pdev = to_platform_device(dev); |
82 | struct platform_device, dev); | ||
83 | struct resource *ior; | 81 | struct resource *ior; |
84 | 82 | ||
85 | ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 83 | ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index ca5c71ab4b4d..eee2c7e6c299 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c | |||
@@ -11,10 +11,6 @@ | |||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | 14 | */ |
19 | 15 | ||
20 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -36,14 +32,6 @@ | |||
36 | #include <crypto/algapi.h> | 32 | #include <crypto/algapi.h> |
37 | #include <crypto/des.h> | 33 | #include <crypto/des.h> |
38 | 34 | ||
39 | //#define HIFN_DEBUG | ||
40 | |||
41 | #ifdef HIFN_DEBUG | ||
42 | #define dprintk(f, a...) printk(f, ##a) | ||
43 | #else | ||
44 | #define dprintk(f, a...) do {} while (0) | ||
45 | #endif | ||
46 | |||
47 | static char hifn_pll_ref[sizeof("extNNN")] = "ext"; | 35 | static char hifn_pll_ref[sizeof("extNNN")] = "ext"; |
48 | module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); | 36 | module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); |
49 | MODULE_PARM_DESC(hifn_pll_ref, | 37 | MODULE_PARM_DESC(hifn_pll_ref, |
@@ -79,12 +67,12 @@ static atomic_t hifn_dev_number; | |||
79 | 67 | ||
80 | /* DMA registers */ | 68 | /* DMA registers */
81 | 69 | ||
82 | #define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */ | 70 | #define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */ |
83 | #define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */ | 71 | #define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */ |
84 | #define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */ | 72 | #define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */ |
85 | #define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */ | 73 | #define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */ |
86 | #define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */ | 74 | #define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */ |
87 | #define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */ | 75 | #define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */ |
88 | #define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */ | 76 | #define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */ |
89 | #define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */ | 77 | #define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */ |
90 | #define HIFN_CHIP_ID 0x98 /* Chip ID */ | 78 | #define HIFN_CHIP_ID 0x98 /* Chip ID */ |
@@ -358,10 +346,10 @@ static atomic_t hifn_dev_number; | |||
358 | #define HIFN_NAMESIZE 32 | 346 | #define HIFN_NAMESIZE 32 |
359 | #define HIFN_MAX_RESULT_ORDER 5 | 347 | #define HIFN_MAX_RESULT_ORDER 5 |
360 | 348 | ||
361 | #define HIFN_D_CMD_RSIZE 24*1 | 349 | #define HIFN_D_CMD_RSIZE (24 * 1) |
362 | #define HIFN_D_SRC_RSIZE 80*1 | 350 | #define HIFN_D_SRC_RSIZE (80 * 1) |
363 | #define HIFN_D_DST_RSIZE 80*1 | 351 | #define HIFN_D_DST_RSIZE (80 * 1) |
364 | #define HIFN_D_RES_RSIZE 24*1 | 352 | #define HIFN_D_RES_RSIZE (24 * 1) |
365 | 353 | ||
366 | #define HIFN_D_DST_DALIGN 4 | 354 | #define HIFN_D_DST_DALIGN 4 |
367 | 355 | ||
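The parenthesised ring sizes above follow the usual expression-macro rule: without outer parentheses, operator precedence at the expansion site can silently change the value. A hypothetical illustration (BAD_RSIZE and GOOD_RSIZE are made-up names, not driver macros):

	#define BAD_RSIZE  24 + 8        /* unparenthesised body */
	#define GOOD_RSIZE (24 + 8)      /* self-contained */

	int bad  = BAD_RSIZE * 2;        /* 24 + (8 * 2) == 40 */
	int good = GOOD_RSIZE * 2;       /* (24 + 8) * 2 == 64 */

The "24 * 1" bodies here happen to be safe either way, so the change is purely defensive style.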
@@ -386,17 +374,16 @@ static atomic_t hifn_dev_number; | |||
386 | #define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4) | 374 | #define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4) |
387 | #define HIFN_USED_RESULT 12 | 375 | #define HIFN_USED_RESULT 12 |
388 | 376 | ||
389 | struct hifn_desc | 377 | struct hifn_desc { |
390 | { | ||
391 | volatile __le32 l; | 378 | volatile __le32 l; |
392 | volatile __le32 p; | 379 | volatile __le32 p; |
393 | }; | 380 | }; |
394 | 381 | ||
395 | struct hifn_dma { | 382 | struct hifn_dma { |
396 | struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1]; | 383 | struct hifn_desc cmdr[HIFN_D_CMD_RSIZE + 1]; |
397 | struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1]; | 384 | struct hifn_desc srcr[HIFN_D_SRC_RSIZE + 1]; |
398 | struct hifn_desc dstr[HIFN_D_DST_RSIZE+1]; | 385 | struct hifn_desc dstr[HIFN_D_DST_RSIZE + 1]; |
399 | struct hifn_desc resr[HIFN_D_RES_RSIZE+1]; | 386 | struct hifn_desc resr[HIFN_D_RES_RSIZE + 1]; |
400 | 387 | ||
401 | u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; | 388 | u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; |
402 | u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; | 389 | u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; |
@@ -410,16 +397,15 @@ struct hifn_dma { | |||
410 | int cmdk, srck, dstk, resk; | 397 | int cmdk, srck, dstk, resk; |
411 | }; | 398 | }; |
412 | 399 | ||
413 | #define HIFN_FLAG_CMD_BUSY (1<<0) | 400 | #define HIFN_FLAG_CMD_BUSY (1 << 0) |
414 | #define HIFN_FLAG_SRC_BUSY (1<<1) | 401 | #define HIFN_FLAG_SRC_BUSY (1 << 1) |
415 | #define HIFN_FLAG_DST_BUSY (1<<2) | 402 | #define HIFN_FLAG_DST_BUSY (1 << 2) |
416 | #define HIFN_FLAG_RES_BUSY (1<<3) | 403 | #define HIFN_FLAG_RES_BUSY (1 << 3) |
417 | #define HIFN_FLAG_OLD_KEY (1<<4) | 404 | #define HIFN_FLAG_OLD_KEY (1 << 4) |
418 | 405 | ||
419 | #define HIFN_DEFAULT_ACTIVE_NUM 5 | 406 | #define HIFN_DEFAULT_ACTIVE_NUM 5 |
420 | 407 | ||
421 | struct hifn_device | 408 | struct hifn_device { |
422 | { | ||
423 | char name[HIFN_NAMESIZE]; | 409 | char name[HIFN_NAMESIZE]; |
424 | 410 | ||
425 | int irq; | 411 | int irq; |
@@ -432,7 +418,7 @@ struct hifn_device | |||
432 | 418 | ||
433 | u32 dmareg; | 419 | u32 dmareg; |
434 | 420 | ||
435 | void *sa[HIFN_D_RES_RSIZE]; | 421 | void *sa[HIFN_D_RES_RSIZE]; |
436 | 422 | ||
437 | spinlock_t lock; | 423 | spinlock_t lock; |
438 | 424 | ||
@@ -447,7 +433,7 @@ struct hifn_device | |||
447 | 433 | ||
448 | struct tasklet_struct tasklet; | 434 | struct tasklet_struct tasklet; |
449 | 435 | ||
450 | struct crypto_queue queue; | 436 | struct crypto_queue queue; |
451 | struct list_head alg_list; | 437 | struct list_head alg_list; |
452 | 438 | ||
453 | unsigned int pk_clk_freq; | 439 | unsigned int pk_clk_freq; |
@@ -468,8 +454,7 @@ struct hifn_device | |||
468 | #define HIFN_D_JUMP 0x40000000 | 454 | #define HIFN_D_JUMP 0x40000000 |
469 | #define HIFN_D_VALID 0x80000000 | 455 | #define HIFN_D_VALID 0x80000000 |
470 | 456 | ||
471 | struct hifn_base_command | 457 | struct hifn_base_command { |
472 | { | ||
473 | volatile __le16 masks; | 458 | volatile __le16 masks; |
474 | volatile __le16 session_num; | 459 | volatile __le16 session_num; |
475 | volatile __le16 total_source_count; | 460 | volatile __le16 total_source_count; |
@@ -491,12 +476,11 @@ struct hifn_base_command | |||
491 | /* | 476 | /* |
492 | * Structure to help build up the command data structure. | 477 | * Structure to help build up the command data structure. |
493 | */ | 478 | */ |
494 | struct hifn_crypt_command | 479 | struct hifn_crypt_command { |
495 | { | 480 | volatile __le16 masks; |
496 | volatile __le16 masks; | 481 | volatile __le16 header_skip; |
497 | volatile __le16 header_skip; | 482 | volatile __le16 source_count; |
498 | volatile __le16 source_count; | 483 | volatile __le16 reserved; |
499 | volatile __le16 reserved; | ||
500 | }; | 484 | }; |
501 | 485 | ||
502 | #define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */ | 486 | #define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */ |
@@ -522,12 +506,11 @@ struct hifn_crypt_command | |||
522 | /* | 506 | /* |
523 | * Structure to help build up the command data structure. | 507 | * Structure to help build up the command data structure. |
524 | */ | 508 | */ |
525 | struct hifn_mac_command | 509 | struct hifn_mac_command { |
526 | { | 510 | volatile __le16 masks; |
527 | volatile __le16 masks; | 511 | volatile __le16 header_skip; |
528 | volatile __le16 header_skip; | 512 | volatile __le16 source_count; |
529 | volatile __le16 source_count; | 513 | volatile __le16 reserved; |
530 | volatile __le16 reserved; | ||
531 | }; | 514 | }; |
532 | 515 | ||
533 | #define HIFN_MAC_CMD_ALG_MASK 0x0001 | 516 | #define HIFN_MAC_CMD_ALG_MASK 0x0001 |
@@ -551,12 +534,11 @@ struct hifn_mac_command | |||
551 | #define HIFN_MAC_CMD_POS_IPSEC 0x0200 | 534 | #define HIFN_MAC_CMD_POS_IPSEC 0x0200 |
552 | #define HIFN_MAC_CMD_NEW_KEY 0x0800 | 535 | #define HIFN_MAC_CMD_NEW_KEY 0x0800 |
553 | 536 | ||
554 | struct hifn_comp_command | 537 | struct hifn_comp_command { |
555 | { | 538 | volatile __le16 masks; |
556 | volatile __le16 masks; | 539 | volatile __le16 header_skip; |
557 | volatile __le16 header_skip; | 540 | volatile __le16 source_count; |
558 | volatile __le16 source_count; | 541 | volatile __le16 reserved; |
559 | volatile __le16 reserved; | ||
560 | }; | 542 | }; |
561 | 543 | ||
562 | #define HIFN_COMP_CMD_SRCLEN_M 0xc000 | 544 | #define HIFN_COMP_CMD_SRCLEN_M 0xc000 |
@@ -570,12 +552,11 @@ struct hifn_comp_command | |||
570 | #define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */ | 552 | #define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */ |
571 | #define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */ | 553 | #define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */ |
572 | 554 | ||
573 | struct hifn_base_result | 555 | struct hifn_base_result { |
574 | { | 556 | volatile __le16 flags; |
575 | volatile __le16 flags; | 557 | volatile __le16 session; |
576 | volatile __le16 session; | 558 | volatile __le16 src_cnt; /* 15:0 of source count */ |
577 | volatile __le16 src_cnt; /* 15:0 of source count */ | 559 | volatile __le16 dst_cnt; /* 15:0 of dest count */ |
578 | volatile __le16 dst_cnt; /* 15:0 of dest count */ | ||
579 | }; | 560 | }; |
580 | 561 | ||
581 | #define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ | 562 | #define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ |
@@ -584,8 +565,7 @@ struct hifn_base_result | |||
584 | #define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */ | 565 | #define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */ |
585 | #define HIFN_BASE_RES_DSTLEN_S 12 | 566 | #define HIFN_BASE_RES_DSTLEN_S 12 |
586 | 567 | ||
587 | struct hifn_comp_result | 568 | struct hifn_comp_result { |
588 | { | ||
589 | volatile __le16 flags; | 569 | volatile __le16 flags; |
590 | volatile __le16 crc; | 570 | volatile __le16 crc; |
591 | }; | 571 | }; |
@@ -596,18 +576,16 @@ struct hifn_comp_result | |||
596 | #define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */ | 576 | #define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */ |
597 | #define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */ | 577 | #define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */ |
598 | 578 | ||
599 | struct hifn_mac_result | 579 | struct hifn_mac_result { |
600 | { | 580 | volatile __le16 flags; |
601 | volatile __le16 flags; | 581 | volatile __le16 reserved; |
602 | volatile __le16 reserved; | ||
603 | /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ | 582 | /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ |
604 | }; | 583 | }; |
605 | 584 | ||
606 | #define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */ | 585 | #define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */ |
607 | #define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */ | 586 | #define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */ |
608 | 587 | ||
609 | struct hifn_crypt_result | 588 | struct hifn_crypt_result { |
610 | { | ||
611 | volatile __le16 flags; | 589 | volatile __le16 flags; |
612 | volatile __le16 reserved; | 590 | volatile __le16 reserved; |
613 | }; | 591 | }; |
@@ -622,11 +600,10 @@ struct hifn_crypt_result | |||
622 | #define HIFN_POLL_SCALAR 0x0 | 600 | #define HIFN_POLL_SCALAR 0x0 |
623 | #endif | 601 | #endif |
624 | 602 | ||
625 | #define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ | 603 | #define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ |
626 | #define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */ | 604 | #define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */ |
627 | 605 | ||
628 | struct hifn_crypto_alg | 606 | struct hifn_crypto_alg { |
629 | { | ||
630 | struct list_head entry; | 607 | struct list_head entry; |
631 | struct crypto_alg alg; | 608 | struct crypto_alg alg; |
632 | struct hifn_device *dev; | 609 | struct hifn_device *dev; |
@@ -634,24 +611,21 @@ struct hifn_crypto_alg | |||
634 | 611 | ||
635 | #define ASYNC_SCATTERLIST_CACHE 16 | 612 | #define ASYNC_SCATTERLIST_CACHE 16 |
636 | 613 | ||
637 | #define ASYNC_FLAGS_MISALIGNED (1<<0) | 614 | #define ASYNC_FLAGS_MISALIGNED (1 << 0) |
638 | 615 | ||
639 | struct hifn_cipher_walk | 616 | struct hifn_cipher_walk { |
640 | { | ||
641 | struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; | 617 | struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; |
642 | u32 flags; | 618 | u32 flags; |
643 | int num; | 619 | int num; |
644 | }; | 620 | }; |
645 | 621 | ||
646 | struct hifn_context | 622 | struct hifn_context { |
647 | { | ||
648 | u8 key[HIFN_MAX_CRYPT_KEY_LENGTH]; | 623 | u8 key[HIFN_MAX_CRYPT_KEY_LENGTH]; |
649 | struct hifn_device *dev; | 624 | struct hifn_device *dev; |
650 | unsigned int keysize; | 625 | unsigned int keysize; |
651 | }; | 626 | }; |
652 | 627 | ||
653 | struct hifn_request_context | 628 | struct hifn_request_context { |
654 | { | ||
655 | u8 *iv; | 629 | u8 *iv; |
656 | unsigned int ivsize; | 630 | unsigned int ivsize; |
657 | u8 op, type, mode, unused; | 631 | u8 op, type, mode, unused; |
@@ -693,7 +667,7 @@ static void hifn_wait_puc(struct hifn_device *dev) | |||
693 | int i; | 667 | int i; |
694 | u32 ret; | 668 | u32 ret; |
695 | 669 | ||
696 | for (i=10000; i > 0; --i) { | 670 | for (i = 10000; i > 0; --i) { |
697 | ret = hifn_read_0(dev, HIFN_0_PUCTRL); | 671 | ret = hifn_read_0(dev, HIFN_0_PUCTRL); |
698 | if (!(ret & HIFN_PUCTRL_RESET)) | 672 | if (!(ret & HIFN_PUCTRL_RESET)) |
699 | break; | 673 | break; |
@@ -702,7 +676,7 @@ static void hifn_wait_puc(struct hifn_device *dev) | |||
702 | } | 676 | } |
703 | 677 | ||
704 | if (!i) | 678 | if (!i) |
705 | dprintk("%s: Failed to reset PUC unit.\n", dev->name); | 679 | dev_err(&dev->pdev->dev, "Failed to reset PUC unit.\n"); |
706 | } | 680 | } |
707 | 681 | ||
708 | static void hifn_reset_puc(struct hifn_device *dev) | 682 | static void hifn_reset_puc(struct hifn_device *dev) |
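This is the first of many hunks converting the driver's home-grown dprintk() (compiled out unless HIFN_DEBUG was defined) to the standard dev_err()/dev_dbg()/pr_debug() helpers. Roughly equivalent before and after, assuming dev->pdev points at the underlying PCI device:

	dprintk("%s: Failed to reset PUC unit.\n", dev->name);   /* old: enabled only by rebuilding with HIFN_DEBUG */
	dev_err(&dev->pdev->dev, "Failed to reset PUC unit.\n"); /* new: device name prefixed automatically */

The dev_dbg() conversions are additionally gated by dynamic debug at runtime rather than a compile-time #define.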
@@ -749,13 +723,12 @@ static void hifn_reset_dma(struct hifn_device *dev, int full) | |||
749 | hifn_reset_puc(dev); | 723 | hifn_reset_puc(dev); |
750 | } | 724 | } |
751 | 725 | ||
752 | static u32 hifn_next_signature(u_int32_t a, u_int cnt) | 726 | static u32 hifn_next_signature(u32 a, u_int cnt) |
753 | { | 727 | { |
754 | int i; | 728 | int i; |
755 | u32 v; | 729 | u32 v; |
756 | 730 | ||
757 | for (i = 0; i < cnt; i++) { | 731 | for (i = 0; i < cnt; i++) { |
758 | |||
759 | /* get the parity */ | 732 | /* get the parity */ |
760 | v = a & 0x80080125; | 733 | v = a & 0x80080125; |
761 | v ^= v >> 16; | 734 | v ^= v >> 16; |
@@ -846,33 +819,28 @@ static int hifn_init_pubrng(struct hifn_device *dev) | |||
846 | hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) | | 819 | hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) | |
847 | HIFN_PUBRST_RESET); | 820 | HIFN_PUBRST_RESET); |
848 | 821 | ||
849 | for (i=100; i > 0; --i) { | 822 | for (i = 100; i > 0; --i) { |
850 | mdelay(1); | 823 | mdelay(1); |
851 | 824 | ||
852 | if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) | 825 | if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) |
853 | break; | 826 | break; |
854 | } | 827 | } |
855 | 828 | ||
856 | if (!i) | 829 | if (!i) { |
857 | dprintk("Chip %s: Failed to initialise public key engine.\n", | 830 | dev_err(&dev->pdev->dev, "Failed to initialise public key engine.\n"); |
858 | dev->name); | 831 | } else { |
859 | else { | ||
860 | hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); | 832 | hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); |
861 | dev->dmareg |= HIFN_DMAIER_PUBDONE; | 833 | dev->dmareg |= HIFN_DMAIER_PUBDONE; |
862 | hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); | 834 | hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); |
863 | 835 | ||
864 | dprintk("Chip %s: Public key engine has been successfully " | 836 | dev_dbg(&dev->pdev->dev, "Public key engine has been successfully initialised.\n"); |
865 | "initialised.\n", dev->name); | ||
866 | } | 837 | } |
867 | 838 | ||
868 | /* | 839 | /* Enable RNG engine. */ |
869 | * Enable RNG engine. | ||
870 | */ | ||
871 | 840 | ||
872 | hifn_write_1(dev, HIFN_1_RNG_CONFIG, | 841 | hifn_write_1(dev, HIFN_1_RNG_CONFIG, |
873 | hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); | 842 | hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); |
874 | dprintk("Chip %s: RNG engine has been successfully initialised.\n", | 843 | dev_dbg(&dev->pdev->dev, "RNG engine has been successfully initialised.\n"); |
875 | dev->name); | ||
876 | 844 | ||
877 | #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG | 845 | #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG |
878 | /* First value must be discarded */ | 846 | /* First value must be discarded */ |
@@ -896,8 +864,8 @@ static int hifn_enable_crypto(struct hifn_device *dev) | |||
896 | } | 864 | } |
897 | } | 865 | } |
898 | 866 | ||
899 | if (offtbl == NULL) { | 867 | if (!offtbl) { |
900 | dprintk("Chip %s: Unknown card!\n", dev->name); | 868 | dev_err(&dev->pdev->dev, "Unknown card!\n"); |
901 | return -ENODEV; | 869 | return -ENODEV; |
902 | } | 870 | } |
903 | 871 | ||
@@ -912,7 +880,7 @@ static int hifn_enable_crypto(struct hifn_device *dev) | |||
912 | hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0); | 880 | hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0); |
913 | mdelay(1); | 881 | mdelay(1); |
914 | 882 | ||
915 | for (i=0; i<12; ++i) { | 883 | for (i = 0; i < 12; ++i) { |
916 | addr = hifn_next_signature(addr, offtbl[i] + 0x101); | 884 | addr = hifn_next_signature(addr, offtbl[i] + 0x101); |
917 | hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr); | 885 | hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr); |
918 | 886 | ||
@@ -920,7 +888,7 @@ static int hifn_enable_crypto(struct hifn_device *dev) | |||
920 | } | 888 | } |
921 | hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg); | 889 | hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg); |
922 | 890 | ||
923 | dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev)); | 891 | dev_dbg(&dev->pdev->dev, "%s %s.\n", dev->name, pci_name(dev->pdev)); |
924 | 892 | ||
925 | return 0; | 893 | return 0; |
926 | } | 894 | } |
@@ -931,16 +899,14 @@ static void hifn_init_dma(struct hifn_device *dev) | |||
931 | u32 dptr = dev->desc_dma; | 899 | u32 dptr = dev->desc_dma; |
932 | int i; | 900 | int i; |
933 | 901 | ||
934 | for (i=0; i<HIFN_D_CMD_RSIZE; ++i) | 902 | for (i = 0; i < HIFN_D_CMD_RSIZE; ++i) |
935 | dma->cmdr[i].p = __cpu_to_le32(dptr + | 903 | dma->cmdr[i].p = __cpu_to_le32(dptr + |
936 | offsetof(struct hifn_dma, command_bufs[i][0])); | 904 | offsetof(struct hifn_dma, command_bufs[i][0])); |
937 | for (i=0; i<HIFN_D_RES_RSIZE; ++i) | 905 | for (i = 0; i < HIFN_D_RES_RSIZE; ++i) |
938 | dma->resr[i].p = __cpu_to_le32(dptr + | 906 | dma->resr[i].p = __cpu_to_le32(dptr + |
939 | offsetof(struct hifn_dma, result_bufs[i][0])); | 907 | offsetof(struct hifn_dma, result_bufs[i][0])); |
940 | 908 | ||
941 | /* | 909 | /* Setup LAST descriptors. */ |
942 | * Setup LAST descriptors. | ||
943 | */ | ||
944 | dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr + | 910 | dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr + |
945 | offsetof(struct hifn_dma, cmdr[0])); | 911 | offsetof(struct hifn_dma, cmdr[0])); |
946 | dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr + | 912 | dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr + |
@@ -960,7 +926,7 @@ static void hifn_init_dma(struct hifn_device *dev) | |||
960 | * to calculate the optimal multiplier. For PCI we assume 66MHz, since that | 926 | * to calculate the optimal multiplier. For PCI we assume 66MHz, since that |
961 | * allows us to operate without the risk of overclocking the chip. If it | 927 | * allows us to operate without the risk of overclocking the chip. If it |
962 | * actually uses 33MHz, the chip will operate at half the speed, this can be | 928 | * actually uses 33MHz, the chip will operate at half the speed, this can be |
963 | * overriden by specifying the frequency as module parameter (pci33). | 929 | * overridden by specifying the frequency as module parameter (pci33). |
964 | * | 930 | * |
965 | * Unfortunately the PCI clock is not very suitable since the HIFN needs a | 931 | * Unfortunately the PCI clock is not very suitable since the HIFN needs a |
966 | * stable clock and the PCI clock frequency may vary, so the default is the | 932 | * stable clock and the PCI clock frequency may vary, so the default is the |
@@ -984,9 +950,8 @@ static void hifn_init_pll(struct hifn_device *dev) | |||
984 | freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); | 950 | freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); |
985 | else { | 951 | else { |
986 | freq = 66; | 952 | freq = 66; |
987 | printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, " | 953 | dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n", |
988 | "override with hifn_pll_ref=%.3s<frequency>\n", | 954 | freq, hifn_pll_ref); |
989 | freq, hifn_pll_ref); | ||
990 | } | 955 | } |
991 | 956 | ||
992 | m = HIFN_PLL_FCK_MAX / freq; | 957 | m = HIFN_PLL_FCK_MAX / freq; |
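As the rewritten comment explains, the PLL multiplier is derived from the assumed reference frequency. A minimal sketch of that calculation, assuming HIFN_PLL_FCK_MAX is the maximum core clock in MHz (the subsequent register encoding is chip-specific and omitted here):

	unsigned int freq = 66;                      /* conservative PCI default */
	unsigned int mul  = HIFN_PLL_FCK_MAX / freq; /* e.g. 4 for a hypothetical 264 MHz cap */

If the bus actually runs at 33 MHz the chip merely ends up underclocked, which is why 66 is the safe assumption.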
@@ -1174,17 +1139,17 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, | |||
1174 | 1139 | ||
1175 | mask = 0; | 1140 | mask = 0; |
1176 | switch (rctx->op) { | 1141 | switch (rctx->op) { |
1177 | case ACRYPTO_OP_DECRYPT: | 1142 | case ACRYPTO_OP_DECRYPT: |
1178 | mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; | 1143 | mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; |
1179 | break; | 1144 | break; |
1180 | case ACRYPTO_OP_ENCRYPT: | 1145 | case ACRYPTO_OP_ENCRYPT: |
1181 | mask = HIFN_BASE_CMD_CRYPT; | 1146 | mask = HIFN_BASE_CMD_CRYPT; |
1182 | break; | 1147 | break; |
1183 | case ACRYPTO_OP_HMAC: | 1148 | case ACRYPTO_OP_HMAC: |
1184 | mask = HIFN_BASE_CMD_MAC; | 1149 | mask = HIFN_BASE_CMD_MAC; |
1185 | break; | 1150 | break; |
1186 | default: | 1151 | default: |
1187 | goto err_out; | 1152 | goto err_out; |
1188 | } | 1153 | } |
1189 | 1154 | ||
1190 | buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, | 1155 | buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, |
@@ -1199,53 +1164,53 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, | |||
1199 | md |= HIFN_CRYPT_CMD_NEW_IV; | 1164 | md |= HIFN_CRYPT_CMD_NEW_IV; |
1200 | 1165 | ||
1201 | switch (rctx->mode) { | 1166 | switch (rctx->mode) { |
1202 | case ACRYPTO_MODE_ECB: | 1167 | case ACRYPTO_MODE_ECB: |
1203 | md |= HIFN_CRYPT_CMD_MODE_ECB; | 1168 | md |= HIFN_CRYPT_CMD_MODE_ECB; |
1204 | break; | 1169 | break; |
1205 | case ACRYPTO_MODE_CBC: | 1170 | case ACRYPTO_MODE_CBC: |
1206 | md |= HIFN_CRYPT_CMD_MODE_CBC; | 1171 | md |= HIFN_CRYPT_CMD_MODE_CBC; |
1207 | break; | 1172 | break; |
1208 | case ACRYPTO_MODE_CFB: | 1173 | case ACRYPTO_MODE_CFB: |
1209 | md |= HIFN_CRYPT_CMD_MODE_CFB; | 1174 | md |= HIFN_CRYPT_CMD_MODE_CFB; |
1210 | break; | 1175 | break; |
1211 | case ACRYPTO_MODE_OFB: | 1176 | case ACRYPTO_MODE_OFB: |
1212 | md |= HIFN_CRYPT_CMD_MODE_OFB; | 1177 | md |= HIFN_CRYPT_CMD_MODE_OFB; |
1213 | break; | 1178 | break; |
1214 | default: | 1179 | default: |
1215 | goto err_out; | 1180 | goto err_out; |
1216 | } | 1181 | } |
1217 | 1182 | ||
1218 | switch (rctx->type) { | 1183 | switch (rctx->type) { |
1219 | case ACRYPTO_TYPE_AES_128: | 1184 | case ACRYPTO_TYPE_AES_128: |
1220 | if (ctx->keysize != 16) | 1185 | if (ctx->keysize != 16) |
1221 | goto err_out; | 1186 | goto err_out; |
1222 | md |= HIFN_CRYPT_CMD_KSZ_128 | | 1187 | md |= HIFN_CRYPT_CMD_KSZ_128 | |
1223 | HIFN_CRYPT_CMD_ALG_AES; | 1188 | HIFN_CRYPT_CMD_ALG_AES; |
1224 | break; | 1189 | break; |
1225 | case ACRYPTO_TYPE_AES_192: | 1190 | case ACRYPTO_TYPE_AES_192: |
1226 | if (ctx->keysize != 24) | 1191 | if (ctx->keysize != 24) |
1227 | goto err_out; | ||
1228 | md |= HIFN_CRYPT_CMD_KSZ_192 | | ||
1229 | HIFN_CRYPT_CMD_ALG_AES; | ||
1230 | break; | ||
1231 | case ACRYPTO_TYPE_AES_256: | ||
1232 | if (ctx->keysize != 32) | ||
1233 | goto err_out; | ||
1234 | md |= HIFN_CRYPT_CMD_KSZ_256 | | ||
1235 | HIFN_CRYPT_CMD_ALG_AES; | ||
1236 | break; | ||
1237 | case ACRYPTO_TYPE_3DES: | ||
1238 | if (ctx->keysize != 24) | ||
1239 | goto err_out; | ||
1240 | md |= HIFN_CRYPT_CMD_ALG_3DES; | ||
1241 | break; | ||
1242 | case ACRYPTO_TYPE_DES: | ||
1243 | if (ctx->keysize != 8) | ||
1244 | goto err_out; | ||
1245 | md |= HIFN_CRYPT_CMD_ALG_DES; | ||
1246 | break; | ||
1247 | default: | ||
1248 | goto err_out; | 1192 | goto err_out; |
1193 | md |= HIFN_CRYPT_CMD_KSZ_192 | | ||
1194 | HIFN_CRYPT_CMD_ALG_AES; | ||
1195 | break; | ||
1196 | case ACRYPTO_TYPE_AES_256: | ||
1197 | if (ctx->keysize != 32) | ||
1198 | goto err_out; | ||
1199 | md |= HIFN_CRYPT_CMD_KSZ_256 | | ||
1200 | HIFN_CRYPT_CMD_ALG_AES; | ||
1201 | break; | ||
1202 | case ACRYPTO_TYPE_3DES: | ||
1203 | if (ctx->keysize != 24) | ||
1204 | goto err_out; | ||
1205 | md |= HIFN_CRYPT_CMD_ALG_3DES; | ||
1206 | break; | ||
1207 | case ACRYPTO_TYPE_DES: | ||
1208 | if (ctx->keysize != 8) | ||
1209 | goto err_out; | ||
1210 | md |= HIFN_CRYPT_CMD_ALG_DES; | ||
1211 | break; | ||
1212 | default: | ||
1213 | goto err_out; | ||
1249 | } | 1214 | } |
1250 | 1215 | ||
1251 | buf_pos += hifn_setup_crypto_command(dev, buf_pos, | 1216 | buf_pos += hifn_setup_crypto_command(dev, buf_pos, |
@@ -1265,8 +1230,9 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, | |||
1265 | HIFN_D_VALID | HIFN_D_LAST | | 1230 | HIFN_D_VALID | HIFN_D_LAST | |
1266 | HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); | 1231 | HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); |
1267 | dma->cmdi = 0; | 1232 | dma->cmdi = 0; |
1268 | } else | 1233 | } else { |
1269 | dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID); | 1234 | dma->cmdr[dma->cmdi - 1].l |= __cpu_to_le32(HIFN_D_VALID); |
1235 | } | ||
1270 | 1236 | ||
1271 | if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) { | 1237 | if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) { |
1272 | hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); | 1238 | hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); |
@@ -1424,7 +1390,7 @@ static int hifn_cipher_walk_init(struct hifn_cipher_walk *w, | |||
1424 | sg_init_table(w->cache, num); | 1390 | sg_init_table(w->cache, num); |
1425 | 1391 | ||
1426 | w->num = 0; | 1392 | w->num = 0; |
1427 | for (i=0; i<num; ++i) { | 1393 | for (i = 0; i < num; ++i) { |
1428 | struct page *page = alloc_page(gfp_flags); | 1394 | struct page *page = alloc_page(gfp_flags); |
1429 | struct scatterlist *s; | 1395 | struct scatterlist *s; |
1430 | 1396 | ||
@@ -1444,7 +1410,7 @@ static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w) | |||
1444 | { | 1410 | { |
1445 | int i; | 1411 | int i; |
1446 | 1412 | ||
1447 | for (i=0; i<w->num; ++i) { | 1413 | for (i = 0; i < w->num; ++i) { |
1448 | struct scatterlist *s = &w->cache[i]; | 1414 | struct scatterlist *s = &w->cache[i]; |
1449 | 1415 | ||
1450 | __free_page(sg_page(s)); | 1416 | __free_page(sg_page(s)); |
@@ -1471,8 +1437,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, | |||
1471 | drest -= copy; | 1437 | drest -= copy; |
1472 | nbytes -= copy; | 1438 | nbytes -= copy; |
1473 | 1439 | ||
1474 | dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", | 1440 | pr_debug("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", |
1475 | __func__, copy, size, drest, nbytes); | 1441 | __func__, copy, size, drest, nbytes); |
1476 | 1442 | ||
1477 | dst++; | 1443 | dst++; |
1478 | idx++; | 1444 | idx++; |
@@ -1499,8 +1465,8 @@ static int hifn_cipher_walk(struct ablkcipher_request *req, | |||
1499 | 1465 | ||
1500 | dst = &req->dst[idx]; | 1466 | dst = &req->dst[idx]; |
1501 | 1467 | ||
1502 | dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", | 1468 | pr_debug("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", |
1503 | __func__, dst->length, dst->offset, offset, nbytes); | 1469 | __func__, dst->length, dst->offset, offset, nbytes); |
1504 | 1470 | ||
1505 | if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || | 1471 | if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || |
1506 | !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || | 1472 | !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || |
@@ -1525,10 +1491,10 @@ static int hifn_cipher_walk(struct ablkcipher_request *req, | |||
1525 | * to put there additional blocksized chunk, | 1491 | * to put there additional blocksized chunk, |
1526 | * so we mark that page as containing only | 1492 | * so we mark that page as containing only |
1527 | * blocksize aligned chunks: | 1493 | * blocksize aligned chunks: |
1528 | * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1)); | 1494 | * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1)); |
1529 | * and increase number of bytes to be processed | 1495 | * and increase number of bytes to be processed |
1530 | * in next chunk: | 1496 | * in next chunk: |
1531 | * nbytes += diff; | 1497 | * nbytes += diff; |
1532 | */ | 1498 | */ |
1533 | nbytes += diff; | 1499 | nbytes += diff; |
1534 | 1500 | ||
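A hedged sketch of the alignment bookkeeping the comment above describes: trim the cached entry down to a HIFN_D_DST_DALIGN multiple and carry the tail into the next chunk. Names mirror the surrounding code, but this is an illustration, not the driver logic verbatim:

	unsigned int diff = slen & (HIFN_D_DST_DALIGN - 1);
	if (diff) {
		t->length = slen & ~(HIFN_D_DST_DALIGN - 1); /* keep the aligned part only */
		nbytes += diff;                  /* remainder is processed in the next chunk */
	}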
@@ -1536,14 +1502,13 @@ static int hifn_cipher_walk(struct ablkcipher_request *req, | |||
1536 | * Temporary of course... | 1502 | * Temporary of course... |
1537 | * Kick author if you will catch this one. | 1503 | * Kick author if you will catch this one. |
1538 | */ | 1504 | */ |
1539 | printk(KERN_ERR "%s: dlen: %u, nbytes: %u," | 1505 | pr_err("%s: dlen: %u, nbytes: %u, slen: %u, offset: %u.\n", |
1540 | "slen: %u, offset: %u.\n", | 1506 | __func__, dlen, nbytes, slen, offset); |
1541 | __func__, dlen, nbytes, slen, offset); | 1507 | pr_err("%s: please contact author to fix this " |
1542 | printk(KERN_ERR "%s: please contact author to fix this " | 1508 | "issue, generally you should not catch " |
1543 | "issue, generally you should not catch " | 1509 | "this path under any condition but who " |
1544 | "this path under any condition but who " | 1510 | "knows how did you use crypto code.\n" |
1545 | "knows how did you use crypto code.\n" | 1511 | "Thank you.\n", __func__); |
1546 | "Thank you.\n", __func__); | ||
1547 | BUG(); | 1512 | BUG(); |
1548 | } else { | 1513 | } else { |
1549 | copy += diff + nbytes; | 1514 | copy += diff + nbytes; |
@@ -1630,70 +1595,16 @@ err_out: | |||
1630 | spin_unlock_irqrestore(&dev->lock, flags); | 1595 | spin_unlock_irqrestore(&dev->lock, flags); |
1631 | err_out_exit: | 1596 | err_out_exit: |
1632 | if (err) { | 1597 | if (err) { |
1633 | printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " | 1598 | dev_info(&dev->pdev->dev, "iv: %p [%d], key: %p [%d], mode: %u, op: %u, " |
1634 | "type: %u, err: %d.\n", | 1599 | "type: %u, err: %d.\n", |
1635 | dev->name, rctx->iv, rctx->ivsize, | 1600 | rctx->iv, rctx->ivsize, |
1636 | ctx->key, ctx->keysize, | 1601 | ctx->key, ctx->keysize, |
1637 | rctx->mode, rctx->op, rctx->type, err); | 1602 | rctx->mode, rctx->op, rctx->type, err); |
1638 | } | 1603 | } |
1639 | 1604 | ||
1640 | return err; | 1605 | return err; |
1641 | } | 1606 | } |
1642 | 1607 | ||
1643 | static int hifn_test(struct hifn_device *dev, int encdec, u8 snum) | ||
1644 | { | ||
1645 | int n, err; | ||
1646 | u8 src[16]; | ||
1647 | struct hifn_context ctx; | ||
1648 | struct hifn_request_context rctx; | ||
1649 | u8 fips_aes_ecb_from_zero[16] = { | ||
1650 | 0x66, 0xE9, 0x4B, 0xD4, | ||
1651 | 0xEF, 0x8A, 0x2C, 0x3B, | ||
1652 | 0x88, 0x4C, 0xFA, 0x59, | ||
1653 | 0xCA, 0x34, 0x2B, 0x2E}; | ||
1654 | struct scatterlist sg; | ||
1655 | |||
1656 | memset(src, 0, sizeof(src)); | ||
1657 | memset(ctx.key, 0, sizeof(ctx.key)); | ||
1658 | |||
1659 | ctx.dev = dev; | ||
1660 | ctx.keysize = 16; | ||
1661 | rctx.ivsize = 0; | ||
1662 | rctx.iv = NULL; | ||
1663 | rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; | ||
1664 | rctx.mode = ACRYPTO_MODE_ECB; | ||
1665 | rctx.type = ACRYPTO_TYPE_AES_128; | ||
1666 | rctx.walk.cache[0].length = 0; | ||
1667 | |||
1668 | sg_init_one(&sg, &src, sizeof(src)); | ||
1669 | |||
1670 | err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL); | ||
1671 | if (err) | ||
1672 | goto err_out; | ||
1673 | |||
1674 | dev->started = 0; | ||
1675 | msleep(200); | ||
1676 | |||
1677 | dprintk("%s: decoded: ", dev->name); | ||
1678 | for (n=0; n<sizeof(src); ++n) | ||
1679 | dprintk("%02x ", src[n]); | ||
1680 | dprintk("\n"); | ||
1681 | dprintk("%s: FIPS : ", dev->name); | ||
1682 | for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n) | ||
1683 | dprintk("%02x ", fips_aes_ecb_from_zero[n]); | ||
1684 | dprintk("\n"); | ||
1685 | |||
1686 | if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) { | ||
1687 | printk(KERN_INFO "%s: AES 128 ECB test has been successfully " | ||
1688 | "passed.\n", dev->name); | ||
1689 | return 0; | ||
1690 | } | ||
1691 | |||
1692 | err_out: | ||
1693 | printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name); | ||
1694 | return -1; | ||
1695 | } | ||
1696 | |||
1697 | static int hifn_start_device(struct hifn_device *dev) | 1608 | static int hifn_start_device(struct hifn_device *dev) |
1698 | { | 1609 | { |
1699 | int err; | 1610 | int err; |
@@ -1739,8 +1650,8 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset | |||
1739 | saddr += copy; | 1650 | saddr += copy; |
1740 | offset = 0; | 1651 | offset = 0; |
1741 | 1652 | ||
1742 | dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n", | 1653 | pr_debug("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n", |
1743 | __func__, copy, size, srest, nbytes); | 1654 | __func__, copy, size, srest, nbytes); |
1744 | 1655 | ||
1745 | dst++; | 1656 | dst++; |
1746 | idx++; | 1657 | idx++; |
@@ -1760,7 +1671,8 @@ static inline void hifn_complete_sa(struct hifn_device *dev, int i) | |||
1760 | dev->sa[i] = NULL; | 1671 | dev->sa[i] = NULL; |
1761 | dev->started--; | 1672 | dev->started--; |
1762 | if (dev->started < 0) | 1673 | if (dev->started < 0) |
1763 | printk("%s: started: %d.\n", __func__, dev->started); | 1674 | dev_info(&dev->pdev->dev, "%s: started: %d.\n", __func__, |
1675 | dev->started); | ||
1764 | spin_unlock_irqrestore(&dev->lock, flags); | 1676 | spin_unlock_irqrestore(&dev->lock, flags); |
1765 | BUG_ON(dev->started < 0); | 1677 | BUG_ON(dev->started < 0); |
1766 | } | 1678 | } |
@@ -1779,7 +1691,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error) | |||
1779 | t = &rctx->walk.cache[idx]; | 1691 | t = &rctx->walk.cache[idx]; |
1780 | dst = &req->dst[idx]; | 1692 | dst = &req->dst[idx]; |
1781 | 1693 | ||
1782 | dprintk("\n%s: sg_page(t): %p, t->length: %u, " | 1694 | pr_debug("\n%s: sg_page(t): %p, t->length: %u, " |
1783 | "sg_page(dst): %p, dst->length: %u, " | 1695 | "sg_page(dst): %p, dst->length: %u, " |
1784 | "nbytes: %u.\n", | 1696 | "nbytes: %u.\n", |
1785 | __func__, sg_page(t), t->length, | 1697 | __func__, sg_page(t), t->length, |
@@ -1815,9 +1727,8 @@ static void hifn_clear_rings(struct hifn_device *dev, int error) | |||
1815 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | 1727 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; |
1816 | int i, u; | 1728 | int i, u; |
1817 | 1729 | ||
1818 | dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " | 1730 | dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " |
1819 | "k: %d.%d.%d.%d.\n", | 1731 | "k: %d.%d.%d.%d.\n", |
1820 | dev->name, | ||
1821 | dma->cmdi, dma->srci, dma->dsti, dma->resi, | 1732 | dma->cmdi, dma->srci, dma->dsti, dma->resi, |
1822 | dma->cmdu, dma->srcu, dma->dstu, dma->resu, | 1733 | dma->cmdu, dma->srcu, dma->dstu, dma->resu, |
1823 | dma->cmdk, dma->srck, dma->dstk, dma->resk); | 1734 | dma->cmdk, dma->srck, dma->dstk, dma->resk); |
@@ -1870,9 +1781,8 @@ static void hifn_clear_rings(struct hifn_device *dev, int error) | |||
1870 | } | 1781 | } |
1871 | dma->dstk = i; dma->dstu = u; | 1782 | dma->dstk = i; dma->dstu = u; |
1872 | 1783 | ||
1873 | dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " | 1784 | dev_dbg(&dev->pdev->dev, "ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " |
1874 | "k: %d.%d.%d.%d.\n", | 1785 | "k: %d.%d.%d.%d.\n", |
1875 | dev->name, | ||
1876 | dma->cmdi, dma->srci, dma->dsti, dma->resi, | 1786 | dma->cmdi, dma->srci, dma->dsti, dma->resi, |
1877 | dma->cmdu, dma->srcu, dma->dstu, dma->resu, | 1787 | dma->cmdu, dma->srcu, dma->dstu, dma->resu, |
1878 | dma->cmdk, dma->srck, dma->dstk, dma->resk); | 1788 | dma->cmdk, dma->srck, dma->dstk, dma->resk); |
@@ -1921,21 +1831,22 @@ static void hifn_work(struct work_struct *work) | |||
1921 | int i; | 1831 | int i; |
1922 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | 1832 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; |
1923 | 1833 | ||
1924 | printk("%s: r: %08x, active: %d, started: %d, " | 1834 | dev_info(&dev->pdev->dev, |
1925 | "success: %lu: qlen: %u/%u, reset: %d.\n", | 1835 | "r: %08x, active: %d, started: %d, " |
1926 | dev->name, r, dev->active, dev->started, | 1836 | "success: %lu: qlen: %u/%u, reset: %d.\n", |
1927 | dev->success, dev->queue.qlen, dev->queue.max_qlen, | 1837 | r, dev->active, dev->started, |
1928 | reset); | 1838 | dev->success, dev->queue.qlen, dev->queue.max_qlen, |
1839 | reset); | ||
1929 | 1840 | ||
1930 | printk("%s: res: ", __func__); | 1841 | dev_info(&dev->pdev->dev, "%s: res: ", __func__); |
1931 | for (i=0; i<HIFN_D_RES_RSIZE; ++i) { | 1842 | for (i = 0; i < HIFN_D_RES_RSIZE; ++i) { |
1932 | printk("%x.%p ", dma->resr[i].l, dev->sa[i]); | 1843 | pr_info("%x.%p ", dma->resr[i].l, dev->sa[i]); |
1933 | if (dev->sa[i]) { | 1844 | if (dev->sa[i]) { |
1934 | hifn_process_ready(dev->sa[i], -ENODEV); | 1845 | hifn_process_ready(dev->sa[i], -ENODEV); |
1935 | hifn_complete_sa(dev, i); | 1846 | hifn_complete_sa(dev, i); |
1936 | } | 1847 | } |
1937 | } | 1848 | } |
1938 | printk("\n"); | 1849 | pr_cont("\n");
1939 | 1850 | ||
1940 | hifn_reset_dma(dev, 1); | 1851 | hifn_reset_dma(dev, 1); |
1941 | hifn_stop_device(dev); | 1852 | hifn_stop_device(dev); |
@@ -1957,9 +1868,9 @@ static irqreturn_t hifn_interrupt(int irq, void *data) | |||
1957 | 1868 | ||
1958 | dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR); | 1869 | dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR); |
1959 | 1870 | ||
1960 | dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " | 1871 | dev_dbg(&dev->pdev->dev, "1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " |
1961 | "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", | 1872 | "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", |
1962 | dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, | 1873 | dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, |
1963 | dma->cmdi, dma->srci, dma->dsti, dma->resi, | 1874 | dma->cmdi, dma->srci, dma->dsti, dma->resi, |
1964 | dma->cmdu, dma->srcu, dma->dstu, dma->resu); | 1875 | dma->cmdu, dma->srcu, dma->dstu, dma->resu); |
1965 | 1876 | ||
@@ -1978,9 +1889,9 @@ static irqreturn_t hifn_interrupt(int irq, void *data) | |||
1978 | if (restart) { | 1889 | if (restart) { |
1979 | u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); | 1890 | u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); |
1980 | 1891 | ||
1981 | printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", | 1892 | dev_warn(&dev->pdev->dev, "overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", |
1982 | dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), | 1893 | !!(dmacsr & HIFN_DMACSR_R_OVER), |
1983 | !!(dmacsr & HIFN_DMACSR_D_OVER), | 1894 | !!(dmacsr & HIFN_DMACSR_D_OVER), |
1984 | puisr, !!(puisr & HIFN_PUISR_DSTOVER)); | 1895 | puisr, !!(puisr & HIFN_PUISR_DSTOVER)); |
1985 | if (!!(puisr & HIFN_PUISR_DSTOVER)) | 1896 | if (!!(puisr & HIFN_PUISR_DSTOVER)) |
1986 | hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); | 1897 | hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); |
@@ -1991,18 +1902,18 @@ static irqreturn_t hifn_interrupt(int irq, void *data) | |||
1991 | restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | | 1902 | restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | |
1992 | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); | 1903 | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); |
1993 | if (restart) { | 1904 | if (restart) { |
1994 | printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n", | 1905 | dev_warn(&dev->pdev->dev, "abort: c: %d, s: %d, d: %d, r: %d.\n", |
1995 | dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), | 1906 | !!(dmacsr & HIFN_DMACSR_C_ABORT), |
1996 | !!(dmacsr & HIFN_DMACSR_S_ABORT), | 1907 | !!(dmacsr & HIFN_DMACSR_S_ABORT), |
1997 | !!(dmacsr & HIFN_DMACSR_D_ABORT), | 1908 | !!(dmacsr & HIFN_DMACSR_D_ABORT), |
1998 | !!(dmacsr & HIFN_DMACSR_R_ABORT)); | 1909 | !!(dmacsr & HIFN_DMACSR_R_ABORT)); |
1999 | hifn_reset_dma(dev, 1); | 1910 | hifn_reset_dma(dev, 1); |
2000 | hifn_init_dma(dev); | 1911 | hifn_init_dma(dev); |
2001 | hifn_init_registers(dev); | 1912 | hifn_init_registers(dev); |
2002 | } | 1913 | } |
2003 | 1914 | ||
2004 | if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { | 1915 | if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { |
2005 | dprintk("%s: wait on command.\n", dev->name); | 1916 | dev_dbg(&dev->pdev->dev, "wait on command.\n"); |
2006 | dev->dmareg &= ~(HIFN_DMAIER_C_WAIT); | 1917 | dev->dmareg &= ~(HIFN_DMAIER_C_WAIT); |
2007 | hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); | 1918 | hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); |
2008 | } | 1919 | } |
@@ -2020,19 +1931,19 @@ static void hifn_flush(struct hifn_device *dev) | |||
2020 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; | 1931 | struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; |
2021 | int i; | 1932 | int i; |
2022 | 1933 | ||
2023 | for (i=0; i<HIFN_D_RES_RSIZE; ++i) { | 1934 | for (i = 0; i < HIFN_D_RES_RSIZE; ++i) { |
2024 | struct hifn_desc *d = &dma->resr[i]; | 1935 | struct hifn_desc *d = &dma->resr[i]; |
2025 | 1936 | ||
2026 | if (dev->sa[i]) { | 1937 | if (dev->sa[i]) { |
2027 | hifn_process_ready(dev->sa[i], | 1938 | hifn_process_ready(dev->sa[i], |
2028 | (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0); | 1939 | (d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0); |
2029 | hifn_complete_sa(dev, i); | 1940 | hifn_complete_sa(dev, i); |
2030 | } | 1941 | } |
2031 | } | 1942 | } |
2032 | 1943 | ||
2033 | spin_lock_irqsave(&dev->lock, flags); | 1944 | spin_lock_irqsave(&dev->lock, flags); |
2034 | while ((async_req = crypto_dequeue_request(&dev->queue))) { | 1945 | while ((async_req = crypto_dequeue_request(&dev->queue))) { |
2035 | req = container_of(async_req, struct ablkcipher_request, base); | 1946 | req = ablkcipher_request_cast(async_req); |
2036 | spin_unlock_irqrestore(&dev->lock, flags); | 1947 | spin_unlock_irqrestore(&dev->lock, flags); |
2037 | 1948 | ||
2038 | hifn_process_ready(req, -ENODEV); | 1949 | hifn_process_ready(req, -ENODEV); |
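The open-coded container_of() here collapses into the existing ablkcipher_request_cast() helper, which (assuming the definition in include/crypto/algapi.h) is just a typed wrapper over the same operation:

	static inline struct ablkcipher_request *ablkcipher_request_cast(
		struct crypto_async_request *req)
	{
		return container_of(req, struct ablkcipher_request, base);
	}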
@@ -2057,7 +1968,7 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
2057 | if (len == HIFN_DES_KEY_LENGTH) { | 1968 | if (len == HIFN_DES_KEY_LENGTH) { |
2058 | u32 tmp[DES_EXPKEY_WORDS]; | 1969 | u32 tmp[DES_EXPKEY_WORDS]; |
2059 | int ret = des_ekey(tmp, key); | 1970 | int ret = des_ekey(tmp, key); |
2060 | 1971 | ||
2061 | if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | 1972 | if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { |
2062 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | 1973 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; |
2063 | return -EINVAL; | 1974 | return -EINVAL; |
@@ -2151,7 +2062,7 @@ static int hifn_process_queue(struct hifn_device *dev) | |||
2151 | if (backlog) | 2062 | if (backlog) |
2152 | backlog->complete(backlog, -EINPROGRESS); | 2063 | backlog->complete(backlog, -EINPROGRESS); |
2153 | 2064 | ||
2154 | req = container_of(async_req, struct ablkcipher_request, base); | 2065 | req = ablkcipher_request_cast(async_req); |
2155 | 2066 | ||
2156 | err = hifn_handle_req(req); | 2067 | err = hifn_handle_req(req); |
2157 | if (err) | 2068 | if (err) |
@@ -2298,9 +2209,7 @@ static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req) | |||
2298 | ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); | 2209 | ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); |
2299 | } | 2210 | } |
2300 | 2211 | ||
2301 | /* | 2212 | /* 3DES decryption functions. */ |
2302 | * 3DES decryption functions. | ||
2303 | */ | ||
2304 | static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req) | 2213 | static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req) |
2305 | { | 2214 | { |
2306 | return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, | 2215 | return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, |
@@ -2322,8 +2231,7 @@ static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req) | |||
2322 | ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); | 2231 | ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); |
2323 | } | 2232 | } |
2324 | 2233 | ||
2325 | struct hifn_alg_template | 2234 | struct hifn_alg_template { |
2326 | { | ||
2327 | char name[CRYPTO_MAX_ALG_NAME]; | 2235 | char name[CRYPTO_MAX_ALG_NAME]; |
2328 | char drv_name[CRYPTO_MAX_ALG_NAME]; | 2236 | char drv_name[CRYPTO_MAX_ALG_NAME]; |
2329 | unsigned int bsize; | 2237 | unsigned int bsize; |
@@ -2483,7 +2391,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t) | |||
2483 | struct hifn_crypto_alg *alg; | 2391 | struct hifn_crypto_alg *alg; |
2484 | int err; | 2392 | int err; |
2485 | 2393 | ||
2486 | alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL); | 2394 | alg = kzalloc(sizeof(*alg), GFP_KERNEL); |
2487 | if (!alg) | 2395 | if (!alg) |
2488 | return -ENOMEM; | 2396 | return -ENOMEM; |
2489 | 2397 | ||
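kzalloc(sizeof(*alg), ...) is the preferred spelling because the allocation size tracks the pointee type automatically; spelling out sizeof(struct hifn_crypto_alg) leaves a stale size behind if the pointer's type is ever changed. A minimal sketch of the pattern:

	struct hifn_crypto_alg *alg = kzalloc(sizeof(*alg), GFP_KERNEL); /* size follows the pointer's type */
	if (!alg)
		return -ENOMEM;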
@@ -2530,7 +2438,7 @@ static int hifn_register_alg(struct hifn_device *dev) | |||
2530 | { | 2438 | { |
2531 | int i, err; | 2439 | int i, err; |
2532 | 2440 | ||
2533 | for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) { | 2441 | for (i = 0; i < ARRAY_SIZE(hifn_alg_templates); ++i) { |
2534 | err = hifn_alg_alloc(dev, &hifn_alg_templates[i]); | 2442 | err = hifn_alg_alloc(dev, &hifn_alg_templates[i]); |
2535 | if (err) | 2443 | if (err) |
2536 | goto err_out_exit; | 2444 | goto err_out_exit; |
@@ -2575,7 +2483,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2575 | goto err_out_disable_pci_device; | 2483 | goto err_out_disable_pci_device; |
2576 | 2484 | ||
2577 | snprintf(name, sizeof(name), "hifn%d", | 2485 | snprintf(name, sizeof(name), "hifn%d", |
2578 | atomic_inc_return(&hifn_dev_number)-1); | 2486 | atomic_inc_return(&hifn_dev_number) - 1); |
2579 | 2487 | ||
2580 | err = pci_request_regions(pdev, name); | 2488 | err = pci_request_regions(pdev, name); |
2581 | if (err) | 2489 | if (err) |
@@ -2584,8 +2492,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2584 | if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE || | 2492 | if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE || |
2585 | pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE || | 2493 | pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE || |
2586 | pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) { | 2494 | pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) { |
2587 | dprintk("%s: Broken hardware - I/O regions are too small.\n", | 2495 | dev_err(&pdev->dev, "Broken hardware - I/O regions are too small.\n"); |
2588 | pci_name(pdev)); | ||
2589 | err = -ENODEV; | 2496 | err = -ENODEV; |
2590 | goto err_out_free_regions; | 2497 | goto err_out_free_regions; |
2591 | } | 2498 | } |
@@ -2602,7 +2509,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2602 | snprintf(dev->name, sizeof(dev->name), "%s", name); | 2509 | snprintf(dev->name, sizeof(dev->name), "%s", name); |
2603 | spin_lock_init(&dev->lock); | 2510 | spin_lock_init(&dev->lock); |
2604 | 2511 | ||
2605 | for (i=0; i<3; ++i) { | 2512 | for (i = 0; i < 3; ++i) { |
2606 | unsigned long addr, size; | 2513 | unsigned long addr, size; |
2607 | 2514 | ||
2608 | addr = pci_resource_start(pdev, i); | 2515 | addr = pci_resource_start(pdev, i); |
@@ -2618,7 +2525,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2618 | dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma), | 2525 | dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma), |
2619 | &dev->desc_dma); | 2526 | &dev->desc_dma); |
2620 | if (!dev->desc_virt) { | 2527 | if (!dev->desc_virt) { |
2621 | dprintk("Failed to allocate descriptor rings.\n"); | 2528 | dev_err(&pdev->dev, "Failed to allocate descriptor rings.\n"); |
2622 | err = -ENOMEM; | 2529 | err = -ENOMEM; |
2623 | goto err_out_unmap_bars; | 2530 | goto err_out_unmap_bars; |
2624 | } | 2531 | } |
@@ -2626,7 +2533,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2626 | dev->pdev = pdev; | 2533 | dev->pdev = pdev; |
2627 | dev->irq = pdev->irq; | 2534 | dev->irq = pdev->irq; |
2628 | 2535 | ||
2629 | for (i=0; i<HIFN_D_RES_RSIZE; ++i) | 2536 | for (i = 0; i < HIFN_D_RES_RSIZE; ++i) |
2630 | dev->sa[i] = NULL; | 2537 | dev->sa[i] = NULL; |
2631 | 2538 | ||
2632 | pci_set_drvdata(pdev, dev); | 2539 | pci_set_drvdata(pdev, dev); |
@@ -2637,7 +2544,8 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2637 | 2544 | ||
2638 | err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev); | 2545 | err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev); |
2639 | if (err) { | 2546 | if (err) { |
2640 | dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err); | 2547 | dev_err(&pdev->dev, "Failed to request IRQ%d: err: %d.\n", |
2548 | dev->irq, err); | ||
2641 | dev->irq = 0; | 2549 | dev->irq = 0; |
2642 | goto err_out_free_desc; | 2550 | goto err_out_free_desc; |
2643 | } | 2551 | } |
@@ -2646,10 +2554,6 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2646 | if (err) | 2554 | if (err) |
2647 | goto err_out_free_irq; | 2555 | goto err_out_free_irq; |
2648 | 2556 | ||
2649 | err = hifn_test(dev, 1, 0); | ||
2650 | if (err) | ||
2651 | goto err_out_stop_device; | ||
2652 | |||
2653 | err = hifn_register_rng(dev); | 2557 | err = hifn_register_rng(dev); |
2654 | if (err) | 2558 | if (err) |
2655 | goto err_out_stop_device; | 2559 | goto err_out_stop_device; |
@@ -2661,9 +2565,9 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2661 | INIT_DELAYED_WORK(&dev->work, hifn_work); | 2565 | INIT_DELAYED_WORK(&dev->work, hifn_work); |
2662 | schedule_delayed_work(&dev->work, HZ); | 2566 | schedule_delayed_work(&dev->work, HZ); |
2663 | 2567 | ||
2664 | dprintk("HIFN crypto accelerator card at %s has been " | 2568 | dev_dbg(&pdev->dev, "HIFN crypto accelerator card at %s has been " |
2665 | "successfully registered as %s.\n", | 2569 | "successfully registered as %s.\n", |
2666 | pci_name(pdev), dev->name); | 2570 | pci_name(pdev), dev->name); |
2667 | 2571 | ||
2668 | return 0; | 2572 | return 0; |
2669 | 2573 | ||
@@ -2680,7 +2584,7 @@ err_out_free_desc: | |||
2680 | dev->desc_virt, dev->desc_dma); | 2584 | dev->desc_virt, dev->desc_dma); |
2681 | 2585 | ||
2682 | err_out_unmap_bars: | 2586 | err_out_unmap_bars: |
2683 | for (i=0; i<3; ++i) | 2587 | for (i = 0; i < 3; ++i) |
2684 | if (dev->bar[i]) | 2588 | if (dev->bar[i]) |
2685 | iounmap(dev->bar[i]); | 2589 | iounmap(dev->bar[i]); |
2686 | 2590 | ||
@@ -2715,7 +2619,7 @@ static void hifn_remove(struct pci_dev *pdev) | |||
2715 | 2619 | ||
2716 | pci_free_consistent(pdev, sizeof(struct hifn_dma), | 2620 | pci_free_consistent(pdev, sizeof(struct hifn_dma), |
2717 | dev->desc_virt, dev->desc_dma); | 2621 | dev->desc_virt, dev->desc_dma); |
2718 | for (i=0; i<3; ++i) | 2622 | for (i = 0; i < 3; ++i) |
2719 | if (dev->bar[i]) | 2623 | if (dev->bar[i]) |
2720 | iounmap(dev->bar[i]); | 2624 | iounmap(dev->bar[i]); |
2721 | 2625 | ||
@@ -2750,8 +2654,7 @@ static int __init hifn_init(void) | |||
2750 | 2654 | ||
2751 | if (strncmp(hifn_pll_ref, "ext", 3) && | 2655 | if (strncmp(hifn_pll_ref, "ext", 3) && |
2752 | strncmp(hifn_pll_ref, "pci", 3)) { | 2656 | strncmp(hifn_pll_ref, "pci", 3)) { |
2753 | printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, " | 2657 | pr_err("hifn795x: invalid hifn_pll_ref clock, must be pci or ext\n");
2754 | "must be pci or ext"); | ||
2755 | return -EINVAL; | 2658 | return -EINVAL; |
2756 | } | 2659 | } |
2757 | 2660 | ||
@@ -2763,22 +2666,21 @@ static int __init hifn_init(void) | |||
2763 | if (hifn_pll_ref[3] != '\0') { | 2666 | if (hifn_pll_ref[3] != '\0') { |
2764 | freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); | 2667 | freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); |
2765 | if (freq < 20 || freq > 100) { | 2668 | if (freq < 20 || freq > 100) { |
2766 | printk(KERN_ERR "hifn795x: invalid hifn_pll_ref " | 2669 | pr_err("hifn795x: invalid hifn_pll_ref frequency, must "
2767 | "frequency, must be in the range " | 2670 | "be in the range of 20-100\n");
2768 | "of 20-100"); | ||
2769 | return -EINVAL; | 2671 | return -EINVAL; |
2770 | } | 2672 | } |
2771 | } | 2673 | } |
2772 | 2674 | ||
2773 | err = pci_register_driver(&hifn_pci_driver); | 2675 | err = pci_register_driver(&hifn_pci_driver); |
2774 | if (err < 0) { | 2676 | if (err < 0) { |
2775 | dprintk("Failed to register PCI driver for %s device.\n", | 2677 | pr_err("Failed to register PCI driver for %s device.\n", |
2776 | hifn_pci_driver.name); | 2678 | hifn_pci_driver.name); |
2777 | return -ENODEV; | 2679 | return -ENODEV; |
2778 | } | 2680 | } |
2779 | 2681 | ||
2780 | printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " | 2682 | pr_info("Driver for HIFN 795x crypto accelerator chip " |
2781 | "has been successfully registered.\n"); | 2683 | "has been successfully registered.\n"); |
2782 | 2684 | ||
2783 | return 0; | 2685 | return 0; |
2784 | } | 2686 | } |
@@ -2787,8 +2689,8 @@ static void __exit hifn_fini(void) | |||
2787 | { | 2689 | { |
2788 | pci_unregister_driver(&hifn_pci_driver); | 2690 | pci_unregister_driver(&hifn_pci_driver); |
2789 | 2691 | ||
2790 | printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " | 2692 | pr_info("Driver for HIFN 795x crypto accelerator chip " |
2791 | "has been successfully unregistered.\n"); | 2693 | "has been successfully unregistered.\n"); |
2792 | } | 2694 | } |
2793 | 2695 | ||
2794 | module_init(hifn_init); | 2696 | module_init(hifn_init); |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 8f2790353281..e52496a172d0 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -510,10 +510,8 @@ npe_error: | |||
510 | printk(KERN_ERR "%s not responding\n", npe_name(npe_c)); | 510 | printk(KERN_ERR "%s not responding\n", npe_name(npe_c)); |
511 | ret = -EIO; | 511 | ret = -EIO; |
512 | err: | 512 | err: |
513 | if (ctx_pool) | 513 | dma_pool_destroy(ctx_pool); |
514 | dma_pool_destroy(ctx_pool); | 514 | dma_pool_destroy(buffer_pool); |
515 | if (buffer_pool) | ||
516 | dma_pool_destroy(buffer_pool); | ||
517 | npe_release(npe_c); | 515 | npe_release(npe_c); |
518 | return ret; | 516 | return ret; |
519 | } | 517 | } |
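The ixp4xx hunk relies on dma_pool_destroy() being a no-op for a NULL pool, just like kfree(), so the error path can unwind unconditionally. A self-contained sketch of the same pattern for a hypothetical driver:

	static void example_cleanup(struct dma_pool *ctx_pool,
				    struct dma_pool *buffer_pool)
	{
		/* dma_pool_destroy() silently ignores NULL, so no guards are needed */
		dma_pool_destroy(ctx_pool);
		dma_pool_destroy(buffer_pool);
	}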
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 6edae64bb387..dcf1fceb9336 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c | |||
@@ -401,7 +401,15 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, | |||
401 | return -EINVAL; | 401 | return -EINVAL; |
402 | 402 | ||
403 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); | 403 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); |
404 | if (creq->src_nents < 0) { | ||
405 | dev_err(cesa_dev->dev, "Invalid number of src SG"); | ||
406 | return creq->src_nents; | ||
407 | } | ||
404 | creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); | 408 | creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); |
409 | if (creq->dst_nents < 0) { | ||
410 | dev_err(cesa_dev->dev, "Invalid number of dst SG"); | ||
411 | return creq->dst_nents; | ||
412 | } | ||
405 | 413 | ||
406 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, | 414 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, |
407 | CESA_SA_DESC_CFG_OP_MSK); | 415 | CESA_SA_DESC_CFG_OP_MSK); |
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 6ec55b4a087b..683cca9ac3c4 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c | |||
@@ -712,6 +712,10 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) | |||
712 | creq->req.base.type = CESA_STD_REQ; | 712 | creq->req.base.type = CESA_STD_REQ; |
713 | 713 | ||
714 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); | 714 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); |
715 | if (creq->src_nents < 0) { | ||
716 | dev_err(cesa_dev->dev, "Invalid number of src SG"); | ||
717 | return creq->src_nents; | ||
718 | } | ||
715 | 719 | ||
716 | ret = mv_cesa_ahash_cache_req(req, cached); | 720 | ret = mv_cesa_ahash_cache_req(req, cached); |
717 | if (ret) | 721 | if (ret) |
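Both marvell hunks add the same guard: sg_nents_for_len() returns a negative errno (-EINVAL) when the scatterlist is shorter than the requested length, and that value must not be fed onward as an entry count. The pattern, with "dev" standing in for the real device pointer:

	int nents = sg_nents_for_len(req->src, req->nbytes);
	if (nents < 0) {
		dev_err(dev, "Invalid number of src SG\n");
		return nents;   /* propagate -EINVAL rather than using it as a count */
	}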
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 5450880abb7b..b85a7a7dbf63 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -241,7 +241,7 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) | |||
241 | 241 | ||
242 | struct n2_ahash_alg { | 242 | struct n2_ahash_alg { |
243 | struct list_head entry; | 243 | struct list_head entry; |
244 | const char *hash_zero; | 244 | const u8 *hash_zero; |
245 | const u32 *hash_init; | 245 | const u32 *hash_init; |
246 | u8 hw_op_hashsz; | 246 | u8 hw_op_hashsz; |
247 | u8 digest_size; | 247 | u8 digest_size; |
@@ -1267,7 +1267,7 @@ static LIST_HEAD(cipher_algs); | |||
1267 | 1267 | ||
1268 | struct n2_hash_tmpl { | 1268 | struct n2_hash_tmpl { |
1269 | const char *name; | 1269 | const char *name; |
1270 | const char *hash_zero; | 1270 | const u8 *hash_zero; |
1271 | const u32 *hash_init; | 1271 | const u32 *hash_init; |
1272 | u8 hw_op_hashsz; | 1272 | u8 hw_op_hashsz; |
1273 | u8 digest_size; | 1273 | u8 digest_size; |
@@ -1276,40 +1276,19 @@ struct n2_hash_tmpl { | |||
1276 | u8 hmac_type; | 1276 | u8 hmac_type; |
1277 | }; | 1277 | }; |
1278 | 1278 | ||
1279 | static const char md5_zero[MD5_DIGEST_SIZE] = { | ||
1280 | 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, | ||
1281 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, | ||
1282 | }; | ||
1283 | static const u32 md5_init[MD5_HASH_WORDS] = { | 1279 | static const u32 md5_init[MD5_HASH_WORDS] = { |
1284 | cpu_to_le32(MD5_H0), | 1280 | cpu_to_le32(MD5_H0), |
1285 | cpu_to_le32(MD5_H1), | 1281 | cpu_to_le32(MD5_H1), |
1286 | cpu_to_le32(MD5_H2), | 1282 | cpu_to_le32(MD5_H2), |
1287 | cpu_to_le32(MD5_H3), | 1283 | cpu_to_le32(MD5_H3), |
1288 | }; | 1284 | }; |
1289 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { | ||
1290 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, | ||
1291 | 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, | ||
1292 | 0x07, 0x09 | ||
1293 | }; | ||
1294 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { | 1285 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { |
1295 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, | 1286 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, |
1296 | }; | 1287 | }; |
1297 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { | ||
1298 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, | ||
1299 | 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, | ||
1300 | 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, | ||
1301 | 0x1b, 0x78, 0x52, 0xb8, 0x55 | ||
1302 | }; | ||
1303 | static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { | 1288 | static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { |
1304 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | 1289 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, |
1305 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, | 1290 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, |
1306 | }; | 1291 | }; |
1307 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { | ||
1308 | 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, | ||
1309 | 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, | ||
1310 | 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, | ||
1311 | 0x2f | ||
1312 | }; | ||
1313 | static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { | 1292 | static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { |
1314 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, | 1293 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, |
1315 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, | 1294 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, |
@@ -1317,7 +1296,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { | |||
1317 | 1296 | ||
1318 | static const struct n2_hash_tmpl hash_tmpls[] = { | 1297 | static const struct n2_hash_tmpl hash_tmpls[] = { |
1319 | { .name = "md5", | 1298 | { .name = "md5", |
1320 | .hash_zero = md5_zero, | 1299 | .hash_zero = md5_zero_message_hash, |
1321 | .hash_init = md5_init, | 1300 | .hash_init = md5_init, |
1322 | .auth_type = AUTH_TYPE_MD5, | 1301 | .auth_type = AUTH_TYPE_MD5, |
1323 | .hmac_type = AUTH_TYPE_HMAC_MD5, | 1302 | .hmac_type = AUTH_TYPE_HMAC_MD5, |
@@ -1325,7 +1304,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { | |||
1325 | .digest_size = MD5_DIGEST_SIZE, | 1304 | .digest_size = MD5_DIGEST_SIZE, |
1326 | .block_size = MD5_HMAC_BLOCK_SIZE }, | 1305 | .block_size = MD5_HMAC_BLOCK_SIZE }, |
1327 | { .name = "sha1", | 1306 | { .name = "sha1", |
1328 | .hash_zero = sha1_zero, | 1307 | .hash_zero = sha1_zero_message_hash, |
1329 | .hash_init = sha1_init, | 1308 | .hash_init = sha1_init, |
1330 | .auth_type = AUTH_TYPE_SHA1, | 1309 | .auth_type = AUTH_TYPE_SHA1, |
1331 | .hmac_type = AUTH_TYPE_HMAC_SHA1, | 1310 | .hmac_type = AUTH_TYPE_HMAC_SHA1, |
@@ -1333,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { | |||
1333 | .digest_size = SHA1_DIGEST_SIZE, | 1312 | .digest_size = SHA1_DIGEST_SIZE, |
1334 | .block_size = SHA1_BLOCK_SIZE }, | 1313 | .block_size = SHA1_BLOCK_SIZE }, |
1335 | { .name = "sha256", | 1314 | { .name = "sha256", |
1336 | .hash_zero = sha256_zero, | 1315 | .hash_zero = sha256_zero_message_hash, |
1337 | .hash_init = sha256_init, | 1316 | .hash_init = sha256_init, |
1338 | .auth_type = AUTH_TYPE_SHA256, | 1317 | .auth_type = AUTH_TYPE_SHA256, |
1339 | .hmac_type = AUTH_TYPE_HMAC_SHA256, | 1318 | .hmac_type = AUTH_TYPE_HMAC_SHA256, |
@@ -1341,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { | |||
1341 | .digest_size = SHA256_DIGEST_SIZE, | 1320 | .digest_size = SHA256_DIGEST_SIZE, |
1342 | .block_size = SHA256_BLOCK_SIZE }, | 1321 | .block_size = SHA256_BLOCK_SIZE }, |
1343 | { .name = "sha224", | 1322 | { .name = "sha224", |
1344 | .hash_zero = sha224_zero, | 1323 | .hash_zero = sha224_zero_message_hash, |
1345 | .hash_init = sha224_init, | 1324 | .hash_init = sha224_init, |
1346 | .auth_type = AUTH_TYPE_SHA256, | 1325 | .auth_type = AUTH_TYPE_SHA256, |
1347 | .hmac_type = AUTH_TYPE_RESERVED, | 1326 | .hmac_type = AUTH_TYPE_RESERVED, |
@@ -2243,22 +2222,19 @@ static struct platform_driver n2_mau_driver = { | |||
2243 | .remove = n2_mau_remove, | 2222 | .remove = n2_mau_remove, |
2244 | }; | 2223 | }; |
2245 | 2224 | ||
2225 | static struct platform_driver * const drivers[] = { | ||
2226 | &n2_crypto_driver, | ||
2227 | &n2_mau_driver, | ||
2228 | }; | ||
2229 | |||
2246 | static int __init n2_init(void) | 2230 | static int __init n2_init(void) |
2247 | { | 2231 | { |
2248 | int err = platform_driver_register(&n2_crypto_driver); | 2232 | return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); |
2249 | |||
2250 | if (!err) { | ||
2251 | err = platform_driver_register(&n2_mau_driver); | ||
2252 | if (err) | ||
2253 | platform_driver_unregister(&n2_crypto_driver); | ||
2254 | } | ||
2255 | return err; | ||
2256 | } | 2233 | } |
2257 | 2234 | ||
2258 | static void __exit n2_exit(void) | 2235 | static void __exit n2_exit(void) |
2259 | { | 2236 | { |
2260 | platform_driver_unregister(&n2_mau_driver); | 2237 | platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); |
2261 | platform_driver_unregister(&n2_crypto_driver); | ||
2262 | } | 2238 | } |
2263 | 2239 | ||
2264 | module_init(n2_init); | 2240 | module_init(n2_init); |
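The n2 change above swaps the open-coded register/rollback sequence for platform_register_drivers(), which registers an array of drivers in order and unwinds the already-registered ones if a later registration fails. A hedged sketch with two placeholder drivers:

#include <linux/platform_device.h>

static struct platform_driver foo_driver;       /* placeholder drivers, */
static struct platform_driver bar_driver;       /* not real devices     */

static struct platform_driver *const example_drivers[] = {
        &foo_driver,
        &bar_driver,
};

static int __init example_init(void)
{
        /* On failure, drivers registered so far are unregistered
         * automatically before the error is returned.
         */
        return platform_register_drivers(example_drivers,
                                         ARRAY_SIZE(example_drivers));
}

static void __exit example_exit(void)
{
        platform_unregister_drivers(example_drivers,
                                    ARRAY_SIZE(example_drivers));
}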
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 9ef51fafdbff..1710f80a09ec 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c | |||
@@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, | |||
442 | (unsigned int)ccw, | 442 | (unsigned int)ccw, |
443 | (unsigned int)be32_to_cpu(crb->ccw)); | 443 | (unsigned int)be32_to_cpu(crb->ccw)); |
444 | 444 | ||
445 | /* | ||
446 | * NX842 coprocessor sets 3rd bit in CR register with XER[S0]. | ||
447 | * XER[S0] is the integer summary overflow bit, which has nothing | ||
448 | * to do with NX. Since this bit can be set alongside the other | ||
449 | * return codes, mask it off before checking them. | ||
450 | */ | ||
451 | ret &= ~ICSWX_XERS0; | ||
452 | |||
445 | switch (ret) { | 453 | switch (ret) { |
446 | case ICSWX_INITIATED: | 454 | case ICSWX_INITIATED: |
447 | ret = wait_for_csb(wmem, csb); | 455 | ret = wait_for_csb(wmem, csb); |
@@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, | |||
454 | pr_err_ratelimited("ICSWX rejected\n"); | 462 | pr_err_ratelimited("ICSWX rejected\n"); |
455 | ret = -EPROTO; | 463 | ret = -EPROTO; |
456 | break; | 464 | break; |
457 | default: | ||
458 | pr_err_ratelimited("Invalid ICSWX return code %x\n", ret); | ||
459 | ret = -EPROTO; | ||
460 | break; | ||
461 | } | 465 | } |
462 | 466 | ||
463 | if (!ret) | 467 | if (!ret) |
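The masking added above matters because the CR nibble returned by icswx() can carry XER[S0] in its low bit: a successful initiation may read back as 0x8 | 0x1 = 0x9, which previously fell into the now-removed "invalid return code" branch. A sketch of the classification after masking (classify_icswx() is illustrative, not kernel code):

#include <linux/errno.h>
#include <asm/icswx.h>

/* Illustrative: clear ICSWX_XERS0 first so e.g. 0x9 maps back to
 * ICSWX_INITIATED (0x8) instead of looking like an unknown code.
 */
static int classify_icswx(int cr)
{
        cr &= ~ICSWX_XERS0;     /* XER[S0] is unrelated to the coprocessor */

        switch (cr) {
        case ICSWX_INITIATED:
                return 0;       /* go wait for the CSB */
        case ICSWX_BUSY:
                return -EBUSY;
        case ICSWX_REJECTED:
                return -EPROTO;
        default:
                return -EIO;    /* not expected once the bit is masked */
        }
}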
@@ -525,7 +529,6 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, | |||
525 | static int __init nx842_powernv_probe(struct device_node *dn) | 529 | static int __init nx842_powernv_probe(struct device_node *dn) |
526 | { | 530 | { |
527 | struct nx842_coproc *coproc; | 531 | struct nx842_coproc *coproc; |
528 | struct property *ct_prop, *ci_prop; | ||
529 | unsigned int ct, ci; | 532 | unsigned int ct, ci; |
530 | int chip_id; | 533 | int chip_id; |
531 | 534 | ||
@@ -534,18 +537,16 @@ static int __init nx842_powernv_probe(struct device_node *dn) | |||
534 | pr_err("ibm,chip-id missing\n"); | 537 | pr_err("ibm,chip-id missing\n"); |
535 | return -EINVAL; | 538 | return -EINVAL; |
536 | } | 539 | } |
537 | ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL); | 540 | |
538 | if (!ct_prop) { | 541 | if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) { |
539 | pr_err("ibm,842-coprocessor-type missing\n"); | 542 | pr_err("ibm,842-coprocessor-type missing\n"); |
540 | return -EINVAL; | 543 | return -EINVAL; |
541 | } | 544 | } |
542 | ct = be32_to_cpu(*(unsigned int *)ct_prop->value); | 545 | |
543 | ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL); | 546 | if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) { |
544 | if (!ci_prop) { | ||
545 | pr_err("ibm,842-coprocessor-instance missing\n"); | 547 | pr_err("ibm,842-coprocessor-instance missing\n"); |
546 | return -EINVAL; | 548 | return -EINVAL; |
547 | } | 549 | } |
548 | ci = be32_to_cpu(*(unsigned int *)ci_prop->value); | ||
549 | 550 | ||
550 | coproc = kmalloc(sizeof(*coproc), GFP_KERNEL); | 551 | coproc = kmalloc(sizeof(*coproc), GFP_KERNEL); |
551 | if (!coproc) | 552 | if (!coproc) |
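The probe conversion above replaces of_find_property() plus a raw be32_to_cpu() dereference with of_property_read_u32(), which checks the property's presence and length and performs the endian conversion internally. A minimal sketch using one of the properties from this probe (read_coproc_type() is a hypothetical wrapper):

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/errno.h>

static int read_coproc_type(struct device_node *dn, unsigned int *ct)
{
        /* One call replaces the find/dereference/byteswap sequence. */
        if (of_property_read_u32(dn, "ibm,842-coprocessor-type", ct)) {
                pr_err("ibm,842-coprocessor-type missing\n");
                return -EINVAL;
        }
        return 0;       /* *ct now holds the CPU-endian value */
}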
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index eba23147c0ee..dd355bd19474 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -539,8 +539,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) | |||
539 | 539 | ||
540 | static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | 540 | static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) |
541 | { | 541 | { |
542 | int err = 0; | ||
543 | |||
544 | pr_debug("total: %d\n", dd->total); | 542 | pr_debug("total: %d\n", dd->total); |
545 | 543 | ||
546 | omap_aes_dma_stop(dd); | 544 | omap_aes_dma_stop(dd); |
@@ -548,7 +546,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | |||
548 | dmaengine_terminate_all(dd->dma_lch_in); | 546 | dmaengine_terminate_all(dd->dma_lch_in); |
549 | dmaengine_terminate_all(dd->dma_lch_out); | 547 | dmaengine_terminate_all(dd->dma_lch_out); |
550 | 548 | ||
551 | return err; | 549 | return 0; |
552 | } | 550 | } |
553 | 551 | ||
554 | static int omap_aes_check_aligned(struct scatterlist *sg, int total) | 552 | static int omap_aes_check_aligned(struct scatterlist *sg, int total) |
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 0a70e46d5416..dd7b93f2f94c 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c | |||
@@ -527,8 +527,6 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err) | |||
527 | 527 | ||
528 | static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) | 528 | static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) |
529 | { | 529 | { |
530 | int err = 0; | ||
531 | |||
532 | pr_debug("total: %d\n", dd->total); | 530 | pr_debug("total: %d\n", dd->total); |
533 | 531 | ||
534 | omap_des_dma_stop(dd); | 532 | omap_des_dma_stop(dd); |
@@ -536,7 +534,7 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) | |||
536 | dmaengine_terminate_all(dd->dma_lch_in); | 534 | dmaengine_terminate_all(dd->dma_lch_in); |
537 | dmaengine_terminate_all(dd->dma_lch_out); | 535 | dmaengine_terminate_all(dd->dma_lch_out); |
538 | 536 | ||
539 | return err; | 537 | return 0; |
540 | } | 538 | } |
541 | 539 | ||
542 | static int omap_des_copy_needed(struct scatterlist *sg) | 540 | static int omap_des_copy_needed(struct scatterlist *sg) |
@@ -1086,6 +1084,7 @@ static int omap_des_probe(struct platform_device *pdev) | |||
1086 | dd->phys_base = res->start; | 1084 | dd->phys_base = res->start; |
1087 | 1085 | ||
1088 | pm_runtime_enable(dev); | 1086 | pm_runtime_enable(dev); |
1087 | pm_runtime_irq_safe(dev); | ||
1089 | err = pm_runtime_get_sync(dev); | 1088 | err = pm_runtime_get_sync(dev); |
1090 | if (err < 0) { | 1089 | if (err < 0) { |
1091 | pm_runtime_put_noidle(dev); | 1090 | pm_runtime_put_noidle(dev); |
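The one-line pm_runtime_irq_safe() addition above declares the device's runtime-PM callbacks safe to run with interrupts disabled, so pm_runtime_get_sync() may later be called from atomic context; the trade-off is that the parent device is kept powered. A hedged sketch of the enable sequence, with example_rpm_enable() as a hypothetical name:

#include <linux/pm_runtime.h>

/* Sketch: enable runtime PM, mark the callbacks IRQ-safe, then take
 * an initial reference; undo both on failure.
 */
static int example_rpm_enable(struct device *dev)
{
        int err;

        pm_runtime_enable(dev);
        pm_runtime_irq_safe(dev);

        err = pm_runtime_get_sync(dev);
        if (err < 0) {
                pm_runtime_put_noidle(dev);     /* balance the failed get */
                pm_runtime_disable(dev);
                return err;
        }
        return 0;
}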
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 97a364694bfc..441e86b23571 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -238,7 +238,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, | |||
238 | /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. | 238 | /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. |
239 | * We could avoid some copying here but it's probably not worth it. | 239 | * We could avoid some copying here but it's probably not worth it. |
240 | */ | 240 | */ |
241 | if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) { | 241 | if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { |
242 | ecb_crypt_copy(in, out, key, cword, count); | 242 | ecb_crypt_copy(in, out, key, cword, count); |
243 | return; | 243 | return; |
244 | } | 244 | } |
@@ -250,7 +250,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, | |||
250 | u8 *iv, struct cword *cword, int count) | 250 | u8 *iv, struct cword *cword, int count) |
251 | { | 251 | { |
252 | /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ | 252 | /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ |
253 | if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE)) | 253 | if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) |
254 | return cbc_crypt_copy(in, out, key, iv, cword, count); | 254 | return cbc_crypt_copy(in, out, key, iv, cword, count); |
255 | 255 | ||
256 | return rep_xcrypt_cbc(in, out, key, iv, cword, count); | 256 | return rep_xcrypt_cbc(in, out, key, iv, cword, count); |
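The padlock change is purely cosmetic: offset_in_page(p) expands to ((unsigned long)(p) & ~PAGE_MASK), i.e. the byte offset of a pointer within its page, so the prefetch-overrun test now reads as intended. As a sketch (would_cross_page() is illustrative):

#include <linux/mm.h>

/* Illustrative predicate: true when reading fetch_bytes starting at
 * in would run past the end of in's page.
 */
static bool would_cross_page(const u8 *in, size_t fetch_bytes)
{
        return offset_in_page(in) + fetch_bytes > PAGE_SIZE;
}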
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 615da961c4d8..3b1c7ecf078f 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
@@ -272,12 +272,6 @@ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, | |||
272 | return indx; | 272 | return indx; |
273 | } | 273 | } |
274 | 274 | ||
275 | /* Count the number of scatterlist entries in a scatterlist. */ | ||
276 | static inline int sg_count(struct scatterlist *sg_list, int nbytes) | ||
277 | { | ||
278 | return sg_nents_for_len(sg_list, nbytes); | ||
279 | } | ||
280 | |||
281 | static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) | 275 | static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) |
282 | { | 276 | { |
283 | ddt->p = phys; | 277 | ddt->p = phys; |
@@ -295,12 +289,17 @@ static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine, | |||
295 | enum dma_data_direction dir, | 289 | enum dma_data_direction dir, |
296 | dma_addr_t *ddt_phys) | 290 | dma_addr_t *ddt_phys) |
297 | { | 291 | { |
298 | unsigned nents, mapped_ents; | 292 | unsigned mapped_ents; |
299 | struct scatterlist *cur; | 293 | struct scatterlist *cur; |
300 | struct spacc_ddt *ddt; | 294 | struct spacc_ddt *ddt; |
301 | int i; | 295 | int i; |
296 | int nents; | ||
302 | 297 | ||
303 | nents = sg_count(payload, nbytes); | 298 | nents = sg_nents_for_len(payload, nbytes); |
299 | if (nents < 0) { | ||
300 | dev_err(engine->dev, "Invalid number of SG.\n"); | ||
301 | return NULL; | ||
302 | } | ||
304 | mapped_ents = dma_map_sg(engine->dev, payload, nents, dir); | 303 | mapped_ents = dma_map_sg(engine->dev, payload, nents, dir); |
305 | 304 | ||
306 | if (mapped_ents + 1 > MAX_DDT_LEN) | 305 | if (mapped_ents + 1 > MAX_DDT_LEN) |
@@ -328,7 +327,7 @@ static int spacc_aead_make_ddts(struct aead_request *areq) | |||
328 | struct spacc_engine *engine = req->engine; | 327 | struct spacc_engine *engine = req->engine; |
329 | struct spacc_ddt *src_ddt, *dst_ddt; | 328 | struct spacc_ddt *src_ddt, *dst_ddt; |
330 | unsigned total; | 329 | unsigned total; |
331 | unsigned int src_nents, dst_nents; | 330 | int src_nents, dst_nents; |
332 | struct scatterlist *cur; | 331 | struct scatterlist *cur; |
333 | int i, dst_ents, src_ents; | 332 | int i, dst_ents, src_ents; |
334 | 333 | ||
@@ -336,13 +335,21 @@ static int spacc_aead_make_ddts(struct aead_request *areq) | |||
336 | if (req->is_encrypt) | 335 | if (req->is_encrypt) |
337 | total += crypto_aead_authsize(aead); | 336 | total += crypto_aead_authsize(aead); |
338 | 337 | ||
339 | src_nents = sg_count(areq->src, total); | 338 | src_nents = sg_nents_for_len(areq->src, total); |
339 | if (src_nents < 0) { | ||
340 | dev_err(engine->dev, "Invalid number of src SG.\n"); | ||
341 | return src_nents; | ||
342 | } | ||
340 | if (src_nents + 1 > MAX_DDT_LEN) | 343 | if (src_nents + 1 > MAX_DDT_LEN) |
341 | return -E2BIG; | 344 | return -E2BIG; |
342 | 345 | ||
343 | dst_nents = 0; | 346 | dst_nents = 0; |
344 | if (areq->src != areq->dst) { | 347 | if (areq->src != areq->dst) { |
345 | dst_nents = sg_count(areq->dst, total); | 348 | dst_nents = sg_nents_for_len(areq->dst, total); |
349 | if (dst_nents < 0) { | ||
350 | dev_err(engine->dev, "Invalid number of dst SG.\n"); | ||
351 | return dst_nents; | ||
352 | } | ||
346 | if (src_nents + 1 > MAX_DDT_LEN) | 353 | if (src_nents + 1 > MAX_DDT_LEN) |
347 | return -E2BIG; | 354 | return -E2BIG; |
348 | } | 355 | } |
@@ -422,13 +429,22 @@ static void spacc_aead_free_ddts(struct spacc_req *req) | |||
422 | (req->is_encrypt ? crypto_aead_authsize(aead) : 0); | 429 | (req->is_encrypt ? crypto_aead_authsize(aead) : 0); |
423 | struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead); | 430 | struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead); |
424 | struct spacc_engine *engine = aead_ctx->generic.engine; | 431 | struct spacc_engine *engine = aead_ctx->generic.engine; |
425 | unsigned nents = sg_count(areq->src, total); | 432 | int nents = sg_nents_for_len(areq->src, total); |
433 | |||
434 | /* sg_nents_for_len should not fail here since it succeeded when the sg was mapped */ | ||
435 | if (unlikely(nents < 0)) { | ||
436 | dev_err(engine->dev, "Invalid number of src SG.\n"); | ||
437 | return; | ||
438 | } | ||
426 | 439 | ||
427 | if (areq->src != areq->dst) { | 440 | if (areq->src != areq->dst) { |
428 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); | 441 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); |
429 | dma_unmap_sg(engine->dev, areq->dst, | 442 | nents = sg_nents_for_len(areq->dst, total); |
430 | sg_count(areq->dst, total), | 443 | if (unlikely(nents < 0)) { |
431 | DMA_FROM_DEVICE); | 444 | dev_err(engine->dev, "Invalid number of dst SG.\n");
445 | return; | ||
446 | } | ||
447 | dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE); | ||
432 | } else | 448 | } else |
433 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); | 449 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); |
434 | 450 | ||
@@ -440,7 +456,12 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, | |||
440 | dma_addr_t ddt_addr, struct scatterlist *payload, | 456 | dma_addr_t ddt_addr, struct scatterlist *payload, |
441 | unsigned nbytes, enum dma_data_direction dir) | 457 | unsigned nbytes, enum dma_data_direction dir) |
442 | { | 458 | { |
443 | unsigned nents = sg_count(payload, nbytes); | 459 | int nents = sg_nents_for_len(payload, nbytes); |
460 | |||
461 | if (nents < 0) { | ||
462 | dev_err(req->engine->dev, "Invalid number of SG.\n"); | ||
463 | return; | ||
464 | } | ||
444 | 465 | ||
445 | dma_unmap_sg(req->engine->dev, payload, nents, dir); | 466 | dma_unmap_sg(req->engine->dev, payload, nents, dir); |
446 | dma_pool_free(req->engine->req_pool, ddt, ddt_addr); | 467 | dma_pool_free(req->engine->req_pool, ddt, ddt_addr); |
@@ -835,8 +856,7 @@ static int spacc_ablk_need_fallback(struct spacc_req *req) | |||
835 | 856 | ||
836 | static void spacc_ablk_complete(struct spacc_req *req) | 857 | static void spacc_ablk_complete(struct spacc_req *req) |
837 | { | 858 | { |
838 | struct ablkcipher_request *ablk_req = | 859 | struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req); |
839 | container_of(req->req, struct ablkcipher_request, base); | ||
840 | 860 | ||
841 | if (ablk_req->src != ablk_req->dst) { | 861 | if (ablk_req->src != ablk_req->dst) { |
842 | spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src, | 862 | spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src, |
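The last picoxcell hunk swaps an open-coded container_of() for ablkcipher_request_cast(); the helper is the same container_of() underneath, so this is a readability change only. A sketch of the equivalence (to_ablk_req() is a hypothetical rename of the same operation):

#include <linux/crypto.h>

/* Both forms recover the ablkcipher request that embeds the generic
 * async request as its .base member.
 */
static struct ablkcipher_request *
to_ablk_req(struct crypto_async_request *req)
{
        return container_of(req, struct ablkcipher_request, base);
}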
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index eefccf7b8be7..85b44e577684 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig | |||
@@ -22,6 +22,28 @@ config CRYPTO_DEV_QAT_DH895xCC | |||
22 | To compile this as a module, choose M here: the module | 22 | To compile this as a module, choose M here: the module |
23 | will be called qat_dh895xcc. | 23 | will be called qat_dh895xcc. |
24 | 24 | ||
25 | config CRYPTO_DEV_QAT_C3XXX | ||
26 | tristate "Support for Intel(R) C3XXX" | ||
27 | depends on X86 && PCI | ||
28 | select CRYPTO_DEV_QAT | ||
29 | help | ||
30 | Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology | ||
31 | for accelerating crypto and compression workloads. | ||
32 | |||
33 | To compile this as a module, choose M here: the module | ||
34 | will be called qat_c3xxx. | ||
35 | |||
36 | config CRYPTO_DEV_QAT_C62X | ||
37 | tristate "Support for Intel(R) C62X" | ||
38 | depends on X86 && PCI | ||
39 | select CRYPTO_DEV_QAT | ||
40 | help | ||
41 | Support for Intel(R) C62x with Intel(R) QuickAssist Technology | ||
42 | for accelerating crypto and compression workloads. | ||
43 | |||
44 | To compile this as a module, choose M here: the module | ||
45 | will be called qat_c62x. | ||
46 | |||
25 | config CRYPTO_DEV_QAT_DH895xCCVF | 47 | config CRYPTO_DEV_QAT_DH895xCCVF |
26 | tristate "Support for Intel(R) DH895xCC Virtual Function" | 48 | tristate "Support for Intel(R) DH895xCC Virtual Function" |
27 | depends on X86 && PCI | 49 | depends on X86 && PCI |
@@ -34,3 +56,27 @@ config CRYPTO_DEV_QAT_DH895xCCVF | |||
34 | 56 | ||
35 | To compile this as a module, choose M here: the module | 57 | To compile this as a module, choose M here: the module |
36 | will be called qat_dh895xccvf. | 58 | will be called qat_dh895xccvf. |
59 | |||
60 | config CRYPTO_DEV_QAT_C3XXXVF | ||
61 | tristate "Support for Intel(R) C3XXX Virtual Function" | ||
62 | depends on X86 && PCI | ||
63 | select PCI_IOV | ||
64 | select CRYPTO_DEV_QAT | ||
65 | help | ||
66 | Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology | ||
67 | Virtual Function for accelerating crypto and compression workloads. | ||
68 | |||
69 | To compile this as a module, choose M here: the module | ||
70 | will be called qat_c3xxxvf. | ||
71 | |||
72 | config CRYPTO_DEV_QAT_C62XVF | ||
73 | tristate "Support for Intel(R) C62X Virtual Function" | ||
74 | depends on X86 && PCI | ||
75 | select PCI_IOV | ||
76 | select CRYPTO_DEV_QAT | ||
77 | help | ||
78 | Support for Intel(R) C62x with Intel(R) QuickAssist Technology | ||
79 | Virtual Function for accelerating crypto and compression workloads. | ||
80 | |||
81 | To compile this as a module, choose M here: the module | ||
82 | will be called qat_c62xvf. | ||
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index a3ce0b70e32f..8265106f1c8e 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile | |||
@@ -1,3 +1,7 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ | 1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ |
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ | 2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ |
3 | obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ | ||
4 | obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ | ||
3 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ | 5 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ |
6 | obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ | ||
7 | obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ | ||
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile new file mode 100644 index 000000000000..8f5fd4838a96 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | ccflags-y := -I$(src)/../qat_common | ||
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o | ||
3 | qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o | ||
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c new file mode 100644 index 000000000000..c5bd5a9abc4d --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | |||
@@ -0,0 +1,238 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <adf_accel_devices.h> | ||
48 | #include <adf_common_drv.h> | ||
49 | #include <adf_pf2vf_msg.h> | ||
50 | #include "adf_c3xxx_hw_data.h" | ||
51 | |||
52 | /* Worker thread to service arbiter mappings based on dev SKUs */ | ||
53 | static const u32 thrd_to_arb_map_6_me_sku[] = { | ||
54 | 0x12222AAA, 0x11222AAA, 0x12222AAA, | ||
55 | 0x11222AAA, 0x12222AAA, 0x11222AAA | ||
56 | }; | ||
57 | |||
58 | static struct adf_hw_device_class c3xxx_class = { | ||
59 | .name = ADF_C3XXX_DEVICE_NAME, | ||
60 | .type = DEV_C3XXX, | ||
61 | .instances = 0 | ||
62 | }; | ||
63 | |||
64 | static u32 get_accel_mask(u32 fuse) | ||
65 | { | ||
66 | return (~fuse) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET & | ||
67 | ADF_C3XXX_ACCELERATORS_MASK; | ||
68 | } | ||
69 | |||
70 | static u32 get_ae_mask(u32 fuse) | ||
71 | { | ||
72 | return (~fuse) & ADF_C3XXX_ACCELENGINES_MASK; | ||
73 | } | ||
74 | |||
75 | static u32 get_num_accels(struct adf_hw_device_data *self) | ||
76 | { | ||
77 | u32 i, ctr = 0; | ||
78 | |||
79 | if (!self || !self->accel_mask) | ||
80 | return 0; | ||
81 | |||
82 | for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) { | ||
83 | if (self->accel_mask & (1 << i)) | ||
84 | ctr++; | ||
85 | } | ||
86 | return ctr; | ||
87 | } | ||
88 | |||
89 | static u32 get_num_aes(struct adf_hw_device_data *self) | ||
90 | { | ||
91 | u32 i, ctr = 0; | ||
92 | |||
93 | if (!self || !self->ae_mask) | ||
94 | return 0; | ||
95 | |||
96 | for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) { | ||
97 | if (self->ae_mask & (1 << i)) | ||
98 | ctr++; | ||
99 | } | ||
100 | return ctr; | ||
101 | } | ||
102 | |||
103 | static u32 get_misc_bar_id(struct adf_hw_device_data *self) | ||
104 | { | ||
105 | return ADF_C3XXX_PMISC_BAR; | ||
106 | } | ||
107 | |||
108 | static u32 get_etr_bar_id(struct adf_hw_device_data *self) | ||
109 | { | ||
110 | return ADF_C3XXX_ETR_BAR; | ||
111 | } | ||
112 | |||
113 | static u32 get_sram_bar_id(struct adf_hw_device_data *self) | ||
114 | { | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static enum dev_sku_info get_sku(struct adf_hw_device_data *self) | ||
119 | { | ||
120 | int aes = get_num_aes(self); | ||
121 | |||
122 | if (aes == 6) | ||
123 | return DEV_SKU_4; | ||
124 | |||
125 | return DEV_SKU_UNKNOWN; | ||
126 | } | ||
127 | |||
128 | static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | ||
129 | u32 const **arb_map_config) | ||
130 | { | ||
131 | switch (accel_dev->accel_pci_dev.sku) { | ||
132 | case DEV_SKU_4: | ||
133 | *arb_map_config = thrd_to_arb_map_6_me_sku; | ||
134 | break; | ||
135 | default: | ||
136 | dev_err(&GET_DEV(accel_dev), | ||
137 | "The configuration doesn't match any SKU"); | ||
138 | *arb_map_config = NULL; | ||
139 | } | ||
140 | } | ||
141 | |||
142 | static u32 get_pf2vf_offset(u32 i) | ||
143 | { | ||
144 | return ADF_C3XXX_PF2VF_OFFSET(i); | ||
145 | } | ||
146 | |||
147 | static u32 get_vintmsk_offset(u32 i) | ||
148 | { | ||
149 | return ADF_C3XXX_VINTMSK_OFFSET(i); | ||
150 | } | ||
151 | |||
152 | static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) | ||
153 | { | ||
154 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
155 | struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR]; | ||
156 | void __iomem *csr = misc_bar->virt_addr; | ||
157 | unsigned int val, i; | ||
158 | |||
159 | /* Enable Accel Engine error detection & correction */ | ||
160 | for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { | ||
161 | val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i)); | ||
162 | val |= ADF_C3XXX_ENABLE_AE_ECC_ERR; | ||
163 | ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val); | ||
164 | val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i)); | ||
165 | val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR; | ||
166 | ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val); | ||
167 | } | ||
168 | |||
169 | /* Enable shared memory error detection & correction */ | ||
170 | for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { | ||
171 | val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i)); | ||
172 | val |= ADF_C3XXX_ERRSSMSH_EN; | ||
173 | ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val); | ||
174 | val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i)); | ||
175 | val |= ADF_C3XXX_ERRSSMSH_EN; | ||
176 | ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val); | ||
177 | } | ||
178 | } | ||
179 | |||
180 | static void adf_enable_ints(struct adf_accel_dev *accel_dev) | ||
181 | { | ||
182 | void __iomem *addr; | ||
183 | |||
184 | addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr; | ||
185 | |||
186 | /* Enable bundle and misc interrupts */ | ||
187 | ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET, | ||
188 | ADF_C3XXX_SMIA0_MASK); | ||
189 | ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET, | ||
190 | ADF_C3XXX_SMIA1_MASK); | ||
191 | } | ||
192 | |||
193 | static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) | ||
194 | { | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) | ||
199 | { | ||
200 | hw_data->dev_class = &c3xxx_class; | ||
201 | hw_data->instance_id = c3xxx_class.instances++; | ||
202 | hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS; | ||
203 | hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS; | ||
204 | hw_data->num_logical_accel = 1; | ||
205 | hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES; | ||
206 | hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET; | ||
207 | hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK; | ||
208 | hw_data->alloc_irq = adf_isr_resource_alloc; | ||
209 | hw_data->free_irq = adf_isr_resource_free; | ||
210 | hw_data->enable_error_correction = adf_enable_error_correction; | ||
211 | hw_data->get_accel_mask = get_accel_mask; | ||
212 | hw_data->get_ae_mask = get_ae_mask; | ||
213 | hw_data->get_num_accels = get_num_accels; | ||
214 | hw_data->get_num_aes = get_num_aes; | ||
215 | hw_data->get_sram_bar_id = get_sram_bar_id; | ||
216 | hw_data->get_etr_bar_id = get_etr_bar_id; | ||
217 | hw_data->get_misc_bar_id = get_misc_bar_id; | ||
218 | hw_data->get_pf2vf_offset = get_pf2vf_offset; | ||
219 | hw_data->get_vintmsk_offset = get_vintmsk_offset; | ||
220 | hw_data->get_sku = get_sku; | ||
221 | hw_data->fw_name = ADF_C3XXX_FW; | ||
222 | hw_data->fw_mmp_name = ADF_C3XXX_MMP; | ||
223 | hw_data->init_admin_comms = adf_init_admin_comms; | ||
224 | hw_data->exit_admin_comms = adf_exit_admin_comms; | ||
225 | hw_data->disable_iov = adf_disable_sriov; | ||
226 | hw_data->send_admin_init = adf_send_admin_init; | ||
227 | hw_data->init_arb = adf_init_arb; | ||
228 | hw_data->exit_arb = adf_exit_arb; | ||
229 | hw_data->get_arb_mapping = adf_get_arbiter_mapping; | ||
230 | hw_data->enable_ints = adf_enable_ints; | ||
231 | hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; | ||
232 | hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; | ||
233 | } | ||
234 | |||
235 | void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data) | ||
236 | { | ||
237 | hw_data->dev_class->instances--; | ||
238 | } | ||
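For context, the get_num_accels()/get_num_aes() loops above are population counts over a fuse-derived bitmap; an equivalent formulation with hweight32() is sketched below (count_units() is illustrative, not part of the driver):

#include <linux/bitops.h>

/* Count the enabled units in the low max_units bits of the mask. */
static u32 count_units(u32 unit_mask, unsigned int max_units)
{
        return hweight32(unit_mask & GENMASK(max_units - 1, 0));
}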
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h new file mode 100644 index 000000000000..2f2681d3458a --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_C3XXX_HW_DATA_H_ | ||
48 | #define ADF_C3XXX_HW_DATA_H_ | ||
49 | |||
50 | /* PCIe configuration space */ | ||
51 | #define ADF_C3XXX_PMISC_BAR 0 | ||
52 | #define ADF_C3XXX_ETR_BAR 1 | ||
53 | #define ADF_C3XXX_RX_RINGS_OFFSET 8 | ||
54 | #define ADF_C3XXX_TX_RINGS_MASK 0xFF | ||
55 | #define ADF_C3XXX_MAX_ACCELERATORS 3 | ||
56 | #define ADF_C3XXX_MAX_ACCELENGINES 6 | ||
57 | #define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16 | ||
58 | #define ADF_C3XXX_ACCELERATORS_MASK 0x3 | ||
59 | #define ADF_C3XXX_ACCELENGINES_MASK 0x3F | ||
60 | #define ADF_C3XXX_ETR_MAX_BANKS 16 | ||
61 | #define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) | ||
62 | #define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) | ||
63 | #define ADF_C3XXX_SMIA0_MASK 0xFFFF | ||
64 | #define ADF_C3XXX_SMIA1_MASK 0x1 | ||
65 | /* Error detection and correction */ | ||
66 | #define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) | ||
67 | #define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) | ||
68 | #define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28) | ||
69 | #define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) | ||
70 | #define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18) | ||
71 | #define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10) | ||
72 | #define ADF_C3XXX_ERRSSMSH_EN BIT(3) | ||
73 | |||
74 | #define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) | ||
75 | #define ADF_C3XXX_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) | ||
76 | |||
77 | /* Firmware Binary */ | ||
78 | #define ADF_C3XXX_FW "qat_c3xxx.bin" | ||
79 | #define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin" | ||
80 | |||
81 | void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data); | ||
82 | void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data); | ||
83 | #endif | ||
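To make the fuse-to-mask derivation in adf_c3xxx_hw_data.c concrete: a set fuse bit disables a unit, so the driver inverts the fuse word, shifts the accelerator field down by ADF_C3XXX_ACCELERATORS_REG_OFFSET, and clamps to the field width. A worked example with an illustrative fuse value:

#include <linux/types.h>
#include "adf_c3xxx_hw_data.h"

/* Worked example (fuse value illustrative):
 *   fuse                = 0x00010000   -> accelerator 0 fused off
 *   ~fuse               = 0xfffeffff
 *   >> 16 (REG_OFFSET)  = 0x0000fffe
 *   &  0x3 (ACCEL mask) = 0x2          -> only accelerator 1 enabled
 */
static u32 example_accel_mask(void)
{
        u32 fuse = 0x00010000;

        return (~fuse >> ADF_C3XXX_ACCELERATORS_REG_OFFSET) &
                ADF_C3XXX_ACCELERATORS_MASK;
}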
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c new file mode 100644 index 000000000000..e13bd08ddd1e --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c | |||
@@ -0,0 +1,335 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/types.h> | ||
52 | #include <linux/fs.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/errno.h> | ||
55 | #include <linux/device.h> | ||
56 | #include <linux/dma-mapping.h> | ||
57 | #include <linux/platform_device.h> | ||
58 | #include <linux/workqueue.h> | ||
59 | #include <linux/io.h> | ||
60 | #include <adf_accel_devices.h> | ||
61 | #include <adf_common_drv.h> | ||
62 | #include <adf_cfg.h> | ||
63 | #include "adf_c3xxx_hw_data.h" | ||
64 | |||
65 | #define ADF_SYSTEM_DEVICE(device_id) \ | ||
66 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | ||
67 | |||
68 | static const struct pci_device_id adf_pci_tbl[] = { | ||
69 | ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID), | ||
70 | {0,} | ||
71 | }; | ||
72 | MODULE_DEVICE_TABLE(pci, adf_pci_tbl); | ||
73 | |||
74 | static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); | ||
75 | static void adf_remove(struct pci_dev *dev); | ||
76 | |||
77 | static struct pci_driver adf_driver = { | ||
78 | .id_table = adf_pci_tbl, | ||
79 | .name = ADF_C3XXX_DEVICE_NAME, | ||
80 | .probe = adf_probe, | ||
81 | .remove = adf_remove, | ||
82 | .sriov_configure = adf_sriov_configure, | ||
83 | }; | ||
84 | |||
85 | static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) | ||
86 | { | ||
87 | pci_release_regions(accel_dev->accel_pci_dev.pci_dev); | ||
88 | pci_disable_device(accel_dev->accel_pci_dev.pci_dev); | ||
89 | } | ||
90 | |||
91 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | ||
92 | { | ||
93 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; | ||
94 | int i; | ||
95 | |||
96 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | ||
97 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | ||
98 | |||
99 | if (bar->virt_addr) | ||
100 | pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); | ||
101 | } | ||
102 | |||
103 | if (accel_dev->hw_device) { | ||
104 | switch (accel_pci_dev->pci_dev->device) { | ||
105 | case ADF_C3XXX_PCI_DEVICE_ID: | ||
106 | adf_clean_hw_data_c3xxx(accel_dev->hw_device); | ||
107 | break; | ||
108 | default: | ||
109 | break; | ||
110 | } | ||
111 | kfree(accel_dev->hw_device); | ||
112 | accel_dev->hw_device = NULL; | ||
113 | } | ||
114 | adf_cfg_dev_remove(accel_dev); | ||
115 | debugfs_remove(accel_dev->debugfs_dir); | ||
116 | adf_devmgr_rm_dev(accel_dev, NULL); | ||
117 | } | ||
118 | |||
119 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
120 | { | ||
121 | struct adf_accel_dev *accel_dev; | ||
122 | struct adf_accel_pci *accel_pci_dev; | ||
123 | struct adf_hw_device_data *hw_data; | ||
124 | char name[ADF_DEVICE_NAME_LENGTH]; | ||
125 | unsigned int i, bar_nr; | ||
126 | int ret, bar_mask; | ||
127 | |||
128 | switch (ent->device) { | ||
129 | case ADF_C3XXX_PCI_DEVICE_ID: | ||
130 | break; | ||
131 | default: | ||
132 | dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); | ||
133 | return -ENODEV; | ||
134 | } | ||
135 | |||
136 | if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { | ||
137 | /* If the accelerator is connected to a node with no memory | ||
138 | * there is no point in using the accelerator since the remote | ||
139 | * memory transaction will be very slow. */ | ||
140 | dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
144 | accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, | ||
145 | dev_to_node(&pdev->dev)); | ||
146 | if (!accel_dev) | ||
147 | return -ENOMEM; | ||
148 | |||
149 | INIT_LIST_HEAD(&accel_dev->crypto_list); | ||
150 | accel_pci_dev = &accel_dev->accel_pci_dev; | ||
151 | accel_pci_dev->pci_dev = pdev; | ||
152 | |||
153 | /* Add accel device to accel table. | ||
154 | * This must be done before adf_cleanup_accel() is called. */ | ||
155 | if (adf_devmgr_add_dev(accel_dev, NULL)) { | ||
156 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); | ||
157 | kfree(accel_dev); | ||
158 | return -EFAULT; | ||
159 | } | ||
160 | |||
161 | accel_dev->owner = THIS_MODULE; | ||
162 | /* Allocate and configure device configuration structure */ | ||
163 | hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, | ||
164 | dev_to_node(&pdev->dev)); | ||
165 | if (!hw_data) { | ||
166 | ret = -ENOMEM; | ||
167 | goto out_err; | ||
168 | } | ||
169 | |||
170 | accel_dev->hw_device = hw_data; | ||
171 | adf_init_hw_data_c3xxx(accel_dev->hw_device); | ||
172 | pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); | ||
173 | pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, | ||
174 | &hw_data->fuses); | ||
175 | |||
176 | /* Get Accelerators and Accelerators Engines masks */ | ||
177 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); | ||
178 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); | ||
179 | accel_pci_dev->sku = hw_data->get_sku(hw_data); | ||
180 | /* If the device has no acceleration engines then ignore it. */ | ||
181 | if (!hw_data->accel_mask || !hw_data->ae_mask || | ||
182 | ((~hw_data->ae_mask) & 0x01)) { | ||
183 | dev_err(&pdev->dev, "No acceleration units found"); | ||
184 | ret = -EFAULT; | ||
185 | goto out_err; | ||
186 | } | ||
187 | |||
188 | /* Create dev top level debugfs entry */ | ||
189 | snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", | ||
190 | ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, | ||
191 | pdev->bus->number, PCI_SLOT(pdev->devfn), | ||
192 | PCI_FUNC(pdev->devfn)); | ||
193 | |||
194 | accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); | ||
195 | if (!accel_dev->debugfs_dir) { | ||
196 | dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); | ||
197 | ret = -EINVAL; | ||
198 | goto out_err; | ||
199 | } | ||
200 | |||
201 | /* Create device configuration table */ | ||
202 | ret = adf_cfg_dev_add(accel_dev); | ||
203 | if (ret) | ||
204 | goto out_err; | ||
205 | |||
206 | /* enable PCI device */ | ||
207 | if (pci_enable_device(pdev)) { | ||
208 | ret = -EFAULT; | ||
209 | goto out_err; | ||
210 | } | ||
211 | |||
212 | /* set dma identifier */ | ||
213 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
214 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | ||
215 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | ||
216 | ret = -EFAULT; | ||
217 | goto out_err_disable; | ||
218 | } else { | ||
219 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
220 | } | ||
221 | |||
222 | } else { | ||
223 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
224 | } | ||
225 | |||
226 | if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) { | ||
227 | ret = -EFAULT; | ||
228 | goto out_err_disable; | ||
229 | } | ||
230 | |||
231 | /* Read accelerator capabilities mask */ | ||
232 | pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, | ||
233 | &hw_data->accel_capabilities_mask); | ||
234 | |||
235 | /* Find and map all the device's BARS */ | ||
236 | i = 0; | ||
237 | bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); | ||
238 | for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, | ||
239 | ADF_PCI_MAX_BARS * 2) { | ||
240 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; | ||
241 | |||
242 | bar->base_addr = pci_resource_start(pdev, bar_nr); | ||
243 | if (!bar->base_addr) | ||
244 | break; | ||
245 | bar->size = pci_resource_len(pdev, bar_nr); | ||
246 | bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); | ||
247 | if (!bar->virt_addr) { | ||
248 | dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); | ||
249 | ret = -EFAULT; | ||
250 | goto out_err_free_reg; | ||
251 | } | ||
252 | } | ||
253 | pci_set_master(pdev); | ||
254 | |||
255 | if (adf_enable_aer(accel_dev, &adf_driver)) { | ||
256 | dev_err(&pdev->dev, "Failed to enable aer\n"); | ||
257 | ret = -EFAULT; | ||
258 | goto out_err_free_reg; | ||
259 | } | ||
260 | |||
261 | if (pci_save_state(pdev)) { | ||
262 | dev_err(&pdev->dev, "Failed to save pci state\n"); | ||
263 | ret = -ENOMEM; | ||
264 | goto out_err_free_reg; | ||
265 | } | ||
266 | |||
267 | ret = qat_crypto_dev_config(accel_dev); | ||
268 | if (ret) | ||
269 | goto out_err_free_reg; | ||
270 | |||
271 | ret = adf_dev_init(accel_dev); | ||
272 | if (ret) | ||
273 | goto out_err_dev_shutdown; | ||
274 | |||
275 | ret = adf_dev_start(accel_dev); | ||
276 | if (ret) | ||
277 | goto out_err_dev_stop; | ||
278 | |||
279 | return ret; | ||
280 | |||
281 | out_err_dev_stop: | ||
282 | adf_dev_stop(accel_dev); | ||
283 | out_err_dev_shutdown: | ||
284 | adf_dev_shutdown(accel_dev); | ||
285 | out_err_free_reg: | ||
286 | pci_release_regions(accel_pci_dev->pci_dev); | ||
287 | out_err_disable: | ||
288 | pci_disable_device(accel_pci_dev->pci_dev); | ||
289 | out_err: | ||
290 | adf_cleanup_accel(accel_dev); | ||
291 | kfree(accel_dev); | ||
292 | return ret; | ||
293 | } | ||
294 | |||
295 | static void adf_remove(struct pci_dev *pdev) | ||
296 | { | ||
297 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
298 | |||
299 | if (!accel_dev) { | ||
300 | pr_err("QAT: Driver removal failed\n"); | ||
301 | return; | ||
302 | } | ||
303 | if (adf_dev_stop(accel_dev)) | ||
304 | dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); | ||
305 | |||
306 | adf_dev_shutdown(accel_dev); | ||
307 | adf_disable_aer(accel_dev); | ||
308 | adf_cleanup_accel(accel_dev); | ||
309 | adf_cleanup_pci_dev(accel_dev); | ||
310 | kfree(accel_dev); | ||
311 | } | ||
312 | |||
313 | static int __init adfdrv_init(void) | ||
314 | { | ||
315 | request_module("intel_qat"); | ||
316 | |||
317 | if (pci_register_driver(&adf_driver)) { | ||
318 | pr_err("QAT: Driver initialization failed\n"); | ||
319 | return -EFAULT; | ||
320 | } | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void __exit adfdrv_release(void) | ||
325 | { | ||
326 | pci_unregister_driver(&adf_driver); | ||
327 | } | ||
328 | |||
329 | module_init(adfdrv_init); | ||
330 | module_exit(adfdrv_release); | ||
331 | |||
332 | MODULE_LICENSE("Dual BSD/GPL"); | ||
333 | MODULE_AUTHOR("Intel"); | ||
334 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | ||
335 | MODULE_VERSION(ADF_DRV_VERSION); | ||
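One detail worth pulling out of the probe above is the DMA-mask fallback: try a 64-bit mask, drop to 32-bit if the platform rejects it, and keep the streaming and coherent masks in step. A hedged sketch using the combined helper (the probe itself sets the two masks separately; example_set_dma_masks() is illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* dma_set_mask_and_coherent() sets both masks at once and returns
 * 0 on success.
 */
static int example_set_dma_masks(struct pci_dev *pdev)
{
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                return 0;

        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                return 0;

        dev_err(&pdev->dev, "No usable DMA configuration\n");
        return -EIO;
}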
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile new file mode 100644 index 000000000000..16d178e2eaa2 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | ccflags-y := -I$(src)/../qat_common | ||
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o | ||
3 | qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o | ||
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c new file mode 100644 index 000000000000..1af321c2ce1a --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | |||
@@ -0,0 +1,173 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2015 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2015 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <adf_accel_devices.h> | ||
48 | #include <adf_pf2vf_msg.h> | ||
49 | #include <adf_common_drv.h> | ||
50 | #include "adf_c3xxxvf_hw_data.h" | ||
51 | |||
52 | static struct adf_hw_device_class c3xxxiov_class = { | ||
53 | .name = ADF_C3XXXVF_DEVICE_NAME, | ||
54 | .type = DEV_C3XXXVF, | ||
55 | .instances = 0 | ||
56 | }; | ||
57 | |||
58 | static u32 get_accel_mask(u32 fuse) | ||
59 | { | ||
60 | return ADF_C3XXXIOV_ACCELERATORS_MASK; | ||
61 | } | ||
62 | |||
63 | static u32 get_ae_mask(u32 fuse) | ||
64 | { | ||
65 | return ADF_C3XXXIOV_ACCELENGINES_MASK; | ||
66 | } | ||
67 | |||
68 | static u32 get_num_accels(struct adf_hw_device_data *self) | ||
69 | { | ||
70 | return ADF_C3XXXIOV_MAX_ACCELERATORS; | ||
71 | } | ||
72 | |||
73 | static u32 get_num_aes(struct adf_hw_device_data *self) | ||
74 | { | ||
75 | return ADF_C3XXXIOV_MAX_ACCELENGINES; | ||
76 | } | ||
77 | |||
78 | static u32 get_misc_bar_id(struct adf_hw_device_data *self) | ||
79 | { | ||
80 | return ADF_C3XXXIOV_PMISC_BAR; | ||
81 | } | ||
82 | |||
83 | static u32 get_etr_bar_id(struct adf_hw_device_data *self) | ||
84 | { | ||
85 | return ADF_C3XXXIOV_ETR_BAR; | ||
86 | } | ||
87 | |||
88 | static enum dev_sku_info get_sku(struct adf_hw_device_data *self) | ||
89 | { | ||
90 | return DEV_SKU_VF; | ||
91 | } | ||
92 | |||
93 | static u32 get_pf2vf_offset(u32 i) | ||
94 | { | ||
95 | return ADF_C3XXXIOV_PF2VF_OFFSET; | ||
96 | } | ||
97 | |||
98 | static u32 get_vintmsk_offset(u32 i) | ||
99 | { | ||
100 | return ADF_C3XXXIOV_VINTMSK_OFFSET; | ||
101 | } | ||
102 | |||
103 | static int adf_vf_int_noop(struct adf_accel_dev *accel_dev) | ||
104 | { | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static void adf_vf_void_noop(struct adf_accel_dev *accel_dev) | ||
109 | { | ||
110 | } | ||
111 | |||
112 | static int adf_vf2pf_init(struct adf_accel_dev *accel_dev) | ||
113 | { | ||
114 | u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | | ||
115 | (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); | ||
116 | |||
117 | if (adf_iov_putmsg(accel_dev, msg, 0)) { | ||
118 | dev_err(&GET_DEV(accel_dev), | ||
119 | "Failed to send Init event to PF\n"); | ||
120 | return -EFAULT; | ||
121 | } | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) | ||
126 | { | ||
127 | u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | | ||
128 | (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); | ||
129 | |||
130 | if (adf_iov_putmsg(accel_dev, msg, 0)) | ||
131 | dev_err(&GET_DEV(accel_dev), | ||
132 | "Failed to send Shutdown event to PF\n"); | ||
133 | } | ||
134 | |||
135 | void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data) | ||
136 | { | ||
137 | hw_data->dev_class = &c3xxxiov_class; | ||
138 | hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS; | ||
139 | hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS; | ||
140 | hw_data->num_logical_accel = 1; | ||
141 | hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES; | ||
142 | hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET; | ||
143 | hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK; | ||
144 | hw_data->alloc_irq = adf_vf_isr_resource_alloc; | ||
145 | hw_data->free_irq = adf_vf_isr_resource_free; | ||
146 | hw_data->enable_error_correction = adf_vf_void_noop; | ||
147 | hw_data->init_admin_comms = adf_vf_int_noop; | ||
148 | hw_data->exit_admin_comms = adf_vf_void_noop; | ||
149 | hw_data->send_admin_init = adf_vf2pf_init; | ||
150 | hw_data->init_arb = adf_vf_int_noop; | ||
151 | hw_data->exit_arb = adf_vf_void_noop; | ||
152 | hw_data->disable_iov = adf_vf2pf_shutdown; | ||
153 | hw_data->get_accel_mask = get_accel_mask; | ||
154 | hw_data->get_ae_mask = get_ae_mask; | ||
155 | hw_data->get_num_accels = get_num_accels; | ||
156 | hw_data->get_num_aes = get_num_aes; | ||
157 | hw_data->get_etr_bar_id = get_etr_bar_id; | ||
158 | hw_data->get_misc_bar_id = get_misc_bar_id; | ||
159 | hw_data->get_pf2vf_offset = get_pf2vf_offset; | ||
160 | hw_data->get_vintmsk_offset = get_vintmsk_offset; | ||
161 | hw_data->get_sku = get_sku; | ||
162 | hw_data->enable_ints = adf_vf_void_noop; | ||
163 | hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; | ||
164 | hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; | ||
165 | hw_data->dev_class->instances++; | ||
166 | adf_devmgr_update_class_index(hw_data); | ||
167 | } | ||
168 | |||
169 | void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data) | ||
170 | { | ||
171 | hw_data->dev_class->instances--; | ||
172 | adf_devmgr_update_class_index(hw_data); | ||
173 | } | ||
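
The VF-to-PF doorbell word built in adf_vf2pf_init() and adf_vf2pf_shutdown() above is plain bit packing: an origin flag OR'd with a message type shifted into its field. Below is a minimal standalone sketch of the same composition; the VF2PF_* values are placeholders for illustration, not the real definitions from adf_pf2vf_msg.h.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values -- the real ones live in adf_pf2vf_msg.h. */
#define VF2PF_MSGORIGIN_SYSTEM  0x80000000u  /* assumption for illustration */
#define VF2PF_MSGTYPE_SHIFT     22           /* assumption for illustration */
#define VF2PF_MSGTYPE_INIT      0x3u         /* assumption for illustration */
#define VF2PF_MSGTYPE_SHUTDOWN  0x4u         /* assumption for illustration */

/* Compose a VF2PF doorbell word the same way adf_vf2pf_init() does. */
static uint32_t vf2pf_msg(uint32_t type)
{
	return VF2PF_MSGORIGIN_SYSTEM | (type << VF2PF_MSGTYPE_SHIFT);
}

int main(void)
{
	printf("init:     0x%08x\n", vf2pf_msg(VF2PF_MSGTYPE_INIT));
	printf("shutdown: 0x%08x\n", vf2pf_msg(VF2PF_MSGTYPE_SHUTDOWN));
	return 0;
}
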
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h index e270e4a63d14..934f216acf39 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h | |||
@@ -3,7 +3,7 @@ | |||
3 | redistributing this file, you may do so under either license. | 3 | redistributing this file, you may do so under either license. |
4 | 4 | ||
5 | GPL LICENSE SUMMARY | 5 | GPL LICENSE SUMMARY |
6 | Copyright(c) 2014 Intel Corporation. | 6 | Copyright(c) 2015 Intel Corporation. |
7 | This program is free software; you can redistribute it and/or modify | 7 | This program is free software; you can redistribute it and/or modify |
8 | it under the terms of version 2 of the GNU General Public License as | 8 | it under the terms of version 2 of the GNU General Public License as |
9 | published by the Free Software Foundation. | 9 | published by the Free Software Foundation. |
@@ -17,7 +17,7 @@ | |||
17 | qat-linux@intel.com | 17 | qat-linux@intel.com |
18 | 18 | ||
19 | BSD LICENSE | 19 | BSD LICENSE |
20 | Copyright(c) 2014 Intel Corporation. | 20 | Copyright(c) 2015 Intel Corporation. |
21 | Redistribution and use in source and binary forms, with or without | 21 | Redistribution and use in source and binary forms, with or without |
22 | modification, are permitted provided that the following conditions | 22 | modification, are permitted provided that the following conditions |
23 | are met: | 23 | are met: |
@@ -44,14 +44,21 @@ | |||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
46 | */ | 46 | */ |
47 | #ifndef ADF_DH895xVF_DRV_H_ | 47 | #ifndef ADF_C3XXXVF_HW_DATA_H_ |
48 | #define ADF_DH895xVF_DRV_H_ | 48 | #define ADF_C3XXXVF_HW_DATA_H_ |
49 | #include <adf_accel_devices.h> | 49 | |
50 | #include <adf_transport.h> | 50 | #define ADF_C3XXXIOV_PMISC_BAR 1 |
51 | 51 | #define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1 | |
52 | void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); | 52 | #define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1 |
53 | void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); | 53 | #define ADF_C3XXXIOV_MAX_ACCELERATORS 1 |
54 | int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); | 54 | #define ADF_C3XXXIOV_MAX_ACCELENGINES 1 |
55 | void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); | 55 | #define ADF_C3XXXIOV_RX_RINGS_OFFSET 8 |
56 | void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); | 56 | #define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF |
57 | #define ADF_C3XXXIOV_ETR_BAR 0 | ||
58 | #define ADF_C3XXXIOV_ETR_MAX_BANKS 1 | ||
59 | #define ADF_C3XXXIOV_PF2VF_OFFSET 0x200 | ||
60 | #define ADF_C3XXXIOV_VINTMSK_OFFSET 0x208 | ||
61 | |||
62 | void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data); | ||
63 | void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data); | ||
57 | #endif | 64 | #endif |
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c new file mode 100644 index 000000000000..1ac4ae90e072 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | |||
@@ -0,0 +1,305 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/types.h> | ||
52 | #include <linux/fs.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/errno.h> | ||
55 | #include <linux/device.h> | ||
56 | #include <linux/dma-mapping.h> | ||
57 | #include <linux/platform_device.h> | ||
58 | #include <linux/workqueue.h> | ||
59 | #include <linux/io.h> | ||
60 | #include <adf_accel_devices.h> | ||
61 | #include <adf_common_drv.h> | ||
62 | #include <adf_cfg.h> | ||
63 | #include "adf_c3xxxvf_hw_data.h" | ||
64 | |||
65 | #define ADF_SYSTEM_DEVICE(device_id) \ | ||
66 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | ||
67 | |||
68 | static const struct pci_device_id adf_pci_tbl[] = { | ||
69 | ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID), | ||
70 | {0,} | ||
71 | }; | ||
72 | MODULE_DEVICE_TABLE(pci, adf_pci_tbl); | ||
73 | |||
74 | static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); | ||
75 | static void adf_remove(struct pci_dev *dev); | ||
76 | |||
77 | static struct pci_driver adf_driver = { | ||
78 | .id_table = adf_pci_tbl, | ||
79 | .name = ADF_C3XXXVF_DEVICE_NAME, | ||
80 | .probe = adf_probe, | ||
81 | .remove = adf_remove, | ||
82 | }; | ||
83 | |||
84 | static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) | ||
85 | { | ||
86 | pci_release_regions(accel_dev->accel_pci_dev.pci_dev); | ||
87 | pci_disable_device(accel_dev->accel_pci_dev.pci_dev); | ||
88 | } | ||
89 | |||
90 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | ||
91 | { | ||
92 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; | ||
93 | struct adf_accel_dev *pf; | ||
94 | int i; | ||
95 | |||
96 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | ||
97 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | ||
98 | |||
99 | if (bar->virt_addr) | ||
100 | pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); | ||
101 | } | ||
102 | |||
103 | if (accel_dev->hw_device) { | ||
104 | switch (accel_pci_dev->pci_dev->device) { | ||
105 | case ADF_C3XXXIOV_PCI_DEVICE_ID: | ||
106 | adf_clean_hw_data_c3xxxiov(accel_dev->hw_device); | ||
107 | break; | ||
108 | default: | ||
109 | break; | ||
110 | } | ||
111 | kfree(accel_dev->hw_device); | ||
112 | accel_dev->hw_device = NULL; | ||
113 | } | ||
114 | adf_cfg_dev_remove(accel_dev); | ||
115 | debugfs_remove(accel_dev->debugfs_dir); | ||
116 | pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); | ||
117 | adf_devmgr_rm_dev(accel_dev, pf); | ||
118 | } | ||
119 | |||
120 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
121 | { | ||
122 | struct adf_accel_dev *accel_dev; | ||
123 | struct adf_accel_dev *pf; | ||
124 | struct adf_accel_pci *accel_pci_dev; | ||
125 | struct adf_hw_device_data *hw_data; | ||
126 | char name[ADF_DEVICE_NAME_LENGTH]; | ||
127 | unsigned int i, bar_nr; | ||
128 | int ret, bar_mask; | ||
129 | |||
130 | switch (ent->device) { | ||
131 | case ADF_C3XXXIOV_PCI_DEVICE_ID: | ||
132 | break; | ||
133 | default: | ||
134 | dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); | ||
135 | return -ENODEV; | ||
136 | } | ||
137 | |||
138 | accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, | ||
139 | dev_to_node(&pdev->dev)); | ||
140 | if (!accel_dev) | ||
141 | return -ENOMEM; | ||
142 | |||
143 | accel_dev->is_vf = true; | ||
144 | pf = adf_devmgr_pci_to_accel_dev(pdev->physfn); | ||
145 | accel_pci_dev = &accel_dev->accel_pci_dev; | ||
146 | accel_pci_dev->pci_dev = pdev; | ||
147 | |||
148 | /* Add accel device to accel table */ | ||
149 | if (adf_devmgr_add_dev(accel_dev, pf)) { | ||
150 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); | ||
151 | kfree(accel_dev); | ||
152 | return -EFAULT; | ||
153 | } | ||
154 | INIT_LIST_HEAD(&accel_dev->crypto_list); | ||
155 | |||
156 | accel_dev->owner = THIS_MODULE; | ||
157 | /* Allocate and configure device configuration structure */ | ||
158 | hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, | ||
159 | dev_to_node(&pdev->dev)); | ||
160 | if (!hw_data) { | ||
161 | ret = -ENOMEM; | ||
162 | goto out_err; | ||
163 | } | ||
164 | accel_dev->hw_device = hw_data; | ||
165 | adf_init_hw_data_c3xxxiov(accel_dev->hw_device); | ||
166 | |||
167 | /* Get Accelerator and Accelerator Engine masks */ | ||
168 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); | ||
169 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); | ||
170 | accel_pci_dev->sku = hw_data->get_sku(hw_data); | ||
171 | |||
172 | /* Create dev top level debugfs entry */ | ||
173 | snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", | ||
174 | ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, | ||
175 | pdev->bus->number, PCI_SLOT(pdev->devfn), | ||
176 | PCI_FUNC(pdev->devfn)); | ||
177 | |||
178 | accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); | ||
179 | if (!accel_dev->debugfs_dir) { | ||
180 | dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); | ||
181 | ret = -EINVAL; | ||
182 | goto out_err; | ||
183 | } | ||
184 | |||
185 | /* Create device configuration table */ | ||
186 | ret = adf_cfg_dev_add(accel_dev); | ||
187 | if (ret) | ||
188 | goto out_err; | ||
189 | |||
190 | /* enable PCI device */ | ||
191 | if (pci_enable_device(pdev)) { | ||
192 | ret = -EFAULT; | ||
193 | goto out_err; | ||
194 | } | ||
195 | |||
196 | /* set dma identifier */ | ||
197 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
198 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | ||
199 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | ||
200 | ret = -EFAULT; | ||
201 | goto out_err_disable; | ||
202 | } else { | ||
203 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
204 | } | ||
205 | |||
206 | } else { | ||
207 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
208 | } | ||
209 | |||
210 | if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) { | ||
211 | ret = -EFAULT; | ||
212 | goto out_err_disable; | ||
213 | } | ||
214 | |||
215 | /* Find and map all the device's BARs */ | ||
216 | i = 0; | ||
217 | bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); | ||
218 | for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, | ||
219 | ADF_PCI_MAX_BARS * 2) { | ||
220 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; | ||
221 | |||
222 | bar->base_addr = pci_resource_start(pdev, bar_nr); | ||
223 | if (!bar->base_addr) | ||
224 | break; | ||
225 | bar->size = pci_resource_len(pdev, bar_nr); | ||
226 | bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); | ||
227 | if (!bar->virt_addr) { | ||
228 | dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); | ||
229 | ret = -EFAULT; | ||
230 | goto out_err_free_reg; | ||
231 | } | ||
232 | } | ||
233 | pci_set_master(pdev); | ||
234 | /* Completion for VF2PF request/response message exchange */ | ||
235 | init_completion(&accel_dev->vf.iov_msg_completion); | ||
236 | |||
237 | ret = qat_crypto_dev_config(accel_dev); | ||
238 | if (ret) | ||
239 | goto out_err_free_reg; | ||
240 | |||
241 | ret = adf_dev_init(accel_dev); | ||
242 | if (ret) | ||
243 | goto out_err_dev_shutdown; | ||
244 | |||
245 | ret = adf_dev_start(accel_dev); | ||
246 | if (ret) | ||
247 | goto out_err_dev_stop; | ||
248 | |||
249 | return ret; | ||
250 | |||
251 | out_err_dev_stop: | ||
252 | adf_dev_stop(accel_dev); | ||
253 | out_err_dev_shutdown: | ||
254 | adf_dev_shutdown(accel_dev); | ||
255 | out_err_free_reg: | ||
256 | pci_release_regions(accel_pci_dev->pci_dev); | ||
257 | out_err_disable: | ||
258 | pci_disable_device(accel_pci_dev->pci_dev); | ||
259 | out_err: | ||
260 | adf_cleanup_accel(accel_dev); | ||
261 | kfree(accel_dev); | ||
262 | return ret; | ||
263 | } | ||
264 | |||
265 | static void adf_remove(struct pci_dev *pdev) | ||
266 | { | ||
267 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
268 | |||
269 | if (!accel_dev) { | ||
270 | pr_err("QAT: Driver removal failed\n"); | ||
271 | return; | ||
272 | } | ||
273 | if (adf_dev_stop(accel_dev)) | ||
274 | dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); | ||
275 | |||
276 | adf_dev_shutdown(accel_dev); | ||
277 | adf_cleanup_accel(accel_dev); | ||
278 | adf_cleanup_pci_dev(accel_dev); | ||
279 | kfree(accel_dev); | ||
280 | } | ||
281 | |||
282 | static int __init adfdrv_init(void) | ||
283 | { | ||
284 | request_module("intel_qat"); | ||
285 | |||
286 | if (pci_register_driver(&adf_driver)) { | ||
287 | pr_err("QAT: Driver initialization failed\n"); | ||
288 | return -EFAULT; | ||
289 | } | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static void __exit adfdrv_release(void) | ||
294 | { | ||
295 | pci_unregister_driver(&adf_driver); | ||
296 | adf_clean_vf_map(true); | ||
297 | } | ||
298 | |||
299 | module_init(adfdrv_init); | ||
300 | module_exit(adfdrv_release); | ||
301 | |||
302 | MODULE_LICENSE("Dual BSD/GPL"); | ||
303 | MODULE_AUTHOR("Intel"); | ||
304 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | ||
305 | MODULE_VERSION(ADF_DRV_VERSION); | ||
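
adf_probe() above follows the usual kernel goto-unwind ladder: each failure jumps to a label that releases everything acquired so far, in reverse order of acquisition. Here is a minimal userspace sketch of the pattern, with hypothetical acquire()/release() pairs standing in for the PCI and ADF calls.

#include <stdio.h>

/* Hypothetical stand-ins for pci_enable_device(), pci_request_regions(),
 * adf_dev_init(), etc. acquire() returns 0 on success. */
static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe(void)
{
	int ret;

	ret = acquire("device");
	if (ret)
		goto out_err;
	ret = acquire("regions");
	if (ret)
		goto out_err_disable;
	ret = acquire("engine");
	if (ret)
		goto out_err_free_reg;
	return 0;

/* Unwind labels run in reverse order of acquisition. */
out_err_free_reg:
	release("regions");
out_err_disable:
	release("device");
out_err:
	return ret;
}

int main(void) { return probe(); }
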
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile new file mode 100644 index 000000000000..bd75ace59b76 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | ccflags-y := -I$(src)/../qat_common | ||
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o | ||
3 | qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o | ||
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c new file mode 100644 index 000000000000..879e04cae714 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <adf_accel_devices.h> | ||
48 | #include <adf_common_drv.h> | ||
49 | #include <adf_pf2vf_msg.h> | ||
50 | #include "adf_c62x_hw_data.h" | ||
51 | |||
52 | /* Worker-thread to arbiter mapping tables, one per device SKU */ | ||
53 | static const u32 thrd_to_arb_map_8_me_sku[] = { | ||
54 | 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, | ||
55 | 0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0 | ||
56 | }; | ||
57 | |||
58 | static const u32 thrd_to_arb_map_10_me_sku[] = { | ||
59 | 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, | ||
60 | 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA | ||
61 | }; | ||
62 | |||
63 | static struct adf_hw_device_class c62x_class = { | ||
64 | .name = ADF_C62X_DEVICE_NAME, | ||
65 | .type = DEV_C62X, | ||
66 | .instances = 0 | ||
67 | }; | ||
68 | |||
69 | static u32 get_accel_mask(u32 fuse) | ||
70 | { | ||
71 | return (~fuse) >> ADF_C62X_ACCELERATORS_REG_OFFSET & | ||
72 | ADF_C62X_ACCELERATORS_MASK; | ||
73 | } | ||
74 | |||
75 | static u32 get_ae_mask(u32 fuse) | ||
76 | { | ||
77 | return (~fuse) & ADF_C62X_ACCELENGINES_MASK; | ||
78 | } | ||
79 | |||
80 | static u32 get_num_accels(struct adf_hw_device_data *self) | ||
81 | { | ||
82 | u32 i, ctr = 0; | ||
83 | |||
84 | if (!self || !self->accel_mask) | ||
85 | return 0; | ||
86 | |||
87 | for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) { | ||
88 | if (self->accel_mask & (1 << i)) | ||
89 | ctr++; | ||
90 | } | ||
91 | return ctr; | ||
92 | } | ||
93 | |||
94 | static u32 get_num_aes(struct adf_hw_device_data *self) | ||
95 | { | ||
96 | u32 i, ctr = 0; | ||
97 | |||
98 | if (!self || !self->ae_mask) | ||
99 | return 0; | ||
100 | |||
101 | for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) { | ||
102 | if (self->ae_mask & (1 << i)) | ||
103 | ctr++; | ||
104 | } | ||
105 | return ctr; | ||
106 | } | ||
107 | |||
108 | static u32 get_misc_bar_id(struct adf_hw_device_data *self) | ||
109 | { | ||
110 | return ADF_C62X_PMISC_BAR; | ||
111 | } | ||
112 | |||
113 | static u32 get_etr_bar_id(struct adf_hw_device_data *self) | ||
114 | { | ||
115 | return ADF_C62X_ETR_BAR; | ||
116 | } | ||
117 | |||
118 | static u32 get_sram_bar_id(struct adf_hw_device_data *self) | ||
119 | { | ||
120 | return ADF_C62X_SRAM_BAR; | ||
121 | } | ||
122 | |||
123 | static enum dev_sku_info get_sku(struct adf_hw_device_data *self) | ||
124 | { | ||
125 | int aes = get_num_aes(self); | ||
126 | |||
127 | if (aes == 8) | ||
128 | return DEV_SKU_2; | ||
129 | else if (aes == 10) | ||
130 | return DEV_SKU_4; | ||
131 | |||
132 | return DEV_SKU_UNKNOWN; | ||
133 | } | ||
134 | |||
135 | static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | ||
136 | u32 const **arb_map_config) | ||
137 | { | ||
138 | switch (accel_dev->accel_pci_dev.sku) { | ||
139 | case DEV_SKU_2: | ||
140 | *arb_map_config = thrd_to_arb_map_8_me_sku; | ||
141 | break; | ||
142 | case DEV_SKU_4: | ||
143 | *arb_map_config = thrd_to_arb_map_10_me_sku; | ||
144 | break; | ||
145 | default: | ||
146 | dev_err(&GET_DEV(accel_dev), | ||
147 | "The configuration doesn't match any SKU"); | ||
148 | *arb_map_config = NULL; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | static u32 get_pf2vf_offset(u32 i) | ||
153 | { | ||
154 | return ADF_C62X_PF2VF_OFFSET(i); | ||
155 | } | ||
156 | |||
157 | static u32 get_vintmsk_offset(u32 i) | ||
158 | { | ||
159 | return ADF_C62X_VINTMSK_OFFSET(i); | ||
160 | } | ||
161 | |||
162 | static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) | ||
163 | { | ||
164 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
165 | struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR]; | ||
166 | void __iomem *csr = misc_bar->virt_addr; | ||
167 | unsigned int val, i; | ||
168 | |||
169 | /* Enable Accel Engine error detection & correction */ | ||
170 | for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { | ||
171 | val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i)); | ||
172 | val |= ADF_C62X_ENABLE_AE_ECC_ERR; | ||
173 | ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val); | ||
174 | val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i)); | ||
175 | val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR; | ||
176 | ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val); | ||
177 | } | ||
178 | |||
179 | /* Enable shared memory error detection & correction */ | ||
180 | for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { | ||
181 | val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i)); | ||
182 | val |= ADF_C62X_ERRSSMSH_EN; | ||
183 | ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val); | ||
184 | val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i)); | ||
185 | val |= ADF_C62X_ERRSSMSH_EN; | ||
186 | ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | static void adf_enable_ints(struct adf_accel_dev *accel_dev) | ||
191 | { | ||
192 | void __iomem *addr; | ||
193 | |||
194 | addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr; | ||
195 | |||
196 | /* Enable bundle and misc interrupts */ | ||
197 | ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET, | ||
198 | ADF_C62X_SMIA0_MASK); | ||
199 | ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET, | ||
200 | ADF_C62X_SMIA1_MASK); | ||
201 | } | ||
202 | |||
203 | static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) | ||
204 | { | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) | ||
209 | { | ||
210 | hw_data->dev_class = &c62x_class; | ||
211 | hw_data->instance_id = c62x_class.instances++; | ||
212 | hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS; | ||
213 | hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS; | ||
214 | hw_data->num_logical_accel = 1; | ||
215 | hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES; | ||
216 | hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET; | ||
217 | hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK; | ||
218 | hw_data->alloc_irq = adf_isr_resource_alloc; | ||
219 | hw_data->free_irq = adf_isr_resource_free; | ||
220 | hw_data->enable_error_correction = adf_enable_error_correction; | ||
221 | hw_data->get_accel_mask = get_accel_mask; | ||
222 | hw_data->get_ae_mask = get_ae_mask; | ||
223 | hw_data->get_num_accels = get_num_accels; | ||
224 | hw_data->get_num_aes = get_num_aes; | ||
225 | hw_data->get_sram_bar_id = get_sram_bar_id; | ||
226 | hw_data->get_etr_bar_id = get_etr_bar_id; | ||
227 | hw_data->get_misc_bar_id = get_misc_bar_id; | ||
228 | hw_data->get_pf2vf_offset = get_pf2vf_offset; | ||
229 | hw_data->get_vintmsk_offset = get_vintmsk_offset; | ||
230 | hw_data->get_sku = get_sku; | ||
231 | hw_data->fw_name = ADF_C62X_FW; | ||
232 | hw_data->fw_mmp_name = ADF_C62X_MMP; | ||
233 | hw_data->init_admin_comms = adf_init_admin_comms; | ||
234 | hw_data->exit_admin_comms = adf_exit_admin_comms; | ||
235 | hw_data->disable_iov = adf_disable_sriov; | ||
236 | hw_data->send_admin_init = adf_send_admin_init; | ||
237 | hw_data->init_arb = adf_init_arb; | ||
238 | hw_data->exit_arb = adf_exit_arb; | ||
239 | hw_data->get_arb_mapping = adf_get_arbiter_mapping; | ||
240 | hw_data->enable_ints = adf_enable_ints; | ||
241 | hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; | ||
242 | hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; | ||
243 | } | ||
244 | |||
245 | void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data) | ||
246 | { | ||
247 | hw_data->dev_class->instances--; | ||
248 | } | ||
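
The fuse decoding above is straightforward bit arithmetic: fuses are active-low, accelerator bits sit at ADF_C62X_ACCELERATORS_REG_OFFSET in the inverted fuse word, engine bits at offset zero, and get_num_accels()/get_num_aes() simply count the set bits. A standalone sketch with a hypothetical fuse value follows; the constants mirror the header that comes next.

#include <stdint.h>
#include <stdio.h>

#define ACCELERATORS_REG_OFFSET 16     /* ADF_C62X_ACCELERATORS_REG_OFFSET */
#define ACCELERATORS_MASK       0x1Fu  /* ADF_C62X_ACCELERATORS_MASK */
#define ACCELENGINES_MASK       0x3FFu /* ADF_C62X_ACCELENGINES_MASK */

int main(void)
{
	/* Hypothetical fuse word: bits 8 and 9 set, i.e. two engines
	 * fused off (fuses are active-low). */
	uint32_t fuse = 0x00000300;
	uint32_t accel_mask = (~fuse) >> ACCELERATORS_REG_OFFSET &
			      ACCELERATORS_MASK;
	uint32_t ae_mask = (~fuse) & ACCELENGINES_MASK;

	/* Prints accel_mask = 0x1f (5 accelerators) and ae_mask = 0xff
	 * (8 engines) -- the 8-engine case maps to DEV_SKU_2 in get_sku(). */
	printf("accel_mask = 0x%x (%d accelerators)\n",
	       accel_mask, __builtin_popcount(accel_mask));
	printf("ae_mask    = 0x%x (%d engines)\n",
	       ae_mask, __builtin_popcount(ae_mask));
	return 0;
}
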
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h new file mode 100644 index 000000000000..17a8a32d5c63 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h | |||
@@ -0,0 +1,84 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #ifndef ADF_C62X_HW_DATA_H_ | ||
48 | #define ADF_C62X_HW_DATA_H_ | ||
49 | |||
50 | /* PCIe configuration space */ | ||
51 | #define ADF_C62X_SRAM_BAR 0 | ||
52 | #define ADF_C62X_PMISC_BAR 1 | ||
53 | #define ADF_C62X_ETR_BAR 2 | ||
54 | #define ADF_C62X_RX_RINGS_OFFSET 8 | ||
55 | #define ADF_C62X_TX_RINGS_MASK 0xFF | ||
56 | #define ADF_C62X_MAX_ACCELERATORS 5 | ||
57 | #define ADF_C62X_MAX_ACCELENGINES 10 | ||
58 | #define ADF_C62X_ACCELERATORS_REG_OFFSET 16 | ||
59 | #define ADF_C62X_ACCELERATORS_MASK 0x1F | ||
60 | #define ADF_C62X_ACCELENGINES_MASK 0x3FF | ||
61 | #define ADF_C62X_ETR_MAX_BANKS 16 | ||
62 | #define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) | ||
63 | #define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) | ||
64 | #define ADF_C62X_SMIA0_MASK 0xFFFF | ||
65 | #define ADF_C62X_SMIA1_MASK 0x1 | ||
66 | /* Error detection and correction */ | ||
67 | #define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) | ||
68 | #define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) | ||
69 | #define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28) | ||
70 | #define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) | ||
71 | #define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18) | ||
72 | #define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10) | ||
73 | #define ADF_C62X_ERRSSMSH_EN BIT(3) | ||
74 | |||
75 | #define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) | ||
76 | #define ADF_C62X_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) | ||
77 | |||
78 | /* Firmware Binary */ | ||
79 | #define ADF_C62X_FW "qat_c62x.bin" | ||
80 | #define ADF_C62X_MMP "qat_c62x_mmp.bin" | ||
81 | |||
82 | void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data); | ||
83 | void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data); | ||
84 | #endif | ||
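
The PF2VF and VINTMSK macros above place one 32-bit CSR per VF at consecutive 4-byte offsets inside the PMISC BAR. A quick standalone check of the offsets the macros generate:

#include <stdio.h>

/* Same arithmetic as ADF_C62X_PF2VF_OFFSET()/ADF_C62X_VINTMSK_OFFSET(). */
#define PF2VF_OFFSET(i)   (0x3A000 + 0x280 + ((i) * 0x04))
#define VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("vf%d: pf2vf=0x%05x vintmsk=0x%05x\n",
		       i, PF2VF_OFFSET(i), VINTMSK_OFFSET(i));
	return 0;
}
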
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c new file mode 100644 index 000000000000..512c56509718 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c | |||
@@ -0,0 +1,335 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/types.h> | ||
52 | #include <linux/fs.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/errno.h> | ||
55 | #include <linux/device.h> | ||
56 | #include <linux/dma-mapping.h> | ||
57 | #include <linux/platform_device.h> | ||
58 | #include <linux/workqueue.h> | ||
59 | #include <linux/io.h> | ||
60 | #include <adf_accel_devices.h> | ||
61 | #include <adf_common_drv.h> | ||
62 | #include <adf_cfg.h> | ||
63 | #include "adf_c62x_hw_data.h" | ||
64 | |||
65 | #define ADF_SYSTEM_DEVICE(device_id) \ | ||
66 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | ||
67 | |||
68 | static const struct pci_device_id adf_pci_tbl[] = { | ||
69 | ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID), | ||
70 | {0,} | ||
71 | }; | ||
72 | MODULE_DEVICE_TABLE(pci, adf_pci_tbl); | ||
73 | |||
74 | static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); | ||
75 | static void adf_remove(struct pci_dev *dev); | ||
76 | |||
77 | static struct pci_driver adf_driver = { | ||
78 | .id_table = adf_pci_tbl, | ||
79 | .name = ADF_C62X_DEVICE_NAME, | ||
80 | .probe = adf_probe, | ||
81 | .remove = adf_remove, | ||
82 | .sriov_configure = adf_sriov_configure, | ||
83 | }; | ||
84 | |||
85 | static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) | ||
86 | { | ||
87 | pci_release_regions(accel_dev->accel_pci_dev.pci_dev); | ||
88 | pci_disable_device(accel_dev->accel_pci_dev.pci_dev); | ||
89 | } | ||
90 | |||
91 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | ||
92 | { | ||
93 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; | ||
94 | int i; | ||
95 | |||
96 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | ||
97 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | ||
98 | |||
99 | if (bar->virt_addr) | ||
100 | pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); | ||
101 | } | ||
102 | |||
103 | if (accel_dev->hw_device) { | ||
104 | switch (accel_pci_dev->pci_dev->device) { | ||
105 | case ADF_C62X_PCI_DEVICE_ID: | ||
106 | adf_clean_hw_data_c62x(accel_dev->hw_device); | ||
107 | break; | ||
108 | default: | ||
109 | break; | ||
110 | } | ||
111 | kfree(accel_dev->hw_device); | ||
112 | accel_dev->hw_device = NULL; | ||
113 | } | ||
114 | adf_cfg_dev_remove(accel_dev); | ||
115 | debugfs_remove(accel_dev->debugfs_dir); | ||
116 | adf_devmgr_rm_dev(accel_dev, NULL); | ||
117 | } | ||
118 | |||
119 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
120 | { | ||
121 | struct adf_accel_dev *accel_dev; | ||
122 | struct adf_accel_pci *accel_pci_dev; | ||
123 | struct adf_hw_device_data *hw_data; | ||
124 | char name[ADF_DEVICE_NAME_LENGTH]; | ||
125 | unsigned int i, bar_nr; | ||
126 | int ret, bar_mask; | ||
127 | |||
128 | switch (ent->device) { | ||
129 | case ADF_C62X_PCI_DEVICE_ID: | ||
130 | break; | ||
131 | default: | ||
132 | dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); | ||
133 | return -ENODEV; | ||
134 | } | ||
135 | |||
136 | if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { | ||
137 | /* If the accelerator is connected to a node with no memory, | ||
138 | * there is no point in using the accelerator since the remote | ||
139 | * memory transaction will be very slow. */ | ||
140 | dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
144 | accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, | ||
145 | dev_to_node(&pdev->dev)); | ||
146 | if (!accel_dev) | ||
147 | return -ENOMEM; | ||
148 | |||
149 | INIT_LIST_HEAD(&accel_dev->crypto_list); | ||
150 | accel_pci_dev = &accel_dev->accel_pci_dev; | ||
151 | accel_pci_dev->pci_dev = pdev; | ||
152 | |||
153 | /* Add accel device to accel table. | ||
154 | * This should be called before adf_cleanup_accel is called */ | ||
155 | if (adf_devmgr_add_dev(accel_dev, NULL)) { | ||
156 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); | ||
157 | kfree(accel_dev); | ||
158 | return -EFAULT; | ||
159 | } | ||
160 | |||
161 | accel_dev->owner = THIS_MODULE; | ||
162 | /* Allocate and configure device configuration structure */ | ||
163 | hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, | ||
164 | dev_to_node(&pdev->dev)); | ||
165 | if (!hw_data) { | ||
166 | ret = -ENOMEM; | ||
167 | goto out_err; | ||
168 | } | ||
169 | |||
170 | accel_dev->hw_device = hw_data; | ||
171 | adf_init_hw_data_c62x(accel_dev->hw_device); | ||
172 | pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); | ||
173 | pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, | ||
174 | &hw_data->fuses); | ||
175 | |||
176 | /* Get Accelerator and Accelerator Engine masks */ | ||
177 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); | ||
178 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); | ||
179 | accel_pci_dev->sku = hw_data->get_sku(hw_data); | ||
180 | /* If the device has no acceleration engines then ignore it. */ | ||
181 | if (!hw_data->accel_mask || !hw_data->ae_mask || | ||
182 | ((~hw_data->ae_mask) & 0x01)) { | ||
183 | dev_err(&pdev->dev, "No acceleration units found"); | ||
184 | ret = -EFAULT; | ||
185 | goto out_err; | ||
186 | } | ||
187 | |||
188 | /* Create dev top level debugfs entry */ | ||
189 | snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", | ||
190 | ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, | ||
191 | pdev->bus->number, PCI_SLOT(pdev->devfn), | ||
192 | PCI_FUNC(pdev->devfn)); | ||
193 | |||
194 | accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); | ||
195 | if (!accel_dev->debugfs_dir) { | ||
196 | dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); | ||
197 | ret = -EINVAL; | ||
198 | goto out_err; | ||
199 | } | ||
200 | |||
201 | /* Create device configuration table */ | ||
202 | ret = adf_cfg_dev_add(accel_dev); | ||
203 | if (ret) | ||
204 | goto out_err; | ||
205 | |||
206 | /* enable PCI device */ | ||
207 | if (pci_enable_device(pdev)) { | ||
208 | ret = -EFAULT; | ||
209 | goto out_err; | ||
210 | } | ||
211 | |||
212 | /* set dma identifier */ | ||
213 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
214 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | ||
215 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | ||
216 | ret = -EFAULT; | ||
217 | goto out_err_disable; | ||
218 | } else { | ||
219 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
220 | } | ||
221 | |||
222 | } else { | ||
223 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
224 | } | ||
225 | |||
226 | if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) { | ||
227 | ret = -EFAULT; | ||
228 | goto out_err_disable; | ||
229 | } | ||
230 | |||
231 | /* Read accelerator capabilities mask */ | ||
232 | pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, | ||
233 | &hw_data->accel_capabilities_mask); | ||
234 | |||
235 | /* Find and map all the device's BARs */ | ||
236 | i = 0; | ||
237 | bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); | ||
238 | for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, | ||
239 | ADF_PCI_MAX_BARS * 2) { | ||
240 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; | ||
241 | |||
242 | bar->base_addr = pci_resource_start(pdev, bar_nr); | ||
243 | if (!bar->base_addr) | ||
244 | break; | ||
245 | bar->size = pci_resource_len(pdev, bar_nr); | ||
246 | bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); | ||
247 | if (!bar->virt_addr) { | ||
248 | dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); | ||
249 | ret = -EFAULT; | ||
250 | goto out_err_free_reg; | ||
251 | } | ||
252 | } | ||
253 | pci_set_master(pdev); | ||
254 | |||
255 | if (adf_enable_aer(accel_dev, &adf_driver)) { | ||
256 | dev_err(&pdev->dev, "Failed to enable aer\n"); | ||
257 | ret = -EFAULT; | ||
258 | goto out_err_free_reg; | ||
259 | } | ||
260 | |||
261 | if (pci_save_state(pdev)) { | ||
262 | dev_err(&pdev->dev, "Failed to save pci state\n"); | ||
263 | ret = -ENOMEM; | ||
264 | goto out_err_free_reg; | ||
265 | } | ||
266 | |||
267 | ret = qat_crypto_dev_config(accel_dev); | ||
268 | if (ret) | ||
269 | goto out_err_free_reg; | ||
270 | |||
271 | ret = adf_dev_init(accel_dev); | ||
272 | if (ret) | ||
273 | goto out_err_dev_shutdown; | ||
274 | |||
275 | ret = adf_dev_start(accel_dev); | ||
276 | if (ret) | ||
277 | goto out_err_dev_stop; | ||
278 | |||
279 | return ret; | ||
280 | |||
281 | out_err_dev_stop: | ||
282 | adf_dev_stop(accel_dev); | ||
283 | out_err_dev_shutdown: | ||
284 | adf_dev_shutdown(accel_dev); | ||
285 | out_err_free_reg: | ||
286 | pci_release_regions(accel_pci_dev->pci_dev); | ||
287 | out_err_disable: | ||
288 | pci_disable_device(accel_pci_dev->pci_dev); | ||
289 | out_err: | ||
290 | adf_cleanup_accel(accel_dev); | ||
291 | kfree(accel_dev); | ||
292 | return ret; | ||
293 | } | ||
294 | |||
295 | static void adf_remove(struct pci_dev *pdev) | ||
296 | { | ||
297 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
298 | |||
299 | if (!accel_dev) { | ||
300 | pr_err("QAT: Driver removal failed\n"); | ||
301 | return; | ||
302 | } | ||
303 | if (adf_dev_stop(accel_dev)) | ||
304 | dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); | ||
305 | |||
306 | adf_dev_shutdown(accel_dev); | ||
307 | adf_disable_aer(accel_dev); | ||
308 | adf_cleanup_accel(accel_dev); | ||
309 | adf_cleanup_pci_dev(accel_dev); | ||
310 | kfree(accel_dev); | ||
311 | } | ||
312 | |||
313 | static int __init adfdrv_init(void) | ||
314 | { | ||
315 | request_module("intel_qat"); | ||
316 | |||
317 | if (pci_register_driver(&adf_driver)) { | ||
318 | pr_err("QAT: Driver initialization failed\n"); | ||
319 | return -EFAULT; | ||
320 | } | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void __exit adfdrv_release(void) | ||
325 | { | ||
326 | pci_unregister_driver(&adf_driver); | ||
327 | } | ||
328 | |||
329 | module_init(adfdrv_init); | ||
330 | module_exit(adfdrv_release); | ||
331 | |||
332 | MODULE_LICENSE("Dual BSD/GPL"); | ||
333 | MODULE_AUTHOR("Intel"); | ||
334 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | ||
335 | MODULE_VERSION(ADF_DRV_VERSION); | ||
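
The DMA setup in adf_probe() tries a 64-bit mask first and only falls back to 32-bit if that fails, keeping the coherent mask in step with the streaming mask. A standalone sketch of the same decision logic; dma_try_mask() is a hypothetical stand-in for pci_set_dma_mask().

#include <stdio.h>

/* Hypothetical stand-in for pci_set_dma_mask(): returns 0 on success. */
static int dma_try_mask(int bits)
{
	return bits <= 32 ? 0 : -1;   /* pretend the platform is 32-bit only */
}

int main(void)
{
	int bits;

	/* Same fallback order as the probe routine: 64-bit first, then 32. */
	if (dma_try_mask(64) == 0)
		bits = 64;
	else if (dma_try_mask(32) == 0)
		bits = 32;
	else {
		fprintf(stderr, "No usable DMA configuration\n");
		return 1;
	}
	printf("streaming and coherent DMA masks set to %d bits\n", bits);
	return 0;
}
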
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile new file mode 100644 index 000000000000..ecd708c213b2 --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | ccflags-y := -I$(src)/../qat_common | ||
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o | ||
3 | qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o | ||
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c new file mode 100644 index 000000000000..baf4b509c892 --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c | |||
@@ -0,0 +1,173 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2015 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2015 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <adf_accel_devices.h> | ||
48 | #include <adf_pf2vf_msg.h> | ||
49 | #include <adf_common_drv.h> | ||
50 | #include "adf_c62xvf_hw_data.h" | ||
51 | |||
52 | static struct adf_hw_device_class c62xiov_class = { | ||
53 | .name = ADF_C62XVF_DEVICE_NAME, | ||
54 | .type = DEV_C62XVF, | ||
55 | .instances = 0 | ||
56 | }; | ||
57 | |||
58 | static u32 get_accel_mask(u32 fuse) | ||
59 | { | ||
60 | return ADF_C62XIOV_ACCELERATORS_MASK; | ||
61 | } | ||
62 | |||
63 | static u32 get_ae_mask(u32 fuse) | ||
64 | { | ||
65 | return ADF_C62XIOV_ACCELENGINES_MASK; | ||
66 | } | ||
67 | |||
68 | static u32 get_num_accels(struct adf_hw_device_data *self) | ||
69 | { | ||
70 | return ADF_C62XIOV_MAX_ACCELERATORS; | ||
71 | } | ||
72 | |||
73 | static u32 get_num_aes(struct adf_hw_device_data *self) | ||
74 | { | ||
75 | return ADF_C62XIOV_MAX_ACCELENGINES; | ||
76 | } | ||
77 | |||
78 | static u32 get_misc_bar_id(struct adf_hw_device_data *self) | ||
79 | { | ||
80 | return ADF_C62XIOV_PMISC_BAR; | ||
81 | } | ||
82 | |||
83 | static u32 get_etr_bar_id(struct adf_hw_device_data *self) | ||
84 | { | ||
85 | return ADF_C62XIOV_ETR_BAR; | ||
86 | } | ||
87 | |||
88 | static enum dev_sku_info get_sku(struct adf_hw_device_data *self) | ||
89 | { | ||
90 | return DEV_SKU_VF; | ||
91 | } | ||
92 | |||
93 | static u32 get_pf2vf_offset(u32 i) | ||
94 | { | ||
95 | return ADF_C62XIOV_PF2VF_OFFSET; | ||
96 | } | ||
97 | |||
98 | static u32 get_vintmsk_offset(u32 i) | ||
99 | { | ||
100 | return ADF_C62XIOV_VINTMSK_OFFSET; | ||
101 | } | ||
102 | |||
103 | static int adf_vf_int_noop(struct adf_accel_dev *accel_dev) | ||
104 | { | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static void adf_vf_void_noop(struct adf_accel_dev *accel_dev) | ||
109 | { | ||
110 | } | ||
111 | |||
112 | static int adf_vf2pf_init(struct adf_accel_dev *accel_dev) | ||
113 | { | ||
114 | u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | | ||
115 | (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); | ||
116 | |||
117 | if (adf_iov_putmsg(accel_dev, msg, 0)) { | ||
118 | dev_err(&GET_DEV(accel_dev), | ||
119 | "Failed to send Init event to PF\n"); | ||
120 | return -EFAULT; | ||
121 | } | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) | ||
126 | { | ||
127 | u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | | ||
128 | (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); | ||
129 | |||
130 | if (adf_iov_putmsg(accel_dev, msg, 0)) | ||
131 | dev_err(&GET_DEV(accel_dev), | ||
132 | "Failed to send Shutdown event to PF\n"); | ||
133 | } | ||
134 | |||
135 | void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data) | ||
136 | { | ||
137 | hw_data->dev_class = &c62xiov_class; | ||
138 | hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS; | ||
139 | hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS; | ||
140 | hw_data->num_logical_accel = 1; | ||
141 | hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES; | ||
142 | hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET; | ||
143 | hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK; | ||
144 | hw_data->alloc_irq = adf_vf_isr_resource_alloc; | ||
145 | hw_data->free_irq = adf_vf_isr_resource_free; | ||
146 | hw_data->enable_error_correction = adf_vf_void_noop; | ||
147 | hw_data->init_admin_comms = adf_vf_int_noop; | ||
148 | hw_data->exit_admin_comms = adf_vf_void_noop; | ||
149 | hw_data->send_admin_init = adf_vf2pf_init; | ||
150 | hw_data->init_arb = adf_vf_int_noop; | ||
151 | hw_data->exit_arb = adf_vf_void_noop; | ||
152 | hw_data->disable_iov = adf_vf2pf_shutdown; | ||
153 | hw_data->get_accel_mask = get_accel_mask; | ||
154 | hw_data->get_ae_mask = get_ae_mask; | ||
155 | hw_data->get_num_accels = get_num_accels; | ||
156 | hw_data->get_num_aes = get_num_aes; | ||
157 | hw_data->get_etr_bar_id = get_etr_bar_id; | ||
158 | hw_data->get_misc_bar_id = get_misc_bar_id; | ||
159 | hw_data->get_pf2vf_offset = get_pf2vf_offset; | ||
160 | hw_data->get_vintmsk_offset = get_vintmsk_offset; | ||
161 | hw_data->get_sku = get_sku; | ||
162 | hw_data->enable_ints = adf_vf_void_noop; | ||
163 | hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; | ||
164 | hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; | ||
165 | hw_data->dev_class->instances++; | ||
166 | adf_devmgr_update_class_index(hw_data); | ||
167 | } | ||
168 | |||
169 | void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data) | ||
170 | { | ||
171 | hw_data->dev_class->instances--; | ||
172 | adf_devmgr_update_class_index(hw_data); | ||
173 | } | ||
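
As with the C3xxx VF above, the hardware description is an ops table: the per-device init routine fills a struct of function pointers, and callbacks a VF does not need (admin comms, arbiter setup) point at noop stubs. A minimal sketch of the pattern:

#include <stdio.h>

/* Simplified stand-in for adf_hw_device_data's callback table. */
struct hw_ops {
	int  (*init_admin)(void);
	void (*exit_admin)(void);
};

static int  int_noop(void)  { return 0; }
static void void_noop(void) { }

static void init_vf_ops(struct hw_ops *ops)
{
	ops->init_admin = int_noop;   /* VFs have no admin interface */
	ops->exit_admin = void_noop;
}

int main(void)
{
	struct hw_ops ops;

	init_vf_ops(&ops);
	printf("init_admin returned %d\n", ops.init_admin());
	ops.exit_admin();
	return 0;
}
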
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h index 85ff245bd1d8..a28d83e77422 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h | |||
@@ -3,7 +3,7 @@ | |||
3 | redistributing this file, you may do so under either license. | 3 | redistributing this file, you may do so under either license. |
4 | 4 | ||
5 | GPL LICENSE SUMMARY | 5 | GPL LICENSE SUMMARY |
6 | Copyright(c) 2014 Intel Corporation. | 6 | Copyright(c) 2015 Intel Corporation. |
7 | This program is free software; you can redistribute it and/or modify | 7 | This program is free software; you can redistribute it and/or modify |
8 | it under the terms of version 2 of the GNU General Public License as | 8 | it under the terms of version 2 of the GNU General Public License as |
9 | published by the Free Software Foundation. | 9 | published by the Free Software Foundation. |
@@ -17,7 +17,7 @@ | |||
17 | qat-linux@intel.com | 17 | qat-linux@intel.com |
18 | 18 | ||
19 | BSD LICENSE | 19 | BSD LICENSE |
20 | Copyright(c) 2014 Intel Corporation. | 20 | Copyright(c) 2015 Intel Corporation. |
21 | Redistribution and use in source and binary forms, with or without | 21 | Redistribution and use in source and binary forms, with or without |
22 | modification, are permitted provided that the following conditions | 22 | modification, are permitted provided that the following conditions |
23 | are met: | 23 | are met: |
@@ -44,15 +44,21 @@ | |||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
46 | */ | 46 | */ |
47 | #ifndef ADF_DH895x_DRV_H_ | 47 | #ifndef ADF_C62XVF_HW_DATA_H_ |
48 | #define ADF_DH895x_DRV_H_ | 48 | #define ADF_C62XVF_HW_DATA_H_ |
49 | #include <adf_accel_devices.h> | 49 | |
50 | #include <adf_transport.h> | 50 | #define ADF_C62XIOV_PMISC_BAR 1 |
51 | 51 | #define ADF_C62XIOV_ACCELERATORS_MASK 0x1 | |
52 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | 52 | #define ADF_C62XIOV_ACCELENGINES_MASK 0x1 |
53 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | 53 | #define ADF_C62XIOV_MAX_ACCELERATORS 1 |
54 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); | 54 | #define ADF_C62XIOV_MAX_ACCELENGINES 1 |
55 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev); | 55 | #define ADF_C62XIOV_RX_RINGS_OFFSET 8 |
56 | void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | 56 | #define ADF_C62XIOV_TX_RINGS_MASK 0xFF |
57 | uint32_t const **arb_map_config); | 57 | #define ADF_C62XIOV_ETR_BAR 0 |
58 | #define ADF_C62XIOV_ETR_MAX_BANKS 1 | ||
59 | #define ADF_C62XIOV_PF2VF_OFFSET 0x200 | ||
60 | #define ADF_C62XIOV_VINTMSK_OFFSET 0x208 | ||
61 | |||
62 | void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data); | ||
63 | void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data); | ||
58 | #endif | 64 | #endif |
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c new file mode 100644 index 000000000000..d2e4b928f3be --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c | |||
@@ -0,0 +1,305 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/kernel.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/types.h> | ||
52 | #include <linux/fs.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/errno.h> | ||
55 | #include <linux/device.h> | ||
56 | #include <linux/dma-mapping.h> | ||
57 | #include <linux/platform_device.h> | ||
58 | #include <linux/workqueue.h> | ||
59 | #include <linux/io.h> | ||
60 | #include <adf_accel_devices.h> | ||
61 | #include <adf_common_drv.h> | ||
62 | #include <adf_cfg.h> | ||
63 | #include "adf_c62xvf_hw_data.h" | ||
64 | |||
65 | #define ADF_SYSTEM_DEVICE(device_id) \ | ||
66 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | ||
67 | |||
68 | static const struct pci_device_id adf_pci_tbl[] = { | ||
69 | ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID), | ||
70 | {0,} | ||
71 | }; | ||
72 | MODULE_DEVICE_TABLE(pci, adf_pci_tbl); | ||
73 | |||
74 | static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); | ||
75 | static void adf_remove(struct pci_dev *dev); | ||
76 | |||
77 | static struct pci_driver adf_driver = { | ||
78 | .id_table = adf_pci_tbl, | ||
79 | .name = ADF_C62XVF_DEVICE_NAME, | ||
80 | .probe = adf_probe, | ||
81 | .remove = adf_remove, | ||
82 | }; | ||
83 | |||
84 | static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) | ||
85 | { | ||
86 | pci_release_regions(accel_dev->accel_pci_dev.pci_dev); | ||
87 | pci_disable_device(accel_dev->accel_pci_dev.pci_dev); | ||
88 | } | ||
89 | |||
90 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | ||
91 | { | ||
92 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; | ||
93 | struct adf_accel_dev *pf; | ||
94 | int i; | ||
95 | |||
96 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | ||
97 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | ||
98 | |||
99 | if (bar->virt_addr) | ||
100 | pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); | ||
101 | } | ||
102 | |||
103 | if (accel_dev->hw_device) { | ||
104 | switch (accel_pci_dev->pci_dev->device) { | ||
105 | case ADF_C62XIOV_PCI_DEVICE_ID: | ||
106 | adf_clean_hw_data_c62xiov(accel_dev->hw_device); | ||
107 | break; | ||
108 | default: | ||
109 | break; | ||
110 | } | ||
111 | kfree(accel_dev->hw_device); | ||
112 | accel_dev->hw_device = NULL; | ||
113 | } | ||
114 | adf_cfg_dev_remove(accel_dev); | ||
115 | debugfs_remove(accel_dev->debugfs_dir); | ||
116 | pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); | ||
117 | adf_devmgr_rm_dev(accel_dev, pf); | ||
118 | } | ||
119 | |||
120 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
121 | { | ||
122 | struct adf_accel_dev *accel_dev; | ||
123 | struct adf_accel_dev *pf; | ||
124 | struct adf_accel_pci *accel_pci_dev; | ||
125 | struct adf_hw_device_data *hw_data; | ||
126 | char name[ADF_DEVICE_NAME_LENGTH]; | ||
127 | unsigned int i, bar_nr; | ||
128 | int ret, bar_mask; | ||
129 | |||
130 | switch (ent->device) { | ||
131 | case ADF_C62XIOV_PCI_DEVICE_ID: | ||
132 | break; | ||
133 | default: | ||
134 | dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); | ||
135 | return -ENODEV; | ||
136 | } | ||
137 | |||
138 | accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, | ||
139 | dev_to_node(&pdev->dev)); | ||
140 | if (!accel_dev) | ||
141 | return -ENOMEM; | ||
142 | |||
143 | accel_dev->is_vf = true; | ||
144 | pf = adf_devmgr_pci_to_accel_dev(pdev->physfn); | ||
145 | accel_pci_dev = &accel_dev->accel_pci_dev; | ||
146 | accel_pci_dev->pci_dev = pdev; | ||
147 | |||
148 | /* Add accel device to accel table */ | ||
149 | if (adf_devmgr_add_dev(accel_dev, pf)) { | ||
150 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); | ||
151 | kfree(accel_dev); | ||
152 | return -EFAULT; | ||
153 | } | ||
154 | INIT_LIST_HEAD(&accel_dev->crypto_list); | ||
155 | |||
156 | accel_dev->owner = THIS_MODULE; | ||
157 | /* Allocate and configure device configuration structure */ | ||
158 | hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, | ||
159 | dev_to_node(&pdev->dev)); | ||
160 | if (!hw_data) { | ||
161 | ret = -ENOMEM; | ||
162 | goto out_err; | ||
163 | } | ||
164 | accel_dev->hw_device = hw_data; | ||
165 | adf_init_hw_data_c62xiov(accel_dev->hw_device); | ||
166 | |||
167 | /* Get accelerator and accelerator engine masks */ | ||
168 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); | ||
169 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); | ||
170 | accel_pci_dev->sku = hw_data->get_sku(hw_data); | ||
171 | |||
172 | /* Create dev top level debugfs entry */ | ||
173 | snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", | ||
174 | ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, | ||
175 | pdev->bus->number, PCI_SLOT(pdev->devfn), | ||
176 | PCI_FUNC(pdev->devfn)); | ||
177 | |||
178 | accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); | ||
179 | if (!accel_dev->debugfs_dir) { | ||
180 | dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); | ||
181 | ret = -EINVAL; | ||
182 | goto out_err; | ||
183 | } | ||
184 | |||
185 | /* Create device configuration table */ | ||
186 | ret = adf_cfg_dev_add(accel_dev); | ||
187 | if (ret) | ||
188 | goto out_err; | ||
189 | |||
190 | /* enable PCI device */ | ||
191 | if (pci_enable_device(pdev)) { | ||
192 | ret = -EFAULT; | ||
193 | goto out_err; | ||
194 | } | ||
195 | |||
196 | /* Set DMA masks */ | ||
197 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
198 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | ||
199 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | ||
200 | ret = -EFAULT; | ||
201 | goto out_err_disable; | ||
202 | } else { | ||
203 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
204 | } | ||
205 | |||
206 | } else { | ||
207 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
208 | } | ||
209 | |||
210 | if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) { | ||
211 | ret = -EFAULT; | ||
212 | goto out_err_disable; | ||
213 | } | ||
214 | |||
215 | /* Find and map all the device's BARs */ | ||
216 | i = 0; | ||
217 | bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); | ||
218 | for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, | ||
219 | ADF_PCI_MAX_BARS * 2) { | ||
220 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; | ||
221 | |||
222 | bar->base_addr = pci_resource_start(pdev, bar_nr); | ||
223 | if (!bar->base_addr) | ||
224 | break; | ||
225 | bar->size = pci_resource_len(pdev, bar_nr); | ||
226 | bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); | ||
227 | if (!bar->virt_addr) { | ||
228 | dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); | ||
229 | ret = -EFAULT; | ||
230 | goto out_err_free_reg; | ||
231 | } | ||
232 | } | ||
233 | pci_set_master(pdev); | ||
234 | /* Completion for VF2PF request/response message exchange */ | ||
235 | init_completion(&accel_dev->vf.iov_msg_completion); | ||
236 | |||
237 | ret = qat_crypto_dev_config(accel_dev); | ||
238 | if (ret) | ||
239 | goto out_err_free_reg; | ||
240 | |||
241 | ret = adf_dev_init(accel_dev); | ||
242 | if (ret) | ||
243 | goto out_err_dev_shutdown; | ||
244 | |||
245 | ret = adf_dev_start(accel_dev); | ||
246 | if (ret) | ||
247 | goto out_err_dev_stop; | ||
248 | |||
249 | return ret; | ||
250 | |||
251 | out_err_dev_stop: | ||
252 | adf_dev_stop(accel_dev); | ||
253 | out_err_dev_shutdown: | ||
254 | adf_dev_shutdown(accel_dev); | ||
255 | out_err_free_reg: | ||
256 | pci_release_regions(accel_pci_dev->pci_dev); | ||
257 | out_err_disable: | ||
258 | pci_disable_device(accel_pci_dev->pci_dev); | ||
259 | out_err: | ||
260 | adf_cleanup_accel(accel_dev); | ||
261 | kfree(accel_dev); | ||
262 | return ret; | ||
263 | } | ||
264 | |||
265 | static void adf_remove(struct pci_dev *pdev) | ||
266 | { | ||
267 | struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); | ||
268 | |||
269 | if (!accel_dev) { | ||
270 | pr_err("QAT: Driver removal failed\n"); | ||
271 | return; | ||
272 | } | ||
273 | if (adf_dev_stop(accel_dev)) | ||
274 | dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); | ||
275 | |||
276 | adf_dev_shutdown(accel_dev); | ||
277 | adf_cleanup_accel(accel_dev); | ||
278 | adf_cleanup_pci_dev(accel_dev); | ||
279 | kfree(accel_dev); | ||
280 | } | ||
281 | |||
282 | static int __init adfdrv_init(void) | ||
283 | { | ||
284 | request_module("intel_qat"); | ||
285 | |||
286 | if (pci_register_driver(&adf_driver)) { | ||
287 | pr_err("QAT: Driver initialization failed\n"); | ||
288 | return -EFAULT; | ||
289 | } | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static void __exit adfdrv_release(void) | ||
294 | { | ||
295 | pci_unregister_driver(&adf_driver); | ||
296 | adf_clean_vf_map(true); | ||
297 | } | ||
298 | |||
299 | module_init(adfdrv_init); | ||
300 | module_exit(adfdrv_release); | ||
301 | |||
302 | MODULE_LICENSE("Dual BSD/GPL"); | ||
303 | MODULE_AUTHOR("Intel"); | ||
304 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | ||
305 | MODULE_VERSION(ADF_DRV_VERSION); | ||
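
The probe routine above follows the standard PCI driver shape: negotiate a 64-bit DMA mask with a 32-bit fallback, then unwind already-acquired resources through goto labels in reverse order on any failure. A rough self-contained sketch of just that skeleton (the function and region names here are illustrative, not taken from the driver):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = pci_enable_device(pdev);

	if (ret)
		return ret;

	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ret = -EIO;
		goto out_disable;
	}

	ret = pci_request_regions(pdev, "sketch");
	if (ret)
		goto out_disable;

	pci_set_master(pdev);
	return 0;

out_disable:
	pci_disable_device(pdev);
	return ret;
}

Each label releases exactly what was acquired before the failing step, which is why the real probe above has a ladder of out_err_* targets.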
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 9e9e196c6d51..29c7c53d2845 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile | |||
@@ -4,10 +4,12 @@ $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \ | |||
4 | $(obj)/qat_rsaprivkey-asn1.h | 4 | $(obj)/qat_rsaprivkey-asn1.h |
5 | 5 | ||
6 | clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h | 6 | clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h |
7 | clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h | 7 | clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h |
8 | 8 | ||
9 | obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o | 9 | obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o |
10 | intel_qat-objs := adf_cfg.o \ | 10 | intel_qat-objs := adf_cfg.o \ |
11 | adf_isr.o \ | ||
12 | adf_vf_isr.o \ | ||
11 | adf_ctl_drv.o \ | 13 | adf_ctl_drv.o \ |
12 | adf_dev_mgr.o \ | 14 | adf_dev_mgr.o \ |
13 | adf_init.o \ | 15 | adf_init.o \ |
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index ca853d50b4b7..f96d427e502c 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h | |||
@@ -55,8 +55,20 @@ | |||
55 | 55 | ||
56 | #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" | 56 | #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" |
57 | #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" | 57 | #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" |
58 | #define ADF_C62X_DEVICE_NAME "c62x" | ||
59 | #define ADF_C62XVF_DEVICE_NAME "c62xvf" | ||
60 | #define ADF_C3XXX_DEVICE_NAME "c3xxx" | ||
61 | #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" | ||
58 | #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 | 62 | #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 |
59 | #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 | 63 | #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 |
64 | #define ADF_C62X_PCI_DEVICE_ID 0x37c8 | ||
65 | #define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9 | ||
66 | #define ADF_C3XXX_PCI_DEVICE_ID 0x19e2 | ||
67 | #define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3 | ||
68 | #define ADF_ERRSOU3 (0x3A000 + 0x0C) | ||
69 | #define ADF_ERRSOU5 (0x3A000 + 0xD8) | ||
70 | #define ADF_DEVICE_FUSECTL_OFFSET 0x40 | ||
71 | #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C | ||
60 | #define ADF_PCI_MAX_BARS 3 | 72 | #define ADF_PCI_MAX_BARS 3 |
61 | #define ADF_DEVICE_NAME_LENGTH 32 | 73 | #define ADF_DEVICE_NAME_LENGTH 32 |
62 | #define ADF_ETR_MAX_RINGS_PER_BANK 16 | 74 | #define ADF_ETR_MAX_RINGS_PER_BANK 16 |
@@ -168,11 +180,11 @@ struct adf_hw_device_data { | |||
168 | const char *fw_mmp_name; | 180 | const char *fw_mmp_name; |
169 | uint32_t fuses; | 181 | uint32_t fuses; |
170 | uint32_t accel_capabilities_mask; | 182 | uint32_t accel_capabilities_mask; |
183 | uint32_t instance_id; | ||
171 | uint16_t accel_mask; | 184 | uint16_t accel_mask; |
172 | uint16_t ae_mask; | 185 | uint16_t ae_mask; |
173 | uint16_t tx_rings_mask; | 186 | uint16_t tx_rings_mask; |
174 | uint8_t tx_rx_gap; | 187 | uint8_t tx_rx_gap; |
175 | uint8_t instance_id; | ||
176 | uint8_t num_banks; | 188 | uint8_t num_banks; |
177 | uint8_t num_accel; | 189 | uint8_t num_accel; |
178 | uint8_t num_logical_accel; | 190 | uint8_t num_logical_accel; |
@@ -239,6 +251,6 @@ struct adf_accel_dev { | |||
239 | } vf; | 251 | } vf; |
240 | }; | 252 | }; |
241 | bool is_vf; | 253 | bool is_vf; |
242 | uint8_t accel_id; | 254 | u32 accel_id; |
243 | } __packed; | 255 | } __packed; |
244 | #endif | 256 | #endif |
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index 20b08bdcb146..a42fc42704be 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c | |||
@@ -78,9 +78,12 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev) | |||
78 | uof_addr = (void *)loader_data->uof_fw->data; | 78 | uof_addr = (void *)loader_data->uof_fw->data; |
79 | mmp_size = loader_data->mmp_fw->size; | 79 | mmp_size = loader_data->mmp_fw->size; |
80 | mmp_addr = (void *)loader_data->mmp_fw->data; | 80 | mmp_addr = (void *)loader_data->mmp_fw->data; |
81 | qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size); | 81 | if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) { |
82 | if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { | 82 | dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n"); |
83 | dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); | 83 | goto out_err; |
84 | } | ||
85 | if (qat_uclo_map_obj(loader_data->fw_loader, uof_addr, uof_size)) { | ||
86 | dev_err(&GET_DEV(accel_dev), "Failed to map FW\n"); | ||
84 | goto out_err; | 87 | goto out_err; |
85 | } | 88 | } |
86 | if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { | 89 | if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { |
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index 147d755fed97..eb557f69e367 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/pci.h> | 51 | #include <linux/pci.h> |
52 | #include <linux/dma-mapping.h> | 52 | #include <linux/dma-mapping.h> |
53 | #include "adf_accel_devices.h" | 53 | #include "adf_accel_devices.h" |
54 | #include "adf_common_drv.h" | ||
54 | #include "icp_qat_fw_init_admin.h" | 55 | #include "icp_qat_fw_init_admin.h" |
55 | 56 | ||
56 | /* Admin Messages Registers */ | 57 | /* Admin Messages Registers */ |
@@ -234,7 +235,8 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) | |||
234 | struct adf_bar *pmisc = | 235 | struct adf_bar *pmisc = |
235 | &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; | 236 | &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; |
236 | void __iomem *csr = pmisc->virt_addr; | 237 | void __iomem *csr = pmisc->virt_addr; |
237 | void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; | 238 | void __iomem *mailbox = (void __iomem *)((uintptr_t)csr + |
239 | ADF_DH895XCC_MAILBOX_BASE_OFFSET); | ||
238 | u64 reg_val; | 240 | u64 reg_val; |
239 | 241 | ||
240 | admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, | 242 | admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, |
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 0a5ca0ba5d64..e78a1d7d88fc 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c | |||
@@ -82,7 +82,7 @@ struct adf_reset_dev_data { | |||
82 | struct work_struct reset_work; | 82 | struct work_struct reset_work; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | static void adf_dev_restore(struct adf_accel_dev *accel_dev) | 85 | void adf_dev_restore(struct adf_accel_dev *accel_dev) |
86 | { | 86 | { |
87 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | 87 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); |
88 | struct pci_dev *parent = pdev->bus->self; | 88 | struct pci_dev *parent = pdev->bus->self; |
@@ -197,7 +197,7 @@ static void adf_resume(struct pci_dev *pdev) | |||
197 | dev_info(&pdev->dev, "Device is up and running\n"); | 197 | dev_info(&pdev->dev, "Device is up and running\n");
198 | } | 198 | } |
199 | 199 | ||
200 | static struct pci_error_handlers adf_err_handler = { | 200 | static const struct pci_error_handlers adf_err_handler = { |
201 | .error_detected = adf_error_detected, | 201 | .error_detected = adf_error_detected, |
202 | .slot_reset = adf_slot_reset, | 202 | .slot_reset = adf_slot_reset, |
203 | .resume = adf_resume, | 203 | .resume = adf_resume, |
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index c697fb1cdfb5..8c4f6573ce59 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h | |||
@@ -72,12 +72,16 @@ enum adf_device_type { | |||
72 | DEV_UNKNOWN = 0, | 72 | DEV_UNKNOWN = 0, |
73 | DEV_DH895XCC, | 73 | DEV_DH895XCC, |
74 | DEV_DH895XCCVF, | 74 | DEV_DH895XCCVF, |
75 | DEV_C62X, | ||
76 | DEV_C62XVF, | ||
77 | DEV_C3XXX, | ||
78 | DEV_C3XXXVF | ||
75 | }; | 79 | }; |
76 | 80 | ||
77 | struct adf_dev_status_info { | 81 | struct adf_dev_status_info { |
78 | enum adf_device_type type; | 82 | enum adf_device_type type; |
79 | uint8_t accel_id; | 83 | u32 accel_id; |
80 | uint8_t instance_id; | 84 | u32 instance_id; |
81 | uint8_t num_ae; | 85 | uint8_t num_ae; |
82 | uint8_t num_accel; | 86 | uint8_t num_accel; |
83 | uint8_t num_logical_accel; | 87 | uint8_t num_logical_accel; |
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 3f76bd495bcb..0e82ce3c383e 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h | |||
@@ -54,7 +54,7 @@ | |||
54 | #include "icp_qat_hal.h" | 54 | #include "icp_qat_hal.h" |
55 | 55 | ||
56 | #define ADF_MAJOR_VERSION 0 | 56 | #define ADF_MAJOR_VERSION 0 |
57 | #define ADF_MINOR_VERSION 2 | 57 | #define ADF_MINOR_VERSION 6 |
58 | #define ADF_BUILD_VERSION 0 | 58 | #define ADF_BUILD_VERSION 0 |
59 | #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ | 59 | #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ |
60 | __stringify(ADF_MINOR_VERSION) "." \ | 60 | __stringify(ADF_MINOR_VERSION) "." \ |
@@ -106,8 +106,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev); | |||
106 | int adf_dev_stop(struct adf_accel_dev *accel_dev); | 106 | int adf_dev_stop(struct adf_accel_dev *accel_dev); |
107 | void adf_dev_shutdown(struct adf_accel_dev *accel_dev); | 107 | void adf_dev_shutdown(struct adf_accel_dev *accel_dev); |
108 | 108 | ||
109 | void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); | ||
110 | void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); | ||
111 | int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); | 109 | int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); |
112 | void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); | 110 | void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); |
113 | int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); | 111 | int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); |
@@ -143,6 +141,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev); | |||
143 | 141 | ||
144 | int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); | 142 | int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); |
145 | void adf_disable_aer(struct adf_accel_dev *accel_dev); | 143 | void adf_disable_aer(struct adf_accel_dev *accel_dev); |
144 | void adf_dev_restore(struct adf_accel_dev *accel_dev); | ||
146 | int adf_init_aer(void); | 145 | int adf_init_aer(void); |
147 | void adf_exit_aer(void); | 146 | void adf_exit_aer(void); |
148 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev); | 147 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev); |
@@ -159,6 +158,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev); | |||
159 | void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); | 158 | void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); |
160 | int qat_crypto_register(void); | 159 | int qat_crypto_register(void); |
161 | int qat_crypto_unregister(void); | 160 | int qat_crypto_unregister(void); |
161 | int qat_crypto_dev_config(struct adf_accel_dev *accel_dev); | ||
162 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node); | 162 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node); |
163 | void qat_crypto_put_instance(struct qat_crypto_instance *inst); | 163 | void qat_crypto_put_instance(struct qat_crypto_instance *inst); |
164 | void qat_alg_callback(void *resp); | 164 | void qat_alg_callback(void *resp); |
@@ -168,6 +168,11 @@ void qat_algs_unregister(void); | |||
168 | int qat_asym_algs_register(void); | 168 | int qat_asym_algs_register(void); |
169 | void qat_asym_algs_unregister(void); | 169 | void qat_asym_algs_unregister(void); |
170 | 170 | ||
171 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); | ||
172 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev); | ||
173 | int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); | ||
174 | void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); | ||
175 | |||
171 | int qat_hal_init(struct adf_accel_dev *accel_dev); | 176 | int qat_hal_init(struct adf_accel_dev *accel_dev); |
172 | void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); | 177 | void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); |
173 | void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | 178 | void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, |
@@ -178,6 +183,8 @@ void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); | |||
178 | int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); | 183 | int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); |
179 | void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, | 184 | void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, |
180 | unsigned char ae, unsigned int ctx_mask); | 185 | unsigned char ae, unsigned int ctx_mask); |
186 | int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, | ||
187 | unsigned int ae); | ||
181 | int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, | 188 | int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, |
182 | unsigned char ae, enum icp_qat_uof_regtype lm_type, | 189 | unsigned char ae, enum icp_qat_uof_regtype lm_type, |
183 | unsigned char mode); | 190 | unsigned char mode); |
@@ -216,10 +223,10 @@ int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, | |||
216 | unsigned char ae, unsigned short lm_addr, unsigned int value); | 223 | unsigned char ae, unsigned short lm_addr, unsigned int value); |
217 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); | 224 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); |
218 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); | 225 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); |
219 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | 226 | int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, |
220 | void *addr_ptr, int mem_size); | 227 | int mem_size); |
221 | void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, | 228 | int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, |
222 | void *addr_ptr, int mem_size); | 229 | void *addr_ptr, int mem_size); |
223 | #if defined(CONFIG_PCI_IOV) | 230 | #if defined(CONFIG_PCI_IOV) |
224 | int adf_sriov_configure(struct pci_dev *pdev, int numvfs); | 231 | int adf_sriov_configure(struct pci_dev *pdev, int numvfs); |
225 | void adf_disable_sriov(struct adf_accel_dev *accel_dev); | 232 | void adf_disable_sriov(struct adf_accel_dev *accel_dev); |
@@ -227,6 +234,8 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, | |||
227 | uint32_t vf_mask); | 234 | uint32_t vf_mask); |
228 | void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, | 235 | void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, |
229 | uint32_t vf_mask); | 236 | uint32_t vf_mask); |
237 | void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); | ||
238 | void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); | ||
230 | #else | 239 | #else |
231 | static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) | 240 | static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) |
232 | { | 241 | { |
@@ -236,5 +245,13 @@ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) | |||
236 | static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) | 245 | static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) |
237 | { | 246 | { |
238 | } | 247 | } |
248 | |||
249 | static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) | ||
250 | { | ||
251 | } | ||
252 | |||
253 | static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) | ||
254 | { | ||
255 | } | ||
239 | #endif | 256 | #endif |
240 | #endif | 257 | #endif |
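
The tail of this header extends the usual kernel idiom for optionally compiled features: under CONFIG_PCI_IOV the PF2VF interrupt helpers are real functions, otherwise they become empty static inlines so callers never need their own #ifdef blocks. Reduced to a generic sketch (the foo_* names are placeholders):

#ifdef CONFIG_PCI_IOV
void foo_enable_iov(struct foo_dev *dev);	/* implemented in a .c file */
#else
static inline void foo_enable_iov(struct foo_dev *dev)
{
	/* SR-IOV support compiled out: deliberately a no-op. */
}
#endif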
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 473d36d91644..5c897e6e7994 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c | |||
@@ -255,12 +255,9 @@ out: | |||
255 | 255 | ||
256 | static int adf_ctl_is_device_in_use(int id) | 256 | static int adf_ctl_is_device_in_use(int id) |
257 | { | 257 | { |
258 | struct list_head *itr, *head = adf_devmgr_get_head(); | 258 | struct adf_accel_dev *dev; |
259 | |||
260 | list_for_each(itr, head) { | ||
261 | struct adf_accel_dev *dev = | ||
262 | list_entry(itr, struct adf_accel_dev, list); | ||
263 | 259 | ||
260 | list_for_each_entry(dev, adf_devmgr_get_head(), list) { | ||
264 | if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { | 261 | if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { |
265 | if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { | 262 | if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { |
266 | dev_info(&GET_DEV(dev), | 263 | dev_info(&GET_DEV(dev), |
@@ -275,12 +272,10 @@ static int adf_ctl_is_device_in_use(int id) | |||
275 | 272 | ||
276 | static int adf_ctl_stop_devices(uint32_t id) | 273 | static int adf_ctl_stop_devices(uint32_t id) |
277 | { | 274 | { |
278 | struct list_head *itr, *head = adf_devmgr_get_head(); | 275 | struct adf_accel_dev *accel_dev; |
279 | int ret = 0; | 276 | int ret = 0; |
280 | 277 | ||
281 | list_for_each(itr, head) { | 278 | list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) { |
282 | struct adf_accel_dev *accel_dev = | ||
283 | list_entry(itr, struct adf_accel_dev, list); | ||
284 | if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { | 279 | if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { |
285 | if (!adf_dev_started(accel_dev)) | 280 | if (!adf_dev_started(accel_dev)) |
286 | continue; | 281 | continue; |
@@ -342,12 +337,10 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, | |||
342 | if (ret) | 337 | if (ret) |
343 | return ret; | 338 | return ret; |
344 | 339 | ||
340 | ret = -ENODEV; | ||
345 | accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); | 341 | accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); |
346 | if (!accel_dev) { | 342 | if (!accel_dev) |
347 | pr_err("QAT: Device %d not found\n", ctl_data->device_id); | ||
348 | ret = -ENODEV; | ||
349 | goto out; | 343 | goto out; |
350 | } | ||
351 | 344 | ||
352 | if (!adf_dev_started(accel_dev)) { | 345 | if (!adf_dev_started(accel_dev)) { |
353 | dev_info(&GET_DEV(accel_dev), | 346 | dev_info(&GET_DEV(accel_dev), |
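
Both hunks above convert open-coded list_for_each() plus list_entry() walks into list_for_each_entry(), which folds the container lookup into the iterator; adf_ctl_stop_devices() also switches to list_for_each_entry_reverse(), presumably so devices registered last (such as VFs) are stopped before the ones they depend on. The conversion in generic form, over a hypothetical struct item list:

#include <linux/list.h>

struct item {
	int id;
	struct list_head node;
};

static struct item *find_item(struct list_head *head, int id)
{
	struct item *it;

	/* The loop variable is the entry itself; no list_entry() boilerplate. */
	list_for_each_entry(it, head, node) {
		if (it->id == id)
			return it;
	}
	return NULL;
}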
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index 8dfdb8f90797..b3ebb25f9ca7 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c | |||
@@ -53,6 +53,7 @@ static LIST_HEAD(accel_table); | |||
53 | static LIST_HEAD(vfs_table); | 53 | static LIST_HEAD(vfs_table); |
54 | static DEFINE_MUTEX(table_lock); | 54 | static DEFINE_MUTEX(table_lock); |
55 | static uint32_t num_devices; | 55 | static uint32_t num_devices; |
56 | static u8 id_map[ADF_MAX_DEVICES]; | ||
56 | 57 | ||
57 | struct vf_id_map { | 58 | struct vf_id_map { |
58 | u32 bdf; | 59 | u32 bdf; |
@@ -116,8 +117,10 @@ void adf_clean_vf_map(bool vf) | |||
116 | mutex_lock(&table_lock); | 117 | mutex_lock(&table_lock); |
117 | list_for_each_safe(ptr, tmp, &vfs_table) { | 118 | list_for_each_safe(ptr, tmp, &vfs_table) { |
118 | map = list_entry(ptr, struct vf_id_map, list); | 119 | map = list_entry(ptr, struct vf_id_map, list); |
119 | if (map->bdf != -1) | 120 | if (map->bdf != -1) { |
121 | id_map[map->id] = 0; | ||
120 | num_devices--; | 122 | num_devices--; |
123 | } | ||
121 | 124 | ||
122 | if (vf && map->bdf == -1) | 125 | if (vf && map->bdf == -1) |
123 | continue; | 126 | continue; |
@@ -154,6 +157,19 @@ void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data) | |||
154 | } | 157 | } |
155 | EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index); | 158 | EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index); |
156 | 159 | ||
160 | static unsigned int adf_find_free_id(void) | ||
161 | { | ||
162 | unsigned int i; | ||
163 | |||
164 | for (i = 0; i < ADF_MAX_DEVICES; i++) { | ||
165 | if (!id_map[i]) { | ||
166 | id_map[i] = 1; | ||
167 | return i; | ||
168 | } | ||
169 | } | ||
170 | return ADF_MAX_DEVICES + 1; | ||
171 | } | ||
172 | |||
157 | /** | 173 | /** |
158 | * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework | 174 | * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework |
159 | * @accel_dev: Pointer to acceleration device. | 175 | * @accel_dev: Pointer to acceleration device. |
@@ -194,8 +210,12 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, | |||
194 | } | 210 | } |
195 | 211 | ||
196 | list_add_tail(&accel_dev->list, &accel_table); | 212 | list_add_tail(&accel_dev->list, &accel_table); |
197 | accel_dev->accel_id = num_devices++; | 213 | accel_dev->accel_id = adf_find_free_id(); |
198 | 214 | if (accel_dev->accel_id > ADF_MAX_DEVICES) { | |
215 | ret = -EFAULT; | ||
216 | goto unlock; | ||
217 | } | ||
218 | num_devices++; | ||
199 | map = kzalloc(sizeof(*map), GFP_KERNEL); | 219 | map = kzalloc(sizeof(*map), GFP_KERNEL); |
200 | if (!map) { | 220 | if (!map) { |
201 | ret = -ENOMEM; | 221 | ret = -ENOMEM; |
@@ -236,8 +256,13 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, | |||
236 | ret = -ENOMEM; | 256 | ret = -ENOMEM; |
237 | goto unlock; | 257 | goto unlock; |
238 | } | 258 | } |
239 | 259 | accel_dev->accel_id = adf_find_free_id(); | |
240 | accel_dev->accel_id = num_devices++; | 260 | if (accel_dev->accel_id > ADF_MAX_DEVICES) { |
261 | kfree(map); | ||
262 | ret = -EFAULT; | ||
263 | goto unlock; | ||
264 | } | ||
265 | num_devices++; | ||
241 | list_add_tail(&accel_dev->list, &accel_table); | 266 | list_add_tail(&accel_dev->list, &accel_table); |
242 | map->bdf = adf_get_vf_num(accel_dev); | 267 | map->bdf = adf_get_vf_num(accel_dev); |
243 | map->id = accel_dev->accel_id; | 268 | map->id = accel_dev->accel_id; |
@@ -271,6 +296,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, | |||
271 | { | 296 | { |
272 | mutex_lock(&table_lock); | 297 | mutex_lock(&table_lock); |
273 | if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { | 298 | if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { |
299 | id_map[accel_dev->accel_id] = 0; | ||
274 | num_devices--; | 300 | num_devices--; |
275 | } else if (accel_dev->is_vf && pf) { | 301 | } else if (accel_dev->is_vf && pf) { |
276 | struct vf_id_map *map, *next; | 302 | struct vf_id_map *map, *next; |
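
The new adf_find_free_id() replaces the old "accel_id = num_devices++" assignment with a reusable slot map, so IDs released on device removal (the id_map[...] = 0 writes in the hunks above) can be handed out again instead of growing without bound. A minimal sketch of the same allocator (callers are expected to serialize access, as the driver does with table_lock):

#define MAX_IDS 32			/* stands in for ADF_MAX_DEVICES */

static unsigned char ids[MAX_IDS];

/* Claim and return the lowest free slot; MAX_IDS + 1 signals exhaustion. */
static unsigned int find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < MAX_IDS; i++) {
		if (!ids[i]) {
			ids[i] = 1;
			return i;
		}
	}
	return MAX_IDS + 1;
}

static void put_id(unsigned int id)
{
	ids[id] = 0;
}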
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index 6849422e04bb..f267d9e42e0b 100644 --- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c | |||
@@ -45,6 +45,7 @@ | |||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
46 | */ | 46 | */ |
47 | #include "adf_accel_devices.h" | 47 | #include "adf_accel_devices.h" |
48 | #include "adf_common_drv.h" | ||
48 | #include "adf_transport_internal.h" | 49 | #include "adf_transport_internal.h" |
49 | 50 | ||
50 | #define ADF_ARB_NUM 4 | 51 | #define ADF_ARB_NUM 4 |
@@ -124,19 +125,12 @@ int adf_init_arb(struct adf_accel_dev *accel_dev) | |||
124 | } | 125 | } |
125 | EXPORT_SYMBOL_GPL(adf_init_arb); | 126 | EXPORT_SYMBOL_GPL(adf_init_arb); |
126 | 127 | ||
127 | /** | ||
128 | * adf_update_ring_arb() - update ring arbitration rgister | ||
129 | * @accel_dev: Pointer to ring data. | ||
130 | * | ||
131 | * Function enables or disables rings for/from arbitration. | ||
132 | */ | ||
133 | void adf_update_ring_arb(struct adf_etr_ring_data *ring) | 128 | void adf_update_ring_arb(struct adf_etr_ring_data *ring) |
134 | { | 129 | { |
135 | WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, | 130 | WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, |
136 | ring->bank->bank_number, | 131 | ring->bank->bank_number, |
137 | ring->bank->ring_mask & 0xFF); | 132 | ring->bank->ring_mask & 0xFF); |
138 | } | 133 | } |
139 | EXPORT_SYMBOL_GPL(adf_update_ring_arb); | ||
140 | 134 | ||
141 | void adf_exit_arb(struct adf_accel_dev *accel_dev) | 135 | void adf_exit_arb(struct adf_accel_dev *accel_dev) |
142 | { | 136 | { |
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index d873eeecc363..ef5575e4a215 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c | |||
@@ -62,15 +62,6 @@ static void adf_service_add(struct service_hndl *service) | |||
62 | mutex_unlock(&service_lock); | 62 | mutex_unlock(&service_lock); |
63 | } | 63 | } |
64 | 64 | ||
65 | /** | ||
66 | * adf_service_register() - Register acceleration service in the accel framework | ||
67 | * @service: Pointer to the service | ||
68 | * | ||
69 | * Function adds the acceleration service to the acceleration framework. | ||
70 | * To be used by QAT device specific drivers. | ||
71 | * | ||
72 | * Return: 0 on success, error code otherwise. | ||
73 | */ | ||
74 | int adf_service_register(struct service_hndl *service) | 65 | int adf_service_register(struct service_hndl *service) |
75 | { | 66 | { |
76 | service->init_status = 0; | 67 | service->init_status = 0; |
@@ -78,7 +69,6 @@ int adf_service_register(struct service_hndl *service) | |||
78 | adf_service_add(service); | 69 | adf_service_add(service); |
79 | return 0; | 70 | return 0; |
80 | } | 71 | } |
81 | EXPORT_SYMBOL_GPL(adf_service_register); | ||
82 | 72 | ||
83 | static void adf_service_remove(struct service_hndl *service) | 73 | static void adf_service_remove(struct service_hndl *service) |
84 | { | 74 | { |
@@ -87,15 +77,6 @@ static void adf_service_remove(struct service_hndl *service) | |||
87 | mutex_unlock(&service_lock); | 77 | mutex_unlock(&service_lock); |
88 | } | 78 | } |
89 | 79 | ||
90 | /** | ||
91 | * adf_service_unregister() - Unregister acceleration service from the framework | ||
92 | * @service: Pointer to the service | ||
93 | * | ||
94 | * Function remove the acceleration service from the acceleration framework. | ||
95 | * To be used by QAT device specific drivers. | ||
96 | * | ||
97 | * Return: 0 on success, error code otherwise. | ||
98 | */ | ||
99 | int adf_service_unregister(struct service_hndl *service) | 80 | int adf_service_unregister(struct service_hndl *service) |
100 | { | 81 | { |
101 | if (service->init_status || service->start_status) { | 82 | if (service->init_status || service->start_status) { |
@@ -105,7 +86,6 @@ int adf_service_unregister(struct service_hndl *service) | |||
105 | adf_service_remove(service); | 86 | adf_service_remove(service); |
106 | return 0; | 87 | return 0; |
107 | } | 88 | } |
108 | EXPORT_SYMBOL_GPL(adf_service_unregister); | ||
109 | 89 | ||
110 | /** | 90 | /** |
111 | * adf_dev_init() - Init data structures and services for the given accel device | 91 | * adf_dev_init() - Init data structures and services for the given accel device |
@@ -366,6 +346,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) | |||
366 | 346 | ||
367 | hw_data->disable_iov(accel_dev); | 347 | hw_data->disable_iov(accel_dev); |
368 | adf_cleanup_etr_data(accel_dev); | 348 | adf_cleanup_etr_data(accel_dev); |
349 | adf_dev_restore(accel_dev); | ||
369 | } | 350 | } |
370 | EXPORT_SYMBOL_GPL(adf_dev_shutdown); | 351 | EXPORT_SYMBOL_GPL(adf_dev_shutdown); |
371 | 352 | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index 5570f78795c1..b81f79acc4ea 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c | |||
@@ -51,15 +51,13 @@ | |||
51 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
52 | #include <linux/errno.h> | 52 | #include <linux/errno.h> |
53 | #include <linux/interrupt.h> | 53 | #include <linux/interrupt.h> |
54 | #include <adf_accel_devices.h> | 54 | #include "adf_accel_devices.h" |
55 | #include <adf_common_drv.h> | 55 | #include "adf_common_drv.h" |
56 | #include <adf_cfg.h> | 56 | #include "adf_cfg.h" |
57 | #include <adf_cfg_strings.h> | 57 | #include "adf_cfg_strings.h" |
58 | #include <adf_cfg_common.h> | 58 | #include "adf_cfg_common.h" |
59 | #include <adf_transport_access_macros.h> | 59 | #include "adf_transport_access_macros.h" |
60 | #include <adf_transport_internal.h> | 60 | #include "adf_transport_internal.h" |
61 | #include "adf_drv.h" | ||
62 | #include "adf_dh895xcc_hw_data.h" | ||
63 | 61 | ||
64 | static int adf_enable_msix(struct adf_accel_dev *accel_dev) | 62 | static int adf_enable_msix(struct adf_accel_dev *accel_dev) |
65 | { | 63 | { |
@@ -109,14 +107,16 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) | |||
109 | #ifdef CONFIG_PCI_IOV | 107 | #ifdef CONFIG_PCI_IOV |
110 | /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */ | 108 | /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */ |
111 | if (accel_dev->pf.vf_info) { | 109 | if (accel_dev->pf.vf_info) { |
112 | void __iomem *pmisc_bar_addr = | 110 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
113 | (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; | 111 | struct adf_bar *pmisc = |
112 | &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; | ||
113 | void __iomem *pmisc_bar_addr = pmisc->virt_addr; | ||
114 | u32 vf_mask; | 114 | u32 vf_mask; |
115 | 115 | ||
116 | /* Get the interrupt sources triggered by VFs */ | 116 | /* Get the interrupt sources triggered by VFs */ |
117 | vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) & | 117 | vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) & |
118 | 0x0000FFFF) << 16) | | 118 | 0x0000FFFF) << 16) | |
119 | ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) & | 119 | ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) & |
120 | 0x01FFFE00) >> 9); | 120 | 0x01FFFE00) >> 9); |
121 | 121 | ||
122 | if (vf_mask) { | 122 | if (vf_mask) { |
@@ -301,6 +301,12 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) | |||
301 | } | 301 | } |
302 | } | 302 | } |
303 | 303 | ||
304 | /** | ||
305 | * adf_isr_resource_free() - Free IRQ for acceleration device | ||
306 | * @accel_dev: Pointer to acceleration device. | ||
307 | * | ||
308 | * Function frees interrupts for acceleration device. | ||
309 | */ | ||
304 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev) | 310 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev) |
305 | { | 311 | { |
306 | adf_free_irqs(accel_dev); | 312 | adf_free_irqs(accel_dev); |
@@ -308,7 +314,16 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev) | |||
308 | adf_disable_msix(&accel_dev->accel_pci_dev); | 314 | adf_disable_msix(&accel_dev->accel_pci_dev); |
309 | adf_isr_free_msix_entry_table(accel_dev); | 315 | adf_isr_free_msix_entry_table(accel_dev); |
310 | } | 316 | } |
311 | 317 | EXPORT_SYMBOL_GPL(adf_isr_resource_free); | |
318 | |||
319 | /** | ||
320 | * adf_isr_resource_alloc() - Allocate IRQ for acceleration device | ||
321 | * @accel_dev: Pointer to acceleration device. | ||
322 | * | ||
323 | * Function allocates interrupts for acceleration device. | ||
324 | * | ||
325 | * Return: 0 on success, error code otherwise. | ||
326 | */ | ||
312 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) | 327 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) |
313 | { | 328 | { |
314 | int ret; | 329 | int ret; |
@@ -330,3 +345,4 @@ err_out: | |||
330 | adf_isr_resource_free(accel_dev); | 345 | adf_isr_resource_free(accel_dev); |
331 | return -EFAULT; | 346 | return -EFAULT; |
332 | } | 347 | } |
348 | EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); | ||
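
Two things happen in this file: the PF ISR moves into qat_common and is exported for all device drivers, and every hard-coded dh895xcc constant is replaced by an hw_data callback (get_misc_bar_id()) or the generic ADF_ERRSOU3/ADF_ERRSOU5 offsets. The vf_mask computation merges two interrupt source registers into one 32-bit VF bitmap; unpacked for readability (the VF-to-bit mapping is inferred from the shifts):

u32 errsou3 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3);
u32 errsou5 = ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5);
u32 lo = (errsou3 & 0x01FFFE00) >> 9;	/* VFs 0-15 signal via ERRSOU3 bits 9-24 */
u32 hi = (errsou5 & 0x0000FFFF) << 16;	/* VFs 16-31 signal via ERRSOU5 bits 0-15 */
u32 vf_mask = hi | lo;			/* one bit per VF, as tested above */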
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 5fdbad809343..b3875fdf6cd7 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | |||
@@ -45,8 +45,6 @@ | |||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
46 | */ | 46 | */ |
47 | 47 | ||
48 | #include <linux/pci.h> | ||
49 | #include <linux/mutex.h> | ||
50 | #include <linux/delay.h> | 48 | #include <linux/delay.h> |
51 | #include "adf_accel_devices.h" | 49 | #include "adf_accel_devices.h" |
52 | #include "adf_common_drv.h" | 50 | #include "adf_common_drv.h" |
@@ -58,12 +56,6 @@ | |||
58 | #define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC) | 56 | #define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC) |
59 | #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16) | 57 | #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16) |
60 | 58 | ||
61 | /** | ||
62 | * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts | ||
63 | * @accel_dev: Pointer to acceleration device. | ||
64 | * | ||
65 | * Function enables PF to VF interrupts | ||
66 | */ | ||
67 | void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) | 59 | void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) |
68 | { | 60 | { |
69 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; | 61 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; |
@@ -73,14 +65,7 @@ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) | |||
73 | 65 | ||
74 | ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0); | 66 | ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0); |
75 | } | 67 | } |
76 | EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts); | ||
77 | 68 | ||
78 | /** | ||
79 | * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts | ||
80 | * @accel_dev: Pointer to acceleration device. | ||
81 | * | ||
82 | * Function disables PF to VF interrupts | ||
83 | */ | ||
84 | void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) | 69 | void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) |
85 | { | 70 | { |
86 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; | 71 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; |
@@ -90,7 +75,6 @@ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) | |||
90 | 75 | ||
91 | ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2); | 76 | ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2); |
92 | } | 77 | } |
93 | EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts); | ||
94 | 78 | ||
95 | void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, | 79 | void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, |
96 | u32 vf_mask) | 80 | u32 vf_mask) |
@@ -116,12 +100,6 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, | |||
116 | } | 100 | } |
117 | } | 101 | } |
118 | 102 | ||
119 | /** | ||
120 | * adf_disable_pf2vf_interrupts() - Disable VF to PF interrupts | ||
121 | * @accel_dev: Pointer to acceleration device. | ||
122 | * | ||
123 | * Function disables VF to PF interrupts | ||
124 | */ | ||
125 | void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) | 103 | void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) |
126 | { | 104 | { |
127 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 105 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
@@ -144,7 +122,6 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) | |||
144 | ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg); | 122 | ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg); |
145 | } | 123 | } |
146 | } | 124 | } |
147 | EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts); | ||
148 | 125 | ||
149 | static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) | 126 | static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) |
150 | { | 127 | { |
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 3865ae8d96d9..57d2622728a5 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c | |||
@@ -122,7 +122,7 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) | |||
122 | return -EAGAIN; | 122 | return -EAGAIN; |
123 | } | 123 | } |
124 | spin_lock_bh(&ring->lock); | 124 | spin_lock_bh(&ring->lock); |
125 | memcpy(ring->base_addr + ring->tail, msg, | 125 | memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg, |
126 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); | 126 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); |
127 | 127 | ||
128 | ring->tail = adf_modulo(ring->tail + | 128 | ring->tail = adf_modulo(ring->tail + |
@@ -137,23 +137,22 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) | |||
137 | static int adf_handle_response(struct adf_etr_ring_data *ring) | 137 | static int adf_handle_response(struct adf_etr_ring_data *ring) |
138 | { | 138 | { |
139 | uint32_t msg_counter = 0; | 139 | uint32_t msg_counter = 0; |
140 | uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head); | 140 | uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); |
141 | 141 | ||
142 | while (*msg != ADF_RING_EMPTY_SIG) { | 142 | while (*msg != ADF_RING_EMPTY_SIG) { |
143 | ring->callback((uint32_t *)msg); | 143 | ring->callback((uint32_t *)msg); |
144 | atomic_dec(ring->inflights); | ||
144 | *msg = ADF_RING_EMPTY_SIG; | 145 | *msg = ADF_RING_EMPTY_SIG; |
145 | ring->head = adf_modulo(ring->head + | 146 | ring->head = adf_modulo(ring->head + |
146 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size), | 147 | ADF_MSG_SIZE_TO_BYTES(ring->msg_size), |
147 | ADF_RING_SIZE_MODULO(ring->ring_size)); | 148 | ADF_RING_SIZE_MODULO(ring->ring_size)); |
148 | msg_counter++; | 149 | msg_counter++; |
149 | msg = (uint32_t *)(ring->base_addr + ring->head); | 150 | msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); |
150 | } | 151 | } |
151 | if (msg_counter > 0) { | 152 | if (msg_counter > 0) |
152 | WRITE_CSR_RING_HEAD(ring->bank->csr_addr, | 153 | WRITE_CSR_RING_HEAD(ring->bank->csr_addr, |
153 | ring->bank->bank_number, | 154 | ring->bank->bank_number, |
154 | ring->ring_number, ring->head); | 155 | ring->ring_number, ring->head); |
155 | atomic_sub(msg_counter, ring->inflights); | ||
156 | } | ||
157 | return 0; | 156 | return 0; |
158 | } | 157 | } |
159 | 158 | ||
@@ -342,27 +341,15 @@ static void adf_ring_response_handler(struct adf_etr_bank_data *bank) | |||
342 | } | 341 | } |
343 | } | 342 | } |
344 | 343 | ||
345 | /** | 344 | void adf_response_handler(uintptr_t bank_addr) |
346 | * adf_response_handler() - Bottom half handler response handler | ||
347 | * @bank_addr: Address of a ring bank for with the BH was scheduled. | ||
348 | * | ||
349 | * Function is the bottom half handler for the response from acceleration | ||
350 | * device. There is one handler for every ring bank. Function checks all | ||
351 | * communication rings in the bank. | ||
352 | * To be used by QAT device specific drivers. | ||
353 | * | ||
354 | * Return: void | ||
355 | */ | ||
356 | void adf_response_handler(unsigned long bank_addr) | ||
357 | { | 345 | { |
358 | struct adf_etr_bank_data *bank = (void *)bank_addr; | 346 | struct adf_etr_bank_data *bank = (void *)bank_addr; |
359 | 347 | ||
360 | /* Handle all the responses nad reenable IRQs */ | 348 | /* Handle all the responses and reenable IRQs */ |
361 | adf_ring_response_handler(bank); | 349 | adf_ring_response_handler(bank); |
362 | WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, | 350 | WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, |
363 | bank->irq_mask); | 351 | bank->irq_mask); |
364 | } | 352 | } |
365 | EXPORT_SYMBOL_GPL(adf_response_handler); | ||
366 | 353 | ||
367 | static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, | 354 | static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, |
368 | const char *section, const char *format, | 355 | const char *section, const char *format, |
@@ -447,6 +434,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, | |||
447 | goto err; | 434 | goto err; |
448 | } | 435 | } |
449 | 436 | ||
437 | WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK); | ||
450 | WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); | 438 | WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); |
451 | return 0; | 439 | return 0; |
452 | err: | 440 | err: |
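
Besides the uintptr_t pointer-arithmetic cleanups, adf_handle_response() now decrements the inflight counter once per consumed message instead of batching an atomic_sub() after the loop, so a sender polling the counter sees ring capacity freed as each response is handled rather than only after a whole batch drains. The before/after in miniature (next_msg() and consume() are stand-ins for the ring walk):

/* Before: occupancy was corrected only after the whole batch. */
while ((msg = next_msg(ring))) {
	consume(msg);
	msg_counter++;
}
atomic_sub(msg_counter, ring->inflights);

/* After: each response releases its ring slot immediately. */
while ((msg = next_msg(ring))) {
	consume(msg);
	atomic_dec(ring->inflights);
}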
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 6ad7e4e1edca..80e02a2a0a09 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h | |||
@@ -50,12 +50,14 @@ | |||
50 | #include "adf_accel_devices.h" | 50 | #include "adf_accel_devices.h" |
51 | #define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL | 51 | #define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL |
52 | #define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL | 52 | #define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL |
53 | #define ADF_BANK_INT_FLAG_CLEAR_MASK 0xFFFF | ||
53 | #define ADF_RING_CSR_RING_CONFIG 0x000 | 54 | #define ADF_RING_CSR_RING_CONFIG 0x000 |
54 | #define ADF_RING_CSR_RING_LBASE 0x040 | 55 | #define ADF_RING_CSR_RING_LBASE 0x040 |
55 | #define ADF_RING_CSR_RING_UBASE 0x080 | 56 | #define ADF_RING_CSR_RING_UBASE 0x080 |
56 | #define ADF_RING_CSR_RING_HEAD 0x0C0 | 57 | #define ADF_RING_CSR_RING_HEAD 0x0C0 |
57 | #define ADF_RING_CSR_RING_TAIL 0x100 | 58 | #define ADF_RING_CSR_RING_TAIL 0x100 |
58 | #define ADF_RING_CSR_E_STAT 0x14C | 59 | #define ADF_RING_CSR_E_STAT 0x14C |
60 | #define ADF_RING_CSR_INT_FLAG 0x170 | ||
59 | #define ADF_RING_CSR_INT_SRCSEL 0x174 | 61 | #define ADF_RING_CSR_INT_SRCSEL 0x174 |
60 | #define ADF_RING_CSR_INT_SRCSEL_2 0x178 | 62 | #define ADF_RING_CSR_INT_SRCSEL_2 0x178 |
61 | #define ADF_RING_CSR_INT_COL_EN 0x17C | 63 | #define ADF_RING_CSR_INT_COL_EN 0x17C |
@@ -144,6 +146,9 @@ do { \ | |||
144 | #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ | 146 | #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ |
145 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | 147 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ |
146 | ADF_RING_CSR_RING_TAIL + (ring << 2), value) | 148 | ADF_RING_CSR_RING_TAIL + (ring << 2), value) |
149 | #define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ | ||
150 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ | ||
151 | ADF_RING_CSR_INT_FLAG, value) | ||
147 | #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ | 152 | #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ |
148 | do { \ | 153 | do { \ |
149 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ | 154 | ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ |
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index a4869627fd57..bb883368ac01 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h | |||
@@ -91,7 +91,7 @@ struct adf_etr_data { | |||
91 | struct dentry *debug; | 91 | struct dentry *debug; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | void adf_response_handler(unsigned long bank_addr); | 94 | void adf_response_handler(uintptr_t bank_addr); |
95 | #ifdef CONFIG_DEBUG_FS | 95 | #ifdef CONFIG_DEBUG_FS |
96 | #include <linux/debugfs.h> | 96 | #include <linux/debugfs.h> |
97 | int adf_bank_debugfs_add(struct adf_etr_bank_data *bank); | 97 | int adf_bank_debugfs_add(struct adf_etr_bank_data *bank); |
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 87c5d8adb125..09427b3d4d55 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c | |||
@@ -51,16 +51,18 @@ | |||
51 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
52 | #include <linux/errno.h> | 52 | #include <linux/errno.h> |
53 | #include <linux/interrupt.h> | 53 | #include <linux/interrupt.h> |
54 | #include <adf_accel_devices.h> | 54 | #include "adf_accel_devices.h" |
55 | #include <adf_common_drv.h> | 55 | #include "adf_common_drv.h" |
56 | #include <adf_cfg.h> | 56 | #include "adf_cfg.h" |
57 | #include <adf_cfg_strings.h> | 57 | #include "adf_cfg_strings.h" |
58 | #include <adf_cfg_common.h> | 58 | #include "adf_cfg_common.h" |
59 | #include <adf_transport_access_macros.h> | 59 | #include "adf_transport_access_macros.h" |
60 | #include <adf_transport_internal.h> | 60 | #include "adf_transport_internal.h" |
61 | #include <adf_pf2vf_msg.h> | 61 | #include "adf_pf2vf_msg.h" |
62 | #include "adf_drv.h" | 62 | |
63 | #include "adf_dh895xccvf_hw_data.h" | 63 | #define ADF_VINTSOU_OFFSET 0x204 |
64 | #define ADF_VINTSOU_BUN BIT(0) | ||
65 | #define ADF_VINTSOU_PF2VF BIT(1) | ||
64 | 66 | ||
65 | static int adf_enable_msi(struct adf_accel_dev *accel_dev) | 67 | static int adf_enable_msi(struct adf_accel_dev *accel_dev) |
66 | { | 68 | { |
@@ -91,12 +93,14 @@ static void adf_disable_msi(struct adf_accel_dev *accel_dev) | |||
91 | static void adf_pf2vf_bh_handler(void *data) | 93 | static void adf_pf2vf_bh_handler(void *data) |
92 | { | 94 | { |
93 | struct adf_accel_dev *accel_dev = data; | 95 | struct adf_accel_dev *accel_dev = data; |
94 | void __iomem *pmisc_bar_addr = | 96 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
95 | (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr; | 97 | struct adf_bar *pmisc = |
98 | &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; | ||
99 | void __iomem *pmisc_bar_addr = pmisc->virt_addr; | ||
96 | u32 msg; | 100 | u32 msg; |
97 | 101 | ||
98 | /* Read the message from PF */ | 102 | /* Read the message from PF */ |
99 | msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET); | 103 | msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0)); |
100 | 104 | ||
101 | if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) | 105 | if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) |
102 | /* Ignore legacy non-system (non-kernel) PF2VF messages */ | 106 | /* Ignore legacy non-system (non-kernel) PF2VF messages */ |
@@ -124,8 +128,8 @@ static void adf_pf2vf_bh_handler(void *data) | |||
124 | } | 128 | } |
125 | 129 | ||
126 | /* To ack, clear the PF2VFINT bit */ | 130 | /* To ack, clear the PF2VFINT bit */ |
127 | msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT; | 131 | msg &= ~BIT(0); |
128 | ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg); | 132 | ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg); |
129 | 133 | ||
130 | /* Re-enable PF2VF interrupts */ | 134 | /* Re-enable PF2VF interrupts */ |
131 | adf_enable_pf2vf_interrupts(accel_dev); | 135 | adf_enable_pf2vf_interrupts(accel_dev); |
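The bottom half can live in qat_common because every device-specific detail (which BAR holds the PMISC registers, where the PF2VF doorbell sits) is now fetched through the adf_hw_device_data callbacks instead of dh895xcc constants. A hedged sketch of what one device's implementation could look like; the names and register values below are illustrative, not taken from any real QAT part:

	/* Sketch: per-device callbacks consumed by the common VF ISR. */
	#define MY_DEV_PMISC_BAR	1	/* illustrative */
	#define MY_DEV_PF2VF_OFFSET	0x200	/* illustrative */

	static u32 my_get_misc_bar_id(struct adf_hw_device_data *self)
	{
		return MY_DEV_PMISC_BAR;
	}

	static u32 my_get_pf2vf_offset(u32 i)
	{
		return MY_DEV_PF2VF_OFFSET + 0x04 * i;
	}

	static void my_init_hw_data(struct adf_hw_device_data *hw_data)
	{
		hw_data->get_misc_bar_id = my_get_misc_bar_id;
		hw_data->get_pf2vf_offset = my_get_pf2vf_offset;
	}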
@@ -155,15 +159,17 @@ static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev) | |||
155 | static irqreturn_t adf_isr(int irq, void *privdata) | 159 | static irqreturn_t adf_isr(int irq, void *privdata) |
156 | { | 160 | { |
157 | struct adf_accel_dev *accel_dev = privdata; | 161 | struct adf_accel_dev *accel_dev = privdata; |
158 | void __iomem *pmisc_bar_addr = | 162 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
159 | (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr; | 163 | struct adf_bar *pmisc = |
164 | &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; | ||
165 | void __iomem *pmisc_bar_addr = pmisc->virt_addr; | ||
160 | u32 v_int; | 166 | u32 v_int; |
161 | 167 | ||
162 | /* Read VF INT source CSR to determine the source of VF interrupt */ | 168 | /* Read VF INT source CSR to determine the source of VF interrupt */ |
163 | v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET); | 169 | v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET); |
164 | 170 | ||
165 | /* Check for PF2VF interrupt */ | 171 | /* Check for PF2VF interrupt */ |
166 | if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) { | 172 | if (v_int & ADF_VINTSOU_PF2VF) { |
167 | /* Disable PF to VF interrupt */ | 173 | /* Disable PF to VF interrupt */ |
168 | adf_disable_pf2vf_interrupts(accel_dev); | 174 | adf_disable_pf2vf_interrupts(accel_dev); |
169 | 175 | ||
@@ -173,7 +179,7 @@ static irqreturn_t adf_isr(int irq, void *privdata) | |||
173 | } | 179 | } |
174 | 180 | ||
175 | /* Check bundle interrupt */ | 181 | /* Check bundle interrupt */ |
176 | if (v_int & ADF_DH895XCC_VINTSOU_BUN) { | 182 | if (v_int & ADF_VINTSOU_BUN) { |
177 | struct adf_etr_data *etr_data = accel_dev->transport; | 183 | struct adf_etr_data *etr_data = accel_dev->transport; |
178 | struct adf_etr_bank_data *bank = &etr_data->banks[0]; | 184 | struct adf_etr_bank_data *bank = &etr_data->banks[0]; |
179 | 185 | ||
@@ -226,6 +232,12 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) | |||
226 | tasklet_kill(&priv_data->banks[0].resp_handler); | 232 | tasklet_kill(&priv_data->banks[0].resp_handler); |
227 | } | 233 | } |
228 | 234 | ||
235 | /** | ||
236 | * adf_vf_isr_resource_free() - Free IRQ for acceleration device | ||
237 | * @accel_dev: Pointer to acceleration device. | ||
238 | * | ||
239 | * Function frees interrupts for acceleration device virtual function. | ||
240 | */ | ||
229 | void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) | 241 | void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) |
230 | { | 242 | { |
231 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | 243 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); |
@@ -236,7 +248,16 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) | |||
236 | adf_cleanup_pf2vf_bh(accel_dev); | 248 | adf_cleanup_pf2vf_bh(accel_dev); |
237 | adf_disable_msi(accel_dev); | 249 | adf_disable_msi(accel_dev); |
238 | } | 250 | } |
239 | 251 | EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free); | |
252 | |||
253 | /** | ||
254 | * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device | ||
255 | * @accel_dev: Pointer to acceleration device. | ||
256 | * | ||
257 | * Function allocates interrupts for acceleration device virtual function. | ||
258 | * | ||
259 | * Return: 0 on success, error code otherwise. | ||
260 | */ | ||
240 | int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) | 261 | int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) |
241 | { | 262 | { |
242 | if (adf_enable_msi(accel_dev)) | 263 | if (adf_enable_msi(accel_dev)) |
@@ -256,3 +277,4 @@ err_out: | |||
256 | adf_vf_isr_resource_free(accel_dev); | 277 | adf_vf_isr_resource_free(accel_dev); |
257 | return -EFAULT; | 278 | return -EFAULT; |
258 | } | 279 | } |
280 | EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc); | ||
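With both helpers exported, device-specific VF modules only orchestrate the calls. A hedged sketch of the expected lifecycle; the wrapper names are illustrative:

	/* Sketch: IRQ bring-up and teardown for a VF driver. */
	static int my_vf_setup_irqs(struct adf_accel_dev *accel_dev)
	{
		/* Enables MSI, sets up the PF2VF and bundle tasklets and
		 * requests the IRQ; undone by the matching free call. */
		return adf_vf_isr_resource_alloc(accel_dev);
	}

	static void my_vf_teardown_irqs(struct adf_accel_dev *accel_dev)
	{
		adf_vf_isr_resource_free(accel_dev);
	}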
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h index 5e1aa40c0404..2ffef3e4fd68 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h | |||
@@ -68,11 +68,21 @@ struct icp_qat_fw_loader_hal_handle { | |||
68 | 68 | ||
69 | struct icp_qat_fw_loader_handle { | 69 | struct icp_qat_fw_loader_handle { |
70 | struct icp_qat_fw_loader_hal_handle *hal_handle; | 70 | struct icp_qat_fw_loader_hal_handle *hal_handle; |
71 | struct pci_dev *pci_dev; | ||
71 | void *obj_handle; | 72 | void *obj_handle; |
73 | void *sobj_handle; | ||
74 | bool fw_auth; | ||
72 | void __iomem *hal_sram_addr_v; | 75 | void __iomem *hal_sram_addr_v; |
73 | void __iomem *hal_cap_g_ctl_csr_addr_v; | 76 | void __iomem *hal_cap_g_ctl_csr_addr_v; |
74 | void __iomem *hal_cap_ae_xfer_csr_addr_v; | 77 | void __iomem *hal_cap_ae_xfer_csr_addr_v; |
75 | void __iomem *hal_cap_ae_local_csr_addr_v; | 78 | void __iomem *hal_cap_ae_local_csr_addr_v; |
76 | void __iomem *hal_ep_csr_addr_v; | 79 | void __iomem *hal_ep_csr_addr_v; |
77 | }; | 80 | }; |
81 | |||
82 | struct icp_firml_dram_desc { | ||
83 | void __iomem *dram_base_addr; | ||
84 | void *dram_base_addr_v; | ||
85 | dma_addr_t dram_bus_addr; | ||
86 | u64 dram_size; | ||
87 | }; | ||
78 | #endif | 88 | #endif |
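struct icp_firml_dram_desc bundles the three views of one firmware DMA region: CPU virtual address, device bus address, and length. A minimal sketch of filling it from a coherent allocation; the helper name is an assumption, and the __iomem dram_base_addr view is left unset here:

	#include <linux/dma-mapping.h>

	/* Sketch: one coherent buffer, recorded so the CPU can copy the
	 * image in via dram_base_addr_v while the device is pointed at
	 * dram_bus_addr. */
	static int my_alloc_dram_desc(struct icp_qat_fw_loader_handle *handle,
				      struct icp_firml_dram_desc *desc,
				      size_t size)
	{
		desc->dram_base_addr_v = dma_alloc_coherent(&handle->pci_dev->dev,
							    size,
							    &desc->dram_bus_addr,
							    GFP_KERNEL);
		if (!desc->dram_base_addr_v)
			return -ENOMEM;
		desc->dram_size = size;
		return 0;
	}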
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h index 85b6d241ea82..7187917533d0 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hal.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h | |||
@@ -81,6 +81,31 @@ enum hal_ae_csr { | |||
81 | LOCAL_CSR_STATUS = 0x180, | 81 | LOCAL_CSR_STATUS = 0x180, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | enum fcu_csr { | ||
85 | FCU_CONTROL = 0x8c0, | ||
86 | FCU_STATUS = 0x8c4, | ||
87 | FCU_STATUS1 = 0x8c8, | ||
88 | FCU_DRAM_ADDR_LO = 0x8cc, | ||
89 | FCU_DRAM_ADDR_HI = 0x8d0, | ||
90 | FCU_RAMBASE_ADDR_HI = 0x8d4, | ||
91 | FCU_RAMBASE_ADDR_LO = 0x8d8 | ||
92 | }; | ||
93 | |||
94 | enum fcu_cmd { | ||
95 | FCU_CTRL_CMD_NOOP = 0, | ||
96 | FCU_CTRL_CMD_AUTH = 1, | ||
97 | FCU_CTRL_CMD_LOAD = 2, | ||
98 | FCU_CTRL_CMD_START = 3 | ||
99 | }; | ||
100 | |||
101 | enum fcu_sts { | ||
102 | FCU_STS_NO_STS = 0, | ||
103 | FCU_STS_VERI_DONE = 1, | ||
104 | FCU_STS_LOAD_DONE = 2, | ||
105 | FCU_STS_VERI_FAIL = 3, | ||
106 | FCU_STS_LOAD_FAIL = 4, | ||
107 | FCU_STS_BUSY = 5 | ||
108 | }; | ||
84 | #define UA_ECS (0x1 << 31) | 109 | #define UA_ECS (0x1 << 31) |
85 | #define ACS_ABO_BITPOS 31 | 110 | #define ACS_ABO_BITPOS 31 |
86 | #define ACS_ACNO 0x7 | 111 | #define ACS_ACNO 0x7 |
@@ -98,6 +123,13 @@ enum hal_ae_csr { | |||
98 | #define LCS_STATUS (0x1) | 123 | #define LCS_STATUS (0x1) |
99 | #define MMC_SHARE_CS_BITPOS 2 | 124 | #define MMC_SHARE_CS_BITPOS 2 |
100 | #define GLOBAL_CSR 0xA00 | 125 | #define GLOBAL_CSR 0xA00 |
126 | #define FCU_CTRL_AE_POS 0x8 | ||
127 | #define FCU_AUTH_STS_MASK 0x7 | ||
128 | #define FCU_STS_DONE_POS 0x9 | ||
129 | #define FCU_STS_AUTHFWLD_POS 0x8 | ||
130 | #define FCU_LOADED_AE_POS 0x16 | ||
131 | #define FW_AUTH_WAIT_PERIOD 10 | ||
132 | #define FW_AUTH_MAX_RETRY 300 | ||
101 | 133 | ||
102 | #define SET_CAP_CSR(handle, csr, val) \ | 134 | #define SET_CAP_CSR(handle, csr, val) \ |
103 | ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) | 135 | ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) |
@@ -106,14 +138,14 @@ enum hal_ae_csr { | |||
106 | #define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) | 138 | #define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) |
107 | #define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) | 139 | #define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) |
108 | #define AE_CSR(handle, ae) \ | 140 | #define AE_CSR(handle, ae) \ |
109 | (handle->hal_cap_ae_local_csr_addr_v + \ | 141 | ((char __iomem *)handle->hal_cap_ae_local_csr_addr_v + \ |
110 | ((ae & handle->hal_handle->ae_mask) << 12)) | 142 | ((ae & handle->hal_handle->ae_mask) << 12)) |
111 | #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) | 143 | #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) |
112 | #define SET_AE_CSR(handle, ae, csr, val) \ | 144 | #define SET_AE_CSR(handle, ae, csr, val) \ |
113 | ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) | 145 | ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) |
114 | #define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) | 146 | #define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) |
115 | #define AE_XFER(handle, ae) \ | 147 | #define AE_XFER(handle, ae) \ |
116 | (handle->hal_cap_ae_xfer_csr_addr_v + \ | 148 | ((char __iomem *)handle->hal_cap_ae_xfer_csr_addr_v + \ |
117 | ((ae & handle->hal_handle->ae_mask) << 12)) | 149 | ((ae & handle->hal_handle->ae_mask) << 12)) |
118 | #define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ | 150 | #define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ |
119 | ((reg & 0xff) << 2)) | 151 | ((reg & 0xff) << 2)) |
@@ -121,5 +153,4 @@ enum hal_ae_csr { | |||
121 | ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) | 153 | ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) |
122 | #define SRAM_WRITE(handle, addr, val) \ | 154 | #define SRAM_WRITE(handle, addr, val) \ |
123 | ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) | 155 | ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) |
124 | #define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr) | ||
125 | #endif | 156 | #endif |
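The fcu_cmd/fcu_sts enums and the FW_AUTH_* constants describe a simple command-and-poll protocol against the Firmware Control Unit: write a command to FCU_CONTROL, then poll FCU_STATUS until the expected state appears or the retry budget runs out. A hedged sketch of that loop (the helper is illustrative; qat_hal_start() later in this patch uses the same pattern with the DONE bit):

	#include <linux/delay.h>

	/* Sketch: issue one FCU command and wait for a terminal status. */
	static int my_fcu_cmd_wait(struct icp_qat_fw_loader_handle *handle,
				   enum fcu_cmd cmd, enum fcu_sts expected)
	{
		unsigned int sts;
		int retry = 0;

		SET_CAP_CSR(handle, FCU_CONTROL, cmd);
		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			sts = GET_CAP_CSR(handle, FCU_STATUS) & FCU_AUTH_STS_MASK;
			if (sts == expected)
				return 0;
			if (sts == FCU_STS_VERI_FAIL || sts == FCU_STS_LOAD_FAIL)
				return -EFAULT;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		return -EFAULT;	/* still busy after the retry budget */
	}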
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 2132a8cbc4ec..d97db990955d 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h | |||
@@ -47,32 +47,55 @@ | |||
47 | #ifndef __ICP_QAT_UCLO_H__ | 47 | #ifndef __ICP_QAT_UCLO_H__ |
48 | #define __ICP_QAT_UCLO_H__ | 48 | #define __ICP_QAT_UCLO_H__ |
49 | 49 | ||
50 | #define ICP_QAT_AC_C_CPU_TYPE 0x00400000 | 50 | #define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000 |
51 | #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 | ||
52 | #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 | ||
51 | #define ICP_QAT_UCLO_MAX_AE 12 | 53 | #define ICP_QAT_UCLO_MAX_AE 12 |
52 | #define ICP_QAT_UCLO_MAX_CTX 8 | 54 | #define ICP_QAT_UCLO_MAX_CTX 8 |
53 | #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) | 55 | #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) |
54 | #define ICP_QAT_UCLO_MAX_USTORE 0x4000 | 56 | #define ICP_QAT_UCLO_MAX_USTORE 0x4000 |
55 | #define ICP_QAT_UCLO_MAX_XFER_REG 128 | 57 | #define ICP_QAT_UCLO_MAX_XFER_REG 128 |
56 | #define ICP_QAT_UCLO_MAX_GPR_REG 128 | 58 | #define ICP_QAT_UCLO_MAX_GPR_REG 128 |
57 | #define ICP_QAT_UCLO_MAX_NN_REG 128 | ||
58 | #define ICP_QAT_UCLO_MAX_LMEM_REG 1024 | 59 | #define ICP_QAT_UCLO_MAX_LMEM_REG 1024 |
59 | #define ICP_QAT_UCLO_AE_ALL_CTX 0xff | 60 | #define ICP_QAT_UCLO_AE_ALL_CTX 0xff |
60 | #define ICP_QAT_UOF_OBJID_LEN 8 | 61 | #define ICP_QAT_UOF_OBJID_LEN 8 |
61 | #define ICP_QAT_UOF_FID 0xc6c2 | 62 | #define ICP_QAT_UOF_FID 0xc6c2 |
62 | #define ICP_QAT_UOF_MAJVER 0x4 | 63 | #define ICP_QAT_UOF_MAJVER 0x4 |
63 | #define ICP_QAT_UOF_MINVER 0x11 | 64 | #define ICP_QAT_UOF_MINVER 0x11 |
64 | #define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff | ||
65 | #define ICP_QAT_UOF_OBJS "UOF_OBJS" | 65 | #define ICP_QAT_UOF_OBJS "UOF_OBJS" |
66 | #define ICP_QAT_UOF_STRT "UOF_STRT" | 66 | #define ICP_QAT_UOF_STRT "UOF_STRT" |
67 | #define ICP_QAT_UOF_GTID "UOF_GTID" | ||
68 | #define ICP_QAT_UOF_IMAG "UOF_IMAG" | 67 | #define ICP_QAT_UOF_IMAG "UOF_IMAG" |
69 | #define ICP_QAT_UOF_IMEM "UOF_IMEM" | 68 | #define ICP_QAT_UOF_IMEM "UOF_IMEM" |
70 | #define ICP_QAT_UOF_MSEG "UOF_MSEG" | ||
71 | #define ICP_QAT_UOF_LOCAL_SCOPE 1 | 69 | #define ICP_QAT_UOF_LOCAL_SCOPE 1 |
72 | #define ICP_QAT_UOF_INIT_EXPR 0 | 70 | #define ICP_QAT_UOF_INIT_EXPR 0 |
73 | #define ICP_QAT_UOF_INIT_REG 1 | 71 | #define ICP_QAT_UOF_INIT_REG 1 |
74 | #define ICP_QAT_UOF_INIT_REG_CTX 2 | 72 | #define ICP_QAT_UOF_INIT_REG_CTX 2 |
75 | #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 | 73 | #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 |
74 | #define ICP_QAT_SUOF_OBJ_ID_LEN 8 | ||
75 | #define ICP_QAT_SUOF_FID 0x53554f46 | ||
76 | #define ICP_QAT_SUOF_MAJVER 0x0 | ||
77 | #define ICP_QAT_SUOF_MINVER 0x1 | ||
78 | #define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) | ||
79 | #define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) | ||
80 | #define ICP_QAT_CSS_FWSK_MODULUS_LEN 256 | ||
81 | #define ICP_QAT_CSS_FWSK_EXPONENT_LEN 4 | ||
82 | #define ICP_QAT_CSS_FWSK_PAD_LEN 252 | ||
83 | #define ICP_QAT_CSS_FWSK_PUB_LEN (ICP_QAT_CSS_FWSK_MODULUS_LEN + \ | ||
84 | ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ | ||
85 | ICP_QAT_CSS_FWSK_PAD_LEN) | ||
86 | #define ICP_QAT_CSS_SIGNATURE_LEN 256 | ||
87 | #define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \ | ||
88 | ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \ | ||
89 | ICP_QAT_SIMG_AE_INSTS_LEN) | ||
90 | #define ICP_QAT_CSS_AE_SIMG_LEN (sizeof(struct icp_qat_css_hdr) + \ | ||
91 | ICP_QAT_CSS_FWSK_PUB_LEN + \ | ||
92 | ICP_QAT_CSS_SIGNATURE_LEN + \ | ||
93 | ICP_QAT_CSS_AE_IMG_LEN) | ||
94 | #define ICP_QAT_AE_IMG_OFFSET (sizeof(struct icp_qat_css_hdr) + \ | ||
95 | ICP_QAT_CSS_FWSK_MODULUS_LEN + \ | ||
96 | ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ | ||
97 | ICP_QAT_CSS_SIGNATURE_LEN) | ||
98 | #define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000 | ||
76 | 99 | ||
77 | #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) | 100 | #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) |
78 | #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) | 101 | #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) |
@@ -112,6 +135,11 @@ enum icp_qat_uof_regtype { | |||
112 | ICP_NEIGH_REL, | 135 | ICP_NEIGH_REL, |
113 | }; | 136 | }; |
114 | 137 | ||
138 | enum icp_qat_css_fwtype { | ||
139 | CSS_AE_FIRMWARE = 0, | ||
140 | CSS_MMP_FIRMWARE = 1 | ||
141 | }; | ||
142 | |||
115 | struct icp_qat_uclo_page { | 143 | struct icp_qat_uclo_page { |
116 | struct icp_qat_uclo_encap_page *encap_page; | 144 | struct icp_qat_uclo_encap_page *encap_page; |
117 | struct icp_qat_uclo_region *region; | 145 | struct icp_qat_uclo_region *region; |
@@ -235,7 +263,7 @@ struct icp_qat_uof_filechunkhdr { | |||
235 | }; | 263 | }; |
236 | 264 | ||
237 | struct icp_qat_uof_objhdr { | 265 | struct icp_qat_uof_objhdr { |
238 | unsigned int cpu_type; | 266 | unsigned int ac_dev_type; |
239 | unsigned short min_cpu_ver; | 267 | unsigned short min_cpu_ver; |
240 | unsigned short max_cpu_ver; | 268 | unsigned short max_cpu_ver; |
241 | short max_chunks; | 269 | short max_chunks; |
@@ -326,7 +354,7 @@ struct icp_qat_uof_image { | |||
326 | unsigned int img_name; | 354 | unsigned int img_name; |
327 | unsigned int ae_assigned; | 355 | unsigned int ae_assigned; |
328 | unsigned int ctx_assigned; | 356 | unsigned int ctx_assigned; |
329 | unsigned int cpu_type; | 357 | unsigned int ac_dev_type; |
330 | unsigned int entry_address; | 358 | unsigned int entry_address; |
331 | unsigned int fill_pattern[2]; | 359 | unsigned int fill_pattern[2]; |
332 | unsigned int reloadable_size; | 360 | unsigned int reloadable_size; |
@@ -374,4 +402,127 @@ struct icp_qat_uof_batch_init { | |||
374 | unsigned int size; | 402 | unsigned int size; |
375 | struct icp_qat_uof_batch_init *next; | 403 | struct icp_qat_uof_batch_init *next; |
376 | }; | 404 | }; |
405 | |||
406 | struct icp_qat_suof_img_hdr { | ||
407 | char *simg_buf; | ||
408 | unsigned long simg_len; | ||
409 | char *css_header; | ||
410 | char *css_key; | ||
411 | char *css_signature; | ||
412 | char *css_simg; | ||
413 | unsigned long simg_size; | ||
414 | unsigned int ae_num; | ||
415 | unsigned int ae_mask; | ||
416 | unsigned int fw_type; | ||
417 | unsigned long simg_name; | ||
418 | unsigned long appmeta_data; | ||
419 | }; | ||
420 | |||
421 | struct icp_qat_suof_img_tbl { | ||
422 | unsigned int num_simgs; | ||
423 | struct icp_qat_suof_img_hdr *simg_hdr; | ||
424 | }; | ||
425 | |||
426 | struct icp_qat_suof_handle { | ||
427 | unsigned int file_id; | ||
428 | unsigned int check_sum; | ||
429 | char min_ver; | ||
430 | char maj_ver; | ||
431 | char fw_type; | ||
432 | char *suof_buf; | ||
433 | unsigned int suof_size; | ||
434 | char *sym_str; | ||
435 | unsigned int sym_size; | ||
436 | struct icp_qat_suof_img_tbl img_table; | ||
437 | }; | ||
438 | |||
439 | struct icp_qat_fw_auth_desc { | ||
440 | unsigned int img_len; | ||
441 | unsigned int reserved; | ||
442 | unsigned int css_hdr_high; | ||
443 | unsigned int css_hdr_low; | ||
444 | unsigned int img_high; | ||
445 | unsigned int img_low; | ||
446 | unsigned int signature_high; | ||
447 | unsigned int signature_low; | ||
448 | unsigned int fwsk_pub_high; | ||
449 | unsigned int fwsk_pub_low; | ||
450 | unsigned int img_ae_mode_data_high; | ||
451 | unsigned int img_ae_mode_data_low; | ||
452 | unsigned int img_ae_init_data_high; | ||
453 | unsigned int img_ae_init_data_low; | ||
454 | unsigned int img_ae_insts_high; | ||
455 | unsigned int img_ae_insts_low; | ||
456 | }; | ||
457 | |||
458 | struct icp_qat_auth_chunk { | ||
459 | struct icp_qat_fw_auth_desc fw_auth_desc; | ||
460 | u64 chunk_size; | ||
461 | u64 chunk_bus_addr; | ||
462 | }; | ||
463 | |||
464 | struct icp_qat_css_hdr { | ||
465 | unsigned int module_type; | ||
466 | unsigned int header_len; | ||
467 | unsigned int header_ver; | ||
468 | unsigned int module_id; | ||
469 | unsigned int module_vendor; | ||
470 | unsigned int date; | ||
471 | unsigned int size; | ||
472 | unsigned int key_size; | ||
473 | unsigned int module_size; | ||
474 | unsigned int exponent_size; | ||
475 | unsigned int fw_type; | ||
476 | unsigned int reserved[21]; | ||
477 | }; | ||
478 | |||
479 | struct icp_qat_simg_ae_mode { | ||
480 | unsigned int file_id; | ||
481 | unsigned short maj_ver; | ||
482 | unsigned short min_ver; | ||
483 | unsigned int dev_type; | ||
484 | unsigned short devmax_ver; | ||
485 | unsigned short devmin_ver; | ||
486 | unsigned int ae_mask; | ||
487 | unsigned int ctx_enables; | ||
488 | char fw_type; | ||
489 | char ctx_mode; | ||
490 | char nn_mode; | ||
491 | char lm0_mode; | ||
492 | char lm1_mode; | ||
493 | char scs_mode; | ||
494 | char lm2_mode; | ||
495 | char lm3_mode; | ||
496 | char tindex_mode; | ||
497 | unsigned char reserved[7]; | ||
498 | char simg_name[256]; | ||
499 | char appmeta_data[256]; | ||
500 | }; | ||
501 | |||
502 | struct icp_qat_suof_filehdr { | ||
503 | unsigned int file_id; | ||
504 | unsigned int check_sum; | ||
505 | char min_ver; | ||
506 | char maj_ver; | ||
507 | char fw_type; | ||
508 | char reserved; | ||
509 | unsigned short max_chunks; | ||
510 | unsigned short num_chunks; | ||
511 | }; | ||
512 | |||
513 | struct icp_qat_suof_chunk_hdr { | ||
514 | char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN]; | ||
515 | u64 offset; | ||
516 | u64 size; | ||
517 | }; | ||
518 | |||
519 | struct icp_qat_suof_strtable { | ||
520 | unsigned int tab_length; | ||
521 | unsigned int strings; | ||
522 | }; | ||
523 | |||
524 | struct icp_qat_suof_objhdr { | ||
525 | unsigned int img_length; | ||
526 | unsigned int reserved; | ||
527 | }; | ||
377 | #endif | 528 | #endif |
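Taken together, the new structs define the SUOF container: an icp_qat_suof_filehdr, then num_chunks chunk headers, where chunk 0 carries the symbol string table and each later chunk wraps an icp_qat_suof_objhdr followed by one CSS-signed image (header, public key, signature, AE image; ICP_QAT_AE_IMG_OFFSET encodes exactly that prefix). A hedged sketch of walking a mapped blob, with no validation shown:

	/* Sketch: enumerate the signed images in a SUOF buffer. */
	static void my_walk_suof(char *buf)
	{
		struct icp_qat_suof_filehdr *fh = (void *)buf;
		struct icp_qat_suof_chunk_hdr *ch = (void *)(buf + sizeof(*fh));
		unsigned int i;

		for (i = 1; i < fh->num_chunks; i++) {	/* chunk 0 = symbols */
			struct icp_qat_suof_objhdr *oh =
				(void *)(buf + ch[i].offset);
			char *css_hdr = (char *)(oh + 1);
			char *ae_img = css_hdr + ICP_QAT_AE_IMG_OFFSET;

			pr_info("SUOF image %u: %u bytes, AE image at +%u\n",
				i, oh->img_length,
				(unsigned int)(ae_img - css_hdr));
		}
	}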
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 9cab15497f04..3852d31ce0a4 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include "adf_accel_devices.h" | 49 | #include "adf_accel_devices.h" |
50 | #include "adf_common_drv.h" | 50 | #include "adf_common_drv.h" |
51 | #include "adf_transport.h" | 51 | #include "adf_transport.h" |
52 | #include "adf_transport_access_macros.h" | ||
52 | #include "adf_cfg.h" | 53 | #include "adf_cfg.h" |
53 | #include "adf_cfg_strings.h" | 54 | #include "adf_cfg_strings.h" |
54 | #include "qat_crypto.h" | 55 | #include "qat_crypto.h" |
@@ -66,13 +67,10 @@ void qat_crypto_put_instance(struct qat_crypto_instance *inst) | |||
66 | 67 | ||
67 | static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) | 68 | static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) |
68 | { | 69 | { |
69 | struct qat_crypto_instance *inst; | 70 | struct qat_crypto_instance *inst, *tmp; |
70 | struct list_head *list_ptr, *tmp; | ||
71 | int i; | 71 | int i; |
72 | 72 | ||
73 | list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) { | 73 | list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) { |
74 | inst = list_entry(list_ptr, struct qat_crypto_instance, list); | ||
75 | |||
76 | for (i = 0; i < atomic_read(&inst->refctr); i++) | 74 | for (i = 0; i < atomic_read(&inst->refctr); i++) |
77 | qat_crypto_put_instance(inst); | 75 | qat_crypto_put_instance(inst); |
78 | 76 | ||
@@ -88,7 +86,7 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) | |||
88 | if (inst->pke_rx) | 86 | if (inst->pke_rx) |
89 | adf_remove_ring(inst->pke_rx); | 87 | adf_remove_ring(inst->pke_rx); |
90 | 88 | ||
91 | list_del(list_ptr); | 89 | list_del(&inst->list); |
92 | kfree(inst); | 90 | kfree(inst); |
93 | } | 91 | } |
94 | return 0; | 92 | return 0; |
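The list_for_each_entry_safe() conversion folds the list_entry() lookup into the iterator and, because the next element is cached in tmp, keeps the list_del()/kfree() inside the body safe. A self-contained sketch of the idiom with an illustrative element type:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct my_node {
		struct list_head list;
	};

	/* Sketch: delete while iterating; 'tmp' preserves the successor. */
	static void my_free_all(struct list_head *head)
	{
		struct my_node *n, *tmp;

		list_for_each_entry_safe(n, tmp, head, list) {
			list_del(&n->list);
			kfree(n);
		}
	}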
@@ -96,17 +94,13 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) | |||
96 | 94 | ||
97 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node) | 95 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node) |
98 | { | 96 | { |
99 | struct adf_accel_dev *accel_dev = NULL; | 97 | struct adf_accel_dev *accel_dev = NULL, *tmp_dev; |
100 | struct qat_crypto_instance *inst = NULL; | 98 | struct qat_crypto_instance *inst = NULL, *tmp_inst; |
101 | struct list_head *itr; | ||
102 | unsigned long best = ~0; | 99 | unsigned long best = ~0; |
103 | 100 | ||
104 | list_for_each(itr, adf_devmgr_get_head()) { | 101 | list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) { |
105 | struct adf_accel_dev *tmp_dev; | ||
106 | unsigned long ctr; | 102 | unsigned long ctr; |
107 | 103 | ||
108 | tmp_dev = list_entry(itr, struct adf_accel_dev, list); | ||
109 | |||
110 | if ((node == dev_to_node(&GET_DEV(tmp_dev)) || | 104 | if ((node == dev_to_node(&GET_DEV(tmp_dev)) || |
111 | dev_to_node(&GET_DEV(tmp_dev)) < 0) && | 105 | dev_to_node(&GET_DEV(tmp_dev)) < 0) && |
112 | adf_dev_started(tmp_dev) && | 106 | adf_dev_started(tmp_dev) && |
@@ -118,19 +112,16 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) | |||
118 | } | 112 | } |
119 | } | 113 | } |
120 | } | 114 | } |
121 | if (!accel_dev) | ||
122 | pr_info("QAT: Could not find a device on node %d\n", node); | ||
123 | |||
124 | /* Get any started device */ | ||
125 | list_for_each(itr, adf_devmgr_get_head()) { | ||
126 | struct adf_accel_dev *tmp_dev; | ||
127 | 115 | ||
128 | tmp_dev = list_entry(itr, struct adf_accel_dev, list); | 116 | if (!accel_dev) { |
129 | 117 | pr_info("QAT: Could not find a device on node %d\n", node); | |
130 | if (adf_dev_started(tmp_dev) && | 118 | /* Get any started device */ |
131 | !list_empty(&tmp_dev->crypto_list)) { | 119 | list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) { |
132 | accel_dev = tmp_dev; | 120 | if (adf_dev_started(tmp_dev) && |
133 | break; | 121 | !list_empty(&tmp_dev->crypto_list)) { |
122 | accel_dev = tmp_dev; | ||
123 | break; | ||
124 | } | ||
134 | } | 125 | } |
135 | } | 126 | } |
136 | 127 | ||
@@ -138,11 +129,9 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) | |||
138 | return NULL; | 129 | return NULL; |
139 | 130 | ||
140 | best = ~0; | 131 | best = ~0; |
141 | list_for_each(itr, &accel_dev->crypto_list) { | 132 | list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) { |
142 | struct qat_crypto_instance *tmp_inst; | ||
143 | unsigned long ctr; | 133 | unsigned long ctr; |
144 | 134 | ||
145 | tmp_inst = list_entry(itr, struct qat_crypto_instance, list); | ||
146 | ctr = atomic_read(&tmp_inst->refctr); | 135 | ctr = atomic_read(&tmp_inst->refctr); |
147 | if (best > ctr) { | 136 | if (best > ctr) { |
148 | inst = tmp_inst; | 137 | inst = tmp_inst; |
@@ -159,6 +148,97 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) | |||
159 | return inst; | 148 | return inst; |
160 | } | 149 | } |
161 | 150 | ||
151 | /** | ||
152 | * qat_crypto_dev_config() - create dev config required to create crypto inst. | ||
153 | * | ||
154 | * @accel_dev: Pointer to acceleration device. | ||
155 | * | ||
156 | * Function creates device configuration required to create crypto instances | ||
157 | * | ||
158 | * Return: 0 on success, error code otherwise. | ||
159 | */ | ||
160 | int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) | ||
161 | { | ||
162 | int cpus = num_online_cpus(); | ||
163 | int banks = GET_MAX_BANKS(accel_dev); | ||
164 | int instances = min(cpus, banks); | ||
165 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
166 | int i; | ||
167 | unsigned long val; | ||
168 | |||
169 | if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) | ||
170 | goto err; | ||
171 | if (adf_cfg_section_add(accel_dev, "Accelerator0")) | ||
172 | goto err; | ||
173 | for (i = 0; i < instances; i++) { | ||
174 | val = i; | ||
175 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); | ||
176 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
177 | key, (void *)&val, ADF_DEC)) | ||
178 | goto err; | ||
179 | |||
180 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, | ||
181 | i); | ||
182 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
183 | key, (void *)&val, ADF_DEC)) | ||
184 | goto err; | ||
185 | |||
186 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); | ||
187 | val = 128; | ||
188 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
189 | key, (void *)&val, ADF_DEC)) | ||
190 | goto err; | ||
191 | |||
192 | val = 512; | ||
193 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); | ||
194 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
195 | key, (void *)&val, ADF_DEC)) | ||
196 | goto err; | ||
197 | |||
198 | val = 0; | ||
199 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); | ||
200 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
201 | key, (void *)&val, ADF_DEC)) | ||
202 | goto err; | ||
203 | |||
204 | val = 2; | ||
205 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); | ||
206 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
207 | key, (void *)&val, ADF_DEC)) | ||
208 | goto err; | ||
209 | |||
210 | val = 8; | ||
211 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); | ||
212 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
213 | key, (void *)&val, ADF_DEC)) | ||
214 | goto err; | ||
215 | |||
216 | val = 10; | ||
217 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); | ||
218 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
219 | key, (void *)&val, ADF_DEC)) | ||
220 | goto err; | ||
221 | |||
222 | val = ADF_COALESCING_DEF_TIME; | ||
223 | snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); | ||
224 | if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", | ||
225 | key, (void *)&val, ADF_DEC)) | ||
226 | goto err; | ||
227 | } | ||
228 | |||
229 | val = i; | ||
230 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
231 | ADF_NUM_CY, (void *)&val, ADF_DEC)) | ||
232 | goto err; | ||
233 | |||
234 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
235 | return 0; | ||
236 | err: | ||
237 | dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); | ||
238 | return -EINVAL; | ||
239 | } | ||
240 | EXPORT_SYMBOL_GPL(qat_crypto_dev_config); | ||
241 | |||
162 | static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) | 242 | static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) |
163 | { | 243 | { |
164 | int i; | 244 | int i; |
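qat_crypto_dev_config() seeds the in-kernel config tables with one "cy" crypto instance per online CPU (capped at the bank count), fixed asym/sym ring sizes, and default interrupt coalescing, then marks the device configured. A hedged sketch of where a device driver would call it during probe; adf_dev_init() and adf_dev_start() are existing ADF entry points, and the ordering shown is the usual bring-up sequence rather than something this hunk mandates:

	/* Sketch: configuration must exist before the device is started,
	 * since instance creation reads the generated keys. */
	static int my_probe_bringup(struct adf_accel_dev *accel_dev)
	{
		int ret;

		ret = qat_crypto_dev_config(accel_dev);
		if (ret)
			return ret;
		ret = adf_dev_init(accel_dev);
		if (ret)
			return ret;
		return adf_dev_start(accel_dev);
	}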
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 380e761801a7..0ac0ba867611 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c | |||
@@ -45,21 +45,22 @@ | |||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
46 | */ | 46 | */ |
47 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
48 | #include <linux/delay.h> | ||
48 | 49 | ||
49 | #include "adf_accel_devices.h" | 50 | #include "adf_accel_devices.h" |
50 | #include "adf_common_drv.h" | 51 | #include "adf_common_drv.h" |
51 | #include "icp_qat_hal.h" | 52 | #include "icp_qat_hal.h" |
52 | #include "icp_qat_uclo.h" | 53 | #include "icp_qat_uclo.h" |
53 | 54 | ||
54 | #define BAD_REGADDR 0xffff | 55 | #define BAD_REGADDR 0xffff |
55 | #define MAX_RETRY_TIMES 10000 | 56 | #define MAX_RETRY_TIMES 10000 |
56 | #define INIT_CTX_ARB_VALUE 0x0 | 57 | #define INIT_CTX_ARB_VALUE 0x0 |
57 | #define INIT_CTX_ENABLE_VALUE 0x0 | 58 | #define INIT_CTX_ENABLE_VALUE 0x0 |
58 | #define INIT_PC_VALUE 0x0 | 59 | #define INIT_PC_VALUE 0x0 |
59 | #define INIT_WAKEUP_EVENTS_VALUE 0x1 | 60 | #define INIT_WAKEUP_EVENTS_VALUE 0x1 |
60 | #define INIT_SIG_EVENTS_VALUE 0x1 | 61 | #define INIT_SIG_EVENTS_VALUE 0x1 |
61 | #define INIT_CCENABLE_VALUE 0x2000 | 62 | #define INIT_CCENABLE_VALUE 0x2000 |
62 | #define RST_CSR_QAT_LSB 20 | 63 | #define RST_CSR_QAT_LSB 20 |
63 | #define RST_CSR_AE_LSB 0 | 64 | #define RST_CSR_AE_LSB 0 |
64 | #define MC_TIMESTAMP_ENABLE (0x1 << 7) | 65 | #define MC_TIMESTAMP_ENABLE (0x1 << 7) |
65 | 66 | ||
@@ -185,7 +186,7 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, | |||
185 | if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) | 186 | if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) |
186 | return 0; | 187 | return 0; |
187 | } | 188 | } |
188 | if (!times) { | 189 | if (times < 0) { |
189 | pr_err("QAT: wait_num_cycles time out\n"); | 190 | pr_err("QAT: wait_num_cycles time out\n"); |
190 | return -EFAULT; | 191 | return -EFAULT; |
191 | } | 192 | } |
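The (times < 0) test fixes a dead timeout check: with a post-decrementing retry counter (the pattern used here), the variable has already passed 0 and landed on -1 by the time the loop gives up, so !times could never be true on exhaustion. A tiny standalone illustration of the off-by-one:

	/* Sketch: after 'while (times--)' runs dry, times is -1, not 0. */
	#include <stdio.h>

	int main(void)
	{
		int times = 3;

		while (times--)
			;			/* retry body elided */
		printf("times = %d\n", times);	/* prints -1 */
		return 0;
	}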
@@ -391,9 +392,6 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) | |||
391 | unsigned int times = MAX_RETRY_TIMES; | 392 | unsigned int times = MAX_RETRY_TIMES; |
392 | 393 | ||
393 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 394 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
394 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
395 | continue; | ||
396 | |||
397 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, | 395 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, |
398 | (unsigned int *)&base_cnt); | 396 | (unsigned int *)&base_cnt); |
399 | base_cnt &= 0xffff; | 397 | base_cnt &= 0xffff; |
@@ -413,6 +411,20 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) | |||
413 | return 0; | 411 | return 0; |
414 | } | 412 | } |
415 | 413 | ||
414 | int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, | ||
415 | unsigned int ae) | ||
416 | { | ||
417 | unsigned int enable = 0, active = 0; | ||
418 | |||
419 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); | ||
420 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); | ||
421 | if ((enable & (0xff << CE_ENABLE_BITPOS)) || | ||
422 | (active & (1 << ACS_ABO_BITPOS))) | ||
423 | return 1; | ||
424 | else | ||
425 | return 0; | ||
426 | } | ||
427 | |||
416 | static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) | 428 | static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) |
417 | { | 429 | { |
418 | unsigned int misc_ctl; | 430 | unsigned int misc_ctl; |
@@ -425,8 +437,6 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) | |||
425 | (~MC_TIMESTAMP_ENABLE)); | 437 | (~MC_TIMESTAMP_ENABLE)); |
426 | 438 | ||
427 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 439 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
428 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
429 | continue; | ||
430 | qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); | 440 | qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); |
431 | qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); | 441 | qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); |
432 | } | 442 | } |
@@ -440,8 +450,9 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) | |||
440 | #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C | 450 | #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C |
441 | static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) | 451 | static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) |
442 | { | 452 | { |
443 | void __iomem *csr_addr = handle->hal_ep_csr_addr_v + | 453 | void __iomem *csr_addr = |
444 | ESRAM_AUTO_INIT_CSR_OFFSET; | 454 | (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v + |
455 | ESRAM_AUTO_INIT_CSR_OFFSET); | ||
445 | unsigned int csr_val, times = 30; | 456 | unsigned int csr_val, times = 30; |
446 | 457 | ||
447 | csr_val = ADF_CSR_RD(csr_addr, 0); | 458 | csr_val = ADF_CSR_RD(csr_addr, 0); |
@@ -493,8 +504,6 @@ int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) | |||
493 | 504 | ||
494 | /* Set undefined power-up/reset states to reasonable default values */ | 505 | /* Set undefined power-up/reset states to reasonable default values */ |
495 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 506 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
496 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
497 | continue; | ||
498 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, | 507 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, |
499 | INIT_CTX_ENABLE_VALUE); | 508 | INIT_CTX_ENABLE_VALUE); |
500 | qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, | 509 | qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, |
@@ -598,25 +607,31 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, | |||
598 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); | 607 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); |
599 | } | 608 | } |
600 | 609 | ||
601 | static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | 610 | static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle) |
602 | { | 611 | { |
603 | unsigned char ae; | 612 | unsigned char ae; |
604 | unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; | ||
605 | int times = MAX_RETRY_TIMES; | ||
606 | unsigned int csr_val = 0; | ||
607 | unsigned short reg; | 613 | unsigned short reg; |
608 | unsigned int savctx = 0; | ||
609 | int ret = 0; | ||
610 | 614 | ||
611 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 615 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
612 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
613 | continue; | ||
614 | for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { | 616 | for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { |
615 | qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, | 617 | qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, |
616 | reg, 0); | 618 | reg, 0); |
617 | qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, | 619 | qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, |
618 | reg, 0); | 620 | reg, 0); |
619 | } | 621 | } |
622 | } | ||
623 | } | ||
624 | |||
625 | static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | ||
626 | { | ||
627 | unsigned char ae; | ||
628 | unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; | ||
629 | int times = MAX_RETRY_TIMES; | ||
630 | unsigned int csr_val = 0; | ||
631 | unsigned int savctx = 0; | ||
632 | int ret = 0; | ||
633 | |||
634 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
620 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); | 635 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); |
621 | csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); | 636 | csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); |
622 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); | 637 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); |
@@ -638,8 +653,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | |||
638 | qat_hal_enable_ctx(handle, ae, ctx_mask); | 653 | qat_hal_enable_ctx(handle, ae, ctx_mask); |
639 | } | 654 | } |
640 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 655 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
641 | if (!(handle->hal_handle->ae_mask & (1 << ae))) | ||
642 | continue; | ||
643 | /* wait for AE to finish */ | 656 | /* wait for AE to finish */ |
644 | do { | 657 | do { |
645 | ret = qat_hal_wait_cycles(handle, ae, 20, 1); | 658 | ret = qat_hal_wait_cycles(handle, ae, 20, 1); |
@@ -667,10 +680,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | |||
667 | return 0; | 680 | return 0; |
668 | } | 681 | } |
669 | 682 | ||
670 | #define ICP_DH895XCC_AE_OFFSET 0x20000 | 683 | #define ICP_QAT_AE_OFFSET 0x20000 |
671 | #define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) | 684 | #define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000) |
672 | #define LOCAL_TO_XFER_REG_OFFSET 0x800 | 685 | #define LOCAL_TO_XFER_REG_OFFSET 0x800 |
673 | #define ICP_DH895XCC_EP_OFFSET 0x3a000 | 686 | #define ICP_QAT_EP_OFFSET 0x3a000 |
674 | int qat_hal_init(struct adf_accel_dev *accel_dev) | 687 | int qat_hal_init(struct adf_accel_dev *accel_dev) |
675 | { | 688 | { |
676 | unsigned char ae; | 689 | unsigned char ae; |
@@ -687,15 +700,22 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) | |||
687 | if (!handle) | 700 | if (!handle) |
688 | return -ENOMEM; | 701 | return -ENOMEM; |
689 | 702 | ||
690 | handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr + | ||
691 | ICP_DH895XCC_CAP_OFFSET; | ||
692 | handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr + | ||
693 | ICP_DH895XCC_AE_OFFSET; | ||
694 | handle->hal_ep_csr_addr_v = misc_bar->virt_addr + | ||
695 | ICP_DH895XCC_EP_OFFSET; | ||
696 | handle->hal_cap_ae_local_csr_addr_v = | ||
697 | handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; | ||
698 | handle->hal_sram_addr_v = sram_bar->virt_addr; | 703 | handle->hal_sram_addr_v = sram_bar->virt_addr; |
704 | handle->hal_cap_g_ctl_csr_addr_v = | ||
705 | (void __iomem *)((uintptr_t)misc_bar->virt_addr + | ||
706 | ICP_QAT_CAP_OFFSET); | ||
707 | handle->hal_cap_ae_xfer_csr_addr_v = | ||
708 | (void __iomem *)((uintptr_t)misc_bar->virt_addr + | ||
709 | ICP_QAT_AE_OFFSET); | ||
710 | handle->hal_ep_csr_addr_v = | ||
711 | (void __iomem *)((uintptr_t)misc_bar->virt_addr + | ||
712 | ICP_QAT_EP_OFFSET); | ||
713 | handle->hal_cap_ae_local_csr_addr_v = | ||
714 | (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + | ||
715 | LOCAL_TO_XFER_REG_OFFSET); | ||
716 | handle->pci_dev = pci_info->pci_dev; | ||
717 | handle->fw_auth = (handle->pci_dev->device == | ||
718 | ADF_DH895XCC_PCI_DEVICE_ID) ? false : true; | ||
699 | handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); | 719 | handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); |
700 | if (!handle->hal_handle) | 720 | if (!handle->hal_handle) |
701 | goto out_hal_handle; | 721 | goto out_hal_handle; |
@@ -723,14 +743,16 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) | |||
723 | dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n"); | 743 | dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n"); |
724 | goto out_err; | 744 | goto out_err; |
725 | } | 745 | } |
726 | if (qat_hal_clear_gpr(handle)) | 746 | qat_hal_clear_xfer(handle); |
727 | goto out_err; | 747 | if (!handle->fw_auth) { |
748 | if (qat_hal_clear_gpr(handle)) | ||
749 | goto out_err; | ||
750 | } | ||
751 | |||
728 | /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ | 752 | /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ |
729 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 753 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
730 | unsigned int csr_val = 0; | 754 | unsigned int csr_val = 0; |
731 | 755 | ||
732 | if (!(hw_data->ae_mask & (1 << ae))) | ||
733 | continue; | ||
734 | qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); | 756 | qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); |
735 | csr_val |= 0x1; | 757 | csr_val |= 0x1; |
736 | qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); | 758 | qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); |
@@ -756,15 +778,31 @@ void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) | |||
756 | void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | 778 | void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, |
757 | unsigned int ctx_mask) | 779 | unsigned int ctx_mask) |
758 | { | 780 | { |
759 | qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & | 781 | int retry = 0; |
782 | unsigned int fcu_sts = 0; | ||
783 | |||
784 | if (handle->fw_auth) { | ||
785 | SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START); | ||
786 | do { | ||
787 | msleep(FW_AUTH_WAIT_PERIOD); | ||
788 | fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); | ||
789 | if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1)) | ||
790 | return; | ||
791 | } while (retry++ < FW_AUTH_MAX_RETRY); | ||
792 | pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae, | ||
793 | fcu_sts); | ||
794 | } else { | ||
795 | qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & | ||
760 | ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); | 796 | ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); |
761 | qat_hal_enable_ctx(handle, ae, ctx_mask); | 797 | qat_hal_enable_ctx(handle, ae, ctx_mask); |
798 | } | ||
762 | } | 799 | } |
763 | 800 | ||
764 | void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, | 801 | void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, |
765 | unsigned int ctx_mask) | 802 | unsigned int ctx_mask) |
766 | { | 803 | { |
767 | qat_hal_disable_ctx(handle, ae, ctx_mask); | 804 | if (!handle->fw_auth) |
805 | qat_hal_disable_ctx(handle, ae, ctx_mask); | ||
768 | } | 806 | } |
769 | 807 | ||
770 | void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, | 808 | void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, |
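qat_hal_start() and qat_hal_stop() now branch on handle->fw_auth, set during qat_hal_init() from the PCI device ID: parts with signed firmware are started through FCU_CTRL_CMD_START plus a poll on the FCU_STATUS done bit, and stop becomes a no-op because the FCU owns the engines, while dh895xcc keeps the direct wakeup-event/context-enable path. A hedged sketch of a caller that stays agnostic to the mode; looping per AE mirrors how the existing loader code drives qat_hal_start(), though on fw_auth parts the FCU start covers the engines collectively:

	/* Sketch: start every engine; qat_hal_start() picks the right
	 * mechanism for the device internally. */
	static void my_start_all_aes(struct icp_qat_fw_loader_handle *handle)
	{
		unsigned char ae;

		for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++)
			qat_hal_start(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
	}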
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index c48f181e8941..25d15f19c2b3 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
48 | #include <linux/ctype.h> | 48 | #include <linux/ctype.h> |
49 | #include <linux/kernel.h> | 49 | #include <linux/kernel.h> |
50 | 50 | #include <linux/delay.h> | |
51 | #include "adf_accel_devices.h" | 51 | #include "adf_accel_devices.h" |
52 | #include "adf_common_drv.h" | 52 | #include "adf_common_drv.h" |
53 | #include "icp_qat_uclo.h" | 53 | #include "icp_qat_uclo.h" |
@@ -119,10 +119,10 @@ static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, | |||
119 | { | 119 | { |
120 | if ((!str_table->table_len) || (str_offset > str_table->table_len)) | 120 | if ((!str_table->table_len) || (str_offset > str_table->table_len)) |
121 | return NULL; | 121 | return NULL; |
122 | return (char *)(((unsigned long)(str_table->strings)) + str_offset); | 122 | return (char *)(((uintptr_t)(str_table->strings)) + str_offset); |
123 | } | 123 | } |
124 | 124 | ||
125 | static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) | 125 | static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) |
126 | { | 126 | { |
127 | int maj = hdr->maj_ver & 0xff; | 127 | int maj = hdr->maj_ver & 0xff; |
128 | int min = hdr->min_ver & 0xff; | 128 | int min = hdr->min_ver & 0xff; |
@@ -139,6 +139,31 @@ static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) | |||
139 | return 0; | 139 | return 0; |
140 | } | 140 | } |
141 | 141 | ||
142 | static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr) | ||
143 | { | ||
144 | int maj = suof_hdr->maj_ver & 0xff; | ||
145 | int min = suof_hdr->min_ver & 0xff; | ||
146 | |||
147 | if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { | ||
148 | pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); | ||
149 | return -EINVAL; | ||
150 | } | ||
151 | if (suof_hdr->fw_type != 0) { | ||
152 | pr_err("QAT: unsupported firmware type\n"); | ||
153 | return -EINVAL; | ||
154 | } | ||
155 | if (suof_hdr->num_chunks <= 0x1) { | ||
156 | pr_err("QAT: SUOF chunk amount is incorrect\n"); | ||
157 | return -EINVAL; | ||
158 | } | ||
159 | if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { | ||
160 | pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", | ||
161 | maj, min); | ||
162 | return -EINVAL; | ||
163 | } | ||
164 | return 0; | ||
165 | } | ||
166 | |||
142 | static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, | 167 | static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, |
143 | unsigned int addr, unsigned int *val, | 168 | unsigned int addr, unsigned int *val, |
144 | unsigned int num_in_bytes) | 169 | unsigned int num_in_bytes) |
@@ -275,7 +300,7 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle | |||
275 | unsigned int i, flag = 0; | 300 | unsigned int i, flag = 0; |
276 | 301 | ||
277 | mem_val_attr = | 302 | mem_val_attr = |
278 | (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + | 303 | (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + |
279 | sizeof(struct icp_qat_uof_initmem)); | 304 | sizeof(struct icp_qat_uof_initmem)); |
280 | 305 | ||
281 | init_header = *init_tab_base; | 306 | init_header = *init_tab_base; |
@@ -425,8 +450,8 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) | |||
425 | if (qat_uclo_init_ae_memory(handle, initmem)) | 450 | if (qat_uclo_init_ae_memory(handle, initmem)) |
426 | return -EINVAL; | 451 | return -EINVAL; |
427 | } | 452 | } |
428 | initmem = (struct icp_qat_uof_initmem *)((unsigned long)( | 453 | initmem = (struct icp_qat_uof_initmem *)((uintptr_t)( |
429 | (unsigned long)initmem + | 454 | (uintptr_t)initmem + |
430 | sizeof(struct icp_qat_uof_initmem)) + | 455 | sizeof(struct icp_qat_uof_initmem)) + |
431 | (sizeof(struct icp_qat_uof_memvar_attr) * | 456 | (sizeof(struct icp_qat_uof_memvar_attr) * |
432 | initmem->val_attr_num)); | 457 | initmem->val_attr_num)); |
@@ -454,7 +479,7 @@ static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, | |||
454 | int i; | 479 | int i; |
455 | struct icp_qat_uof_chunkhdr *chunk_hdr = | 480 | struct icp_qat_uof_chunkhdr *chunk_hdr = |
456 | (struct icp_qat_uof_chunkhdr *) | 481 | (struct icp_qat_uof_chunkhdr *) |
457 | ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); | 482 | ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); |
458 | 483 | ||
459 | for (i = 0; i < obj_hdr->num_chunks; i++) { | 484 | for (i = 0; i < obj_hdr->num_chunks; i++) { |
460 | if ((cur < (void *)&chunk_hdr[i]) && | 485 | if ((cur < (void *)&chunk_hdr[i]) && |
@@ -596,7 +621,7 @@ static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj | |||
596 | page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; | 621 | page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; |
597 | for (i = 0; i < uword_block_tab->entry_num; i++) | 622 | for (i = 0; i < uword_block_tab->entry_num; i++) |
598 | page->uwblock[i].micro_words = | 623 | page->uwblock[i].micro_words = |
599 | (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset; | 624 | (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; |
600 | } | 625 | } |
601 | 626 | ||
602 | static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, | 627 | static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, |
@@ -697,7 +722,7 @@ qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, | |||
697 | memcpy(&str_table->table_len, obj_hdr->file_buff + | 722 | memcpy(&str_table->table_len, obj_hdr->file_buff + |
698 | chunk_hdr->offset, sizeof(str_table->table_len)); | 723 | chunk_hdr->offset, sizeof(str_table->table_len)); |
699 | hdr_size = (char *)&str_table->strings - (char *)str_table; | 724 | hdr_size = (char *)&str_table->strings - (char *)str_table; |
700 | str_table->strings = (unsigned long)obj_hdr->file_buff + | 725 | str_table->strings = (uintptr_t)obj_hdr->file_buff + |
701 | chunk_hdr->offset + hdr_size; | 726 | chunk_hdr->offset + hdr_size; |
702 | return str_table; | 727 | return str_table; |
703 | } | 728 | } |
@@ -721,13 +746,31 @@ qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, | |||
721 | } | 746 | } |
722 | } | 747 | } |
723 | 748 | ||
749 | static unsigned int | ||
750 | qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) | ||
751 | { | ||
752 | switch (handle->pci_dev->device) { | ||
753 | case ADF_DH895XCC_PCI_DEVICE_ID: | ||
754 | return ICP_QAT_AC_895XCC_DEV_TYPE; | ||
755 | case ADF_C62X_PCI_DEVICE_ID: | ||
756 | return ICP_QAT_AC_C62X_DEV_TYPE; | ||
757 | case ADF_C3XXX_PCI_DEVICE_ID: | ||
758 | return ICP_QAT_AC_C3XXX_DEV_TYPE; | ||
759 | default: | ||
760 | pr_err("QAT: unsupported device 0x%x\n", | ||
761 | handle->pci_dev->device); | ||
762 | return 0; | ||
763 | } | ||
764 | } | ||
765 | |||
724 | static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) | 766 | static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) |
725 | { | 767 | { |
726 | unsigned int maj_ver, prod_type = obj_handle->prod_type; | 768 | unsigned int maj_ver, prod_type = obj_handle->prod_type; |
727 | 769 | ||
728 | if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { | 770 | if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { |
729 | pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n", | 771 | pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", |
730 | obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); | 772 | obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, |
773 | prod_type); | ||
731 | return -EINVAL; | 774 | return -EINVAL; |
732 | } | 775 | } |
733 | maj_ver = obj_handle->prod_rev & 0xff; | 776 | maj_ver = obj_handle->prod_rev & 0xff; |
@@ -932,7 +975,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) | |||
932 | obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) | 975 | obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) |
933 | obj_handle->obj_hdr->file_buff; | 976 | obj_handle->obj_hdr->file_buff; |
934 | obj_handle->uword_in_bytes = 6; | 977 | obj_handle->uword_in_bytes = 6; |
935 | obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; | 978 | obj_handle->prod_type = qat_uclo_get_dev_type(handle); |
936 | obj_handle->prod_rev = PID_MAJOR_REV | | 979 | obj_handle->prod_rev = PID_MAJOR_REV | |
937 | (PID_MINOR_REV & handle->hal_handle->revision_id); | 980 | (PID_MINOR_REV & handle->hal_handle->revision_id); |
938 | if (qat_uclo_check_uof_compat(obj_handle)) { | 981 | if (qat_uclo_check_uof_compat(obj_handle)) { |
@@ -969,23 +1012,435 @@ out_err: | |||
969 | return -EFAULT; | 1012 | return -EFAULT; |
970 | } | 1013 | } |
971 | 1014 | ||
972 | void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, | 1015 | static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, |
973 | void *addr_ptr, int mem_size) | 1016 | struct icp_qat_suof_filehdr *suof_ptr, |
1017 | int suof_size) | ||
974 | { | 1018 | { |
975 | qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4)); | 1019 | unsigned int check_sum = 0; |
1020 | unsigned int min_ver_offset = 0; | ||
1021 | struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; | ||
1022 | |||
1023 | suof_handle->file_id = ICP_QAT_SUOF_FID; | ||
1024 | suof_handle->suof_buf = (char *)suof_ptr; | ||
1025 | suof_handle->suof_size = suof_size; | ||
1026 | min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr, | ||
1027 | min_ver); | ||
1028 | check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver, | ||
1029 | min_ver_offset); | ||
1030 | if (check_sum != suof_ptr->check_sum) { | ||
1031 | pr_err("QAT: incorrect SUOF checksum\n"); | ||
1032 | return -EINVAL; | ||
1033 | } | ||
1034 | suof_handle->check_sum = suof_ptr->check_sum; | ||
1035 | suof_handle->min_ver = suof_ptr->min_ver; | ||
1036 | suof_handle->maj_ver = suof_ptr->maj_ver; | ||
1037 | suof_handle->fw_type = suof_ptr->fw_type; | ||
1038 | return 0; | ||
976 | } | 1039 | } |
977 | 1040 | ||
978 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | 1041 | static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle, |
979 | void *addr_ptr, int mem_size) | 1042 | struct icp_qat_suof_img_hdr *suof_img_hdr, |
1043 | struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) | ||
980 | { | 1044 | { |
981 | struct icp_qat_uof_filehdr *filehdr; | 1045 | struct icp_qat_simg_ae_mode *ae_mode; |
982 | struct icp_qat_uclo_objhandle *objhdl; | 1046 | struct icp_qat_suof_objhdr *suof_objhdr; |
1047 | |||
1048 | suof_img_hdr->simg_buf = (suof_handle->suof_buf + | ||
1049 | suof_chunk_hdr->offset + | ||
1050 | sizeof(*suof_objhdr)); | ||
1051 | suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t) | ||
1052 | (suof_handle->suof_buf + | ||
1053 | suof_chunk_hdr->offset))->img_length; | ||
1054 | |||
1055 | suof_img_hdr->css_header = suof_img_hdr->simg_buf; | ||
1056 | suof_img_hdr->css_key = (suof_img_hdr->css_header + | ||
1057 | sizeof(struct icp_qat_css_hdr)); | ||
1058 | suof_img_hdr->css_signature = suof_img_hdr->css_key + | ||
1059 | ICP_QAT_CSS_FWSK_MODULUS_LEN + | ||
1060 | ICP_QAT_CSS_FWSK_EXPONENT_LEN; | ||
1061 | suof_img_hdr->css_simg = suof_img_hdr->css_signature + | ||
1062 | ICP_QAT_CSS_SIGNATURE_LEN; | ||
1063 | |||
1064 | ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); | ||
1065 | suof_img_hdr->ae_mask = ae_mode->ae_mask; | ||
1066 | suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; | ||
1067 | suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; | ||
1068 | suof_img_hdr->fw_type = ae_mode->fw_type; | ||
1069 | } | ||
983 | 1070 | ||
984 | BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= | 1071 | static void |
985 | (sizeof(handle->hal_handle->ae_mask) * 8)); | 1072 | qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, |
1073 | struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) | ||
1074 | { | ||
1075 | char **sym_str = (char **)&suof_handle->sym_str; | ||
1076 | unsigned int *sym_size = &suof_handle->sym_size; | ||
1077 | struct icp_qat_suof_strtable *str_table_obj; | ||
1078 | |||
1079 | *sym_size = *(unsigned int *)(uintptr_t) | ||
1080 | (suof_chunk_hdr->offset + suof_handle->suof_buf); | ||
1081 | *sym_str = (char *)(uintptr_t) | ||
1082 | (suof_handle->suof_buf + suof_chunk_hdr->offset + | ||
1083 | sizeof(str_table_obj->tab_length)); | ||
1084 | } | ||
986 | 1085 | ||
987 | if (!handle || !addr_ptr || mem_size < 24) | 1086 | static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, |
1087 | struct icp_qat_suof_img_hdr *img_hdr) | ||
1088 | { | ||
1089 | struct icp_qat_simg_ae_mode *img_ae_mode = NULL; | ||
1090 | unsigned int prod_rev, maj_ver, prod_type; | ||
1091 | |||
1092 | prod_type = qat_uclo_get_dev_type(handle); | ||
1093 | img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg; | ||
1094 | prod_rev = PID_MAJOR_REV | | ||
1095 | (PID_MINOR_REV & handle->hal_handle->revision_id); | ||
1096 | if (img_ae_mode->dev_type != prod_type) { | ||
1097 | pr_err("QAT: incompatible product type %x\n", | ||
1098 | img_ae_mode->dev_type); | ||
988 | return -EINVAL; | 1099 | return -EINVAL; |
1100 | } | ||
1101 | maj_ver = prod_rev & 0xff; | ||
1102 | if ((maj_ver > img_ae_mode->devmax_ver) || | ||
1103 | (maj_ver < img_ae_mode->devmin_ver)) { | ||
1104 | pr_err("QAT: incompatible device majver 0x%x\n", maj_ver); | ||
1105 | return -EINVAL; | ||
1106 | } | ||
1107 | return 0; | ||
1108 | } | ||
1109 | |||
1110 | static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) | ||
1111 | { | ||
1112 | struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; | ||
1113 | |||
1114 | kfree(sobj_handle->img_table.simg_hdr); | ||
1115 | sobj_handle->img_table.simg_hdr = NULL; | ||
1116 | kfree(handle->sobj_handle); | ||
1117 | handle->sobj_handle = NULL; | ||
1118 | } | ||
1119 | |||
1120 | static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, | ||
1121 | unsigned int img_id, unsigned int num_simgs) | ||
1122 | { | ||
1123 | struct icp_qat_suof_img_hdr img_header; | ||
1124 | |||
1125 | if (img_id != num_simgs - 1) { | ||
1126 | memcpy(&img_header, &suof_img_hdr[num_simgs - 1], | ||
1127 | sizeof(*suof_img_hdr)); | ||
1128 | memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id], | ||
1129 | sizeof(*suof_img_hdr)); | ||
1130 | memcpy(&suof_img_hdr[img_id], &img_header, | ||
1131 | sizeof(*suof_img_hdr)); | ||
1132 | } | ||
1133 | } | ||
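
Editor's note: qat_uclo_tail_img() moves the image that owns accelerator engine 0 to the last slot of the table so it is authenticated and loaded last. Below is a standalone, compilable userspace sketch of the same three-way swap; struct img and its contents are illustrative stand-ins, not the driver's icp_qat_suof_img_hdr.

#include <stdio.h>
#include <string.h>

struct img { unsigned int ae_mask; };	/* illustrative only */

static void tail_img(struct img *tbl, unsigned int id, unsigned int n)
{
	struct img tmp;

	if (id == n - 1)
		return;			/* already last, nothing to do */
	memcpy(&tmp, &tbl[n - 1], sizeof(tmp));
	memcpy(&tbl[n - 1], &tbl[id], sizeof(tmp));
	memcpy(&tbl[id], &tmp, sizeof(tmp));
}

int main(void)
{
	struct img tbl[] = { { 0x1 }, { 0x6 }, { 0x18 } };
	unsigned int i, ae0 = 0;	/* tbl[0] owns AE0 (bit 0 set) */

	tail_img(tbl, ae0, 3);
	for (i = 0; i < 3; i++)
		printf("img[%u].ae_mask = 0x%x\n", i, tbl[i].ae_mask);
	return 0;
}
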
1134 | |||
1135 | static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, | ||
1136 | struct icp_qat_suof_filehdr *suof_ptr, | ||
1137 | int suof_size) | ||
1138 | { | ||
1139 | struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; | ||
1140 | struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; | ||
1141 | struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; | ||
1142 | int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE; | ||
1143 | unsigned int i = 0; | ||
1144 | struct icp_qat_suof_img_hdr img_header; | ||
1145 | |||
1146 | if (!suof_ptr || (suof_size == 0)) { | ||
1147 | pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); | ||
1148 | return -EINVAL; | ||
1149 | } | ||
1150 | if (qat_uclo_check_suof_format(suof_ptr)) | ||
1151 | return -EINVAL; | ||
1152 | ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); | ||
1153 | if (ret) | ||
1154 | return ret; | ||
1155 | suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *) | ||
1156 | ((uintptr_t)suof_ptr + sizeof(*suof_ptr)); | ||
1157 | |||
1158 | qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); | ||
1159 | suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; | ||
1160 | |||
1161 | if (suof_handle->img_table.num_simgs != 0) { | ||
1162 | suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs, | ||
1163 | sizeof(img_header), GFP_KERNEL); | ||
1164 | if (!suof_img_hdr) | ||
1165 | return -ENOMEM; | ||
1166 | suof_handle->img_table.simg_hdr = suof_img_hdr; | ||
1167 | } | ||
1168 | |||
1169 | for (i = 0; i < suof_handle->img_table.num_simgs; i++) { | ||
1170 | qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i], | ||
1171 | &suof_chunk_hdr[1 + i]); | ||
1172 | ret = qat_uclo_check_simg_compat(handle, | ||
1173 | &suof_img_hdr[i]); | ||
1174 | if (ret) | ||
1175 | return ret; | ||
1176 | if ((suof_img_hdr[i].ae_mask & 0x1) != 0) | ||
1177 | ae0_img = i; | ||
1178 | } | ||
1179 | if (ae0_img != ICP_QAT_UCLO_MAX_AE) | ||
1180 | qat_uclo_tail_img(suof_img_hdr, ae0_img, suof_handle->img_table.num_simgs); | ||
1181 | return 0; | ||
1182 | } | ||
1183 | |||
1184 | #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low) | ||
1185 | #define BITS_IN_DWORD 32 | ||
1186 | |||
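
Editor's note: ADD_ADDR() reassembles a 64-bit DMA bus address from the two 32-bit halves the FCU CSRs take; the shift by BITS_IN_DWORD performs the inverse split. A minimal compilable round-trip sketch follows; the address value is made up.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ADD_ADDR(high, low) ((((uint64_t)(high)) << 32) + (low))
#define BITS_IN_DWORD 32

int main(void)
{
	uint64_t bus_addr = 0x0000001234abcd00ULL;	/* made-up DMA address */
	uint32_t hi = (uint32_t)(bus_addr >> BITS_IN_DWORD);
	uint32_t lo = (uint32_t)bus_addr;

	/* splitting and rejoining must be lossless */
	assert(ADD_ADDR(hi, lo) == bus_addr);
	printf("hi=0x%x lo=0x%x -> 0x%llx\n", hi, lo,
	       (unsigned long long)ADD_ADDR(hi, lo));
	return 0;
}
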
1187 | static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, | ||
1188 | struct icp_qat_fw_auth_desc *desc) | ||
1189 | { | ||
1190 | unsigned int fcu_sts, retry = 0; | ||
1191 | u64 bus_addr; | ||
1192 | |||
1193 | bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) | ||
1194 | - sizeof(struct icp_qat_auth_chunk); | ||
1195 | SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD)); | ||
1196 | SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr); | ||
1197 | SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH); | ||
1198 | |||
1199 | do { | ||
1200 | msleep(FW_AUTH_WAIT_PERIOD); | ||
1201 | fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); | ||
1202 | if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) | ||
1203 | goto auth_fail; | ||
1204 | if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) | ||
1205 | if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) | ||
1206 | return 0; | ||
1207 | } while (retry++ < FW_AUTH_MAX_RETRY); | ||
1208 | auth_fail: | ||
1209 | pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", | ||
1210 | fcu_sts & FCU_AUTH_STS_MASK, retry); | ||
1211 | return -EINVAL; | ||
1212 | } | ||
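
Editor's note: qat_uclo_auth_fw() above and qat_uclo_load_fw() below share the same bounded poll: sleep, read FCU_STATUS, and give up after FW_AUTH_MAX_RETRY attempts. A self-contained sketch of that pattern, with poll_status() standing in for the CSR read and the retry budget and success point chosen arbitrarily:

#include <stdio.h>

#define MAX_RETRY 10	/* stand-in for FW_AUTH_MAX_RETRY */

static int poll_status(int attempt)
{
	return attempt >= 3;	/* pretend the engine finishes on try 3 */
}

static int wait_done(void)
{
	int retry = 0;

	do {
		/* the driver does msleep(FW_AUTH_WAIT_PERIOD) here */
		if (poll_status(retry))
			break;
	} while (retry++ < MAX_RETRY);
	if (retry > MAX_RETRY)
		return -1;	/* timed out: loop ran out of retries */
	return 0;
}

int main(void)
{
	printf("wait_done() = %d\n", wait_done());
	return 0;
}
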
1213 | |||
1214 | static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, | ||
1215 | struct icp_firml_dram_desc *dram_desc, | ||
1216 | unsigned int size) | ||
1217 | { | ||
1218 | void *vptr; | ||
1219 | dma_addr_t ptr; | ||
1220 | |||
1221 | vptr = dma_alloc_coherent(&handle->pci_dev->dev, | ||
1222 | size, &ptr, GFP_KERNEL); | ||
1223 | if (!vptr) | ||
1224 | return -ENOMEM; | ||
1225 | dram_desc->dram_base_addr_v = vptr; | ||
1226 | dram_desc->dram_bus_addr = ptr; | ||
1227 | dram_desc->dram_size = size; | ||
1228 | return 0; | ||
1229 | } | ||
1230 | |||
1231 | static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, | ||
1232 | struct icp_firml_dram_desc *dram_desc) | ||
1233 | { | ||
1234 | dma_free_coherent(&handle->pci_dev->dev, | ||
1235 | (size_t)(dram_desc->dram_size), | ||
1236 | (dram_desc->dram_base_addr_v), | ||
1237 | dram_desc->dram_bus_addr); | ||
1238 | memset(dram_desc, 0, sizeof(*dram_desc)); | ||
1239 | } | ||
1240 | |||
1241 | static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle, | ||
1242 | struct icp_qat_fw_auth_desc **desc) | ||
1243 | { | ||
1244 | struct icp_firml_dram_desc dram_desc; | ||
1245 | |||
1246 | dram_desc.dram_base_addr_v = *desc; | ||
1247 | dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *) | ||
1248 | (*desc))->chunk_bus_addr; | ||
1249 | dram_desc.dram_size = ((struct icp_qat_auth_chunk *) | ||
1250 | (*desc))->chunk_size; | ||
1251 | qat_uclo_simg_free(handle, &dram_desc); | ||
1252 | } | ||
1253 | |||
1254 | static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, | ||
1255 | char *image, unsigned int size, | ||
1256 | struct icp_qat_fw_auth_desc **desc) | ||
1257 | { | ||
1258 | struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; | ||
1259 | struct icp_qat_fw_auth_desc *auth_desc; | ||
1260 | struct icp_qat_auth_chunk *auth_chunk; | ||
1261 | u64 virt_addr, bus_addr, virt_base; | ||
1262 | unsigned int length, simg_offset = sizeof(*auth_chunk); | ||
1263 | struct icp_firml_dram_desc img_desc; | ||
1264 | |||
1265 | if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) { | ||
1266 | pr_err("QAT: error, input image size overflow %d\n", size); | ||
1267 | return -EINVAL; | ||
1268 | } | ||
1269 | length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? | ||
1270 | ICP_QAT_CSS_AE_SIMG_LEN + simg_offset : | ||
1271 | size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset; | ||
1272 | if (qat_uclo_simg_alloc(handle, &img_desc, length)) { | ||
1273 | pr_err("QAT: error, allocate continuous dram fail\n"); | ||
1274 | return -ENOMEM; | ||
1275 | } | ||
1276 | |||
1277 | auth_chunk = img_desc.dram_base_addr_v; | ||
1278 | auth_chunk->chunk_size = img_desc.dram_size; | ||
1279 | auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr; | ||
1280 | virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset; | ||
1281 | bus_addr = img_desc.dram_bus_addr + simg_offset; | ||
1282 | auth_desc = img_desc.dram_base_addr_v; | ||
1283 | auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); | ||
1284 | auth_desc->css_hdr_low = (unsigned int)bus_addr; | ||
1285 | virt_addr = virt_base; | ||
1286 | |||
1287 | memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); | ||
1288 | /* pub key */ | ||
1289 | bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + | ||
1290 | sizeof(*css_hdr); | ||
1291 | virt_addr = virt_addr + sizeof(*css_hdr); | ||
1292 | |||
1293 | auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); | ||
1294 | auth_desc->fwsk_pub_low = (unsigned int)bus_addr; | ||
1295 | |||
1296 | memcpy((void *)(uintptr_t)virt_addr, | ||
1297 | (void *)(image + sizeof(*css_hdr)), | ||
1298 | ICP_QAT_CSS_FWSK_MODULUS_LEN); | ||
1299 | /* padding */ | ||
1300 | memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN), | ||
1301 | 0, ICP_QAT_CSS_FWSK_PAD_LEN); | ||
1302 | |||
1303 | /* exponent */ | ||
1304 | memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN + | ||
1305 | ICP_QAT_CSS_FWSK_PAD_LEN), | ||
1306 | (void *)(image + sizeof(*css_hdr) + | ||
1307 | ICP_QAT_CSS_FWSK_MODULUS_LEN), | ||
1308 | sizeof(unsigned int)); | ||
1309 | |||
1310 | /* signature */ | ||
1311 | bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, | ||
1312 | auth_desc->fwsk_pub_low) + | ||
1313 | ICP_QAT_CSS_FWSK_PUB_LEN; | ||
1314 | virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN; | ||
1315 | auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); | ||
1316 | auth_desc->signature_low = (unsigned int)bus_addr; | ||
1317 | |||
1318 | memcpy((void *)(uintptr_t)virt_addr, | ||
1319 | (void *)(image + sizeof(*css_hdr) + | ||
1320 | ICP_QAT_CSS_FWSK_MODULUS_LEN + | ||
1321 | ICP_QAT_CSS_FWSK_EXPONENT_LEN), | ||
1322 | ICP_QAT_CSS_SIGNATURE_LEN); | ||
1323 | |||
1324 | bus_addr = ADD_ADDR(auth_desc->signature_high, | ||
1325 | auth_desc->signature_low) + | ||
1326 | ICP_QAT_CSS_SIGNATURE_LEN; | ||
1327 | virt_addr += ICP_QAT_CSS_SIGNATURE_LEN; | ||
1328 | |||
1329 | auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); | ||
1330 | auth_desc->img_low = (unsigned int)bus_addr; | ||
1331 | auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET; | ||
1332 | memcpy((void *)(uintptr_t)virt_addr, | ||
1333 | (void *)(image + ICP_QAT_AE_IMG_OFFSET), | ||
1334 | auth_desc->img_len); | ||
1335 | virt_addr = virt_base; | ||
1336 | /* AE firmware */ | ||
1337 | if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == | ||
1338 | CSS_AE_FIRMWARE) { | ||
1339 | auth_desc->img_ae_mode_data_high = auth_desc->img_high; | ||
1340 | auth_desc->img_ae_mode_data_low = auth_desc->img_low; | ||
1341 | bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, | ||
1342 | auth_desc->img_ae_mode_data_low) + | ||
1343 | sizeof(struct icp_qat_simg_ae_mode); | ||
1344 | |||
1345 | auth_desc->img_ae_init_data_high = (unsigned int) | ||
1346 | (bus_addr >> BITS_IN_DWORD); | ||
1347 | auth_desc->img_ae_init_data_low = (unsigned int)bus_addr; | ||
1348 | bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; | ||
1349 | auth_desc->img_ae_insts_high = (unsigned int) | ||
1350 | (bus_addr >> BITS_IN_DWORD); | ||
1351 | auth_desc->img_ae_insts_low = (unsigned int)bus_addr; | ||
1352 | } else { | ||
1353 | auth_desc->img_ae_insts_high = auth_desc->img_high; | ||
1354 | auth_desc->img_ae_insts_low = auth_desc->img_low; | ||
1355 | } | ||
1356 | *desc = auth_desc; | ||
1357 | return 0; | ||
1358 | } | ||
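
Editor's note: qat_uclo_map_auth_fw() lays the signed image out contiguously: CSS header, public key (modulus, pad, exponent), signature, then the image body. The sketch below just walks those offsets; the *_LEN values are placeholders, not the real ICP_QAT_CSS_* constants.

#include <stddef.h>
#include <stdio.h>

/* Placeholder lengths; the real ICP_QAT_CSS_* constants differ. */
#define HDR_LEN       128
#define MODULUS_LEN   256
#define PAD_LEN       12
#define EXPONENT_LEN  4
#define SIGNATURE_LEN 256

int main(void)
{
	size_t off = 0;

	printf("css header @ %zu\n", off);
	off += HDR_LEN;
	printf("fwsk modulus @ %zu\n", off);
	off += MODULUS_LEN;
	printf("fwsk pad @ %zu\n", off);
	off += PAD_LEN;
	printf("fwsk exponent @ %zu\n", off);
	off += EXPONENT_LEN;
	printf("signature @ %zu\n", off);
	off += SIGNATURE_LEN;
	printf("image body @ %zu\n", off);
	return 0;
}
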
1359 | |||
1360 | static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, | ||
1361 | struct icp_qat_fw_auth_desc *desc) | ||
1362 | { | ||
1363 | unsigned int i; | ||
1364 | unsigned int fcu_sts; | ||
1365 | struct icp_qat_simg_ae_mode *virt_addr; | ||
1366 | unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS; | ||
1367 | |||
1368 | virt_addr = (void *)((uintptr_t)desc + | ||
1369 | sizeof(struct icp_qat_auth_chunk) + | ||
1370 | sizeof(struct icp_qat_css_hdr) + | ||
1371 | ICP_QAT_CSS_FWSK_PUB_LEN + | ||
1372 | ICP_QAT_CSS_SIGNATURE_LEN); | ||
1373 | for (i = 0; i < handle->hal_handle->ae_max_num; i++) { | ||
1374 | int retry = 0; | ||
1375 | |||
1376 | if (!((virt_addr->ae_mask >> i) & 0x1)) | ||
1377 | continue; | ||
1378 | if (qat_hal_check_ae_active(handle, i)) { | ||
1379 | pr_err("QAT: AE %d is active\n", i); | ||
1380 | return -EINVAL; | ||
1381 | } | ||
1382 | SET_CAP_CSR(handle, FCU_CONTROL, | ||
1383 | (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS))); | ||
1384 | |||
1385 | do { | ||
1386 | msleep(FW_AUTH_WAIT_PERIOD); | ||
1387 | fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); | ||
1388 | if (((fcu_sts & FCU_AUTH_STS_MASK) == | ||
1389 | FCU_STS_LOAD_DONE) && | ||
1390 | ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i))) | ||
1391 | break; | ||
1392 | } while (retry++ < FW_AUTH_MAX_RETRY); | ||
1393 | if (retry > FW_AUTH_MAX_RETRY) { | ||
1394 | pr_err("QAT: firmware load failed timeout %x\n", retry); | ||
1395 | return -EINVAL; | ||
1396 | } | ||
1397 | } | ||
1398 | return 0; | ||
1399 | } | ||
1400 | |||
1401 | static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, | ||
1402 | void *addr_ptr, int mem_size) | ||
1403 | { | ||
1404 | struct icp_qat_suof_handle *suof_handle; | ||
1405 | |||
1406 | suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL); | ||
1407 | if (!suof_handle) | ||
1408 | return -ENOMEM; | ||
1409 | handle->sobj_handle = suof_handle; | ||
1410 | if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { | ||
1411 | qat_uclo_del_suof(handle); | ||
1412 | pr_err("QAT: map SUOF failed\n"); | ||
1413 | return -EINVAL; | ||
1414 | } | ||
1415 | return 0; | ||
1416 | } | ||
1417 | |||
1418 | int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, | ||
1419 | void *addr_ptr, int mem_size) | ||
1420 | { | ||
1421 | struct icp_qat_fw_auth_desc *desc = NULL; | ||
1422 | int status = 0; | ||
1423 | |||
1424 | if (handle->fw_auth) { | ||
1425 | if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc)) | ||
1426 | status = qat_uclo_auth_fw(handle, desc); | ||
1427 | qat_uclo_ummap_auth_fw(handle, &desc); | ||
1428 | } else { | ||
1429 | if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) { | ||
1430 | pr_err("QAT: C3XXX doesn't support unsigned MMP\n"); | ||
1431 | return -EINVAL; | ||
1432 | } | ||
1433 | qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size); | ||
1434 | } | ||
1435 | return status; | ||
1436 | } | ||
1437 | |||
1438 | static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | ||
1439 | void *addr_ptr, int mem_size) | ||
1440 | { | ||
1441 | struct icp_qat_uof_filehdr *filehdr; | ||
1442 | struct icp_qat_uclo_objhandle *objhdl; | ||
1443 | |||
989 | objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); | 1444 | objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); |
990 | if (!objhdl) | 1445 | if (!objhdl) |
991 | return -ENOMEM; | 1446 | return -ENOMEM; |
@@ -993,7 +1448,7 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | |||
993 | if (!objhdl->obj_buf) | 1448 | if (!objhdl->obj_buf) |
994 | goto out_objbuf_err; | 1449 | goto out_objbuf_err; |
995 | filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; | 1450 | filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; |
996 | if (qat_uclo_check_format(filehdr)) | 1451 | if (qat_uclo_check_uof_format(filehdr)) |
997 | goto out_objhdr_err; | 1452 | goto out_objhdr_err; |
998 | objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, | 1453 | objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, |
999 | ICP_QAT_UOF_OBJS); | 1454 | ICP_QAT_UOF_OBJS); |
@@ -1016,11 +1471,27 @@ out_objbuf_err: | |||
1016 | return -ENOMEM; | 1471 | return -ENOMEM; |
1017 | } | 1472 | } |
1018 | 1473 | ||
1474 | int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, | ||
1475 | void *addr_ptr, int mem_size) | ||
1476 | { | ||
1477 | BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= | ||
1478 | (sizeof(handle->hal_handle->ae_mask) * 8)); | ||
1479 | |||
1480 | if (!handle || !addr_ptr || mem_size < 24) | ||
1481 | return -EINVAL; | ||
1482 | |||
1483 | return (handle->fw_auth) ? | ||
1484 | qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) : | ||
1485 | qat_uclo_map_uof_obj(handle, addr_ptr, mem_size); | ||
1486 | } | ||
1487 | |||
1019 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) | 1488 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) |
1020 | { | 1489 | { |
1021 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | 1490 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; |
1022 | unsigned int a; | 1491 | unsigned int a; |
1023 | 1492 | ||
1493 | if (handle->sobj_handle) | ||
1494 | qat_uclo_del_suof(handle); | ||
1024 | if (!obj_handle) | 1495 | if (!obj_handle) |
1025 | return; | 1496 | return; |
1026 | 1497 | ||
@@ -1055,7 +1526,7 @@ static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, | |||
1055 | encap_page->uwblock[i].words_num - 1) { | 1526 | encap_page->uwblock[i].words_num - 1) { |
1056 | raddr -= encap_page->uwblock[i].start_addr; | 1527 | raddr -= encap_page->uwblock[i].start_addr; |
1057 | raddr *= obj_handle->uword_in_bytes; | 1528 | raddr *= obj_handle->uword_in_bytes; |
1058 | memcpy(&uwrd, (void *)(((unsigned long) | 1529 | memcpy(&uwrd, (void *)(((uintptr_t) |
1059 | encap_page->uwblock[i].micro_words) + raddr), | 1530 | encap_page->uwblock[i].micro_words) + raddr), |
1060 | obj_handle->uword_in_bytes); | 1531 | obj_handle->uword_in_bytes); |
1061 | uwrd = uwrd & 0xbffffffffffull; | 1532 | uwrd = uwrd & 0xbffffffffffull; |
@@ -1147,7 +1618,33 @@ static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, | |||
1147 | } | 1618 | } |
1148 | } | 1619 | } |
1149 | 1620 | ||
1150 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) | 1621 | static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) |
1622 | { | ||
1623 | unsigned int i; | ||
1624 | struct icp_qat_fw_auth_desc *desc = NULL; | ||
1625 | struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; | ||
1626 | struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; | ||
1627 | |||
1628 | for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { | ||
1629 | if (qat_uclo_map_auth_fw(handle, | ||
1630 | (char *)simg_hdr[i].simg_buf, | ||
1631 | (unsigned int) | ||
1632 | (simg_hdr[i].simg_len), | ||
1633 | &desc)) | ||
1634 | goto wr_err; | ||
1635 | if (qat_uclo_auth_fw(handle, desc)) | ||
1636 | goto wr_err; | ||
1637 | if (qat_uclo_load_fw(handle, desc)) | ||
1638 | goto wr_err; | ||
1639 | qat_uclo_ummap_auth_fw(handle, &desc); | ||
1640 | } | ||
1641 | return 0; | ||
1642 | wr_err: | ||
1643 | qat_uclo_ummap_auth_fw(handle, &desc); | ||
1644 | return -EINVAL; | ||
1645 | } | ||
1646 | |||
1647 | static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle) | ||
1151 | { | 1648 | { |
1152 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | 1649 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; |
1153 | unsigned int i; | 1650 | unsigned int i; |
@@ -1164,3 +1661,9 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) | |||
1164 | } | 1661 | } |
1165 | return 0; | 1662 | return 0; |
1166 | } | 1663 | } |
1664 | |||
1665 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) | ||
1666 | { | ||
1667 | return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) : | ||
1668 | qat_uclo_wr_uof_img(handle); | ||
1669 | } | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile index 8c79c543740f..180a00ed7f89 100644 --- a/drivers/crypto/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile | |||
@@ -1,5 +1,3 @@ | |||
1 | ccflags-y := -I$(src)/../qat_common | 1 | ccflags-y := -I$(src)/../qat_common |
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o | 2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o |
3 | qat_dh895xcc-objs := adf_drv.o \ | 3 | qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o |
4 | adf_isr.o \ | ||
5 | adf_dh895xcc_hw_data.o | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index ff54257eced4..6e1d5e185526 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | |||
@@ -48,7 +48,6 @@ | |||
48 | #include <adf_pf2vf_msg.h> | 48 | #include <adf_pf2vf_msg.h> |
49 | #include <adf_common_drv.h> | 49 | #include <adf_common_drv.h> |
50 | #include "adf_dh895xcc_hw_data.h" | 50 | #include "adf_dh895xcc_hw_data.h" |
51 | #include "adf_drv.h" | ||
52 | 51 | ||
53 | /* Worker thread to service arbiter mappings based on dev SKUs */ | 52 | /* Worker thread to service arbiter mappings based on dev SKUs */ |
54 | static const uint32_t thrd_to_arb_map_sku4[] = { | 53 | static const uint32_t thrd_to_arb_map_sku4[] = { |
@@ -143,8 +142,8 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self) | |||
143 | return DEV_SKU_UNKNOWN; | 142 | return DEV_SKU_UNKNOWN; |
144 | } | 143 | } |
145 | 144 | ||
146 | void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | 145 | static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, |
147 | uint32_t const **arb_map_config) | 146 | u32 const **arb_map_config) |
148 | { | 147 | { |
149 | switch (accel_dev->accel_pci_dev.sku) { | 148 | switch (accel_dev->accel_pci_dev.sku) { |
150 | case DEV_SKU_1: | 149 | case DEV_SKU_1: |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 88dffb297346..092f7353ed23 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | |||
@@ -53,7 +53,6 @@ | |||
53 | #define ADF_DH895XCC_ETR_BAR 2 | 53 | #define ADF_DH895XCC_ETR_BAR 2 |
54 | #define ADF_DH895XCC_RX_RINGS_OFFSET 8 | 54 | #define ADF_DH895XCC_RX_RINGS_OFFSET 8 |
55 | #define ADF_DH895XCC_TX_RINGS_MASK 0xFF | 55 | #define ADF_DH895XCC_TX_RINGS_MASK 0xFF |
56 | #define ADF_DH895XCC_FUSECTL_OFFSET 0x40 | ||
57 | #define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000 | 56 | #define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000 |
58 | #define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20 | 57 | #define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20 |
59 | #define ADF_DH895XCC_FUSECTL_SKU_1 0x0 | 58 | #define ADF_DH895XCC_FUSECTL_SKU_1 0x0 |
@@ -65,7 +64,6 @@ | |||
65 | #define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13 | 64 | #define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13 |
66 | #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F | 65 | #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F |
67 | #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF | 66 | #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF |
68 | #define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C | ||
69 | #define ADF_DH895XCC_ETR_MAX_BANKS 32 | 67 | #define ADF_DH895XCC_ETR_MAX_BANKS 32 |
70 | #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) | 68 | #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) |
71 | #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) | 69 | #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) |
@@ -80,11 +78,12 @@ | |||
80 | #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) | 78 | #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) |
81 | #define ADF_DH895XCC_ERRSSMSH_EN BIT(3) | 79 | #define ADF_DH895XCC_ERRSSMSH_EN BIT(3) |
82 | 80 | ||
83 | #define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C) | ||
84 | #define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8) | ||
85 | #define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) | 81 | #define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) |
86 | #define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) | 82 | #define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) |
87 | /* FW names */ | 83 | /* FW names */ |
88 | #define ADF_DH895XCC_FW "qat_895xcc.bin" | 84 | #define ADF_DH895XCC_FW "qat_895xcc.bin" |
89 | #define ADF_DH895XCC_MMP "qat_mmp.bin" | 85 | #define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin" |
86 | |||
87 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | ||
88 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | ||
90 | #endif | 89 | #endif |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index f8dd14f232c8..a8c4b92a7cbd 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c | |||
@@ -60,11 +60,7 @@ | |||
60 | #include <adf_accel_devices.h> | 60 | #include <adf_accel_devices.h> |
61 | #include <adf_common_drv.h> | 61 | #include <adf_common_drv.h> |
62 | #include <adf_cfg.h> | 62 | #include <adf_cfg.h> |
63 | #include <adf_transport_access_macros.h> | ||
64 | #include "adf_dh895xcc_hw_data.h" | 63 | #include "adf_dh895xcc_hw_data.h" |
65 | #include "adf_drv.h" | ||
66 | |||
67 | static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME; | ||
68 | 64 | ||
69 | #define ADF_SYSTEM_DEVICE(device_id) \ | 65 | #define ADF_SYSTEM_DEVICE(device_id) \ |
70 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | 66 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} |
@@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev); | |||
80 | 76 | ||
81 | static struct pci_driver adf_driver = { | 77 | static struct pci_driver adf_driver = { |
82 | .id_table = adf_pci_tbl, | 78 | .id_table = adf_pci_tbl, |
83 | .name = adf_driver_name, | 79 | .name = ADF_DH895XCC_DEVICE_NAME, |
84 | .probe = adf_probe, | 80 | .probe = adf_probe, |
85 | .remove = adf_remove, | 81 | .remove = adf_remove, |
86 | .sriov_configure = adf_sriov_configure, | 82 | .sriov_configure = adf_sriov_configure, |
@@ -120,87 +116,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | |||
120 | adf_devmgr_rm_dev(accel_dev, NULL); | 116 | adf_devmgr_rm_dev(accel_dev, NULL); |
121 | } | 117 | } |
122 | 118 | ||
123 | static int adf_dev_configure(struct adf_accel_dev *accel_dev) | ||
124 | { | ||
125 | int cpus = num_online_cpus(); | ||
126 | int banks = GET_MAX_BANKS(accel_dev); | ||
127 | int instances = min(cpus, banks); | ||
128 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
129 | int i; | ||
130 | unsigned long val; | ||
131 | |||
132 | if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) | ||
133 | goto err; | ||
134 | if (adf_cfg_section_add(accel_dev, "Accelerator0")) | ||
135 | goto err; | ||
136 | for (i = 0; i < instances; i++) { | ||
137 | val = i; | ||
138 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); | ||
139 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
140 | key, (void *)&val, ADF_DEC)) | ||
141 | goto err; | ||
142 | |||
143 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, | ||
144 | i); | ||
145 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
146 | key, (void *)&val, ADF_DEC)) | ||
147 | goto err; | ||
148 | |||
149 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); | ||
150 | val = 128; | ||
151 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
152 | key, (void *)&val, ADF_DEC)) | ||
153 | goto err; | ||
154 | |||
155 | val = 512; | ||
156 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); | ||
157 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
158 | key, (void *)&val, ADF_DEC)) | ||
159 | goto err; | ||
160 | |||
161 | val = 0; | ||
162 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); | ||
163 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
164 | key, (void *)&val, ADF_DEC)) | ||
165 | goto err; | ||
166 | |||
167 | val = 2; | ||
168 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); | ||
169 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
170 | key, (void *)&val, ADF_DEC)) | ||
171 | goto err; | ||
172 | |||
173 | val = 8; | ||
174 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); | ||
175 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
176 | key, (void *)&val, ADF_DEC)) | ||
177 | goto err; | ||
178 | |||
179 | val = 10; | ||
180 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); | ||
181 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
182 | key, (void *)&val, ADF_DEC)) | ||
183 | goto err; | ||
184 | |||
185 | val = ADF_COALESCING_DEF_TIME; | ||
186 | snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); | ||
187 | if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", | ||
188 | key, (void *)&val, ADF_DEC)) | ||
189 | goto err; | ||
190 | } | ||
191 | |||
192 | val = i; | ||
193 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
194 | ADF_NUM_CY, (void *)&val, ADF_DEC)) | ||
195 | goto err; | ||
196 | |||
197 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
198 | return 0; | ||
199 | err: | ||
200 | dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | |||
204 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 119 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
205 | { | 120 | { |
206 | struct adf_accel_dev *accel_dev; | 121 | struct adf_accel_dev *accel_dev; |
@@ -253,15 +168,9 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
253 | } | 168 | } |
254 | 169 | ||
255 | accel_dev->hw_device = hw_data; | 170 | accel_dev->hw_device = hw_data; |
256 | switch (ent->device) { | 171 | adf_init_hw_data_dh895xcc(accel_dev->hw_device); |
257 | case ADF_DH895XCC_PCI_DEVICE_ID: | ||
258 | adf_init_hw_data_dh895xcc(accel_dev->hw_device); | ||
259 | break; | ||
260 | default: | ||
261 | return -ENODEV; | ||
262 | } | ||
263 | pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); | 172 | pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); |
264 | pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, | 173 | pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, |
265 | &hw_data->fuses); | 174 | &hw_data->fuses); |
266 | 175 | ||
267 | /* Get Accelerators and Accelerators Engines masks */ | 176 | /* Get Accelerators and Accelerators Engines masks */ |
@@ -316,13 +225,13 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
316 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 225 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
317 | } | 226 | } |
318 | 227 | ||
319 | if (pci_request_regions(pdev, adf_driver_name)) { | 228 | if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) { |
320 | ret = -EFAULT; | 229 | ret = -EFAULT; |
321 | goto out_err_disable; | 230 | goto out_err_disable; |
322 | } | 231 | } |
323 | 232 | ||
324 | /* Read accelerator capabilities mask */ | 233 | /* Read accelerator capabilities mask */ |
325 | pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET, | 234 | pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, |
326 | &hw_data->accel_capabilities_mask); | 235 | &hw_data->accel_capabilities_mask); |
327 | 236 | ||
328 | /* Find and map all the device's BARS */ | 237 | /* Find and map all the device's BARS */ |
@@ -357,7 +266,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
357 | goto out_err_free_reg; | 266 | goto out_err_free_reg; |
358 | } | 267 | } |
359 | 268 | ||
360 | ret = adf_dev_configure(accel_dev); | 269 | ret = qat_crypto_dev_config(accel_dev); |
361 | if (ret) | 270 | if (ret) |
362 | goto out_err_free_reg; | 271 | goto out_err_free_reg; |
363 | 272 | ||
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile index 85399fcbbad4..5c3ccf8267eb 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile | |||
@@ -1,5 +1,3 @@ | |||
1 | ccflags-y := -I$(src)/../qat_common | 1 | ccflags-y := -I$(src)/../qat_common |
2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o | 2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o |
3 | qat_dh895xccvf-objs := adf_drv.o \ | 3 | qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o |
4 | adf_isr.o \ | ||
5 | adf_dh895xccvf_hw_data.o | ||
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index a9a27eff41fb..dc04ab68d24d 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | |||
@@ -48,7 +48,6 @@ | |||
48 | #include <adf_pf2vf_msg.h> | 48 | #include <adf_pf2vf_msg.h> |
49 | #include <adf_common_drv.h> | 49 | #include <adf_common_drv.h> |
50 | #include "adf_dh895xccvf_hw_data.h" | 50 | #include "adf_dh895xccvf_hw_data.h" |
51 | #include "adf_drv.h" | ||
52 | 51 | ||
53 | static struct adf_hw_device_class dh895xcciov_class = { | 52 | static struct adf_hw_device_class dh895xcciov_class = { |
54 | .name = ADF_DH895XCCVF_DEVICE_NAME, | 53 | .name = ADF_DH895XCCVF_DEVICE_NAME, |
@@ -136,7 +135,6 @@ static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) | |||
136 | void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) | 135 | void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) |
137 | { | 136 | { |
138 | hw_data->dev_class = &dh895xcciov_class; | 137 | hw_data->dev_class = &dh895xcciov_class; |
139 | hw_data->instance_id = dh895xcciov_class.instances++; | ||
140 | hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS; | 138 | hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS; |
141 | hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS; | 139 | hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS; |
142 | hw_data->num_logical_accel = 1; | 140 | hw_data->num_logical_accel = 1; |
@@ -164,9 +162,12 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) | |||
164 | hw_data->enable_ints = adf_vf_void_noop; | 162 | hw_data->enable_ints = adf_vf_void_noop; |
165 | hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; | 163 | hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; |
166 | hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; | 164 | hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; |
165 | hw_data->dev_class->instances++; | ||
166 | adf_devmgr_update_class_index(hw_data); | ||
167 | } | 167 | } |
168 | 168 | ||
169 | void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) | 169 | void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) |
170 | { | 170 | { |
171 | hw_data->dev_class->instances--; | 171 | hw_data->dev_class->instances--; |
172 | adf_devmgr_update_class_index(hw_data); | ||
172 | } | 173 | } |
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h index 8f6babfef629..6ddc19bd4410 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | |||
@@ -56,13 +56,9 @@ | |||
56 | #define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF | 56 | #define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF |
57 | #define ADF_DH895XCCIOV_ETR_BAR 0 | 57 | #define ADF_DH895XCCIOV_ETR_BAR 0 |
58 | #define ADF_DH895XCCIOV_ETR_MAX_BANKS 1 | 58 | #define ADF_DH895XCCIOV_ETR_MAX_BANKS 1 |
59 | |||
60 | #define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200 | 59 | #define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200 |
61 | #define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0) | ||
62 | |||
63 | #define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204 | ||
64 | #define ADF_DH895XCC_VINTSOU_BUN BIT(0) | ||
65 | #define ADF_DH895XCC_VINTSOU_PF2VF BIT(1) | ||
66 | |||
67 | #define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208 | 60 | #define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208 |
61 | |||
62 | void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); | ||
63 | void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); | ||
68 | #endif | 64 | #endif |
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 789426f21882..f8cc4bf0a50c 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | |||
@@ -60,11 +60,7 @@ | |||
60 | #include <adf_accel_devices.h> | 60 | #include <adf_accel_devices.h> |
61 | #include <adf_common_drv.h> | 61 | #include <adf_common_drv.h> |
62 | #include <adf_cfg.h> | 62 | #include <adf_cfg.h> |
63 | #include <adf_transport_access_macros.h> | ||
64 | #include "adf_dh895xccvf_hw_data.h" | 63 | #include "adf_dh895xccvf_hw_data.h" |
65 | #include "adf_drv.h" | ||
66 | |||
67 | static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME; | ||
68 | 64 | ||
69 | #define ADF_SYSTEM_DEVICE(device_id) \ | 65 | #define ADF_SYSTEM_DEVICE(device_id) \ |
70 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | 66 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} |
@@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev); | |||
80 | 76 | ||
81 | static struct pci_driver adf_driver = { | 77 | static struct pci_driver adf_driver = { |
82 | .id_table = adf_pci_tbl, | 78 | .id_table = adf_pci_tbl, |
83 | .name = adf_driver_name, | 79 | .name = ADF_DH895XCCVF_DEVICE_NAME, |
84 | .probe = adf_probe, | 80 | .probe = adf_probe, |
85 | .remove = adf_remove, | 81 | .remove = adf_remove, |
86 | }; | 82 | }; |
@@ -121,83 +117,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | |||
121 | adf_devmgr_rm_dev(accel_dev, pf); | 117 | adf_devmgr_rm_dev(accel_dev, pf); |
122 | } | 118 | } |
123 | 119 | ||
124 | static int adf_dev_configure(struct adf_accel_dev *accel_dev) | ||
125 | { | ||
126 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | ||
127 | unsigned long val, bank = 0; | ||
128 | |||
129 | if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) | ||
130 | goto err; | ||
131 | if (adf_cfg_section_add(accel_dev, "Accelerator0")) | ||
132 | goto err; | ||
133 | |||
134 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0); | ||
135 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, | ||
136 | (void *)&bank, ADF_DEC)) | ||
137 | goto err; | ||
138 | |||
139 | val = bank; | ||
140 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0); | ||
141 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, | ||
142 | (void *)&val, ADF_DEC)) | ||
143 | goto err; | ||
144 | |||
145 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0); | ||
146 | |||
147 | val = 128; | ||
148 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, | ||
149 | (void *)&val, ADF_DEC)) | ||
150 | goto err; | ||
151 | |||
152 | val = 512; | ||
153 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0); | ||
154 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
155 | key, (void *)&val, ADF_DEC)) | ||
156 | goto err; | ||
157 | |||
158 | val = 0; | ||
159 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0); | ||
160 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
161 | key, (void *)&val, ADF_DEC)) | ||
162 | goto err; | ||
163 | |||
164 | val = 2; | ||
165 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0); | ||
166 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
167 | key, (void *)&val, ADF_DEC)) | ||
168 | goto err; | ||
169 | |||
170 | val = 8; | ||
171 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0); | ||
172 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
173 | key, (void *)&val, ADF_DEC)) | ||
174 | goto err; | ||
175 | |||
176 | val = 10; | ||
177 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0); | ||
178 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
179 | key, (void *)&val, ADF_DEC)) | ||
180 | goto err; | ||
181 | |||
182 | val = ADF_COALESCING_DEF_TIME; | ||
183 | snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, | ||
184 | (int)bank); | ||
185 | if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", | ||
186 | key, (void *)&val, ADF_DEC)) | ||
187 | goto err; | ||
188 | |||
189 | val = 1; | ||
190 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
191 | ADF_NUM_CY, (void *)&val, ADF_DEC)) | ||
192 | goto err; | ||
193 | |||
194 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
195 | return 0; | ||
196 | err: | ||
197 | dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n"); | ||
198 | return -EINVAL; | ||
199 | } | ||
200 | |||
201 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 120 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
202 | { | 121 | { |
203 | struct adf_accel_dev *accel_dev; | 122 | struct adf_accel_dev *accel_dev; |
@@ -243,14 +162,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
243 | goto out_err; | 162 | goto out_err; |
244 | } | 163 | } |
245 | accel_dev->hw_device = hw_data; | 164 | accel_dev->hw_device = hw_data; |
246 | switch (ent->device) { | 165 | adf_init_hw_data_dh895xcciov(accel_dev->hw_device); |
247 | case ADF_DH895XCCIOV_PCI_DEVICE_ID: | ||
248 | adf_init_hw_data_dh895xcciov(accel_dev->hw_device); | ||
249 | break; | ||
250 | default: | ||
251 | ret = -ENODEV; | ||
252 | goto out_err; | ||
253 | } | ||
254 | 166 | ||
255 | /* Get Accelerators and Accelerators Engines masks */ | 167 | /* Get Accelerators and Accelerators Engines masks */ |
256 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); | 168 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); |
@@ -295,7 +207,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
295 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 207 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
296 | } | 208 | } |
297 | 209 | ||
298 | if (pci_request_regions(pdev, adf_driver_name)) { | 210 | if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) { |
299 | ret = -EFAULT; | 211 | ret = -EFAULT; |
300 | goto out_err_disable; | 212 | goto out_err_disable; |
301 | } | 213 | } |
@@ -322,7 +234,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
322 | /* Completion for VF2PF request/response message exchange */ | 234 | /* Completion for VF2PF request/response message exchange */ |
323 | init_completion(&accel_dev->vf.iov_msg_completion); | 235 | init_completion(&accel_dev->vf.iov_msg_completion); |
324 | 236 | ||
325 | ret = adf_dev_configure(accel_dev); | 237 | ret = qat_crypto_dev_config(accel_dev); |
326 | if (ret) | 238 | if (ret) |
327 | goto out_err_free_reg; | 239 | goto out_err_free_reg; |
328 | 240 | ||
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index 2c0d63d48747..dbcbbe242bd6 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c | |||
@@ -83,6 +83,14 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) | |||
83 | rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); | 83 | rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); |
84 | else | 84 | else |
85 | rctx->dst_nents = rctx->src_nents; | 85 | rctx->dst_nents = rctx->src_nents; |
86 | if (rctx->src_nents < 0) { | ||
87 | dev_err(qce->dev, "Invalid numbers of src SG.\n"); | ||
88 | return rctx->src_nents; | ||
89 | } | ||
90 | if (rctx->dst_nents < 0) { | ||
91 | dev_err(qce->dev, "Invalid numbers of dst SG.\n"); | ||
92 | return -rctx->dst_nents; | ||
93 | } | ||
86 | 94 | ||
87 | rctx->dst_nents += 1; | 95 | rctx->dst_nents += 1; |
88 | 96 | ||
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 0c9973ec80eb..47e114ac09d0 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c | |||
@@ -92,6 +92,11 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) | |||
92 | } | 92 | } |
93 | 93 | ||
94 | rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); | 94 | rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); |
95 | if (rctx->src_nents < 0) { | ||
96 | dev_err(qce->dev, "Invalid numbers of src SG.\n"); | ||
97 | return rctx->src_nents; | ||
98 | } | ||
99 | |||
95 | ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); | 100 | ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); |
96 | if (ret < 0) | 101 | if (ret < 0) |
97 | return ret; | 102 | return ret; |
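
Editor's note: both qce hunks add the same check: sg_nents_for_len() returns an entry count on success or a negative errno, which must be caught before the result is handed to dma_map_sg(), and the negative value should be propagated as-is. A standalone sketch of that convention; nents_for_len() is an illustrative stand-in, not the kernel API.

#include <errno.h>
#include <stdio.h>

/* Stand-in: count on success, negative errno on bad input. */
static int nents_for_len(int total_len, int seg_len)
{
	if (seg_len <= 0 || total_len < 0)
		return -EINVAL;
	return (total_len + seg_len - 1) / seg_len;
}

int main(void)
{
	int n = nents_for_len(4096, 0);

	if (n < 0) {
		fprintf(stderr, "Invalid number of SG entries (%d)\n", n);
		return -n;	/* propagate failure */
	}
	printf("%d entries\n", n);
	return 0;
}
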
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile new file mode 100644 index 000000000000..7051c6c715f3 --- /dev/null +++ b/drivers/crypto/rockchip/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o | ||
2 | rk_crypto-objs := rk3288_crypto.o \ | ||
3 | rk3288_crypto_ablkcipher.o | ||
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c new file mode 100644 index 000000000000..da9c73dce4af --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto.c | |||
@@ -0,0 +1,394 @@ | |||
1 | /* | ||
2 | * Crypto acceleration support for Rockchip RK3288 | ||
3 | * | ||
4 | * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd | ||
5 | * | ||
6 | * Author: Zain Wang <zain.wang@rock-chips.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers. | ||
13 | */ | ||
14 | |||
15 | #include "rk3288_crypto.h" | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/clk.h> | ||
20 | #include <linux/crypto.h> | ||
21 | #include <linux/reset.h> | ||
22 | |||
23 | static int rk_crypto_enable_clk(struct rk_crypto_info *dev) | ||
24 | { | ||
25 | int err; | ||
26 | |||
27 | err = clk_prepare_enable(dev->sclk); | ||
28 | if (err) { | ||
29 | dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n", | ||
30 | __func__, __LINE__); | ||
31 | goto err_return; | ||
32 | } | ||
33 | err = clk_prepare_enable(dev->aclk); | ||
34 | if (err) { | ||
35 | dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n", | ||
36 | __func__, __LINE__); | ||
37 | goto err_aclk; | ||
38 | } | ||
39 | err = clk_prepare_enable(dev->hclk); | ||
40 | if (err) { | ||
41 | dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n", | ||
42 | __func__, __LINE__); | ||
43 | goto err_hclk; | ||
44 | } | ||
45 | err = clk_prepare_enable(dev->dmaclk); | ||
46 | if (err) { | ||
47 | dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n", | ||
48 | __func__, __LINE__); | ||
49 | goto err_dmaclk; | ||
50 | } | ||
51 | return err; | ||
52 | err_dmaclk: | ||
53 | clk_disable_unprepare(dev->hclk); | ||
54 | err_hclk: | ||
55 | clk_disable_unprepare(dev->aclk); | ||
56 | err_aclk: | ||
57 | clk_disable_unprepare(dev->sclk); | ||
58 | err_return: | ||
59 | return err; | ||
60 | } | ||
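
Editor's note: rk_crypto_enable_clk() is the usual acquire-in-order, unwind-in-reverse goto ladder. A compilable sketch of the shape, with acquire()/release() standing in for clk_prepare_enable()/clk_disable_unprepare() and the failure point injected for demonstration:

#include <stdio.h>

static int acquire(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "couldn't acquire %s\n", name);
		return -1;
	}
	printf("acquired %s\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("released %s\n", name);
}

static int enable_all(int fail_at)
{
	int err;

	err = acquire("sclk", fail_at == 0);
	if (err)
		goto err_return;
	err = acquire("aclk", fail_at == 1);
	if (err)
		goto err_aclk;
	err = acquire("hclk", fail_at == 2);
	if (err)
		goto err_hclk;
	return 0;
err_hclk:
	release("aclk");	/* undo in reverse order */
err_aclk:
	release("sclk");
err_return:
	return err;
}

int main(void)
{
	return enable_all(2) ? 1 : 0;
}
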
61 | |||
62 | static void rk_crypto_disable_clk(struct rk_crypto_info *dev) | ||
63 | { | ||
64 | clk_disable_unprepare(dev->dmaclk); | ||
65 | clk_disable_unprepare(dev->hclk); | ||
66 | clk_disable_unprepare(dev->aclk); | ||
67 | clk_disable_unprepare(dev->sclk); | ||
68 | } | ||
69 | |||
70 | static int check_alignment(struct scatterlist *sg_src, | ||
71 | struct scatterlist *sg_dst, | ||
72 | int align_mask) | ||
73 | { | ||
74 | int in, out, align; | ||
75 | |||
76 | in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && | ||
77 | IS_ALIGNED((uint32_t)sg_src->length, align_mask); | ||
78 | if (!sg_dst) | ||
79 | return in; | ||
80 | out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && | ||
81 | IS_ALIGNED((uint32_t)sg_dst->length, align_mask); | ||
82 | align = in && out; | ||
83 | |||
84 | return (align && (sg_src->length == sg_dst->length)); | ||
85 | } | ||
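
Editor's note: check_alignment() gates the zero-copy DMA path: both segments must start on a 4-byte boundary, both lengths must be multiples of the engine's block mask, and the lengths must match. A standalone sketch under the assumption that align_mask is a power of two; struct seg is illustrative, not struct scatterlist.

#include <stdio.h>

struct seg { unsigned int offset, length; };	/* illustrative only */

static int seg_aligned(const struct seg *s, unsigned int align_mask)
{
	/* offset on a 4-byte boundary, length a multiple of the mask */
	return !(s->offset & 3) && !(s->length & (align_mask - 1));
}

static int check_alignment(const struct seg *src, const struct seg *dst,
			   unsigned int align_mask)
{
	int in = seg_aligned(src, align_mask);

	if (!dst)
		return in;
	return in && seg_aligned(dst, align_mask) &&
	       src->length == dst->length;
}

int main(void)
{
	struct seg s = { 0, 64 }, d = { 4, 64 };

	printf("aligned: %d\n", check_alignment(&s, &d, 16));
	return 0;
}
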
86 | |||
87 | static int rk_load_data(struct rk_crypto_info *dev, | ||
88 | struct scatterlist *sg_src, | ||
89 | struct scatterlist *sg_dst) | ||
90 | { | ||
91 | unsigned int count; | ||
92 | |||
93 | if (dev->aligned) | ||
94 | dev->aligned = check_alignment(sg_src, sg_dst, | ||
95 | dev->align_size); | ||
96 | if (dev->aligned) { | ||
97 | count = min(dev->left_bytes, sg_src->length); | ||
98 | dev->left_bytes -= count; | ||
99 | |||
100 | if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { | ||
101 | dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", | ||
102 | __func__, __LINE__); | ||
103 | return -EINVAL; | ||
104 | } | ||
105 | dev->addr_in = sg_dma_address(sg_src); | ||
106 | |||
107 | if (sg_dst) { | ||
108 | if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { | ||
109 | dev_err(dev->dev, | ||
110 | "[%s:%d] dma_map_sg(dst) error\n", | ||
111 | __func__, __LINE__); | ||
112 | dma_unmap_sg(dev->dev, sg_src, 1, | ||
113 | DMA_TO_DEVICE); | ||
114 | return -EINVAL; | ||
115 | } | ||
116 | dev->addr_out = sg_dma_address(sg_dst); | ||
117 | } | ||
118 | } else { | ||
119 | count = (dev->left_bytes > PAGE_SIZE) ? | ||
120 | PAGE_SIZE : dev->left_bytes; | ||
121 | |||
122 | if (!sg_pcopy_to_buffer(dev->first, dev->nents, | ||
123 | dev->addr_vir, count, | ||
124 | dev->total - dev->left_bytes)) { | ||
125 | dev_err(dev->dev, "[%s:%d] pcopy err\n", | ||
126 | __func__, __LINE__); | ||
127 | return -EINVAL; | ||
128 | } | ||
129 | dev->left_bytes -= count; | ||
130 | sg_init_one(&dev->sg_tmp, dev->addr_vir, count); | ||
131 | if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { | ||
132 | dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", | ||
133 | __func__, __LINE__); | ||
134 | return -ENOMEM; | ||
135 | } | ||
136 | dev->addr_in = sg_dma_address(&dev->sg_tmp); | ||
137 | |||
138 | if (sg_dst) { | ||
139 | if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, | ||
140 | DMA_FROM_DEVICE)) { | ||
141 | dev_err(dev->dev, | ||
142 | "[%s:%d] dma_map_sg(sg_tmp) error\n", | ||
143 | __func__, __LINE__); | ||
144 | dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, | ||
145 | DMA_TO_DEVICE); | ||
146 | return -ENOMEM; | ||
147 | } | ||
148 | dev->addr_out = sg_dma_address(&dev->sg_tmp); | ||
149 | } | ||
150 | } | ||
151 | dev->count = count; | ||
152 | return 0; | ||
153 | } | ||
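
Editor's note: in the unaligned fallback, rk_load_data() stages data through a one-page bounce buffer, consuming left_bytes in PAGE_SIZE chunks and computing each copy's source offset as total - left_bytes before the decrement. A minimal sketch of just that bookkeeping; the sizes are made up.

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int total = 10000, left = total;

	while (left) {
		unsigned int count = left > PAGE_SIZE ? PAGE_SIZE : left;
		unsigned int offset = total - left;	/* before decrement */

		printf("copy %u bytes from offset %u\n", count, offset);
		left -= count;
	}
	return 0;
}
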
154 | |||
155 | static void rk_unload_data(struct rk_crypto_info *dev) | ||
156 | { | ||
157 | struct scatterlist *sg_in, *sg_out; | ||
158 | |||
159 | sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; | ||
160 | dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); | ||
161 | |||
162 | if (dev->sg_dst) { | ||
163 | sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp; | ||
164 | dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); | ||
165 | } | ||
166 | } | ||
167 | |||
168 | static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) | ||
169 | { | ||
170 | struct rk_crypto_info *dev = platform_get_drvdata(dev_id); | ||
171 | u32 interrupt_status; | ||
172 | int err = 0; | ||
173 | |||
174 | spin_lock(&dev->lock); | ||
175 | interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); | ||
176 | CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); | ||
177 | if (interrupt_status & (RK_CRYPTO_HRDMA_ERR_INT | RK_CRYPTO_BCDMA_ERR_INT)) { | ||
178 | dev_warn(dev->dev, "DMA Error\n"); | ||
179 | err = -EFAULT; | ||
180 | } else if (interrupt_status & (RK_CRYPTO_HRDMA_DONE_INT | RK_CRYPTO_BCDMA_DONE_INT)) { | ||
181 | err = dev->update(dev); | ||
182 | } | ||
183 | if (err) | ||
184 | dev->complete(dev, err); | ||
185 | spin_unlock(&dev->lock); | ||
186 | return IRQ_HANDLED; | ||
187 | } | ||
188 | |||
189 | static void rk_crypto_tasklet_cb(unsigned long data) | ||
190 | { | ||
191 | struct rk_crypto_info *dev = (struct rk_crypto_info *)data; | ||
192 | struct crypto_async_request *async_req, *backlog; | ||
193 | unsigned long flags; | ||
194 | int err = 0; | ||
195 | |||
196 | spin_lock_irqsave(&dev->lock, flags); | ||
197 | backlog = crypto_get_backlog(&dev->queue); | ||
198 | async_req = crypto_dequeue_request(&dev->queue); | ||
199 | spin_unlock_irqrestore(&dev->lock, flags); | ||
200 | if (!async_req) { | ||
201 | dev_err(dev->dev, "async_req is NULL !!\n"); | ||
202 | return; | ||
203 | } | ||
204 | if (backlog) { | ||
205 | backlog->complete(backlog, -EINPROGRESS); | ||
206 | backlog = NULL; | ||
207 | } | ||
208 | |||
209 | if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER) | ||
210 | dev->ablk_req = ablkcipher_request_cast(async_req); | ||
211 | err = dev->start(dev); | ||
212 | if (err) | ||
213 | dev->complete(dev, err); | ||
214 | } | ||
215 | |||
216 | static struct rk_crypto_tmp *rk_cipher_algs[] = { | ||
217 | &rk_ecb_aes_alg, | ||
218 | &rk_cbc_aes_alg, | ||
219 | &rk_ecb_des_alg, | ||
220 | &rk_cbc_des_alg, | ||
221 | &rk_ecb_des3_ede_alg, | ||
222 | &rk_cbc_des3_ede_alg, | ||
223 | }; | ||
224 | |||
225 | static int rk_crypto_register(struct rk_crypto_info *crypto_info) | ||
226 | { | ||
227 | unsigned int i, k; | ||
228 | int err = 0; | ||
229 | |||
230 | for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { | ||
231 | rk_cipher_algs[i]->dev = crypto_info; | ||
232 | err = crypto_register_alg(&rk_cipher_algs[i]->alg); | ||
233 | if (err) | ||
234 | goto err_cipher_algs; | ||
235 | } | ||
236 | return 0; | ||
237 | |||
238 | err_cipher_algs: | ||
239 | for (k = 0; k < i; k++) | ||
240 | crypto_unregister_alg(&rk_cipher_algs[k]->alg); | ||
241 | return err; | ||
242 | } | ||
243 | |||
244 | static void rk_crypto_unregister(void) | ||
245 | { | ||
246 | unsigned int i; | ||
247 | |||
248 | for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) | ||
249 | crypto_unregister_alg(&rk_cipher_algs[i]->alg); | ||
250 | } | ||
251 | |||
252 | static void rk_crypto_action(void *data) | ||
253 | { | ||
254 | struct rk_crypto_info *crypto_info = data; | ||
255 | |||
256 | reset_control_assert(crypto_info->rst); | ||
257 | } | ||
258 | |||
259 | static const struct of_device_id crypto_of_id_table[] = { | ||
260 | { .compatible = "rockchip,rk3288-crypto" }, | ||
261 | {} | ||
262 | }; | ||
263 | MODULE_DEVICE_TABLE(of, crypto_of_id_table); | ||
264 | |||
265 | static int rk_crypto_probe(struct platform_device *pdev) | ||
266 | { | ||
267 | struct resource *res; | ||
268 | struct device *dev = &pdev->dev; | ||
269 | struct rk_crypto_info *crypto_info; | ||
270 | int err = 0; | ||
271 | |||
272 | crypto_info = devm_kzalloc(&pdev->dev, | ||
273 | sizeof(*crypto_info), GFP_KERNEL); | ||
274 | if (!crypto_info) { | ||
275 | err = -ENOMEM; | ||
276 | goto err_crypto; | ||
277 | } | ||
278 | |||
279 | crypto_info->rst = devm_reset_control_get(dev, "crypto-rst"); | ||
280 | if (IS_ERR(crypto_info->rst)) { | ||
281 | err = PTR_ERR(crypto_info->rst); | ||
282 | goto err_crypto; | ||
283 | } | ||
284 | |||
285 | reset_control_assert(crypto_info->rst); | ||
286 | usleep_range(10, 20); | ||
287 | reset_control_deassert(crypto_info->rst); | ||
288 | |||
289 | err = devm_add_action(dev, rk_crypto_action, crypto_info); | ||
290 | if (err) { | ||
291 | reset_control_assert(crypto_info->rst); | ||
292 | goto err_crypto; | ||
293 | } | ||
294 | |||
295 | spin_lock_init(&crypto_info->lock); | ||
296 | |||
297 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
298 | crypto_info->reg = devm_ioremap_resource(&pdev->dev, res); | ||
299 | if (IS_ERR(crypto_info->reg)) { | ||
300 | err = PTR_ERR(crypto_info->reg); | ||
301 | goto err_crypto; | ||
302 | } | ||
303 | |||
304 | crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk"); | ||
305 | if (IS_ERR(crypto_info->aclk)) { | ||
306 | err = PTR_ERR(crypto_info->aclk); | ||
307 | goto err_crypto; | ||
308 | } | ||
309 | |||
310 | crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk"); | ||
311 | if (IS_ERR(crypto_info->hclk)) { | ||
312 | err = PTR_ERR(crypto_info->hclk); | ||
313 | goto err_crypto; | ||
314 | } | ||
315 | |||
316 | crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk"); | ||
317 | if (IS_ERR(crypto_info->sclk)) { | ||
318 | err = PTR_ERR(crypto_info->sclk); | ||
319 | goto err_crypto; | ||
320 | } | ||
321 | |||
322 | crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk"); | ||
323 | if (IS_ERR(crypto_info->dmaclk)) { | ||
324 | err = PTR_ERR(crypto_info->dmaclk); | ||
325 | goto err_crypto; | ||
326 | } | ||
327 | |||
328 | crypto_info->irq = platform_get_irq(pdev, 0); | ||
329 | if (crypto_info->irq < 0) { | ||
330 | dev_warn(dev, | ||
331 | "control interrupt is not available.\n"); | ||
332 | err = crypto_info->irq; | ||
333 | goto err_crypto; | ||
334 | } | ||
335 | |||
336 | err = devm_request_irq(&pdev->dev, crypto_info->irq, | ||
337 | rk_crypto_irq_handle, IRQF_SHARED, | ||
338 | "rk-crypto", pdev); | ||
339 | |||
340 | if (err) { | ||
341 | dev_err(crypto_info->dev, "irq request failed.\n"); | ||
342 | goto err_crypto; | ||
343 | } | ||
344 | |||
345 | crypto_info->dev = &pdev->dev; | ||
346 | platform_set_drvdata(pdev, crypto_info); | ||
347 | |||
348 | tasklet_init(&crypto_info->crypto_tasklet, | ||
349 | rk_crypto_tasklet_cb, (unsigned long)crypto_info); | ||
350 | crypto_init_queue(&crypto_info->queue, 50); | ||
351 | |||
352 | crypto_info->enable_clk = rk_crypto_enable_clk; | ||
353 | crypto_info->disable_clk = rk_crypto_disable_clk; | ||
354 | crypto_info->load_data = rk_load_data; | ||
355 | crypto_info->unload_data = rk_unload_data; | ||
356 | |||
357 | err = rk_crypto_register(crypto_info); | ||
358 | if (err) { | ||
359 | dev_err(dev, "failed to register algorithms\n"); | ||
360 | goto err_register_alg; | ||
361 | } | ||
362 | |||
363 | dev_info(dev, "Crypto Accelerator successfully registered\n"); | ||
364 | return 0; | ||
365 | |||
366 | err_register_alg: | ||
367 | tasklet_kill(&crypto_info->crypto_tasklet); | ||
368 | err_crypto: | ||
369 | return err; | ||
370 | } | ||
371 | |||
372 | static int rk_crypto_remove(struct platform_device *pdev) | ||
373 | { | ||
374 | struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); | ||
375 | |||
376 | rk_crypto_unregister(); | ||
377 | tasklet_kill(&crypto_tmp->crypto_tasklet); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | static struct platform_driver crypto_driver = { | ||
382 | .probe = rk_crypto_probe, | ||
383 | .remove = rk_crypto_remove, | ||
384 | .driver = { | ||
385 | .name = "rk3288-crypto", | ||
386 | .of_match_table = crypto_of_id_table, | ||
387 | }, | ||
388 | }; | ||
389 | |||
390 | module_platform_driver(crypto_driver); | ||
391 | |||
392 | MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>"); | ||
393 | MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine"); | ||
394 | MODULE_LICENSE("GPL"); | ||
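For orientation, the algorithms this driver registers are reached through the ablkcipher API of this kernel generation. Below is a minimal sketch of an in-kernel caller exercising "ecb(aes)"; rk_aes_smoke_test() and its completion plumbing are illustrative, not part of the patch, and the completion wait is the standard pattern for a CRYPTO_ALG_ASYNC driver:

	/*
	 * Sketch only: drives "ecb(aes)" through the ablkcipher API that
	 * rk3288_crypto_ablkcipher.c implements. The buffer is kmalloc'ed
	 * because the driver DMAs to and from it via the scatterlist.
	 */
	#include <crypto/aes.h>
	#include <linux/completion.h>
	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	struct rk_test_result {
		struct completion done;
		int err;
	};

	static void rk_test_complete(struct crypto_async_request *areq, int err)
	{
		struct rk_test_result *res = areq->data;

		if (err == -EINPROGRESS)
			return;
		res->err = err;
		complete(&res->done);
	}

	static int rk_aes_smoke_test(void)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct rk_test_result res;
		struct scatterlist sg;
		u8 key[AES_KEYSIZE_128] = { 0 };
		u8 *buf;
		int err;

		tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
		if (!buf) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
		if (err)
			goto out_free_buf;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_buf;
		}

		init_completion(&res.done);
		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						rk_test_complete, &res);
		sg_init_one(&sg, buf, AES_BLOCK_SIZE);
		ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, NULL);

		err = crypto_ablkcipher_encrypt(req);
		if (err == -EINPROGRESS || err == -EBUSY) {
			wait_for_completion(&res.done);
			err = res.err;
		}

		ablkcipher_request_free(req);
	out_free_buf:
		kfree(buf);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return err;
	}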
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h new file mode 100644 index 000000000000..e499c2c6c903 --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto.h | |||
@@ -0,0 +1,216 @@ | |||
1 | #ifndef __RK3288_CRYPTO_H__ | ||
2 | #define __RK3288_CRYPTO_H__ | ||
3 | |||
4 | #include <crypto/aes.h> | ||
5 | #include <crypto/des.h> | ||
6 | #include <crypto/algapi.h> | ||
7 | #include <linux/interrupt.h> | ||
8 | #include <linux/delay.h> | ||
9 | |||
10 | #define _SBF(v, f) ((v) << (f)) | ||
11 | |||
12 | /* Crypto control registers */ | ||
13 | #define RK_CRYPTO_INTSTS 0x0000 | ||
14 | #define RK_CRYPTO_PKA_DONE_INT BIT(5) | ||
15 | #define RK_CRYPTO_HASH_DONE_INT BIT(4) | ||
16 | #define RK_CRYPTO_HRDMA_ERR_INT BIT(3) | ||
17 | #define RK_CRYPTO_HRDMA_DONE_INT BIT(2) | ||
18 | #define RK_CRYPTO_BCDMA_ERR_INT BIT(1) | ||
19 | #define RK_CRYPTO_BCDMA_DONE_INT BIT(0) | ||
20 | |||
21 | #define RK_CRYPTO_INTENA 0x0004 | ||
22 | #define RK_CRYPTO_PKA_DONE_ENA BIT(5) | ||
23 | #define RK_CRYPTO_HASH_DONE_ENA BIT(4) | ||
24 | #define RK_CRYPTO_HRDMA_ERR_ENA BIT(3) | ||
25 | #define RK_CRYPTO_HRDMA_DONE_ENA BIT(2) | ||
26 | #define RK_CRYPTO_BCDMA_ERR_ENA BIT(1) | ||
27 | #define RK_CRYPTO_BCDMA_DONE_ENA BIT(0) | ||
28 | |||
29 | #define RK_CRYPTO_CTRL 0x0008 | ||
30 | #define RK_CRYPTO_WRITE_MASK _SBF(0xFFFF, 16) | ||
31 | #define RK_CRYPTO_TRNG_FLUSH BIT(9) | ||
32 | #define RK_CRYPTO_TRNG_START BIT(8) | ||
33 | #define RK_CRYPTO_PKA_FLUSH BIT(7) | ||
34 | #define RK_CRYPTO_HASH_FLUSH BIT(6) | ||
35 | #define RK_CRYPTO_BLOCK_FLUSH BIT(5) | ||
36 | #define RK_CRYPTO_PKA_START BIT(4) | ||
37 | #define RK_CRYPTO_HASH_START BIT(3) | ||
38 | #define RK_CRYPTO_BLOCK_START BIT(2) | ||
39 | #define RK_CRYPTO_TDES_START BIT(1) | ||
40 | #define RK_CRYPTO_AES_START BIT(0) | ||
41 | |||
42 | #define RK_CRYPTO_CONF 0x000c | ||
43 | /* HASH Receive DMA Address Mode: fixed | increment */ | ||
44 | #define RK_CRYPTO_HR_ADDR_MODE BIT(8) | ||
45 | /* Block Transmit DMA Address Mode: fixed | increment */ | ||
46 | #define RK_CRYPTO_BT_ADDR_MODE BIT(7) | ||
47 | /* Block Receive DMA Address Mode: fixed | increment */ | ||
48 | #define RK_CRYPTO_BR_ADDR_MODE BIT(6) | ||
49 | #define RK_CRYPTO_BYTESWAP_HRFIFO BIT(5) | ||
50 | #define RK_CRYPTO_BYTESWAP_BTFIFO BIT(4) | ||
51 | #define RK_CRYPTO_BYTESWAP_BRFIFO BIT(3) | ||
52 | /* AES = 0 OR DES = 1 */ | ||
53 | #define RK_CRYPTO_DESSEL BIT(2) | ||
54 | #define RK_CRYPTO_HASHINSEL_INDEPENDENT_SOURCE _SBF(0x00, 0) | ||
55 | #define RK_CRYPTO_HASHINSEL_BLOCK_CIPHER_INPUT _SBF(0x01, 0) | ||
56 | #define RK_CRYPTO_HASHINSEL_BLOCK_CIPHER_OUTPUT _SBF(0x02, 0) | ||
57 | |||
58 | /* Block Receiving DMA Start Address Register */ | ||
59 | #define RK_CRYPTO_BRDMAS 0x0010 | ||
60 | /* Block Transmitting DMA Start Address Register */ | ||
61 | #define RK_CRYPTO_BTDMAS 0x0014 | ||
62 | /* Block Receiving DMA Length Register */ | ||
63 | #define RK_CRYPTO_BRDMAL 0x0018 | ||
64 | /* Hash Receiving DMA Start Address Register */ | ||
65 | #define RK_CRYPTO_HRDMAS 0x001c | ||
66 | /* Hash Receiving DMA Length Register */ | ||
67 | #define RK_CRYPTO_HRDMAL 0x0020 | ||
68 | |||
69 | /* AES registers */ | ||
70 | #define RK_CRYPTO_AES_CTRL 0x0080 | ||
71 | #define RK_CRYPTO_AES_BYTESWAP_CNT BIT(11) | ||
72 | #define RK_CRYPTO_AES_BYTESWAP_KEY BIT(10) | ||
73 | #define RK_CRYPTO_AES_BYTESWAP_IV BIT(9) | ||
74 | #define RK_CRYPTO_AES_BYTESWAP_DO BIT(8) | ||
75 | #define RK_CRYPTO_AES_BYTESWAP_DI BIT(7) | ||
76 | #define RK_CRYPTO_AES_KEY_CHANGE BIT(6) | ||
77 | #define RK_CRYPTO_AES_ECB_MODE _SBF(0x00, 4) | ||
78 | #define RK_CRYPTO_AES_CBC_MODE _SBF(0x01, 4) | ||
79 | #define RK_CRYPTO_AES_CTR_MODE _SBF(0x02, 4) | ||
80 | #define RK_CRYPTO_AES_128BIT_key _SBF(0x00, 2) | ||
81 | #define RK_CRYPTO_AES_192BIT_key _SBF(0x01, 2) | ||
82 | #define RK_CRYPTO_AES_256BIT_key _SBF(0x02, 2) | ||
83 | /* Slave = 0, fifo = 1 */ | ||
84 | #define RK_CRYPTO_AES_FIFO_MODE BIT(1) | ||
85 | /* Encryption = 0, Decryption = 1 */ | ||
86 | #define RK_CRYPTO_AES_DEC BIT(0) | ||
87 | |||
88 | #define RK_CRYPTO_AES_STS 0x0084 | ||
89 | #define RK_CRYPTO_AES_DONE BIT(0) | ||
90 | |||
91 | /* AES Input Data 0-3 Register */ | ||
92 | #define RK_CRYPTO_AES_DIN_0 0x0088 | ||
93 | #define RK_CRYPTO_AES_DIN_1 0x008c | ||
94 | #define RK_CRYPTO_AES_DIN_2 0x0090 | ||
95 | #define RK_CRYPTO_AES_DIN_3 0x0094 | ||
96 | |||
97 | /* AES Output Data 0-3 Register */ | ||
98 | #define RK_CRYPTO_AES_DOUT_0 0x0098 | ||
99 | #define RK_CRYPTO_AES_DOUT_1 0x009c | ||
100 | #define RK_CRYPTO_AES_DOUT_2 0x00a0 | ||
101 | #define RK_CRYPTO_AES_DOUT_3 0x00a4 | ||
102 | |||
103 | /* AES IV Data 0-3 Register */ | ||
104 | #define RK_CRYPTO_AES_IV_0 0x00a8 | ||
105 | #define RK_CRYPTO_AES_IV_1 0x00ac | ||
106 | #define RK_CRYPTO_AES_IV_2 0x00b0 | ||
107 | #define RK_CRYPTO_AES_IV_3 0x00b4 | ||
108 | |||
109 | /* AES Key Data 0-3 Register */ | ||
110 | #define RK_CRYPTO_AES_KEY_0 0x00b8 | ||
111 | #define RK_CRYPTO_AES_KEY_1 0x00bc | ||
112 | #define RK_CRYPTO_AES_KEY_2 0x00c0 | ||
113 | #define RK_CRYPTO_AES_KEY_3 0x00c4 | ||
114 | #define RK_CRYPTO_AES_KEY_4 0x00c8 | ||
115 | #define RK_CRYPTO_AES_KEY_5 0x00cc | ||
116 | #define RK_CRYPTO_AES_KEY_6 0x00d0 | ||
117 | #define RK_CRYPTO_AES_KEY_7 0x00d4 | ||
118 | |||
119 | /* des/tdes */ | ||
120 | #define RK_CRYPTO_TDES_CTRL 0x0100 | ||
121 | #define RK_CRYPTO_TDES_BYTESWAP_KEY BIT(8) | ||
122 | #define RK_CRYPTO_TDES_BYTESWAP_IV BIT(7) | ||
123 | #define RK_CRYPTO_TDES_BYTESWAP_DO BIT(6) | ||
124 | #define RK_CRYPTO_TDES_BYTESWAP_DI BIT(5) | ||
125 | /* 0: ECB, 1: CBC */ | ||
126 | #define RK_CRYPTO_TDES_CHAINMODE_CBC BIT(4) | ||
127 | /* TDES Key Mode, 0: EDE, 1: EEE */ | ||
128 | #define RK_CRYPTO_TDES_EEE BIT(3) | ||
129 | /* 0: DES, 1: TDES */ | ||
130 | #define RK_CRYPTO_TDES_SELECT BIT(2) | ||
131 | /* 0: Slave, 1: Fifo */ | ||
132 | #define RK_CRYPTO_TDES_FIFO_MODE BIT(1) | ||
133 | /* Encryption = 0, Decryption = 1 */ | ||
134 | #define RK_CRYPTO_TDES_DEC BIT(0) | ||
135 | |||
136 | #define RK_CRYPTO_TDES_STS 0x0104 | ||
137 | #define RK_CRYPTO_TDES_DONE BIT(0) | ||
138 | |||
139 | #define RK_CRYPTO_TDES_DIN_0 0x0108 | ||
140 | #define RK_CRYPTO_TDES_DIN_1 0x010c | ||
141 | #define RK_CRYPTO_TDES_DOUT_0 0x0110 | ||
142 | #define RK_CRYPTO_TDES_DOUT_1 0x0114 | ||
143 | #define RK_CRYPTO_TDES_IV_0 0x0118 | ||
144 | #define RK_CRYPTO_TDES_IV_1 0x011c | ||
145 | #define RK_CRYPTO_TDES_KEY1_0 0x0120 | ||
146 | #define RK_CRYPTO_TDES_KEY1_1 0x0124 | ||
147 | #define RK_CRYPTO_TDES_KEY2_0 0x0128 | ||
148 | #define RK_CRYPTO_TDES_KEY2_1 0x012c | ||
149 | #define RK_CRYPTO_TDES_KEY3_0 0x0130 | ||
150 | #define RK_CRYPTO_TDES_KEY3_1 0x0134 | ||
151 | |||
152 | #define CRYPTO_READ(dev, offset) \ | ||
153 | readl_relaxed(((dev)->reg + (offset))) | ||
154 | #define CRYPTO_WRITE(dev, offset, val) \ | ||
155 | writel_relaxed((val), ((dev)->reg + (offset))) | ||
156 | |||
157 | struct rk_crypto_info { | ||
158 | struct device *dev; | ||
159 | struct clk *aclk; | ||
160 | struct clk *hclk; | ||
161 | struct clk *sclk; | ||
162 | struct clk *dmaclk; | ||
163 | struct reset_control *rst; | ||
164 | void __iomem *reg; | ||
165 | int irq; | ||
166 | struct crypto_queue queue; | ||
167 | struct tasklet_struct crypto_tasklet; | ||
168 | struct ablkcipher_request *ablk_req; | ||
169 | /* device lock */ | ||
170 | spinlock_t lock; | ||
171 | |||
172 | /* state shared with the algorithm implementations */ | ||
173 | struct scatterlist *sg_src; | ||
174 | struct scatterlist *sg_dst; | ||
175 | struct scatterlist sg_tmp; | ||
176 | struct scatterlist *first; | ||
177 | unsigned int left_bytes; | ||
178 | void *addr_vir; | ||
179 | int aligned; | ||
180 | int align_size; | ||
181 | size_t nents; | ||
182 | unsigned int total; | ||
183 | unsigned int count; | ||
184 | u32 mode; | ||
185 | dma_addr_t addr_in; | ||
186 | dma_addr_t addr_out; | ||
187 | int (*start)(struct rk_crypto_info *dev); | ||
188 | int (*update)(struct rk_crypto_info *dev); | ||
189 | void (*complete)(struct rk_crypto_info *dev, int err); | ||
190 | int (*enable_clk)(struct rk_crypto_info *dev); | ||
191 | void (*disable_clk)(struct rk_crypto_info *dev); | ||
192 | int (*load_data)(struct rk_crypto_info *dev, | ||
193 | struct scatterlist *sg_src, | ||
194 | struct scatterlist *sg_dst); | ||
195 | void (*unload_data)(struct rk_crypto_info *dev); | ||
196 | }; | ||
197 | |||
198 | /* the private data of the cipher transform */ | ||
199 | struct rk_cipher_ctx { | ||
200 | struct rk_crypto_info *dev; | ||
201 | unsigned int keylen; | ||
202 | }; | ||
203 | |||
204 | struct rk_crypto_tmp { | ||
205 | struct rk_crypto_info *dev; | ||
206 | struct crypto_alg alg; | ||
207 | }; | ||
208 | |||
209 | extern struct rk_crypto_tmp rk_ecb_aes_alg; | ||
210 | extern struct rk_crypto_tmp rk_cbc_aes_alg; | ||
211 | extern struct rk_crypto_tmp rk_ecb_des_alg; | ||
212 | extern struct rk_crypto_tmp rk_cbc_des_alg; | ||
213 | extern struct rk_crypto_tmp rk_ecb_des3_ede_alg; | ||
214 | extern struct rk_crypto_tmp rk_cbc_des3_ede_alg; | ||
215 | |||
216 | #endif | ||
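A reading note on the control-register convention above: RK_CRYPTO_WRITE_MASK reflects that bits [31:16] of RK_CRYPTO_CTRL write-enable the corresponding bits in [15:0], so any bit written low must be mirrored high. A one-liner illustrating the idiom (sketch; rk_ctrl_set() is a hypothetical helper, but the expression is exactly what crypto_dma_start() writes in the cipher file below):

	/* Sketch: masked write into a CTRL-style register where bits
	 * [31:16] write-enable the corresponding bits in [15:0].
	 */
	static inline void rk_ctrl_set(struct rk_crypto_info *dev, u32 bits)
	{
		CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, bits | _SBF(bits, 16));
	}

	/* e.g. rk_ctrl_set(dev, RK_CRYPTO_BLOCK_START); */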
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c new file mode 100644 index 000000000000..d98b681f6c06 --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | |||
@@ -0,0 +1,505 @@ | |||
1 | /* | ||
2 | * Crypto acceleration support for Rockchip RK3288 | ||
3 | * | ||
4 | * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd | ||
5 | * | ||
6 | * Author: Zain Wang <zain.wang@rock-chips.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers. | ||
13 | */ | ||
14 | #include "rk3288_crypto.h" | ||
15 | |||
16 | #define RK_CRYPTO_DEC BIT(0) | ||
17 | |||
18 | static void rk_crypto_complete(struct rk_crypto_info *dev, int err) | ||
19 | { | ||
20 | if (dev->ablk_req->base.complete) | ||
21 | dev->ablk_req->base.complete(&dev->ablk_req->base, err); | ||
22 | } | ||
23 | |||
24 | static int rk_handle_req(struct rk_crypto_info *dev, | ||
25 | struct ablkcipher_request *req) | ||
26 | { | ||
27 | unsigned long flags; | ||
28 | int err; | ||
29 | |||
30 | if (!IS_ALIGNED(req->nbytes, dev->align_size)) | ||
31 | return -EINVAL; | ||
32 | |||
33 | dev->left_bytes = req->nbytes; | ||
34 | dev->total = req->nbytes; | ||
35 | dev->sg_src = req->src; | ||
36 | dev->first = req->src; | ||
37 | dev->nents = sg_nents(req->src); | ||
38 | dev->sg_dst = req->dst; | ||
39 | dev->aligned = 1; | ||
40 | dev->ablk_req = req; | ||
41 | |||
42 | spin_lock_irqsave(&dev->lock, flags); | ||
43 | err = ablkcipher_enqueue_request(&dev->queue, req); | ||
44 | spin_unlock_irqrestore(&dev->lock, flags); | ||
45 | tasklet_schedule(&dev->crypto_tasklet); | ||
46 | return err; | ||
47 | } | ||
48 | |||
49 | static int rk_aes_setkey(struct crypto_ablkcipher *cipher, | ||
50 | const u8 *key, unsigned int keylen) | ||
51 | { | ||
52 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
53 | struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
54 | |||
55 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | ||
56 | keylen != AES_KEYSIZE_256) { | ||
57 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
58 | return -EINVAL; | ||
59 | } | ||
60 | ctx->keylen = keylen; | ||
61 | memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int rk_tdes_setkey(struct crypto_ablkcipher *cipher, | ||
66 | const u8 *key, unsigned int keylen) | ||
67 | { | ||
68 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
69 | struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
70 | u32 tmp[DES_EXPKEY_WORDS]; | ||
71 | |||
72 | if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) { | ||
73 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
74 | return -EINVAL; | ||
75 | } | ||
76 | |||
77 | if (keylen == DES_KEY_SIZE) { | ||
78 | if (!des_ekey(tmp, key) && | ||
79 | (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | ||
80 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
81 | return -EINVAL; | ||
82 | } | ||
83 | } | ||
84 | |||
85 | ctx->keylen = keylen; | ||
86 | memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static int rk_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
91 | { | ||
92 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
93 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
94 | struct rk_crypto_info *dev = ctx->dev; | ||
95 | |||
96 | dev->mode = RK_CRYPTO_AES_ECB_MODE; | ||
97 | return rk_handle_req(dev, req); | ||
98 | } | ||
99 | |||
100 | static int rk_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
101 | { | ||
102 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
103 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
104 | struct rk_crypto_info *dev = ctx->dev; | ||
105 | |||
106 | dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; | ||
107 | return rk_handle_req(dev, req); | ||
108 | } | ||
109 | |||
110 | static int rk_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
111 | { | ||
112 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
113 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
114 | struct rk_crypto_info *dev = ctx->dev; | ||
115 | |||
116 | dev->mode = RK_CRYPTO_AES_CBC_MODE; | ||
117 | return rk_handle_req(dev, req); | ||
118 | } | ||
119 | |||
120 | static int rk_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
121 | { | ||
122 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
123 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
124 | struct rk_crypto_info *dev = ctx->dev; | ||
125 | |||
126 | dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; | ||
127 | return rk_handle_req(dev, req); | ||
128 | } | ||
129 | |||
130 | static int rk_des_ecb_encrypt(struct ablkcipher_request *req) | ||
131 | { | ||
132 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
133 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
134 | struct rk_crypto_info *dev = ctx->dev; | ||
135 | |||
136 | dev->mode = 0; | ||
137 | return rk_handle_req(dev, req); | ||
138 | } | ||
139 | |||
140 | static int rk_des_ecb_decrypt(struct ablkcipher_request *req) | ||
141 | { | ||
142 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
143 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
144 | struct rk_crypto_info *dev = ctx->dev; | ||
145 | |||
146 | dev->mode = RK_CRYPTO_DEC; | ||
147 | return rk_handle_req(dev, req); | ||
148 | } | ||
149 | |||
150 | static int rk_des_cbc_encrypt(struct ablkcipher_request *req) | ||
151 | { | ||
152 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
153 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
154 | struct rk_crypto_info *dev = ctx->dev; | ||
155 | |||
156 | dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; | ||
157 | return rk_handle_req(dev, req); | ||
158 | } | ||
159 | |||
160 | static int rk_des_cbc_decrypt(struct ablkcipher_request *req) | ||
161 | { | ||
162 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
163 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
164 | struct rk_crypto_info *dev = ctx->dev; | ||
165 | |||
166 | dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; | ||
167 | return rk_handle_req(dev, req); | ||
168 | } | ||
169 | |||
170 | static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req) | ||
171 | { | ||
172 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
173 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
174 | struct rk_crypto_info *dev = ctx->dev; | ||
175 | |||
176 | dev->mode = RK_CRYPTO_TDES_SELECT; | ||
177 | return rk_handle_req(dev, req); | ||
178 | } | ||
179 | |||
180 | static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req) | ||
181 | { | ||
182 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
183 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
184 | struct rk_crypto_info *dev = ctx->dev; | ||
185 | |||
186 | dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; | ||
187 | return rk_handle_req(dev, req); | ||
188 | } | ||
189 | |||
190 | static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req) | ||
191 | { | ||
192 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
193 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
194 | struct rk_crypto_info *dev = ctx->dev; | ||
195 | |||
196 | dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; | ||
197 | return rk_handle_req(dev, req); | ||
198 | } | ||
199 | |||
200 | static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req) | ||
201 | { | ||
202 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
203 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
204 | struct rk_crypto_info *dev = ctx->dev; | ||
205 | |||
206 | dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | | ||
207 | RK_CRYPTO_DEC; | ||
208 | return rk_handle_req(dev, req); | ||
209 | } | ||
210 | |||
211 | static void rk_ablk_hw_init(struct rk_crypto_info *dev) | ||
212 | { | ||
213 | struct crypto_ablkcipher *cipher = | ||
214 | crypto_ablkcipher_reqtfm(dev->ablk_req); | ||
215 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
216 | struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
217 | u32 ivsize, block, conf_reg = 0; | ||
218 | |||
219 | block = crypto_tfm_alg_blocksize(tfm); | ||
220 | ivsize = crypto_ablkcipher_ivsize(cipher); | ||
221 | |||
222 | if (block == DES_BLOCK_SIZE) { | ||
223 | dev->mode |= RK_CRYPTO_TDES_FIFO_MODE | | ||
224 | RK_CRYPTO_TDES_BYTESWAP_KEY | | ||
225 | RK_CRYPTO_TDES_BYTESWAP_IV; | ||
226 | CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode); | ||
227 | memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, | ||
228 | dev->ablk_req->info, ivsize); | ||
229 | conf_reg = RK_CRYPTO_DESSEL; | ||
230 | } else { | ||
231 | dev->mode |= RK_CRYPTO_AES_FIFO_MODE | | ||
232 | RK_CRYPTO_AES_KEY_CHANGE | | ||
233 | RK_CRYPTO_AES_BYTESWAP_KEY | | ||
234 | RK_CRYPTO_AES_BYTESWAP_IV; | ||
235 | if (ctx->keylen == AES_KEYSIZE_192) | ||
236 | dev->mode |= RK_CRYPTO_AES_192BIT_key; | ||
237 | else if (ctx->keylen == AES_KEYSIZE_256) | ||
238 | dev->mode |= RK_CRYPTO_AES_256BIT_key; | ||
239 | CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode); | ||
240 | memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, | ||
241 | dev->ablk_req->info, ivsize); | ||
242 | } | ||
243 | conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | | ||
244 | RK_CRYPTO_BYTESWAP_BRFIFO; | ||
245 | CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg); | ||
246 | CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, | ||
247 | RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); | ||
248 | } | ||
249 | |||
250 | static void crypto_dma_start(struct rk_crypto_info *dev) | ||
251 | { | ||
252 | CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); | ||
253 | CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); | ||
254 | CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); | ||
255 | CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | | ||
256 | _SBF(RK_CRYPTO_BLOCK_START, 16)); | ||
257 | } | ||
258 | |||
259 | static int rk_set_data_start(struct rk_crypto_info *dev) | ||
260 | { | ||
261 | int err; | ||
262 | |||
263 | err = dev->load_data(dev, dev->sg_src, dev->sg_dst); | ||
264 | if (!err) | ||
265 | crypto_dma_start(dev); | ||
266 | return err; | ||
267 | } | ||
268 | |||
269 | static int rk_ablk_start(struct rk_crypto_info *dev) | ||
270 | { | ||
271 | unsigned long flags; | ||
272 | int err; | ||
273 | |||
274 | spin_lock_irqsave(&dev->lock, flags); | ||
275 | rk_ablk_hw_init(dev); | ||
276 | err = rk_set_data_start(dev); | ||
277 | spin_unlock_irqrestore(&dev->lock, flags); | ||
278 | return err; | ||
279 | } | ||
280 | |||
281 | static void rk_iv_copyback(struct rk_crypto_info *dev) | ||
282 | { | ||
283 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req); | ||
284 | u32 ivsize = crypto_ablkcipher_ivsize(tfm); | ||
285 | |||
286 | if (ivsize == DES_BLOCK_SIZE) | ||
287 | memcpy_fromio(dev->ablk_req->info, | ||
288 | dev->reg + RK_CRYPTO_TDES_IV_0, ivsize); | ||
289 | else if (ivsize == AES_BLOCK_SIZE) | ||
290 | memcpy_fromio(dev->ablk_req->info, | ||
291 | dev->reg + RK_CRYPTO_AES_IV_0, ivsize); | ||
292 | } | ||
293 | |||
294 | /* return: | ||
295 | * true: some error occurred | ||
296 | * false: no error, continue | ||
297 | */ | ||
298 | static int rk_ablk_rx(struct rk_crypto_info *dev) | ||
299 | { | ||
300 | int err = 0; | ||
301 | |||
302 | dev->unload_data(dev); | ||
303 | if (!dev->aligned) { | ||
304 | if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents, | ||
305 | dev->addr_vir, dev->count, | ||
306 | dev->total - dev->left_bytes - | ||
307 | dev->count)) { | ||
308 | err = -EINVAL; | ||
309 | goto out_rx; | ||
310 | } | ||
311 | } | ||
312 | if (dev->left_bytes) { | ||
313 | if (dev->aligned) { | ||
314 | if (sg_is_last(dev->sg_src)) { | ||
315 | dev_err(dev->dev, "[%s:%d] Lack of data\n", | ||
316 | __func__, __LINE__); | ||
317 | err = -ENOMEM; | ||
318 | goto out_rx; | ||
319 | } | ||
320 | dev->sg_src = sg_next(dev->sg_src); | ||
321 | dev->sg_dst = sg_next(dev->sg_dst); | ||
322 | } | ||
323 | err = rk_set_data_start(dev); | ||
324 | } else { | ||
325 | rk_iv_copyback(dev); | ||
326 | /* the calculation has finished without any error */ | ||
327 | dev->complete(dev, 0); | ||
328 | } | ||
329 | out_rx: | ||
330 | return err; | ||
331 | } | ||
332 | |||
333 | static int rk_ablk_cra_init(struct crypto_tfm *tfm) | ||
334 | { | ||
335 | struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
336 | struct crypto_alg *alg = tfm->__crt_alg; | ||
337 | struct rk_crypto_tmp *algt; | ||
338 | |||
339 | algt = container_of(alg, struct rk_crypto_tmp, alg); | ||
340 | |||
341 | ctx->dev = algt->dev; | ||
342 | ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1; | ||
343 | ctx->dev->start = rk_ablk_start; | ||
344 | ctx->dev->update = rk_ablk_rx; | ||
345 | ctx->dev->complete = rk_crypto_complete; | ||
346 | ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); | ||
347 | |||
348 | return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM; | ||
349 | } | ||
350 | |||
351 | static void rk_ablk_cra_exit(struct crypto_tfm *tfm) | ||
352 | { | ||
353 | struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
354 | |||
355 | free_page((unsigned long)ctx->dev->addr_vir); | ||
356 | ctx->dev->disable_clk(ctx->dev); | ||
357 | } | ||
358 | |||
359 | struct rk_crypto_tmp rk_ecb_aes_alg = { | ||
360 | .alg = { | ||
361 | .cra_name = "ecb(aes)", | ||
362 | .cra_driver_name = "ecb-aes-rk", | ||
363 | .cra_priority = 300, | ||
364 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
365 | CRYPTO_ALG_ASYNC, | ||
366 | .cra_blocksize = AES_BLOCK_SIZE, | ||
367 | .cra_ctxsize = sizeof(struct rk_cipher_ctx), | ||
368 | .cra_alignmask = 0x0f, | ||
369 | .cra_type = &crypto_ablkcipher_type, | ||
370 | .cra_module = THIS_MODULE, | ||
371 | .cra_init = rk_ablk_cra_init, | ||
372 | .cra_exit = rk_ablk_cra_exit, | ||
373 | .cra_u.ablkcipher = { | ||
374 | .min_keysize = AES_MIN_KEY_SIZE, | ||
375 | .max_keysize = AES_MAX_KEY_SIZE, | ||
376 | .setkey = rk_aes_setkey, | ||
377 | .encrypt = rk_aes_ecb_encrypt, | ||
378 | .decrypt = rk_aes_ecb_decrypt, | ||
379 | } | ||
380 | } | ||
381 | }; | ||
382 | |||
383 | struct rk_crypto_tmp rk_cbc_aes_alg = { | ||
384 | .alg = { | ||
385 | .cra_name = "cbc(aes)", | ||
386 | .cra_driver_name = "cbc-aes-rk", | ||
387 | .cra_priority = 300, | ||
388 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
389 | CRYPTO_ALG_ASYNC, | ||
390 | .cra_blocksize = AES_BLOCK_SIZE, | ||
391 | .cra_ctxsize = sizeof(struct rk_cipher_ctx), | ||
392 | .cra_alignmask = 0x0f, | ||
393 | .cra_type = &crypto_ablkcipher_type, | ||
394 | .cra_module = THIS_MODULE, | ||
395 | .cra_init = rk_ablk_cra_init, | ||
396 | .cra_exit = rk_ablk_cra_exit, | ||
397 | .cra_u.ablkcipher = { | ||
398 | .min_keysize = AES_MIN_KEY_SIZE, | ||
399 | .max_keysize = AES_MAX_KEY_SIZE, | ||
400 | .ivsize = AES_BLOCK_SIZE, | ||
401 | .setkey = rk_aes_setkey, | ||
402 | .encrypt = rk_aes_cbc_encrypt, | ||
403 | .decrypt = rk_aes_cbc_decrypt, | ||
404 | } | ||
405 | } | ||
406 | }; | ||
407 | |||
408 | struct rk_crypto_tmp rk_ecb_des_alg = { | ||
409 | .alg = { | ||
410 | .cra_name = "ecb(des)", | ||
411 | .cra_driver_name = "ecb-des-rk", | ||
412 | .cra_priority = 300, | ||
413 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
414 | CRYPTO_ALG_ASYNC, | ||
415 | .cra_blocksize = DES_BLOCK_SIZE, | ||
416 | .cra_ctxsize = sizeof(struct rk_cipher_ctx), | ||
417 | .cra_alignmask = 0x07, | ||
418 | .cra_type = &crypto_ablkcipher_type, | ||
419 | .cra_module = THIS_MODULE, | ||
420 | .cra_init = rk_ablk_cra_init, | ||
421 | .cra_exit = rk_ablk_cra_exit, | ||
422 | .cra_u.ablkcipher = { | ||
423 | .min_keysize = DES_KEY_SIZE, | ||
424 | .max_keysize = DES_KEY_SIZE, | ||
425 | .setkey = rk_tdes_setkey, | ||
426 | .encrypt = rk_des_ecb_encrypt, | ||
427 | .decrypt = rk_des_ecb_decrypt, | ||
428 | } | ||
429 | } | ||
430 | }; | ||
431 | |||
432 | struct rk_crypto_tmp rk_cbc_des_alg = { | ||
433 | .alg = { | ||
434 | .cra_name = "cbc(des)", | ||
435 | .cra_driver_name = "cbc-des-rk", | ||
436 | .cra_priority = 300, | ||
437 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
438 | CRYPTO_ALG_ASYNC, | ||
439 | .cra_blocksize = DES_BLOCK_SIZE, | ||
440 | .cra_ctxsize = sizeof(struct rk_cipher_ctx), | ||
441 | .cra_alignmask = 0x07, | ||
442 | .cra_type = &crypto_ablkcipher_type, | ||
443 | .cra_module = THIS_MODULE, | ||
444 | .cra_init = rk_ablk_cra_init, | ||
445 | .cra_exit = rk_ablk_cra_exit, | ||
446 | .cra_u.ablkcipher = { | ||
447 | .min_keysize = DES_KEY_SIZE, | ||
448 | .max_keysize = DES_KEY_SIZE, | ||
449 | .ivsize = DES_BLOCK_SIZE, | ||
450 | .setkey = rk_tdes_setkey, | ||
451 | .encrypt = rk_des_cbc_encrypt, | ||
452 | .decrypt = rk_des_cbc_decrypt, | ||
453 | } | ||
454 | } | ||
455 | }; | ||
456 | |||
457 | struct rk_crypto_tmp rk_ecb_des3_ede_alg = { | ||
458 | .alg = { | ||
459 | .cra_name = "ecb(des3_ede)", | ||
460 | .cra_driver_name = "ecb-des3-ede-rk", | ||
461 | .cra_priority = 300, | ||
462 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
463 | CRYPTO_ALG_ASYNC, | ||
464 | .cra_blocksize = DES_BLOCK_SIZE, | ||
465 | .cra_ctxsize = sizeof(struct rk_cipher_ctx), | ||
466 | .cra_alignmask = 0x07, | ||
467 | .cra_type = &crypto_ablkcipher_type, | ||
468 | .cra_module = THIS_MODULE, | ||
469 | .cra_init = rk_ablk_cra_init, | ||
470 | .cra_exit = rk_ablk_cra_exit, | ||
471 | .cra_u.ablkcipher = { | ||
472 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
473 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
474 | .ivsize = DES_BLOCK_SIZE, | ||
475 | .setkey = rk_tdes_setkey, | ||
476 | .encrypt = rk_des3_ede_ecb_encrypt, | ||
477 | .decrypt = rk_des3_ede_ecb_decrypt, | ||
478 | } | ||
479 | } | ||
480 | }; | ||
481 | |||
482 | struct rk_crypto_tmp rk_cbc_des3_ede_alg = { | ||
483 | .alg = { | ||
484 | .cra_name = "cbc(des3_ede)", | ||
485 | .cra_driver_name = "cbc-des3-ede-rk", | ||
486 | .cra_priority = 300, | ||
487 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
488 | CRYPTO_ALG_ASYNC, | ||
489 | .cra_blocksize = DES_BLOCK_SIZE, | ||
490 | .cra_ctxsize = sizeof(struct rk_cipher_ctx), | ||
491 | .cra_alignmask = 0x07, | ||
492 | .cra_type = &crypto_ablkcipher_type, | ||
493 | .cra_module = THIS_MODULE, | ||
494 | .cra_init = rk_ablk_cra_init, | ||
495 | .cra_exit = rk_ablk_cra_exit, | ||
496 | .cra_u.ablkcipher = { | ||
497 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
498 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
499 | .ivsize = DES_BLOCK_SIZE, | ||
500 | .setkey = rk_tdes_setkey, | ||
501 | .encrypt = rk_des3_ede_cbc_encrypt, | ||
502 | .decrypt = rk_des3_ede_cbc_decrypt, | ||
503 | } | ||
504 | } | ||
505 | }; | ||
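A note on the weak-key handling in rk_tdes_setkey() above: des_ekey() returns zero when the key expansion detects a weak DES key, and the driver only rejects such keys when the transform was flagged with CRYPTO_TFM_REQ_WEAK_KEY. The convention in isolation (sketch, not from the patch):

	#include <crypto/des.h>
	#include <linux/types.h>

	/* Sketch: des_ekey() fills 'tmp' with the key expansion and
	 * returns 0 for a weak key; callers combine that with the
	 * CRYPTO_TFM_REQ_WEAK_KEY flag to decide whether to reject.
	 */
	static bool des_key_is_weak(const u8 key[DES_KEY_SIZE])
	{
		u32 tmp[DES_EXPKEY_WORDS];

		return des_ekey(tmp, key) == 0;
	}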
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index f68c24a98277..6c4f91c5e6b3 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
@@ -130,18 +130,18 @@ | |||
130 | #define SAHARA_REG_IDAR 0x20 | 130 | #define SAHARA_REG_IDAR 0x20 |
131 | 131 | ||
132 | struct sahara_hw_desc { | 132 | struct sahara_hw_desc { |
133 | u32 hdr; | 133 | u32 hdr; |
134 | u32 len1; | 134 | u32 len1; |
135 | dma_addr_t p1; | 135 | u32 p1; |
136 | u32 len2; | 136 | u32 len2; |
137 | dma_addr_t p2; | 137 | u32 p2; |
138 | dma_addr_t next; | 138 | u32 next; |
139 | }; | 139 | }; |
140 | 140 | ||
141 | struct sahara_hw_link { | 141 | struct sahara_hw_link { |
142 | u32 len; | 142 | u32 len; |
143 | dma_addr_t p; | 143 | u32 p; |
144 | dma_addr_t next; | 144 | u32 next; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | struct sahara_ctx { | 147 | struct sahara_ctx { |
@@ -228,9 +228,9 @@ struct sahara_dev { | |||
228 | 228 | ||
229 | size_t total; | 229 | size_t total; |
230 | struct scatterlist *in_sg; | 230 | struct scatterlist *in_sg; |
231 | unsigned int nb_in_sg; | 231 | int nb_in_sg; |
232 | struct scatterlist *out_sg; | 232 | struct scatterlist *out_sg; |
233 | unsigned int nb_out_sg; | 233 | int nb_out_sg; |
234 | 234 | ||
235 | u32 error; | 235 | u32 error; |
236 | }; | 236 | }; |
@@ -416,8 +416,8 @@ static void sahara_dump_descriptors(struct sahara_dev *dev) | |||
416 | return; | 416 | return; |
417 | 417 | ||
418 | for (i = 0; i < SAHARA_MAX_HW_DESC; i++) { | 418 | for (i = 0; i < SAHARA_MAX_HW_DESC; i++) { |
419 | dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n", | 419 | dev_dbg(dev->device, "Descriptor (%d) (%pad):\n", |
420 | i, dev->hw_phys_desc[i]); | 420 | i, &dev->hw_phys_desc[i]); |
421 | dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr); | 421 | dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr); |
422 | dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1); | 422 | dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1); |
423 | dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1); | 423 | dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1); |
@@ -437,8 +437,8 @@ static void sahara_dump_links(struct sahara_dev *dev) | |||
437 | return; | 437 | return; |
438 | 438 | ||
439 | for (i = 0; i < SAHARA_MAX_HW_LINK; i++) { | 439 | for (i = 0; i < SAHARA_MAX_HW_LINK; i++) { |
440 | dev_dbg(dev->device, "Link (%d) (0x%08x):\n", | 440 | dev_dbg(dev->device, "Link (%d) (%pad):\n", |
441 | i, dev->hw_phys_link[i]); | 441 | i, &dev->hw_phys_link[i]); |
442 | dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len); | 442 | dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len); |
443 | dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p); | 443 | dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p); |
444 | dev_dbg(dev->device, "\tnext = 0x%08x\n", | 444 | dev_dbg(dev->device, "\tnext = 0x%08x\n", |
@@ -477,7 +477,15 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) | |||
477 | } | 477 | } |
478 | 478 | ||
479 | dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total); | 479 | dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total); |
480 | if (dev->nb_in_sg < 0) { | ||
481 | dev_err(dev->device, "Invalid number of src SG.\n"); | ||
482 | return dev->nb_in_sg; | ||
483 | } | ||
480 | dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total); | 484 | dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total); |
485 | if (dev->nb_out_sg < 0) { | ||
486 | dev_err(dev->device, "Invalid numbers of dst SG.\n"); | ||
487 | return dev->nb_out_sg; | ||
488 | } | ||
481 | if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) { | 489 | if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) { |
482 | dev_err(dev->device, "not enough hw links (%d)\n", | 490 | dev_err(dev->device, "not enough hw links (%d)\n", |
483 | dev->nb_in_sg + dev->nb_out_sg); | 491 | dev->nb_in_sg + dev->nb_out_sg); |
@@ -793,6 +801,10 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev, | |||
793 | dev->in_sg = rctx->in_sg; | 801 | dev->in_sg = rctx->in_sg; |
794 | 802 | ||
795 | dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total); | 803 | dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total); |
804 | if (dev->nb_in_sg < 0) { | ||
805 | dev_err(dev->device, "Invalid number of src SG.\n"); | ||
806 | return dev->nb_in_sg; | ||
807 | } | ||
796 | if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) { | 808 | if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) { |
797 | dev_err(dev->device, "not enough hw links (%d)\n", | 809 | dev_err(dev->device, "not enough hw links (%d)\n", |
798 | dev->nb_in_sg + dev->nb_out_sg); | 810 | dev->nb_in_sg + dev->nb_out_sg); |
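Every sahara hunk above applies the same pattern: sg_nents_for_len() can return a negative errno when the scatterlist holds fewer than the requested bytes, so its result must be checked before being used as an entry count. Condensed (sketch; count_src_ents() is illustrative):

	#include <linux/device.h>
	#include <linux/scatterlist.h>

	/* Sketch of the recurring pattern in this series: validate the
	 * return value of sg_nents_for_len() before using it as a count.
	 */
	static int count_src_ents(struct device *dev, struct scatterlist *sg,
				  unsigned int len)
	{
		int nents = sg_nents_for_len(sg, len);

		if (nents < 0)
			dev_err(dev, "invalid number of src SG entries\n");
		return nents;	/* >= 0: entry count, < 0: errno to propagate */
	}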
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index eab6fe227fa0..107cd2a41cae 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c | |||
@@ -39,6 +39,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { | |||
39 | .import = sun4i_hash_import_md5, | 39 | .import = sun4i_hash_import_md5, |
40 | .halg = { | 40 | .halg = { |
41 | .digestsize = MD5_DIGEST_SIZE, | 41 | .digestsize = MD5_DIGEST_SIZE, |
42 | .statesize = sizeof(struct md5_state), | ||
42 | .base = { | 43 | .base = { |
43 | .cra_name = "md5", | 44 | .cra_name = "md5", |
44 | .cra_driver_name = "md5-sun4i-ss", | 45 | .cra_driver_name = "md5-sun4i-ss", |
@@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { | |||
66 | .import = sun4i_hash_import_sha1, | 67 | .import = sun4i_hash_import_sha1, |
67 | .halg = { | 68 | .halg = { |
68 | .digestsize = SHA1_DIGEST_SIZE, | 69 | .digestsize = SHA1_DIGEST_SIZE, |
70 | .statesize = sizeof(struct sha1_state), | ||
69 | .base = { | 71 | .base = { |
70 | .cra_name = "sha1", | 72 | .cra_name = "sha1", |
71 | .cra_driver_name = "sha1-sun4i-ss", | 73 | .cra_driver_name = "sha1-sun4i-ss", |
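The .statesize additions matter because the ahash export/import path sizes its state buffer from this field. A minimal round-trip, assuming an already initialized tfm and request (sketch; error handling abbreviated):

	#include <crypto/hash.h>
	#include <linux/slab.h>

	/* Sketch: the export/import round-trip that .statesize enables. */
	static int hash_state_roundtrip(struct crypto_ahash *tfm,
					struct ahash_request *req)
	{
		void *state;
		int err;

		state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
		if (!state)
			return -ENOMEM;

		err = crypto_ahash_export(req, state);	/* snapshot partial hash */
		if (!err)
			err = crypto_ahash_import(req, state);	/* resume from it */

		kfree(state);
		return err;
	}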
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index b6f9f42e2985..a0d4a08313ae 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1216,6 +1216,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1216 | struct talitos_private *priv = dev_get_drvdata(dev); | 1216 | struct talitos_private *priv = dev_get_drvdata(dev); |
1217 | bool is_sec1 = has_ftr_sec1(priv); | 1217 | bool is_sec1 = has_ftr_sec1(priv); |
1218 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; | 1218 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; |
1219 | void *err; | ||
1219 | 1220 | ||
1220 | if (cryptlen + authsize > max_len) { | 1221 | if (cryptlen + authsize > max_len) { |
1221 | dev_err(dev, "length exceeds h/w max limit\n"); | 1222 | dev_err(dev, "length exceeds h/w max limit\n"); |
@@ -1228,14 +1229,29 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1228 | if (!dst || dst == src) { | 1229 | if (!dst || dst == src) { |
1229 | src_nents = sg_nents_for_len(src, | 1230 | src_nents = sg_nents_for_len(src, |
1230 | assoclen + cryptlen + authsize); | 1231 | assoclen + cryptlen + authsize); |
1232 | if (src_nents < 0) { | ||
1233 | dev_err(dev, "Invalid number of src SG.\n"); | ||
1234 | err = ERR_PTR(-EINVAL); | ||
1235 | goto error_sg; | ||
1236 | } | ||
1231 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1237 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1232 | dst_nents = dst ? src_nents : 0; | 1238 | dst_nents = dst ? src_nents : 0; |
1233 | } else { /* dst && dst != src*/ | 1239 | } else { /* dst && dst != src*/ |
1234 | src_nents = sg_nents_for_len(src, assoclen + cryptlen + | 1240 | src_nents = sg_nents_for_len(src, assoclen + cryptlen + |
1235 | (encrypt ? 0 : authsize)); | 1241 | (encrypt ? 0 : authsize)); |
1242 | if (src_nents < 0) { | ||
1243 | dev_err(dev, "Invalid number of src SG.\n"); | ||
1244 | err = ERR_PTR(-EINVAL); | ||
1245 | goto error_sg; | ||
1246 | } | ||
1236 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1247 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1237 | dst_nents = sg_nents_for_len(dst, assoclen + cryptlen + | 1248 | dst_nents = sg_nents_for_len(dst, assoclen + cryptlen + |
1238 | (encrypt ? authsize : 0)); | 1249 | (encrypt ? authsize : 0)); |
1250 | if (dst_nents < 0) { | ||
1251 | dev_err(dev, "Invalid number of dst SG.\n"); | ||
1252 | err = ERR_PTR(-EINVAL); | ||
1253 | goto error_sg; | ||
1254 | } | ||
1239 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1255 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
1240 | } | 1256 | } |
1241 | 1257 | ||
@@ -1260,11 +1276,9 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1260 | 1276 | ||
1261 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1277 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
1262 | if (!edesc) { | 1278 | if (!edesc) { |
1263 | if (iv_dma) | ||
1264 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | ||
1265 | |||
1266 | dev_err(dev, "could not allocate edescriptor\n"); | 1279 | dev_err(dev, "could not allocate edescriptor\n"); |
1267 | return ERR_PTR(-ENOMEM); | 1280 | err = ERR_PTR(-ENOMEM); |
1281 | goto error_sg; | ||
1268 | } | 1282 | } |
1269 | 1283 | ||
1270 | edesc->src_nents = src_nents; | 1284 | edesc->src_nents = src_nents; |
@@ -1277,6 +1291,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1277 | DMA_BIDIRECTIONAL); | 1291 | DMA_BIDIRECTIONAL); |
1278 | 1292 | ||
1279 | return edesc; | 1293 | return edesc; |
1294 | error_sg: | ||
1295 | if (iv_dma) | ||
1296 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | ||
1297 | return err; | ||
1280 | } | 1298 | } |
1281 | 1299 | ||
1282 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, | 1300 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, |
@@ -1830,11 +1848,16 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1830 | unsigned int nbytes_to_hash; | 1848 | unsigned int nbytes_to_hash; |
1831 | unsigned int to_hash_later; | 1849 | unsigned int to_hash_later; |
1832 | unsigned int nsg; | 1850 | unsigned int nsg; |
1851 | int nents; | ||
1833 | 1852 | ||
1834 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { | 1853 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { |
1835 | /* Buffer up to one whole block */ | 1854 | /* Buffer up to one whole block */ |
1836 | sg_copy_to_buffer(areq->src, | 1855 | nents = sg_nents_for_len(areq->src, nbytes); |
1837 | sg_nents_for_len(areq->src, nbytes), | 1856 | if (nents < 0) { |
1857 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | ||
1858 | return nents; | ||
1859 | } | ||
1860 | sg_copy_to_buffer(areq->src, nents, | ||
1838 | req_ctx->buf + req_ctx->nbuf, nbytes); | 1861 | req_ctx->buf + req_ctx->nbuf, nbytes); |
1839 | req_ctx->nbuf += nbytes; | 1862 | req_ctx->nbuf += nbytes; |
1840 | return 0; | 1863 | return 0; |
@@ -1867,7 +1890,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1867 | req_ctx->psrc = areq->src; | 1890 | req_ctx->psrc = areq->src; |
1868 | 1891 | ||
1869 | if (to_hash_later) { | 1892 | if (to_hash_later) { |
1870 | int nents = sg_nents_for_len(areq->src, nbytes); | 1893 | nents = sg_nents_for_len(areq->src, nbytes); |
1894 | if (nents < 0) { | ||
1895 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | ||
1896 | return nents; | ||
1897 | } | ||
1871 | sg_pcopy_to_buffer(areq->src, nents, | 1898 | sg_pcopy_to_buffer(areq->src, nents, |
1872 | req_ctx->bufnext, | 1899 | req_ctx->bufnext, |
1873 | to_hash_later, | 1900 | to_hash_later, |
@@ -2297,6 +2324,22 @@ static struct talitos_alg_template driver_algs[] = { | |||
2297 | /* ABLKCIPHER algorithms. */ | 2324 | /* ABLKCIPHER algorithms. */ |
2298 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 2325 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
2299 | .alg.crypto = { | 2326 | .alg.crypto = { |
2327 | .cra_name = "ecb(aes)", | ||
2328 | .cra_driver_name = "ecb-aes-talitos", | ||
2329 | .cra_blocksize = AES_BLOCK_SIZE, | ||
2330 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
2331 | CRYPTO_ALG_ASYNC, | ||
2332 | .cra_ablkcipher = { | ||
2333 | .min_keysize = AES_MIN_KEY_SIZE, | ||
2334 | .max_keysize = AES_MAX_KEY_SIZE, | ||
2335 | .ivsize = AES_BLOCK_SIZE, | ||
2336 | } | ||
2337 | }, | ||
2338 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2339 | DESC_HDR_SEL0_AESU, | ||
2340 | }, | ||
2341 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2342 | .alg.crypto = { | ||
2300 | .cra_name = "cbc(aes)", | 2343 | .cra_name = "cbc(aes)", |
2301 | .cra_driver_name = "cbc-aes-talitos", | 2344 | .cra_driver_name = "cbc-aes-talitos", |
2302 | .cra_blocksize = AES_BLOCK_SIZE, | 2345 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -2314,6 +2357,73 @@ static struct talitos_alg_template driver_algs[] = { | |||
2314 | }, | 2357 | }, |
2315 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 2358 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
2316 | .alg.crypto = { | 2359 | .alg.crypto = { |
2360 | .cra_name = "ctr(aes)", | ||
2361 | .cra_driver_name = "ctr-aes-talitos", | ||
2362 | .cra_blocksize = AES_BLOCK_SIZE, | ||
2363 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
2364 | CRYPTO_ALG_ASYNC, | ||
2365 | .cra_ablkcipher = { | ||
2366 | .min_keysize = AES_MIN_KEY_SIZE, | ||
2367 | .max_keysize = AES_MAX_KEY_SIZE, | ||
2368 | .ivsize = AES_BLOCK_SIZE, | ||
2369 | } | ||
2370 | }, | ||
2371 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2372 | DESC_HDR_SEL0_AESU | | ||
2373 | DESC_HDR_MODE0_AESU_CTR, | ||
2374 | }, | ||
2375 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2376 | .alg.crypto = { | ||
2377 | .cra_name = "ecb(des)", | ||
2378 | .cra_driver_name = "ecb-des-talitos", | ||
2379 | .cra_blocksize = DES_BLOCK_SIZE, | ||
2380 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
2381 | CRYPTO_ALG_ASYNC, | ||
2382 | .cra_ablkcipher = { | ||
2383 | .min_keysize = DES_KEY_SIZE, | ||
2384 | .max_keysize = DES_KEY_SIZE, | ||
2385 | .ivsize = DES_BLOCK_SIZE, | ||
2386 | } | ||
2387 | }, | ||
2388 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2389 | DESC_HDR_SEL0_DEU, | ||
2390 | }, | ||
2391 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2392 | .alg.crypto = { | ||
2393 | .cra_name = "cbc(des)", | ||
2394 | .cra_driver_name = "cbc-des-talitos", | ||
2395 | .cra_blocksize = DES_BLOCK_SIZE, | ||
2396 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
2397 | CRYPTO_ALG_ASYNC, | ||
2398 | .cra_ablkcipher = { | ||
2399 | .min_keysize = DES_KEY_SIZE, | ||
2400 | .max_keysize = DES_KEY_SIZE, | ||
2401 | .ivsize = DES_BLOCK_SIZE, | ||
2402 | } | ||
2403 | }, | ||
2404 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2405 | DESC_HDR_SEL0_DEU | | ||
2406 | DESC_HDR_MODE0_DEU_CBC, | ||
2407 | }, | ||
2408 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2409 | .alg.crypto = { | ||
2410 | .cra_name = "ecb(des3_ede)", | ||
2411 | .cra_driver_name = "ecb-3des-talitos", | ||
2412 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
2413 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
2414 | CRYPTO_ALG_ASYNC, | ||
2415 | .cra_ablkcipher = { | ||
2416 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
2417 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
2418 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
2419 | } | ||
2420 | }, | ||
2421 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2422 | DESC_HDR_SEL0_DEU | | ||
2423 | DESC_HDR_MODE0_DEU_3DES, | ||
2424 | }, | ||
2425 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2426 | .alg.crypto = { | ||
2317 | .cra_name = "cbc(des3_ede)", | 2427 | .cra_name = "cbc(des3_ede)", |
2318 | .cra_driver_name = "cbc-3des-talitos", | 2428 | .cra_driver_name = "cbc-3des-talitos", |
2319 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2429 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 0090f3211d68..8dd8f40e2771 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -345,6 +345,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) | |||
345 | /* primary execution unit mode (MODE0) and derivatives */ | 345 | /* primary execution unit mode (MODE0) and derivatives */ |
346 | #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) | 346 | #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) |
347 | #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) | 347 | #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) |
348 | #define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000) | ||
348 | #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) | 349 | #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) |
349 | #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) | 350 | #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) |
350 | #define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000) | 351 | #define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000) |
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index 30796441b0a6..0e338bf6dfb7 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig | |||
@@ -18,6 +18,8 @@ config CRYPTO_DEV_UX500_HASH | |||
18 | tristate "UX500 crypto driver for HASH block" | 18 | tristate "UX500 crypto driver for HASH block" |
19 | depends on CRYPTO_DEV_UX500 | 19 | depends on CRYPTO_DEV_UX500 |
20 | select CRYPTO_HASH | 20 | select CRYPTO_HASH |
21 | select CRYPTO_SHA1 | ||
22 | select CRYPTO_SHA256 | ||
21 | help | 23 | help |
22 | This selects the hash driver for the UX500_HASH hardware. | 24 | This selects the hash driver for the UX500_HASH hardware. |
23 | Depends on UX500/STM DMA if running in DMA mode. | 25 | Depends on UX500/STM DMA if running in DMA mode. |
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index f47d112041b2..d6fdc583ce5d 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c | |||
@@ -41,22 +41,6 @@ static int hash_mode; | |||
41 | module_param(hash_mode, int, 0); | 41 | module_param(hash_mode, int, 0); |
42 | MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); | 42 | MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); |
43 | 43 | ||
44 | /** | ||
45 | * Pre-calculated empty message digests. | ||
46 | */ | ||
47 | static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { | ||
48 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, | ||
49 | 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, | ||
50 | 0xaf, 0xd8, 0x07, 0x09 | ||
51 | }; | ||
52 | |||
53 | static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { | ||
54 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, | ||
55 | 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, | ||
56 | 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, | ||
57 | 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 | ||
58 | }; | ||
59 | |||
60 | /* HMAC-SHA1, no key */ | 44 | /* HMAC-SHA1, no key */ |
61 | static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { | 45 | static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { |
62 | 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, | 46 | 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, |
@@ -242,13 +226,13 @@ static int get_empty_message_digest( | |||
242 | 226 | ||
243 | if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { | 227 | if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { |
244 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { | 228 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { |
245 | memcpy(zero_hash, &zero_message_hash_sha1[0], | 229 | memcpy(zero_hash, &sha1_zero_message_hash[0], |
246 | SHA1_DIGEST_SIZE); | 230 | SHA1_DIGEST_SIZE); |
247 | *zero_hash_size = SHA1_DIGEST_SIZE; | 231 | *zero_hash_size = SHA1_DIGEST_SIZE; |
248 | *zero_digest = true; | 232 | *zero_digest = true; |
249 | } else if (HASH_ALGO_SHA256 == | 233 | } else if (HASH_ALGO_SHA256 == |
250 | ctx->config.algorithm) { | 234 | ctx->config.algorithm) { |
251 | memcpy(zero_hash, &zero_message_hash_sha256[0], | 235 | memcpy(zero_hash, &sha256_zero_message_hash[0], |
252 | SHA256_DIGEST_SIZE); | 236 | SHA256_DIGEST_SIZE); |
253 | *zero_hash_size = SHA256_DIGEST_SIZE; | 237 | *zero_hash_size = SHA256_DIGEST_SIZE; |
254 | *zero_digest = true; | 238 | *zero_digest = true; |
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 0b8fe2ec5315..78a978613ca8 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
@@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = { | |||
191 | .cra_init = p8_aes_cbc_init, | 191 | .cra_init = p8_aes_cbc_init, |
192 | .cra_exit = p8_aes_cbc_exit, | 192 | .cra_exit = p8_aes_cbc_exit, |
193 | .cra_blkcipher = { | 193 | .cra_blkcipher = { |
194 | .ivsize = 0, | 194 | .ivsize = AES_BLOCK_SIZE, |
195 | .min_keysize = AES_MIN_KEY_SIZE, | 195 | .min_keysize = AES_MIN_KEY_SIZE, |
196 | .max_keysize = AES_MAX_KEY_SIZE, | 196 | .max_keysize = AES_MAX_KEY_SIZE, |
197 | .setkey = p8_aes_cbc_setkey, | 197 | .setkey = p8_aes_cbc_setkey, |
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index ee1306cd8f59..1febc4f1d9af 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c | |||
@@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = { | |||
175 | .cra_init = p8_aes_ctr_init, | 175 | .cra_init = p8_aes_ctr_init, |
176 | .cra_exit = p8_aes_ctr_exit, | 176 | .cra_exit = p8_aes_ctr_exit, |
177 | .cra_blkcipher = { | 177 | .cra_blkcipher = { |
178 | .ivsize = 0, | 178 | .ivsize = AES_BLOCK_SIZE, |
179 | .min_keysize = AES_MIN_KEY_SIZE, | 179 | .min_keysize = AES_MIN_KEY_SIZE, |
180 | .max_keysize = AES_MAX_KEY_SIZE, | 180 | .max_keysize = AES_MAX_KEY_SIZE, |
181 | .setkey = p8_aes_ctr_setkey, | 181 | .setkey = p8_aes_ctr_setkey, |
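The vmx change is small but consequential: generic callers take the IV length from the algorithm's advertised ivsize, so declaring 0 for CBC/CTR meant the IV was never sized or applied. Schematically (sketch; set_iv() is illustrative):

	/* Sketch: callers derive the IV length from the algorithm, so
	 * an advertised ivsize of 0 silently drops the IV.
	 */
	static void set_iv(struct crypto_blkcipher *tfm, const u8 *iv)
	{
		crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
	}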
diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 077cae1e6b51..84d13b11ad7b 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h | |||
@@ -128,6 +128,7 @@ struct aead_request { | |||
128 | * @exit: Deinitialize the cryptographic transformation object. This is a | 128 | * @exit: Deinitialize the cryptographic transformation object. This is a |
129 | * counterpart to @init, used to remove various changes set in | 129 | * counterpart to @init, used to remove various changes set in |
130 | * @init. | 130 | * @init. |
131 | * @base: Definition of a generic crypto cipher algorithm. | ||
131 | * | 132 | * |
132 | * All fields except @ivsize is mandatory and must be filled. | 133 | * All fields except @ivsize is mandatory and must be filled. |
133 | */ | 134 | */ |
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 45cd5b328040..354de15cea6b 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h | |||
@@ -21,9 +21,9 @@ | |||
21 | * @src: Source data | 21 | * @src: Source data |
22 | * @dst: Destination data | 22 | * @dst: Destination data |
23 | * @src_len: Size of the input buffer | 23 | * @src_len: Size of the input buffer |
24 | * @dst_len: Size of the output buffer. It needs to be at leaset | 24 | * @dst_len: Size of the output buffer. It needs to be at least |
25 | * as big as the expected result depending on the operation | 25 | * as big as the expected result depending on the operation |
26 | * After operation it will be updated with the acctual size of the | 26 | * After operation it will be updated with the actual size of the |
27 | * result. | 27 | * result. |
28 | * In case of error where the dst sgl size was insufficient, | 28 | * In case of error where the dst sgl size was insufficient, |
29 | * it will be updated to the size required for the operation. | 29 | * it will be updated to the size required for the operation. |
@@ -59,7 +59,7 @@ struct crypto_akcipher { | |||
59 | * algorithm. In case of error, where the dst_len was insufficient, | 59 | * algorithm. In case of error, where the dst_len was insufficient, |
60 | * the req->dst_len will be updated to the size required for the | 60 | * the req->dst_len will be updated to the size required for the |
61 | * operation | 61 | * operation |
62 | * @encrypt: Function performs an encrytp operation as defined by public key | 62 | * @encrypt: Function performs an encrypt operation as defined by public key |
63 | * algorithm. In case of error, where the dst_len was insufficient, | 63 | * algorithm. In case of error, where the dst_len was insufficient, |
64 | * the req->dst_len will be updated to the size required for the | 64 | * the req->dst_len will be updated to the size required for the |
65 | * operation | 65 | * operation |
@@ -73,7 +73,7 @@ struct crypto_akcipher { | |||
73 | * @set_priv_key: Function invokes the algorithm specific set private key | 73 | * @set_priv_key: Function invokes the algorithm specific set private key |
74 | * function, which knows how to decode and interpret | 74 | * function, which knows how to decode and interpret |
75 | * the BER encoded private key | 75 | * the BER encoded private key |
76 | * @max_size: Function returns dest buffer size reqired for a given key. | 76 | * @max_size: Function returns dest buffer size required for a given key. |
77 | * @init: Initialize the cryptographic transformation object. | 77 | * @init: Initialize the cryptographic transformation object. |
78 | * This function is used to initialize the cryptographic | 78 | * This function is used to initialize the cryptographic |
79 | * transformation object. This function is called only once at | 79 | * transformation object. This function is called only once at |
@@ -232,7 +232,7 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req, | |||
232 | } | 232 | } |
233 | 233 | ||
234 | /** | 234 | /** |
235 | * akcipher_request_set_crypt() -- Sets reqest parameters | 235 | * akcipher_request_set_crypt() -- Sets request parameters |
236 | * | 236 | * |
237 | * Sets parameters required by crypto operation | 237 | * Sets parameters required by crypto operation |
238 | * | 238 | * |
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index 9a2bda15e454..479a0078f0f7 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h | |||
@@ -13,6 +13,22 @@ | |||
13 | #ifndef _CRYPTO_AKCIPHER_INT_H | 13 | #ifndef _CRYPTO_AKCIPHER_INT_H |
14 | #define _CRYPTO_AKCIPHER_INT_H | 14 | #define _CRYPTO_AKCIPHER_INT_H |
15 | #include <crypto/akcipher.h> | 15 | #include <crypto/akcipher.h> |
16 | #include <crypto/algapi.h> | ||
17 | |||
18 | struct akcipher_instance { | ||
19 | void (*free)(struct akcipher_instance *inst); | ||
20 | union { | ||
21 | struct { | ||
22 | char head[offsetof(struct akcipher_alg, base)]; | ||
23 | struct crypto_instance base; | ||
24 | } s; | ||
25 | struct akcipher_alg alg; | ||
26 | }; | ||
27 | }; | ||
28 | |||
29 | struct crypto_akcipher_spawn { | ||
30 | struct crypto_spawn base; | ||
31 | }; | ||
16 | 32 | ||
17 | /* | 33 | /* |
18 | * Transform internal helpers. | 34 | * Transform internal helpers. |
@@ -38,6 +54,56 @@ static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) | |||
38 | return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; | 54 | return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; |
39 | } | 55 | } |
40 | 56 | ||
57 | static inline struct crypto_instance *akcipher_crypto_instance( | ||
58 | struct akcipher_instance *inst) | ||
59 | { | ||
60 | return container_of(&inst->alg.base, struct crypto_instance, alg); | ||
61 | } | ||
62 | |||
63 | static inline struct akcipher_instance *akcipher_instance( | ||
64 | struct crypto_instance *inst) | ||
65 | { | ||
66 | return container_of(&inst->alg, struct akcipher_instance, alg.base); | ||
67 | } | ||
68 | |||
69 | static inline struct akcipher_instance *akcipher_alg_instance( | ||
70 | struct crypto_akcipher *akcipher) | ||
71 | { | ||
72 | return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base)); | ||
73 | } | ||
74 | |||
75 | static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) | ||
76 | { | ||
77 | return crypto_instance_ctx(akcipher_crypto_instance(inst)); | ||
78 | } | ||
79 | |||
80 | static inline void crypto_set_akcipher_spawn( | ||
81 | struct crypto_akcipher_spawn *spawn, | ||
82 | struct crypto_instance *inst) | ||
83 | { | ||
84 | crypto_set_spawn(&spawn->base, inst); | ||
85 | } | ||
86 | |||
87 | int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, | ||
88 | u32 type, u32 mask); | ||
89 | |||
90 | static inline struct crypto_akcipher *crypto_spawn_akcipher( | ||
91 | struct crypto_akcipher_spawn *spawn) | ||
92 | { | ||
93 | return crypto_spawn_tfm2(&spawn->base); | ||
94 | } | ||
95 | |||
96 | static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn) | ||
97 | { | ||
98 | crypto_drop_spawn(&spawn->base); | ||
99 | } | ||
100 | |||
101 | static inline struct akcipher_alg *crypto_spawn_akcipher_alg( | ||
102 | struct crypto_akcipher_spawn *spawn) | ||
103 | { | ||
104 | return container_of(spawn->base.alg, struct akcipher_alg, base); | ||
105 | } | ||
106 | |||
41 | /** | 107 | /** |
42 | * crypto_register_akcipher() -- Register public key algorithm | 108 | * crypto_register_akcipher() -- Register public key algorithm |
43 | * | 109 | * |
@@ -57,4 +123,16 @@ int crypto_register_akcipher(struct akcipher_alg *alg); | |||
57 | * @alg: algorithm definition | 123 | * @alg: algorithm definition |
58 | */ | 124 | */ |
59 | void crypto_unregister_akcipher(struct akcipher_alg *alg); | 125 | void crypto_unregister_akcipher(struct akcipher_alg *alg); |
126 | |||
127 | /** | ||
128 | * akcipher_register_instance() -- Register public key template instance | ||
129 | * | ||
130 | * Function registers an implementation of an asymmetric key algorithm | ||
131 | * created from a template | ||
132 | * | ||
133 | * @tmpl: the template from which the algorithm was created | ||
134 | * @inst: the template instance | ||
135 | */ | ||
136 | int akcipher_register_instance(struct crypto_template *tmpl, | ||
137 | struct akcipher_instance *inst); | ||
60 | #endif | 138 | #endif |
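Taken together, these helpers follow the pattern the other crypto types use for template instances. A rough sketch of a template ->create() built on them (the example_ name is hypothetical, and filling in the instance's algorithm callbacks and cra_* fields is elided):

	static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
	{
		struct akcipher_instance *inst;
		struct crypto_akcipher_spawn *spawn;
		const char *name;
		int err;

		name = crypto_attr_alg_name(tb[1]);
		if (IS_ERR(name))
			return PTR_ERR(name);

		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;

		spawn = akcipher_instance_ctx(inst);
		crypto_set_akcipher_spawn(spawn, akcipher_crypto_instance(inst));
		err = crypto_grab_akcipher(spawn, name, 0, 0);
		if (err)
			goto err_free;

		/* ... set up inst->alg from crypto_spawn_akcipher_alg(spawn) ... */

		err = akcipher_register_instance(tmpl, inst);
		if (err)
			goto err_drop;
		return 0;

	err_drop:
		crypto_drop_akcipher(spawn);
	err_free:
		kfree(inst);
		return err;
	}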
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index f997e2d29b5a..c7585bdecbc2 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h | |||
@@ -27,4 +27,6 @@ int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, | |||
27 | unsigned int key_len); | 27 | unsigned int key_len); |
28 | 28 | ||
29 | void rsa_free_key(struct rsa_key *rsa_key); | 29 | void rsa_free_key(struct rsa_key *rsa_key); |
30 | |||
31 | extern struct crypto_template rsa_pkcs1pad_tmpl; | ||
30 | #endif | 32 | #endif |
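Exporting rsa_pkcs1pad_tmpl suggests the core RSA module registers the padding template alongside the raw algorithm. A sketch of what that init path plausibly looks like (the `rsa` akcipher_alg object name is an assumption):

	static int rsa_init(void)
	{
		int err;

		err = crypto_register_akcipher(&rsa);	/* &rsa: assumed alg object */
		if (err)
			return err;

		err = crypto_register_template(&rsa_pkcs1pad_tmpl);
		if (err)
			crypto_unregister_akcipher(&rsa);

		return err;
	}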
diff --git a/include/crypto/md5.h b/include/crypto/md5.h index 146af825eedb..327deac963c0 100644 --- a/include/crypto/md5.h +++ b/include/crypto/md5.h | |||
@@ -13,6 +13,8 @@ | |||
13 | #define MD5_H2 0x98badcfeUL | 13 | #define MD5_H2 0x98badcfeUL |
14 | #define MD5_H3 0x10325476UL | 14 | #define MD5_H3 0x10325476UL |
15 | 15 | ||
16 | extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; | ||
17 | |||
16 | struct md5_state { | 18 | struct md5_state { |
17 | u32 hash[MD5_HASH_WORDS]; | 19 | u32 hash[MD5_HASH_WORDS]; |
18 | u32 block[MD5_BLOCK_WORDS]; | 20 | u32 block[MD5_BLOCK_WORDS]; |
diff --git a/include/crypto/sha.h b/include/crypto/sha.h index dd7905a3c22e..c94d3eb1cefd 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h | |||
@@ -64,6 +64,12 @@ | |||
64 | #define SHA512_H6 0x1f83d9abfb41bd6bULL | 64 | #define SHA512_H6 0x1f83d9abfb41bd6bULL |
65 | #define SHA512_H7 0x5be0cd19137e2179ULL | 65 | #define SHA512_H7 0x5be0cd19137e2179ULL |
66 | 66 | ||
67 | extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE]; | ||
68 | |||
69 | extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; | ||
70 | |||
71 | extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; | ||
72 | |||
67 | struct sha1_state { | 73 | struct sha1_state { |
68 | u32 state[SHA1_DIGEST_SIZE / 4]; | 74 | u32 state[SHA1_DIGEST_SIZE / 4]; |
69 | u64 count; | 75 | u64 count; |
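These exported constants, like md5_zero_message_hash above, are the precomputed digests of the empty message; drivers whose hardware cannot process zero-length input can hand them back directly. A hypothetical driver final() illustrating the idea (the example_ names and ctx->total bookkeeping are assumptions, not kernel API):

	static int example_sha1_final(struct ahash_request *req)
	{
		struct example_ctx *ctx = ahash_request_ctx(req);

		if (!ctx->total) {
			/* No data was ever fed in: return the canned
			 * empty-message digest instead of touching hardware. */
			memcpy(req->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
			return 0;
		}
		return example_hw_final(req);	/* hypothetical hardware path */
	}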
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c index 8881dad2a6a0..a7f278d2ed8f 100644 --- a/lib/842/842_decompress.c +++ b/lib/842/842_decompress.c | |||
@@ -69,7 +69,7 @@ struct sw842_param { | |||
69 | ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ | 69 | ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ |
70 | (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ | 70 | (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ |
71 | (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \ | 71 | (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \ |
72 | WARN(1, "pr_debug param err invalid size %x\n", s)) | 72 | 0) |
73 | 73 | ||
74 | static int next_bits(struct sw842_param *p, u64 *d, u8 n); | 74 | static int next_bits(struct sw842_param *p, u64 *d, u8 n); |
75 | 75 | ||
@@ -202,10 +202,14 @@ static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize) | |||
202 | return -EINVAL; | 202 | return -EINVAL; |
203 | } | 203 | } |
204 | 204 | ||
205 | pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", | 205 | if (size != 2 && size != 4 && size != 8) |
206 | size, (unsigned long)index, (unsigned long)(index * size), | 206 | WARN(1, "__do_index invalid size %x\n", size); |
207 | (unsigned long)offset, (unsigned long)total, | 207 | else |
208 | (unsigned long)beN_to_cpu(&p->ostart[offset], size)); | 208 | pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", |
209 | size, (unsigned long)index, | ||
210 | (unsigned long)(index * size), (unsigned long)offset, | ||
211 | (unsigned long)total, | ||
212 | (unsigned long)beN_to_cpu(&p->ostart[offset], size)); | ||
209 | 213 | ||
210 | memcpy(p->out, &p->ostart[offset], size); | 214 | memcpy(p->out, &p->ostart[offset], size); |
211 | p->out += size; | 215 | p->out += size; |
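For context on the beN_to_cpu change above: WARN(cond, ...) is an expression that evaluates to !!(cond), so the removed fallback arm made the macro yield 1, not a diagnostic, whenever an invalid size slipped through. A minimal illustration of that value semantics (standalone sketch, not the kernel code):

	/* WARN() as the last arm of a conditional chain returns 1 here,
	 * which then masquerades as decoded data: */
	u64 v = (s == 8) ? be64_to_cpu(get_unaligned((__be64 *)d))
			 : WARN(1, "invalid size %x\n", s);	/* v == 1 on the bad path */

Returning 0 from the macro and warning once in __do_index keeps the value and the diagnostic separate.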
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 3db76b8c1115..ec533a6c77b5 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c | |||
@@ -135,7 +135,9 @@ EXPORT_SYMBOL_GPL(mpi_read_from_buffer); | |||
135 | * @buf: buffer to which the output will be written. Needs to be at | 135 | * @buf: buffer to which the output will be written. Needs to be at
136 | * least mpi_get_size(a) long. | 136 | * least mpi_get_size(a) long.
137 | * @buf_len: size of the buf. | 137 | * @buf_len: size of the buf. |
138 | * @nbytes: receives the actual length of the data written. | 138 | * @nbytes: receives the actual length of the data written on success and |
139 | * the length to be written on -EOVERFLOW in case buf_len was too | ||
140 | * small. | ||
139 | * @sign: if not NULL, it will be set to the sign of a. | 141 | * @sign: if not NULL, it will be set to the sign of a. |
140 | * | 142 | * |
141 | * Return: 0 on success or error code in case of error | 143 | * Return: 0 on success or error code in case of error |
@@ -148,7 +150,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | |||
148 | unsigned int n = mpi_get_size(a); | 150 | unsigned int n = mpi_get_size(a); |
149 | int i, lzeros = 0; | 151 | int i, lzeros = 0; |
150 | 152 | ||
151 | if (buf_len < n || !buf || !nbytes) | 153 | if (!buf || !nbytes) |
152 | return -EINVAL; | 154 | return -EINVAL; |
153 | 155 | ||
154 | if (sign) | 156 | if (sign) |
@@ -163,6 +165,11 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | |||
163 | break; | 165 | break; |
164 | } | 166 | } |
165 | 167 | ||
168 | if (buf_len < n - lzeros) { | ||
169 | *nbytes = n - lzeros; | ||
170 | return -EOVERFLOW; | ||
171 | } | ||
172 | |||
166 | p = buf; | 173 | p = buf; |
167 | *nbytes = n - lzeros; | 174 | *nbytes = n - lzeros; |
168 | 175 | ||
@@ -332,7 +339,8 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer); | |||
332 | * @nbytes: in/out param - it has to be set to the maximum number of | 339 | * @nbytes: in/out param - it has to be set to the maximum number of
333 | * bytes that can be written to sgl. This has to be at least | 340 | * bytes that can be written to sgl. This has to be at least |
334 | * the size of the integer a. On return it receives the actual | 341 | * the size of the integer a. On return it receives the actual |
335 | * length of the data written. | 342 | * length of the data written on success or the length that would
343 | * be written if the buffer was too small. | ||
336 | * @sign: if not NULL, it will be set to the sign of a. | 344 | * @sign: if not NULL, it will be set to the sign of a. |
337 | * | 345 | * |
338 | * Return: 0 on success or error code in case of error | 346 | * Return: 0 on success or error code in case of error |
@@ -345,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, | |||
345 | unsigned int n = mpi_get_size(a); | 353 | unsigned int n = mpi_get_size(a); |
346 | int i, x, y = 0, lzeros = 0, buf_len; | 354 | int i, x, y = 0, lzeros = 0, buf_len; |
347 | 355 | ||
348 | if (!nbytes || *nbytes < n) | 356 | if (!nbytes) |
349 | return -EINVAL; | 357 | return -EINVAL; |
350 | 358 | ||
351 | if (sign) | 359 | if (sign) |
@@ -360,6 +368,11 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, | |||
360 | break; | 368 | break; |
361 | } | 369 | } |
362 | 370 | ||
371 | if (*nbytes < n - lzeros) { | ||
372 | *nbytes = n - lzeros; | ||
373 | return -EOVERFLOW; | ||
374 | } | ||
375 | |||
363 | *nbytes = n - lzeros; | 376 | *nbytes = n - lzeros; |
364 | buf_len = sgl->length; | 377 | buf_len = sgl->length; |
365 | p2 = sg_virt(sgl); | 378 | p2 = sg_virt(sgl); |
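The net effect of the mpi_read_buffer() change is that a too-small buffer is now reported rather than rejected up front, so callers can retry with the exact size. A hedged caller sketch (the 16-byte probe buffer is arbitrary):

	/* Hypothetical caller: size the buffer from the -EOVERFLOW report. */
	uint8_t small[16];
	unsigned int len;
	uint8_t *buf = small;
	int err;

	err = mpi_read_buffer(a, buf, sizeof(small), &len, NULL);
	if (err == -EOVERFLOW) {
		/* len now holds the required size, leading zeros excluded. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = mpi_read_buffer(a, buf, len, &len, NULL);
	}

mpi_write_to_sgl() follows the same pattern, with *nbytes serving as both the advertised sgl capacity on entry and the required length on -EOVERFLOW.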