author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-04 12:06:34 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-04 12:06:34 -0400
commit    d66e6737d454553e1e62109d8298ede5351178a4 (patch)
tree      c28b205045935b111527f461d2b114daa26e4fb8 /drivers/crypto
parent    612a9aab56a93533e76e3ad91642db7033e03b69 (diff)
parent    c9f97a27ceee84998999bf3341e6d5d207b05539 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- Optimised AES/SHA1 for ARM.
- IPsec ESN support in talitos and caam.
- x86_64/avx implementation of cast5/cast6.
- Add/use multi-algorithm registration helpers where possible.
- Added IBM Power7+ in-Nest support.
- Misc fixes.
Fix up trivial conflicts in crypto/Kconfig due to the sparc64 crypto
config options being added next to the new ARM ones.
[ Side note: cut-and-paste duplicate help texts make those conflicts
harder to read than necessary, thanks to git being smart about
minimizing conflicts and maximizing the common parts... ]
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (71 commits)
crypto: x86/glue_helper - fix storing of new IV in CBC encryption
crypto: cast5/avx - fix storing of new IV in CBC encryption
crypto: tcrypt - add missing tests for camellia and ghash
crypto: testmgr - make test_aead also test 'dst != src' code paths
crypto: testmgr - make test_skcipher also test 'dst != src' code paths
crypto: testmgr - add test vectors for CTR mode IV increasement
crypto: testmgr - add test vectors for partial ctr(cast5) and ctr(cast6)
crypto: testmgr - allow non-multi page and multi page skcipher tests from same test template
crypto: caam - increase TRNG clocks per sample
crypto, tcrypt: remove local_bh_disable/enable() around local_irq_disable/enable()
crypto: tegra-aes - fix error return code
crypto: crypto4xx - fix error return code
crypto: hifn_795x - fix error return code
crypto: ux500 - fix error return code
crypto: caam - fix error IDs for SEC v5.x RNG4
hwrng: mxc-rnga - Access data via structure
hwrng: mxc-rnga - Adapt clocks to new i.mx clock framework
crypto: caam - add IPsec ESN support
crypto: 842 - remove .cra_list initialization
Revert "[CRYPTO] cast6: inline bloat--"
...
Diffstat (limited to 'drivers/crypto')
29 files changed, 1924 insertions, 343 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 7d74d092aa8f..308c7fb92a60 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -298,21 +298,15 @@ config CRYPTO_DEV_TEGRA_AES
298 | will be called tegra-aes. | 298 | will be called tegra-aes. |
299 | 299 | ||
300 | config CRYPTO_DEV_NX | 300 | config CRYPTO_DEV_NX |
301 | tristate "Support for Power7+ in-Nest cryptographic acceleration" | 301 | bool "Support for IBM Power7+ in-Nest cryptographic acceleration" |
302 | depends on PPC64 && IBMVIO | 302 | depends on PPC64 && IBMVIO |
303 | select CRYPTO_AES | 303 | default n |
304 | select CRYPTO_CBC | ||
305 | select CRYPTO_ECB | ||
306 | select CRYPTO_CCM | ||
307 | select CRYPTO_GCM | ||
308 | select CRYPTO_AUTHENC | ||
309 | select CRYPTO_XCBC | ||
310 | select CRYPTO_SHA256 | ||
311 | select CRYPTO_SHA512 | ||
312 | help | 304 | help |
313 | Support for Power7+ in-Nest cryptographic acceleration. This | 305 | Support for Power7+ in-Nest cryptographic acceleration. |
314 | module supports acceleration for AES and SHA2 algorithms. If you | 306 | |
315 | choose 'M' here, this module will be called nx_crypto. | 307 | if CRYPTO_DEV_NX |
308 | source "drivers/crypto/nx/Kconfig" | ||
309 | endif | ||
316 | 310 | ||
317 | config CRYPTO_DEV_UX500 | 311 | config CRYPTO_DEV_UX500 |
318 | tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" | 312 | tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" |
@@ -340,7 +334,7 @@ config CRYPTO_DEV_ATMEL_AES
340 | select CRYPTO_AES | 334 | select CRYPTO_AES |
341 | select CRYPTO_ALGAPI | 335 | select CRYPTO_ALGAPI |
342 | select CRYPTO_BLKCIPHER | 336 | select CRYPTO_BLKCIPHER |
343 | select CONFIG_AT_HDMAC | 337 | select AT_HDMAC |
344 | help | 338 | help |
345 | Some Atmel processors have AES hw accelerator. | 339 | Some Atmel processors have AES hw accelerator. |
346 | Select this if you want to use the Atmel module for | 340 | Select this if you want to use the Atmel module for |
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 802e85102c32..f88e3d8f6b64 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1226,6 +1226,7 @@ static int __init crypto4xx_probe(struct platform_device *ofdev)
1226 | core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); | 1226 | core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); |
1227 | if (!core_dev->dev->ce_base) { | 1227 | if (!core_dev->dev->ce_base) { |
1228 | dev_err(dev, "failed to of_iomap\n"); | 1228 | dev_err(dev, "failed to of_iomap\n"); |
1229 | rc = -ENOMEM; | ||
1229 | goto err_iomap; | 1230 | goto err_iomap; |
1230 | } | 1231 | } |
1231 | 1232 | ||
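Several patches in this pull ("fix error return code" for tegra-aes, crypto4xx, hifn_795x and ux500) correct the same bug pattern seen here: the probe function's return variable still holds 0 from an earlier successful step, so a failed mapping or allocation is silently reported as success. A minimal standalone sketch of the pattern, with hypothetical names (the real fixes are one-line assignments like the rc = -ENOMEM added above):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

static void *example_iomap(void) { return NULL; }	/* stand-in for of_iomap(); models failure */

static int example_probe(void)
{
	int rc = 0;			/* still 0 from the steps that succeeded */
	void *base = example_iomap();

	if (!base) {
		rc = -ENOMEM;		/* the missing assignment the patches add */
		goto err_iomap;
	}
	/* ... rest of probe ... */
err_iomap:
	return rc;			/* without the assignment, this error path returned 0 */
}

int main(void)
{
	printf("probe: %d\n", example_probe());	/* prints -12, i.e. -ENOMEM */
	return 0;
}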
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6bb20fffbf49..8061336e07e7 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -24,15 +24,10 @@
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | 25 | ||
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | 27 | #include <linux/init.h> |
29 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
30 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
31 | #include <linux/kernel.h> | ||
32 | #include <linux/clk.h> | ||
33 | #include <linux/irq.h> | 30 | #include <linux/irq.h> |
34 | #include <linux/io.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
37 | #include <linux/dma-mapping.h> | 32 | #include <linux/dma-mapping.h> |
38 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
@@ -1017,7 +1012,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
1017 | int err, i, j; | 1012 | int err, i, j; |
1018 | 1013 | ||
1019 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { | 1014 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { |
1020 | INIT_LIST_HEAD(&aes_algs[i].cra_list); | ||
1021 | err = crypto_register_alg(&aes_algs[i]); | 1015 | err = crypto_register_alg(&aes_algs[i]); |
1022 | if (err) | 1016 | if (err) |
1023 | goto err_aes_algs; | 1017 | goto err_aes_algs; |
@@ -1026,7 +1020,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
1026 | atmel_aes_hw_version_init(dd); | 1020 | atmel_aes_hw_version_init(dd); |
1027 | 1021 | ||
1028 | if (dd->hw_version >= 0x130) { | 1022 | if (dd->hw_version >= 0x130) { |
1029 | INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list); | ||
1030 | err = crypto_register_alg(&aes_cfb64_alg[0]); | 1023 | err = crypto_register_alg(&aes_cfb64_alg[0]); |
1031 | if (err) | 1024 | if (err) |
1032 | goto err_aes_cfb64_alg; | 1025 | goto err_aes_cfb64_alg; |
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index f938b9d79b66..bcdf55fdc623 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -24,15 +24,10 @@
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | 25 | ||
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | 27 | #include <linux/init.h> |
29 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
30 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
31 | #include <linux/kernel.h> | ||
32 | #include <linux/clk.h> | ||
33 | #include <linux/irq.h> | 30 | #include <linux/irq.h> |
34 | #include <linux/io.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
37 | #include <linux/dma-mapping.h> | 32 | #include <linux/dma-mapping.h> |
38 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index eb2b61e57e2d..7495f98c7221 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -24,15 +24,10 @@
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | 25 | ||
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | 27 | #include <linux/init.h> |
29 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
30 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
31 | #include <linux/kernel.h> | ||
32 | #include <linux/clk.h> | ||
33 | #include <linux/irq.h> | 30 | #include <linux/irq.h> |
34 | #include <linux/io.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
37 | #include <linux/dma-mapping.h> | 32 | #include <linux/dma-mapping.h> |
38 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
@@ -1044,7 +1039,6 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
1044 | int err, i, j; | 1039 | int err, i, j; |
1045 | 1040 | ||
1046 | for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) { | 1041 | for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) { |
1047 | INIT_LIST_HEAD(&tdes_algs[i].cra_list); | ||
1048 | err = crypto_register_alg(&tdes_algs[i]); | 1042 | err = crypto_register_alg(&tdes_algs[i]); |
1049 | if (err) | 1043 | if (err) |
1050 | goto err_tdes_algs; | 1044 | goto err_tdes_algs; |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0c1ea8492eff..b2a0a0726a54 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -205,7 +205,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
205 | { | 205 | { |
206 | u32 *key_jump_cmd; | 206 | u32 *key_jump_cmd; |
207 | 207 | ||
208 | init_sh_desc(desc, HDR_SHARE_WAIT); | 208 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
209 | 209 | ||
210 | /* Skip if already shared */ | 210 | /* Skip if already shared */ |
211 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | 211 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
@@ -224,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
224 | struct aead_tfm *tfm = &aead->base.crt_aead; | 224 | struct aead_tfm *tfm = &aead->base.crt_aead; |
225 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 225 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
226 | struct device *jrdev = ctx->jrdev; | 226 | struct device *jrdev = ctx->jrdev; |
227 | bool keys_fit_inline = 0; | 227 | bool keys_fit_inline = false; |
228 | u32 *key_jump_cmd, *jump_cmd; | 228 | u32 *key_jump_cmd, *jump_cmd; |
229 | u32 geniv, moveiv; | 229 | u32 geniv, moveiv; |
230 | u32 *desc; | 230 | u32 *desc; |
@@ -239,7 +239,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
239 | if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + | 239 | if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + |
240 | ctx->split_key_pad_len + ctx->enckeylen <= | 240 | ctx->split_key_pad_len + ctx->enckeylen <= |
241 | CAAM_DESC_BYTES_MAX) | 241 | CAAM_DESC_BYTES_MAX) |
242 | keys_fit_inline = 1; | 242 | keys_fit_inline = true; |
243 | 243 | ||
244 | /* aead_encrypt shared descriptor */ | 244 | /* aead_encrypt shared descriptor */ |
245 | desc = ctx->sh_desc_enc; | 245 | desc = ctx->sh_desc_enc; |
@@ -297,12 +297,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
297 | if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + | 297 | if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + |
298 | ctx->split_key_pad_len + ctx->enckeylen <= | 298 | ctx->split_key_pad_len + ctx->enckeylen <= |
299 | CAAM_DESC_BYTES_MAX) | 299 | CAAM_DESC_BYTES_MAX) |
300 | keys_fit_inline = 1; | 300 | keys_fit_inline = true; |
301 | 301 | ||
302 | desc = ctx->sh_desc_dec; | 302 | desc = ctx->sh_desc_dec; |
303 | 303 | ||
304 | /* aead_decrypt shared descriptor */ | 304 | /* aead_decrypt shared descriptor */ |
305 | init_sh_desc(desc, HDR_SHARE_WAIT); | 305 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
306 | 306 | ||
307 | /* Skip if already shared */ | 307 | /* Skip if already shared */ |
308 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | 308 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
@@ -365,7 +365,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
365 | if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + | 365 | if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + |
366 | ctx->split_key_pad_len + ctx->enckeylen <= | 366 | ctx->split_key_pad_len + ctx->enckeylen <= |
367 | CAAM_DESC_BYTES_MAX) | 367 | CAAM_DESC_BYTES_MAX) |
368 | keys_fit_inline = 1; | 368 | keys_fit_inline = true; |
369 | 369 | ||
370 | /* aead_givencrypt shared descriptor */ | 370 | /* aead_givencrypt shared descriptor */ |
371 | desc = ctx->sh_desc_givenc; | 371 | desc = ctx->sh_desc_givenc; |
@@ -564,7 +564,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
564 | 564 | ||
565 | /* ablkcipher_encrypt shared descriptor */ | 565 | /* ablkcipher_encrypt shared descriptor */ |
566 | desc = ctx->sh_desc_enc; | 566 | desc = ctx->sh_desc_enc; |
567 | init_sh_desc(desc, HDR_SHARE_WAIT); | 567 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
568 | /* Skip if already shared */ | 568 | /* Skip if already shared */ |
569 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | 569 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
570 | JUMP_COND_SHRD); | 570 | JUMP_COND_SHRD); |
@@ -605,7 +605,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
605 | /* ablkcipher_decrypt shared descriptor */ | 605 | /* ablkcipher_decrypt shared descriptor */ |
606 | desc = ctx->sh_desc_dec; | 606 | desc = ctx->sh_desc_dec; |
607 | 607 | ||
608 | init_sh_desc(desc, HDR_SHARE_WAIT); | 608 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
609 | /* Skip if already shared */ | 609 | /* Skip if already shared */ |
610 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | 610 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
611 | JUMP_COND_SHRD); | 611 | JUMP_COND_SHRD); |
@@ -1354,10 +1354,10 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1354 | contig &= ~GIV_SRC_CONTIG; | 1354 | contig &= ~GIV_SRC_CONTIG; |
1355 | if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) | 1355 | if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) |
1356 | contig &= ~GIV_DST_CONTIG; | 1356 | contig &= ~GIV_DST_CONTIG; |
1357 | if (unlikely(req->src != req->dst)) { | 1357 | if (unlikely(req->src != req->dst)) { |
1358 | dst_nents = dst_nents ? : 1; | 1358 | dst_nents = dst_nents ? : 1; |
1359 | sec4_sg_len += 1; | 1359 | sec4_sg_len += 1; |
1360 | } | 1360 | } |
1361 | if (!(contig & GIV_SRC_CONTIG)) { | 1361 | if (!(contig & GIV_SRC_CONTIG)) { |
1362 | assoc_nents = assoc_nents ? : 1; | 1362 | assoc_nents = assoc_nents ? : 1; |
1363 | src_nents = src_nents ? : 1; | 1363 | src_nents = src_nents ? : 1; |
@@ -1650,7 +1650,11 @@ struct caam_alg_template {
1650 | }; | 1650 | }; |
1651 | 1651 | ||
1652 | static struct caam_alg_template driver_algs[] = { | 1652 | static struct caam_alg_template driver_algs[] = { |
1653 | /* single-pass ipsec_esp descriptor */ | 1653 | /* |
1654 | * single-pass ipsec_esp descriptor | ||
1655 | * authencesn(*,*) is also registered, although not present | ||
1656 | * explicitly here. | ||
1657 | */ | ||
1654 | { | 1658 | { |
1655 | .name = "authenc(hmac(md5),cbc(aes))", | 1659 | .name = "authenc(hmac(md5),cbc(aes))", |
1656 | .driver_name = "authenc-hmac-md5-cbc-aes-caam", | 1660 | .driver_name = "authenc-hmac-md5-cbc-aes-caam", |
@@ -2213,7 +2217,9 @@ static int __init caam_algapi_init(void)
2213 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 2217 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
2214 | /* TODO: check if h/w supports alg */ | 2218 | /* TODO: check if h/w supports alg */ |
2215 | struct caam_crypto_alg *t_alg; | 2219 | struct caam_crypto_alg *t_alg; |
2220 | bool done = false; | ||
2216 | 2221 | ||
2222 | authencesn: | ||
2217 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); | 2223 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); |
2218 | if (IS_ERR(t_alg)) { | 2224 | if (IS_ERR(t_alg)) { |
2219 | err = PTR_ERR(t_alg); | 2225 | err = PTR_ERR(t_alg); |
@@ -2227,8 +2233,25 @@ static int __init caam_algapi_init(void)
2227 | dev_warn(ctrldev, "%s alg registration failed\n", | 2233 | dev_warn(ctrldev, "%s alg registration failed\n", |
2228 | t_alg->crypto_alg.cra_driver_name); | 2234 | t_alg->crypto_alg.cra_driver_name); |
2229 | kfree(t_alg); | 2235 | kfree(t_alg); |
2230 | } else | 2236 | } else { |
2231 | list_add_tail(&t_alg->entry, &priv->alg_list); | 2237 | list_add_tail(&t_alg->entry, &priv->alg_list); |
2238 | if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD && | ||
2239 | !memcmp(driver_algs[i].name, "authenc", 7) && | ||
2240 | !done) { | ||
2241 | char *name; | ||
2242 | |||
2243 | name = driver_algs[i].name; | ||
2244 | memmove(name + 10, name + 7, strlen(name) - 7); | ||
2245 | memcpy(name + 7, "esn", 3); | ||
2246 | |||
2247 | name = driver_algs[i].driver_name; | ||
2248 | memmove(name + 10, name + 7, strlen(name) - 7); | ||
2249 | memcpy(name + 7, "esn", 3); | ||
2250 | |||
2251 | done = true; | ||
2252 | goto authencesn; | ||
2253 | } | ||
2254 | } | ||
2232 | } | 2255 | } |
2233 | if (!list_empty(&priv->alg_list)) | 2256 | if (!list_empty(&priv->alg_list)) |
2234 | dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", | 2257 | dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", |
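The registration loop above registers each "authenc(...)" template twice: once as-is, and once after splicing "esn" into both the algorithm name and the driver name (offset 7 is the length of "authenc", so "authenc(...)" becomes "authencesn(...)"). A standalone sketch of that in-place rename, assuming only a name buffer with room to grow by three bytes, as the driver's fixed-size template fields have:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Array initialization zero-fills the tail, so the string can
	 * grow by three bytes and stay NUL-terminated. */
	char name[64] = "authenc(hmac(md5),cbc(aes))";

	/* Open a 3-byte gap after "authenc" and drop "esn" into it. */
	memmove(name + 10, name + 7, strlen(name) - 7);
	memcpy(name + 7, "esn", 3);

	printf("%s\n", name);	/* authencesn(hmac(md5),cbc(aes)) */
	return 0;
}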
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 895aaf2bca92..32aba7a61503 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -225,7 +225,7 @@ static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
225 | { | 225 | { |
226 | u32 *key_jump_cmd; | 226 | u32 *key_jump_cmd; |
227 | 227 | ||
228 | init_sh_desc(desc, HDR_SHARE_WAIT); | 228 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
229 | 229 | ||
230 | if (ctx->split_key_len) { | 230 | if (ctx->split_key_len) { |
231 | /* Skip if already shared */ | 231 | /* Skip if already shared */ |
@@ -311,7 +311,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
311 | /* ahash_update shared descriptor */ | 311 | /* ahash_update shared descriptor */ |
312 | desc = ctx->sh_desc_update; | 312 | desc = ctx->sh_desc_update; |
313 | 313 | ||
314 | init_sh_desc(desc, HDR_SHARE_WAIT); | 314 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
315 | 315 | ||
316 | /* Import context from software */ | 316 | /* Import context from software */ |
317 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | 317 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | |
@@ -430,6 +430,10 @@ static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
430 | int ret = 0; | 430 | int ret = 0; |
431 | 431 | ||
432 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | 432 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); |
433 | if (!desc) { | ||
434 | dev_err(jrdev, "unable to allocate key input memory\n"); | ||
435 | return -ENOMEM; | ||
436 | } | ||
433 | 437 | ||
434 | init_job_desc(desc, 0); | 438 | init_job_desc(desc, 0); |
435 | 439 | ||
@@ -1736,8 +1740,11 @@ static void __exit caam_algapi_hash_exit(void)
1736 | struct caam_hash_alg *t_alg, *n; | 1740 | struct caam_hash_alg *t_alg, *n; |
1737 | 1741 | ||
1738 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 1742 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
1739 | if (!dev_node) | 1743 | if (!dev_node) { |
1740 | return; | 1744 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); |
1745 | if (!dev_node) | ||
1746 | return; | ||
1747 | } | ||
1741 | 1748 | ||
1742 | pdev = of_find_device_by_node(dev_node); | 1749 | pdev = of_find_device_by_node(dev_node); |
1743 | if (!pdev) | 1750 | if (!pdev) |
@@ -1812,8 +1819,11 @@ static int __init caam_algapi_hash_init(void)
1812 | int i = 0, err = 0; | 1819 | int i = 0, err = 0; |
1813 | 1820 | ||
1814 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 1821 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
1815 | if (!dev_node) | 1822 | if (!dev_node) { |
1816 | return -ENODEV; | 1823 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); |
1824 | if (!dev_node) | ||
1825 | return -ENODEV; | ||
1826 | } | ||
1817 | 1827 | ||
1818 | pdev = of_find_device_by_node(dev_node); | 1828 | pdev = of_find_device_by_node(dev_node); |
1819 | if (!pdev) | 1829 | if (!pdev) |
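caam_algapi_hash_init/exit above and caam_rng_init below all gain the same lookup fallback: probe for the "fsl,sec-v4.0" compatible string first, then retry with the older "fsl,sec4.0" spelling. A condensed sketch of the pattern, with a hypothetical helper name:

#include <linux/of.h>

/* Hypothetical helper condensing the fallback: newer device trees use
 * "fsl,sec-v4.0", older ones spell it "fsl,sec4.0". */
static struct device_node *caam_find_dev_node(void)
{
	struct device_node *dev_node;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
	return dev_node;
}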
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index e2bfe161dece..d1939a9539c0 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -193,7 +193,7 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
193 | struct device *jrdev = ctx->jrdev; | 193 | struct device *jrdev = ctx->jrdev; |
194 | u32 *desc = ctx->sh_desc; | 194 | u32 *desc = ctx->sh_desc; |
195 | 195 | ||
196 | init_sh_desc(desc, HDR_SHARE_WAIT); | 196 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
197 | 197 | ||
198 | /* Propagate errors from shared to job descriptor */ | 198 | /* Propagate errors from shared to job descriptor */ |
199 | append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); | 199 | append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); |
@@ -284,8 +284,11 @@ static int __init caam_rng_init(void)
284 | struct caam_drv_private *priv; | 284 | struct caam_drv_private *priv; |
285 | 285 | ||
286 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 286 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
287 | if (!dev_node) | 287 | if (!dev_node) { |
288 | return -ENODEV; | 288 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); |
289 | if (!dev_node) | ||
290 | return -ENODEV; | ||
291 | } | ||
289 | 292 | ||
290 | pdev = of_find_device_by_node(dev_node); | 293 | pdev = of_find_device_by_node(dev_node); |
291 | if (!pdev) | 294 | if (!pdev) |
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 762aeff626ac..cf15e7813801 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -23,6 +23,7 @@
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
25 | #include <linux/circ_buf.h> | 25 | #include <linux/circ_buf.h> |
26 | #include <linux/string.h> | ||
26 | #include <net/xfrm.h> | 27 | #include <net/xfrm.h> |
27 | 28 | ||
28 | #include <crypto/algapi.h> | 29 | #include <crypto/algapi.h> |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 414ba20c05a1..bf20dd891705 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -129,7 +129,7 @@ static int instantiate_rng(struct device *jrdev)
129 | 129 | ||
130 | /* | 130 | /* |
131 | * By default, the TRNG runs for 200 clocks per sample; | 131 | * By default, the TRNG runs for 200 clocks per sample; |
132 | * 800 clocks per sample generates better entropy. | 132 | * 1600 clocks per sample generates better entropy. |
133 | */ | 133 | */ |
134 | static void kick_trng(struct platform_device *pdev) | 134 | static void kick_trng(struct platform_device *pdev) |
135 | { | 135 | { |
@@ -144,9 +144,9 @@ static void kick_trng(struct platform_device *pdev)
144 | 144 | ||
145 | /* put RNG4 into program mode */ | 145 | /* put RNG4 into program mode */ |
146 | setbits32(&r4tst->rtmctl, RTMCTL_PRGM); | 146 | setbits32(&r4tst->rtmctl, RTMCTL_PRGM); |
147 | /* 800 clocks per sample */ | 147 | /* 1600 clocks per sample */ |
148 | val = rd_reg32(&r4tst->rtsdctl); | 148 | val = rd_reg32(&r4tst->rtsdctl); |
149 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | (800 << RTSDCTL_ENT_DLY_SHIFT); | 149 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT); |
150 | wr_reg32(&r4tst->rtsdctl, val); | 150 | wr_reg32(&r4tst->rtsdctl, val); |
151 | /* min. freq. count */ | 151 | /* min. freq. count */ |
152 | wr_reg32(&r4tst->rtfrqmin, 400); | 152 | wr_reg32(&r4tst->rtfrqmin, 400); |
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 9955ed9643e6..30b8f74833d4 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -77,10 +77,8 @@ static void report_ccb_status(u32 status, char *outstr)
77 | "Not instantiated", | 77 | "Not instantiated", |
78 | "Test instantiate", | 78 | "Test instantiate", |
79 | "Prediction resistance", | 79 | "Prediction resistance", |
80 | "", | ||
81 | "Prediction resistance and test request", | 80 | "Prediction resistance and test request", |
82 | "Uninstantiate", | 81 | "Uninstantiate", |
83 | "", | ||
84 | "Secure key generation", | 82 | "Secure key generation", |
85 | }; | 83 | }; |
86 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> | 84 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> |
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index d216cd3cc569..f6dba10246c3 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -54,6 +54,10 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
54 | int ret = 0; | 54 | int ret = 0; |
55 | 55 | ||
56 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | 56 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); |
57 | if (!desc) { | ||
58 | dev_err(jrdev, "unable to allocate key input memory\n"); | ||
59 | return -ENOMEM; | ||
60 | } | ||
57 | 61 | ||
58 | init_job_desc(desc, 0); | 62 | init_job_desc(desc, 0); |
59 | 63 | ||
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index f3e36c86b6c3..51f196d77f21 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -289,7 +289,6 @@ static struct crypto_alg geode_alg = {
289 | .cra_blocksize = AES_MIN_BLOCK_SIZE, | 289 | .cra_blocksize = AES_MIN_BLOCK_SIZE, |
290 | .cra_ctxsize = sizeof(struct geode_aes_op), | 290 | .cra_ctxsize = sizeof(struct geode_aes_op), |
291 | .cra_module = THIS_MODULE, | 291 | .cra_module = THIS_MODULE, |
292 | .cra_list = LIST_HEAD_INIT(geode_alg.cra_list), | ||
293 | .cra_u = { | 292 | .cra_u = { |
294 | .cipher = { | 293 | .cipher = { |
295 | .cia_min_keysize = AES_MIN_KEY_SIZE, | 294 | .cia_min_keysize = AES_MIN_KEY_SIZE, |
@@ -402,7 +401,6 @@ static struct crypto_alg geode_cbc_alg = {
402 | .cra_alignmask = 15, | 401 | .cra_alignmask = 15, |
403 | .cra_type = &crypto_blkcipher_type, | 402 | .cra_type = &crypto_blkcipher_type, |
404 | .cra_module = THIS_MODULE, | 403 | .cra_module = THIS_MODULE, |
405 | .cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list), | ||
406 | .cra_u = { | 404 | .cra_u = { |
407 | .blkcipher = { | 405 | .blkcipher = { |
408 | .min_keysize = AES_MIN_KEY_SIZE, | 406 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -489,7 +487,6 @@ static struct crypto_alg geode_ecb_alg = {
489 | .cra_alignmask = 15, | 487 | .cra_alignmask = 15, |
490 | .cra_type = &crypto_blkcipher_type, | 488 | .cra_type = &crypto_blkcipher_type, |
491 | .cra_module = THIS_MODULE, | 489 | .cra_module = THIS_MODULE, |
492 | .cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list), | ||
493 | .cra_u = { | 490 | .cra_u = { |
494 | .blkcipher = { | 491 | .blkcipher = { |
495 | .min_keysize = AES_MIN_KEY_SIZE, | 492 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -588,21 +585,8 @@ static struct pci_driver geode_aes_driver = {
588 | .remove = __devexit_p(geode_aes_remove) | 585 | .remove = __devexit_p(geode_aes_remove) |
589 | }; | 586 | }; |
590 | 587 | ||
591 | static int __init | 588 | module_pci_driver(geode_aes_driver); |
592 | geode_aes_init(void) | ||
593 | { | ||
594 | return pci_register_driver(&geode_aes_driver); | ||
595 | } | ||
596 | |||
597 | static void __exit | ||
598 | geode_aes_exit(void) | ||
599 | { | ||
600 | pci_unregister_driver(&geode_aes_driver); | ||
601 | } | ||
602 | 589 | ||
603 | MODULE_AUTHOR("Advanced Micro Devices, Inc."); | 590 | MODULE_AUTHOR("Advanced Micro Devices, Inc."); |
604 | MODULE_DESCRIPTION("Geode LX Hardware AES driver"); | 591 | MODULE_DESCRIPTION("Geode LX Hardware AES driver"); |
605 | MODULE_LICENSE("GPL"); | 592 | MODULE_LICENSE("GPL"); |
606 | |||
607 | module_init(geode_aes_init); | ||
608 | module_exit(geode_aes_exit); | ||
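module_pci_driver() generates the module init/exit boilerplate that this hunk deletes; for this driver the macro expands to roughly the equivalent of the removed functions:

static int __init geode_aes_driver_init(void)
{
	return pci_register_driver(&geode_aes_driver);
}
module_init(geode_aes_driver_init);

static void __exit geode_aes_driver_exit(void)
{
	pci_unregister_driver(&geode_aes_driver);
}
module_exit(geode_aes_driver_exit);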
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index df14358d7fa1..fda32968a66b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2611,14 +2611,17 @@ static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id
2611 | size = pci_resource_len(pdev, i); | 2611 | size = pci_resource_len(pdev, i); |
2612 | 2612 | ||
2613 | dev->bar[i] = ioremap_nocache(addr, size); | 2613 | dev->bar[i] = ioremap_nocache(addr, size); |
2614 | if (!dev->bar[i]) | 2614 | if (!dev->bar[i]) { |
2615 | err = -ENOMEM; | ||
2615 | goto err_out_unmap_bars; | 2616 | goto err_out_unmap_bars; |
2617 | } | ||
2616 | } | 2618 | } |
2617 | 2619 | ||
2618 | dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma), | 2620 | dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma), |
2619 | &dev->desc_dma); | 2621 | &dev->desc_dma); |
2620 | if (!dev->desc_virt) { | 2622 | if (!dev->desc_virt) { |
2621 | dprintk("Failed to allocate descriptor rings.\n"); | 2623 | dprintk("Failed to allocate descriptor rings.\n"); |
2624 | err = -ENOMEM; | ||
2622 | goto err_out_unmap_bars; | 2625 | goto err_out_unmap_bars; |
2623 | } | 2626 | } |
2624 | memset(dev->desc_virt, 0, sizeof(struct hifn_dma)); | 2627 | memset(dev->desc_virt, 0, sizeof(struct hifn_dma)); |
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
new file mode 100644
index 000000000000..f82616621ae1
--- /dev/null
+++ b/drivers/crypto/nx/Kconfig
@@ -0,0 +1,26 @@
1 | config CRYPTO_DEV_NX_ENCRYPT | ||
2 | tristate "Encryption acceleration support" | ||
3 | depends on PPC64 && IBMVIO | ||
4 | default y | ||
5 | select CRYPTO_AES | ||
6 | select CRYPTO_CBC | ||
7 | select CRYPTO_ECB | ||
8 | select CRYPTO_CCM | ||
9 | select CRYPTO_GCM | ||
10 | select CRYPTO_AUTHENC | ||
11 | select CRYPTO_XCBC | ||
12 | select CRYPTO_SHA256 | ||
13 | select CRYPTO_SHA512 | ||
14 | help | ||
15 | Support for Power7+ in-Nest encryption acceleration. This | ||
16 | module supports acceleration for AES and SHA2 algorithms. If you | ||
17 | choose 'M' here, this module will be called nx_crypto. | ||
18 | |||
19 | config CRYPTO_DEV_NX_COMPRESS | ||
20 | tristate "Compression acceleration support" | ||
21 | depends on PPC64 && IBMVIO | ||
22 | default y | ||
23 | help | ||
24 | Support for Power7+ in-Nest compression acceleration. This | ||
25 | module supports acceleration for the 842 compression algorithm. If you | ||
26 | choose 'M' here, this module will be called nx_compress. | ||
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index 411ce59c80d1..bb770ea45ce9 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -1,4 +1,4 @@
1 | obj-$(CONFIG_CRYPTO_DEV_NX) += nx-crypto.o | 1 | obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o |
2 | nx-crypto-objs := nx.o \ | 2 | nx-crypto-objs := nx.o \ |
3 | nx_debugfs.o \ | 3 | nx_debugfs.o \ |
4 | nx-aes-cbc.o \ | 4 | nx-aes-cbc.o \ |
@@ -9,3 +9,6 @@ nx-crypto-objs := nx.o \
9 | nx-aes-xcbc.o \ | 9 | nx-aes-xcbc.o \ |
10 | nx-sha256.o \ | 10 | nx-sha256.o \ |
11 | nx-sha512.o | 11 | nx-sha512.o |
12 | |||
13 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o | ||
14 | nx-compress-objs := nx-842.o | ||
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
new file mode 100644
index 000000000000..0ce625738677
--- /dev/null
+++ b/drivers/crypto/nx/nx-842.c
@@ -0,0 +1,1617 @@
1 | /* | ||
2 | * Driver for IBM Power 842 compression accelerator | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2012 | ||
19 | * | ||
20 | * Authors: Robert Jennings <rcj@linux.vnet.ibm.com> | ||
21 | * Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
22 | */ | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/nx842.h> | ||
27 | #include <linux/of.h> | ||
28 | #include <linux/slab.h> | ||
29 | |||
30 | #include <asm/page.h> | ||
31 | #include <asm/pSeries_reconfig.h> | ||
32 | #include <asm/vio.h> | ||
33 | |||
34 | #include "nx_csbcpb.h" /* struct nx_csbcpb */ | ||
35 | |||
36 | #define MODULE_NAME "nx-compress" | ||
37 | MODULE_LICENSE("GPL"); | ||
38 | MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>"); | ||
39 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); | ||
40 | |||
41 | #define SHIFT_4K 12 | ||
42 | #define SHIFT_64K 16 | ||
43 | #define SIZE_4K (1UL << SHIFT_4K) | ||
44 | #define SIZE_64K (1UL << SHIFT_64K) | ||
45 | |||
46 | /* IO buffer must be 128 byte aligned */ | ||
47 | #define IO_BUFFER_ALIGN 128 | ||
48 | |||
49 | struct nx842_header { | ||
50 | int blocks_nr; /* number of compressed blocks */ | ||
51 | int offset; /* offset of the first block (from beginning of header) */ | ||
52 | int sizes[0]; /* size of compressed blocks */ | ||
53 | }; | ||
54 | |||
55 | static inline int nx842_header_size(const struct nx842_header *hdr) | ||
56 | { | ||
57 | return sizeof(struct nx842_header) + | ||
58 | hdr->blocks_nr * sizeof(hdr->sizes[0]); | ||
59 | } | ||
60 | |||
61 | /* Macros for fields within nx_csbcpb */ | ||
62 | /* Check the valid bit within the csbcpb valid field */ | ||
63 | #define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) | ||
64 | |||
65 | /* CE macros operate on the completion_extension field bits in the csbcpb. | ||
66 | * CE0 0=full completion, 1=partial completion | ||
67 | * CE1 0=CE0 indicates completion, 1=termination (output may be modified) | ||
68 | * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ | ||
69 | #define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) | ||
70 | #define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) | ||
71 | #define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) | ||
72 | |||
73 | /* The NX unit accepts data only on 4K page boundaries */ | ||
74 | #define NX842_HW_PAGE_SHIFT SHIFT_4K | ||
75 | #define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT) | ||
76 | #define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) | ||
77 | |||
78 | enum nx842_status { | ||
79 | UNAVAILABLE, | ||
80 | AVAILABLE | ||
81 | }; | ||
82 | |||
83 | struct ibm_nx842_counters { | ||
84 | atomic64_t comp_complete; | ||
85 | atomic64_t comp_failed; | ||
86 | atomic64_t decomp_complete; | ||
87 | atomic64_t decomp_failed; | ||
88 | atomic64_t swdecomp; | ||
89 | atomic64_t comp_times[32]; | ||
90 | atomic64_t decomp_times[32]; | ||
91 | }; | ||
92 | |||
93 | static struct nx842_devdata { | ||
94 | struct vio_dev *vdev; | ||
95 | struct device *dev; | ||
96 | struct ibm_nx842_counters *counters; | ||
97 | unsigned int max_sg_len; | ||
98 | unsigned int max_sync_size; | ||
99 | unsigned int max_sync_sg; | ||
100 | enum nx842_status status; | ||
101 | } __rcu *devdata; | ||
102 | static DEFINE_SPINLOCK(devdata_mutex); | ||
103 | |||
104 | #define NX842_COUNTER_INC(_x) \ | ||
105 | static inline void nx842_inc_##_x( \ | ||
106 | const struct nx842_devdata *dev) { \ | ||
107 | if (dev) \ | ||
108 | atomic64_inc(&dev->counters->_x); \ | ||
109 | } | ||
110 | NX842_COUNTER_INC(comp_complete); | ||
111 | NX842_COUNTER_INC(comp_failed); | ||
112 | NX842_COUNTER_INC(decomp_complete); | ||
113 | NX842_COUNTER_INC(decomp_failed); | ||
114 | NX842_COUNTER_INC(swdecomp); | ||
115 | |||
116 | #define NX842_HIST_SLOTS 16 | ||
117 | |||
118 | static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) | ||
119 | { | ||
120 | int bucket = fls(time); | ||
121 | |||
122 | if (bucket) | ||
123 | bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); | ||
124 | |||
125 | atomic64_inc(&times[bucket]); | ||
126 | } | ||
127 | |||
128 | /* NX unit operation flags */ | ||
129 | #define NX842_OP_COMPRESS 0x0 | ||
130 | #define NX842_OP_CRC 0x1 | ||
131 | #define NX842_OP_DECOMPRESS 0x2 | ||
132 | #define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) | ||
133 | #define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) | ||
134 | #define NX842_OP_ASYNC (1<<23) | ||
135 | #define NX842_OP_NOTIFY (1<<22) | ||
136 | #define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) | ||
137 | |||
138 | static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) | ||
139 | { | ||
140 | /* No use of DMA mappings within the driver. */ | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | struct nx842_slentry { | ||
145 | unsigned long ptr; /* Real address (use __pa()) */ | ||
146 | unsigned long len; | ||
147 | }; | ||
148 | |||
149 | /* pHyp scatterlist entry */ | ||
150 | struct nx842_scatterlist { | ||
151 | int entry_nr; /* number of slentries */ | ||
152 | struct nx842_slentry *entries; /* ptr to array of slentries */ | ||
153 | }; | ||
154 | |||
155 | /* Does not include sizeof(entry_nr) in the size */ | ||
156 | static inline unsigned long nx842_get_scatterlist_size( | ||
157 | struct nx842_scatterlist *sl) | ||
158 | { | ||
159 | return sl->entry_nr * sizeof(struct nx842_slentry); | ||
160 | } | ||
161 | |||
162 | static int nx842_build_scatterlist(unsigned long buf, int len, | ||
163 | struct nx842_scatterlist *sl) | ||
164 | { | ||
165 | unsigned long nextpage; | ||
166 | struct nx842_slentry *entry; | ||
167 | |||
168 | sl->entry_nr = 0; | ||
169 | |||
170 | entry = sl->entries; | ||
171 | while (len) { | ||
172 | entry->ptr = __pa(buf); | ||
173 | nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); | ||
174 | if (nextpage < buf + len) { | ||
175 | /* we aren't at the end yet */ | ||
176 | if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE)) | ||
177 | /* we are in the middle (or beginning) */ | ||
178 | entry->len = NX842_HW_PAGE_SIZE; | ||
179 | else | ||
180 | /* we are at the beginning */ | ||
181 | entry->len = nextpage - buf; | ||
182 | } else { | ||
183 | /* at the end */ | ||
184 | entry->len = len; | ||
185 | } | ||
186 | |||
187 | len -= entry->len; | ||
188 | buf += entry->len; | ||
189 | sl->entry_nr++; | ||
190 | entry++; | ||
191 | } | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * Working memory for software decompression | ||
198 | */ | ||
199 | struct sw842_fifo { | ||
200 | union { | ||
201 | char f8[256][8]; | ||
202 | char f4[512][4]; | ||
203 | }; | ||
204 | char f2[256][2]; | ||
205 | unsigned char f84_full; | ||
206 | unsigned char f2_full; | ||
207 | unsigned char f8_count; | ||
208 | unsigned char f2_count; | ||
209 | unsigned int f4_count; | ||
210 | }; | ||
211 | |||
212 | /* | ||
213 | * Working memory for crypto API | ||
214 | */ | ||
215 | struct nx842_workmem { | ||
216 | char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */ | ||
217 | union { | ||
218 | /* hardware working memory */ | ||
219 | struct { | ||
220 | /* scatterlist */ | ||
221 | char slin[SIZE_4K]; | ||
222 | char slout[SIZE_4K]; | ||
223 | /* coprocessor status/parameter block */ | ||
224 | struct nx_csbcpb csbcpb; | ||
225 | }; | ||
226 | /* software working memory */ | ||
227 | struct sw842_fifo swfifo; /* software decompression fifo */ | ||
228 | }; | ||
229 | }; | ||
230 | |||
231 | int nx842_get_workmem_size(void) | ||
232 | { | ||
233 | return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE; | ||
234 | } | ||
235 | EXPORT_SYMBOL_GPL(nx842_get_workmem_size); | ||
236 | |||
237 | int nx842_get_workmem_size_aligned(void) | ||
238 | { | ||
239 | return sizeof(struct nx842_workmem); | ||
240 | } | ||
241 | EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned); | ||
242 | |||
243 | static int nx842_validate_result(struct device *dev, | ||
244 | struct cop_status_block *csb) | ||
245 | { | ||
246 | /* The csb must be valid after returning from vio_h_cop_sync */ | ||
247 | if (!NX842_CSBCBP_VALID_CHK(csb->valid)) { | ||
248 | dev_err(dev, "%s: cspcbp not valid upon completion.\n", | ||
249 | __func__); | ||
250 | dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n", | ||
251 | csb->valid, | ||
252 | csb->crb_seq_number, | ||
253 | csb->completion_code, | ||
254 | csb->completion_extension); | ||
255 | dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", | ||
256 | csb->processed_byte_count, | ||
257 | (unsigned long)csb->address); | ||
258 | return -EIO; | ||
259 | } | ||
260 | |||
261 | /* Check return values from the hardware in the CSB */ | ||
262 | switch (csb->completion_code) { | ||
263 | case 0: /* Completed without error */ | ||
264 | break; | ||
265 | case 64: /* Target bytes > Source bytes during compression */ | ||
266 | case 13: /* Output buffer too small */ | ||
267 | dev_dbg(dev, "%s: Compression output larger than input\n", | ||
268 | __func__); | ||
269 | return -ENOSPC; | ||
270 | case 66: /* Input data contains an illegal template field */ | ||
271 | case 67: /* Template indicates data past the end of the input stream */ | ||
272 | dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", | ||
273 | __func__, csb->completion_code); | ||
274 | return -EINVAL; | ||
275 | default: | ||
276 | dev_dbg(dev, "%s: Unspecified error (code:%d)\n", | ||
277 | __func__, csb->completion_code); | ||
278 | return -EIO; | ||
279 | } | ||
280 | |||
281 | /* Hardware sanity check */ | ||
282 | if (!NX842_CSBCPB_CE2(csb->completion_extension)) { | ||
283 | dev_err(dev, "%s: No error returned by hardware, but " | ||
284 | "data returned is unusable, contact support.\n" | ||
285 | "(Additional info: csbcbp->processed bytes " | ||
286 | "does not specify processed bytes for the " | ||
287 | "target buffer.)\n", __func__); | ||
288 | return -EIO; | ||
289 | } | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | /** | ||
295 | * nx842_compress - Compress data using the 842 algorithm | ||
296 | * | ||
297 | * Compression provided by the NX842 coprocessor on IBM Power systems. | ||
298 | * The input buffer is compressed and the result is stored in the | ||
299 | * provided output buffer. | ||
300 | * | ||
301 | * Upon return from this function @outlen contains the length of the | ||
302 | * compressed data. If there is an error then @outlen will be 0 and an | ||
303 | * error will be specified by the return code from this function. | ||
304 | * | ||
305 | * @in: Pointer to input buffer, must be page aligned | ||
306 | * @inlen: Length of input buffer, must be PAGE_SIZE | ||
307 | * @out: Pointer to output buffer | ||
308 | * @outlen: Length of output buffer | ||
309 | * @wmem: ptr to buffer for working memory, size determined by | ||
310 | * nx842_get_workmem_size() | ||
311 | * | ||
312 | * Returns: | ||
313 | * 0 Success, output of length @outlen stored in the buffer at @out | ||
314 | * -ENOMEM Unable to allocate internal buffers | ||
315 | * -ENOSPC Output buffer is too small | ||
316 | * -EMSGSIZE XXX Difficult to describe this limitation | ||
317 | * -EIO Internal error | ||
318 | * -ENODEV Hardware unavailable | ||
319 | */ | ||
320 | int nx842_compress(const unsigned char *in, unsigned int inlen, | ||
321 | unsigned char *out, unsigned int *outlen, void *wmem) | ||
322 | { | ||
323 | struct nx842_header *hdr; | ||
324 | struct nx842_devdata *local_devdata; | ||
325 | struct device *dev = NULL; | ||
326 | struct nx842_workmem *workmem; | ||
327 | struct nx842_scatterlist slin, slout; | ||
328 | struct nx_csbcpb *csbcpb; | ||
329 | int ret = 0, max_sync_size, i, bytesleft, size, hdrsize; | ||
330 | unsigned long inbuf, outbuf, padding; | ||
331 | struct vio_pfo_op op = { | ||
332 | .done = NULL, | ||
333 | .handle = 0, | ||
334 | .timeout = 0, | ||
335 | }; | ||
336 | unsigned long start_time = get_tb(); | ||
337 | |||
338 | /* | ||
339 | * Make sure input buffer is 64k page aligned. This is assumed since | ||
340 | * this driver is designed for page compression only (for now). This | ||
341 | * is very nice since we can now use direct DDE(s) for the input and | ||
342 | * the alignment is guaranteed. | ||
343 | */ | ||
344 | inbuf = (unsigned long)in; | ||
345 | if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE) | ||
346 | return -EINVAL; | ||
347 | |||
348 | rcu_read_lock(); | ||
349 | local_devdata = rcu_dereference(devdata); | ||
350 | if (!local_devdata || !local_devdata->dev) { | ||
351 | rcu_read_unlock(); | ||
352 | return -ENODEV; | ||
353 | } | ||
354 | max_sync_size = local_devdata->max_sync_size; | ||
355 | dev = local_devdata->dev; | ||
356 | |||
357 | /* Create the header */ | ||
358 | hdr = (struct nx842_header *)out; | ||
359 | hdr->blocks_nr = PAGE_SIZE / max_sync_size; | ||
360 | hdrsize = nx842_header_size(hdr); | ||
361 | outbuf = (unsigned long)out + hdrsize; | ||
362 | bytesleft = *outlen - hdrsize; | ||
363 | |||
364 | /* Init scatterlist */ | ||
365 | workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, | ||
366 | NX842_HW_PAGE_SIZE); | ||
367 | slin.entries = (struct nx842_slentry *)workmem->slin; | ||
368 | slout.entries = (struct nx842_slentry *)workmem->slout; | ||
369 | |||
370 | /* Init operation */ | ||
371 | op.flags = NX842_OP_COMPRESS; | ||
372 | csbcpb = &workmem->csbcpb; | ||
373 | memset(csbcpb, 0, sizeof(*csbcpb)); | ||
374 | op.csbcpb = __pa(csbcpb); | ||
375 | op.out = __pa(slout.entries); | ||
376 | |||
377 | for (i = 0; i < hdr->blocks_nr; i++) { | ||
378 | /* | ||
379 | * Aligning the output blocks to 128 bytes does waste space, | ||
380 | * but it prevents the need for bounce buffers and memory | ||
381 | * copies. It also simplifies the code a lot. In the worst | ||
382 | * case (64k page, 4k max_sync_size), you lose up to | ||
383 | * (128*16)/64k = ~3% the compression factor. For 64k | ||
384 | * max_sync_size, the loss would be at most 128/64k = ~0.2%. | ||
385 | */ | ||
386 | padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf; | ||
387 | outbuf += padding; | ||
388 | bytesleft -= padding; | ||
389 | if (i == 0) | ||
390 | /* save offset into first block in header */ | ||
391 | hdr->offset = padding + hdrsize; | ||
392 | |||
393 | if (bytesleft <= 0) { | ||
394 | ret = -ENOSPC; | ||
395 | goto unlock; | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * NOTE: If the default max_sync_size is changed from 4k | ||
400 | * to 64k, remove the "likely" case below, since a | ||
401 | * scatterlist will always be needed. | ||
402 | */ | ||
403 | if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { | ||
404 | /* Create direct DDE */ | ||
405 | op.in = __pa(inbuf); | ||
406 | op.inlen = max_sync_size; | ||
407 | |||
408 | } else { | ||
409 | /* Create indirect DDE (scatterlist) */ | ||
410 | nx842_build_scatterlist(inbuf, max_sync_size, &slin); | ||
411 | op.in = __pa(slin.entries); | ||
412 | op.inlen = -nx842_get_scatterlist_size(&slin); | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect | ||
417 | * DDE is required for the outbuf. | ||
418 | * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must | ||
419 | * also be page aligned (1 in 128/4k=32 chance) in order | ||
420 | * to use a direct DDE. | ||
421 | * This is unlikely, just use an indirect DDE always. | ||
422 | */ | ||
423 | nx842_build_scatterlist(outbuf, | ||
424 | min(bytesleft, max_sync_size), &slout); | ||
425 | /* op.out set before loop */ | ||
426 | op.outlen = -nx842_get_scatterlist_size(&slout); | ||
427 | |||
428 | /* Send request to pHyp */ | ||
429 | ret = vio_h_cop_sync(local_devdata->vdev, &op); | ||
430 | |||
431 | /* Check for pHyp error */ | ||
432 | if (ret) { | ||
433 | dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", | ||
434 | __func__, ret, op.hcall_err); | ||
435 | ret = -EIO; | ||
436 | goto unlock; | ||
437 | } | ||
438 | |||
439 | /* Check for hardware error */ | ||
440 | ret = nx842_validate_result(dev, &csbcpb->csb); | ||
441 | if (ret && ret != -ENOSPC) | ||
442 | goto unlock; | ||
443 | |||
444 | /* Handle incompressible data */ | ||
445 | if (unlikely(ret == -ENOSPC)) { | ||
446 | if (bytesleft < max_sync_size) { | ||
447 | /* | ||
448 | * Not enough space left in the output buffer | ||
449 | * to store uncompressed block | ||
450 | */ | ||
451 | goto unlock; | ||
452 | } else { | ||
453 | /* Store incompressible block */ | ||
454 | memcpy((void *)outbuf, (void *)inbuf, | ||
455 | max_sync_size); | ||
456 | hdr->sizes[i] = -max_sync_size; | ||
457 | outbuf += max_sync_size; | ||
458 | bytesleft -= max_sync_size; | ||
459 | /* Reset ret, incompressible data handled */ | ||
460 | ret = 0; | ||
461 | } | ||
462 | } else { | ||
463 | /* Normal case, compression was successful */ | ||
464 | size = csbcpb->csb.processed_byte_count; | ||
465 | dev_dbg(dev, "%s: processed_bytes=%d\n", | ||
466 | __func__, size); | ||
467 | hdr->sizes[i] = size; | ||
468 | outbuf += size; | ||
469 | bytesleft -= size; | ||
470 | } | ||
471 | |||
472 | inbuf += max_sync_size; | ||
473 | } | ||
474 | |||
475 | *outlen = (unsigned int)(outbuf - (unsigned long)out); | ||
476 | |||
477 | unlock: | ||
478 | if (ret) | ||
479 | nx842_inc_comp_failed(local_devdata); | ||
480 | else { | ||
481 | nx842_inc_comp_complete(local_devdata); | ||
482 | ibm_nx842_incr_hist(local_devdata->counters->comp_times, | ||
483 | (get_tb() - start_time) / tb_ticks_per_usec); | ||
484 | } | ||
485 | rcu_read_unlock(); | ||
486 | return ret; | ||
487 | } | ||
488 | EXPORT_SYMBOL_GPL(nx842_compress); | ||
489 | |||
490 | static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, | ||
491 | const void *); | ||
492 | |||
493 | /** | ||
494 | * nx842_decompress - Decompress data using the 842 algorithm | ||
495 | * | ||
496 | * Decompression provided by the NX842 coprocessor on IBM Power systems. | ||
497 | * The input buffer is decompressed and the result is stored in the | ||
498 | * provided output buffer. The size allocated to the output buffer is | ||
499 | * provided by the caller of this function in @outlen. Upon return from | ||
500 | * this function @outlen contains the length of the decompressed data. | ||
501 | * If there is an error then @outlen will be 0 and an error will be | ||
502 | * specified by the return code from this function. | ||
503 | * | ||
504 | * @in: Pointer to input buffer, will use bounce buffer if not 128 byte | ||
505 | * aligned | ||
506 | * @inlen: Length of input buffer | ||
507 | * @out: Pointer to output buffer, must be page aligned | ||
508 | * @outlen: Length of output buffer, must be PAGE_SIZE | ||
509 | * @wmem: ptr to buffer for working memory, size determined by | ||
510 | * nx842_get_workmem_size() | ||
511 | * | ||
512 | * Returns: | ||
513 | * 0 Success, output of length @outlen stored in the buffer at @out | ||
514 | * -ENODEV Hardware decompression device is unavailable | ||
515 | * -ENOMEM Unable to allocate internal buffers | ||
516 | * -ENOSPC Output buffer is too small | ||
517 | * -EINVAL Bad input data encountered when attempting decompress | ||
518 | * -EIO Internal error | ||
519 | */ | ||
520 | int nx842_decompress(const unsigned char *in, unsigned int inlen, | ||
521 | unsigned char *out, unsigned int *outlen, void *wmem) | ||
522 | { | ||
523 | struct nx842_header *hdr; | ||
524 | struct nx842_devdata *local_devdata; | ||
525 | struct device *dev = NULL; | ||
526 | struct nx842_workmem *workmem; | ||
527 | struct nx842_scatterlist slin, slout; | ||
528 | struct nx_csbcpb *csbcpb; | ||
529 | int ret = 0, i, size, max_sync_size; | ||
530 | unsigned long inbuf, outbuf; | ||
531 | struct vio_pfo_op op = { | ||
532 | .done = NULL, | ||
533 | .handle = 0, | ||
534 | .timeout = 0, | ||
535 | }; | ||
536 | unsigned long start_time = get_tb(); | ||
537 | |||
538 | /* Ensure page alignment and size */ | ||
539 | outbuf = (unsigned long)out; | ||
540 | if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE) | ||
541 | return -EINVAL; | ||
542 | |||
543 | rcu_read_lock(); | ||
544 | local_devdata = rcu_dereference(devdata); | ||
545 | if (local_devdata) | ||
546 | dev = local_devdata->dev; | ||
547 | |||
548 | /* Get header */ | ||
549 | hdr = (struct nx842_header *)in; | ||
550 | |||
551 | workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, | ||
552 | NX842_HW_PAGE_SIZE); | ||
553 | |||
554 | inbuf = (unsigned long)in + hdr->offset; | ||
555 | if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) { | ||
556 | /* Copy block(s) into bounce buffer for alignment */ | ||
557 | memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset); | ||
558 | inbuf = (unsigned long)workmem->bounce; | ||
559 | } | ||
560 | |||
561 | /* Init scatterlist */ | ||
562 | slin.entries = (struct nx842_slentry *)workmem->slin; | ||
563 | slout.entries = (struct nx842_slentry *)workmem->slout; | ||
564 | |||
565 | /* Init operation */ | ||
566 | op.flags = NX842_OP_DECOMPRESS; | ||
567 | csbcpb = &workmem->csbcpb; | ||
568 | memset(csbcpb, 0, sizeof(*csbcpb)); | ||
569 | op.csbcpb = __pa(csbcpb); | ||
570 | |||
571 | /* | ||
572 | * max_sync_size may have changed since compression, | ||
573 | * so we can't read it from the device info. We need | ||
574 | * to derive it from hdr->blocks_nr. | ||
575 | */ | ||
576 | max_sync_size = PAGE_SIZE / hdr->blocks_nr; | ||
577 | |||
578 | for (i = 0; i < hdr->blocks_nr; i++) { | ||
579 | /* Skip padding */ | ||
580 | inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN); | ||
581 | |||
582 | if (hdr->sizes[i] < 0) { | ||
583 | /* Negative sizes indicate uncompressed data blocks */ | ||
584 | size = abs(hdr->sizes[i]); | ||
585 | memcpy((void *)outbuf, (void *)inbuf, size); | ||
586 | outbuf += size; | ||
587 | inbuf += size; | ||
588 | continue; | ||
589 | } | ||
590 | |||
591 | if (!dev) | ||
592 | goto sw; | ||
593 | |||
594 | /* | ||
595 | * The better the compression, the more likely the "likely" | ||
596 | * case becomes. | ||
597 | */ | ||
598 | if (likely((inbuf & NX842_HW_PAGE_MASK) == | ||
599 | ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { | ||
600 | /* Create direct DDE */ | ||
601 | op.in = __pa(inbuf); | ||
602 | op.inlen = hdr->sizes[i]; | ||
603 | } else { | ||
604 | /* Create indirect DDE (scatterlist) */ | ||
605 | nx842_build_scatterlist(inbuf, hdr->sizes[i], &slin); | ||
606 | op.in = __pa(slin.entries); | ||
607 | op.inlen = -nx842_get_scatterlist_size(&slin); | ||
608 | } | ||
609 | |||
610 | /* | ||
611 | * NOTE: If the default max_sync_size is changed from 4k | ||
612 | * to 64k, remove the "likely" case below, since a | ||
613 | * scatterlist will always be needed. | ||
614 | */ | ||
615 | if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { | ||
616 | /* Create direct DDE */ | ||
617 | op.out = __pa(outbuf); | ||
618 | op.outlen = max_sync_size; | ||
619 | } else { | ||
620 | /* Create indirect DDE (scatterlist) */ | ||
621 | nx842_build_scatterlist(outbuf, max_sync_size, &slout); | ||
622 | op.out = __pa(slout.entries); | ||
623 | op.outlen = -nx842_get_scatterlist_size(&slout); | ||
624 | } | ||
625 | |||
626 | /* Send request to pHyp */ | ||
627 | ret = vio_h_cop_sync(local_devdata->vdev, &op); | ||
628 | |||
629 | /* Check for pHyp error */ | ||
630 | if (ret) { | ||
631 | dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", | ||
632 | __func__, ret, op.hcall_err); | ||
633 | dev = NULL; | ||
634 | goto sw; | ||
635 | } | ||
636 | |||
637 | /* Check for hardware error */ | ||
638 | ret = nx842_validate_result(dev, &csbcpb->csb); | ||
639 | if (ret) { | ||
640 | dev = NULL; | ||
641 | goto sw; | ||
642 | } | ||
643 | |||
644 | /* HW decompression success */ | ||
645 | inbuf += hdr->sizes[i]; | ||
646 | outbuf += csbcpb->csb.processed_byte_count; | ||
647 | continue; | ||
648 | |||
649 | sw: | ||
650 | /* software decompression */ | ||
651 | size = max_sync_size; | ||
652 | ret = sw842_decompress( | ||
653 | (unsigned char *)inbuf, hdr->sizes[i], | ||
654 | (unsigned char *)outbuf, &size, wmem); | ||
655 | if (ret) { | ||
656 | pr_debug("%s: sw842_decompress failed with %d\n", | ||
657 | __func__, ret); | ||
660 | if (ret != -ENOSPC && ret != -EINVAL && | ||
661 | ret != -EMSGSIZE) | ||
662 | ret = -EIO; | ||
663 | goto unlock; | ||
664 | } | ||
665 | |||
666 | /* SW decompression success */ | ||
667 | inbuf += hdr->sizes[i]; | ||
668 | outbuf += size; | ||
669 | } | ||
670 | |||
671 | *outlen = (unsigned int)(outbuf - (unsigned long)out); | ||
672 | |||
673 | unlock: | ||
674 | if (ret) | ||
675 | /* decompress fail */ | ||
676 | nx842_inc_decomp_failed(local_devdata); | ||
677 | else { | ||
678 | if (!dev) | ||
679 | /* software decompress */ | ||
680 | nx842_inc_swdecomp(local_devdata); | ||
681 | nx842_inc_decomp_complete(local_devdata); | ||
682 | ibm_nx842_incr_hist(local_devdata->counters->decomp_times, | ||
683 | (get_tb() - start_time) / tb_ticks_per_usec); | ||
684 | } | ||
685 | |||
686 | rcu_read_unlock(); | ||
687 | return ret; | ||
688 | } | ||
689 | EXPORT_SYMBOL_GPL(nx842_decompress); | ||
690 | |||
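The block loop above is driven entirely by the header's sizes[] array: a negative entry marks a stored block that is copied through verbatim, while any other entry is a compressed block handed to the hardware (or to the software fallback). A minimal userspace sketch of that classification, using an assumed demo header layout rather than the driver's real struct nx842_header:

#include <stdio.h>
#include <stdlib.h>

struct demo_header {			/* illustrative layout only */
	int blocks_nr;
	int sizes[3];			/* negative == stored block */
};

int main(void)
{
	struct demo_header hdr = { 3, { 1024, -4096, 2048 } };
	int i;

	for (i = 0; i < hdr.blocks_nr; i++) {
		if (hdr.sizes[i] < 0)
			printf("block %d: stored, copy %d bytes\n",
			       i, abs(hdr.sizes[i]));
		else
			printf("block %d: compressed, %d input bytes\n",
			       i, hdr.sizes[i]);
	}
	return 0;
}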
691 | /** | ||
692 | * nx842_OF_set_defaults -- Set default (disabled) values for devdata | ||
693 | * | ||
694 | * @devdata - struct nx842_devdata to update | ||
695 | * | ||
696 | * Returns: | ||
697 | * 0 on success | ||
698 | * -ENOENT if @devdata ptr is NULL | ||
699 | */ | ||
700 | static int nx842_OF_set_defaults(struct nx842_devdata *devdata) | ||
701 | { | ||
702 | if (devdata) { | ||
703 | devdata->max_sync_size = 0; | ||
704 | devdata->max_sync_sg = 0; | ||
705 | devdata->max_sg_len = 0; | ||
706 | devdata->status = UNAVAILABLE; | ||
707 | return 0; | ||
708 | } else | ||
709 | return -ENOENT; | ||
710 | } | ||
711 | |||
712 | /** | ||
713 | * nx842_OF_upd_status -- Update the device info from OF status prop | ||
714 | * | ||
715 | * The status property indicates whether the accelerator is usable. | ||
716 | * Presence of the device node in the OF tree means the hardware is | ||
717 | * present; the device is treated as enabled only when the status | ||
718 | * value is 'okay', otherwise the driver is disabled. | ||
719 | * | ||
720 | * @devdata - struct nx842_devdata to update | ||
721 | * @prop - struct property pointer containing the status for the update | ||
722 | * | ||
723 | * Returns: | ||
724 | * 0 - Device is available | ||
725 | * -EINVAL - Device is not available | ||
726 | */ | ||
727 | static int nx842_OF_upd_status(struct nx842_devdata *devdata, | ||
728 | struct property *prop) { | ||
729 | int ret = 0; | ||
730 | const char *status = (const char *)prop->value; | ||
731 | |||
732 | if (!strncmp(status, "okay", (size_t)prop->length)) { | ||
733 | devdata->status = AVAILABLE; | ||
734 | } else { | ||
735 | dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n", | ||
736 | __func__, status); | ||
737 | devdata->status = UNAVAILABLE; | ||
738 | } | ||
739 | |||
740 | return ret; | ||
741 | } | ||
742 | |||
743 | /** | ||
744 | * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop | ||
745 | * | ||
746 | * Definition of the 'ibm,max-sg-len' OF property: | ||
747 | * This field indicates the maximum byte length of a scatter list | ||
748 | * for the platform facility. It is a single cell encoded as with encode-int. | ||
749 | * | ||
750 | * Example: | ||
751 | * # od -x ibm,max-sg-len | ||
752 | * 0000000 0000 0ff0 | ||
753 | * | ||
754 | * In this example, the maximum byte length of a scatter list is | ||
755 | * 0x0ff0 (4,080). | ||
756 | * | ||
757 | * @devdata - struct nx842_devdata to update | ||
758 | * @prop - struct property pointer containing the max-sg-len for the update | ||
759 | * | ||
760 | * Returns: | ||
761 | * 0 on success | ||
762 | * -EINVAL on failure | ||
763 | */ | ||
764 | static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, | ||
765 | struct property *prop) { | ||
766 | int ret = 0; | ||
767 | const int *maxsglen = prop->value; | ||
768 | |||
769 | if (prop->length != sizeof(*maxsglen)) { | ||
770 | dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); | ||
771 | dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, | ||
772 | prop->length, sizeof(*maxsglen)); | ||
773 | ret = -EINVAL; | ||
774 | } else { | ||
775 | devdata->max_sg_len = (unsigned int)min(*maxsglen, | ||
776 | (int)NX842_HW_PAGE_SIZE); | ||
777 | } | ||
778 | |||
779 | return ret; | ||
780 | } | ||
781 | |||
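OF property cells are big-endian 32-bit integers, so the single-cell decode above amounts to a be32 load. A standalone sketch of the decode; the raw bytes are assumptions copied from the od -x example, not read from a real device tree:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t raw[4] = { 0x00, 0x00, 0x0f, 0xf0 };	/* od -x: 0000 0ff0 */
	uint32_t maxsglen = ((uint32_t)raw[0] << 24) | ((uint32_t)raw[1] << 16) |
			    ((uint32_t)raw[2] << 8) | raw[3];

	printf("ibm,max-sg-len = %u bytes\n", maxsglen);	/* prints 4080 */
	return 0;
}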
782 | /** | ||
783 | * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop | ||
784 | * | ||
785 | * Definition of the 'ibm,max-sync-cop' OF property: | ||
786 | * Two series of cells. The first series of cells represents the maximums | ||
787 | * that can be synchronously compressed. The second series of cells | ||
788 | * represents the maximums that can be synchronously decompressed. | ||
789 | * 1. The first cell in each series contains the count of the | ||
790 | * (data byte length, scatter list element count) pairs that | ||
791 | * follow, each pair being of the form | ||
792 | * a. One cell data byte length | ||
793 | * b. One cell total number of scatter list elements | ||
794 | * | ||
795 | * Example: | ||
796 | * # od -x ibm,max-sync-cop | ||
797 | * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001 | ||
798 | * 0000020 0000 1000 0000 01fe | ||
799 | * | ||
800 | * In this example, compression supports 0x1000 (4,096) data byte length | ||
801 | * and 0x1fe (510) total scatter list elements. Decompression supports | ||
802 | * 0x1000 (4,096) data byte length and 0x1fe (510) total scatter list | ||
803 | * elements. | ||
804 | * | ||
805 | * @devdata - struct nx842_devdata to update | ||
806 | * @prop - struct property pointer containing the maxsyncop for the update | ||
807 | * | ||
808 | * Returns: | ||
809 | * 0 on success | ||
810 | * -EINVAL on failure | ||
811 | */ | ||
812 | static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, | ||
813 | struct property *prop) { | ||
814 | int ret = 0; | ||
815 | const struct maxsynccop_t { | ||
816 | int comp_elements; | ||
817 | int comp_data_limit; | ||
818 | int comp_sg_limit; | ||
819 | int decomp_elements; | ||
820 | int decomp_data_limit; | ||
821 | int decomp_sg_limit; | ||
822 | } *maxsynccop; | ||
823 | |||
824 | if (prop->length != sizeof(*maxsynccop)) { | ||
825 | dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__); | ||
826 | dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length, | ||
827 | sizeof(*maxsynccop)); | ||
828 | ret = -EINVAL; | ||
829 | goto out; | ||
830 | } | ||
831 | |||
832 | maxsynccop = (const struct maxsynccop_t *)prop->value; | ||
833 | |||
834 | /* Use one limit rather than separate limits for compression and | ||
835 | * decompression. Set a maximum for this so as not to exceed the | ||
836 | * size that the header can support and round the value down to | ||
837 | * the hardware page size (4K) */ | ||
838 | devdata->max_sync_size = | ||
839 | (unsigned int)min(maxsynccop->comp_data_limit, | ||
840 | maxsynccop->decomp_data_limit); | ||
841 | |||
842 | devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, | ||
843 | SIZE_64K); | ||
844 | |||
845 | if (devdata->max_sync_size < SIZE_4K) { | ||
846 | dev_err(devdata->dev, "%s: hardware max data size (%u) is " | ||
847 | "less than the driver minimum, unable to use " | ||
848 | "the hardware device\n", | ||
849 | __func__, devdata->max_sync_size); | ||
850 | ret = -EINVAL; | ||
851 | goto out; | ||
852 | } | ||
853 | |||
854 | devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit, | ||
855 | maxsynccop->decomp_sg_limit); | ||
856 | if (devdata->max_sync_sg < 1) { | ||
857 | dev_err(devdata->dev, "%s: hardware max sg size (%u) is " | ||
858 | "less than the driver minimum, unable to use " | ||
859 | "the hardware device\n", | ||
860 | __func__, devdata->max_sync_sg); | ||
861 | ret = -EINVAL; | ||
862 | goto out; | ||
863 | } | ||
864 | |||
865 | out: | ||
866 | return ret; | ||
867 | } | ||
868 | |||
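For concreteness, decoding the eight cells from the od -x example above into the same fields as struct maxsynccop_t yields comp {1, 0x1000, 0x1fe} and decomp {1, 0x1000, 0x1fe}. A userspace sketch of that decode (the byte values are assumptions copied from the example output):

#include <stdint.h>
#include <stdio.h>

static uint32_t cell(const uint8_t *p)		/* big-endian OF cell */
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	uint8_t raw[24] = {
		0, 0, 0, 1,  0, 0, 0x10, 0,  0, 0, 0x01, 0xfe,	/* comp   */
		0, 0, 0, 1,  0, 0, 0x10, 0,  0, 0, 0x01, 0xfe,	/* decomp */
	};

	printf("comp:   %u pair(s), data 0x%x, sg %u\n",
	       cell(raw), cell(raw + 4), cell(raw + 8));
	printf("decomp: %u pair(s), data 0x%x, sg %u\n",
	       cell(raw + 12), cell(raw + 16), cell(raw + 20));
	return 0;
}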
869 | /** | ||
871 | * nx842_OF_upd -- Handle OF properties updates for the device. | ||
872 | * | ||
873 | * Set all properties from the OF tree. Optionally, a new property | ||
874 | * can be provided by the @new_prop pointer to overwrite an existing value. | ||
875 | * The device will remain disabled until all values are valid; this | ||
876 | * function will return an error for an update unless all values are valid. | ||
877 | * | ||
878 | * @new_prop: If not NULL, this property is being updated. If NULL, update | ||
879 | * all properties from the current values in the OF tree. | ||
880 | * | ||
881 | * Returns: | ||
882 | * 0 - Success | ||
883 | * -ENOMEM - Could not allocate memory for new devdata structure | ||
884 | * -EINVAL - property value not found, new_prop is not a recognized | ||
885 | * property for the device or property value is not valid. | ||
886 | * -ENODEV - Device is not available | ||
887 | */ | ||
888 | static int nx842_OF_upd(struct property *new_prop) | ||
889 | { | ||
890 | struct nx842_devdata *old_devdata = NULL; | ||
891 | struct nx842_devdata *new_devdata = NULL; | ||
892 | struct device_node *of_node = NULL; | ||
893 | struct property *status = NULL; | ||
894 | struct property *maxsglen = NULL; | ||
895 | struct property *maxsyncop = NULL; | ||
896 | int ret = 0; | ||
897 | unsigned long flags; | ||
898 | |||
899 | spin_lock_irqsave(&devdata_mutex, flags); | ||
900 | old_devdata = rcu_dereference_check(devdata, | ||
901 | lockdep_is_held(&devdata_mutex)); | ||
902 | if (old_devdata) | ||
903 | of_node = old_devdata->dev->of_node; | ||
904 | |||
905 | if (!old_devdata || !of_node) { | ||
906 | pr_err("%s: device is not available\n", __func__); | ||
907 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
908 | return -ENODEV; | ||
909 | } | ||
910 | |||
911 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
912 | if (!new_devdata) { | ||
913 | dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__); | ||
914 | ret = -ENOMEM; | ||
915 | goto error_out; | ||
916 | } | ||
917 | |||
918 | memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); | ||
919 | new_devdata->counters = old_devdata->counters; | ||
920 | |||
921 | /* Set ptrs for existing properties */ | ||
922 | status = of_find_property(of_node, "status", NULL); | ||
923 | maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL); | ||
924 | maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL); | ||
925 | if (!status || !maxsglen || !maxsyncop) { | ||
926 | dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__); | ||
927 | ret = -EINVAL; | ||
928 | goto error_out; | ||
929 | } | ||
930 | |||
931 | /* Set ptr to new property if provided */ | ||
932 | if (new_prop) { | ||
933 | /* Single property */ | ||
934 | if (!strncmp(new_prop->name, "status", new_prop->length)) { | ||
935 | status = new_prop; | ||
936 | |||
937 | } else if (!strncmp(new_prop->name, "ibm,max-sg-len", | ||
938 | new_prop->length)) { | ||
939 | maxsglen = new_prop; | ||
940 | |||
941 | } else if (!strncmp(new_prop->name, "ibm,max-sync-cop", | ||
942 | new_prop->length)) { | ||
943 | maxsyncop = new_prop; | ||
944 | |||
945 | } else { | ||
946 | /* | ||
947 | * Skip the update, the property being updated | ||
948 | * has no impact. | ||
949 | */ | ||
950 | goto out; | ||
951 | } | ||
952 | } | ||
953 | |||
954 | /* Perform property updates */ | ||
955 | ret = nx842_OF_upd_status(new_devdata, status); | ||
956 | if (ret) | ||
957 | goto error_out; | ||
958 | |||
959 | ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen); | ||
960 | if (ret) | ||
961 | goto error_out; | ||
962 | |||
963 | ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop); | ||
964 | if (ret) | ||
965 | goto error_out; | ||
966 | |||
967 | out: | ||
968 | dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n", | ||
969 | __func__, new_devdata->max_sync_size, | ||
970 | old_devdata->max_sync_size); | ||
971 | dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n", | ||
972 | __func__, new_devdata->max_sync_sg, | ||
973 | old_devdata->max_sync_sg); | ||
974 | dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n", | ||
975 | __func__, new_devdata->max_sg_len, | ||
976 | old_devdata->max_sg_len); | ||
977 | |||
978 | rcu_assign_pointer(devdata, new_devdata); | ||
979 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
980 | synchronize_rcu(); | ||
981 | dev_set_drvdata(new_devdata->dev, new_devdata); | ||
982 | kfree(old_devdata); | ||
983 | return 0; | ||
984 | |||
985 | error_out: | ||
986 | if (new_devdata) { | ||
987 | dev_info(old_devdata->dev, "%s: device disabled\n", __func__); | ||
988 | nx842_OF_set_defaults(new_devdata); | ||
989 | rcu_assign_pointer(devdata, new_devdata); | ||
990 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
991 | synchronize_rcu(); | ||
992 | dev_set_drvdata(new_devdata->dev, new_devdata); | ||
993 | kfree(old_devdata); | ||
994 | } else { | ||
995 | dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__); | ||
996 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
997 | } | ||
998 | |||
999 | if (!ret) | ||
1000 | ret = -EINVAL; | ||
1001 | return ret; | ||
1002 | } | ||
1003 | |||
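nx842_OF_upd follows the classic RCU copy-update idiom: build a complete replacement devdata, publish it with rcu_assign_pointer() while holding the lock, then synchronize_rcu() before freeing the old copy so in-flight readers can finish safely. A stripped-down sketch of the same pattern (names, the plain spin_lock, and the GFP flag are illustrative, not driver code; assumes linux/slab.h, linux/spinlock.h and linux/rcupdate.h):

struct cfg { int value; };

static struct cfg __rcu *active_cfg;
static DEFINE_SPINLOCK(cfg_lock);

static int cfg_update(int new_value)
{
	struct cfg *old, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->value = new_value;

	spin_lock(&cfg_lock);
	old = rcu_dereference_protected(active_cfg,
					lockdep_is_held(&cfg_lock));
	rcu_assign_pointer(active_cfg, new);	/* readers now see new */
	spin_unlock(&cfg_lock);

	synchronize_rcu();			/* wait out readers of old */
	kfree(old);
	return 0;
}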
1004 | /** | ||
1005 | * nx842_OF_notifier - Process updates to OF properties for the device | ||
1006 | * | ||
1007 | * @np: notifier block | ||
1008 | * @action: notifier action | ||
1009 | * @update: struct pSeries_reconfig_prop_update pointer if action is | ||
1010 | * PSERIES_UPDATE_PROPERTY | ||
1011 | * | ||
1012 | * Returns: | ||
1013 | * NOTIFY_OK on success | ||
1014 | * NOTIFY_BAD encoded with error number on failure, use | ||
1015 | * notifier_to_errno() to decode this value | ||
1016 | */ | ||
1017 | static int nx842_OF_notifier(struct notifier_block *np, | ||
1018 | unsigned long action, | ||
1019 | void *update) | ||
1020 | { | ||
1021 | struct pSeries_reconfig_prop_update *upd; | ||
1022 | struct nx842_devdata *local_devdata; | ||
1023 | struct device_node *node = NULL; | ||
1024 | |||
1025 | upd = (struct pSeries_reconfig_prop_update *)update; | ||
1026 | |||
1027 | rcu_read_lock(); | ||
1028 | local_devdata = rcu_dereference(devdata); | ||
1029 | if (local_devdata) | ||
1030 | node = local_devdata->dev->of_node; | ||
1031 | |||
1032 | if (local_devdata && | ||
1033 | action == PSERIES_UPDATE_PROPERTY && | ||
1034 | !strcmp(upd->node->name, node->name)) { | ||
1035 | rcu_read_unlock(); | ||
1036 | nx842_OF_upd(upd->property); | ||
1037 | } else | ||
1038 | rcu_read_unlock(); | ||
1039 | |||
1040 | return NOTIFY_OK; | ||
1041 | } | ||
1042 | |||
1043 | static struct notifier_block nx842_of_nb = { | ||
1044 | .notifier_call = nx842_OF_notifier, | ||
1045 | }; | ||
1046 | |||
1047 | #define nx842_counter_read(_name) \ | ||
1048 | static ssize_t nx842_##_name##_show(struct device *dev, \ | ||
1049 | struct device_attribute *attr, \ | ||
1050 | char *buf) { \ | ||
1051 | struct nx842_devdata *local_devdata; \ | ||
1052 | int p = 0; \ | ||
1053 | rcu_read_lock(); \ | ||
1054 | local_devdata = rcu_dereference(devdata); \ | ||
1055 | if (local_devdata) \ | ||
1056 | p = snprintf(buf, PAGE_SIZE, "%ld\n", \ | ||
1057 | atomic64_read(&local_devdata->counters->_name)); \ | ||
1058 | rcu_read_unlock(); \ | ||
1059 | return p; \ | ||
1060 | } | ||
1061 | |||
1062 | #define NX842DEV_COUNTER_ATTR_RO(_name) \ | ||
1063 | nx842_counter_read(_name); \ | ||
1064 | static struct device_attribute dev_attr_##_name = __ATTR(_name, \ | ||
1065 | 0444, \ | ||
1066 | nx842_##_name##_show,\ | ||
1067 | NULL); | ||
1068 | |||
1069 | NX842DEV_COUNTER_ATTR_RO(comp_complete); | ||
1070 | NX842DEV_COUNTER_ATTR_RO(comp_failed); | ||
1071 | NX842DEV_COUNTER_ATTR_RO(decomp_complete); | ||
1072 | NX842DEV_COUNTER_ATTR_RO(decomp_failed); | ||
1073 | NX842DEV_COUNTER_ATTR_RO(swdecomp); | ||
1074 | |||
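Expanding one instance by hand shows what the two macros generate; this is an approximation of the preprocessor output for NX842DEV_COUNTER_ATTR_RO(swdecomp), lightly reindented:

static ssize_t nx842_swdecomp_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct nx842_devdata *local_devdata;
	int p = 0;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		p = snprintf(buf, PAGE_SIZE, "%ld\n",
			     atomic64_read(&local_devdata->counters->swdecomp));
	rcu_read_unlock();
	return p;
}

static struct device_attribute dev_attr_swdecomp =
	__ATTR(swdecomp, 0444, nx842_swdecomp_show, NULL);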
1075 | static ssize_t nx842_timehist_show(struct device *, | ||
1076 | struct device_attribute *, char *); | ||
1077 | |||
1078 | static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, | ||
1079 | nx842_timehist_show, NULL); | ||
1080 | static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, | ||
1081 | 0444, nx842_timehist_show, NULL); | ||
1082 | |||
1083 | static ssize_t nx842_timehist_show(struct device *dev, | ||
1084 | struct device_attribute *attr, char *buf) { | ||
1085 | char *p = buf; | ||
1086 | struct nx842_devdata *local_devdata; | ||
1087 | atomic64_t *times; | ||
1088 | int bytes_remain = PAGE_SIZE; | ||
1089 | int bytes; | ||
1090 | int i; | ||
1091 | |||
1092 | rcu_read_lock(); | ||
1093 | local_devdata = rcu_dereference(devdata); | ||
1094 | if (!local_devdata) { | ||
1095 | rcu_read_unlock(); | ||
1096 | return 0; | ||
1097 | } | ||
1098 | |||
1099 | if (attr == &dev_attr_comp_times) | ||
1100 | times = local_devdata->counters->comp_times; | ||
1101 | else if (attr == &dev_attr_decomp_times) | ||
1102 | times = local_devdata->counters->decomp_times; | ||
1103 | else { | ||
1104 | rcu_read_unlock(); | ||
1105 | return 0; | ||
1106 | } | ||
1107 | |||
1108 | for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { | ||
1109 | bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n", | ||
1110 | i ? (2<<(i-1)) : 0, (2<<i)-1, | ||
1111 | atomic64_read(×[i])); | ||
1112 | bytes_remain -= bytes; | ||
1113 | p += bytes; | ||
1114 | } | ||
1115 | /* The last bucket holds everything over | ||
1116 | * 2<<(NX842_HIST_SLOTS - 2) us */ | ||
1117 | bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n", | ||
1118 | 2<<(NX842_HIST_SLOTS - 2), | ||
1119 | atomic64_read(×[(NX842_HIST_SLOTS - 1)])); | ||
1120 | p += bytes; | ||
1121 | |||
1122 | rcu_read_unlock(); | ||
1123 | return p - buf; | ||
1124 | } | ||
1125 | |||
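The buckets are powers of two: slot 0 covers 0-1us, slot 1 covers 2-3us, slot 2 covers 4-7us, and so on, with the final slot catching everything above the last boundary. A userspace sketch that prints the boundaries; the slot count used here is an assumption for illustration, the driver's real value is NX842_HIST_SLOTS:

#include <stdio.h>

#define DEMO_HIST_SLOTS 16	/* assumed; driver uses NX842_HIST_SLOTS */

int main(void)
{
	int i;

	for (i = 0; i < DEMO_HIST_SLOTS - 2; i++)
		printf("%u-%uus\n", i ? (2 << (i - 1)) : 0, (2 << i) - 1);
	printf("%uus -\n", 2 << (DEMO_HIST_SLOTS - 2));	/* overflow bucket */
	return 0;
}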
1126 | static struct attribute *nx842_sysfs_entries[] = { | ||
1127 | &dev_attr_comp_complete.attr, | ||
1128 | &dev_attr_comp_failed.attr, | ||
1129 | &dev_attr_decomp_complete.attr, | ||
1130 | &dev_attr_decomp_failed.attr, | ||
1131 | &dev_attr_swdecomp.attr, | ||
1132 | &dev_attr_comp_times.attr, | ||
1133 | &dev_attr_decomp_times.attr, | ||
1134 | NULL, | ||
1135 | }; | ||
1136 | |||
1137 | static struct attribute_group nx842_attribute_group = { | ||
1138 | .name = NULL, /* put in device directory */ | ||
1139 | .attrs = nx842_sysfs_entries, | ||
1140 | }; | ||
1141 | |||
1142 | static int __init nx842_probe(struct vio_dev *viodev, | ||
1143 | const struct vio_device_id *id) | ||
1144 | { | ||
1145 | struct nx842_devdata *old_devdata, *new_devdata = NULL; | ||
1146 | unsigned long flags; | ||
1147 | int ret = 0; | ||
1148 | |||
1149 | spin_lock_irqsave(&devdata_mutex, flags); | ||
1150 | old_devdata = rcu_dereference_check(devdata, | ||
1151 | lockdep_is_held(&devdata_mutex)); | ||
1152 | |||
1153 | if (old_devdata && old_devdata->vdev != NULL) { | ||
1154 | dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); | ||
1155 | ret = -1; | ||
1156 | goto error_unlock; | ||
1157 | } | ||
1158 | |||
1159 | dev_set_drvdata(&viodev->dev, NULL); | ||
1160 | |||
1161 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
1162 | if (!new_devdata) { | ||
1163 | dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__); | ||
1164 | ret = -ENOMEM; | ||
1165 | goto error_unlock; | ||
1166 | } | ||
1167 | |||
1168 | new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), | ||
1169 | GFP_NOFS); | ||
1170 | if (!new_devdata->counters) { | ||
1171 | dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__); | ||
1172 | ret = -ENOMEM; | ||
1173 | goto error_unlock; | ||
1174 | } | ||
1175 | |||
1176 | new_devdata->vdev = viodev; | ||
1177 | new_devdata->dev = &viodev->dev; | ||
1178 | nx842_OF_set_defaults(new_devdata); | ||
1179 | |||
1180 | rcu_assign_pointer(devdata, new_devdata); | ||
1181 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1182 | synchronize_rcu(); | ||
1183 | kfree(old_devdata); | ||
1184 | |||
1185 | pSeries_reconfig_notifier_register(&nx842_of_nb); | ||
1186 | |||
1187 | ret = nx842_OF_upd(NULL); | ||
1188 | if (ret && ret != -ENODEV) { | ||
1189 | dev_err(&viodev->dev, "could not parse device tree. %d\n", ret); | ||
1190 | ret = -1; | ||
1191 | goto error; | ||
1192 | } | ||
1193 | |||
1194 | rcu_read_lock(); | ||
1195 | if (dev_set_drvdata(&viodev->dev, rcu_dereference(devdata))) { | ||
1196 | rcu_read_unlock(); | ||
1197 | dev_err(&viodev->dev, "failed to set driver data for device\n"); | ||
1198 | ret = -1; | ||
1199 | goto error; | ||
1200 | } | ||
1201 | rcu_read_unlock(); | ||
1202 | |||
1203 | if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { | ||
1204 | dev_err(&viodev->dev, "could not create sysfs device attributes\n"); | ||
1205 | ret = -1; | ||
1206 | goto error; | ||
1207 | } | ||
1208 | |||
1209 | return 0; | ||
1210 | |||
1211 | error_unlock: | ||
1212 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1213 | if (new_devdata) | ||
1214 | kfree(new_devdata->counters); | ||
1215 | kfree(new_devdata); | ||
1216 | error: | ||
1217 | return ret; | ||
1218 | } | ||
1219 | |||
1220 | static int __exit nx842_remove(struct vio_dev *viodev) | ||
1221 | { | ||
1222 | struct nx842_devdata *old_devdata; | ||
1223 | unsigned long flags; | ||
1224 | |||
1225 | pr_info("Removing IBM Power 842 compression device\n"); | ||
1226 | sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); | ||
1227 | |||
1228 | spin_lock_irqsave(&devdata_mutex, flags); | ||
1229 | old_devdata = rcu_dereference_check(devdata, | ||
1230 | lockdep_is_held(&devdata_mutex)); | ||
1231 | pSeries_reconfig_notifier_unregister(&nx842_of_nb); | ||
1232 | rcu_assign_pointer(devdata, NULL); | ||
1233 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1234 | synchronize_rcu(); | ||
1235 | dev_set_drvdata(&viodev->dev, NULL); | ||
1236 | if (old_devdata) | ||
1237 | kfree(old_devdata->counters); | ||
1238 | kfree(old_devdata); | ||
1239 | return 0; | ||
1240 | } | ||
1241 | |||
1242 | static struct vio_device_id nx842_driver_ids[] = { | ||
1243 | {"ibm,compression-v1", "ibm,compression"}, | ||
1244 | {"", ""}, | ||
1245 | }; | ||
1246 | |||
1247 | static struct vio_driver nx842_driver = { | ||
1248 | .name = MODULE_NAME, | ||
1249 | .probe = nx842_probe, | ||
1250 | .remove = nx842_remove, | ||
1251 | .get_desired_dma = nx842_get_desired_dma, | ||
1252 | .id_table = nx842_driver_ids, | ||
1253 | }; | ||
1254 | |||
1255 | static int __init nx842_init(void) | ||
1256 | { | ||
1257 | struct nx842_devdata *new_devdata; | ||
1258 | pr_info("Registering IBM Power 842 compression driver\n"); | ||
1259 | |||
1260 | RCU_INIT_POINTER(devdata, NULL); | ||
1261 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); | ||
1262 | if (!new_devdata) { | ||
1263 | pr_err("Could not allocate memory for device data\n"); | ||
1264 | return -ENOMEM; | ||
1265 | } | ||
1266 | new_devdata->status = UNAVAILABLE; | ||
1267 | RCU_INIT_POINTER(devdata, new_devdata); | ||
1268 | |||
1269 | return vio_register_driver(&nx842_driver); | ||
1270 | } | ||
1271 | |||
1272 | module_init(nx842_init); | ||
1273 | |||
1274 | static void __exit nx842_exit(void) | ||
1275 | { | ||
1276 | struct nx842_devdata *old_devdata; | ||
1277 | unsigned long flags; | ||
1278 | |||
1279 | pr_info("Exiting IBM Power 842 compression driver\n"); | ||
1280 | spin_lock_irqsave(&devdata_mutex, flags); | ||
1281 | old_devdata = rcu_dereference_check(devdata, | ||
1282 | lockdep_is_held(&devdata_mutex)); | ||
1283 | rcu_assign_pointer(devdata, NULL); | ||
1284 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1285 | synchronize_rcu(); | ||
1286 | if (old_devdata) | ||
1287 | dev_set_drvdata(old_devdata->dev, NULL); | ||
1288 | kfree(old_devdata); | ||
1289 | vio_unregister_driver(&nx842_driver); | ||
1290 | } | ||
1291 | |||
1292 | module_exit(nx842_exit); | ||
1293 | |||
1294 | /********************************* | ||
1295 | * 842 software decompressor | ||
1296 | *********************************/ | ||
1297 | typedef int (*sw842_template_op)(const char **, int *, unsigned char **, | ||
1298 | struct sw842_fifo *); | ||
1299 | |||
1300 | static int sw842_data8(const char **, int *, unsigned char **, | ||
1301 | struct sw842_fifo *); | ||
1302 | static int sw842_data4(const char **, int *, unsigned char **, | ||
1303 | struct sw842_fifo *); | ||
1304 | static int sw842_data2(const char **, int *, unsigned char **, | ||
1305 | struct sw842_fifo *); | ||
1306 | static int sw842_ptr8(const char **, int *, unsigned char **, | ||
1307 | struct sw842_fifo *); | ||
1308 | static int sw842_ptr4(const char **, int *, unsigned char **, | ||
1309 | struct sw842_fifo *); | ||
1310 | static int sw842_ptr2(const char **, int *, unsigned char **, | ||
1311 | struct sw842_fifo *); | ||
1312 | |||
1313 | /* special templates */ | ||
1314 | #define SW842_TMPL_REPEAT 0x1B | ||
1315 | #define SW842_TMPL_ZEROS 0x1C | ||
1316 | #define SW842_TMPL_EOF 0x1E | ||
1317 | |||
1318 | static sw842_template_op sw842_tmpl_ops[26][4] = { | ||
1319 | { sw842_data8, NULL}, /* 0 (00000) */ | ||
1320 | { sw842_data4, sw842_data2, sw842_ptr2, NULL}, | ||
1321 | { sw842_data4, sw842_ptr2, sw842_data2, NULL}, | ||
1322 | { sw842_data4, sw842_ptr2, sw842_ptr2, NULL}, | ||
1323 | { sw842_data4, sw842_ptr4, NULL}, | ||
1324 | { sw842_data2, sw842_ptr2, sw842_data4, NULL}, | ||
1325 | { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2}, | ||
1326 | { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2}, | ||
1327 | { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2}, | ||
1328 | { sw842_data2, sw842_ptr2, sw842_ptr4, NULL}, | ||
1329 | { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */ | ||
1330 | { sw842_ptr2, sw842_data4, sw842_ptr2, NULL}, | ||
1331 | { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2}, | ||
1332 | { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2}, | ||
1333 | { sw842_ptr2, sw842_data2, sw842_ptr4, NULL}, | ||
1334 | { sw842_ptr2, sw842_ptr2, sw842_data4, NULL}, | ||
1335 | { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2}, | ||
1336 | { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2}, | ||
1337 | { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2}, | ||
1338 | { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL}, | ||
1339 | { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */ | ||
1340 | { sw842_ptr4, sw842_data2, sw842_ptr2, NULL}, | ||
1341 | { sw842_ptr4, sw842_ptr2, sw842_data2, NULL}, | ||
1342 | { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL}, | ||
1343 | { sw842_ptr4, sw842_ptr4, NULL}, | ||
1344 | { sw842_ptr8, NULL} | ||
1345 | }; | ||
1346 | |||
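Each 5-bit template either selects a row of this table (values 0-25) or one of the special codes defined above. A small userspace model of the dispatch the decompressor performs on every template (names are illustrative):

#include <stdio.h>

#define TMPL_REPEAT 0x1B
#define TMPL_ZEROS  0x1C
#define TMPL_EOF    0x1E

static const char *classify(unsigned int tmpl)
{
	if (tmpl == TMPL_REPEAT)
		return "repeat previous 8-byte group";
	if (tmpl == TMPL_ZEROS)
		return "emit 8 zero bytes";
	if (tmpl == TMPL_EOF)
		return "end of stream";
	if (tmpl <= 25)
		return "table-driven data/pointer ops";
	return "invalid";
}

int main(void)
{
	unsigned int t;

	for (t = 0; t < 32; t++)
		printf("%2u: %s\n", t, classify(t));
	return 0;
}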
1347 | /* Software decompress helpers */ | ||
1348 | |||
1349 | static uint8_t sw842_get_byte(const char *buf, int bit) | ||
1350 | { | ||
1351 | uint8_t tmpl; | ||
1352 | uint16_t tmp; | ||
1353 | tmp = htons(*(uint16_t *)(buf)); | ||
1354 | tmp = (uint16_t)(tmp << bit); | ||
1355 | tmp = ntohs(tmp); | ||
1356 | memcpy(&tmpl, &tmp, 1); | ||
1357 | return tmpl; | ||
1358 | } | ||
1359 | |||
1360 | static uint8_t sw842_get_template(const char **buf, int *bit) | ||
1361 | { | ||
1362 | uint8_t byte; | ||
1363 | byte = sw842_get_byte(*buf, *bit); | ||
1364 | byte = byte >> 3; | ||
1365 | byte &= 0x1F; | ||
1366 | *buf += (*bit + 5) / 8; | ||
1367 | *bit = (*bit + 5) % 8; | ||
1368 | return byte; | ||
1369 | } | ||
1370 | |||
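sw842_get_byte() loads 16 bits big-endian and shifts the current stream position into the top byte, which lets sw842_get_template() take a 5-bit field that may straddle a byte boundary. A self-contained model of the same extraction; the input bytes are made up (0xDE begins with bits 11011, i.e. 0x1B):

#include <stdint.h>
#include <stdio.h>

static uint8_t get5(const uint8_t **buf, int *bit)
{
	uint16_t tmp = ((uint16_t)(*buf)[0] << 8) | (*buf)[1];	/* BE load */
	uint8_t v = (uint8_t)((tmp << *bit) >> 11) & 0x1F;	/* top 5 bits */

	*buf += (*bit + 5) / 8;		/* advance past the 5 bits */
	*bit  = (*bit + 5) % 8;
	return v;
}

int main(void)
{
	const uint8_t stream[2] = { 0xDE, 0xAD };
	const uint8_t *p = stream;
	int bit = 0;

	printf("template = 0x%02x\n", get5(&p, &bit));	/* 0x1b (REPEAT) */
	return 0;
}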
1371 | /* repeat_count happens to be 5-bit too (like the template) */ | ||
1372 | static uint8_t sw842_get_repeat_count(const char **buf, int *bit) | ||
1373 | { | ||
1374 | uint8_t byte; | ||
1375 | byte = sw842_get_byte(*buf, *bit); | ||
1376 | byte = byte >> 2; | ||
1377 | byte &= 0x3F; | ||
1378 | *buf += (*bit + 6) / 8; | ||
1379 | *bit = (*bit + 6) % 8; | ||
1380 | return byte; | ||
1381 | } | ||
1382 | |||
1383 | static uint8_t sw842_get_ptr2(const char **buf, int *bit) | ||
1384 | { | ||
1385 | uint8_t ptr; | ||
1386 | ptr = sw842_get_byte(*buf, *bit); | ||
1387 | (*buf)++; | ||
1388 | return ptr; | ||
1389 | } | ||
1390 | |||
1391 | static uint16_t sw842_get_ptr4(const char **buf, int *bit, | ||
1392 | struct sw842_fifo *fifo) | ||
1393 | { | ||
1394 | uint16_t ptr; | ||
1395 | ptr = htons(*(uint16_t *)(*buf)); | ||
1396 | ptr = (uint16_t)(ptr << *bit); | ||
1397 | ptr = ptr >> 7; | ||
1398 | ptr &= 0x01FF; | ||
1399 | *buf += (*bit + 9) / 8; | ||
1400 | *bit = (*bit + 9) % 8; | ||
1401 | return ptr; | ||
1402 | } | ||
1403 | |||
1404 | static uint8_t sw842_get_ptr8(const char **buf, int *bit, | ||
1405 | struct sw842_fifo *fifo) | ||
1406 | { | ||
1407 | return sw842_get_ptr2(buf, bit); | ||
1408 | } | ||
1409 | |||
1410 | /* Software decompress template ops */ | ||
1411 | |||
1412 | static int sw842_data8(const char **inbuf, int *inbit, | ||
1413 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1414 | { | ||
1415 | int ret; | ||
1416 | |||
1417 | ret = sw842_data4(inbuf, inbit, outbuf, fifo); | ||
1418 | if (ret) | ||
1419 | return ret; | ||
1420 | ret = sw842_data4(inbuf, inbit, outbuf, fifo); | ||
1421 | return ret; | ||
1422 | } | ||
1423 | |||
1424 | static int sw842_data4(const char **inbuf, int *inbit, | ||
1425 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1426 | { | ||
1427 | int ret; | ||
1428 | |||
1429 | ret = sw842_data2(inbuf, inbit, outbuf, fifo); | ||
1430 | if (ret) | ||
1431 | return ret; | ||
1432 | ret = sw842_data2(inbuf, inbit, outbuf, fifo); | ||
1433 | return ret; | ||
1434 | } | ||
1435 | |||
1436 | static int sw842_data2(const char **inbuf, int *inbit, | ||
1437 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1438 | { | ||
1439 | **outbuf = sw842_get_byte(*inbuf, *inbit); | ||
1440 | (*inbuf)++; | ||
1441 | (*outbuf)++; | ||
1442 | **outbuf = sw842_get_byte(*inbuf, *inbit); | ||
1443 | (*inbuf)++; | ||
1444 | (*outbuf)++; | ||
1445 | return 0; | ||
1446 | } | ||
1447 | |||
1448 | static int sw842_ptr8(const char **inbuf, int *inbit, | ||
1449 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1450 | { | ||
1451 | uint8_t ptr; | ||
1452 | ptr = sw842_get_ptr8(inbuf, inbit, fifo); | ||
1453 | if (!fifo->f84_full && (ptr >= fifo->f8_count)) | ||
1454 | return 1; | ||
1455 | memcpy(*outbuf, fifo->f8[ptr], 8); | ||
1456 | *outbuf += 8; | ||
1457 | return 0; | ||
1458 | } | ||
1459 | |||
1460 | static int sw842_ptr4(const char **inbuf, int *inbit, | ||
1461 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1462 | { | ||
1463 | uint16_t ptr; | ||
1464 | ptr = sw842_get_ptr4(inbuf, inbit, fifo); | ||
1465 | if (!fifo->f84_full && (ptr >= fifo->f4_count)) | ||
1466 | return 1; | ||
1467 | memcpy(*outbuf, fifo->f4[ptr], 4); | ||
1468 | *outbuf += 4; | ||
1469 | return 0; | ||
1470 | } | ||
1471 | |||
1472 | static int sw842_ptr2(const char **inbuf, int *inbit, | ||
1473 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1474 | { | ||
1475 | uint8_t ptr; | ||
1476 | ptr = sw842_get_ptr2(inbuf, inbit); | ||
1477 | if (!fifo->f2_full && (ptr >= fifo->f2_count)) | ||
1478 | return 1; | ||
1479 | memcpy(*outbuf, fifo->f2[ptr], 2); | ||
1480 | *outbuf += 2; | ||
1481 | return 0; | ||
1482 | } | ||
1483 | |||
1484 | static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo) | ||
1485 | { | ||
1486 | unsigned char initial_f2count = fifo->f2_count; | ||
1487 | |||
1488 | memcpy(fifo->f8[fifo->f8_count], buf, 8); | ||
1489 | fifo->f4_count += 2; | ||
1490 | fifo->f8_count += 1; | ||
1491 | |||
1492 | if (!fifo->f84_full && fifo->f4_count >= 512) { | ||
1493 | fifo->f84_full = 1; | ||
1494 | fifo->f4_count /= 512; | ||
1495 | } | ||
1496 | |||
1497 | memcpy(fifo->f2[fifo->f2_count++], buf, 2); | ||
1498 | memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2); | ||
1499 | memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2); | ||
1500 | memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2); | ||
1501 | if (fifo->f2_count < initial_f2count) | ||
1502 | fifo->f2_full = 1; | ||
1503 | } | ||
1504 | |||
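Note the full-detection trick at the end of sw842_copy_to_fifo(): f2_count is an 8-bit counter (see the unsigned char snapshot above), so once 256 two-byte entries have been written it wraps and becomes smaller than its value on entry. A minimal model of that check:

#include <stdio.h>

int main(void)
{
	unsigned char f2_count = 254, initial = f2_count;

	f2_count += 4;			/* wraps modulo 256: 254 -> 2 */
	if (f2_count < initial)
		printf("fifo marked full (count wrapped to %u)\n", f2_count);
	return 0;
}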
1505 | static int sw842_decompress(const unsigned char *src, int srclen, | ||
1506 | unsigned char *dst, int *destlen, | ||
1507 | const void *wrkmem) | ||
1508 | { | ||
1509 | uint8_t tmpl; | ||
1510 | const char *inbuf; | ||
1511 | int inbit = 0; | ||
1512 | unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf; | ||
1513 | const char *inbuf_end; | ||
1514 | sw842_template_op op; | ||
1515 | int opindex; | ||
1516 | int i, repeat_count; | ||
1517 | struct sw842_fifo *fifo; | ||
1518 | int ret = 0; | ||
1519 | |||
1520 | fifo = &((struct nx842_workmem *)(wrkmem))->swfifo; | ||
1521 | memset(fifo, 0, sizeof(*fifo)); | ||
1522 | |||
1523 | origbuf = NULL; | ||
1524 | inbuf = src; | ||
1525 | inbuf_end = src + srclen; | ||
1526 | outbuf = dst; | ||
1527 | outbuf_end = dst + *destlen; | ||
1528 | |||
1529 | while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) { | ||
1530 | if (inbuf >= inbuf_end) { | ||
1531 | ret = -EINVAL; | ||
1532 | goto out; | ||
1533 | } | ||
1534 | |||
1535 | opindex = 0; | ||
1536 | prevbuf = origbuf; | ||
1537 | origbuf = outbuf; | ||
1538 | switch (tmpl) { | ||
1539 | case SW842_TMPL_REPEAT: | ||
1540 | if (prevbuf == NULL) { | ||
1541 | ret = -EINVAL; | ||
1542 | goto out; | ||
1543 | } | ||
1544 | |||
1545 | repeat_count = sw842_get_repeat_count(&inbuf, | ||
1546 | &inbit) + 1; | ||
1547 | |||
1548 | /* Did the repeat count advance past the end of input */ | ||
1549 | if (inbuf > inbuf_end) { | ||
1550 | ret = -EINVAL; | ||
1551 | goto out; | ||
1552 | } | ||
1553 | |||
1554 | for (i = 0; i < repeat_count; i++) { | ||
1555 | /* Would this overflow the output buffer */ | ||
1556 | if ((outbuf + 8) > outbuf_end) { | ||
1557 | ret = -ENOSPC; | ||
1558 | goto out; | ||
1559 | } | ||
1560 | |||
1561 | memcpy(outbuf, prevbuf, 8); | ||
1562 | sw842_copy_to_fifo(outbuf, fifo); | ||
1563 | outbuf += 8; | ||
1564 | } | ||
1565 | break; | ||
1566 | |||
1567 | case SW842_TMPL_ZEROS: | ||
1568 | /* Would this overflow the output buffer */ | ||
1569 | if ((outbuf + 8) > outbuf_end) { | ||
1570 | ret = -ENOSPC; | ||
1571 | goto out; | ||
1572 | } | ||
1573 | |||
1574 | memset(outbuf, 0, 8); | ||
1575 | sw842_copy_to_fifo(outbuf, fifo); | ||
1576 | outbuf += 8; | ||
1577 | break; | ||
1578 | |||
1579 | default: | ||
1580 | if (tmpl > 25) { | ||
1581 | ret = -EINVAL; | ||
1582 | goto out; | ||
1583 | } | ||
1584 | |||
1585 | /* Does this go past the end of the input buffer */ | ||
1586 | if ((inbuf + 2) > inbuf_end) { | ||
1587 | ret = -EINVAL; | ||
1588 | goto out; | ||
1589 | } | ||
1590 | |||
1591 | /* Would this overflow the output buffer */ | ||
1592 | if ((outbuf + 8) > outbuf_end) { | ||
1593 | ret = -ENOSPC; | ||
1594 | goto out; | ||
1595 | } | ||
1596 | |||
1597 | while (opindex < 4 && | ||
1598 | (op = sw842_tmpl_ops[tmpl][opindex++]) | ||
1599 | != NULL) { | ||
1600 | ret = (*op)(&inbuf, &inbit, &outbuf, fifo); | ||
1601 | if (ret) { | ||
1602 | ret = -EINVAL; | ||
1603 | goto out; | ||
1604 | } | ||
1605 | sw842_copy_to_fifo(origbuf, fifo); | ||
1606 | } | ||
1607 | } | ||
1608 | } | ||
1609 | |||
1610 | out: | ||
1611 | if (!ret) | ||
1612 | *destlen = (unsigned int)(outbuf - dst); | ||
1613 | else | ||
1614 | *destlen = 0; | ||
1615 | |||
1616 | return ret; | ||
1617 | } | ||
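A hypothetical caller of the software path, shown for orientation only; the buffer contents and workmem sizing are placeholders, and the real entry point is nx842_decompress above, which falls back to this routine:

static int demo_sw_decompress(const unsigned char *comp, int comp_len,
			      void *workmem)
{
	unsigned char out[4096];
	int outlen = sizeof(out);
	int ret;

	ret = sw842_decompress(comp, comp_len, out, &outlen, workmem);
	if (ret)
		return ret;	/* -EINVAL: bad stream, -ENOSPC: out too small */

	return outlen;		/* number of decompressed bytes */
}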
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index 69ed796ee327..a76d4c4f29f5 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c | |||
@@ -127,7 +127,6 @@ struct crypto_alg nx_cbc_aes_alg = { | |||
127 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 127 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
128 | .cra_type = &crypto_blkcipher_type, | 128 | .cra_type = &crypto_blkcipher_type, |
129 | .cra_module = THIS_MODULE, | 129 | .cra_module = THIS_MODULE, |
130 | .cra_list = LIST_HEAD_INIT(nx_cbc_aes_alg.cra_list), | ||
131 | .cra_init = nx_crypto_ctx_aes_cbc_init, | 130 | .cra_init = nx_crypto_ctx_aes_cbc_init, |
132 | .cra_exit = nx_crypto_ctx_exit, | 131 | .cra_exit = nx_crypto_ctx_exit, |
133 | .cra_blkcipher = { | 132 | .cra_blkcipher = { |
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 7aeac678b9c0..ef5eae6d1400 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c | |||
@@ -430,7 +430,6 @@ struct crypto_alg nx_ccm_aes_alg = { | |||
430 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 430 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
431 | .cra_type = &crypto_aead_type, | 431 | .cra_type = &crypto_aead_type, |
432 | .cra_module = THIS_MODULE, | 432 | .cra_module = THIS_MODULE, |
433 | .cra_list = LIST_HEAD_INIT(nx_ccm_aes_alg.cra_list), | ||
434 | .cra_init = nx_crypto_ctx_aes_ccm_init, | 433 | .cra_init = nx_crypto_ctx_aes_ccm_init, |
435 | .cra_exit = nx_crypto_ctx_exit, | 434 | .cra_exit = nx_crypto_ctx_exit, |
436 | .cra_aead = { | 435 | .cra_aead = { |
@@ -453,7 +452,6 @@ struct crypto_alg nx_ccm4309_aes_alg = { | |||
453 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 452 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
454 | .cra_type = &crypto_nivaead_type, | 453 | .cra_type = &crypto_nivaead_type, |
455 | .cra_module = THIS_MODULE, | 454 | .cra_module = THIS_MODULE, |
456 | .cra_list = LIST_HEAD_INIT(nx_ccm4309_aes_alg.cra_list), | ||
457 | .cra_init = nx_crypto_ctx_aes_ccm_init, | 455 | .cra_init = nx_crypto_ctx_aes_ccm_init, |
458 | .cra_exit = nx_crypto_ctx_exit, | 456 | .cra_exit = nx_crypto_ctx_exit, |
459 | .cra_aead = { | 457 | .cra_aead = { |
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index 52d4eb05e8f7..b6286f14680b 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c | |||
@@ -141,7 +141,6 @@ struct crypto_alg nx_ctr_aes_alg = { | |||
141 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 141 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
142 | .cra_type = &crypto_blkcipher_type, | 142 | .cra_type = &crypto_blkcipher_type, |
143 | .cra_module = THIS_MODULE, | 143 | .cra_module = THIS_MODULE, |
144 | .cra_list = LIST_HEAD_INIT(nx_ctr_aes_alg.cra_list), | ||
145 | .cra_init = nx_crypto_ctx_aes_ctr_init, | 144 | .cra_init = nx_crypto_ctx_aes_ctr_init, |
146 | .cra_exit = nx_crypto_ctx_exit, | 145 | .cra_exit = nx_crypto_ctx_exit, |
147 | .cra_blkcipher = { | 146 | .cra_blkcipher = { |
@@ -163,7 +162,6 @@ struct crypto_alg nx_ctr3686_aes_alg = { | |||
163 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 162 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
164 | .cra_type = &crypto_blkcipher_type, | 163 | .cra_type = &crypto_blkcipher_type, |
165 | .cra_module = THIS_MODULE, | 164 | .cra_module = THIS_MODULE, |
166 | .cra_list = LIST_HEAD_INIT(nx_ctr3686_aes_alg.cra_list), | ||
167 | .cra_init = nx_crypto_ctx_aes_ctr_init, | 165 | .cra_init = nx_crypto_ctx_aes_ctr_init, |
168 | .cra_exit = nx_crypto_ctx_exit, | 166 | .cra_exit = nx_crypto_ctx_exit, |
169 | .cra_blkcipher = { | 167 | .cra_blkcipher = { |
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 7b77bc2d1df4..ba5f1611336f 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c | |||
@@ -126,7 +126,6 @@ struct crypto_alg nx_ecb_aes_alg = { | |||
126 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 126 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
127 | .cra_type = &crypto_blkcipher_type, | 127 | .cra_type = &crypto_blkcipher_type, |
128 | .cra_module = THIS_MODULE, | 128 | .cra_module = THIS_MODULE, |
129 | .cra_list = LIST_HEAD_INIT(nx_ecb_aes_alg.cra_list), | ||
130 | .cra_init = nx_crypto_ctx_aes_ecb_init, | 129 | .cra_init = nx_crypto_ctx_aes_ecb_init, |
131 | .cra_exit = nx_crypto_ctx_exit, | 130 | .cra_exit = nx_crypto_ctx_exit, |
132 | .cra_blkcipher = { | 131 | .cra_blkcipher = { |
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 9ab1c7341dac..c8109edc5cfb 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c | |||
@@ -316,7 +316,6 @@ struct crypto_alg nx_gcm_aes_alg = { | |||
316 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 316 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
317 | .cra_type = &crypto_aead_type, | 317 | .cra_type = &crypto_aead_type, |
318 | .cra_module = THIS_MODULE, | 318 | .cra_module = THIS_MODULE, |
319 | .cra_list = LIST_HEAD_INIT(nx_gcm_aes_alg.cra_list), | ||
320 | .cra_init = nx_crypto_ctx_aes_gcm_init, | 319 | .cra_init = nx_crypto_ctx_aes_gcm_init, |
321 | .cra_exit = nx_crypto_ctx_exit, | 320 | .cra_exit = nx_crypto_ctx_exit, |
322 | .cra_aead = { | 321 | .cra_aead = { |
@@ -338,7 +337,6 @@ struct crypto_alg nx_gcm4106_aes_alg = { | |||
338 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 337 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
339 | .cra_type = &crypto_nivaead_type, | 338 | .cra_type = &crypto_nivaead_type, |
340 | .cra_module = THIS_MODULE, | 339 | .cra_module = THIS_MODULE, |
341 | .cra_list = LIST_HEAD_INIT(nx_gcm4106_aes_alg.cra_list), | ||
342 | .cra_init = nx_crypto_ctx_aes_gcm_init, | 340 | .cra_init = nx_crypto_ctx_aes_gcm_init, |
343 | .cra_exit = nx_crypto_ctx_exit, | 341 | .cra_exit = nx_crypto_ctx_exit, |
344 | .cra_aead = { | 342 | .cra_aead = { |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 63e57b57a12c..093a8af59cbe 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -876,7 +876,6 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
876 | 876 | ||
877 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | 877 | for (i = 0; i < ARRAY_SIZE(algs); i++) { |
878 | pr_debug("i: %d\n", i); | 878 | pr_debug("i: %d\n", i); |
879 | INIT_LIST_HEAD(&algs[i].cra_list); | ||
880 | err = crypto_register_alg(&algs[i]); | 879 | err = crypto_register_alg(&algs[i]); |
881 | if (err) | 880 | if (err) |
882 | goto err_algs; | 881 | goto err_algs; |
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 37b2e9406af6..633ba945e153 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -328,7 +328,6 @@ static struct crypto_alg aes_alg = { | |||
328 | .cra_ctxsize = sizeof(struct aes_ctx), | 328 | .cra_ctxsize = sizeof(struct aes_ctx), |
329 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, | 329 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, |
330 | .cra_module = THIS_MODULE, | 330 | .cra_module = THIS_MODULE, |
331 | .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), | ||
332 | .cra_u = { | 331 | .cra_u = { |
333 | .cipher = { | 332 | .cipher = { |
334 | .cia_min_keysize = AES_MIN_KEY_SIZE, | 333 | .cia_min_keysize = AES_MIN_KEY_SIZE, |
@@ -408,7 +407,6 @@ static struct crypto_alg ecb_aes_alg = { | |||
408 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, | 407 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, |
409 | .cra_type = &crypto_blkcipher_type, | 408 | .cra_type = &crypto_blkcipher_type, |
410 | .cra_module = THIS_MODULE, | 409 | .cra_module = THIS_MODULE, |
411 | .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), | ||
412 | .cra_u = { | 410 | .cra_u = { |
413 | .blkcipher = { | 411 | .blkcipher = { |
414 | .min_keysize = AES_MIN_KEY_SIZE, | 412 | .min_keysize = AES_MIN_KEY_SIZE, |
@@ -491,7 +489,6 @@ static struct crypto_alg cbc_aes_alg = { | |||
491 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, | 489 | .cra_alignmask = PADLOCK_ALIGNMENT - 1, |
492 | .cra_type = &crypto_blkcipher_type, | 490 | .cra_type = &crypto_blkcipher_type, |
493 | .cra_module = THIS_MODULE, | 491 | .cra_module = THIS_MODULE, |
494 | .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), | ||
495 | .cra_u = { | 492 | .cra_u = { |
496 | .blkcipher = { | 493 | .blkcipher = { |
497 | .min_keysize = AES_MIN_KEY_SIZE, | 494 | .min_keysize = AES_MIN_KEY_SIZE, |
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index bc986f806086..a22714412cda 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
@@ -626,7 +626,6 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
626 | crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN); | 626 | crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN); |
627 | 627 | ||
628 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | 628 | for (i = 0; i < ARRAY_SIZE(algs); i++) { |
629 | INIT_LIST_HEAD(&algs[i].cra_list); | ||
630 | err = crypto_register_alg(&algs[i]); | 629 | err = crypto_register_alg(&algs[i]); |
631 | if (err) | 630 | if (err) |
632 | goto err_algs; | 631 | goto err_algs; |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index efff788d2f1d..da1112765a44 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
39 | #include <linux/rtnetlink.h> | 39 | #include <linux/rtnetlink.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/string.h> | ||
41 | 42 | ||
42 | #include <crypto/algapi.h> | 43 | #include <crypto/algapi.h> |
43 | #include <crypto/aes.h> | 44 | #include <crypto/aes.h> |
@@ -714,8 +715,13 @@ badkey: | |||
714 | 715 | ||
715 | /* | 716 | /* |
716 | * talitos_edesc - s/w-extended descriptor | 717 | * talitos_edesc - s/w-extended descriptor |
718 | * @assoc_nents: number of segments in associated data scatterlist | ||
717 | * @src_nents: number of segments in input scatterlist | 719 | * @src_nents: number of segments in input scatterlist |
718 | * @dst_nents: number of segments in output scatterlist | 720 | * @dst_nents: number of segments in output scatterlist |
721 | * @assoc_chained: whether assoc is chained or not | ||
722 | * @src_chained: whether src is chained or not | ||
723 | * @dst_chained: whether dst is chained or not | ||
724 | * @iv_dma: dma address of iv for checking continuity and link table | ||
719 | * @dma_len: length of dma mapped link_tbl space | 725 | * @dma_len: length of dma mapped link_tbl space |
720 | * @dma_link_tbl: bus physical address of link_tbl | 726 | * @dma_link_tbl: bus physical address of link_tbl |
721 | * @desc: h/w descriptor | 727 | * @desc: h/w descriptor |
@@ -726,10 +732,13 @@ badkey: | |||
726 | * of link_tbl data | 732 | * of link_tbl data |
727 | */ | 733 | */ |
728 | struct talitos_edesc { | 734 | struct talitos_edesc { |
735 | int assoc_nents; | ||
729 | int src_nents; | 736 | int src_nents; |
730 | int dst_nents; | 737 | int dst_nents; |
731 | int src_is_chained; | 738 | bool assoc_chained; |
732 | int dst_is_chained; | 739 | bool src_chained; |
740 | bool dst_chained; | ||
741 | dma_addr_t iv_dma; | ||
733 | int dma_len; | 742 | int dma_len; |
734 | dma_addr_t dma_link_tbl; | 743 | dma_addr_t dma_link_tbl; |
735 | struct talitos_desc desc; | 744 | struct talitos_desc desc; |
@@ -738,7 +747,7 @@ struct talitos_edesc { | |||
738 | 747 | ||
739 | static int talitos_map_sg(struct device *dev, struct scatterlist *sg, | 748 | static int talitos_map_sg(struct device *dev, struct scatterlist *sg, |
740 | unsigned int nents, enum dma_data_direction dir, | 749 | unsigned int nents, enum dma_data_direction dir, |
741 | int chained) | 750 | bool chained) |
742 | { | 751 | { |
743 | if (unlikely(chained)) | 752 | if (unlikely(chained)) |
744 | while (sg) { | 753 | while (sg) { |
@@ -768,13 +777,13 @@ static void talitos_sg_unmap(struct device *dev, | |||
768 | unsigned int dst_nents = edesc->dst_nents ? : 1; | 777 | unsigned int dst_nents = edesc->dst_nents ? : 1; |
769 | 778 | ||
770 | if (src != dst) { | 779 | if (src != dst) { |
771 | if (edesc->src_is_chained) | 780 | if (edesc->src_chained) |
772 | talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE); | 781 | talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE); |
773 | else | 782 | else |
774 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); | 783 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); |
775 | 784 | ||
776 | if (dst) { | 785 | if (dst) { |
777 | if (edesc->dst_is_chained) | 786 | if (edesc->dst_chained) |
778 | talitos_unmap_sg_chain(dev, dst, | 787 | talitos_unmap_sg_chain(dev, dst, |
779 | DMA_FROM_DEVICE); | 788 | DMA_FROM_DEVICE); |
780 | else | 789 | else |
@@ -782,7 +791,7 @@ static void talitos_sg_unmap(struct device *dev, | |||
782 | DMA_FROM_DEVICE); | 791 | DMA_FROM_DEVICE); |
783 | } | 792 | } |
784 | } else | 793 | } else |
785 | if (edesc->src_is_chained) | 794 | if (edesc->src_chained) |
786 | talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); | 795 | talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); |
787 | else | 796 | else |
788 | dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); | 797 | dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); |
@@ -797,7 +806,13 @@ static void ipsec_esp_unmap(struct device *dev, | |||
797 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); | 806 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); |
798 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); | 807 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); |
799 | 808 | ||
800 | dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE); | 809 | if (edesc->assoc_chained) |
810 | talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); | ||
811 | else | ||
812 | /* assoc_nents counts also for IV in non-contiguous cases */ | ||
813 | dma_unmap_sg(dev, areq->assoc, | ||
814 | edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, | ||
815 | DMA_TO_DEVICE); | ||
801 | 816 | ||
802 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); | 817 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); |
803 | 818 | ||
@@ -825,9 +840,10 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
825 | ipsec_esp_unmap(dev, edesc, areq); | 840 | ipsec_esp_unmap(dev, edesc, areq); |
826 | 841 | ||
827 | /* copy the generated ICV to dst */ | 842 | /* copy the generated ICV to dst */ |
828 | if (edesc->dma_len) { | 843 | if (edesc->dst_nents) { |
829 | icvdata = &edesc->link_tbl[edesc->src_nents + | 844 | icvdata = &edesc->link_tbl[edesc->src_nents + |
830 | edesc->dst_nents + 2]; | 845 | edesc->dst_nents + 2 + |
846 | edesc->assoc_nents]; | ||
831 | sg = sg_last(areq->dst, edesc->dst_nents); | 847 | sg = sg_last(areq->dst, edesc->dst_nents); |
832 | memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, | 848 | memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, |
833 | icvdata, ctx->authsize); | 849 | icvdata, ctx->authsize); |
@@ -857,7 +873,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, | |||
857 | /* auth check */ | 873 | /* auth check */ |
858 | if (edesc->dma_len) | 874 | if (edesc->dma_len) |
859 | icvdata = &edesc->link_tbl[edesc->src_nents + | 875 | icvdata = &edesc->link_tbl[edesc->src_nents + |
860 | edesc->dst_nents + 2]; | 876 | edesc->dst_nents + 2 + |
877 | edesc->assoc_nents]; | ||
861 | else | 878 | else |
862 | icvdata = &edesc->link_tbl[0]; | 879 | icvdata = &edesc->link_tbl[0]; |
863 | 880 | ||
@@ -932,10 +949,9 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |||
932 | * fill in and submit ipsec_esp descriptor | 949 | * fill in and submit ipsec_esp descriptor |
933 | */ | 950 | */ |
934 | static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | 951 | static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, |
935 | u8 *giv, u64 seq, | 952 | u64 seq, void (*callback) (struct device *dev, |
936 | void (*callback) (struct device *dev, | 953 | struct talitos_desc *desc, |
937 | struct talitos_desc *desc, | 954 | void *context, int error)) |
938 | void *context, int error)) | ||
939 | { | 955 | { |
940 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 956 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); |
941 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); | 957 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); |
@@ -950,12 +966,42 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
950 | /* hmac key */ | 966 | /* hmac key */ |
951 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, | 967 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, |
952 | 0, DMA_TO_DEVICE); | 968 | 0, DMA_TO_DEVICE); |
969 | |||
953 | /* hmac data */ | 970 | /* hmac data */ |
954 | map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize, | 971 | desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); |
955 | sg_virt(areq->assoc), 0, DMA_TO_DEVICE); | 972 | if (edesc->assoc_nents) { |
973 | int tbl_off = edesc->src_nents + edesc->dst_nents + 2; | ||
974 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; | ||
975 | |||
976 | to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * | ||
977 | sizeof(struct talitos_ptr)); | ||
978 | desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; | ||
979 | |||
980 | /* assoc_nents - 1 entries for assoc, 1 for IV */ | ||
981 | sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1, | ||
982 | areq->assoclen, tbl_ptr); | ||
983 | |||
984 | /* add IV to link table */ | ||
985 | tbl_ptr += sg_count - 1; | ||
986 | tbl_ptr->j_extent = 0; | ||
987 | tbl_ptr++; | ||
988 | to_talitos_ptr(tbl_ptr, edesc->iv_dma); | ||
989 | tbl_ptr->len = cpu_to_be16(ivsize); | ||
990 | tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; | ||
991 | |||
992 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | ||
993 | edesc->dma_len, DMA_BIDIRECTIONAL); | ||
994 | } else { | ||
995 | to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc)); | ||
996 | desc->ptr[1].j_extent = 0; | ||
997 | } | ||
998 | |||
956 | /* cipher iv */ | 999 | /* cipher iv */ |
957 | map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, | 1000 | to_talitos_ptr(&desc->ptr[2], edesc->iv_dma); |
958 | DMA_TO_DEVICE); | 1001 | desc->ptr[2].len = cpu_to_be16(ivsize); |
1002 | desc->ptr[2].j_extent = 0; | ||
1003 | /* Sync needed for the aead_givencrypt case */ | ||
1004 | dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); | ||
959 | 1005 | ||
960 | /* cipher key */ | 1006 | /* cipher key */ |
961 | map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, | 1007 | map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, |
@@ -974,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
974 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, | 1020 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, |
975 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL | 1021 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL |
976 | : DMA_TO_DEVICE, | 1022 | : DMA_TO_DEVICE, |
977 | edesc->src_is_chained); | 1023 | edesc->src_chained); |
978 | 1024 | ||
979 | if (sg_count == 1) { | 1025 | if (sg_count == 1) { |
980 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); | 1026 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); |
@@ -1006,32 +1052,30 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1006 | if (areq->src != areq->dst) | 1052 | if (areq->src != areq->dst) |
1007 | sg_count = talitos_map_sg(dev, areq->dst, | 1053 | sg_count = talitos_map_sg(dev, areq->dst, |
1008 | edesc->dst_nents ? : 1, | 1054 | edesc->dst_nents ? : 1, |
1009 | DMA_FROM_DEVICE, | 1055 | DMA_FROM_DEVICE, edesc->dst_chained); |
1010 | edesc->dst_is_chained); | ||
1011 | 1056 | ||
1012 | if (sg_count == 1) { | 1057 | if (sg_count == 1) { |
1013 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); | 1058 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); |
1014 | } else { | 1059 | } else { |
1015 | struct talitos_ptr *link_tbl_ptr = | 1060 | int tbl_off = edesc->src_nents + 1; |
1016 | &edesc->link_tbl[edesc->src_nents + 1]; | 1061 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; |
1017 | 1062 | ||
1018 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + | 1063 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + |
1019 | (edesc->src_nents + 1) * | 1064 | tbl_off * sizeof(struct talitos_ptr)); |
1020 | sizeof(struct talitos_ptr)); | ||
1021 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | 1065 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, |
1022 | link_tbl_ptr); | 1066 | tbl_ptr); |
1023 | 1067 | ||
1024 | /* Add an entry to the link table for ICV data */ | 1068 | /* Add an entry to the link table for ICV data */ |
1025 | link_tbl_ptr += sg_count - 1; | 1069 | tbl_ptr += sg_count - 1; |
1026 | link_tbl_ptr->j_extent = 0; | 1070 | tbl_ptr->j_extent = 0; |
1027 | sg_count++; | 1071 | tbl_ptr++; |
1028 | link_tbl_ptr++; | 1072 | tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; |
1029 | link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; | 1073 | tbl_ptr->len = cpu_to_be16(authsize); |
1030 | link_tbl_ptr->len = cpu_to_be16(authsize); | ||
1031 | 1074 | ||
1032 | /* icv data follows link tables */ | 1075 | /* icv data follows link tables */ |
1033 | to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl + | 1076 | to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + |
1034 | (edesc->src_nents + edesc->dst_nents + 2) * | 1077 | (tbl_off + edesc->dst_nents + 1 + |
1078 | edesc->assoc_nents) * | ||
1035 | sizeof(struct talitos_ptr)); | 1079 | sizeof(struct talitos_ptr)); |
1036 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1080 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1037 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1081 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
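With associated data now occupying the front of the link table, the destination entries start at a computed offset and the ICV stash moves past the assoc entries as well. A standalone sketch of the offset arithmetic used above (units are struct talitos_ptr entries; the helper names are illustrative, not driver API):

/* dst entries follow the src entries plus one return/terminator slot */
unsigned int dst_tbl_off(unsigned int src_nents)
{
        return src_nents + 1;
}

/* the stashed ICV follows src, dst and assoc entries plus the two
 * terminator slots: the same src + dst + 2 + assoc total used to size
 * the table in talitos_edesc_alloc() below */
unsigned int icv_entry_off(unsigned int src_nents, unsigned int dst_nents,
                           unsigned int assoc_nents)
{
        return src_nents + dst_nents + 2 + assoc_nents;
}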
@@ -1053,17 +1097,17 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1053 | /* | 1097 | /* |
1054 | * derive number of elements in scatterlist | 1098 | * derive number of elements in scatterlist |
1055 | */ | 1099 | */ |
1056 | static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) | 1100 | static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) |
1057 | { | 1101 | { |
1058 | struct scatterlist *sg = sg_list; | 1102 | struct scatterlist *sg = sg_list; |
1059 | int sg_nents = 0; | 1103 | int sg_nents = 0; |
1060 | 1104 | ||
1061 | *chained = 0; | 1105 | *chained = false; |
1062 | while (nbytes > 0) { | 1106 | while (nbytes > 0) { |
1063 | sg_nents++; | 1107 | sg_nents++; |
1064 | nbytes -= sg->length; | 1108 | nbytes -= sg->length; |
1065 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | 1109 | if (!sg_is_last(sg) && (sg + 1)->length == 0) |
1066 | *chained = 1; | 1110 | *chained = true; |
1067 | sg = scatterwalk_sg_next(sg); | 1111 | sg = scatterwalk_sg_next(sg); |
1068 | } | 1112 | } |
1069 | 1113 | ||
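Only the type of the chained flag changes here (int to bool); the detection rule is untouched: a list counts as chained when an entry that is not the last one is followed by a zero-length entry, which is how chained scatterlists present themselves to scatterwalk_sg_next(). A short usage sketch (hypothetical caller):

bool chained;
int nents;

/* count the entries covering the payload and learn whether the list is
 * chained, which selects the mapping variant inside talitos_map_sg() */
nents = sg_count(areq->src, cryptlen + authsize, &chained);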
@@ -1132,17 +1176,21 @@ static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents, | |||
1132 | * allocate and map the extended descriptor | 1176 | * allocate and map the extended descriptor |
1133 | */ | 1177 | */ |
1134 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | 1178 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1179 | struct scatterlist *assoc, | ||
1135 | struct scatterlist *src, | 1180 | struct scatterlist *src, |
1136 | struct scatterlist *dst, | 1181 | struct scatterlist *dst, |
1137 | int hash_result, | 1182 | u8 *iv, |
1183 | unsigned int assoclen, | ||
1138 | unsigned int cryptlen, | 1184 | unsigned int cryptlen, |
1139 | unsigned int authsize, | 1185 | unsigned int authsize, |
1186 | unsigned int ivsize, | ||
1140 | int icv_stashing, | 1187 | int icv_stashing, |
1141 | u32 cryptoflags) | 1188 | u32 cryptoflags) |
1142 | { | 1189 | { |
1143 | struct talitos_edesc *edesc; | 1190 | struct talitos_edesc *edesc; |
1144 | int src_nents, dst_nents, alloc_len, dma_len; | 1191 | int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; |
1145 | int src_chained, dst_chained = 0; | 1192 | bool assoc_chained = false, src_chained = false, dst_chained = false; |
1193 | dma_addr_t iv_dma = 0; | ||
1146 | gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1194 | gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1147 | GFP_ATOMIC; | 1195 | GFP_ATOMIC; |
1148 | 1196 | ||
@@ -1151,10 +1199,29 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1151 | return ERR_PTR(-EINVAL); | 1199 | return ERR_PTR(-EINVAL); |
1152 | } | 1200 | } |
1153 | 1201 | ||
1202 | if (iv) | ||
1203 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | ||
1204 | |||
1205 | if (assoc) { | ||
1206 | /* | ||
1207 | * Currently it is assumed that iv is provided whenever assoc | ||
1208 | * is. | ||
1209 | */ | ||
1210 | BUG_ON(!iv); | ||
1211 | |||
1212 | assoc_nents = sg_count(assoc, assoclen, &assoc_chained); | ||
1213 | talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE, | ||
1214 | assoc_chained); | ||
1215 | assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents; | ||
1216 | |||
1217 | if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma) | ||
1218 | assoc_nents = assoc_nents ? assoc_nents + 1 : 2; | ||
1219 | } | ||
1220 | |||
1154 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); | 1221 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); |
1155 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1222 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1156 | 1223 | ||
1157 | if (hash_result) { | 1224 | if (!dst) { |
1158 | dst_nents = 0; | 1225 | dst_nents = 0; |
1159 | } else { | 1226 | } else { |
1160 | if (dst == src) { | 1227 | if (dst == src) { |
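The assoc accounting above is the subtle part of the allocator change. After mapping, a single-segment assoc list is normalized to assoc_nents = 0; the following test then bumps the count back up whenever the hardware cannot consume the assoc data and IV as one contiguous region, reserving one extra link-table slot for the IV. A standalone restatement of that decision (illustrative helper, not driver API; assoc_end stands for sg_dma_address(assoc) + assoclen as in the code above):

/* returns the number of link-table entries the assoc data needs;
 * 0 means a direct pointer suffices */
int assoc_table_entries(int nents, dma_addr_t assoc_end, dma_addr_t iv_dma)
{
        if (nents == 1 && assoc_end == iv_dma)
                return 0;         /* assoc and IV are contiguous          */
        if (nents == 1)
                return 2;         /* one assoc entry plus one IV entry    */
        return nents + 1;         /* every assoc entry plus one IV entry  */
}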
@@ -1172,9 +1239,9 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1172 | * and the ICV data itself | 1239 | * and the ICV data itself |
1173 | */ | 1240 | */ |
1174 | alloc_len = sizeof(struct talitos_edesc); | 1241 | alloc_len = sizeof(struct talitos_edesc); |
1175 | if (src_nents || dst_nents) { | 1242 | if (assoc_nents || src_nents || dst_nents) { |
1176 | dma_len = (src_nents + dst_nents + 2) * | 1243 | dma_len = (src_nents + dst_nents + 2 + assoc_nents) * |
1177 | sizeof(struct talitos_ptr) + authsize; | 1244 | sizeof(struct talitos_ptr) + authsize; |
1178 | alloc_len += dma_len; | 1245 | alloc_len += dma_len; |
1179 | } else { | 1246 | } else { |
1180 | dma_len = 0; | 1247 | dma_len = 0; |
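The scratch sizing grows accordingly: the link table now reserves one slot per assoc entry on top of the previous src + dst + 2 layout, with the trailing authsize bytes still serving as the ICV stash. A compact restatement (sizes in bytes; assumes the driver's struct talitos_ptr):

size_t talitos_dma_len(int src, int dst, int assoc, unsigned int authsize)
{
        return (src + dst + 2 + assoc) * sizeof(struct talitos_ptr)
                + authsize;
}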
@@ -1183,14 +1250,20 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1183 | 1250 | ||
1184 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1251 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
1185 | if (!edesc) { | 1252 | if (!edesc) { |
1253 | talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); | ||
1254 | if (iv_dma) | ||
1255 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | ||
1186 | dev_err(dev, "could not allocate edescriptor\n"); | 1256 | dev_err(dev, "could not allocate edescriptor\n"); |
1187 | return ERR_PTR(-ENOMEM); | 1257 | return ERR_PTR(-ENOMEM); |
1188 | } | 1258 | } |
1189 | 1259 | ||
1260 | edesc->assoc_nents = assoc_nents; | ||
1190 | edesc->src_nents = src_nents; | 1261 | edesc->src_nents = src_nents; |
1191 | edesc->dst_nents = dst_nents; | 1262 | edesc->dst_nents = dst_nents; |
1192 | edesc->src_is_chained = src_chained; | 1263 | edesc->assoc_chained = assoc_chained; |
1193 | edesc->dst_is_chained = dst_chained; | 1264 | edesc->src_chained = src_chained; |
1265 | edesc->dst_chained = dst_chained; | ||
1266 | edesc->iv_dma = iv_dma; | ||
1194 | edesc->dma_len = dma_len; | 1267 | edesc->dma_len = dma_len; |
1195 | if (dma_len) | 1268 | if (dma_len) |
1196 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], | 1269 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], |
@@ -1200,14 +1273,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1200 | return edesc; | 1273 | return edesc; |
1201 | } | 1274 | } |
1202 | 1275 | ||
1203 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, | 1276 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, |
1204 | int icv_stashing) | 1277 | int icv_stashing) |
1205 | { | 1278 | { |
1206 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1279 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
1207 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1280 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1281 | unsigned int ivsize = crypto_aead_ivsize(authenc); | ||
1208 | 1282 | ||
1209 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0, | 1283 | return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, |
1210 | areq->cryptlen, ctx->authsize, icv_stashing, | 1284 | iv, areq->assoclen, areq->cryptlen, |
1285 | ctx->authsize, ivsize, icv_stashing, | ||
1211 | areq->base.flags); | 1286 | areq->base.flags); |
1212 | } | 1287 | } |
1213 | 1288 | ||
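All three request types now funnel through the one allocator, and the per-type wrappers differ only in which arguments are real. Summarized from the three call sites in this diff (the ablkcipher and ahash wrappers appear further down; illustrative layout):

/*               assoc        src            dst        iv           ivsize */
/* AEAD:         areq->assoc  areq->src      areq->dst  req->iv/giv  aead   */
/* ablkcipher:   NULL         areq->src      areq->dst  areq->info   cipher */
/* ahash:        NULL         req_ctx->psrc  NULL       NULL         0      */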
@@ -1218,14 +1293,14 @@ static int aead_encrypt(struct aead_request *req) | |||
1218 | struct talitos_edesc *edesc; | 1293 | struct talitos_edesc *edesc; |
1219 | 1294 | ||
1220 | /* allocate extended descriptor */ | 1295 | /* allocate extended descriptor */ |
1221 | edesc = aead_edesc_alloc(req, 0); | 1296 | edesc = aead_edesc_alloc(req, req->iv, 0); |
1222 | if (IS_ERR(edesc)) | 1297 | if (IS_ERR(edesc)) |
1223 | return PTR_ERR(edesc); | 1298 | return PTR_ERR(edesc); |
1224 | 1299 | ||
1225 | /* set encrypt */ | 1300 | /* set encrypt */ |
1226 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; | 1301 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; |
1227 | 1302 | ||
1228 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); | 1303 | return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done); |
1229 | } | 1304 | } |
1230 | 1305 | ||
1231 | static int aead_decrypt(struct aead_request *req) | 1306 | static int aead_decrypt(struct aead_request *req) |
@@ -1241,7 +1316,7 @@ static int aead_decrypt(struct aead_request *req) | |||
1241 | req->cryptlen -= authsize; | 1316 | req->cryptlen -= authsize; |
1242 | 1317 | ||
1243 | /* allocate extended descriptor */ | 1318 | /* allocate extended descriptor */ |
1244 | edesc = aead_edesc_alloc(req, 1); | 1319 | edesc = aead_edesc_alloc(req, req->iv, 1); |
1245 | if (IS_ERR(edesc)) | 1320 | if (IS_ERR(edesc)) |
1246 | return PTR_ERR(edesc); | 1321 | return PTR_ERR(edesc); |
1247 | 1322 | ||
@@ -1257,9 +1332,7 @@ static int aead_decrypt(struct aead_request *req) | |||
1257 | /* reset integrity check result bits */ | 1332 | /* reset integrity check result bits */ |
1258 | edesc->desc.hdr_lo = 0; | 1333 | edesc->desc.hdr_lo = 0; |
1259 | 1334 | ||
1260 | return ipsec_esp(edesc, req, NULL, 0, | 1335 | return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done); |
1261 | ipsec_esp_decrypt_hwauth_done); | ||
1262 | |||
1263 | } | 1336 | } |
1264 | 1337 | ||
1265 | /* Have to check the ICV with software */ | 1338 | /* Have to check the ICV with software */ |
@@ -1268,7 +1341,8 @@ static int aead_decrypt(struct aead_request *req) | |||
1268 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ | 1341 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ |
1269 | if (edesc->dma_len) | 1342 | if (edesc->dma_len) |
1270 | icvdata = &edesc->link_tbl[edesc->src_nents + | 1343 | icvdata = &edesc->link_tbl[edesc->src_nents + |
1271 | edesc->dst_nents + 2]; | 1344 | edesc->dst_nents + 2 + |
1345 | edesc->assoc_nents]; | ||
1272 | else | 1346 | else |
1273 | icvdata = &edesc->link_tbl[0]; | 1347 | icvdata = &edesc->link_tbl[0]; |
1274 | 1348 | ||
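The software-ICV fallback keeps working the same way; only the stash location shifts past the new assoc entries (the same src + dst + 2 + assoc offset sketched earlier). For context, the stashed bytes are what the completion callback later compares against the device-produced ICV, roughly (illustrative; the callback itself is outside this hunk):

if (memcmp(stashed_icv, generated_icv, authsize))
        err = -EBADMSG;        /* authentication failed */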
@@ -1277,7 +1351,7 @@ static int aead_decrypt(struct aead_request *req) | |||
1277 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, | 1351 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, |
1278 | ctx->authsize); | 1352 | ctx->authsize); |
1279 | 1353 | ||
1280 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); | 1354 | return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done); |
1281 | } | 1355 | } |
1282 | 1356 | ||
1283 | static int aead_givencrypt(struct aead_givcrypt_request *req) | 1357 | static int aead_givencrypt(struct aead_givcrypt_request *req) |
@@ -1288,7 +1362,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req) | |||
1288 | struct talitos_edesc *edesc; | 1362 | struct talitos_edesc *edesc; |
1289 | 1363 | ||
1290 | /* allocate extended descriptor */ | 1364 | /* allocate extended descriptor */ |
1291 | edesc = aead_edesc_alloc(areq, 0); | 1365 | edesc = aead_edesc_alloc(areq, req->giv, 0); |
1292 | if (IS_ERR(edesc)) | 1366 | if (IS_ERR(edesc)) |
1293 | return PTR_ERR(edesc); | 1367 | return PTR_ERR(edesc); |
1294 | 1368 | ||
@@ -1299,8 +1373,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req) | |||
1299 | /* avoid consecutive packets going out with same IV */ | 1373 | /* avoid consecutive packets going out with same IV */ |
1300 | *(__be64 *)req->giv ^= cpu_to_be64(req->seq); | 1374 | *(__be64 *)req->giv ^= cpu_to_be64(req->seq); |
1301 | 1375 | ||
1302 | return ipsec_esp(edesc, areq, req->giv, req->seq, | 1376 | return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done); |
1303 | ipsec_esp_encrypt_done); | ||
1304 | } | 1377 | } |
1305 | 1378 | ||
1306 | static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | 1379 | static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, |
@@ -1356,7 +1429,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1356 | struct device *dev = ctx->dev; | 1429 | struct device *dev = ctx->dev; |
1357 | struct talitos_desc *desc = &edesc->desc; | 1430 | struct talitos_desc *desc = &edesc->desc; |
1358 | unsigned int cryptlen = areq->nbytes; | 1431 | unsigned int cryptlen = areq->nbytes; |
1359 | unsigned int ivsize; | 1432 | unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); |
1360 | int sg_count, ret; | 1433 | int sg_count, ret; |
1361 | 1434 | ||
1362 | /* first DWORD empty */ | 1435 | /* first DWORD empty */ |
@@ -1365,9 +1438,9 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1365 | desc->ptr[0].j_extent = 0; | 1438 | desc->ptr[0].j_extent = 0; |
1366 | 1439 | ||
1367 | /* cipher iv */ | 1440 | /* cipher iv */ |
1368 | ivsize = crypto_ablkcipher_ivsize(cipher); | 1441 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); |
1369 | map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0, | 1442 | desc->ptr[1].len = cpu_to_be16(ivsize); |
1370 | DMA_TO_DEVICE); | 1443 | desc->ptr[1].j_extent = 0; |
1371 | 1444 | ||
1372 | /* cipher key */ | 1445 | /* cipher key */ |
1373 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | 1446 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, |
@@ -1382,7 +1455,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1382 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, | 1455 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, |
1383 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL | 1456 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL |
1384 | : DMA_TO_DEVICE, | 1457 | : DMA_TO_DEVICE, |
1385 | edesc->src_is_chained); | 1458 | edesc->src_chained); |
1386 | 1459 | ||
1387 | if (sg_count == 1) { | 1460 | if (sg_count == 1) { |
1388 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); | 1461 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); |
@@ -1409,8 +1482,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1409 | if (areq->src != areq->dst) | 1482 | if (areq->src != areq->dst) |
1410 | sg_count = talitos_map_sg(dev, areq->dst, | 1483 | sg_count = talitos_map_sg(dev, areq->dst, |
1411 | edesc->dst_nents ? : 1, | 1484 | edesc->dst_nents ? : 1, |
1412 | DMA_FROM_DEVICE, | 1485 | DMA_FROM_DEVICE, edesc->dst_chained); |
1413 | edesc->dst_is_chained); | ||
1414 | 1486 | ||
1415 | if (sg_count == 1) { | 1487 | if (sg_count == 1) { |
1416 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); | 1488 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); |
@@ -1450,9 +1522,11 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * | |||
1450 | { | 1522 | { |
1451 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | 1523 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
1452 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1524 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1525 | unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); | ||
1453 | 1526 | ||
1454 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0, | 1527 | return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, |
1455 | areq->nbytes, 0, 0, areq->base.flags); | 1528 | areq->info, 0, areq->nbytes, 0, ivsize, 0, |
1529 | areq->base.flags); | ||
1456 | } | 1530 | } |
1457 | 1531 | ||
1458 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) | 1532 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) |
@@ -1578,8 +1652,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1578 | 1652 | ||
1579 | sg_count = talitos_map_sg(dev, req_ctx->psrc, | 1653 | sg_count = talitos_map_sg(dev, req_ctx->psrc, |
1580 | edesc->src_nents ? : 1, | 1654 | edesc->src_nents ? : 1, |
1581 | DMA_TO_DEVICE, | 1655 | DMA_TO_DEVICE, edesc->src_chained); |
1582 | edesc->src_is_chained); | ||
1583 | 1656 | ||
1584 | if (sg_count == 1) { | 1657 | if (sg_count == 1) { |
1585 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); | 1658 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); |
@@ -1631,8 +1704,8 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, | |||
1631 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | 1704 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); |
1632 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1705 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1633 | 1706 | ||
1634 | return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1, | 1707 | return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, |
1635 | nbytes, 0, 0, areq->base.flags); | 1708 | nbytes, 0, 0, 0, areq->base.flags); |
1636 | } | 1709 | } |
1637 | 1710 | ||
1638 | static int ahash_init(struct ahash_request *areq) | 1711 | static int ahash_init(struct ahash_request *areq) |
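The old hash_result flag is gone from the allocator: passing dst = NULL now directly encodes "the digest goes to a separately mapped buffer, no destination scatterlist", which is what the !dst branch in talitos_edesc_alloc() turns into dst_nents = 0. The resulting branch structure, roughly (illustrative):

if (!dst)
        dst_nents = 0;              /* ahash: no destination list    */
else if (dst == src)
        dst_nents = src_nents;      /* in-place: reuse the src count */
else
        dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);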
@@ -1690,7 +1763,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1690 | unsigned int nbytes_to_hash; | 1763 | unsigned int nbytes_to_hash; |
1691 | unsigned int to_hash_later; | 1764 | unsigned int to_hash_later; |
1692 | unsigned int nsg; | 1765 | unsigned int nsg; |
1693 | int chained; | 1766 | bool chained; |
1694 | 1767 | ||
1695 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { | 1768 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { |
1696 | /* Buffer up to one whole block */ | 1769 | /* Buffer up to one whole block */ |
@@ -1902,21 +1975,18 @@ struct talitos_alg_template { | |||
1902 | }; | 1975 | }; |
1903 | 1976 | ||
1904 | static struct talitos_alg_template driver_algs[] = { | 1977 | static struct talitos_alg_template driver_algs[] = { |
1905 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ | 1978 | /* |
1979 | * AEAD algorithms. These use a single-pass ipsec_esp descriptor. | ||
1980 | * authencesn(*,*) variants are also registered, although they are | ||
1981 | * not listed explicitly here. | ||
1982 | */ | ||
1906 | { .type = CRYPTO_ALG_TYPE_AEAD, | 1983 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1907 | .alg.crypto = { | 1984 | .alg.crypto = { |
1908 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1985 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
1909 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", | 1986 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", |
1910 | .cra_blocksize = AES_BLOCK_SIZE, | 1987 | .cra_blocksize = AES_BLOCK_SIZE, |
1911 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 1988 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1912 | .cra_type = &crypto_aead_type, | ||
1913 | .cra_aead = { | 1989 | .cra_aead = { |
1914 | .setkey = aead_setkey, | ||
1915 | .setauthsize = aead_setauthsize, | ||
1916 | .encrypt = aead_encrypt, | ||
1917 | .decrypt = aead_decrypt, | ||
1918 | .givencrypt = aead_givencrypt, | ||
1919 | .geniv = "<built-in>", | ||
1920 | .ivsize = AES_BLOCK_SIZE, | 1990 | .ivsize = AES_BLOCK_SIZE, |
1921 | .maxauthsize = SHA1_DIGEST_SIZE, | 1991 | .maxauthsize = SHA1_DIGEST_SIZE, |
1922 | } | 1992 | } |
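The long run of removals that follows is one mechanical change repeated per entry: every template carried identical .cra_type, callback and geniv assignments, and those move into talitos_alg_alloc() near the end of this section, leaving the table as pure per-algorithm data (names, block/IV/digest sizes, descriptor header). The shape of the refactor in a generic sketch (illustrative names only, not driver code):

struct my_alg *my_alg_alloc(const struct my_template *tmpl)
{
        struct my_alg *a = kzalloc(sizeof(*a), GFP_KERNEL);

        if (!a)
                return NULL;
        a->data = tmpl->data;    /* per-entry data from the table   */
        a->ops  = &shared_ops;   /* identical for every table entry */
        return a;
}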
@@ -1935,14 +2005,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
1935 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", | 2005 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", |
1936 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2006 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1937 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2007 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1938 | .cra_type = &crypto_aead_type, | ||
1939 | .cra_aead = { | 2008 | .cra_aead = { |
1940 | .setkey = aead_setkey, | ||
1941 | .setauthsize = aead_setauthsize, | ||
1942 | .encrypt = aead_encrypt, | ||
1943 | .decrypt = aead_decrypt, | ||
1944 | .givencrypt = aead_givencrypt, | ||
1945 | .geniv = "<built-in>", | ||
1946 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2009 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1947 | .maxauthsize = SHA1_DIGEST_SIZE, | 2010 | .maxauthsize = SHA1_DIGEST_SIZE, |
1948 | } | 2011 | } |
@@ -1962,14 +2025,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
1962 | .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos", | 2025 | .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos", |
1963 | .cra_blocksize = AES_BLOCK_SIZE, | 2026 | .cra_blocksize = AES_BLOCK_SIZE, |
1964 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2027 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1965 | .cra_type = &crypto_aead_type, | ||
1966 | .cra_aead = { | 2028 | .cra_aead = { |
1967 | .setkey = aead_setkey, | ||
1968 | .setauthsize = aead_setauthsize, | ||
1969 | .encrypt = aead_encrypt, | ||
1970 | .decrypt = aead_decrypt, | ||
1971 | .givencrypt = aead_givencrypt, | ||
1972 | .geniv = "<built-in>", | ||
1973 | .ivsize = AES_BLOCK_SIZE, | 2029 | .ivsize = AES_BLOCK_SIZE, |
1974 | .maxauthsize = SHA224_DIGEST_SIZE, | 2030 | .maxauthsize = SHA224_DIGEST_SIZE, |
1975 | } | 2031 | } |
@@ -1988,14 +2044,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
1988 | .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos", | 2044 | .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos", |
1989 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2045 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1990 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2046 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1991 | .cra_type = &crypto_aead_type, | ||
1992 | .cra_aead = { | 2047 | .cra_aead = { |
1993 | .setkey = aead_setkey, | ||
1994 | .setauthsize = aead_setauthsize, | ||
1995 | .encrypt = aead_encrypt, | ||
1996 | .decrypt = aead_decrypt, | ||
1997 | .givencrypt = aead_givencrypt, | ||
1998 | .geniv = "<built-in>", | ||
1999 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2048 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2000 | .maxauthsize = SHA224_DIGEST_SIZE, | 2049 | .maxauthsize = SHA224_DIGEST_SIZE, |
2001 | } | 2050 | } |
@@ -2015,14 +2064,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2015 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", | 2064 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", |
2016 | .cra_blocksize = AES_BLOCK_SIZE, | 2065 | .cra_blocksize = AES_BLOCK_SIZE, |
2017 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2066 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
2018 | .cra_type = &crypto_aead_type, | ||
2019 | .cra_aead = { | 2067 | .cra_aead = { |
2020 | .setkey = aead_setkey, | ||
2021 | .setauthsize = aead_setauthsize, | ||
2022 | .encrypt = aead_encrypt, | ||
2023 | .decrypt = aead_decrypt, | ||
2024 | .givencrypt = aead_givencrypt, | ||
2025 | .geniv = "<built-in>", | ||
2026 | .ivsize = AES_BLOCK_SIZE, | 2068 | .ivsize = AES_BLOCK_SIZE, |
2027 | .maxauthsize = SHA256_DIGEST_SIZE, | 2069 | .maxauthsize = SHA256_DIGEST_SIZE, |
2028 | } | 2070 | } |
@@ -2041,14 +2083,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2041 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", | 2083 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", |
2042 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2084 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2043 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2085 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
2044 | .cra_type = &crypto_aead_type, | ||
2045 | .cra_aead = { | 2086 | .cra_aead = { |
2046 | .setkey = aead_setkey, | ||
2047 | .setauthsize = aead_setauthsize, | ||
2048 | .encrypt = aead_encrypt, | ||
2049 | .decrypt = aead_decrypt, | ||
2050 | .givencrypt = aead_givencrypt, | ||
2051 | .geniv = "<built-in>", | ||
2052 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2087 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2053 | .maxauthsize = SHA256_DIGEST_SIZE, | 2088 | .maxauthsize = SHA256_DIGEST_SIZE, |
2054 | } | 2089 | } |
@@ -2068,14 +2103,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2068 | .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos", | 2103 | .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos", |
2069 | .cra_blocksize = AES_BLOCK_SIZE, | 2104 | .cra_blocksize = AES_BLOCK_SIZE, |
2070 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2105 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
2071 | .cra_type = &crypto_aead_type, | ||
2072 | .cra_aead = { | 2106 | .cra_aead = { |
2073 | .setkey = aead_setkey, | ||
2074 | .setauthsize = aead_setauthsize, | ||
2075 | .encrypt = aead_encrypt, | ||
2076 | .decrypt = aead_decrypt, | ||
2077 | .givencrypt = aead_givencrypt, | ||
2078 | .geniv = "<built-in>", | ||
2079 | .ivsize = AES_BLOCK_SIZE, | 2107 | .ivsize = AES_BLOCK_SIZE, |
2080 | .maxauthsize = SHA384_DIGEST_SIZE, | 2108 | .maxauthsize = SHA384_DIGEST_SIZE, |
2081 | } | 2109 | } |
@@ -2094,14 +2122,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2094 | .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos", | 2122 | .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos", |
2095 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2123 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2096 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2124 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
2097 | .cra_type = &crypto_aead_type, | ||
2098 | .cra_aead = { | 2125 | .cra_aead = { |
2099 | .setkey = aead_setkey, | ||
2100 | .setauthsize = aead_setauthsize, | ||
2101 | .encrypt = aead_encrypt, | ||
2102 | .decrypt = aead_decrypt, | ||
2103 | .givencrypt = aead_givencrypt, | ||
2104 | .geniv = "<built-in>", | ||
2105 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2126 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2106 | .maxauthsize = SHA384_DIGEST_SIZE, | 2127 | .maxauthsize = SHA384_DIGEST_SIZE, |
2107 | } | 2128 | } |
@@ -2121,14 +2142,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2121 | .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos", | 2142 | .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos", |
2122 | .cra_blocksize = AES_BLOCK_SIZE, | 2143 | .cra_blocksize = AES_BLOCK_SIZE, |
2123 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2144 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
2124 | .cra_type = &crypto_aead_type, | ||
2125 | .cra_aead = { | 2145 | .cra_aead = { |
2126 | .setkey = aead_setkey, | ||
2127 | .setauthsize = aead_setauthsize, | ||
2128 | .encrypt = aead_encrypt, | ||
2129 | .decrypt = aead_decrypt, | ||
2130 | .givencrypt = aead_givencrypt, | ||
2131 | .geniv = "<built-in>", | ||
2132 | .ivsize = AES_BLOCK_SIZE, | 2146 | .ivsize = AES_BLOCK_SIZE, |
2133 | .maxauthsize = SHA512_DIGEST_SIZE, | 2147 | .maxauthsize = SHA512_DIGEST_SIZE, |
2134 | } | 2148 | } |
@@ -2147,14 +2161,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2147 | .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos", | 2161 | .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos", |
2148 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2162 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2149 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2163 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
2150 | .cra_type = &crypto_aead_type, | ||
2151 | .cra_aead = { | 2164 | .cra_aead = { |
2152 | .setkey = aead_setkey, | ||
2153 | .setauthsize = aead_setauthsize, | ||
2154 | .encrypt = aead_encrypt, | ||
2155 | .decrypt = aead_decrypt, | ||
2156 | .givencrypt = aead_givencrypt, | ||
2157 | .geniv = "<built-in>", | ||
2158 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2165 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2159 | .maxauthsize = SHA512_DIGEST_SIZE, | 2166 | .maxauthsize = SHA512_DIGEST_SIZE, |
2160 | } | 2167 | } |
@@ -2174,14 +2181,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2174 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", | 2181 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", |
2175 | .cra_blocksize = AES_BLOCK_SIZE, | 2182 | .cra_blocksize = AES_BLOCK_SIZE, |
2176 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2183 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
2177 | .cra_type = &crypto_aead_type, | ||
2178 | .cra_aead = { | 2184 | .cra_aead = { |
2179 | .setkey = aead_setkey, | ||
2180 | .setauthsize = aead_setauthsize, | ||
2181 | .encrypt = aead_encrypt, | ||
2182 | .decrypt = aead_decrypt, | ||
2183 | .givencrypt = aead_givencrypt, | ||
2184 | .geniv = "<built-in>", | ||
2185 | .ivsize = AES_BLOCK_SIZE, | 2185 | .ivsize = AES_BLOCK_SIZE, |
2186 | .maxauthsize = MD5_DIGEST_SIZE, | 2186 | .maxauthsize = MD5_DIGEST_SIZE, |
2187 | } | 2187 | } |
@@ -2200,14 +2200,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2200 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", | 2200 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", |
2201 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2201 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2202 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2202 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
2203 | .cra_type = &crypto_aead_type, | ||
2204 | .cra_aead = { | 2203 | .cra_aead = { |
2205 | .setkey = aead_setkey, | ||
2206 | .setauthsize = aead_setauthsize, | ||
2207 | .encrypt = aead_encrypt, | ||
2208 | .decrypt = aead_decrypt, | ||
2209 | .givencrypt = aead_givencrypt, | ||
2210 | .geniv = "<built-in>", | ||
2211 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2204 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2212 | .maxauthsize = MD5_DIGEST_SIZE, | 2205 | .maxauthsize = MD5_DIGEST_SIZE, |
2213 | } | 2206 | } |
@@ -2229,12 +2222,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2229 | .cra_blocksize = AES_BLOCK_SIZE, | 2222 | .cra_blocksize = AES_BLOCK_SIZE, |
2230 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 2223 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
2231 | CRYPTO_ALG_ASYNC, | 2224 | CRYPTO_ALG_ASYNC, |
2232 | .cra_type = &crypto_ablkcipher_type, | ||
2233 | .cra_ablkcipher = { | 2225 | .cra_ablkcipher = { |
2234 | .setkey = ablkcipher_setkey, | ||
2235 | .encrypt = ablkcipher_encrypt, | ||
2236 | .decrypt = ablkcipher_decrypt, | ||
2237 | .geniv = "eseqiv", | ||
2238 | .min_keysize = AES_MIN_KEY_SIZE, | 2226 | .min_keysize = AES_MIN_KEY_SIZE, |
2239 | .max_keysize = AES_MAX_KEY_SIZE, | 2227 | .max_keysize = AES_MAX_KEY_SIZE, |
2240 | .ivsize = AES_BLOCK_SIZE, | 2228 | .ivsize = AES_BLOCK_SIZE, |
@@ -2251,12 +2239,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2251 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2239 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2252 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 2240 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
2253 | CRYPTO_ALG_ASYNC, | 2241 | CRYPTO_ALG_ASYNC, |
2254 | .cra_type = &crypto_ablkcipher_type, | ||
2255 | .cra_ablkcipher = { | 2242 | .cra_ablkcipher = { |
2256 | .setkey = ablkcipher_setkey, | ||
2257 | .encrypt = ablkcipher_encrypt, | ||
2258 | .decrypt = ablkcipher_decrypt, | ||
2259 | .geniv = "eseqiv", | ||
2260 | .min_keysize = DES3_EDE_KEY_SIZE, | 2243 | .min_keysize = DES3_EDE_KEY_SIZE, |
2261 | .max_keysize = DES3_EDE_KEY_SIZE, | 2244 | .max_keysize = DES3_EDE_KEY_SIZE, |
2262 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2245 | .ivsize = DES3_EDE_BLOCK_SIZE, |
@@ -2270,11 +2253,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2270 | /* AHASH algorithms. */ | 2253 | /* AHASH algorithms. */ |
2271 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2254 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2272 | .alg.hash = { | 2255 | .alg.hash = { |
2273 | .init = ahash_init, | ||
2274 | .update = ahash_update, | ||
2275 | .final = ahash_final, | ||
2276 | .finup = ahash_finup, | ||
2277 | .digest = ahash_digest, | ||
2278 | .halg.digestsize = MD5_DIGEST_SIZE, | 2256 | .halg.digestsize = MD5_DIGEST_SIZE, |
2279 | .halg.base = { | 2257 | .halg.base = { |
2280 | .cra_name = "md5", | 2258 | .cra_name = "md5", |
@@ -2282,7 +2260,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2282 | .cra_blocksize = MD5_BLOCK_SIZE, | 2260 | .cra_blocksize = MD5_BLOCK_SIZE, |
2283 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2261 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2284 | CRYPTO_ALG_ASYNC, | 2262 | CRYPTO_ALG_ASYNC, |
2285 | .cra_type = &crypto_ahash_type | ||
2286 | } | 2263 | } |
2287 | }, | 2264 | }, |
2288 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2265 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2291,11 +2268,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2291 | }, | 2268 | }, |
2292 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2269 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2293 | .alg.hash = { | 2270 | .alg.hash = { |
2294 | .init = ahash_init, | ||
2295 | .update = ahash_update, | ||
2296 | .final = ahash_final, | ||
2297 | .finup = ahash_finup, | ||
2298 | .digest = ahash_digest, | ||
2299 | .halg.digestsize = SHA1_DIGEST_SIZE, | 2271 | .halg.digestsize = SHA1_DIGEST_SIZE, |
2300 | .halg.base = { | 2272 | .halg.base = { |
2301 | .cra_name = "sha1", | 2273 | .cra_name = "sha1", |
@@ -2303,7 +2275,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2303 | .cra_blocksize = SHA1_BLOCK_SIZE, | 2275 | .cra_blocksize = SHA1_BLOCK_SIZE, |
2304 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2276 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2305 | CRYPTO_ALG_ASYNC, | 2277 | CRYPTO_ALG_ASYNC, |
2306 | .cra_type = &crypto_ahash_type | ||
2307 | } | 2278 | } |
2308 | }, | 2279 | }, |
2309 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2280 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2312,11 +2283,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2312 | }, | 2283 | }, |
2313 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2284 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2314 | .alg.hash = { | 2285 | .alg.hash = { |
2315 | .init = ahash_init, | ||
2316 | .update = ahash_update, | ||
2317 | .final = ahash_final, | ||
2318 | .finup = ahash_finup, | ||
2319 | .digest = ahash_digest, | ||
2320 | .halg.digestsize = SHA224_DIGEST_SIZE, | 2286 | .halg.digestsize = SHA224_DIGEST_SIZE, |
2321 | .halg.base = { | 2287 | .halg.base = { |
2322 | .cra_name = "sha224", | 2288 | .cra_name = "sha224", |
@@ -2324,7 +2290,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2324 | .cra_blocksize = SHA224_BLOCK_SIZE, | 2290 | .cra_blocksize = SHA224_BLOCK_SIZE, |
2325 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2291 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2326 | CRYPTO_ALG_ASYNC, | 2292 | CRYPTO_ALG_ASYNC, |
2327 | .cra_type = &crypto_ahash_type | ||
2328 | } | 2293 | } |
2329 | }, | 2294 | }, |
2330 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2295 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2333,11 +2298,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2333 | }, | 2298 | }, |
2334 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2299 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2335 | .alg.hash = { | 2300 | .alg.hash = { |
2336 | .init = ahash_init, | ||
2337 | .update = ahash_update, | ||
2338 | .final = ahash_final, | ||
2339 | .finup = ahash_finup, | ||
2340 | .digest = ahash_digest, | ||
2341 | .halg.digestsize = SHA256_DIGEST_SIZE, | 2301 | .halg.digestsize = SHA256_DIGEST_SIZE, |
2342 | .halg.base = { | 2302 | .halg.base = { |
2343 | .cra_name = "sha256", | 2303 | .cra_name = "sha256", |
@@ -2345,7 +2305,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2345 | .cra_blocksize = SHA256_BLOCK_SIZE, | 2305 | .cra_blocksize = SHA256_BLOCK_SIZE, |
2346 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2306 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2347 | CRYPTO_ALG_ASYNC, | 2307 | CRYPTO_ALG_ASYNC, |
2348 | .cra_type = &crypto_ahash_type | ||
2349 | } | 2308 | } |
2350 | }, | 2309 | }, |
2351 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2310 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2354,11 +2313,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2354 | }, | 2313 | }, |
2355 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2314 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2356 | .alg.hash = { | 2315 | .alg.hash = { |
2357 | .init = ahash_init, | ||
2358 | .update = ahash_update, | ||
2359 | .final = ahash_final, | ||
2360 | .finup = ahash_finup, | ||
2361 | .digest = ahash_digest, | ||
2362 | .halg.digestsize = SHA384_DIGEST_SIZE, | 2316 | .halg.digestsize = SHA384_DIGEST_SIZE, |
2363 | .halg.base = { | 2317 | .halg.base = { |
2364 | .cra_name = "sha384", | 2318 | .cra_name = "sha384", |
@@ -2366,7 +2320,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2366 | .cra_blocksize = SHA384_BLOCK_SIZE, | 2320 | .cra_blocksize = SHA384_BLOCK_SIZE, |
2367 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2321 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2368 | CRYPTO_ALG_ASYNC, | 2322 | CRYPTO_ALG_ASYNC, |
2369 | .cra_type = &crypto_ahash_type | ||
2370 | } | 2323 | } |
2371 | }, | 2324 | }, |
2372 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2325 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2375,11 +2328,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2375 | }, | 2328 | }, |
2376 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2329 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2377 | .alg.hash = { | 2330 | .alg.hash = { |
2378 | .init = ahash_init, | ||
2379 | .update = ahash_update, | ||
2380 | .final = ahash_final, | ||
2381 | .finup = ahash_finup, | ||
2382 | .digest = ahash_digest, | ||
2383 | .halg.digestsize = SHA512_DIGEST_SIZE, | 2331 | .halg.digestsize = SHA512_DIGEST_SIZE, |
2384 | .halg.base = { | 2332 | .halg.base = { |
2385 | .cra_name = "sha512", | 2333 | .cra_name = "sha512", |
@@ -2387,7 +2335,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2387 | .cra_blocksize = SHA512_BLOCK_SIZE, | 2335 | .cra_blocksize = SHA512_BLOCK_SIZE, |
2388 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2336 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2389 | CRYPTO_ALG_ASYNC, | 2337 | CRYPTO_ALG_ASYNC, |
2390 | .cra_type = &crypto_ahash_type | ||
2391 | } | 2338 | } |
2392 | }, | 2339 | }, |
2393 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2340 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2396,12 +2343,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2396 | }, | 2343 | }, |
2397 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2344 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2398 | .alg.hash = { | 2345 | .alg.hash = { |
2399 | .init = ahash_init, | ||
2400 | .update = ahash_update, | ||
2401 | .final = ahash_final, | ||
2402 | .finup = ahash_finup, | ||
2403 | .digest = ahash_digest, | ||
2404 | .setkey = ahash_setkey, | ||
2405 | .halg.digestsize = MD5_DIGEST_SIZE, | 2346 | .halg.digestsize = MD5_DIGEST_SIZE, |
2406 | .halg.base = { | 2347 | .halg.base = { |
2407 | .cra_name = "hmac(md5)", | 2348 | .cra_name = "hmac(md5)", |
@@ -2409,7 +2350,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2409 | .cra_blocksize = MD5_BLOCK_SIZE, | 2350 | .cra_blocksize = MD5_BLOCK_SIZE, |
2410 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2351 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2411 | CRYPTO_ALG_ASYNC, | 2352 | CRYPTO_ALG_ASYNC, |
2412 | .cra_type = &crypto_ahash_type | ||
2413 | } | 2353 | } |
2414 | }, | 2354 | }, |
2415 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2355 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2418,12 +2358,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2418 | }, | 2358 | }, |
2419 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2359 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2420 | .alg.hash = { | 2360 | .alg.hash = { |
2421 | .init = ahash_init, | ||
2422 | .update = ahash_update, | ||
2423 | .final = ahash_final, | ||
2424 | .finup = ahash_finup, | ||
2425 | .digest = ahash_digest, | ||
2426 | .setkey = ahash_setkey, | ||
2427 | .halg.digestsize = SHA1_DIGEST_SIZE, | 2361 | .halg.digestsize = SHA1_DIGEST_SIZE, |
2428 | .halg.base = { | 2362 | .halg.base = { |
2429 | .cra_name = "hmac(sha1)", | 2363 | .cra_name = "hmac(sha1)", |
@@ -2431,7 +2365,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2431 | .cra_blocksize = SHA1_BLOCK_SIZE, | 2365 | .cra_blocksize = SHA1_BLOCK_SIZE, |
2432 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2366 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2433 | CRYPTO_ALG_ASYNC, | 2367 | CRYPTO_ALG_ASYNC, |
2434 | .cra_type = &crypto_ahash_type | ||
2435 | } | 2368 | } |
2436 | }, | 2369 | }, |
2437 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2370 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2440,12 +2373,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2440 | }, | 2373 | }, |
2441 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2374 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2442 | .alg.hash = { | 2375 | .alg.hash = { |
2443 | .init = ahash_init, | ||
2444 | .update = ahash_update, | ||
2445 | .final = ahash_final, | ||
2446 | .finup = ahash_finup, | ||
2447 | .digest = ahash_digest, | ||
2448 | .setkey = ahash_setkey, | ||
2449 | .halg.digestsize = SHA224_DIGEST_SIZE, | 2376 | .halg.digestsize = SHA224_DIGEST_SIZE, |
2450 | .halg.base = { | 2377 | .halg.base = { |
2451 | .cra_name = "hmac(sha224)", | 2378 | .cra_name = "hmac(sha224)", |
@@ -2453,7 +2380,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2453 | .cra_blocksize = SHA224_BLOCK_SIZE, | 2380 | .cra_blocksize = SHA224_BLOCK_SIZE, |
2454 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2381 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2455 | CRYPTO_ALG_ASYNC, | 2382 | CRYPTO_ALG_ASYNC, |
2456 | .cra_type = &crypto_ahash_type | ||
2457 | } | 2383 | } |
2458 | }, | 2384 | }, |
2459 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2385 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2462,12 +2388,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2462 | }, | 2388 | }, |
2463 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2389 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2464 | .alg.hash = { | 2390 | .alg.hash = { |
2465 | .init = ahash_init, | ||
2466 | .update = ahash_update, | ||
2467 | .final = ahash_final, | ||
2468 | .finup = ahash_finup, | ||
2469 | .digest = ahash_digest, | ||
2470 | .setkey = ahash_setkey, | ||
2471 | .halg.digestsize = SHA256_DIGEST_SIZE, | 2391 | .halg.digestsize = SHA256_DIGEST_SIZE, |
2472 | .halg.base = { | 2392 | .halg.base = { |
2473 | .cra_name = "hmac(sha256)", | 2393 | .cra_name = "hmac(sha256)", |
@@ -2475,7 +2395,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2475 | .cra_blocksize = SHA256_BLOCK_SIZE, | 2395 | .cra_blocksize = SHA256_BLOCK_SIZE, |
2476 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2396 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2477 | CRYPTO_ALG_ASYNC, | 2397 | CRYPTO_ALG_ASYNC, |
2478 | .cra_type = &crypto_ahash_type | ||
2479 | } | 2398 | } |
2480 | }, | 2399 | }, |
2481 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2400 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2484,12 +2403,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2484 | }, | 2403 | }, |
2485 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2404 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2486 | .alg.hash = { | 2405 | .alg.hash = { |
2487 | .init = ahash_init, | ||
2488 | .update = ahash_update, | ||
2489 | .final = ahash_final, | ||
2490 | .finup = ahash_finup, | ||
2491 | .digest = ahash_digest, | ||
2492 | .setkey = ahash_setkey, | ||
2493 | .halg.digestsize = SHA384_DIGEST_SIZE, | 2406 | .halg.digestsize = SHA384_DIGEST_SIZE, |
2494 | .halg.base = { | 2407 | .halg.base = { |
2495 | .cra_name = "hmac(sha384)", | 2408 | .cra_name = "hmac(sha384)", |
@@ -2497,7 +2410,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2497 | .cra_blocksize = SHA384_BLOCK_SIZE, | 2410 | .cra_blocksize = SHA384_BLOCK_SIZE, |
2498 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2411 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2499 | CRYPTO_ALG_ASYNC, | 2412 | CRYPTO_ALG_ASYNC, |
2500 | .cra_type = &crypto_ahash_type | ||
2501 | } | 2413 | } |
2502 | }, | 2414 | }, |
2503 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2415 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2506,12 +2418,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2506 | }, | 2418 | }, |
2507 | { .type = CRYPTO_ALG_TYPE_AHASH, | 2419 | { .type = CRYPTO_ALG_TYPE_AHASH, |
2508 | .alg.hash = { | 2420 | .alg.hash = { |
2509 | .init = ahash_init, | ||
2510 | .update = ahash_update, | ||
2511 | .final = ahash_final, | ||
2512 | .finup = ahash_finup, | ||
2513 | .digest = ahash_digest, | ||
2514 | .setkey = ahash_setkey, | ||
2515 | .halg.digestsize = SHA512_DIGEST_SIZE, | 2421 | .halg.digestsize = SHA512_DIGEST_SIZE, |
2516 | .halg.base = { | 2422 | .halg.base = { |
2517 | .cra_name = "hmac(sha512)", | 2423 | .cra_name = "hmac(sha512)", |
@@ -2519,7 +2425,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2519 | .cra_blocksize = SHA512_BLOCK_SIZE, | 2425 | .cra_blocksize = SHA512_BLOCK_SIZE, |
2520 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 2426 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
2521 | CRYPTO_ALG_ASYNC, | 2427 | CRYPTO_ALG_ASYNC, |
2522 | .cra_type = &crypto_ahash_type | ||
2523 | } | 2428 | } |
2524 | }, | 2429 | }, |
2525 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2430 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2677,14 +2582,34 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
2677 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 2582 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
2678 | alg = &t_alg->algt.alg.crypto; | 2583 | alg = &t_alg->algt.alg.crypto; |
2679 | alg->cra_init = talitos_cra_init; | 2584 | alg->cra_init = talitos_cra_init; |
2585 | alg->cra_type = &crypto_ablkcipher_type; | ||
2586 | alg->cra_ablkcipher.setkey = ablkcipher_setkey; | ||
2587 | alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; | ||
2588 | alg->cra_ablkcipher.decrypt = ablkcipher_decrypt; | ||
2589 | alg->cra_ablkcipher.geniv = "eseqiv"; | ||
2680 | break; | 2590 | break; |
2681 | case CRYPTO_ALG_TYPE_AEAD: | 2591 | case CRYPTO_ALG_TYPE_AEAD: |
2682 | alg = &t_alg->algt.alg.crypto; | 2592 | alg = &t_alg->algt.alg.crypto; |
2683 | alg->cra_init = talitos_cra_init_aead; | 2593 | alg->cra_init = talitos_cra_init_aead; |
2594 | alg->cra_type = &crypto_aead_type; | ||
2595 | alg->cra_aead.setkey = aead_setkey; | ||
2596 | alg->cra_aead.setauthsize = aead_setauthsize; | ||
2597 | alg->cra_aead.encrypt = aead_encrypt; | ||
2598 | alg->cra_aead.decrypt = aead_decrypt; | ||
2599 | alg->cra_aead.givencrypt = aead_givencrypt; | ||
2600 | alg->cra_aead.geniv = "<built-in>"; | ||
2684 | break; | 2601 | break; |
2685 | case CRYPTO_ALG_TYPE_AHASH: | 2602 | case CRYPTO_ALG_TYPE_AHASH: |
2686 | alg = &t_alg->algt.alg.hash.halg.base; | 2603 | alg = &t_alg->algt.alg.hash.halg.base; |
2687 | alg->cra_init = talitos_cra_init_ahash; | 2604 | alg->cra_init = talitos_cra_init_ahash; |
2605 | alg->cra_type = &crypto_ahash_type; | ||
2606 | t_alg->algt.alg.hash.init = ahash_init; | ||
2607 | t_alg->algt.alg.hash.update = ahash_update; | ||
2608 | t_alg->algt.alg.hash.final = ahash_final; | ||
2609 | t_alg->algt.alg.hash.finup = ahash_finup; | ||
2610 | t_alg->algt.alg.hash.digest = ahash_digest; | ||
2611 | t_alg->algt.alg.hash.setkey = ahash_setkey; | ||
2612 | |||
2688 | if (!(priv->features & TALITOS_FTR_HMAC_OK) && | 2613 | if (!(priv->features & TALITOS_FTR_HMAC_OK) && |
2689 | !strncmp(alg->cra_name, "hmac", 4)) { | 2614 | !strncmp(alg->cra_name, "hmac", 4)) { |
2690 | kfree(t_alg); | 2615 | kfree(t_alg); |
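This is where the hoisted assignments land: one switch arm per algorithm type wires up the shared callbacks at registration time. One behavioral nuance worth noting: .setkey is now attached to every ahash, whereas the old tables set ahash_setkey only on the hmac(*) entries, so the plain-digest algorithms expose a setkey hook too after this change (visible from the removals earlier in this section):

t_alg->algt.alg.hash.setkey = ahash_setkey;  /* unconditional: plain
                                              * md5/sha* as well as hmac */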
@@ -2896,7 +2821,9 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2896 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { | 2821 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { |
2897 | struct talitos_crypto_alg *t_alg; | 2822 | struct talitos_crypto_alg *t_alg; |
2898 | char *name = NULL; | 2823 | char *name = NULL; |
2824 | bool authenc = false; | ||
2899 | 2825 | ||
2826 | authencesn: | ||
2900 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); | 2827 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); |
2901 | if (IS_ERR(t_alg)) { | 2828 | if (IS_ERR(t_alg)) { |
2902 | err = PTR_ERR(t_alg); | 2829 | err = PTR_ERR(t_alg); |
@@ -2911,6 +2838,8 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2911 | err = crypto_register_alg( | 2838 | err = crypto_register_alg( |
2912 | &t_alg->algt.alg.crypto); | 2839 | &t_alg->algt.alg.crypto); |
2913 | name = t_alg->algt.alg.crypto.cra_driver_name; | 2840 | name = t_alg->algt.alg.crypto.cra_driver_name; |
2841 | authenc = authenc ? !authenc : | ||
2842 | !(bool)memcmp(name, "authenc", 7); | ||
2914 | break; | 2843 | break; |
2915 | case CRYPTO_ALG_TYPE_AHASH: | 2844 | case CRYPTO_ALG_TYPE_AHASH: |
2916 | err = crypto_register_ahash( | 2845 | err = crypto_register_ahash( |
@@ -2923,8 +2852,25 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2923 | dev_err(dev, "%s alg registration failed\n", | 2852 | dev_err(dev, "%s alg registration failed\n", |
2924 | name); | 2853 | name); |
2925 | kfree(t_alg); | 2854 | kfree(t_alg); |
2926 | } else | 2855 | } else { |
2927 | list_add_tail(&t_alg->entry, &priv->alg_list); | 2856 | list_add_tail(&t_alg->entry, &priv->alg_list); |
2857 | if (authenc) { | ||
2858 | struct crypto_alg *alg = | ||
2859 | &driver_algs[i].alg.crypto; | ||
2860 | |||
2861 | name = alg->cra_name; | ||
2862 | memmove(name + 10, name + 7, | ||
2863 | strlen(name) - 7); | ||
2864 | memcpy(name + 7, "esn", 3); | ||
2865 | |||
2866 | name = alg->cra_driver_name; | ||
2867 | memmove(name + 10, name + 7, | ||
2868 | strlen(name) - 7); | ||
2869 | memcpy(name + 7, "esn", 3); | ||
2870 | |||
2871 | goto authencesn; | ||
2872 | } | ||
2873 | } | ||
2928 | } | 2874 | } |
2929 | } | 2875 | } |
2930 | if (!list_empty(&priv->alg_list)) | 2876 | if (!list_empty(&priv->alg_list)) |
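The registration loop implements the authencesn support promised by the comment at the top of driver_algs[]: after an authenc(*) algorithm registers successfully, both cra_name and cra_driver_name are rewritten in place from "authenc" to "authencesn" and the goto re-enters the loop body to register the ESN variant. The odd-looking ternary is the cycle breaker: on the first pass authenc is false, so it becomes true iff the driver name starts with "authenc"; on the goto pass it is true, so it is reset to false and the loop moves on. A standalone sketch of the rename ("authenc" is 7 bytes, "authencesn" is 10, so the buffer needs 3 spare bytes; note the driver's memmove does not move the NUL and instead relies on the zero-initialized tail of the fixed-size cra_name arrays):

#include <string.h>

void authenc_to_authencesn(char *buf)
{
        size_t len = strlen(buf);

        memmove(buf + 10, buf + 7, len - 7 + 1);  /* shift tail, incl. NUL */
        memcpy(buf + 7, "esn", 3);                /* splice in "esn"       */
}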
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c index ac236f6724f4..37185e6630cd 100644 --- a/drivers/crypto/tegra-aes.c +++ b/drivers/crypto/tegra-aes.c | |||
@@ -969,6 +969,7 @@ static int tegra_aes_probe(struct platform_device *pdev) | |||
969 | aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1); | 969 | aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1); |
970 | if (!aes_wq) { | 970 | if (!aes_wq) { |
971 | dev_err(dev, "alloc_workqueue failed\n"); | 971 | dev_err(dev, "alloc_workqueue failed\n"); |
972 | err = -ENOMEM; | ||
972 | goto out; | 973 | goto out; |
973 | } | 974 | } |
974 | 975 | ||
@@ -1004,8 +1005,6 @@ static int tegra_aes_probe(struct platform_device *pdev) | |||
1004 | 1005 | ||
1005 | aes_dev = dd; | 1006 | aes_dev = dd; |
1006 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | 1007 | for (i = 0; i < ARRAY_SIZE(algs); i++) { |
1007 | INIT_LIST_HEAD(&algs[i].cra_list); | ||
1008 | |||
1009 | algs[i].cra_priority = 300; | 1008 | algs[i].cra_priority = 300; |
1010 | algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx); | 1009 | algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx); |
1011 | algs[i].cra_module = THIS_MODULE; | 1010 | algs[i].cra_module = THIS_MODULE; |
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index ef17e3871c71..bc615cc56266 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -1486,6 +1486,7 @@ static int ux500_cryp_probe(struct platform_device *pdev) | |||
1486 | if (!res_irq) { | 1486 | if (!res_irq) { |
1487 | dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable", | 1487 | dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable", |
1488 | __func__); | 1488 | __func__); |
1489 | ret = -ENODEV; | ||
1489 | goto out_power; | 1490 | goto out_power; |
1490 | } | 1491 | } |
1491 | 1492 | ||
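The tegra-aes and ux500_cryp hunks above fix the same bug class: an error path jumps to the cleanup label without setting the function's return variable, so probe() falls through and reports success. The pattern in miniature (illustrative only; my_alloc is a stand-in):

static int my_probe(void)
{
        int err = 0;
        void *res = my_alloc();

        if (!res) {
                err = -ENOMEM;  /* the fix: without this assignment the */
                goto out;       /* cleanup path returns 0, i.e. success */
        }
        /* ... normal setup ... */
out:
        return err;
}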
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 08765072a2b3..632c3339895f 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c | |||
@@ -1991,7 +1991,6 @@ static int __init ux500_hash_mod_init(void) | |||
1991 | static void __exit ux500_hash_mod_fini(void) | 1991 | static void __exit ux500_hash_mod_fini(void) |
1992 | { | 1992 | { |
1993 | platform_driver_unregister(&hash_driver); | 1993 | platform_driver_unregister(&hash_driver); |
1994 | return; | ||
1995 | } | 1994 | } |
1996 | 1995 | ||
1997 | module_init(ux500_hash_mod_init); | 1996 | module_init(ux500_hash_mod_init); |