author    Linus Torvalds <torvalds@linux-foundation.org>    2019-07-08 23:57:08 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-07-08 23:57:08 -0400
commit    4d2fa8b44b891f0da5ceda3e5a1402ccf0ab6f26 (patch)
tree      cbb763ec5e74cfbaac6ce53df277883cb78a8a1a /drivers
parent    8b68150883ca466a23e90902dd4113b22e692f04 (diff)
parent    f3880a23564e3172437285ebcb5b8a124539fdae (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 5.3:

  API:
   - Test shash interface directly in testmgr
   - cra_driver_name is now mandatory

  Algorithms:
   - Replace arc4 crypto_cipher with library helper
   - Implement 5 way interleave for ECB, CBC and CTR on arm64
   - Add xxhash
   - Add continuous self-test on noise source to drbg
   - Update jitter RNG

  Drivers:
   - Add support for SHA204A random number generator
   - Add support for 7211 in iproc-rng200
   - Fix fuzz test failures in inside-secure
   - Fix fuzz test failures in talitos
   - Fix fuzz test failures in qat"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
  crypto: stm32/hash - remove interruptible condition for dma
  crypto: stm32/hash - Fix hmac issue more than 256 bytes
  crypto: stm32/crc32 - rename driver file
  crypto: amcc - remove memset after dma_alloc_coherent
  crypto: ccp - Switch to SPDX license identifiers
  crypto: ccp - Validate the the error value used to index error messages
  crypto: doc - Fix formatting of new crypto engine content
  crypto: doc - Add parameter documentation
  crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
  crypto: arm64/aes-ce - add 5 way interleave routines
  crypto: talitos - drop icv_ool
  crypto: talitos - fix hash on SEC1.
  crypto: talitos - move struct talitos_edesc into talitos.h
  lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
  crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
  crypto: asymmetric_keys - select CRYPTO_HASH where needed
  crypto: serpent - mark __serpent_setkey_sbox noinline
  crypto: testmgr - dynamically allocate crypto_shash
  crypto: testmgr - dynamically allocate testvec_config
  crypto: talitos - eliminate unneeded 'done' functions at build time
  ...
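One of the larger algorithm changes listed above is the arc4 conversion: users such as drivers/net/ppp/ppp_mppe.c (97 lines touched in the diffstat below) drop their crypto_cipher allocation in favour of the new library helper. A minimal sketch of the consumer-side pattern, assuming the arc4_setkey()/arc4_crypt() interface from <crypto/arc4.h> that this series introduces; the actual ppp_mppe changes are not reproduced on this page:

#include <crypto/arc4.h>
#include <linux/types.h>

/* hypothetical per-session state; the arc4_ctx replaces the old crypto_cipher handle */
struct mppe_sketch_state {
	struct arc4_ctx arc4;
	u8 session_key[16];
	unsigned int keylen;
};

static int mppe_rekey_sketch(struct mppe_sketch_state *st)
{
	/* no crypto_alloc_cipher()/crypto_cipher_setkey() round trip any more */
	return arc4_setkey(&st->arc4, st->session_key, st->keylen);
}

static void mppe_crypt_sketch(struct mppe_sketch_state *st, u8 *dst,
			      const u8 *src, unsigned int len)
{
	/* one call processes the whole buffer; dst and src may alias for in-place use */
	arc4_crypt(&st->arc4, dst, src, len);
}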
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/char/hw_random/iproc-rng200.c | 1
-rw-r--r-- drivers/char/hw_random/meson-rng.c | 52
-rw-r--r-- drivers/crypto/Kconfig | 20
-rw-r--r-- drivers/crypto/Makefile | 2
-rw-r--r-- drivers/crypto/amcc/crypto4xx_alg.c | 36
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 25
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.h | 10
-rw-r--r-- drivers/crypto/atmel-ecc.c | 403
-rw-r--r-- drivers/crypto/atmel-ecc.h | 116
-rw-r--r-- drivers/crypto/atmel-i2c.c | 364
-rw-r--r-- drivers/crypto/atmel-i2c.h | 197
-rw-r--r-- drivers/crypto/atmel-sha204a.c | 171
-rw-r--r-- drivers/crypto/bcm/cipher.c | 8
-rw-r--r-- drivers/crypto/bcm/spu2.c | 10
-rw-r--r-- drivers/crypto/caam/Kconfig | 46
-rw-r--r-- drivers/crypto/caam/Makefile | 18
-rw-r--r-- drivers/crypto/caam/caamalg.c | 338
-rw-r--r-- drivers/crypto/caam/caamalg_desc.c | 147
-rw-r--r-- drivers/crypto/caam/caamalg_desc.h | 4
-rw-r--r-- drivers/crypto/caam/caamalg_qi.c | 267
-rw-r--r-- drivers/crypto/caam/caamalg_qi2.c | 202
-rw-r--r-- drivers/crypto/caam/caamhash.c | 329
-rw-r--r-- drivers/crypto/caam/caampkc.c | 177
-rw-r--r-- drivers/crypto/caam/caampkc.h | 9
-rw-r--r-- drivers/crypto/caam/caamrng.c | 76
-rw-r--r-- drivers/crypto/caam/ctrl.c | 56
-rw-r--r-- drivers/crypto/caam/desc_constr.h | 11
-rw-r--r-- drivers/crypto/caam/error.c | 8
-rw-r--r-- drivers/crypto/caam/error.h | 2
-rw-r--r-- drivers/crypto/caam/intern.h | 102
-rw-r--r-- drivers/crypto/caam/jr.c | 43
-rw-r--r-- drivers/crypto/caam/key_gen.c | 28
-rw-r--r-- drivers/crypto/caam/qi.c | 52
-rw-r--r-- drivers/crypto/caam/sg_sw_qm.h | 18
-rw-r--r-- drivers/crypto/caam/sg_sw_qm2.h | 18
-rw-r--r-- drivers/crypto/caam/sg_sw_sec4.h | 26
-rw-r--r-- drivers/crypto/cavium/cpt/cptvf_algs.c | 1
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_debugfs.h | 2
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_mbx.h | 2
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes.c | 7
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 96
-rw-r--r-- drivers/crypto/ccp/ccp-dev.h | 2
-rw-r--r-- drivers/crypto/ccp/ccp-ops.c | 20
-rw-r--r-- drivers/crypto/ccree/cc_driver.c | 70
-rw-r--r-- drivers/crypto/ccree/cc_driver.h | 6
-rw-r--r-- drivers/crypto/ccree/cc_host_regs.h | 20
-rw-r--r-- drivers/crypto/ccree/cc_pm.c | 11
-rw-r--r-- drivers/crypto/ccree/cc_pm.h | 7
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_drv.h | 2
-rw-r--r-- drivers/crypto/inside-secure/safexcel.c | 13
-rw-r--r-- drivers/crypto/inside-secure/safexcel.h | 17
-rw-r--r-- drivers/crypto/inside-secure/safexcel_cipher.c | 116
-rw-r--r-- drivers/crypto/inside-secure/safexcel_hash.c | 92
-rw-r--r-- drivers/crypto/inside-secure/safexcel_ring.c | 3
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 15
-rw-r--r-- drivers/crypto/mxs-dcp.c | 5
-rw-r--r-- drivers/crypto/nx/nx-842-powernv.c | 8
-rw-r--r-- drivers/crypto/nx/nx.c | 4
-rw-r--r-- drivers/crypto/nx/nx.h | 12
-rw-r--r-- drivers/crypto/nx/nx_debugfs.c | 71
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 294
-rw-r--r-- drivers/crypto/qat/qat_common/qat_crypto.h | 2
-rw-r--r-- drivers/crypto/sahara.c | 4
-rw-r--r-- drivers/crypto/stm32/Makefile | 2
-rw-r--r-- drivers/crypto/stm32/stm32-crc32.c (renamed from drivers/crypto/stm32/stm32_crc32.c) | 0
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 6
-rw-r--r-- drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 47
-rw-r--r-- drivers/crypto/talitos.c | 368
-rw-r--r-- drivers/crypto/talitos.h | 73
-rw-r--r-- drivers/crypto/vmx/aes_cbc.c | 183
-rw-r--r-- drivers/crypto/vmx/aes_ctr.c | 165
-rw-r--r-- drivers/crypto/vmx/aes_xts.c | 175
-rw-r--r-- drivers/crypto/vmx/aesp8-ppc.h | 2
-rw-r--r-- drivers/crypto/vmx/aesp8-ppc.pl | 22
-rw-r--r-- drivers/crypto/vmx/vmx.c | 72
-rw-r--r-- drivers/i2c/i2c-core-acpi.c | 6
-rw-r--r-- drivers/net/ppp/Kconfig | 3
-rw-r--r-- drivers/net/ppp/ppp_mppe.c | 97
78 files changed, 2927 insertions, 2588 deletions
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 8b5a20b35293..92be1c0ab99f 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
220} 220}
221 221
222static const struct of_device_id iproc_rng200_of_match[] = { 222static const struct of_device_id iproc_rng200_of_match[] = {
223 { .compatible = "brcm,bcm7211-rng200", },
223 { .compatible = "brcm,bcm7278-rng200", }, 224 { .compatible = "brcm,bcm7278-rng200", },
224 { .compatible = "brcm,iproc-rng200", }, 225 { .compatible = "brcm,iproc-rng200", },
225 {}, 226 {},
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 2e23be802a62..76e693da5dde 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -1,58 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
1/* 2/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright (c) 2016 BayLibre, SAS. 3 * Copyright (c) 2016 BayLibre, SAS.
8 * Author: Neil Armstrong <narmstrong@baylibre.com> 4 * Author: Neil Armstrong <narmstrong@baylibre.com>
9 * Copyright (C) 2014 Amlogic, Inc. 5 * Copyright (C) 2014 Amlogic, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, see <http://www.gnu.org/licenses/>.
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
24 *
25 * BSD LICENSE
26 *
27 * Copyright (c) 2016 BayLibre, SAS.
28 * Author: Neil Armstrong <narmstrong@baylibre.com>
29 * Copyright (C) 2014 Amlogic, Inc.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 *
35 * * Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * * Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in
39 * the documentation and/or other materials provided with the
40 * distribution.
41 * * Neither the name of Intel Corporation nor the names of its
42 * contributors may be used to endorse or promote products derived
43 * from this software without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
46 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
47 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
48 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
49 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
51 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
52 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
53 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
54 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
55 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 */ 6 */
57#include <linux/err.h> 7#include <linux/err.h>
58#include <linux/module.h> 8#include <linux/module.h>
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0af08081e305..603413f28fa3 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -520,10 +520,13 @@ config CRYPTO_DEV_ATMEL_SHA
520 To compile this driver as a module, choose M here: the module 520 To compile this driver as a module, choose M here: the module
521 will be called atmel-sha. 521 will be called atmel-sha.
522 522
523config CRYPTO_DEV_ATMEL_I2C
524 tristate
525
523config CRYPTO_DEV_ATMEL_ECC 526config CRYPTO_DEV_ATMEL_ECC
524 tristate "Support for Microchip / Atmel ECC hw accelerator" 527 tristate "Support for Microchip / Atmel ECC hw accelerator"
525 depends on ARCH_AT91 || COMPILE_TEST
526 depends on I2C 528 depends on I2C
529 select CRYPTO_DEV_ATMEL_I2C
527 select CRYPTO_ECDH 530 select CRYPTO_ECDH
528 select CRC16 531 select CRC16
529 help 532 help
@@ -534,6 +537,21 @@ config CRYPTO_DEV_ATMEL_ECC
534 To compile this driver as a module, choose M here: the module 537 To compile this driver as a module, choose M here: the module
535 will be called atmel-ecc. 538 will be called atmel-ecc.
536 539
540config CRYPTO_DEV_ATMEL_SHA204A
541 tristate "Support for Microchip / Atmel SHA accelerator and RNG"
542 depends on I2C
543 select CRYPTO_DEV_ATMEL_I2C
544 select HW_RANDOM
545 select CRC16
546 help
547 Microhip / Atmel SHA accelerator and RNG.
548 Select this if you want to use the Microchip / Atmel SHA204A
549 module as a random number generator. (Other functions of the
550 chip are currently not exposed by this driver)
551
552 To compile this driver as a module, choose M here: the module
553 will be called atmel-sha204a.
554
537config CRYPTO_DEV_CCP 555config CRYPTO_DEV_CCP
538 bool "Support for AMD Secure Processor" 556 bool "Support for AMD Secure Processor"
539 depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM 557 depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
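The CRYPTO_DEV_ATMEL_SHA204A entry added above exposes the SHA204A only as a random number generator. The usual shape of such a driver is a struct hwrng whose read callback issues the device command and which is registered with the hw_random core; a rough sketch of that registration pattern with hypothetical names (the real atmel-sha204a.c appears only in the diffstat, not in this excerpt):

#include <linux/device.h>
#include <linux/hw_random.h>
#include <linux/i2c.h>

/* hypothetical per-device state: the i2c client plus the hwrng handle */
struct sha204a_sketch {
	struct i2c_client *client;
	struct hwrng rng;
};

static int sha204a_rng_read_sketch(struct hwrng *rng, void *data,
				   size_t max, bool wait)
{
	/*
	 * A real implementation would build an OPCODE_RANDOM command with
	 * atmel_i2c_init_random_cmd(), run it through atmel_i2c_send_receive()
	 * and copy at most 32 response bytes into @data, returning that count.
	 */
	return 0;
}

static int sha204a_register_rng_sketch(struct sha204a_sketch *priv)
{
	priv->rng.name = dev_name(&priv->client->dev);
	priv->rng.read = sha204a_rng_read_sketch;

	return devm_hwrng_register(&priv->client->dev, &priv->rng);
}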
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index a23a7197fcd7..afc4753b5d28 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,7 +2,9 @@
2obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o 2obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
3obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o 3obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
4obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o 4obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
5obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
5obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o 6obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
7obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
6obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ 8obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
7obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ 9obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
8obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ 10obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 49f3e0ce242c..cbfc607282f4 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -67,12 +67,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
67} 67}
68 68
69static inline int crypto4xx_crypt(struct skcipher_request *req, 69static inline int crypto4xx_crypt(struct skcipher_request *req,
70 const unsigned int ivlen, bool decrypt) 70 const unsigned int ivlen, bool decrypt,
71 bool check_blocksize)
71{ 72{
72 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 73 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
73 struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); 74 struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
74 __le32 iv[AES_IV_SIZE]; 75 __le32 iv[AES_IV_SIZE];
75 76
77 if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
78 return -EINVAL;
79
76 if (ivlen) 80 if (ivlen)
77 crypto4xx_memcpy_to_le32(iv, req->iv, ivlen); 81 crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
78 82
@@ -81,24 +85,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
81 ctx->sa_len, 0, NULL); 85 ctx->sa_len, 0, NULL);
82} 86}
83 87
84int crypto4xx_encrypt_noiv(struct skcipher_request *req) 88int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
89{
90 return crypto4xx_crypt(req, 0, false, true);
91}
92
93int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
94{
95 return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
96}
97
98int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
85{ 99{
86 return crypto4xx_crypt(req, 0, false); 100 return crypto4xx_crypt(req, 0, true, true);
87} 101}
88 102
89int crypto4xx_encrypt_iv(struct skcipher_request *req) 103int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
90{ 104{
91 return crypto4xx_crypt(req, AES_IV_SIZE, false); 105 return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
92} 106}
93 107
94int crypto4xx_decrypt_noiv(struct skcipher_request *req) 108int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
95{ 109{
96 return crypto4xx_crypt(req, 0, true); 110 return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
97} 111}
98 112
99int crypto4xx_decrypt_iv(struct skcipher_request *req) 113int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
100{ 114{
101 return crypto4xx_crypt(req, AES_IV_SIZE, true); 115 return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
102} 116}
103 117
104/** 118/**
@@ -269,8 +283,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
269 return ret; 283 return ret;
270 } 284 }
271 285
272 return encrypt ? crypto4xx_encrypt_iv(req) 286 return encrypt ? crypto4xx_encrypt_iv_stream(req)
273 : crypto4xx_decrypt_iv(req); 287 : crypto4xx_decrypt_iv_stream(req);
274} 288}
275 289
276static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx, 290static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
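The effect of the new check_blocksize flag is easiest to see in isolation: the block-mode wrappers (ECB, CBC) now reject requests whose length is not a multiple of AES_BLOCK_SIZE before anything is queued to the hardware, while the stream-like wrappers (CTR, CFB, OFB) skip the check. A compilable user-space sketch of just that guard, with a stubbed-out do_crypt() standing in for the hardware path:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* stand-in for the real hardware submission path */
static int do_crypt(const void *buf, size_t len)
{
	(void)buf;
	return (int)len;
}

static int crypt_checked(const void *buf, size_t len, bool check_blocksize)
{
	/* ECB/CBC wrappers pass check_blocksize=true, CTR/CFB/OFB pass false */
	if (check_blocksize && (len % AES_BLOCK_SIZE))
		return -EINVAL;
	return do_crypt(buf, len);
}

int main(void)
{
	char buf[24] = { 0 };

	printf("cbc, 24 bytes: %d\n", crypt_checked(buf, 24, true));  /* -EINVAL */
	printf("ctr, 24 bytes: %d\n", crypt_checked(buf, 24, false)); /* accepted */
	return 0;
}

This early rejection is what lets the skcipher templates keep cra_blocksize = 1 for the stream-like modes (see the crypto4xx_core.c hunks below) while ECB and CBC still fail cleanly on partial blocks.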
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 16d911aaa508..de5e9352e920 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -182,7 +182,6 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
182 dev->pdr_pa); 182 dev->pdr_pa);
183 return -ENOMEM; 183 return -ENOMEM;
184 } 184 }
185 memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
186 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, 185 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
187 sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, 186 sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
188 &dev->shadow_sa_pool_pa, 187 &dev->shadow_sa_pool_pa,
@@ -1210,8 +1209,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1210 .max_keysize = AES_MAX_KEY_SIZE, 1209 .max_keysize = AES_MAX_KEY_SIZE,
1211 .ivsize = AES_IV_SIZE, 1210 .ivsize = AES_IV_SIZE,
1212 .setkey = crypto4xx_setkey_aes_cbc, 1211 .setkey = crypto4xx_setkey_aes_cbc,
1213 .encrypt = crypto4xx_encrypt_iv, 1212 .encrypt = crypto4xx_encrypt_iv_block,
1214 .decrypt = crypto4xx_decrypt_iv, 1213 .decrypt = crypto4xx_decrypt_iv_block,
1215 .init = crypto4xx_sk_init, 1214 .init = crypto4xx_sk_init,
1216 .exit = crypto4xx_sk_exit, 1215 .exit = crypto4xx_sk_exit,
1217 } }, 1216 } },
@@ -1222,7 +1221,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1222 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1221 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1223 .cra_flags = CRYPTO_ALG_ASYNC | 1222 .cra_flags = CRYPTO_ALG_ASYNC |
1224 CRYPTO_ALG_KERN_DRIVER_ONLY, 1223 CRYPTO_ALG_KERN_DRIVER_ONLY,
1225 .cra_blocksize = AES_BLOCK_SIZE, 1224 .cra_blocksize = 1,
1226 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1225 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1227 .cra_module = THIS_MODULE, 1226 .cra_module = THIS_MODULE,
1228 }, 1227 },
@@ -1230,8 +1229,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1230 .max_keysize = AES_MAX_KEY_SIZE, 1229 .max_keysize = AES_MAX_KEY_SIZE,
1231 .ivsize = AES_IV_SIZE, 1230 .ivsize = AES_IV_SIZE,
1232 .setkey = crypto4xx_setkey_aes_cfb, 1231 .setkey = crypto4xx_setkey_aes_cfb,
1233 .encrypt = crypto4xx_encrypt_iv, 1232 .encrypt = crypto4xx_encrypt_iv_stream,
1234 .decrypt = crypto4xx_decrypt_iv, 1233 .decrypt = crypto4xx_decrypt_iv_stream,
1235 .init = crypto4xx_sk_init, 1234 .init = crypto4xx_sk_init,
1236 .exit = crypto4xx_sk_exit, 1235 .exit = crypto4xx_sk_exit,
1237 } }, 1236 } },
@@ -1243,7 +1242,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1243 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | 1242 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
1244 CRYPTO_ALG_ASYNC | 1243 CRYPTO_ALG_ASYNC |
1245 CRYPTO_ALG_KERN_DRIVER_ONLY, 1244 CRYPTO_ALG_KERN_DRIVER_ONLY,
1246 .cra_blocksize = AES_BLOCK_SIZE, 1245 .cra_blocksize = 1,
1247 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1246 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1248 .cra_module = THIS_MODULE, 1247 .cra_module = THIS_MODULE,
1249 }, 1248 },
@@ -1263,7 +1262,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1263 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1262 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1264 .cra_flags = CRYPTO_ALG_ASYNC | 1263 .cra_flags = CRYPTO_ALG_ASYNC |
1265 CRYPTO_ALG_KERN_DRIVER_ONLY, 1264 CRYPTO_ALG_KERN_DRIVER_ONLY,
1266 .cra_blocksize = AES_BLOCK_SIZE, 1265 .cra_blocksize = 1,
1267 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1266 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1268 .cra_module = THIS_MODULE, 1267 .cra_module = THIS_MODULE,
1269 }, 1268 },
@@ -1290,8 +1289,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1290 .min_keysize = AES_MIN_KEY_SIZE, 1289 .min_keysize = AES_MIN_KEY_SIZE,
1291 .max_keysize = AES_MAX_KEY_SIZE, 1290 .max_keysize = AES_MAX_KEY_SIZE,
1292 .setkey = crypto4xx_setkey_aes_ecb, 1291 .setkey = crypto4xx_setkey_aes_ecb,
1293 .encrypt = crypto4xx_encrypt_noiv, 1292 .encrypt = crypto4xx_encrypt_noiv_block,
1294 .decrypt = crypto4xx_decrypt_noiv, 1293 .decrypt = crypto4xx_decrypt_noiv_block,
1295 .init = crypto4xx_sk_init, 1294 .init = crypto4xx_sk_init,
1296 .exit = crypto4xx_sk_exit, 1295 .exit = crypto4xx_sk_exit,
1297 } }, 1296 } },
@@ -1302,7 +1301,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1302 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1301 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1303 .cra_flags = CRYPTO_ALG_ASYNC | 1302 .cra_flags = CRYPTO_ALG_ASYNC |
1304 CRYPTO_ALG_KERN_DRIVER_ONLY, 1303 CRYPTO_ALG_KERN_DRIVER_ONLY,
1305 .cra_blocksize = AES_BLOCK_SIZE, 1304 .cra_blocksize = 1,
1306 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1305 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1307 .cra_module = THIS_MODULE, 1306 .cra_module = THIS_MODULE,
1308 }, 1307 },
@@ -1310,8 +1309,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1310 .max_keysize = AES_MAX_KEY_SIZE, 1309 .max_keysize = AES_MAX_KEY_SIZE,
1311 .ivsize = AES_IV_SIZE, 1310 .ivsize = AES_IV_SIZE,
1312 .setkey = crypto4xx_setkey_aes_ofb, 1311 .setkey = crypto4xx_setkey_aes_ofb,
1313 .encrypt = crypto4xx_encrypt_iv, 1312 .encrypt = crypto4xx_encrypt_iv_stream,
1314 .decrypt = crypto4xx_decrypt_iv, 1313 .decrypt = crypto4xx_decrypt_iv_stream,
1315 .init = crypto4xx_sk_init, 1314 .init = crypto4xx_sk_init,
1316 .exit = crypto4xx_sk_exit, 1315 .exit = crypto4xx_sk_exit,
1317 } }, 1316 } },
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index ca1c25c40c23..6b6841359190 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -173,10 +173,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
173 const u8 *key, unsigned int keylen); 173 const u8 *key, unsigned int keylen);
174int crypto4xx_encrypt_ctr(struct skcipher_request *req); 174int crypto4xx_encrypt_ctr(struct skcipher_request *req);
175int crypto4xx_decrypt_ctr(struct skcipher_request *req); 175int crypto4xx_decrypt_ctr(struct skcipher_request *req);
176int crypto4xx_encrypt_iv(struct skcipher_request *req); 176int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
177int crypto4xx_decrypt_iv(struct skcipher_request *req); 177int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
178int crypto4xx_encrypt_noiv(struct skcipher_request *req); 178int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
179int crypto4xx_decrypt_noiv(struct skcipher_request *req); 179int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
180int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
181int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
180int crypto4xx_rfc3686_encrypt(struct skcipher_request *req); 182int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
181int crypto4xx_rfc3686_decrypt(struct skcipher_request *req); 183int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
182int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); 184int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index ba00e4563ca0..ff02cc05affb 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -6,8 +6,6 @@
6 * Author: Tudor Ambarus <tudor.ambarus@microchip.com> 6 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
7 */ 7 */
8 8
9#include <linux/bitrev.h>
10#include <linux/crc16.h>
11#include <linux/delay.h> 9#include <linux/delay.h>
12#include <linux/device.h> 10#include <linux/device.h>
13#include <linux/err.h> 11#include <linux/err.h>
@@ -23,42 +21,11 @@
23#include <crypto/internal/kpp.h> 21#include <crypto/internal/kpp.h>
24#include <crypto/ecdh.h> 22#include <crypto/ecdh.h>
25#include <crypto/kpp.h> 23#include <crypto/kpp.h>
26#include "atmel-ecc.h" 24#include "atmel-i2c.h"
27
28/* Used for binding tfm objects to i2c clients. */
29struct atmel_ecc_driver_data {
30 struct list_head i2c_client_list;
31 spinlock_t i2c_list_lock;
32} ____cacheline_aligned;
33 25
34static struct atmel_ecc_driver_data driver_data; 26static struct atmel_ecc_driver_data driver_data;
35 27
36/** 28/**
37 * atmel_ecc_i2c_client_priv - i2c_client private data
38 * @client : pointer to i2c client device
39 * @i2c_client_list_node: part of i2c_client_list
40 * @lock : lock for sending i2c commands
41 * @wake_token : wake token array of zeros
42 * @wake_token_sz : size in bytes of the wake_token
43 * @tfm_count : number of active crypto transformations on i2c client
44 *
45 * Reads and writes from/to the i2c client are sequential. The first byte
46 * transmitted to the device is treated as the byte size. Any attempt to send
47 * more than this number of bytes will cause the device to not ACK those bytes.
48 * After the host writes a single command byte to the input buffer, reads are
49 * prohibited until after the device completes command execution. Use a mutex
50 * when sending i2c commands.
51 */
52struct atmel_ecc_i2c_client_priv {
53 struct i2c_client *client;
54 struct list_head i2c_client_list_node;
55 struct mutex lock;
56 u8 wake_token[WAKE_TOKEN_MAX_SIZE];
57 size_t wake_token_sz;
58 atomic_t tfm_count ____cacheline_aligned;
59};
60
61/**
62 * atmel_ecdh_ctx - transformation context 29 * atmel_ecdh_ctx - transformation context
63 * @client : pointer to i2c client device 30 * @client : pointer to i2c client device
64 * @fallback : used for unsupported curves or when user wants to use its own 31 * @fallback : used for unsupported curves or when user wants to use its own
@@ -80,188 +47,12 @@ struct atmel_ecdh_ctx {
80 bool do_fallback; 47 bool do_fallback;
81}; 48};
82 49
83/** 50static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
84 * atmel_ecc_work_data - data structure representing the work
85 * @ctx : transformation context.
86 * @cbk : pointer to a callback function to be invoked upon completion of this
87 * request. This has the form:
88 * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status)
89 * where:
90 * @work_data: data structure representing the work
91 * @areq : optional pointer to an argument passed with the original
92 * request.
93 * @status : status returned from the i2c client device or i2c error.
94 * @areq: optional pointer to a user argument for use at callback time.
95 * @work: describes the task to be executed.
96 * @cmd : structure used for communicating with the device.
97 */
98struct atmel_ecc_work_data {
99 struct atmel_ecdh_ctx *ctx;
100 void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
101 int status);
102 void *areq;
103 struct work_struct work;
104 struct atmel_ecc_cmd cmd;
105};
106
107static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
108{
109 return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
110}
111
112/**
113 * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
114 * CRC16 verification of the count, opcode, param1, param2 and data bytes.
115 * The checksum is saved in little-endian format in the least significant
116 * two bytes of the command. CRC polynomial is 0x8005 and the initial register
117 * value should be zero.
118 *
119 * @cmd : structure used for communicating with the device.
120 */
121static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
122{
123 u8 *data = &cmd->count;
124 size_t len = cmd->count - CRC_SIZE;
125 u16 *crc16 = (u16 *)(data + len);
126
127 *crc16 = atmel_ecc_crc16(0, data, len);
128}
129
130static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
131{
132 cmd->word_addr = COMMAND;
133 cmd->opcode = OPCODE_READ;
134 /*
135 * Read the word from Configuration zone that contains the lock bytes
136 * (UserExtra, Selector, LockValue, LockConfig).
137 */
138 cmd->param1 = CONFIG_ZONE;
139 cmd->param2 = DEVICE_LOCK_ADDR;
140 cmd->count = READ_COUNT;
141
142 atmel_ecc_checksum(cmd);
143
144 cmd->msecs = MAX_EXEC_TIME_READ;
145 cmd->rxsize = READ_RSP_SIZE;
146}
147
148static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
149{
150 cmd->word_addr = COMMAND;
151 cmd->count = GENKEY_COUNT;
152 cmd->opcode = OPCODE_GENKEY;
153 cmd->param1 = GENKEY_MODE_PRIVATE;
154 /* a random private key will be generated and stored in slot keyID */
155 cmd->param2 = cpu_to_le16(keyid);
156
157 atmel_ecc_checksum(cmd);
158
159 cmd->msecs = MAX_EXEC_TIME_GENKEY;
160 cmd->rxsize = GENKEY_RSP_SIZE;
161}
162
163static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
164 struct scatterlist *pubkey)
165{
166 size_t copied;
167
168 cmd->word_addr = COMMAND;
169 cmd->count = ECDH_COUNT;
170 cmd->opcode = OPCODE_ECDH;
171 cmd->param1 = ECDH_PREFIX_MODE;
172 /* private key slot */
173 cmd->param2 = cpu_to_le16(DATA_SLOT_2);
174
175 /*
176 * The device only supports NIST P256 ECC keys. The public key size will
177 * always be the same. Use a macro for the key size to avoid unnecessary
178 * computations.
179 */
180 copied = sg_copy_to_buffer(pubkey,
181 sg_nents_for_len(pubkey,
182 ATMEL_ECC_PUBKEY_SIZE),
183 cmd->data, ATMEL_ECC_PUBKEY_SIZE);
184 if (copied != ATMEL_ECC_PUBKEY_SIZE)
185 return -EINVAL;
186
187 atmel_ecc_checksum(cmd);
188
189 cmd->msecs = MAX_EXEC_TIME_ECDH;
190 cmd->rxsize = ECDH_RSP_SIZE;
191
192 return 0;
193}
194
195/*
196 * After wake and after execution of a command, there will be error, status, or
197 * result bytes in the device's output register that can be retrieved by the
198 * system. When the length of that group is four bytes, the codes returned are
199 * detailed in error_list.
200 */
201static int atmel_ecc_status(struct device *dev, u8 *status)
202{
203 size_t err_list_len = ARRAY_SIZE(error_list);
204 int i;
205 u8 err_id = status[1];
206
207 if (*status != STATUS_SIZE)
208 return 0;
209
210 if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
211 return 0;
212
213 for (i = 0; i < err_list_len; i++)
214 if (error_list[i].value == err_id)
215 break;
216
217 /* if err_id is not in the error_list then ignore it */
218 if (i != err_list_len) {
219 dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
220 return err_id;
221 }
222
223 return 0;
224}
225
226static int atmel_ecc_wakeup(struct i2c_client *client)
227{
228 struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
229 u8 status[STATUS_RSP_SIZE];
230 int ret;
231
232 /*
233 * The device ignores any levels or transitions on the SCL pin when the
234 * device is idle, asleep or during waking up. Don't check for error
235 * when waking up the device.
236 */
237 i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
238
239 /*
240 * Wait to wake the device. Typical execution times for ecdh and genkey
241 * are around tens of milliseconds. Delta is chosen to 50 microseconds.
242 */
243 usleep_range(TWHI_MIN, TWHI_MAX);
244
245 ret = i2c_master_recv(client, status, STATUS_SIZE);
246 if (ret < 0)
247 return ret;
248
249 return atmel_ecc_status(&client->dev, status);
250}
251
252static int atmel_ecc_sleep(struct i2c_client *client)
253{
254 u8 sleep = SLEEP_TOKEN;
255
256 return i2c_master_send(client, &sleep, 1);
257}
258
259static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
260 int status) 51 int status)
261{ 52{
262 struct kpp_request *req = areq; 53 struct kpp_request *req = areq;
263 struct atmel_ecdh_ctx *ctx = work_data->ctx; 54 struct atmel_ecdh_ctx *ctx = work_data->ctx;
264 struct atmel_ecc_cmd *cmd = &work_data->cmd; 55 struct atmel_i2c_cmd *cmd = &work_data->cmd;
265 size_t copied, n_sz; 56 size_t copied, n_sz;
266 57
267 if (status) 58 if (status)
@@ -282,82 +73,6 @@ free_work_data:
282 kpp_request_complete(req, status); 73 kpp_request_complete(req, status);
283} 74}
284 75
285/*
286 * atmel_ecc_send_receive() - send a command to the device and receive its
287 * response.
288 * @client: i2c client device
289 * @cmd : structure used to communicate with the device
290 *
291 * After the device receives a Wake token, a watchdog counter starts within the
292 * device. After the watchdog timer expires, the device enters sleep mode
293 * regardless of whether some I/O transmission or command execution is in
294 * progress. If a command is attempted when insufficient time remains prior to
295 * watchdog timer execution, the device will return the watchdog timeout error
296 * code without attempting to execute the command. There is no way to reset the
297 * counter other than to put the device into sleep or idle mode and then
298 * wake it up again.
299 */
300static int atmel_ecc_send_receive(struct i2c_client *client,
301 struct atmel_ecc_cmd *cmd)
302{
303 struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
304 int ret;
305
306 mutex_lock(&i2c_priv->lock);
307
308 ret = atmel_ecc_wakeup(client);
309 if (ret)
310 goto err;
311
312 /* send the command */
313 ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
314 if (ret < 0)
315 goto err;
316
317 /* delay the appropriate amount of time for command to execute */
318 msleep(cmd->msecs);
319
320 /* receive the response */
321 ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
322 if (ret < 0)
323 goto err;
324
325 /* put the device into low-power mode */
326 ret = atmel_ecc_sleep(client);
327 if (ret < 0)
328 goto err;
329
330 mutex_unlock(&i2c_priv->lock);
331 return atmel_ecc_status(&client->dev, cmd->data);
332err:
333 mutex_unlock(&i2c_priv->lock);
334 return ret;
335}
336
337static void atmel_ecc_work_handler(struct work_struct *work)
338{
339 struct atmel_ecc_work_data *work_data =
340 container_of(work, struct atmel_ecc_work_data, work);
341 struct atmel_ecc_cmd *cmd = &work_data->cmd;
342 struct i2c_client *client = work_data->ctx->client;
343 int status;
344
345 status = atmel_ecc_send_receive(client, cmd);
346 work_data->cbk(work_data, work_data->areq, status);
347}
348
349static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
350 void (*cbk)(struct atmel_ecc_work_data *work_data,
351 void *areq, int status),
352 void *areq)
353{
354 work_data->cbk = (void *)cbk;
355 work_data->areq = areq;
356
357 INIT_WORK(&work_data->work, atmel_ecc_work_handler);
358 schedule_work(&work_data->work);
359}
360
361static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) 76static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
362{ 77{
363 if (curve_id == ECC_CURVE_NIST_P256) 78 if (curve_id == ECC_CURVE_NIST_P256)
@@ -374,7 +89,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
374 unsigned int len) 89 unsigned int len)
375{ 90{
376 struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); 91 struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
377 struct atmel_ecc_cmd *cmd; 92 struct atmel_i2c_cmd *cmd;
378 void *public_key; 93 void *public_key;
379 struct ecdh params; 94 struct ecdh params;
380 int ret = -ENOMEM; 95 int ret = -ENOMEM;
@@ -412,9 +127,9 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
412 ctx->do_fallback = false; 127 ctx->do_fallback = false;
413 ctx->curve_id = params.curve_id; 128 ctx->curve_id = params.curve_id;
414 129
415 atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2); 130 atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
416 131
417 ret = atmel_ecc_send_receive(ctx->client, cmd); 132 ret = atmel_i2c_send_receive(ctx->client, cmd);
418 if (ret) 133 if (ret)
419 goto free_public_key; 134 goto free_public_key;
420 135
@@ -444,6 +159,9 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
444 return crypto_kpp_generate_public_key(req); 159 return crypto_kpp_generate_public_key(req);
445 } 160 }
446 161
162 if (!ctx->public_key)
163 return -EINVAL;
164
447 /* might want less than we've got */ 165 /* might want less than we've got */
448 nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len); 166 nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
449 167
@@ -461,7 +179,7 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
461{ 179{
462 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); 180 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
463 struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); 181 struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
464 struct atmel_ecc_work_data *work_data; 182 struct atmel_i2c_work_data *work_data;
465 gfp_t gfp; 183 gfp_t gfp;
466 int ret; 184 int ret;
467 185
@@ -482,12 +200,13 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
482 return -ENOMEM; 200 return -ENOMEM;
483 201
484 work_data->ctx = ctx; 202 work_data->ctx = ctx;
203 work_data->client = ctx->client;
485 204
486 ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src); 205 ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src);
487 if (ret) 206 if (ret)
488 goto free_work_data; 207 goto free_work_data;
489 208
490 atmel_ecc_enqueue(work_data, atmel_ecdh_done, req); 209 atmel_i2c_enqueue(work_data, atmel_ecdh_done, req);
491 210
492 return -EINPROGRESS; 211 return -EINPROGRESS;
493 212
@@ -498,7 +217,7 @@ free_work_data:
498 217
499static struct i2c_client *atmel_ecc_i2c_client_alloc(void) 218static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
500{ 219{
501 struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL; 220 struct atmel_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
502 struct i2c_client *client = ERR_PTR(-ENODEV); 221 struct i2c_client *client = ERR_PTR(-ENODEV);
503 int min_tfm_cnt = INT_MAX; 222 int min_tfm_cnt = INT_MAX;
504 int tfm_cnt; 223 int tfm_cnt;
@@ -533,7 +252,7 @@ static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
533 252
534static void atmel_ecc_i2c_client_free(struct i2c_client *client) 253static void atmel_ecc_i2c_client_free(struct i2c_client *client)
535{ 254{
536 struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 255 struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
537 256
538 atomic_dec(&i2c_priv->tfm_count); 257 atomic_dec(&i2c_priv->tfm_count);
539} 258}
@@ -604,96 +323,18 @@ static struct kpp_alg atmel_ecdh = {
604 }, 323 },
605}; 324};
606 325
607static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
608{
609 u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
610
611 /* return the size of the wake_token in bytes */
612 return DIV_ROUND_UP(no_of_bits, 8);
613}
614
615static int device_sanity_check(struct i2c_client *client)
616{
617 struct atmel_ecc_cmd *cmd;
618 int ret;
619
620 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
621 if (!cmd)
622 return -ENOMEM;
623
624 atmel_ecc_init_read_cmd(cmd);
625
626 ret = atmel_ecc_send_receive(client, cmd);
627 if (ret)
628 goto free_cmd;
629
630 /*
631 * It is vital that the Configuration, Data and OTP zones be locked
632 * prior to release into the field of the system containing the device.
633 * Failure to lock these zones may permit modification of any secret
634 * keys and may lead to other security problems.
635 */
636 if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
637 dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
638 ret = -ENOTSUPP;
639 }
640
641 /* fall through */
642free_cmd:
643 kfree(cmd);
644 return ret;
645}
646
647static int atmel_ecc_probe(struct i2c_client *client, 326static int atmel_ecc_probe(struct i2c_client *client,
648 const struct i2c_device_id *id) 327 const struct i2c_device_id *id)
649{ 328{
650 struct atmel_ecc_i2c_client_priv *i2c_priv; 329 struct atmel_i2c_client_priv *i2c_priv;
651 struct device *dev = &client->dev;
652 int ret; 330 int ret;
653 u32 bus_clk_rate;
654
655 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
656 dev_err(dev, "I2C_FUNC_I2C not supported\n");
657 return -ENODEV;
658 }
659 331
660 ret = of_property_read_u32(client->adapter->dev.of_node, 332 ret = atmel_i2c_probe(client, id);
661 "clock-frequency", &bus_clk_rate);
662 if (ret) {
663 dev_err(dev, "of: failed to read clock-frequency property\n");
664 return ret;
665 }
666
667 if (bus_clk_rate > 1000000L) {
668 dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
669 bus_clk_rate);
670 return -EINVAL;
671 }
672
673 i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
674 if (!i2c_priv)
675 return -ENOMEM;
676
677 i2c_priv->client = client;
678 mutex_init(&i2c_priv->lock);
679
680 /*
681 * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
682 * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz
683 * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
684 */
685 i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
686
687 memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
688
689 atomic_set(&i2c_priv->tfm_count, 0);
690
691 i2c_set_clientdata(client, i2c_priv);
692
693 ret = device_sanity_check(client);
694 if (ret) 333 if (ret)
695 return ret; 334 return ret;
696 335
336 i2c_priv = i2c_get_clientdata(client);
337
697 spin_lock(&driver_data.i2c_list_lock); 338 spin_lock(&driver_data.i2c_list_lock);
698 list_add_tail(&i2c_priv->i2c_client_list_node, 339 list_add_tail(&i2c_priv->i2c_client_list_node,
699 &driver_data.i2c_client_list); 340 &driver_data.i2c_client_list);
@@ -705,10 +346,10 @@ static int atmel_ecc_probe(struct i2c_client *client,
705 list_del(&i2c_priv->i2c_client_list_node); 346 list_del(&i2c_priv->i2c_client_list_node);
706 spin_unlock(&driver_data.i2c_list_lock); 347 spin_unlock(&driver_data.i2c_list_lock);
707 348
708 dev_err(dev, "%s alg registration failed\n", 349 dev_err(&client->dev, "%s alg registration failed\n",
709 atmel_ecdh.base.cra_driver_name); 350 atmel_ecdh.base.cra_driver_name);
710 } else { 351 } else {
711 dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n"); 352 dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
712 } 353 }
713 354
714 return ret; 355 return ret;
@@ -716,7 +357,7 @@ static int atmel_ecc_probe(struct i2c_client *client,
716 357
717static int atmel_ecc_remove(struct i2c_client *client) 358static int atmel_ecc_remove(struct i2c_client *client)
718{ 359{
719 struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); 360 struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
720 361
721 /* Return EBUSY if i2c client already allocated. */ 362 /* Return EBUSY if i2c client already allocated. */
722 if (atomic_read(&i2c_priv->tfm_count)) { 363 if (atomic_read(&i2c_priv->tfm_count)) {
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
deleted file mode 100644
index 643a3b947338..000000000000
--- a/drivers/crypto/atmel-ecc.h
+++ /dev/null
@@ -1,116 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2017, Microchip Technology Inc.
4 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
5 */
6
7#ifndef __ATMEL_ECC_H__
8#define __ATMEL_ECC_H__
9
10#define ATMEL_ECC_PRIORITY 300
11
12#define COMMAND 0x03 /* packet function */
13#define SLEEP_TOKEN 0x01
14#define WAKE_TOKEN_MAX_SIZE 8
15
16/* Definitions of Data and Command sizes */
17#define WORD_ADDR_SIZE 1
18#define COUNT_SIZE 1
19#define CRC_SIZE 2
20#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
21
22/* size in bytes of the n prime */
23#define ATMEL_ECC_NIST_P256_N_SIZE 32
24#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
25
26#define STATUS_RSP_SIZE 4
27#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
28#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
29 CMD_OVERHEAD_SIZE)
30#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
31#define MAX_RSP_SIZE GENKEY_RSP_SIZE
32
33/**
34 * atmel_ecc_cmd - structure used for communicating with the device.
35 * @word_addr: indicates the function of the packet sent to the device. This
36 * byte should have a value of COMMAND for normal operation.
37 * @count : number of bytes to be transferred to (or from) the device.
38 * @opcode : the command code.
39 * @param1 : the first parameter; always present.
40 * @param2 : the second parameter; always present.
41 * @data : optional remaining input data. Includes a 2-byte CRC.
42 * @rxsize : size of the data received from i2c client.
43 * @msecs : command execution time in milliseconds
44 */
45struct atmel_ecc_cmd {
46 u8 word_addr;
47 u8 count;
48 u8 opcode;
49 u8 param1;
50 u16 param2;
51 u8 data[MAX_RSP_SIZE];
52 u8 msecs;
53 u16 rxsize;
54} __packed;
55
56/* Status/Error codes */
57#define STATUS_SIZE 0x04
58#define STATUS_NOERR 0x00
59#define STATUS_WAKE_SUCCESSFUL 0x11
60
61static const struct {
62 u8 value;
63 const char *error_text;
64} error_list[] = {
65 { 0x01, "CheckMac or Verify miscompare" },
66 { 0x03, "Parse Error" },
67 { 0x05, "ECC Fault" },
68 { 0x0F, "Execution Error" },
69 { 0xEE, "Watchdog about to expire" },
70 { 0xFF, "CRC or other communication error" },
71};
72
73/* Definitions for eeprom organization */
74#define CONFIG_ZONE 0
75
76/* Definitions for Indexes common to all commands */
77#define RSP_DATA_IDX 1 /* buffer index of data in response */
78#define DATA_SLOT_2 2 /* used for ECDH private key */
79
80/* Definitions for the device lock state */
81#define DEVICE_LOCK_ADDR 0x15
82#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
83#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
84
85/*
86 * Wake High delay to data communication (microseconds). SDA should be stable
87 * high for this entire duration.
88 */
89#define TWHI_MIN 1500
90#define TWHI_MAX 1550
91
92/* Wake Low duration */
93#define TWLO_USEC 60
94
95/* Command execution time (milliseconds) */
96#define MAX_EXEC_TIME_ECDH 58
97#define MAX_EXEC_TIME_GENKEY 115
98#define MAX_EXEC_TIME_READ 1
99
100/* Command opcode */
101#define OPCODE_ECDH 0x43
102#define OPCODE_GENKEY 0x40
103#define OPCODE_READ 0x02
104
105/* Definitions for the READ Command */
106#define READ_COUNT 7
107
108/* Definitions for the GenKey Command */
109#define GENKEY_COUNT 7
110#define GENKEY_MODE_PRIVATE 0x04
111
112/* Definitions for the ECDH Command */
113#define ECDH_COUNT 71
114#define ECDH_PREFIX_MODE 0x00
115
116#endif /* __ATMEL_ECC_H__ */
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
new file mode 100644
index 000000000000..dc876fab2882
--- /dev/null
+++ b/drivers/crypto/atmel-i2c.c
@@ -0,0 +1,364 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Microchip / Atmel ECC (I2C) driver.
4 *
5 * Copyright (c) 2017, Microchip Technology Inc.
6 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
7 */
8
9#include <linux/bitrev.h>
10#include <linux/crc16.h>
11#include <linux/delay.h>
12#include <linux/device.h>
13#include <linux/err.h>
14#include <linux/errno.h>
15#include <linux/i2c.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/scatterlist.h>
20#include <linux/slab.h>
21#include <linux/workqueue.h>
22#include "atmel-i2c.h"
23
24/**
25 * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
26 * CRC16 verification of the count, opcode, param1, param2 and data bytes.
27 * The checksum is saved in little-endian format in the least significant
28 * two bytes of the command. CRC polynomial is 0x8005 and the initial register
29 * value should be zero.
30 *
31 * @cmd : structure used for communicating with the device.
32 */
33static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd)
34{
35 u8 *data = &cmd->count;
36 size_t len = cmd->count - CRC_SIZE;
37 __le16 *__crc16 = (__le16 *)(data + len);
38
39 *__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len)));
40}
41
42void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
43{
44 cmd->word_addr = COMMAND;
45 cmd->opcode = OPCODE_READ;
46 /*
47 * Read the word from Configuration zone that contains the lock bytes
48 * (UserExtra, Selector, LockValue, LockConfig).
49 */
50 cmd->param1 = CONFIG_ZONE;
51 cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR);
52 cmd->count = READ_COUNT;
53
54 atmel_i2c_checksum(cmd);
55
56 cmd->msecs = MAX_EXEC_TIME_READ;
57 cmd->rxsize = READ_RSP_SIZE;
58}
59EXPORT_SYMBOL(atmel_i2c_init_read_cmd);
60
61void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd)
62{
63 cmd->word_addr = COMMAND;
64 cmd->opcode = OPCODE_RANDOM;
65 cmd->param1 = 0;
66 cmd->param2 = 0;
67 cmd->count = RANDOM_COUNT;
68
69 atmel_i2c_checksum(cmd);
70
71 cmd->msecs = MAX_EXEC_TIME_RANDOM;
72 cmd->rxsize = RANDOM_RSP_SIZE;
73}
74EXPORT_SYMBOL(atmel_i2c_init_random_cmd);
75
76void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid)
77{
78 cmd->word_addr = COMMAND;
79 cmd->count = GENKEY_COUNT;
80 cmd->opcode = OPCODE_GENKEY;
81 cmd->param1 = GENKEY_MODE_PRIVATE;
82 /* a random private key will be generated and stored in slot keyID */
83 cmd->param2 = cpu_to_le16(keyid);
84
85 atmel_i2c_checksum(cmd);
86
87 cmd->msecs = MAX_EXEC_TIME_GENKEY;
88 cmd->rxsize = GENKEY_RSP_SIZE;
89}
90EXPORT_SYMBOL(atmel_i2c_init_genkey_cmd);
91
92int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
93 struct scatterlist *pubkey)
94{
95 size_t copied;
96
97 cmd->word_addr = COMMAND;
98 cmd->count = ECDH_COUNT;
99 cmd->opcode = OPCODE_ECDH;
100 cmd->param1 = ECDH_PREFIX_MODE;
101 /* private key slot */
102 cmd->param2 = cpu_to_le16(DATA_SLOT_2);
103
104 /*
105 * The device only supports NIST P256 ECC keys. The public key size will
106 * always be the same. Use a macro for the key size to avoid unnecessary
107 * computations.
108 */
109 copied = sg_copy_to_buffer(pubkey,
110 sg_nents_for_len(pubkey,
111 ATMEL_ECC_PUBKEY_SIZE),
112 cmd->data, ATMEL_ECC_PUBKEY_SIZE);
113 if (copied != ATMEL_ECC_PUBKEY_SIZE)
114 return -EINVAL;
115
116 atmel_i2c_checksum(cmd);
117
118 cmd->msecs = MAX_EXEC_TIME_ECDH;
119 cmd->rxsize = ECDH_RSP_SIZE;
120
121 return 0;
122}
123EXPORT_SYMBOL(atmel_i2c_init_ecdh_cmd);
124
125/*
126 * After wake and after execution of a command, there will be error, status, or
127 * result bytes in the device's output register that can be retrieved by the
128 * system. When the length of that group is four bytes, the codes returned are
129 * detailed in error_list.
130 */
131static int atmel_i2c_status(struct device *dev, u8 *status)
132{
133 size_t err_list_len = ARRAY_SIZE(error_list);
134 int i;
135 u8 err_id = status[1];
136
137 if (*status != STATUS_SIZE)
138 return 0;
139
140 if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
141 return 0;
142
143 for (i = 0; i < err_list_len; i++)
144 if (error_list[i].value == err_id)
145 break;
146
147 /* if err_id is not in the error_list then ignore it */
148 if (i != err_list_len) {
149 dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
150 return err_id;
151 }
152
153 return 0;
154}
155
156static int atmel_i2c_wakeup(struct i2c_client *client)
157{
158 struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
159 u8 status[STATUS_RSP_SIZE];
160 int ret;
161
162 /*
163 * The device ignores any levels or transitions on the SCL pin when the
164 * device is idle, asleep or during waking up. Don't check for error
165 * when waking up the device.
166 */
167 i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
168
169 /*
170 * Wait to wake the device. Typical execution times for ecdh and genkey
171 * are around tens of milliseconds. Delta is chosen to 50 microseconds.
172 */
173 usleep_range(TWHI_MIN, TWHI_MAX);
174
175 ret = i2c_master_recv(client, status, STATUS_SIZE);
176 if (ret < 0)
177 return ret;
178
179 return atmel_i2c_status(&client->dev, status);
180}
181
182static int atmel_i2c_sleep(struct i2c_client *client)
183{
184 u8 sleep = SLEEP_TOKEN;
185
186 return i2c_master_send(client, &sleep, 1);
187}
188
189/*
190 * atmel_i2c_send_receive() - send a command to the device and receive its
191 * response.
192 * @client: i2c client device
193 * @cmd : structure used to communicate with the device
194 *
195 * After the device receives a Wake token, a watchdog counter starts within the
196 * device. After the watchdog timer expires, the device enters sleep mode
197 * regardless of whether some I/O transmission or command execution is in
198 * progress. If a command is attempted when insufficient time remains prior to
199 * watchdog timer execution, the device will return the watchdog timeout error
200 * code without attempting to execute the command. There is no way to reset the
201 * counter other than to put the device into sleep or idle mode and then
202 * wake it up again.
203 */
204int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd)
205{
206 struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
207 int ret;
208
209 mutex_lock(&i2c_priv->lock);
210
211 ret = atmel_i2c_wakeup(client);
212 if (ret)
213 goto err;
214
215 /* send the command */
216 ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
217 if (ret < 0)
218 goto err;
219
220 /* delay the appropriate amount of time for command to execute */
221 msleep(cmd->msecs);
222
223 /* receive the response */
224 ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
225 if (ret < 0)
226 goto err;
227
228 /* put the device into low-power mode */
229 ret = atmel_i2c_sleep(client);
230 if (ret < 0)
231 goto err;
232
233 mutex_unlock(&i2c_priv->lock);
234 return atmel_i2c_status(&client->dev, cmd->data);
235err:
236 mutex_unlock(&i2c_priv->lock);
237 return ret;
238}
239EXPORT_SYMBOL(atmel_i2c_send_receive);
240
241static void atmel_i2c_work_handler(struct work_struct *work)
242{
243 struct atmel_i2c_work_data *work_data =
244 container_of(work, struct atmel_i2c_work_data, work);
245 struct atmel_i2c_cmd *cmd = &work_data->cmd;
246 struct i2c_client *client = work_data->client;
247 int status;
248
249 status = atmel_i2c_send_receive(client, cmd);
250 work_data->cbk(work_data, work_data->areq, status);
251}
252
253void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
254 void (*cbk)(struct atmel_i2c_work_data *work_data,
255 void *areq, int status),
256 void *areq)
257{
258 work_data->cbk = (void *)cbk;
259 work_data->areq = areq;
260
261 INIT_WORK(&work_data->work, atmel_i2c_work_handler);
262 schedule_work(&work_data->work);
263}
264EXPORT_SYMBOL(atmel_i2c_enqueue);
265
266static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate)
267{
268 u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
269
270 /* return the size of the wake_token in bytes */
271 return DIV_ROUND_UP(no_of_bits, 8);
272}
273
274static int device_sanity_check(struct i2c_client *client)
275{
276 struct atmel_i2c_cmd *cmd;
277 int ret;
278
279 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
280 if (!cmd)
281 return -ENOMEM;
282
283 atmel_i2c_init_read_cmd(cmd);
284
285 ret = atmel_i2c_send_receive(client, cmd);
286 if (ret)
287 goto free_cmd;
288
289 /*
290 * It is vital that the Configuration, Data and OTP zones be locked
291 * prior to release into the field of the system containing the device.
292 * Failure to lock these zones may permit modification of any secret
293 * keys and may lead to other security problems.
294 */
295 if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
296 dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
297 ret = -ENOTSUPP;
298 }
299
300 /* fall through */
301free_cmd:
302 kfree(cmd);
303 return ret;
304}
305
306int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
307{
308 struct atmel_i2c_client_priv *i2c_priv;
309 struct device *dev = &client->dev;
310 int ret;
311 u32 bus_clk_rate;
312
313 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
314 dev_err(dev, "I2C_FUNC_I2C not supported\n");
315 return -ENODEV;
316 }
317
318 bus_clk_rate = i2c_acpi_find_bus_speed(&client->adapter->dev);
319 if (!bus_clk_rate) {
320 ret = device_property_read_u32(&client->adapter->dev,
321 "clock-frequency", &bus_clk_rate);
322 if (ret) {
323 dev_err(dev, "failed to read clock-frequency property\n");
324 return ret;
325 }
326 }
327
328 if (bus_clk_rate > 1000000L) {
329 dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
330 bus_clk_rate);
331 return -EINVAL;
332 }
333
334 i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
335 if (!i2c_priv)
336 return -ENOMEM;
337
338 i2c_priv->client = client;
339 mutex_init(&i2c_priv->lock);
340
341 /*
342 * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
343 * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz
344 * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
345 */
346 i2c_priv->wake_token_sz = atmel_i2c_wake_token_sz(bus_clk_rate);
347
348 memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
349
350 atomic_set(&i2c_priv->tfm_count, 0);
351
352 i2c_set_clientdata(client, i2c_priv);
353
354 ret = device_sanity_check(client);
355 if (ret)
356 return ret;
357
358 return 0;
359}
360EXPORT_SYMBOL(atmel_i2c_probe);
361
362MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
363MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
364MODULE_LICENSE("GPL v2");
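atmel_i2c_checksum() above combines two kernel helpers: crc16() (the LSB-first, reflected form of the 0x8005 polynomial) and bitrev16(), and stores the result little-endian in the last two bytes of the command. A standalone user-space sketch mirroring that sequence, assuming the same count/opcode/param byte layout as struct atmel_i2c_cmd:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* LSB-first CRC-16 with the reflected 0x8005 polynomial (0xA001), as lib/crc16 computes it */
static uint16_t crc16_lsb(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

/* equivalent of bitrev16(): reverse the bit order of the 16-bit result */
static uint16_t bitrev16_sketch(uint16_t x)
{
	uint16_t r = 0;

	for (int i = 0; i < 16; i++, x >>= 1)
		r = (r << 1) | (x & 1);
	return r;
}

/* append the checksum over cmd[0..len-1], least significant byte first */
static void atmel_checksum_sketch(uint8_t *cmd, size_t len)
{
	uint16_t crc = bitrev16_sketch(crc16_lsb(0, cmd, len));

	cmd[len] = crc & 0xff;
	cmd[len + 1] = crc >> 8;
}

int main(void)
{
	/* count=READ_COUNT(7), opcode=OPCODE_READ, param1=CONFIG_ZONE, param2=DEVICE_LOCK_ADDR (LE) */
	uint8_t cmd[7] = { 0x07, 0x02, 0x00, 0x15, 0x00 };

	/* CRC covers the 5 bytes before the 2-byte checksum (count - CRC_SIZE) */
	atmel_checksum_sketch(cmd, 5);
	printf("crc bytes: %02x %02x\n", cmd[5], cmd[6]);
	return 0;
}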
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
new file mode 100644
index 000000000000..21860b99c3e3
--- /dev/null
+++ b/drivers/crypto/atmel-i2c.h
@@ -0,0 +1,197 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2017, Microchip Technology Inc.
4 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
5 */
6
7#ifndef __ATMEL_I2C_H__
8#define __ATMEL_I2C_H__
9
10#include <linux/hw_random.h>
11#include <linux/types.h>
12
13#define ATMEL_ECC_PRIORITY 300
14
15#define COMMAND 0x03 /* packet function */
16#define SLEEP_TOKEN 0x01
17#define WAKE_TOKEN_MAX_SIZE 8
18
19/* Definitions of Data and Command sizes */
20#define WORD_ADDR_SIZE 1
21#define COUNT_SIZE 1
22#define CRC_SIZE 2
23#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
24
25/* size in bytes of the n prime */
26#define ATMEL_ECC_NIST_P256_N_SIZE 32
27#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
28
29#define STATUS_RSP_SIZE 4
30#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
31#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
32 CMD_OVERHEAD_SIZE)
33#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
34#define RANDOM_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
35#define MAX_RSP_SIZE GENKEY_RSP_SIZE
36
37/**
38 * atmel_i2c_cmd - structure used for communicating with the device.
39 * @word_addr: indicates the function of the packet sent to the device. This
40 * byte should have a value of COMMAND for normal operation.
41 * @count : number of bytes to be transferred to (or from) the device.
42 * @opcode : the command code.
43 * @param1 : the first parameter; always present.
44 * @param2 : the second parameter; always present.
45 * @data : optional remaining input data. Includes a 2-byte CRC.
 46	 * @rxsize : size of the data received from the i2c client.
47 * @msecs : command execution time in milliseconds
48 */
49struct atmel_i2c_cmd {
50 u8 word_addr;
51 u8 count;
52 u8 opcode;
53 u8 param1;
54 __le16 param2;
55 u8 data[MAX_RSP_SIZE];
56 u8 msecs;
57 u16 rxsize;
58} __packed;
59
60/* Status/Error codes */
61#define STATUS_SIZE 0x04
62#define STATUS_NOERR 0x00
63#define STATUS_WAKE_SUCCESSFUL 0x11
64
65static const struct {
66 u8 value;
67 const char *error_text;
68} error_list[] = {
69 { 0x01, "CheckMac or Verify miscompare" },
70 { 0x03, "Parse Error" },
71 { 0x05, "ECC Fault" },
72 { 0x0F, "Execution Error" },
73 { 0xEE, "Watchdog about to expire" },
74 { 0xFF, "CRC or other communication error" },
75};
76
77/* Definitions for eeprom organization */
78#define CONFIG_ZONE 0
79
80/* Definitions for Indexes common to all commands */
81#define RSP_DATA_IDX 1 /* buffer index of data in response */
82#define DATA_SLOT_2 2 /* used for ECDH private key */
83
84/* Definitions for the device lock state */
85#define DEVICE_LOCK_ADDR 0x15
86#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
87#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
88
89/*
90 * Wake High delay to data communication (microseconds). SDA should be stable
91 * high for this entire duration.
92 */
93#define TWHI_MIN 1500
94#define TWHI_MAX 1550
95
96/* Wake Low duration */
97#define TWLO_USEC 60
98
99/* Command execution time (milliseconds) */
100#define MAX_EXEC_TIME_ECDH 58
101#define MAX_EXEC_TIME_GENKEY 115
102#define MAX_EXEC_TIME_READ 1
103#define MAX_EXEC_TIME_RANDOM 50
104
105/* Command opcode */
106#define OPCODE_ECDH 0x43
107#define OPCODE_GENKEY 0x40
108#define OPCODE_READ 0x02
109#define OPCODE_RANDOM 0x1b
110
111/* Definitions for the READ Command */
112#define READ_COUNT 7
113
114/* Definitions for the RANDOM Command */
115#define RANDOM_COUNT 7
116
117/* Definitions for the GenKey Command */
118#define GENKEY_COUNT 7
119#define GENKEY_MODE_PRIVATE 0x04
120
121/* Definitions for the ECDH Command */
122#define ECDH_COUNT 71
123#define ECDH_PREFIX_MODE 0x00
124
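Together with the struct atmel_i2c_cmd layout above, these opcode and count definitions describe the packet that atmel_i2c_init_read_cmd() is expected to assemble for the sanity check in atmel-i2c.c. A standalone sketch of that layout under those assumptions; the CRC is left as a placeholder because the checksum helper is not part of this hunk:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* constants mirrored from atmel-i2c.h */
#define COMMAND            0x03
#define READ_COUNT         7
#define OPCODE_READ        0x02
#define CONFIG_ZONE        0
#define DEVICE_LOCK_ADDR   0x15
#define MAX_EXEC_TIME_READ 1
#define READ_RSP_SIZE      (4 + 1 + 2)   /* data + count + CRC */

struct read_cmd {
	uint8_t word_addr;
	uint8_t count;     /* includes itself: count + opcode + param1 + param2 + CRC = 7 */
	uint8_t opcode;
	uint8_t param1;
	uint8_t param2[2]; /* little-endian on the wire */
	uint8_t crc[2];    /* computed over count..param2 by the driver's checksum helper */
};

static void init_read_cmd(struct read_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->word_addr = COMMAND;
	cmd->count     = READ_COUNT;
	cmd->opcode    = OPCODE_READ;
	cmd->param1    = CONFIG_ZONE;
	cmd->param2[0] = DEVICE_LOCK_ADDR;  /* low byte of the config word address */
	cmd->param2[1] = 0x00;
}

int main(void)
{
	struct read_cmd cmd;

	init_read_cmd(&cmd);
	printf("count=%u opcode=0x%02x, expect %u rx bytes after ~%u ms\n",
	       cmd.count, cmd.opcode, READ_RSP_SIZE, MAX_EXEC_TIME_READ);
	return 0;
}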
125/* Used for binding tfm objects to i2c clients. */
126struct atmel_ecc_driver_data {
127 struct list_head i2c_client_list;
128 spinlock_t i2c_list_lock;
129} ____cacheline_aligned;
130
131/**
132 * atmel_i2c_client_priv - i2c_client private data
133 * @client : pointer to i2c client device
134 * @i2c_client_list_node: part of i2c_client_list
135 * @lock : lock for sending i2c commands
136 * @wake_token : wake token array of zeros
137 * @wake_token_sz : size in bytes of the wake_token
138 * @tfm_count : number of active crypto transformations on i2c client
139 *
140 * Reads and writes from/to the i2c client are sequential. The first byte
141 * transmitted to the device is treated as the byte size. Any attempt to send
142 * more than this number of bytes will cause the device to not ACK those bytes.
143 * After the host writes a single command byte to the input buffer, reads are
144 * prohibited until after the device completes command execution. Use a mutex
145 * when sending i2c commands.
146 */
147struct atmel_i2c_client_priv {
148 struct i2c_client *client;
149 struct list_head i2c_client_list_node;
150 struct mutex lock;
151 u8 wake_token[WAKE_TOKEN_MAX_SIZE];
152 size_t wake_token_sz;
153 atomic_t tfm_count ____cacheline_aligned;
154 struct hwrng hwrng;
155};
156
157/**
158 * atmel_i2c_work_data - data structure representing the work
159 * @ctx : transformation context.
160 * @cbk : pointer to a callback function to be invoked upon completion of this
161 * request. This has the form:
162 * callback(struct atmel_i2c_work_data *work_data, void *areq, u8 status)
163 * where:
164 * @work_data: data structure representing the work
165 * @areq : optional pointer to an argument passed with the original
166 * request.
167 * @status : status returned from the i2c client device or i2c error.
168 * @areq: optional pointer to a user argument for use at callback time.
169 * @work: describes the task to be executed.
170 * @cmd : structure used for communicating with the device.
171 */
172struct atmel_i2c_work_data {
173 void *ctx;
174 struct i2c_client *client;
175 void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq,
176 int status);
177 void *areq;
178 struct work_struct work;
179 struct atmel_i2c_cmd cmd;
180};
181
182int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
183
184void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
185 void (*cbk)(struct atmel_i2c_work_data *work_data,
186 void *areq, int status),
187 void *areq);
188
189int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);
190
191void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd);
192void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd);
193void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid);
194int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
195 struct scatterlist *pubkey);
196
197#endif /* __ATMEL_I2C_H__ */
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
new file mode 100644
index 000000000000..ea0d2068ea4f
--- /dev/null
+++ b/drivers/crypto/atmel-sha204a.c
@@ -0,0 +1,171 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Microchip / Atmel SHA204A (I2C) driver.
4 *
5 * Copyright (c) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
6 */
7
8#include <linux/delay.h>
9#include <linux/device.h>
10#include <linux/err.h>
11#include <linux/errno.h>
12#include <linux/i2c.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/scatterlist.h>
17#include <linux/slab.h>
18#include <linux/workqueue.h>
19#include "atmel-i2c.h"
20
21static void atmel_sha204a_rng_done(struct atmel_i2c_work_data *work_data,
22 void *areq, int status)
23{
24 struct atmel_i2c_client_priv *i2c_priv = work_data->ctx;
25 struct hwrng *rng = areq;
26
27 if (status)
28 dev_warn_ratelimited(&i2c_priv->client->dev,
29 "i2c transaction failed (%d)\n",
30 status);
31
32 rng->priv = (unsigned long)work_data;
33 atomic_dec(&i2c_priv->tfm_count);
34}
35
36static int atmel_sha204a_rng_read_nonblocking(struct hwrng *rng, void *data,
37 size_t max)
38{
39 struct atmel_i2c_client_priv *i2c_priv;
40 struct atmel_i2c_work_data *work_data;
41
42 i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);
43
 44	 /* keep at most one asynchronous read in flight at any time */
45 if (!atomic_add_unless(&i2c_priv->tfm_count, 1, 1))
46 return 0;
47
48 if (rng->priv) {
49 work_data = (struct atmel_i2c_work_data *)rng->priv;
50 max = min(sizeof(work_data->cmd.data), max);
51 memcpy(data, &work_data->cmd.data, max);
52 rng->priv = 0;
53 } else {
54 work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC);
55 if (!work_data)
56 return -ENOMEM;
57
58 work_data->ctx = i2c_priv;
59 work_data->client = i2c_priv->client;
60
61 max = 0;
62 }
63
64 atmel_i2c_init_random_cmd(&work_data->cmd);
65 atmel_i2c_enqueue(work_data, atmel_sha204a_rng_done, rng);
66
67 return max;
68}
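The non-blocking path above uses atomic_add_unless() as a gate: the counter can only move from 0 to 1, so at most one asynchronous RANDOM command is queued at a time, and atmel_sha204a_rng_done() decrements it again. A userspace analogue of that single-in-flight pattern with C11 atomics (function names here are illustrative, not from the driver):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_flight;

/* add 1 unless the counter already equals the limit; returns 1 on success */
static int add_unless(atomic_int *v, int limit)
{
	int cur = atomic_load(v);

	while (cur != limit) {
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return 1;
	}
	return 0;
}

static int try_submit(void)
{
	if (!add_unless(&in_flight, 1))
		return 0;                 /* a request is already pending; report 0 bytes */

	/* ... queue the asynchronous RANDOM command here ... */
	return 1;
}

static void on_complete(void)
{
	atomic_fetch_sub(&in_flight, 1);  /* mirrors atomic_dec() in the done callback */
}

int main(void)
{
	printf("first submit:  %d\n", try_submit());  /* 1 */
	printf("second submit: %d\n", try_submit());  /* 0, still in flight */
	on_complete();
	printf("third submit:  %d\n", try_submit());  /* 1 */
	return 0;
}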
69
70static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
71 bool wait)
72{
73 struct atmel_i2c_client_priv *i2c_priv;
74 struct atmel_i2c_cmd cmd;
75 int ret;
76
77 if (!wait)
78 return atmel_sha204a_rng_read_nonblocking(rng, data, max);
79
80 i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);
81
82 atmel_i2c_init_random_cmd(&cmd);
83
84 ret = atmel_i2c_send_receive(i2c_priv->client, &cmd);
85 if (ret)
86 return ret;
87
88 max = min(sizeof(cmd.data), max);
89 memcpy(data, cmd.data, max);
90
91 return max;
92}
93
94static int atmel_sha204a_probe(struct i2c_client *client,
95 const struct i2c_device_id *id)
96{
97 struct atmel_i2c_client_priv *i2c_priv;
98 int ret;
99
100 ret = atmel_i2c_probe(client, id);
101 if (ret)
102 return ret;
103
104 i2c_priv = i2c_get_clientdata(client);
105
106 memset(&i2c_priv->hwrng, 0, sizeof(i2c_priv->hwrng));
107
108 i2c_priv->hwrng.name = dev_name(&client->dev);
109 i2c_priv->hwrng.read = atmel_sha204a_rng_read;
110 i2c_priv->hwrng.quality = 1024;
111
112 ret = hwrng_register(&i2c_priv->hwrng);
113 if (ret)
114 dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
115
116 return ret;
117}
118
119static int atmel_sha204a_remove(struct i2c_client *client)
120{
121 struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
122
123 if (atomic_read(&i2c_priv->tfm_count)) {
124 dev_err(&client->dev, "Device is busy\n");
125 return -EBUSY;
126 }
127
128 if (i2c_priv->hwrng.priv)
129 kfree((void *)i2c_priv->hwrng.priv);
130 hwrng_unregister(&i2c_priv->hwrng);
131
132 return 0;
133}
134
135static const struct of_device_id atmel_sha204a_dt_ids[] = {
136 { .compatible = "atmel,atsha204a", },
137 { /* sentinel */ }
138};
139MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);
140
141static const struct i2c_device_id atmel_sha204a_id[] = {
142 { "atsha204a", 0 },
143 { /* sentinel */ }
144};
145MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
146
147static struct i2c_driver atmel_sha204a_driver = {
148 .probe = atmel_sha204a_probe,
149 .remove = atmel_sha204a_remove,
150 .id_table = atmel_sha204a_id,
151
152 .driver.name = "atmel-sha204a",
153 .driver.of_match_table = of_match_ptr(atmel_sha204a_dt_ids),
154};
155
156static int __init atmel_sha204a_init(void)
157{
158 return i2c_add_driver(&atmel_sha204a_driver);
159}
160
161static void __exit atmel_sha204a_exit(void)
162{
163 flush_scheduled_work();
164 i2c_del_driver(&atmel_sha204a_driver);
165}
166
167module_init(atmel_sha204a_init);
168module_exit(atmel_sha204a_exit);
169
170MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
171MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 18410c9e7b29..869602fcfd96 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
85 * 0x70 - ring 2 85 * 0x70 - ring 2
86 * 0x78 - ring 3 86 * 0x78 - ring 3
87 */ 87 */
88char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 }; 88static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
89/* 89/*
90 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN 90 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
91 * is set dynamically after reading SPU type from device tree. 91 * is set dynamically after reading SPU type from device tree.
@@ -2083,7 +2083,7 @@ static int __ahash_init(struct ahash_request *req)
2083 * Return: true if incremental hashing is not supported 2083 * Return: true if incremental hashing is not supported
2084 * false otherwise 2084 * false otherwise
2085 */ 2085 */
2086bool spu_no_incr_hash(struct iproc_ctx_s *ctx) 2086static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2087{ 2087{
2088 struct spu_hw *spu = &iproc_priv.spu; 2088 struct spu_hw *spu = &iproc_priv.spu;
2089 2089
@@ -4809,7 +4809,7 @@ static int spu_dt_read(struct platform_device *pdev)
4809 return 0; 4809 return 0;
4810} 4810}
4811 4811
4812int bcm_spu_probe(struct platform_device *pdev) 4812static int bcm_spu_probe(struct platform_device *pdev)
4813{ 4813{
4814 struct device *dev = &pdev->dev; 4814 struct device *dev = &pdev->dev;
4815 struct spu_hw *spu = &iproc_priv.spu; 4815 struct spu_hw *spu = &iproc_priv.spu;
@@ -4853,7 +4853,7 @@ failure:
4853 return err; 4853 return err;
4854} 4854}
4855 4855
4856int bcm_spu_remove(struct platform_device *pdev) 4856static int bcm_spu_remove(struct platform_device *pdev)
4857{ 4857{
4858 int i; 4858 int i;
4859 struct device *dev = &pdev->dev; 4859 struct device *dev = &pdev->dev;
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index cb477259a2e2..2add51024575 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -38,21 +38,21 @@ enum spu2_proto_sel {
38 SPU2_DTLS_AEAD = 10 38 SPU2_DTLS_AEAD = 10
39}; 39};
40 40
41char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256", 41static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
42 "DES", "3DES" 42 "DES", "3DES"
43}; 43};
44 44
45char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS", 45static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB",
46 "CCM", "GCM" 46 "XTS", "CCM", "GCM"
47}; 47};
48 48
49char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256", 49static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
50 "Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384", 50 "Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
51 "SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256", 51 "SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
52 "SHA3-384", "SHA3-512" 52 "SHA3-384", "SHA3-512"
53}; 53};
54 54
55char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC", 55static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
56 "Rabin", "CCM", "GCM", "Reserved" 56 "Rabin", "CCM", "GCM", "Reserved"
57}; 57};
58 58
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 577c9844b322..3720ddabb507 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -2,6 +2,12 @@
2config CRYPTO_DEV_FSL_CAAM_COMMON 2config CRYPTO_DEV_FSL_CAAM_COMMON
3 tristate 3 tristate
4 4
5config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
6 tristate
7
8config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
9 tristate
10
5config CRYPTO_DEV_FSL_CAAM 11config CRYPTO_DEV_FSL_CAAM
6 tristate "Freescale CAAM-Multicore platform driver backend" 12 tristate "Freescale CAAM-Multicore platform driver backend"
7 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE 13 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
@@ -25,7 +31,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
25 Selecting this will enable printing of various debug 31 Selecting this will enable printing of various debug
26 information in the CAAM driver. 32 information in the CAAM driver.
27 33
28config CRYPTO_DEV_FSL_CAAM_JR 34menuconfig CRYPTO_DEV_FSL_CAAM_JR
29 tristate "Freescale CAAM Job Ring driver backend" 35 tristate "Freescale CAAM Job Ring driver backend"
30 default y 36 default y
31 help 37 help
@@ -86,8 +92,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
86 threshold. Range is 1-65535. 92 threshold. Range is 1-65535.
87 93
88config CRYPTO_DEV_FSL_CAAM_CRYPTO_API 94config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
89 tristate "Register algorithm implementations with the Crypto API" 95 bool "Register algorithm implementations with the Crypto API"
90 default y 96 default y
97 select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
91 select CRYPTO_AEAD 98 select CRYPTO_AEAD
92 select CRYPTO_AUTHENC 99 select CRYPTO_AUTHENC
93 select CRYPTO_BLKCIPHER 100 select CRYPTO_BLKCIPHER
@@ -97,13 +104,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
97 scatterlist crypto API (such as the linux native IPSec 104 scatterlist crypto API (such as the linux native IPSec
98 stack) to the SEC4 via job ring. 105 stack) to the SEC4 via job ring.
99 106
100 To compile this as a module, choose M here: the module
101 will be called caamalg.
102
103config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI 107config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
104 tristate "Queue Interface as Crypto API backend" 108 bool "Queue Interface as Crypto API backend"
105 depends on FSL_DPAA && NET 109 depends on FSL_DPAA && NET
106 default y 110 default y
111 select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
107 select CRYPTO_AUTHENC 112 select CRYPTO_AUTHENC
108 select CRYPTO_BLKCIPHER 113 select CRYPTO_BLKCIPHER
109 help 114 help
@@ -114,33 +119,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
114 assigned to the kernel should also be more than the number of 119 assigned to the kernel should also be more than the number of
115 job rings. 120 job rings.
116 121
117 To compile this as a module, choose M here: the module
118 will be called caamalg_qi.
119
120config CRYPTO_DEV_FSL_CAAM_AHASH_API 122config CRYPTO_DEV_FSL_CAAM_AHASH_API
121 tristate "Register hash algorithm implementations with Crypto API" 123 bool "Register hash algorithm implementations with Crypto API"
122 default y 124 default y
125 select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
123 select CRYPTO_HASH 126 select CRYPTO_HASH
124 help 127 help
125 Selecting this will offload ahash for users of the 128 Selecting this will offload ahash for users of the
126 scatterlist crypto API to the SEC4 via job ring. 129 scatterlist crypto API to the SEC4 via job ring.
127 130
128 To compile this as a module, choose M here: the module
129 will be called caamhash.
130
131config CRYPTO_DEV_FSL_CAAM_PKC_API 131config CRYPTO_DEV_FSL_CAAM_PKC_API
132 tristate "Register public key cryptography implementations with Crypto API" 132 bool "Register public key cryptography implementations with Crypto API"
133 default y 133 default y
134 select CRYPTO_RSA 134 select CRYPTO_RSA
135 help 135 help
136 Selecting this will allow SEC Public key support for RSA. 136 Selecting this will allow SEC Public key support for RSA.
137 Supported cryptographic primitives: encryption, decryption, 137 Supported cryptographic primitives: encryption, decryption,
138 signature and verification. 138 signature and verification.
139 To compile this as a module, choose M here: the module
140 will be called caam_pkc.
141 139
142config CRYPTO_DEV_FSL_CAAM_RNG_API 140config CRYPTO_DEV_FSL_CAAM_RNG_API
143 tristate "Register caam device for hwrng API" 141 bool "Register caam device for hwrng API"
144 default y 142 default y
145 select CRYPTO_RNG 143 select CRYPTO_RNG
146 select HW_RANDOM 144 select HW_RANDOM
@@ -148,9 +146,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
148 Selecting this will register the SEC4 hardware rng to 146 Selecting this will register the SEC4 hardware rng to
 149	 the hw_random API for supplying the kernel entropy pool. 147
150 148
151 To compile this as a module, choose M here: the module
152 will be called caamrng.
153
154endif # CRYPTO_DEV_FSL_CAAM_JR 149endif # CRYPTO_DEV_FSL_CAAM_JR
155 150
156endif # CRYPTO_DEV_FSL_CAAM 151endif # CRYPTO_DEV_FSL_CAAM
@@ -160,6 +155,8 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
160 depends on FSL_MC_DPIO 155 depends on FSL_MC_DPIO
161 depends on NETDEVICES 156 depends on NETDEVICES
162 select CRYPTO_DEV_FSL_CAAM_COMMON 157 select CRYPTO_DEV_FSL_CAAM_COMMON
158 select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
159 select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
163 select CRYPTO_BLKCIPHER 160 select CRYPTO_BLKCIPHER
164 select CRYPTO_AUTHENC 161 select CRYPTO_AUTHENC
165 select CRYPTO_AEAD 162 select CRYPTO_AEAD
@@ -171,12 +168,3 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
171 168
172 To compile this as a module, choose M here: the module 169 To compile this as a module, choose M here: the module
173 will be called dpaa2_caam. 170 will be called dpaa2_caam.
174
175config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
176 def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
177 CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
178 CRYPTO_DEV_FSL_DPAA2_CAAM)
179
180config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
181 def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
182 CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 7bbfd06a11ff..9ab4e81ea21e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -11,20 +11,20 @@ ccflags-y += -DVERSION=\"\"
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o 11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o 13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
14obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
15obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
16obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o 14obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
17obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
18obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o 15obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
19obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
20obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
21 16
22caam-objs := ctrl.o 17caam-y := ctrl.o
23caam_jr-objs := jr.o key_gen.o 18caam_jr-y := jr.o key_gen.o
24caam_pkc-y := caampkc.o pkc_desc.o 19caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
20caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
21caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
22caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
23caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
24
25caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
25ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) 26ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
26 ccflags-y += -DCONFIG_CAAM_QI 27 ccflags-y += -DCONFIG_CAAM_QI
27 caam-objs += qi.o
28endif 28endif
29 29
30obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o 30obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c0ece44f303b..43f18253e5b6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -77,13 +77,6 @@
77#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) 77#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
78#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 78#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
79 79
80#ifdef DEBUG
81/* for print_hex_dumps with line references */
82#define debug(format, arg...) printk(format, arg)
83#else
84#define debug(format, arg...)
85#endif
86
87struct caam_alg_entry { 80struct caam_alg_entry {
88 int class1_alg_type; 81 int class1_alg_type;
89 int class2_alg_type; 82 int class2_alg_type;
@@ -583,13 +576,11 @@ static int aead_setkey(struct crypto_aead *aead,
583 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 576 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
584 goto badkey; 577 goto badkey;
585 578
586#ifdef DEBUG 579 dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
587 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
588 keys.authkeylen + keys.enckeylen, keys.enckeylen, 580 keys.authkeylen + keys.enckeylen, keys.enckeylen,
589 keys.authkeylen); 581 keys.authkeylen);
590 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 582 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
591 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 583 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
592#endif
593 584
594 /* 585 /*
595 * If DKP is supported, use it in the shared descriptor to generate 586 * If DKP is supported, use it in the shared descriptor to generate
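This and the following hunks can drop the #ifdef DEBUG wrappers because dev_dbg() and print_hex_dump_debug() already compile to no-ops unless DEBUG or dynamic debug is enabled, so the explicit guards were redundant. A userspace analogue of the pattern (the macro name is illustrative, not the kernel's):

#include <stdio.h>

/* expands to nothing unless the translation unit defines DEBUG */
#ifdef DEBUG
#define dbg_print(...) fprintf(stderr, __VA_ARGS__)
#else
#define dbg_print(...) do { } while (0)
#endif

int main(void)
{
	int keylen = 32;

	/* no #ifdef needed at the call site; the macro itself is the guard */
	dbg_print("keylen %d\n", keylen);
	printf("done\n");
	return 0;
}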
@@ -623,11 +614,10 @@ static int aead_setkey(struct crypto_aead *aead,
623 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 614 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
624 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 615 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
625 keys.enckeylen, ctx->dir); 616 keys.enckeylen, ctx->dir);
626#ifdef DEBUG 617
627 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 618 print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
628 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 619 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
629 ctx->adata.keylen_pad + keys.enckeylen, 1); 620 ctx->adata.keylen_pad + keys.enckeylen, 1);
630#endif
631 621
632skip_split_key: 622skip_split_key:
633 ctx->cdata.keylen = keys.enckeylen; 623 ctx->cdata.keylen = keys.enckeylen;
@@ -678,10 +668,8 @@ static int gcm_setkey(struct crypto_aead *aead,
678 struct caam_ctx *ctx = crypto_aead_ctx(aead); 668 struct caam_ctx *ctx = crypto_aead_ctx(aead);
679 struct device *jrdev = ctx->jrdev; 669 struct device *jrdev = ctx->jrdev;
680 670
681#ifdef DEBUG 671 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
682 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 672 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
683 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
684#endif
685 673
686 memcpy(ctx->key, key, keylen); 674 memcpy(ctx->key, key, keylen);
687 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); 675 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
@@ -699,10 +687,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
699 if (keylen < 4) 687 if (keylen < 4)
700 return -EINVAL; 688 return -EINVAL;
701 689
702#ifdef DEBUG 690 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
703 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 691 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
704 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
705#endif
706 692
707 memcpy(ctx->key, key, keylen); 693 memcpy(ctx->key, key, keylen);
708 694
@@ -725,10 +711,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
725 if (keylen < 4) 711 if (keylen < 4)
726 return -EINVAL; 712 return -EINVAL;
727 713
728#ifdef DEBUG 714 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
729 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 715 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
730 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
731#endif
732 716
733 memcpy(ctx->key, key, keylen); 717 memcpy(ctx->key, key, keylen);
734 718
@@ -757,10 +741,8 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
757 OP_ALG_AAI_CTR_MOD128); 741 OP_ALG_AAI_CTR_MOD128);
758 const bool is_rfc3686 = alg->caam.rfc3686; 742 const bool is_rfc3686 = alg->caam.rfc3686;
759 743
760#ifdef DEBUG 744 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
761 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 745 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
762 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
763#endif
764 /* 746 /*
765 * AES-CTR needs to load IV in CONTEXT1 reg 747 * AES-CTR needs to load IV in CONTEXT1 reg
766 * at an offset of 128bits (16bytes) 748 * at an offset of 128bits (16bytes)
@@ -916,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
916 } 898 }
917 899
918 if (iv_dma) 900 if (iv_dma)
919 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 901 dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
920 if (sec4_sg_bytes) 902 if (sec4_sg_bytes)
921 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, 903 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
922 DMA_TO_DEVICE); 904 DMA_TO_DEVICE);
@@ -949,9 +931,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
949 struct aead_request *req = context; 931 struct aead_request *req = context;
950 struct aead_edesc *edesc; 932 struct aead_edesc *edesc;
951 933
952#ifdef DEBUG 934 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
953 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
954#endif
955 935
956 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 936 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
957 937
@@ -971,9 +951,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
971 struct aead_request *req = context; 951 struct aead_request *req = context;
972 struct aead_edesc *edesc; 952 struct aead_edesc *edesc;
973 953
974#ifdef DEBUG 954 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
975 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
976#endif
977 955
978 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 956 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
979 957
@@ -1001,33 +979,32 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1001 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 979 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1002 int ivsize = crypto_skcipher_ivsize(skcipher); 980 int ivsize = crypto_skcipher_ivsize(skcipher);
1003 981
1004#ifdef DEBUG 982 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1005 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1006#endif
1007 983
1008 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); 984 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
1009 985
1010 if (err) 986 if (err)
1011 caam_jr_strstatus(jrdev, err); 987 caam_jr_strstatus(jrdev, err);
1012 988
1013#ifdef DEBUG
1014 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
1015 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1016 edesc->src_nents > 1 ? 100 : ivsize, 1);
1017#endif
1018 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
1019 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1020 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1021
1022 skcipher_unmap(jrdev, edesc, req); 989 skcipher_unmap(jrdev, edesc, req);
1023 990
1024 /* 991 /*
1025 * The crypto API expects us to set the IV (req->iv) to the last 992 * The crypto API expects us to set the IV (req->iv) to the last
1026 * ciphertext block. This is used e.g. by the CTS mode. 993 * ciphertext block (CBC mode) or last counter (CTR mode).
994 * This is used e.g. by the CTS mode.
1027 */ 995 */
1028 if (ivsize) 996 if (ivsize) {
1029 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - 997 memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
1030 ivsize, ivsize, 0); 998 ivsize);
999
1000 print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
1001 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1002 edesc->src_nents > 1 ? 100 : ivsize, 1);
1003 }
1004
1005 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1006 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1007 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1031 1008
1032 kfree(edesc); 1009 kfree(edesc);
1033 1010
@@ -1039,26 +1016,35 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1039{ 1016{
1040 struct skcipher_request *req = context; 1017 struct skcipher_request *req = context;
1041 struct skcipher_edesc *edesc; 1018 struct skcipher_edesc *edesc;
1042#ifdef DEBUG
1043 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1019 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1044 int ivsize = crypto_skcipher_ivsize(skcipher); 1020 int ivsize = crypto_skcipher_ivsize(skcipher);
1045 1021
1046 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1022 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1047#endif
1048 1023
1049 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); 1024 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
1050 if (err) 1025 if (err)
1051 caam_jr_strstatus(jrdev, err); 1026 caam_jr_strstatus(jrdev, err);
1052 1027
1053#ifdef DEBUG 1028 skcipher_unmap(jrdev, edesc, req);
1054 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 1029
1055 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1030 /*
1056#endif 1031 * The crypto API expects us to set the IV (req->iv) to the last
1057 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 1032 * ciphertext block (CBC mode) or last counter (CTR mode).
1033 * This is used e.g. by the CTS mode.
1034 */
1035 if (ivsize) {
1036 memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
1037 ivsize);
1038
1039 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1040 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1041 ivsize, 1);
1042 }
1043
1044 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1058 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1045 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1059 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1046 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1060 1047
1061 skcipher_unmap(jrdev, edesc, req);
1062 kfree(edesc); 1048 kfree(edesc);
1063 1049
1064 skcipher_request_complete(req, err); 1050 skcipher_request_complete(req, err);
@@ -1106,6 +1092,7 @@ static void init_aead_job(struct aead_request *req,
1106 if (unlikely(req->src != req->dst)) { 1092 if (unlikely(req->src != req->dst)) {
1107 if (!edesc->mapped_dst_nents) { 1093 if (!edesc->mapped_dst_nents) {
1108 dst_dma = 0; 1094 dst_dma = 0;
1095 out_options = 0;
1109 } else if (edesc->mapped_dst_nents == 1) { 1096 } else if (edesc->mapped_dst_nents == 1) {
1110 dst_dma = sg_dma_address(req->dst); 1097 dst_dma = sg_dma_address(req->dst);
1111 out_options = 0; 1098 out_options = 0;
@@ -1249,6 +1236,7 @@ static void init_skcipher_job(struct skcipher_request *req,
1249{ 1236{
1250 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1237 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1251 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1238 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1239 struct device *jrdev = ctx->jrdev;
1252 int ivsize = crypto_skcipher_ivsize(skcipher); 1240 int ivsize = crypto_skcipher_ivsize(skcipher);
1253 u32 *desc = edesc->hw_desc; 1241 u32 *desc = edesc->hw_desc;
1254 u32 *sh_desc; 1242 u32 *sh_desc;
@@ -1256,13 +1244,12 @@ static void init_skcipher_job(struct skcipher_request *req,
1256 dma_addr_t src_dma, dst_dma, ptr; 1244 dma_addr_t src_dma, dst_dma, ptr;
1257 int len, sec4_sg_index = 0; 1245 int len, sec4_sg_index = 0;
1258 1246
1259#ifdef DEBUG 1247 print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
1260 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 1248 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1261 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1249 dev_dbg(jrdev, "asked=%d, cryptlen%d\n",
1262 pr_err("asked=%d, cryptlen%d\n",
1263 (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen); 1250 (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
1264#endif 1251
1265 caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", 1252 caam_dump_sg("src @" __stringify(__LINE__)": ",
1266 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1253 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1267 edesc->src_nents > 1 ? 100 : req->cryptlen, 1); 1254 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1268 1255
@@ -1285,7 +1272,7 @@ static void init_skcipher_job(struct skcipher_request *req,
1285 if (likely(req->src == req->dst)) { 1272 if (likely(req->src == req->dst)) {
1286 dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); 1273 dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
1287 out_options = in_options; 1274 out_options = in_options;
1288 } else if (edesc->mapped_dst_nents == 1) { 1275 } else if (!ivsize && edesc->mapped_dst_nents == 1) {
1289 dst_dma = sg_dma_address(req->dst); 1276 dst_dma = sg_dma_address(req->dst);
1290 } else { 1277 } else {
1291 dst_dma = edesc->sec4_sg_dma + sec4_sg_index * 1278 dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
@@ -1293,7 +1280,7 @@ static void init_skcipher_job(struct skcipher_request *req,
1293 out_options = LDST_SGF; 1280 out_options = LDST_SGF;
1294 } 1281 }
1295 1282
1296 append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); 1283 append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
1297} 1284}
1298 1285
1299/* 1286/*
@@ -1309,37 +1296,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1309 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1296 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1310 GFP_KERNEL : GFP_ATOMIC; 1297 GFP_KERNEL : GFP_ATOMIC;
1311 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1298 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1299 int src_len, dst_len = 0;
1312 struct aead_edesc *edesc; 1300 struct aead_edesc *edesc;
1313 int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; 1301 int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
1314 unsigned int authsize = ctx->authsize; 1302 unsigned int authsize = ctx->authsize;
1315 1303
1316 if (unlikely(req->dst != req->src)) { 1304 if (unlikely(req->dst != req->src)) {
1317 src_nents = sg_nents_for_len(req->src, req->assoclen + 1305 src_len = req->assoclen + req->cryptlen;
1318 req->cryptlen); 1306 dst_len = src_len + (encrypt ? authsize : (-authsize));
1307
1308 src_nents = sg_nents_for_len(req->src, src_len);
1319 if (unlikely(src_nents < 0)) { 1309 if (unlikely(src_nents < 0)) {
1320 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1310 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1321 req->assoclen + req->cryptlen); 1311 src_len);
1322 return ERR_PTR(src_nents); 1312 return ERR_PTR(src_nents);
1323 } 1313 }
1324 1314
1325 dst_nents = sg_nents_for_len(req->dst, req->assoclen + 1315 dst_nents = sg_nents_for_len(req->dst, dst_len);
1326 req->cryptlen +
1327 (encrypt ? authsize :
1328 (-authsize)));
1329 if (unlikely(dst_nents < 0)) { 1316 if (unlikely(dst_nents < 0)) {
1330 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", 1317 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1331 req->assoclen + req->cryptlen + 1318 dst_len);
1332 (encrypt ? authsize : (-authsize)));
1333 return ERR_PTR(dst_nents); 1319 return ERR_PTR(dst_nents);
1334 } 1320 }
1335 } else { 1321 } else {
1336 src_nents = sg_nents_for_len(req->src, req->assoclen + 1322 src_len = req->assoclen + req->cryptlen +
1337 req->cryptlen + 1323 (encrypt ? authsize : 0);
1338 (encrypt ? authsize : 0)); 1324
1325 src_nents = sg_nents_for_len(req->src, src_len);
1339 if (unlikely(src_nents < 0)) { 1326 if (unlikely(src_nents < 0)) {
1340 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1327 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1341 req->assoclen + req->cryptlen + 1328 src_len);
1342 (encrypt ? authsize : 0));
1343 return ERR_PTR(src_nents); 1329 return ERR_PTR(src_nents);
1344 } 1330 }
1345 } 1331 }
@@ -1380,8 +1366,16 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1380 } 1366 }
1381 } 1367 }
1382 1368
1369 /*
1370 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1371 * the end of the table by allocating more S/G entries.
1372 */
1383 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0; 1373 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
1384 sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1374 if (mapped_dst_nents > 1)
1375 sec4_sg_len += pad_sg_nents(mapped_dst_nents);
1376 else
1377 sec4_sg_len = pad_sg_nents(sec4_sg_len);
1378
1385 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 1379 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1386 1380
1387 /* allocate space for base edesc and hw desc commands, link tables */ 1381 /* allocate space for base edesc and hw desc commands, link tables */
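The new comment explains the over-allocation: the DMA engine fetches four S/G entries at a time, so the table must be padded so those reads never run past its end. A sketch of the assumed pad_sg_nents() behaviour — rounding up to a multiple of four, modelled on that comment; the real helper is defined elsewhere in this series:

#include <stdio.h>

/*
 * The CAAM DMA engine fetches S/G entries four at a time, so the driver
 * sizes the table up to the next multiple of four (assumed behaviour).
 */
static int pad_sg_nents(int nents)
{
	return (nents + 3) & ~3;
}

int main(void)
{
	int n;

	for (n = 1; n <= 9; n++)
		printf("%d mapped entries -> table sized for %d\n",
		       n, pad_sg_nents(n));
	return 0;
}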
@@ -1403,12 +1397,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1403 1397
1404 sec4_sg_index = 0; 1398 sec4_sg_index = 0;
1405 if (mapped_src_nents > 1) { 1399 if (mapped_src_nents > 1) {
1406 sg_to_sec4_sg_last(req->src, mapped_src_nents, 1400 sg_to_sec4_sg_last(req->src, src_len,
1407 edesc->sec4_sg + sec4_sg_index, 0); 1401 edesc->sec4_sg + sec4_sg_index, 0);
1408 sec4_sg_index += mapped_src_nents; 1402 sec4_sg_index += mapped_src_nents;
1409 } 1403 }
1410 if (mapped_dst_nents > 1) { 1404 if (mapped_dst_nents > 1) {
1411 sg_to_sec4_sg_last(req->dst, mapped_dst_nents, 1405 sg_to_sec4_sg_last(req->dst, dst_len,
1412 edesc->sec4_sg + sec4_sg_index, 0); 1406 edesc->sec4_sg + sec4_sg_index, 0);
1413 } 1407 }
1414 1408
@@ -1446,11 +1440,10 @@ static int gcm_encrypt(struct aead_request *req)
1446 1440
1447 /* Create and submit job descriptor */ 1441 /* Create and submit job descriptor */
1448 init_gcm_job(req, edesc, all_contig, true); 1442 init_gcm_job(req, edesc, all_contig, true);
1449#ifdef DEBUG 1443
1450 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1444 print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1451 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1445 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1452 desc_bytes(edesc->hw_desc), 1); 1446 desc_bytes(edesc->hw_desc), 1);
1453#endif
1454 1447
1455 desc = edesc->hw_desc; 1448 desc = edesc->hw_desc;
1456 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1449 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1556,11 +1549,10 @@ static int aead_encrypt(struct aead_request *req)
1556 1549
1557 /* Create and submit job descriptor */ 1550 /* Create and submit job descriptor */
1558 init_authenc_job(req, edesc, all_contig, true); 1551 init_authenc_job(req, edesc, all_contig, true);
1559#ifdef DEBUG 1552
1560 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1553 print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1561 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1554 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1562 desc_bytes(edesc->hw_desc), 1); 1555 desc_bytes(edesc->hw_desc), 1);
1563#endif
1564 1556
1565 desc = edesc->hw_desc; 1557 desc = edesc->hw_desc;
1566 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1558 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1591,11 +1583,10 @@ static int gcm_decrypt(struct aead_request *req)
1591 1583
1592 /* Create and submit job descriptor*/ 1584 /* Create and submit job descriptor*/
1593 init_gcm_job(req, edesc, all_contig, false); 1585 init_gcm_job(req, edesc, all_contig, false);
1594#ifdef DEBUG 1586
1595 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1587 print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1596 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1588 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1597 desc_bytes(edesc->hw_desc), 1); 1589 desc_bytes(edesc->hw_desc), 1);
1598#endif
1599 1590
1600 desc = edesc->hw_desc; 1591 desc = edesc->hw_desc;
1601 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 1592 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1627,7 +1618,7 @@ static int aead_decrypt(struct aead_request *req)
1627 u32 *desc; 1618 u32 *desc;
1628 int ret = 0; 1619 int ret = 0;
1629 1620
1630 caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ", 1621 caam_dump_sg("dec src@" __stringify(__LINE__)": ",
1631 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1622 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1632 req->assoclen + req->cryptlen, 1); 1623 req->assoclen + req->cryptlen, 1);
1633 1624
@@ -1639,11 +1630,10 @@ static int aead_decrypt(struct aead_request *req)
1639 1630
1640 /* Create and submit job descriptor*/ 1631 /* Create and submit job descriptor*/
1641 init_authenc_job(req, edesc, all_contig, false); 1632 init_authenc_job(req, edesc, all_contig, false);
1642#ifdef DEBUG 1633
1643 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1634 print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1644 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1635 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1645 desc_bytes(edesc->hw_desc), 1); 1636 desc_bytes(edesc->hw_desc), 1);
1646#endif
1647 1637
1648 desc = edesc->hw_desc; 1638 desc = edesc->hw_desc;
1649 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 1639 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1719,7 +1709,29 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1719 else 1709 else
1720 sec4_sg_ents = mapped_src_nents + !!ivsize; 1710 sec4_sg_ents = mapped_src_nents + !!ivsize;
1721 dst_sg_idx = sec4_sg_ents; 1711 dst_sg_idx = sec4_sg_ents;
1722 sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1712
1713 /*
1714 * Input, output HW S/G tables: [IV, src][dst, IV]
1715 * IV entries point to the same buffer
1716 * If src == dst, S/G entries are reused (S/G tables overlap)
1717 *
1718 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1719 * the end of the table by allocating more S/G entries. Logic:
1720 * if (output S/G)
1721 * pad output S/G, if needed
1722 * else if (input S/G) ...
1723 * pad input S/G, if needed
1724 */
1725 if (ivsize || mapped_dst_nents > 1) {
1726 if (req->src == req->dst)
1727 sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
1728 else
1729 sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
1730 !!ivsize);
1731 } else {
1732 sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
1733 }
1734
1723 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); 1735 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
1724 1736
1725 /* 1737 /*
@@ -1744,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1744 1756
1745 /* Make sure IV is located in a DMAable area */ 1757 /* Make sure IV is located in a DMAable area */
1746 if (ivsize) { 1758 if (ivsize) {
1747 iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; 1759 iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
1748 memcpy(iv, req->iv, ivsize); 1760 memcpy(iv, req->iv, ivsize);
1749 1761
1750 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); 1762 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
1751 if (dma_mapping_error(jrdev, iv_dma)) { 1763 if (dma_mapping_error(jrdev, iv_dma)) {
1752 dev_err(jrdev, "unable to map IV\n"); 1764 dev_err(jrdev, "unable to map IV\n");
1753 caam_unmap(jrdev, req->src, req->dst, src_nents, 1765 caam_unmap(jrdev, req->src, req->dst, src_nents,
@@ -1759,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1759 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); 1771 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1760 } 1772 }
1761 if (dst_sg_idx) 1773 if (dst_sg_idx)
1762 sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1774 sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
1763 !!ivsize, 0); 1775 !!ivsize, 0);
1764 1776
1765 if (mapped_dst_nents > 1) { 1777 if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
1766 sg_to_sec4_sg_last(req->dst, mapped_dst_nents, 1778 sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
1767 edesc->sec4_sg + dst_sg_idx, 0); 1779 dst_sg_idx, 0);
1768 } 1780
1781 if (ivsize)
1782 dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
1783 mapped_dst_nents, iv_dma, ivsize, 0);
1784
1785 if (ivsize || mapped_dst_nents > 1)
1786 sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
1787 mapped_dst_nents);
1769 1788
1770 if (sec4_sg_bytes) { 1789 if (sec4_sg_bytes) {
1771 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1790 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1782,11 +1801,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1782 1801
1783 edesc->iv_dma = iv_dma; 1802 edesc->iv_dma = iv_dma;
1784 1803
1785#ifdef DEBUG 1804 print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
1786 print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ", 1805 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1787 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1806 sec4_sg_bytes, 1);
1788 sec4_sg_bytes, 1);
1789#endif
1790 1807
1791 return edesc; 1808 return edesc;
1792} 1809}
@@ -1807,11 +1824,11 @@ static int skcipher_encrypt(struct skcipher_request *req)
1807 1824
1808 /* Create and submit job descriptor*/ 1825 /* Create and submit job descriptor*/
1809 init_skcipher_job(req, edesc, true); 1826 init_skcipher_job(req, edesc, true);
1810#ifdef DEBUG 1827
1811 print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ", 1828 print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
1812 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1829 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1813 desc_bytes(edesc->hw_desc), 1); 1830 desc_bytes(edesc->hw_desc), 1);
1814#endif 1831
1815 desc = edesc->hw_desc; 1832 desc = edesc->hw_desc;
1816 ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req); 1833 ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
1817 1834
@@ -1830,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
1830 struct skcipher_edesc *edesc; 1847 struct skcipher_edesc *edesc;
1831 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1848 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1832 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1849 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1833 int ivsize = crypto_skcipher_ivsize(skcipher);
1834 struct device *jrdev = ctx->jrdev; 1850 struct device *jrdev = ctx->jrdev;
1835 u32 *desc; 1851 u32 *desc;
1836 int ret = 0; 1852 int ret = 0;
@@ -1840,22 +1856,13 @@ static int skcipher_decrypt(struct skcipher_request *req)
1840 if (IS_ERR(edesc)) 1856 if (IS_ERR(edesc))
1841 return PTR_ERR(edesc); 1857 return PTR_ERR(edesc);
1842 1858
1843 /*
1844 * The crypto API expects us to set the IV (req->iv) to the last
1845 * ciphertext block.
1846 */
1847 if (ivsize)
1848 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
1849 ivsize, ivsize, 0);
1850
1851 /* Create and submit job descriptor*/ 1859 /* Create and submit job descriptor*/
1852 init_skcipher_job(req, edesc, false); 1860 init_skcipher_job(req, edesc, false);
1853 desc = edesc->hw_desc; 1861 desc = edesc->hw_desc;
1854#ifdef DEBUG 1862
1855 print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ", 1863 print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
1856 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1864 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1857 desc_bytes(edesc->hw_desc), 1); 1865 desc_bytes(edesc->hw_desc), 1);
1858#endif
1859 1866
1860 ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req); 1867 ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
1861 if (!ret) { 1868 if (!ret) {
@@ -3444,7 +3451,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
3444 caam_exit_common(crypto_aead_ctx(tfm)); 3451 caam_exit_common(crypto_aead_ctx(tfm));
3445} 3452}
3446 3453
3447static void __exit caam_algapi_exit(void) 3454void caam_algapi_exit(void)
3448{ 3455{
3449 int i; 3456 int i;
3450 3457
@@ -3489,43 +3496,15 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3489 alg->exit = caam_aead_exit; 3496 alg->exit = caam_aead_exit;
3490} 3497}
3491 3498
3492static int __init caam_algapi_init(void) 3499int caam_algapi_init(struct device *ctrldev)
3493{ 3500{
3494 struct device_node *dev_node; 3501 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
3495 struct platform_device *pdev;
3496 struct caam_drv_private *priv;
3497 int i = 0, err = 0; 3502 int i = 0, err = 0;
3498 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; 3503 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
3499 u32 arc4_inst; 3504 u32 arc4_inst;
3500 unsigned int md_limit = SHA512_DIGEST_SIZE; 3505 unsigned int md_limit = SHA512_DIGEST_SIZE;
3501 bool registered = false, gcm_support; 3506 bool registered = false, gcm_support;
3502 3507
3503 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3504 if (!dev_node) {
3505 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3506 if (!dev_node)
3507 return -ENODEV;
3508 }
3509
3510 pdev = of_find_device_by_node(dev_node);
3511 if (!pdev) {
3512 of_node_put(dev_node);
3513 return -ENODEV;
3514 }
3515
3516 priv = dev_get_drvdata(&pdev->dev);
3517 of_node_put(dev_node);
3518
3519 /*
3520 * If priv is NULL, it's probably because the caam driver wasn't
3521 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
3522 */
3523 if (!priv) {
3524 err = -ENODEV;
3525 goto out_put_dev;
3526 }
3527
3528
3529 /* 3508 /*
3530 * Register crypto algorithms the device supports. 3509 * Register crypto algorithms the device supports.
3531 * First, detect presence and attributes of DES, AES, and MD blocks. 3510 * First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3668,14 +3647,5 @@ static int __init caam_algapi_init(void)
3668 if (registered) 3647 if (registered)
3669 pr_info("caam algorithms registered in /proc/crypto\n"); 3648 pr_info("caam algorithms registered in /proc/crypto\n");
3670 3649
3671out_put_dev:
3672 put_device(&pdev->dev);
3673 return err; 3650 return err;
3674} 3651}
3675
3676module_init(caam_algapi_init);
3677module_exit(caam_algapi_exit);
3678
3679MODULE_LICENSE("GPL");
3680MODULE_DESCRIPTION("FSL CAAM support for crypto API");
3681MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 1e1a376edc2f..72531837571e 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -33,12 +33,11 @@ static inline void append_dec_op1(u32 *desc, u32 type)
33 } 33 }
34 34
35 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); 35 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
36 append_operation(desc, type | OP_ALG_AS_INITFINAL | 36 append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT);
37 OP_ALG_DECRYPT);
38 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); 37 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
39 set_jump_tgt_here(desc, jump_cmd); 38 set_jump_tgt_here(desc, jump_cmd);
40 append_operation(desc, type | OP_ALG_AS_INITFINAL | 39 append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT |
41 OP_ALG_DECRYPT | OP_ALG_AAI_DK); 40 OP_ALG_AAI_DK);
42 set_jump_tgt_here(desc, uncond_jump_cmd); 41 set_jump_tgt_here(desc, uncond_jump_cmd);
43} 42}
44 43
@@ -115,11 +114,9 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
115 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | 114 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
116 LDST_SRCDST_BYTE_CONTEXT); 115 LDST_SRCDST_BYTE_CONTEXT);
117 116
118#ifdef DEBUG 117 print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ",
119 print_hex_dump(KERN_ERR, 118 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
120 "aead null enc shdesc@" __stringify(__LINE__)": ", 119 1);
121 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
122#endif
123} 120}
124EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); 121EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
125 122
@@ -204,11 +201,9 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
204 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | 201 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
205 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); 202 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
206 203
207#ifdef DEBUG 204 print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ",
208 print_hex_dump(KERN_ERR, 205 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
209 "aead null dec shdesc@" __stringify(__LINE__)": ", 206 1);
210 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
211#endif
212} 207}
213EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); 208EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
214 209
@@ -358,10 +353,9 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
358 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | 353 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
359 LDST_SRCDST_BYTE_CONTEXT); 354 LDST_SRCDST_BYTE_CONTEXT);
360 355
361#ifdef DEBUG 356 print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ",
362 print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ", 357 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
363 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 358 1);
364#endif
365} 359}
366EXPORT_SYMBOL(cnstr_shdsc_aead_encap); 360EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
367 361
@@ -475,10 +469,9 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
475 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | 469 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
476 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); 470 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
477 471
478#ifdef DEBUG 472 print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ",
479 print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ", 473 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
480 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 474 1);
481#endif
482} 475}
483EXPORT_SYMBOL(cnstr_shdsc_aead_decap); 476EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
484 477
@@ -613,11 +606,9 @@ copy_iv:
613 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | 606 append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
614 LDST_SRCDST_BYTE_CONTEXT); 607 LDST_SRCDST_BYTE_CONTEXT);
615 608
616#ifdef DEBUG 609 print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ",
617 print_hex_dump(KERN_ERR, 610 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
618 "aead givenc shdesc@" __stringify(__LINE__)": ", 611 1);
619 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
620#endif
621} 612}
622EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); 613EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
623 614
@@ -742,10 +733,9 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
742 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | 733 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
743 LDST_SRCDST_BYTE_CONTEXT); 734 LDST_SRCDST_BYTE_CONTEXT);
744 735
745#ifdef DEBUG 736 print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ",
746 print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ", 737 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
747 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 738 1);
748#endif
749} 739}
750EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); 740EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
751 741
@@ -838,10 +828,9 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
838 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | 828 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
839 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); 829 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
840 830
841#ifdef DEBUG 831 print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ",
842 print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ", 832 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
843 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 833 1);
844#endif
845} 834}
846EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); 835EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
847 836
@@ -933,11 +922,9 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
933 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | 922 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
934 LDST_SRCDST_BYTE_CONTEXT); 923 LDST_SRCDST_BYTE_CONTEXT);
935 924
936#ifdef DEBUG 925 print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ",
937 print_hex_dump(KERN_ERR, 926 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
938 "rfc4106 enc shdesc@" __stringify(__LINE__)": ", 927 1);
939 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
940#endif
941} 928}
942EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); 929EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
943 930
@@ -1030,11 +1017,9 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
1030 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | 1017 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
1031 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); 1018 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1032 1019
1033#ifdef DEBUG 1020 print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ",
1034 print_hex_dump(KERN_ERR, 1021 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1035 "rfc4106 dec shdesc@" __stringify(__LINE__)": ", 1022 1);
1036 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1037#endif
1038} 1023}
1039EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); 1024EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
1040 1025
@@ -1115,11 +1100,9 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
1115 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | 1100 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
1116 LDST_SRCDST_BYTE_CONTEXT); 1101 LDST_SRCDST_BYTE_CONTEXT);
1117 1102
1118#ifdef DEBUG 1103 print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ",
1119 print_hex_dump(KERN_ERR, 1104 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1120 "rfc4543 enc shdesc@" __stringify(__LINE__)": ", 1105 1);
1121 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1122#endif
1123} 1106}
1124EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); 1107EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
1125 1108
@@ -1205,11 +1188,9 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
1205 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | 1188 append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
1206 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); 1189 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1207 1190
1208#ifdef DEBUG 1191 print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ",
1209 print_hex_dump(KERN_ERR, 1192 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1210 "rfc4543 dec shdesc@" __stringify(__LINE__)": ", 1193 1);
1211 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1212#endif
1213} 1194}
1214EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); 1195EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
1215 1196
@@ -1410,17 +1391,21 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
1410 LDST_OFFSET_SHIFT)); 1391 LDST_OFFSET_SHIFT));
1411 1392
1412 /* Load operation */ 1393 /* Load operation */
1413 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 1394 append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
1414 OP_ALG_ENCRYPT); 1395 OP_ALG_ENCRYPT);
1415 1396
1416 /* Perform operation */ 1397 /* Perform operation */
1417 skcipher_append_src_dst(desc); 1398 skcipher_append_src_dst(desc);
1418 1399
1419#ifdef DEBUG 1400 /* Store IV */
1420 print_hex_dump(KERN_ERR, 1401 if (ivsize)
1421 "skcipher enc shdesc@" __stringify(__LINE__)": ", 1402 append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
1422 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1403 LDST_CLASS_1_CCB | (ctx1_iv_off <<
1423#endif 1404 LDST_OFFSET_SHIFT));
1405
1406 print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ",
1407 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1408 1);
1424} 1409}
1425EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); 1410EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
1426 1411
@@ -1479,7 +1464,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
1479 1464
1480 /* Choose operation */ 1465 /* Choose operation */
1481 if (ctx1_iv_off) 1466 if (ctx1_iv_off)
1482 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 1467 append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
1483 OP_ALG_DECRYPT); 1468 OP_ALG_DECRYPT);
1484 else 1469 else
1485 append_dec_op1(desc, cdata->algtype); 1470 append_dec_op1(desc, cdata->algtype);
@@ -1487,11 +1472,15 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
1487 /* Perform operation */ 1472 /* Perform operation */
1488 skcipher_append_src_dst(desc); 1473 skcipher_append_src_dst(desc);
1489 1474
1490#ifdef DEBUG 1475 /* Store IV */
1491 print_hex_dump(KERN_ERR, 1476 if (ivsize)
1492 "skcipher dec shdesc@" __stringify(__LINE__)": ", 1477 append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
1493 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1478 LDST_CLASS_1_CCB | (ctx1_iv_off <<
1494#endif 1479 LDST_OFFSET_SHIFT));
1480
1481 print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ",
1482 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1483 1);
1495} 1484}
1496EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); 1485EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
1497 1486
@@ -1538,11 +1527,13 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
1538 /* Perform operation */ 1527 /* Perform operation */
1539 skcipher_append_src_dst(desc); 1528 skcipher_append_src_dst(desc);
1540 1529
1541#ifdef DEBUG 1530 /* Store upper 8B of IV */
1542 print_hex_dump(KERN_ERR, 1531 append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1543 "xts skcipher enc shdesc@" __stringify(__LINE__) ": ", 1532 (0x20 << LDST_OFFSET_SHIFT));
1544 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1533
1545#endif 1534 print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
1535 ": ", DUMP_PREFIX_ADDRESS, 16, 4,
1536 desc, desc_bytes(desc), 1);
1546} 1537}
1547EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); 1538EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
1548 1539
@@ -1588,11 +1579,13 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
1588 /* Perform operation */ 1579 /* Perform operation */
1589 skcipher_append_src_dst(desc); 1580 skcipher_append_src_dst(desc);
1590 1581
1591#ifdef DEBUG 1582 /* Store upper 8B of IV */
1592 print_hex_dump(KERN_ERR, 1583 append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1593 "xts skcipher dec shdesc@" __stringify(__LINE__) ": ", 1584 (0x20 << LDST_OFFSET_SHIFT));
1594 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1585
1595#endif 1586 print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
1587 ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
1588 desc_bytes(desc), 1);
1596} 1589}
1597EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap); 1590EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
1598 1591
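The recurring change through caamalg_desc.c swaps the #ifdef DEBUG / print_hex_dump(KERN_ERR, ...) pairs for print_hex_dump_debug(), which is compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled, so the descriptor dumps no longer need a preprocessor guard or KERN_ERR severity. A minimal before/after sketch of the pattern (the prefix string is a placeholder):

#ifdef DEBUG
        /* old form: only built with DEBUG, always logged at KERN_ERR */
        print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        /* new form: single call, a no-op unless DEBUG/CONFIG_DYNAMIC_DEBUG */
        print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);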
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index d5ca42ff961a..da4a4ee60c80 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -44,9 +44,9 @@
44 44
45#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ) 45#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
46#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \ 46#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
47 20 * CAAM_CMD_SZ) 47 21 * CAAM_CMD_SZ)
48#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ 48#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
49 15 * CAAM_CMD_SZ) 49 16 * CAAM_CMD_SZ)
50 50
51void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, 51void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
52 unsigned int icvsize, int era); 52 unsigned int icvsize, int era);
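The one-command bump in the skcipher descriptor length macros matches the SEQ STORE added above to write the updated IV back into the output sequence. Assuming CAAM_CMD_SZ is the usual 4-byte descriptor command word, the reserved sizes work out as a quick check:

        /* CAAM_CMD_SZ is assumed to be sizeof(u32), i.e. 4 bytes per command word */
        /* encrypt: (3 base + 21) * 4 = 96 bytes reserved for the shared descriptor */
        /* decrypt: (3 base + 16) * 4 = 76 bytes reserved for the shared descriptor */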
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d290d6b41825..32f0f8a72067 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -4,7 +4,7 @@
4 * Based on caamalg.c 4 * Based on caamalg.c
5 * 5 *
6 * Copyright 2013-2016 Freescale Semiconductor, Inc. 6 * Copyright 2013-2016 Freescale Semiconductor, Inc.
7 * Copyright 2016-2018 NXP 7 * Copyright 2016-2019 NXP
8 */ 8 */
9 9
10#include "compat.h" 10#include "compat.h"
@@ -214,13 +214,11 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
214 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 214 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
215 goto badkey; 215 goto badkey;
216 216
217#ifdef DEBUG 217 dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
218 dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
219 keys.authkeylen + keys.enckeylen, keys.enckeylen, 218 keys.authkeylen + keys.enckeylen, keys.enckeylen,
220 keys.authkeylen); 219 keys.authkeylen);
221 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 220 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
222 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 221 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
223#endif
224 222
225 /* 223 /*
226 * If DKP is supported, use it in the shared descriptor to generate 224 * If DKP is supported, use it in the shared descriptor to generate
@@ -237,7 +235,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
237 memcpy(ctx->key, keys.authkey, keys.authkeylen); 235 memcpy(ctx->key, keys.authkey, keys.authkeylen);
238 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 236 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
239 keys.enckeylen); 237 keys.enckeylen);
240 dma_sync_single_for_device(jrdev, ctx->key_dma, 238 dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
241 ctx->adata.keylen_pad + 239 ctx->adata.keylen_pad +
242 keys.enckeylen, ctx->dir); 240 keys.enckeylen, ctx->dir);
243 goto skip_split_key; 241 goto skip_split_key;
@@ -251,8 +249,9 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
251 249
252 /* postpend encryption key to auth split key */ 250 /* postpend encryption key to auth split key */
253 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 251 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
254 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 252 dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
255 keys.enckeylen, ctx->dir); 253 ctx->adata.keylen_pad + keys.enckeylen,
254 ctx->dir);
256#ifdef DEBUG 255#ifdef DEBUG
257 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 256 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
258 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 257 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -386,13 +385,12 @@ static int gcm_setkey(struct crypto_aead *aead,
386 struct device *jrdev = ctx->jrdev; 385 struct device *jrdev = ctx->jrdev;
387 int ret; 386 int ret;
388 387
389#ifdef DEBUG 388 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
390 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 389 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
391 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
392#endif
393 390
394 memcpy(ctx->key, key, keylen); 391 memcpy(ctx->key, key, keylen);
395 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); 392 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
393 ctx->dir);
396 ctx->cdata.keylen = keylen; 394 ctx->cdata.keylen = keylen;
397 395
398 ret = gcm_set_sh_desc(aead); 396 ret = gcm_set_sh_desc(aead);
@@ -485,10 +483,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
485 if (keylen < 4) 483 if (keylen < 4)
486 return -EINVAL; 484 return -EINVAL;
487 485
488#ifdef DEBUG 486 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
489 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 487 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
490 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
491#endif
492 488
493 memcpy(ctx->key, key, keylen); 489 memcpy(ctx->key, key, keylen);
494 /* 490 /*
@@ -496,8 +492,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
496 * in the nonce. Update the AES key length. 492 * in the nonce. Update the AES key length.
497 */ 493 */
498 ctx->cdata.keylen = keylen - 4; 494 ctx->cdata.keylen = keylen - 4;
499 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 495 dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
500 ctx->dir); 496 ctx->cdata.keylen, ctx->dir);
501 497
502 ret = rfc4106_set_sh_desc(aead); 498 ret = rfc4106_set_sh_desc(aead);
503 if (ret) 499 if (ret)
@@ -589,10 +585,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
589 if (keylen < 4) 585 if (keylen < 4)
590 return -EINVAL; 586 return -EINVAL;
591 587
592#ifdef DEBUG 588 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
593 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 589 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
594 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
595#endif
596 590
597 memcpy(ctx->key, key, keylen); 591 memcpy(ctx->key, key, keylen);
598 /* 592 /*
@@ -600,8 +594,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
600 * in the nonce. Update the AES key length. 594 * in the nonce. Update the AES key length.
601 */ 595 */
602 ctx->cdata.keylen = keylen - 4; 596 ctx->cdata.keylen = keylen - 4;
603 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 597 dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
604 ctx->dir); 598 ctx->cdata.keylen, ctx->dir);
605 599
606 ret = rfc4543_set_sh_desc(aead); 600 ret = rfc4543_set_sh_desc(aead);
607 if (ret) 601 if (ret)
@@ -644,10 +638,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
644 const bool is_rfc3686 = alg->caam.rfc3686; 638 const bool is_rfc3686 = alg->caam.rfc3686;
645 int ret = 0; 639 int ret = 0;
646 640
647#ifdef DEBUG 641 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
648 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 642 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
649 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 643
650#endif
651 /* 644 /*
652 * AES-CTR needs to load IV in CONTEXT1 reg 645 * AES-CTR needs to load IV in CONTEXT1 reg
653 * at an offset of 128bits (16bytes) 646 * at an offset of 128bits (16bytes)
@@ -838,7 +831,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
838static void caam_unmap(struct device *dev, struct scatterlist *src, 831static void caam_unmap(struct device *dev, struct scatterlist *src,
839 struct scatterlist *dst, int src_nents, 832 struct scatterlist *dst, int src_nents,
840 int dst_nents, dma_addr_t iv_dma, int ivsize, 833 int dst_nents, dma_addr_t iv_dma, int ivsize,
841 dma_addr_t qm_sg_dma, int qm_sg_bytes) 834 enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
835 int qm_sg_bytes)
842{ 836{
843 if (dst != src) { 837 if (dst != src) {
844 if (src_nents) 838 if (src_nents)
@@ -850,7 +844,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
850 } 844 }
851 845
852 if (iv_dma) 846 if (iv_dma)
853 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 847 dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
854 if (qm_sg_bytes) 848 if (qm_sg_bytes)
855 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); 849 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
856} 850}
@@ -863,7 +857,8 @@ static void aead_unmap(struct device *dev,
863 int ivsize = crypto_aead_ivsize(aead); 857 int ivsize = crypto_aead_ivsize(aead);
864 858
865 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 859 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
866 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 860 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
861 edesc->qm_sg_bytes);
867 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 862 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
868} 863}
869 864
@@ -874,7 +869,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
874 int ivsize = crypto_skcipher_ivsize(skcipher); 869 int ivsize = crypto_skcipher_ivsize(skcipher);
875 870
876 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 871 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
877 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 872 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
873 edesc->qm_sg_bytes);
878} 874}
879 875
880static void aead_done(struct caam_drv_req *drv_req, u32 status) 876static void aead_done(struct caam_drv_req *drv_req, u32 status)
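caam_unmap() grows an iv_dir argument because the IV buffer is no longer always mapped towards the device: skciphers now map it DMA_BIDIRECTIONAL so the engine can write the updated IV back, AEADs keep DMA_TO_DEVICE, and DMA_NONE is passed on error paths where no IV was mapped at all. A hedged sketch of the pairing rule the new argument enforces (names follow the surrounding code):

        /* skcipher: engine reads the IV and writes the next IV back */
        iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
        /* ... job runs ... */
        dma_unmap_single(qidev, iv_dma, ivsize, DMA_BIDIRECTIONAL);

        /* aead: the IV is input-only, so the one-way mapping is kept */
        iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
        /* ... job runs ... */
        dma_unmap_single(qidev, iv_dma, ivsize, DMA_TO_DEVICE);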
@@ -924,6 +920,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
924 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 920 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
925 GFP_KERNEL : GFP_ATOMIC; 921 GFP_KERNEL : GFP_ATOMIC;
926 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 922 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
923 int src_len, dst_len = 0;
927 struct aead_edesc *edesc; 924 struct aead_edesc *edesc;
928 dma_addr_t qm_sg_dma, iv_dma = 0; 925 dma_addr_t qm_sg_dma, iv_dma = 0;
929 int ivsize = 0; 926 int ivsize = 0;
@@ -945,13 +942,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
945 } 942 }
946 943
947 if (likely(req->src == req->dst)) { 944 if (likely(req->src == req->dst)) {
948 src_nents = sg_nents_for_len(req->src, req->assoclen + 945 src_len = req->assoclen + req->cryptlen +
949 req->cryptlen + 946 (encrypt ? authsize : 0);
950 (encrypt ? authsize : 0)); 947
948 src_nents = sg_nents_for_len(req->src, src_len);
951 if (unlikely(src_nents < 0)) { 949 if (unlikely(src_nents < 0)) {
952 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 950 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
953 req->assoclen + req->cryptlen + 951 src_len);
954 (encrypt ? authsize : 0));
955 qi_cache_free(edesc); 952 qi_cache_free(edesc);
956 return ERR_PTR(src_nents); 953 return ERR_PTR(src_nents);
957 } 954 }
@@ -964,23 +961,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
964 return ERR_PTR(-ENOMEM); 961 return ERR_PTR(-ENOMEM);
965 } 962 }
966 } else { 963 } else {
967 src_nents = sg_nents_for_len(req->src, req->assoclen + 964 src_len = req->assoclen + req->cryptlen;
968 req->cryptlen); 965 dst_len = src_len + (encrypt ? authsize : (-authsize));
966
967 src_nents = sg_nents_for_len(req->src, src_len);
969 if (unlikely(src_nents < 0)) { 968 if (unlikely(src_nents < 0)) {
970 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 969 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
971 req->assoclen + req->cryptlen); 970 src_len);
972 qi_cache_free(edesc); 971 qi_cache_free(edesc);
973 return ERR_PTR(src_nents); 972 return ERR_PTR(src_nents);
974 } 973 }
975 974
976 dst_nents = sg_nents_for_len(req->dst, req->assoclen + 975 dst_nents = sg_nents_for_len(req->dst, dst_len);
977 req->cryptlen +
978 (encrypt ? authsize :
979 (-authsize)));
980 if (unlikely(dst_nents < 0)) { 976 if (unlikely(dst_nents < 0)) {
981 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 977 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
982 req->assoclen + req->cryptlen + 978 dst_len);
983 (encrypt ? authsize : (-authsize)));
984 qi_cache_free(edesc); 979 qi_cache_free(edesc);
985 return ERR_PTR(dst_nents); 980 return ERR_PTR(dst_nents);
986 } 981 }
@@ -1019,9 +1014,24 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1019 /* 1014 /*
1020 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. 1015 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
1021 * Input is not contiguous. 1016 * Input is not contiguous.
1017 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1018 * the end of the table by allocating more S/G entries. Logic:
1019 * if (src != dst && output S/G)
1020 * pad output S/G, if needed
1021 * else if (src == dst && S/G)
1022 * overlapping S/Gs; pad one of them
1023 * else if (input S/G) ...
1024 * pad input S/G, if needed
1022 */ 1025 */
1023 qm_sg_ents = 1 + !!ivsize + mapped_src_nents + 1026 qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
1024 (mapped_dst_nents > 1 ? mapped_dst_nents : 0); 1027 if (mapped_dst_nents > 1)
1028 qm_sg_ents += pad_sg_nents(mapped_dst_nents);
1029 else if ((req->src == req->dst) && (mapped_src_nents > 1))
1030 qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
1031 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
1032 else
1033 qm_sg_ents = pad_sg_nents(qm_sg_ents);
1034
1025 sg_table = &edesc->sgt[0]; 1035 sg_table = &edesc->sgt[0];
1026 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); 1036 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1027 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > 1037 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -1029,7 +1039,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1029 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1039 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1030 qm_sg_ents, ivsize); 1040 qm_sg_ents, ivsize);
1031 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1041 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1032 0, 0, 0); 1042 0, DMA_NONE, 0, 0);
1033 qi_cache_free(edesc); 1043 qi_cache_free(edesc);
1034 return ERR_PTR(-ENOMEM); 1044 return ERR_PTR(-ENOMEM);
1035 } 1045 }
@@ -1044,7 +1054,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1044 if (dma_mapping_error(qidev, iv_dma)) { 1054 if (dma_mapping_error(qidev, iv_dma)) {
1045 dev_err(qidev, "unable to map IV\n"); 1055 dev_err(qidev, "unable to map IV\n");
1046 caam_unmap(qidev, req->src, req->dst, src_nents, 1056 caam_unmap(qidev, req->src, req->dst, src_nents,
1047 dst_nents, 0, 0, 0, 0); 1057 dst_nents, 0, 0, DMA_NONE, 0, 0);
1048 qi_cache_free(edesc); 1058 qi_cache_free(edesc);
1049 return ERR_PTR(-ENOMEM); 1059 return ERR_PTR(-ENOMEM);
1050 } 1060 }
@@ -1063,7 +1073,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1063 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { 1073 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1064 dev_err(qidev, "unable to map assoclen\n"); 1074 dev_err(qidev, "unable to map assoclen\n");
1065 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1075 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1066 iv_dma, ivsize, 0, 0); 1076 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
1067 qi_cache_free(edesc); 1077 qi_cache_free(edesc);
1068 return ERR_PTR(-ENOMEM); 1078 return ERR_PTR(-ENOMEM);
1069 } 1079 }
@@ -1074,19 +1084,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1074 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); 1084 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
1075 qm_sg_index++; 1085 qm_sg_index++;
1076 } 1086 }
1077 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); 1087 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
1078 qm_sg_index += mapped_src_nents; 1088 qm_sg_index += mapped_src_nents;
1079 1089
1080 if (mapped_dst_nents > 1) 1090 if (mapped_dst_nents > 1)
1081 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 1091 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
1082 qm_sg_index, 0);
1083 1092
1084 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); 1093 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
1085 if (dma_mapping_error(qidev, qm_sg_dma)) { 1094 if (dma_mapping_error(qidev, qm_sg_dma)) {
1086 dev_err(qidev, "unable to map S/G table\n"); 1095 dev_err(qidev, "unable to map S/G table\n");
1087 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 1096 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1088 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1097 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1089 iv_dma, ivsize, 0, 0); 1098 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
1090 qi_cache_free(edesc); 1099 qi_cache_free(edesc);
1091 return ERR_PTR(-ENOMEM); 1100 return ERR_PTR(-ENOMEM);
1092 } 1101 }
@@ -1109,7 +1118,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1109 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + 1118 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
1110 (1 + !!ivsize) * sizeof(*sg_table), 1119 (1 + !!ivsize) * sizeof(*sg_table),
1111 out_len, 0); 1120 out_len, 0);
1112 } else if (mapped_dst_nents == 1) { 1121 } else if (mapped_dst_nents <= 1) {
1113 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, 1122 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
1114 0); 1123 0);
1115 } else { 1124 } else {
@@ -1182,33 +1191,28 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
1182 struct device *qidev = caam_ctx->qidev; 1191 struct device *qidev = caam_ctx->qidev;
1183 int ivsize = crypto_skcipher_ivsize(skcipher); 1192 int ivsize = crypto_skcipher_ivsize(skcipher);
1184 1193
1185#ifdef DEBUG 1194 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1186 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1187#endif
1188 1195
1189 edesc = container_of(drv_req, typeof(*edesc), drv_req); 1196 edesc = container_of(drv_req, typeof(*edesc), drv_req);
1190 1197
1191 if (status) 1198 if (status)
1192 caam_jr_strstatus(qidev, status); 1199 caam_jr_strstatus(qidev, status);
1193 1200
1194#ifdef DEBUG 1201 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1195 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", 1202 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1196 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1203 edesc->src_nents > 1 ? 100 : ivsize, 1);
1197 edesc->src_nents > 1 ? 100 : ivsize, 1); 1204 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1198 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
1199 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1205 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1200 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1206 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1201#endif
1202 1207
1203 skcipher_unmap(qidev, edesc, req); 1208 skcipher_unmap(qidev, edesc, req);
1204 1209
1205 /* 1210 /*
1206 * The crypto API expects us to set the IV (req->iv) to the last 1211 * The crypto API expects us to set the IV (req->iv) to the last
1207 * ciphertext block. This is used e.g. by the CTS mode. 1212 * ciphertext block (CBC mode) or last counter (CTR mode).
1213 * This is used e.g. by the CTS mode.
1208 */ 1214 */
1209 if (edesc->drv_req.drv_ctx->op_type == ENCRYPT) 1215 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
1210 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
1211 ivsize, ivsize, 0);
1212 1216
1213 qi_cache_free(edesc); 1217 qi_cache_free(edesc);
1214 skcipher_request_complete(req, status); 1218 skcipher_request_complete(req, status);
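skcipher_done() no longer copies the last ciphertext block out of req->dst; the updated IV is read straight from the extended descriptor, where the [dst, IV] output S/G table told the engine to deposit it. The layout being relied on, as implied by the allocation code, is roughly:

        /* inside the skcipher extended descriptor:
         *   edesc->sgt[0 .. qm_sg_ents-1]              HW S/G table (qm_sg_bytes)
         *   (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes  IV buffer, DMA_BIDIRECTIONAL
         * the final output S/G entry points at this IV buffer, so once the job
         * completes it holds the next IV (chained ciphertext block or counter)
         */
        memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);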
@@ -1276,14 +1280,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1276 qm_sg_ents = 1 + mapped_src_nents; 1280 qm_sg_ents = 1 + mapped_src_nents;
1277 dst_sg_idx = qm_sg_ents; 1281 dst_sg_idx = qm_sg_ents;
1278 1282
1279 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1283 /*
1284 * Input, output HW S/G tables: [IV, src][dst, IV]
1285 * IV entries point to the same buffer
1286 * If src == dst, S/G entries are reused (S/G tables overlap)
1287 *
1288 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1289 * the end of the table by allocating more S/G entries.
1290 */
1291 if (req->src != req->dst)
1292 qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1293 else
1294 qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1295
1280 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); 1296 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1281 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + 1297 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1282 ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1298 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1283 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1299 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1284 qm_sg_ents, ivsize); 1300 qm_sg_ents, ivsize);
1285 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1301 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1286 0, 0, 0); 1302 0, DMA_NONE, 0, 0);
1287 return ERR_PTR(-ENOMEM); 1303 return ERR_PTR(-ENOMEM);
1288 } 1304 }
1289 1305
@@ -1292,7 +1308,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1292 if (unlikely(!edesc)) { 1308 if (unlikely(!edesc)) {
1293 dev_err(qidev, "could not allocate extended descriptor\n"); 1309 dev_err(qidev, "could not allocate extended descriptor\n");
1294 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1310 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1295 0, 0, 0); 1311 0, DMA_NONE, 0, 0);
1296 return ERR_PTR(-ENOMEM); 1312 return ERR_PTR(-ENOMEM);
1297 } 1313 }
1298 1314
@@ -1301,11 +1317,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1301 iv = (u8 *)(sg_table + qm_sg_ents); 1317 iv = (u8 *)(sg_table + qm_sg_ents);
1302 memcpy(iv, req->iv, ivsize); 1318 memcpy(iv, req->iv, ivsize);
1303 1319
1304 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); 1320 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
1305 if (dma_mapping_error(qidev, iv_dma)) { 1321 if (dma_mapping_error(qidev, iv_dma)) {
1306 dev_err(qidev, "unable to map IV\n"); 1322 dev_err(qidev, "unable to map IV\n");
1307 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1323 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1308 0, 0, 0); 1324 0, DMA_NONE, 0, 0);
1309 qi_cache_free(edesc); 1325 qi_cache_free(edesc);
1310 return ERR_PTR(-ENOMEM); 1326 return ERR_PTR(-ENOMEM);
1311 } 1327 }
@@ -1319,18 +1335,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1319 edesc->drv_req.drv_ctx = drv_ctx; 1335 edesc->drv_req.drv_ctx = drv_ctx;
1320 1336
1321 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); 1337 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1322 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); 1338 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1323 1339
1324 if (mapped_dst_nents > 1) 1340 if (req->src != req->dst)
1325 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 1341 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1326 dst_sg_idx, 0); 1342
1343 dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1344 ivsize, 0);
1327 1345
1328 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, 1346 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1329 DMA_TO_DEVICE); 1347 DMA_TO_DEVICE);
1330 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { 1348 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1331 dev_err(qidev, "unable to map S/G table\n"); 1349 dev_err(qidev, "unable to map S/G table\n");
1332 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1350 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1333 iv_dma, ivsize, 0, 0); 1351 iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1334 qi_cache_free(edesc); 1352 qi_cache_free(edesc);
1335 return ERR_PTR(-ENOMEM); 1353 return ERR_PTR(-ENOMEM);
1336 } 1354 }
@@ -1340,16 +1358,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1340 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, 1358 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1341 ivsize + req->cryptlen, 0); 1359 ivsize + req->cryptlen, 0);
1342 1360
1343 if (req->src == req->dst) { 1361 if (req->src == req->dst)
1344 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + 1362 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1345 sizeof(*sg_table), req->cryptlen, 0); 1363 sizeof(*sg_table), req->cryptlen + ivsize,
1346 } else if (mapped_dst_nents > 1) { 1364 0);
1365 else
1347 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * 1366 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1348 sizeof(*sg_table), req->cryptlen, 0); 1367 sizeof(*sg_table), req->cryptlen + ivsize,
1349 } else { 1368 0);
1350 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1351 req->cryptlen, 0);
1352 }
1353 1369
1354 return edesc; 1370 return edesc;
1355} 1371}
@@ -1359,7 +1375,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1359 struct skcipher_edesc *edesc; 1375 struct skcipher_edesc *edesc;
1360 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1376 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1361 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1377 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1362 int ivsize = crypto_skcipher_ivsize(skcipher);
1363 int ret; 1378 int ret;
1364 1379
1365 if (unlikely(caam_congested)) 1380 if (unlikely(caam_congested))
@@ -1370,14 +1385,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1370 if (IS_ERR(edesc)) 1385 if (IS_ERR(edesc))
1371 return PTR_ERR(edesc); 1386 return PTR_ERR(edesc);
1372 1387
1373 /*
1374 * The crypto API expects us to set the IV (req->iv) to the last
1375 * ciphertext block.
1376 */
1377 if (!encrypt)
1378 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
1379 ivsize, ivsize, 0);
1380
1381 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1388 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1382 if (!ret) { 1389 if (!ret) {
1383 ret = -EINPROGRESS; 1390 ret = -EINPROGRESS;
@@ -2382,6 +2389,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2382 bool uses_dkp) 2389 bool uses_dkp)
2383{ 2390{
2384 struct caam_drv_private *priv; 2391 struct caam_drv_private *priv;
2392 struct device *dev;
2385 2393
2386 /* 2394 /*
2387 * distribute tfms across job rings to ensure in-order 2395 * distribute tfms across job rings to ensure in-order
@@ -2393,16 +2401,17 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2393 return PTR_ERR(ctx->jrdev); 2401 return PTR_ERR(ctx->jrdev);
2394 } 2402 }
2395 2403
2396 priv = dev_get_drvdata(ctx->jrdev->parent); 2404 dev = ctx->jrdev->parent;
2405 priv = dev_get_drvdata(dev);
2397 if (priv->era >= 6 && uses_dkp) 2406 if (priv->era >= 6 && uses_dkp)
2398 ctx->dir = DMA_BIDIRECTIONAL; 2407 ctx->dir = DMA_BIDIRECTIONAL;
2399 else 2408 else
2400 ctx->dir = DMA_TO_DEVICE; 2409 ctx->dir = DMA_TO_DEVICE;
2401 2410
2402 ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), 2411 ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
2403 ctx->dir); 2412 ctx->dir);
2404 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { 2413 if (dma_mapping_error(dev, ctx->key_dma)) {
2405 dev_err(ctx->jrdev, "unable to map key\n"); 2414 dev_err(dev, "unable to map key\n");
2406 caam_jr_free(ctx->jrdev); 2415 caam_jr_free(ctx->jrdev);
2407 return -ENOMEM; 2416 return -ENOMEM;
2408 } 2417 }
@@ -2411,7 +2420,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2411 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 2420 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2412 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 2421 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2413 2422
2414 ctx->qidev = priv->qidev; 2423 ctx->qidev = dev;
2415 2424
2416 spin_lock_init(&ctx->lock); 2425 spin_lock_init(&ctx->lock);
2417 ctx->drv_ctx[ENCRYPT] = NULL; 2426 ctx->drv_ctx[ENCRYPT] = NULL;
@@ -2445,7 +2454,8 @@ static void caam_exit_common(struct caam_ctx *ctx)
2445 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); 2454 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2446 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2455 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2447 2456
2448 dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); 2457 dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
2458 ctx->dir);
2449 2459
2450 caam_jr_free(ctx->jrdev); 2460 caam_jr_free(ctx->jrdev);
2451} 2461}
@@ -2460,7 +2470,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
2460 caam_exit_common(crypto_aead_ctx(tfm)); 2470 caam_exit_common(crypto_aead_ctx(tfm));
2461} 2471}
2462 2472
2463static void __exit caam_qi_algapi_exit(void) 2473void caam_qi_algapi_exit(void)
2464{ 2474{
2465 int i; 2475 int i;
2466 2476
@@ -2505,45 +2515,17 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2505 alg->exit = caam_aead_exit; 2515 alg->exit = caam_aead_exit;
2506} 2516}
2507 2517
2508static int __init caam_qi_algapi_init(void) 2518int caam_qi_algapi_init(struct device *ctrldev)
2509{ 2519{
2510 struct device_node *dev_node; 2520 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
2511 struct platform_device *pdev;
2512 struct device *ctrldev;
2513 struct caam_drv_private *priv;
2514 int i = 0, err = 0; 2521 int i = 0, err = 0;
2515 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; 2522 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
2516 unsigned int md_limit = SHA512_DIGEST_SIZE; 2523 unsigned int md_limit = SHA512_DIGEST_SIZE;
2517 bool registered = false; 2524 bool registered = false;
2518 2525
2519 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2520 if (!dev_node) {
2521 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2522 if (!dev_node)
2523 return -ENODEV;
2524 }
2525
2526 pdev = of_find_device_by_node(dev_node);
2527 of_node_put(dev_node);
2528 if (!pdev)
2529 return -ENODEV;
2530
2531 ctrldev = &pdev->dev;
2532 priv = dev_get_drvdata(ctrldev);
2533
2534 /*
2535 * If priv is NULL, it's probably because the caam driver wasn't
2536 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2537 */
2538 if (!priv || !priv->qi_present) {
2539 err = -ENODEV;
2540 goto out_put_dev;
2541 }
2542
2543 if (caam_dpaa2) { 2526 if (caam_dpaa2) {
2544 dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); 2527 dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
2545 err = -ENODEV; 2528 return -ENODEV;
2546 goto out_put_dev;
2547 } 2529 }
2548 2530
2549 /* 2531 /*
@@ -2598,7 +2580,7 @@ static int __init caam_qi_algapi_init(void)
2598 2580
2599 err = crypto_register_skcipher(&t_alg->skcipher); 2581 err = crypto_register_skcipher(&t_alg->skcipher);
2600 if (err) { 2582 if (err) {
2601 dev_warn(priv->qidev, "%s alg registration failed\n", 2583 dev_warn(ctrldev, "%s alg registration failed\n",
2602 t_alg->skcipher.base.cra_driver_name); 2584 t_alg->skcipher.base.cra_driver_name);
2603 continue; 2585 continue;
2604 } 2586 }
@@ -2654,16 +2636,7 @@ static int __init caam_qi_algapi_init(void)
2654 } 2636 }
2655 2637
2656 if (registered) 2638 if (registered)
2657 dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); 2639 dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
2658 2640
2659out_put_dev:
2660 put_device(ctrldev);
2661 return err; 2641 return err;
2662} 2642}
2663
2664module_init(caam_qi_algapi_init);
2665module_exit(caam_qi_algapi_exit);
2666
2667MODULE_LICENSE("GPL");
2668MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
2669MODULE_AUTHOR("Freescale Semiconductor");
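With module_init()/module_exit() dropped, registration is now expected to be driven by the CAAM controller code, which already owns ctrldev and its drvdata; only the exported entry points and their signatures are visible in this diff. A hypothetical call sequence on the controller side, purely for illustration:

        /* during controller bring-up, once the QI backend is known to be usable */
        err = caam_qi_algapi_init(ctrldev);
        if (err)
                dev_warn(ctrldev, "caam/qi algapi registration failed: %d\n", err);

        /* and on controller teardown */
        caam_qi_algapi_exit();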
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 2b2980a8a9b9..06bf32c32cbd 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1,7 +1,7 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2/* 2/*
3 * Copyright 2015-2016 Freescale Semiconductor Inc. 3 * Copyright 2015-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2018 NXP 4 * Copyright 2017-2019 NXP
5 */ 5 */
6 6
7#include "compat.h" 7#include "compat.h"
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
140static void caam_unmap(struct device *dev, struct scatterlist *src, 140static void caam_unmap(struct device *dev, struct scatterlist *src,
141 struct scatterlist *dst, int src_nents, 141 struct scatterlist *dst, int src_nents,
142 int dst_nents, dma_addr_t iv_dma, int ivsize, 142 int dst_nents, dma_addr_t iv_dma, int ivsize,
143 dma_addr_t qm_sg_dma, int qm_sg_bytes) 143 enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
144 int qm_sg_bytes)
144{ 145{
145 if (dst != src) { 146 if (dst != src) {
146 if (src_nents) 147 if (src_nents)
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
152 } 153 }
153 154
154 if (iv_dma) 155 if (iv_dma)
155 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 156 dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
156 157
157 if (qm_sg_bytes) 158 if (qm_sg_bytes)
158 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); 159 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
@@ -371,6 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
371 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 372 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
372 GFP_KERNEL : GFP_ATOMIC; 373 GFP_KERNEL : GFP_ATOMIC;
373 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 374 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
375 int src_len, dst_len = 0;
374 struct aead_edesc *edesc; 376 struct aead_edesc *edesc;
375 dma_addr_t qm_sg_dma, iv_dma = 0; 377 dma_addr_t qm_sg_dma, iv_dma = 0;
376 int ivsize = 0; 378 int ivsize = 0;
@@ -387,23 +389,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
387 } 389 }
388 390
389 if (unlikely(req->dst != req->src)) { 391 if (unlikely(req->dst != req->src)) {
390 src_nents = sg_nents_for_len(req->src, req->assoclen + 392 src_len = req->assoclen + req->cryptlen;
391 req->cryptlen); 393 dst_len = src_len + (encrypt ? authsize : (-authsize));
394
395 src_nents = sg_nents_for_len(req->src, src_len);
392 if (unlikely(src_nents < 0)) { 396 if (unlikely(src_nents < 0)) {
393 dev_err(dev, "Insufficient bytes (%d) in src S/G\n", 397 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
394 req->assoclen + req->cryptlen); 398 src_len);
395 qi_cache_free(edesc); 399 qi_cache_free(edesc);
396 return ERR_PTR(src_nents); 400 return ERR_PTR(src_nents);
397 } 401 }
398 402
399 dst_nents = sg_nents_for_len(req->dst, req->assoclen + 403 dst_nents = sg_nents_for_len(req->dst, dst_len);
400 req->cryptlen +
401 (encrypt ? authsize :
402 (-authsize)));
403 if (unlikely(dst_nents < 0)) { 404 if (unlikely(dst_nents < 0)) {
404 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", 405 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
405 req->assoclen + req->cryptlen + 406 dst_len);
406 (encrypt ? authsize : (-authsize)));
407 qi_cache_free(edesc); 407 qi_cache_free(edesc);
408 return ERR_PTR(dst_nents); 408 return ERR_PTR(dst_nents);
409 } 409 }
@@ -434,13 +434,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
434 mapped_dst_nents = 0; 434 mapped_dst_nents = 0;
435 } 435 }
436 } else { 436 } else {
437 src_nents = sg_nents_for_len(req->src, req->assoclen + 437 src_len = req->assoclen + req->cryptlen +
438 req->cryptlen + 438 (encrypt ? authsize : 0);
439 (encrypt ? authsize : 0)); 439
440 src_nents = sg_nents_for_len(req->src, src_len);
440 if (unlikely(src_nents < 0)) { 441 if (unlikely(src_nents < 0)) {
441 dev_err(dev, "Insufficient bytes (%d) in src S/G\n", 442 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
442 req->assoclen + req->cryptlen + 443 src_len);
443 (encrypt ? authsize : 0));
444 qi_cache_free(edesc); 444 qi_cache_free(edesc);
445 return ERR_PTR(src_nents); 445 return ERR_PTR(src_nents);
446 } 446 }
@@ -460,9 +460,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
460 /* 460 /*
461 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. 461 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
462 * Input is not contiguous. 462 * Input is not contiguous.
463 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
464 * the end of the table by allocating more S/G entries. Logic:
465 * if (src != dst && output S/G)
466 * pad output S/G, if needed
467 * else if (src == dst && S/G)
468 * overlapping S/Gs; pad one of them
469 * else if (input S/G) ...
470 * pad input S/G, if needed
463 */ 471 */
464 qm_sg_nents = 1 + !!ivsize + mapped_src_nents + 472 qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
465 (mapped_dst_nents > 1 ? mapped_dst_nents : 0); 473 if (mapped_dst_nents > 1)
474 qm_sg_nents += pad_sg_nents(mapped_dst_nents);
475 else if ((req->src == req->dst) && (mapped_src_nents > 1))
476 qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
477 1 + !!ivsize +
478 pad_sg_nents(mapped_src_nents));
479 else
480 qm_sg_nents = pad_sg_nents(qm_sg_nents);
481
466 sg_table = &edesc->sgt[0]; 482 sg_table = &edesc->sgt[0];
467 qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); 483 qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
468 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > 484 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -470,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
470 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", 486 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
471 qm_sg_nents, ivsize); 487 qm_sg_nents, ivsize);
472 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 488 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
473 0, 0, 0); 489 0, DMA_NONE, 0, 0);
474 qi_cache_free(edesc); 490 qi_cache_free(edesc);
475 return ERR_PTR(-ENOMEM); 491 return ERR_PTR(-ENOMEM);
476 } 492 }
@@ -485,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
485 if (dma_mapping_error(dev, iv_dma)) { 501 if (dma_mapping_error(dev, iv_dma)) {
486 dev_err(dev, "unable to map IV\n"); 502 dev_err(dev, "unable to map IV\n");
487 caam_unmap(dev, req->src, req->dst, src_nents, 503 caam_unmap(dev, req->src, req->dst, src_nents,
488 dst_nents, 0, 0, 0, 0); 504 dst_nents, 0, 0, DMA_NONE, 0, 0);
489 qi_cache_free(edesc); 505 qi_cache_free(edesc);
490 return ERR_PTR(-ENOMEM); 506 return ERR_PTR(-ENOMEM);
491 } 507 }
@@ -509,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
509 if (dma_mapping_error(dev, edesc->assoclen_dma)) { 525 if (dma_mapping_error(dev, edesc->assoclen_dma)) {
510 dev_err(dev, "unable to map assoclen\n"); 526 dev_err(dev, "unable to map assoclen\n");
511 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 527 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
512 iv_dma, ivsize, 0, 0); 528 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
513 qi_cache_free(edesc); 529 qi_cache_free(edesc);
514 return ERR_PTR(-ENOMEM); 530 return ERR_PTR(-ENOMEM);
515 } 531 }
@@ -520,19 +536,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
520 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); 536 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
521 qm_sg_index++; 537 qm_sg_index++;
522 } 538 }
523 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); 539 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
524 qm_sg_index += mapped_src_nents; 540 qm_sg_index += mapped_src_nents;
525 541
526 if (mapped_dst_nents > 1) 542 if (mapped_dst_nents > 1)
527 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 543 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
528 qm_sg_index, 0);
529 544
530 qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); 545 qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
531 if (dma_mapping_error(dev, qm_sg_dma)) { 546 if (dma_mapping_error(dev, qm_sg_dma)) {
532 dev_err(dev, "unable to map S/G table\n"); 547 dev_err(dev, "unable to map S/G table\n");
533 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 548 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
534 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 549 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
535 iv_dma, ivsize, 0, 0); 550 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
536 qi_cache_free(edesc); 551 qi_cache_free(edesc);
537 return ERR_PTR(-ENOMEM); 552 return ERR_PTR(-ENOMEM);
538 } 553 }
@@ -559,6 +574,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
559 dpaa2_fl_set_addr(out_fle, qm_sg_dma + 574 dpaa2_fl_set_addr(out_fle, qm_sg_dma +
560 (1 + !!ivsize) * sizeof(*sg_table)); 575 (1 + !!ivsize) * sizeof(*sg_table));
561 } 576 }
577 } else if (!mapped_dst_nents) {
578 /*
579 * crypto engine requires the output entry to be present when
580 * "frame list" FD is used.
581 * Since engine does not support FMT=2'b11 (unused entry type),
582 * leaving out_fle zeroized is the best option.
583 */
584 goto skip_out_fle;
562 } else if (mapped_dst_nents == 1) { 585 } else if (mapped_dst_nents == 1) {
563 dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 586 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
564 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); 587 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
@@ -570,6 +593,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
570 593
571 dpaa2_fl_set_len(out_fle, out_len); 594 dpaa2_fl_set_len(out_fle, out_len);
572 595
596skip_out_fle:
573 return edesc; 597 return edesc;
574} 598}
575 599
@@ -1077,14 +1101,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1077 qm_sg_ents = 1 + mapped_src_nents; 1101 qm_sg_ents = 1 + mapped_src_nents;
1078 dst_sg_idx = qm_sg_ents; 1102 dst_sg_idx = qm_sg_ents;
1079 1103
1080 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1104 /*
1105 * Input, output HW S/G tables: [IV, src][dst, IV]
1106 * IV entries point to the same buffer
1107 * If src == dst, S/G entries are reused (S/G tables overlap)
1108 *
1109 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1110 * the end of the table by allocating more S/G entries.
1111 */
1112 if (req->src != req->dst)
1113 qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1114 else
1115 qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1116
1081 qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); 1117 qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1082 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + 1118 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1083 ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1119 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1084 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", 1120 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1085 qm_sg_ents, ivsize); 1121 qm_sg_ents, ivsize);
1086 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 1122 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1087 0, 0, 0); 1123 0, DMA_NONE, 0, 0);
1088 return ERR_PTR(-ENOMEM); 1124 return ERR_PTR(-ENOMEM);
1089 } 1125 }
1090 1126
@@ -1093,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1093 if (unlikely(!edesc)) { 1129 if (unlikely(!edesc)) {
1094 dev_err(dev, "could not allocate extended descriptor\n"); 1130 dev_err(dev, "could not allocate extended descriptor\n");
1095 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 1131 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1096 0, 0, 0); 1132 0, DMA_NONE, 0, 0);
1097 return ERR_PTR(-ENOMEM); 1133 return ERR_PTR(-ENOMEM);
1098 } 1134 }
1099 1135
@@ -1102,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1102 iv = (u8 *)(sg_table + qm_sg_ents); 1138 iv = (u8 *)(sg_table + qm_sg_ents);
1103 memcpy(iv, req->iv, ivsize); 1139 memcpy(iv, req->iv, ivsize);
1104 1140
1105 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1141 iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1106 if (dma_mapping_error(dev, iv_dma)) { 1142 if (dma_mapping_error(dev, iv_dma)) {
1107 dev_err(dev, "unable to map IV\n"); 1143 dev_err(dev, "unable to map IV\n");
1108 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 1144 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1109 0, 0, 0); 1145 0, DMA_NONE, 0, 0);
1110 qi_cache_free(edesc); 1146 qi_cache_free(edesc);
1111 return ERR_PTR(-ENOMEM); 1147 return ERR_PTR(-ENOMEM);
1112 } 1148 }
@@ -1117,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1117 edesc->qm_sg_bytes = qm_sg_bytes; 1153 edesc->qm_sg_bytes = qm_sg_bytes;
1118 1154
1119 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); 1155 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1120 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); 1156 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1121 1157
1122 if (mapped_dst_nents > 1) 1158 if (req->src != req->dst)
1123 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 1159 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1124 dst_sg_idx, 0); 1160
1161 dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1162 ivsize, 0);
1125 1163
1126 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, 1164 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1127 DMA_TO_DEVICE); 1165 DMA_TO_DEVICE);
1128 if (dma_mapping_error(dev, edesc->qm_sg_dma)) { 1166 if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1129 dev_err(dev, "unable to map S/G table\n"); 1167 dev_err(dev, "unable to map S/G table\n");
1130 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 1168 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1131 iv_dma, ivsize, 0, 0); 1169 iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1132 qi_cache_free(edesc); 1170 qi_cache_free(edesc);
1133 return ERR_PTR(-ENOMEM); 1171 return ERR_PTR(-ENOMEM);
1134 } 1172 }
@@ -1136,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1136 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); 1174 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1137 dpaa2_fl_set_final(in_fle, true); 1175 dpaa2_fl_set_final(in_fle, true);
1138 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); 1176 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1139 dpaa2_fl_set_len(out_fle, req->cryptlen); 1177 dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1140 1178
1141 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); 1179 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1142 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); 1180 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1143 1181
1144 if (req->src == req->dst) { 1182 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1145 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); 1183
1184 if (req->src == req->dst)
1146 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + 1185 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1147 sizeof(*sg_table)); 1186 sizeof(*sg_table));
1148 } else if (mapped_dst_nents > 1) { 1187 else
1149 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1150 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * 1188 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1151 sizeof(*sg_table)); 1189 sizeof(*sg_table));
1152 } else {
1153 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
1154 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
1155 }
1156 1190
1157 return edesc; 1191 return edesc;
1158} 1192}
@@ -1164,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1164 int ivsize = crypto_aead_ivsize(aead); 1198 int ivsize = crypto_aead_ivsize(aead);
1165 1199
1166 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 1200 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1167 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 1201 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1202 edesc->qm_sg_bytes);
1168 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 1203 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1169} 1204}
1170 1205
@@ -1175,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1175 int ivsize = crypto_skcipher_ivsize(skcipher); 1210 int ivsize = crypto_skcipher_ivsize(skcipher);
1176 1211
1177 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 1212 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1178 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 1213 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1214 edesc->qm_sg_bytes);
1179} 1215}
1180 1216
1181static void aead_encrypt_done(void *cbk_ctx, u32 status) 1217static void aead_encrypt_done(void *cbk_ctx, u32 status)
@@ -1324,7 +1360,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1324 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1360 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1325 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1361 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1326 edesc->src_nents > 1 ? 100 : ivsize, 1); 1362 edesc->src_nents > 1 ? 100 : ivsize, 1);
1327 caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ", 1363 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1328 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1364 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1329 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1365 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1330 1366
@@ -1332,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1332 1368
1333 /* 1369 /*
1334 * The crypto API expects us to set the IV (req->iv) to the last 1370 * The crypto API expects us to set the IV (req->iv) to the last
1335 * ciphertext block. This is used e.g. by the CTS mode. 1371 * ciphertext block (CBC mode) or last counter (CTR mode).
1372 * This is used e.g. by the CTS mode.
1336 */ 1373 */
1337 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize, 1374 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
1338 ivsize, 0);
1339 1375
1340 qi_cache_free(edesc); 1376 qi_cache_free(edesc);
1341 skcipher_request_complete(req, ecode); 1377 skcipher_request_complete(req, ecode);
@@ -1362,11 +1398,19 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1362 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1398 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1363 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1399 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1364 edesc->src_nents > 1 ? 100 : ivsize, 1); 1400 edesc->src_nents > 1 ? 100 : ivsize, 1);
1365 caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ", 1401 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1366 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1402 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1367 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1403 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1368 1404
1369 skcipher_unmap(ctx->dev, edesc, req); 1405 skcipher_unmap(ctx->dev, edesc, req);
1406
1407 /*
1408 * The crypto API expects us to set the IV (req->iv) to the last
1409 * ciphertext block (CBC mode) or last counter (CTR mode).
1410 * This is used e.g. by the CTS mode.
1411 */
1412 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
1413
1370 qi_cache_free(edesc); 1414 qi_cache_free(edesc);
1371 skcipher_request_complete(req, ecode); 1415 skcipher_request_complete(req, ecode);
1372} 1416}
@@ -1405,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
1405 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1449 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1406 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1450 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1407 struct caam_request *caam_req = skcipher_request_ctx(req); 1451 struct caam_request *caam_req = skcipher_request_ctx(req);
1408 int ivsize = crypto_skcipher_ivsize(skcipher);
1409 int ret; 1452 int ret;
1410 1453
1411 /* allocate extended descriptor */ 1454 /* allocate extended descriptor */
@@ -1413,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
1413 if (IS_ERR(edesc)) 1456 if (IS_ERR(edesc))
1414 return PTR_ERR(edesc); 1457 return PTR_ERR(edesc);
1415 1458
1416 /*
1417 * The crypto API expects us to set the IV (req->iv) to the last
1418 * ciphertext block.
1419 */
1420 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
1421 ivsize, 0);
1422
1423 caam_req->flc = &ctx->flc[DECRYPT]; 1459 caam_req->flc = &ctx->flc[DECRYPT];
1424 caam_req->flc_dma = ctx->flc_dma[DECRYPT]; 1460 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1425 caam_req->cbk = skcipher_decrypt_done; 1461 caam_req->cbk = skcipher_decrypt_done;
@@ -3380,9 +3416,9 @@ static int ahash_update_ctx(struct ahash_request *req)
3380 3416
3381 if (to_hash) { 3417 if (to_hash) {
3382 struct dpaa2_sg_entry *sg_table; 3418 struct dpaa2_sg_entry *sg_table;
3419 int src_len = req->nbytes - *next_buflen;
3383 3420
3384 src_nents = sg_nents_for_len(req->src, 3421 src_nents = sg_nents_for_len(req->src, src_len);
3385 req->nbytes - (*next_buflen));
3386 if (src_nents < 0) { 3422 if (src_nents < 0) {
3387 dev_err(ctx->dev, "Invalid number of src SG.\n"); 3423 dev_err(ctx->dev, "Invalid number of src SG.\n");
3388 return src_nents; 3424 return src_nents;
@@ -3409,7 +3445,7 @@ static int ahash_update_ctx(struct ahash_request *req)
3409 3445
3410 edesc->src_nents = src_nents; 3446 edesc->src_nents = src_nents;
3411 qm_sg_src_index = 1 + (*buflen ? 1 : 0); 3447 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3412 qm_sg_bytes = (qm_sg_src_index + mapped_nents) * 3448 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3413 sizeof(*sg_table); 3449 sizeof(*sg_table);
3414 sg_table = &edesc->sgt[0]; 3450 sg_table = &edesc->sgt[0];
3415 3451
@@ -3423,7 +3459,7 @@ static int ahash_update_ctx(struct ahash_request *req)
3423 goto unmap_ctx; 3459 goto unmap_ctx;
3424 3460
3425 if (mapped_nents) { 3461 if (mapped_nents) {
3426 sg_to_qm_sg_last(req->src, mapped_nents, 3462 sg_to_qm_sg_last(req->src, src_len,
3427 sg_table + qm_sg_src_index, 0); 3463 sg_table + qm_sg_src_index, 0);
3428 if (*next_buflen) 3464 if (*next_buflen)
3429 scatterwalk_map_and_copy(next_buf, req->src, 3465 scatterwalk_map_and_copy(next_buf, req->src,
@@ -3494,7 +3530,7 @@ static int ahash_final_ctx(struct ahash_request *req)
3494 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 3530 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3495 GFP_KERNEL : GFP_ATOMIC; 3531 GFP_KERNEL : GFP_ATOMIC;
3496 int buflen = *current_buflen(state); 3532 int buflen = *current_buflen(state);
3497 int qm_sg_bytes, qm_sg_src_index; 3533 int qm_sg_bytes;
3498 int digestsize = crypto_ahash_digestsize(ahash); 3534 int digestsize = crypto_ahash_digestsize(ahash);
3499 struct ahash_edesc *edesc; 3535 struct ahash_edesc *edesc;
3500 struct dpaa2_sg_entry *sg_table; 3536 struct dpaa2_sg_entry *sg_table;
@@ -3505,8 +3541,7 @@ static int ahash_final_ctx(struct ahash_request *req)
3505 if (!edesc) 3541 if (!edesc)
3506 return -ENOMEM; 3542 return -ENOMEM;
3507 3543
3508 qm_sg_src_index = 1 + (buflen ? 1 : 0); 3544 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3509 qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
3510 sg_table = &edesc->sgt[0]; 3545 sg_table = &edesc->sgt[0];
3511 3546
3512 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, 3547 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3518,7 +3553,7 @@ static int ahash_final_ctx(struct ahash_request *req)
3518 if (ret) 3553 if (ret)
3519 goto unmap_ctx; 3554 goto unmap_ctx;
3520 3555
3521 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true); 3556 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3522 3557
3523 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, 3558 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3524 DMA_TO_DEVICE); 3559 DMA_TO_DEVICE);
@@ -3599,7 +3634,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
3599 3634
3600 edesc->src_nents = src_nents; 3635 edesc->src_nents = src_nents;
3601 qm_sg_src_index = 1 + (buflen ? 1 : 0); 3636 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3602 qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table); 3637 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3638 sizeof(*sg_table);
3603 sg_table = &edesc->sgt[0]; 3639 sg_table = &edesc->sgt[0];
3604 3640
3605 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, 3641 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3611,7 +3647,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
3611 if (ret) 3647 if (ret)
3612 goto unmap_ctx; 3648 goto unmap_ctx;
3613 3649
3614 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0); 3650 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3615 3651
3616 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, 3652 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3617 DMA_TO_DEVICE); 3653 DMA_TO_DEVICE);
@@ -3696,8 +3732,8 @@ static int ahash_digest(struct ahash_request *req)
3696 int qm_sg_bytes; 3732 int qm_sg_bytes;
3697 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; 3733 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3698 3734
3699 qm_sg_bytes = mapped_nents * sizeof(*sg_table); 3735 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3700 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); 3736 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3701 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, 3737 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3702 qm_sg_bytes, DMA_TO_DEVICE); 3738 qm_sg_bytes, DMA_TO_DEVICE);
3703 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { 3739 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3840,9 +3876,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
3840 3876
3841 if (to_hash) { 3877 if (to_hash) {
3842 struct dpaa2_sg_entry *sg_table; 3878 struct dpaa2_sg_entry *sg_table;
3879 int src_len = req->nbytes - *next_buflen;
3843 3880
3844 src_nents = sg_nents_for_len(req->src, 3881 src_nents = sg_nents_for_len(req->src, src_len);
3845 req->nbytes - *next_buflen);
3846 if (src_nents < 0) { 3882 if (src_nents < 0) {
3847 dev_err(ctx->dev, "Invalid number of src SG.\n"); 3883 dev_err(ctx->dev, "Invalid number of src SG.\n");
3848 return src_nents; 3884 return src_nents;
@@ -3868,14 +3904,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
3868 } 3904 }
3869 3905
3870 edesc->src_nents = src_nents; 3906 edesc->src_nents = src_nents;
3871 qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table); 3907 qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
3908 sizeof(*sg_table);
3872 sg_table = &edesc->sgt[0]; 3909 sg_table = &edesc->sgt[0];
3873 3910
3874 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); 3911 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3875 if (ret) 3912 if (ret)
3876 goto unmap_ctx; 3913 goto unmap_ctx;
3877 3914
3878 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); 3915 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
3879 3916
3880 if (*next_buflen) 3917 if (*next_buflen)
3881 scatterwalk_map_and_copy(next_buf, req->src, 3918 scatterwalk_map_and_copy(next_buf, req->src,
@@ -3987,14 +4024,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
3987 } 4024 }
3988 4025
3989 edesc->src_nents = src_nents; 4026 edesc->src_nents = src_nents;
3990 qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table); 4027 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
3991 sg_table = &edesc->sgt[0]; 4028 sg_table = &edesc->sgt[0];
3992 4029
3993 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); 4030 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3994 if (ret) 4031 if (ret)
3995 goto unmap; 4032 goto unmap;
3996 4033
3997 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); 4034 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
3998 4035
3999 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, 4036 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4000 DMA_TO_DEVICE); 4037 DMA_TO_DEVICE);
@@ -4064,9 +4101,9 @@ static int ahash_update_first(struct ahash_request *req)
4064 4101
4065 if (to_hash) { 4102 if (to_hash) {
4066 struct dpaa2_sg_entry *sg_table; 4103 struct dpaa2_sg_entry *sg_table;
4104 int src_len = req->nbytes - *next_buflen;
4067 4105
4068 src_nents = sg_nents_for_len(req->src, 4106 src_nents = sg_nents_for_len(req->src, src_len);
4069 req->nbytes - (*next_buflen));
4070 if (src_nents < 0) { 4107 if (src_nents < 0) {
4071 dev_err(ctx->dev, "Invalid number of src SG.\n"); 4108 dev_err(ctx->dev, "Invalid number of src SG.\n");
4072 return src_nents; 4109 return src_nents;
@@ -4101,8 +4138,9 @@ static int ahash_update_first(struct ahash_request *req)
4101 if (mapped_nents > 1) { 4138 if (mapped_nents > 1) {
4102 int qm_sg_bytes; 4139 int qm_sg_bytes;
4103 4140
4104 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); 4141 sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4105 qm_sg_bytes = mapped_nents * sizeof(*sg_table); 4142 qm_sg_bytes = pad_sg_nents(mapped_nents) *
4143 sizeof(*sg_table);
4106 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, 4144 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4107 qm_sg_bytes, 4145 qm_sg_bytes,
4108 DMA_TO_DEVICE); 4146 DMA_TO_DEVICE);
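
The qm_sg_bytes computations in the hunks above all pad the hardware S/G entry count through pad_sg_nents() before multiplying by the entry size. The helper itself is defined elsewhere in the CAAM driver headers; the sketch below only illustrates the assumed behaviour (rounding the count up to a multiple of four so the DMA mapping covers the whole table the engine may fetch), and its names are local to the sketch, not the driver's.

/* Illustrative sketch only -- the real pad_sg_nents() lives in the CAAM headers. */
#define CAAM_SG_ALIGN_SKETCH	4	/* assumed fetch granularity, in entries */

static inline int pad_sg_nents_sketch(int sg_nents)
{
	/* round up to the next multiple of CAAM_SG_ALIGN_SKETCH */
	return (sg_nents + CAAM_SG_ALIGN_SKETCH - 1) & ~(CAAM_SG_ALIGN_SKETCH - 1);
}

/* usage mirroring the hunks above:
 *	qm_sg_bytes = pad_sg_nents_sketch(1 + mapped_nents) * sizeof(*sg_table);
 */
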
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 7205d9f4029e..e4ac5d591ad6 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -82,14 +82,6 @@
82#define HASH_MSG_LEN 8 82#define HASH_MSG_LEN 8
83#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 83#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
84 84
85#ifdef DEBUG
86/* for print_hex_dumps with line references */
87#define debug(format, arg...) printk(format, arg)
88#else
89#define debug(format, arg...)
90#endif
91
92
93static struct list_head hash_list; 85static struct list_head hash_list;
94 86
95/* ahash per-session context */ 87/* ahash per-session context */
@@ -243,11 +235,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
243 ctx->ctx_len, true, ctrlpriv->era); 235 ctx->ctx_len, true, ctrlpriv->era);
244 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 236 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
245 desc_bytes(desc), ctx->dir); 237 desc_bytes(desc), ctx->dir);
246#ifdef DEBUG 238
247 print_hex_dump(KERN_ERR, 239 print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
248 "ahash update shdesc@"__stringify(__LINE__)": ", 240 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
249 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 241 1);
250#endif
251 242
252 /* ahash_update_first shared descriptor */ 243 /* ahash_update_first shared descriptor */
253 desc = ctx->sh_desc_update_first; 244 desc = ctx->sh_desc_update_first;
@@ -255,11 +246,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
255 ctx->ctx_len, false, ctrlpriv->era); 246 ctx->ctx_len, false, ctrlpriv->era);
256 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 247 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
257 desc_bytes(desc), ctx->dir); 248 desc_bytes(desc), ctx->dir);
258#ifdef DEBUG 249 print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
259 print_hex_dump(KERN_ERR, 250 ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
260 "ahash update first shdesc@"__stringify(__LINE__)": ", 251 desc_bytes(desc), 1);
261 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
262#endif
263 252
264 /* ahash_final shared descriptor */ 253 /* ahash_final shared descriptor */
265 desc = ctx->sh_desc_fin; 254 desc = ctx->sh_desc_fin;
@@ -267,11 +256,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
267 ctx->ctx_len, true, ctrlpriv->era); 256 ctx->ctx_len, true, ctrlpriv->era);
268 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 257 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
269 desc_bytes(desc), ctx->dir); 258 desc_bytes(desc), ctx->dir);
270#ifdef DEBUG 259
271 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 260 print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
272 DUMP_PREFIX_ADDRESS, 16, 4, desc, 261 DUMP_PREFIX_ADDRESS, 16, 4, desc,
273 desc_bytes(desc), 1); 262 desc_bytes(desc), 1);
274#endif
275 263
276 /* ahash_digest shared descriptor */ 264 /* ahash_digest shared descriptor */
277 desc = ctx->sh_desc_digest; 265 desc = ctx->sh_desc_digest;
@@ -279,12 +267,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
279 ctx->ctx_len, false, ctrlpriv->era); 267 ctx->ctx_len, false, ctrlpriv->era);
280 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 268 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
281 desc_bytes(desc), ctx->dir); 269 desc_bytes(desc), ctx->dir);
282#ifdef DEBUG 270
283 print_hex_dump(KERN_ERR, 271 print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
284 "ahash digest shdesc@"__stringify(__LINE__)": ", 272 DUMP_PREFIX_ADDRESS, 16, 4, desc,
285 DUMP_PREFIX_ADDRESS, 16, 4, desc, 273 desc_bytes(desc), 1);
286 desc_bytes(desc), 1);
287#endif
288 274
289 return 0; 275 return 0;
290} 276}
@@ -328,9 +314,9 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
328 ctx->ctx_len, ctx->key_dma); 314 ctx->ctx_len, ctx->key_dma);
329 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 315 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
330 desc_bytes(desc), ctx->dir); 316 desc_bytes(desc), ctx->dir);
331 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ", 317 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
332 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 318 " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
333 1); 319 desc_bytes(desc), 1);
334 320
335 /* shared descriptor for ahash_digest */ 321 /* shared descriptor for ahash_digest */
336 desc = ctx->sh_desc_digest; 322 desc = ctx->sh_desc_digest;
@@ -377,8 +363,8 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
377 ctx->ctx_len, 0); 363 ctx->ctx_len, 0);
378 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 364 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
379 desc_bytes(desc), ctx->dir); 365 desc_bytes(desc), ctx->dir);
380 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ", 366 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
381 DUMP_PREFIX_ADDRESS, 16, 4, desc, 367 " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
382 desc_bytes(desc), 1); 368 desc_bytes(desc), 1);
383 369
384 /* shared descriptor for ahash_digest */ 370 /* shared descriptor for ahash_digest */
@@ -429,12 +415,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
429 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 415 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
430 LDST_SRCDST_BYTE_CONTEXT); 416 LDST_SRCDST_BYTE_CONTEXT);
431 417
432#ifdef DEBUG 418 print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
433 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 419 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
434 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); 420 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
435 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 421 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
436 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 422 1);
437#endif
438 423
439 result.err = 0; 424 result.err = 0;
440 init_completion(&result.completion); 425 init_completion(&result.completion);
@@ -444,11 +429,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
444 /* in progress */ 429 /* in progress */
445 wait_for_completion(&result.completion); 430 wait_for_completion(&result.completion);
446 ret = result.err; 431 ret = result.err;
447#ifdef DEBUG 432
448 print_hex_dump(KERN_ERR, 433 print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
449 "digested key@"__stringify(__LINE__)": ", 434 DUMP_PREFIX_ADDRESS, 16, 4, key,
450 DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); 435 digestsize, 1);
451#endif
452 } 436 }
453 dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); 437 dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
454 438
@@ -463,15 +447,14 @@ static int ahash_setkey(struct crypto_ahash *ahash,
463 const u8 *key, unsigned int keylen) 447 const u8 *key, unsigned int keylen)
464{ 448{
465 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 449 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
450 struct device *jrdev = ctx->jrdev;
466 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 451 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
467 int digestsize = crypto_ahash_digestsize(ahash); 452 int digestsize = crypto_ahash_digestsize(ahash);
468 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 453 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
469 int ret; 454 int ret;
470 u8 *hashed_key = NULL; 455 u8 *hashed_key = NULL;
471 456
472#ifdef DEBUG 457 dev_dbg(jrdev, "keylen %d\n", keylen);
473 printk(KERN_ERR "keylen %d\n", keylen);
474#endif
475 458
476 if (keylen > blocksize) { 459 if (keylen > blocksize) {
477 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); 460 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
@@ -600,11 +583,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
600 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 583 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
601 int digestsize = crypto_ahash_digestsize(ahash); 584 int digestsize = crypto_ahash_digestsize(ahash);
602 struct caam_hash_state *state = ahash_request_ctx(req); 585 struct caam_hash_state *state = ahash_request_ctx(req);
603#ifdef DEBUG
604 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 586 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
605 587
606 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 588 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
607#endif
608 589
609 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 590 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
610 if (err) 591 if (err)
@@ -614,11 +595,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
614 memcpy(req->result, state->caam_ctx, digestsize); 595 memcpy(req->result, state->caam_ctx, digestsize);
615 kfree(edesc); 596 kfree(edesc);
616 597
617#ifdef DEBUG 598 print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
618 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 599 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
619 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 600 ctx->ctx_len, 1);
620 ctx->ctx_len, 1);
621#endif
622 601
623 req->base.complete(&req->base, err); 602 req->base.complete(&req->base, err);
624} 603}
@@ -631,11 +610,9 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
631 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 610 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
632 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 611 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
633 struct caam_hash_state *state = ahash_request_ctx(req); 612 struct caam_hash_state *state = ahash_request_ctx(req);
634#ifdef DEBUG
635 int digestsize = crypto_ahash_digestsize(ahash); 613 int digestsize = crypto_ahash_digestsize(ahash);
636 614
637 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 615 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
638#endif
639 616
640 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 617 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
641 if (err) 618 if (err)
@@ -645,15 +622,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
645 switch_buf(state); 622 switch_buf(state);
646 kfree(edesc); 623 kfree(edesc);
647 624
648#ifdef DEBUG 625 print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
649 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 626 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
650 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 627 ctx->ctx_len, 1);
651 ctx->ctx_len, 1);
652 if (req->result) 628 if (req->result)
653 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 629 print_hex_dump_debug("result@"__stringify(__LINE__)": ",
654 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 630 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
655 digestsize, 1); 631 digestsize, 1);
656#endif
657 632
658 req->base.complete(&req->base, err); 633 req->base.complete(&req->base, err);
659} 634}
@@ -666,11 +641,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
666 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 641 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
667 int digestsize = crypto_ahash_digestsize(ahash); 642 int digestsize = crypto_ahash_digestsize(ahash);
668 struct caam_hash_state *state = ahash_request_ctx(req); 643 struct caam_hash_state *state = ahash_request_ctx(req);
669#ifdef DEBUG
670 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 644 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
671 645
672 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 646 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
673#endif
674 647
675 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 648 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
676 if (err) 649 if (err)
@@ -680,11 +653,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
680 memcpy(req->result, state->caam_ctx, digestsize); 653 memcpy(req->result, state->caam_ctx, digestsize);
681 kfree(edesc); 654 kfree(edesc);
682 655
683#ifdef DEBUG 656 print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
684 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 657 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
685 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 658 ctx->ctx_len, 1);
686 ctx->ctx_len, 1);
687#endif
688 659
689 req->base.complete(&req->base, err); 660 req->base.complete(&req->base, err);
690} 661}
@@ -697,11 +668,9 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
697 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 668 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
698 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 669 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
699 struct caam_hash_state *state = ahash_request_ctx(req); 670 struct caam_hash_state *state = ahash_request_ctx(req);
700#ifdef DEBUG
701 int digestsize = crypto_ahash_digestsize(ahash); 671 int digestsize = crypto_ahash_digestsize(ahash);
702 672
703 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 673 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
704#endif
705 674
706 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 675 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
707 if (err) 676 if (err)
@@ -711,15 +680,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
711 switch_buf(state); 680 switch_buf(state);
712 kfree(edesc); 681 kfree(edesc);
713 682
714#ifdef DEBUG 683 print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
715 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 684 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
716 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 685 ctx->ctx_len, 1);
717 ctx->ctx_len, 1);
718 if (req->result) 686 if (req->result)
719 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 687 print_hex_dump_debug("result@"__stringify(__LINE__)": ",
720 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 688 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
721 digestsize, 1); 689 digestsize, 1);
722#endif
723 690
724 req->base.complete(&req->base, err); 691 req->base.complete(&req->base, err);
725} 692}
@@ -759,9 +726,10 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
759 726
760 if (nents > 1 || first_sg) { 727 if (nents > 1 || first_sg) {
761 struct sec4_sg_entry *sg = edesc->sec4_sg; 728 struct sec4_sg_entry *sg = edesc->sec4_sg;
762 unsigned int sgsize = sizeof(*sg) * (first_sg + nents); 729 unsigned int sgsize = sizeof(*sg) *
730 pad_sg_nents(first_sg + nents);
763 731
764 sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); 732 sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
765 733
766 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 734 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
767 if (dma_mapping_error(ctx->jrdev, src_dma)) { 735 if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -819,8 +787,10 @@ static int ahash_update_ctx(struct ahash_request *req)
819 } 787 }
820 788
821 if (to_hash) { 789 if (to_hash) {
822 src_nents = sg_nents_for_len(req->src, 790 int pad_nents;
823 req->nbytes - (*next_buflen)); 791 int src_len = req->nbytes - *next_buflen;
792
793 src_nents = sg_nents_for_len(req->src, src_len);
824 if (src_nents < 0) { 794 if (src_nents < 0) {
825 dev_err(jrdev, "Invalid number of src SG.\n"); 795 dev_err(jrdev, "Invalid number of src SG.\n");
826 return src_nents; 796 return src_nents;
@@ -838,15 +808,14 @@ static int ahash_update_ctx(struct ahash_request *req)
838 } 808 }
839 809
840 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 810 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
841 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 811 pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
842 sizeof(struct sec4_sg_entry); 812 sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
843 813
844 /* 814 /*
845 * allocate space for base edesc and hw desc commands, 815 * allocate space for base edesc and hw desc commands,
846 * link tables 816 * link tables
847 */ 817 */
848 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 818 edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
849 ctx->sh_desc_update,
850 ctx->sh_desc_update_dma, flags); 819 ctx->sh_desc_update_dma, flags);
851 if (!edesc) { 820 if (!edesc) {
852 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 821 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -866,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req)
866 goto unmap_ctx; 835 goto unmap_ctx;
867 836
868 if (mapped_nents) 837 if (mapped_nents)
869 sg_to_sec4_sg_last(req->src, mapped_nents, 838 sg_to_sec4_sg_last(req->src, src_len,
870 edesc->sec4_sg + sec4_sg_src_index, 839 edesc->sec4_sg + sec4_sg_src_index,
871 0); 840 0);
872 else 841 else
@@ -893,11 +862,9 @@ static int ahash_update_ctx(struct ahash_request *req)
893 862
894 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 863 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
895 864
896#ifdef DEBUG 865 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
897 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 866 DUMP_PREFIX_ADDRESS, 16, 4, desc,
898 DUMP_PREFIX_ADDRESS, 16, 4, desc, 867 desc_bytes(desc), 1);
899 desc_bytes(desc), 1);
900#endif
901 868
902 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 869 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
903 if (ret) 870 if (ret)
@@ -910,13 +877,12 @@ static int ahash_update_ctx(struct ahash_request *req)
910 *buflen = *next_buflen; 877 *buflen = *next_buflen;
911 *next_buflen = last_buflen; 878 *next_buflen = last_buflen;
912 } 879 }
913#ifdef DEBUG 880
914 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 881 print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
915 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 882 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
916 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 883 print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
917 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 884 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
918 *next_buflen, 1); 885 *next_buflen, 1);
919#endif
920 886
921 return ret; 887 return ret;
922unmap_ctx: 888unmap_ctx:
@@ -935,18 +901,17 @@ static int ahash_final_ctx(struct ahash_request *req)
935 GFP_KERNEL : GFP_ATOMIC; 901 GFP_KERNEL : GFP_ATOMIC;
936 int buflen = *current_buflen(state); 902 int buflen = *current_buflen(state);
937 u32 *desc; 903 u32 *desc;
938 int sec4_sg_bytes, sec4_sg_src_index; 904 int sec4_sg_bytes;
939 int digestsize = crypto_ahash_digestsize(ahash); 905 int digestsize = crypto_ahash_digestsize(ahash);
940 struct ahash_edesc *edesc; 906 struct ahash_edesc *edesc;
941 int ret; 907 int ret;
942 908
943 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 909 sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
944 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); 910 sizeof(struct sec4_sg_entry);
945 911
946 /* allocate space for base edesc and hw desc commands, link tables */ 912 /* allocate space for base edesc and hw desc commands, link tables */
947 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, 913 edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
948 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 914 ctx->sh_desc_fin_dma, flags);
949 flags);
950 if (!edesc) 915 if (!edesc)
951 return -ENOMEM; 916 return -ENOMEM;
952 917
@@ -963,7 +928,7 @@ static int ahash_final_ctx(struct ahash_request *req)
963 if (ret) 928 if (ret)
964 goto unmap_ctx; 929 goto unmap_ctx;
965 930
966 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); 931 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
967 932
968 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 933 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
969 sec4_sg_bytes, DMA_TO_DEVICE); 934 sec4_sg_bytes, DMA_TO_DEVICE);
@@ -977,10 +942,9 @@ static int ahash_final_ctx(struct ahash_request *req)
977 LDST_SGF); 942 LDST_SGF);
978 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 943 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
979 944
980#ifdef DEBUG 945 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
981 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 946 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
982 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 947 1);
983#endif
984 948
985 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 949 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
986 if (ret) 950 if (ret)
@@ -1058,10 +1022,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
1058 1022
1059 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 1023 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
1060 1024
1061#ifdef DEBUG 1025 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1062 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1026 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1063 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1027 1);
1064#endif
1065 1028
1066 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1029 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1067 if (ret) 1030 if (ret)
@@ -1135,10 +1098,9 @@ static int ahash_digest(struct ahash_request *req)
1135 return -ENOMEM; 1098 return -ENOMEM;
1136 } 1099 }
1137 1100
1138#ifdef DEBUG 1101 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1139 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1102 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1140 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1103 1);
1141#endif
1142 1104
1143 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1105 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1144 if (!ret) { 1106 if (!ret) {
@@ -1190,10 +1152,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
1190 if (ret) 1152 if (ret)
1191 goto unmap; 1153 goto unmap;
1192 1154
1193#ifdef DEBUG 1155 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1194 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1156 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1195 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1157 1);
1196#endif
1197 1158
1198 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1159 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1199 if (!ret) { 1160 if (!ret) {
@@ -1246,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1246 } 1207 }
1247 1208
1248 if (to_hash) { 1209 if (to_hash) {
1249 src_nents = sg_nents_for_len(req->src, 1210 int pad_nents;
1250 req->nbytes - *next_buflen); 1211 int src_len = req->nbytes - *next_buflen;
1212
1213 src_nents = sg_nents_for_len(req->src, src_len);
1251 if (src_nents < 0) { 1214 if (src_nents < 0) {
1252 dev_err(jrdev, "Invalid number of src SG.\n"); 1215 dev_err(jrdev, "Invalid number of src SG.\n");
1253 return src_nents; 1216 return src_nents;
@@ -1264,14 +1227,14 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1264 mapped_nents = 0; 1227 mapped_nents = 0;
1265 } 1228 }
1266 1229
1267 sec4_sg_bytes = (1 + mapped_nents) * 1230 pad_nents = pad_sg_nents(1 + mapped_nents);
1268 sizeof(struct sec4_sg_entry); 1231 sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
1269 1232
1270 /* 1233 /*
1271 * allocate space for base edesc and hw desc commands, 1234 * allocate space for base edesc and hw desc commands,
1272 * link tables 1235 * link tables
1273 */ 1236 */
1274 edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, 1237 edesc = ahash_edesc_alloc(ctx, pad_nents,
1275 ctx->sh_desc_update_first, 1238 ctx->sh_desc_update_first,
1276 ctx->sh_desc_update_first_dma, 1239 ctx->sh_desc_update_first_dma,
1277 flags); 1240 flags);
@@ -1287,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1287 if (ret) 1250 if (ret)
1288 goto unmap_ctx; 1251 goto unmap_ctx;
1289 1252
1290 sg_to_sec4_sg_last(req->src, mapped_nents, 1253 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1291 edesc->sec4_sg + 1, 0);
1292 1254
1293 if (*next_buflen) { 1255 if (*next_buflen) {
1294 scatterwalk_map_and_copy(next_buf, req->src, 1256 scatterwalk_map_and_copy(next_buf, req->src,
@@ -1313,11 +1275,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1313 if (ret) 1275 if (ret)
1314 goto unmap_ctx; 1276 goto unmap_ctx;
1315 1277
1316#ifdef DEBUG 1278 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1317 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1279 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1318 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1280 desc_bytes(desc), 1);
1319 desc_bytes(desc), 1);
1320#endif
1321 1281
1322 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1282 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1323 if (ret) 1283 if (ret)
@@ -1333,13 +1293,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1333 *buflen = *next_buflen; 1293 *buflen = *next_buflen;
1334 *next_buflen = 0; 1294 *next_buflen = 0;
1335 } 1295 }
1336#ifdef DEBUG 1296
1337 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1297 print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
1338 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1298 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1339 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1299 print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
1340 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1300 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
1341 *next_buflen, 1); 1301 1);
1342#endif
1343 1302
1344 return ret; 1303 return ret;
1345 unmap_ctx: 1304 unmap_ctx:
@@ -1414,10 +1373,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
1414 if (ret) 1373 if (ret)
1415 goto unmap; 1374 goto unmap;
1416 1375
1417#ifdef DEBUG 1376 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1418 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1377 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1419 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1378 1);
1420#endif
1421 1379
1422 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1380 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1423 if (!ret) { 1381 if (!ret) {
@@ -1517,11 +1475,9 @@ static int ahash_update_first(struct ahash_request *req)
1517 if (ret) 1475 if (ret)
1518 goto unmap_ctx; 1476 goto unmap_ctx;
1519 1477
1520#ifdef DEBUG 1478 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1521 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1479 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1522 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1480 desc_bytes(desc), 1);
1523 desc_bytes(desc), 1);
1524#endif
1525 1481
1526 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1482 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1527 if (ret) 1483 if (ret)
@@ -1539,11 +1495,10 @@ static int ahash_update_first(struct ahash_request *req)
1539 req->nbytes, 0); 1495 req->nbytes, 0);
1540 switch_buf(state); 1496 switch_buf(state);
1541 } 1497 }
1542#ifdef DEBUG 1498
1543 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1499 print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
1544 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1500 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
1545 *next_buflen, 1); 1501 1);
1546#endif
1547 1502
1548 return ret; 1503 return ret;
1549 unmap_ctx: 1504 unmap_ctx:
@@ -1930,7 +1885,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1930 caam_jr_free(ctx->jrdev); 1885 caam_jr_free(ctx->jrdev);
1931} 1886}
1932 1887
1933static void __exit caam_algapi_hash_exit(void) 1888void caam_algapi_hash_exit(void)
1934{ 1889{
1935 struct caam_hash_alg *t_alg, *n; 1890 struct caam_hash_alg *t_alg, *n;
1936 1891
@@ -1988,40 +1943,13 @@ caam_hash_alloc(struct caam_hash_template *template,
1988 return t_alg; 1943 return t_alg;
1989} 1944}
1990 1945
1991static int __init caam_algapi_hash_init(void) 1946int caam_algapi_hash_init(struct device *ctrldev)
1992{ 1947{
1993 struct device_node *dev_node;
1994 struct platform_device *pdev;
1995 int i = 0, err = 0; 1948 int i = 0, err = 0;
1996 struct caam_drv_private *priv; 1949 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1997 unsigned int md_limit = SHA512_DIGEST_SIZE; 1950 unsigned int md_limit = SHA512_DIGEST_SIZE;
1998 u32 md_inst, md_vid; 1951 u32 md_inst, md_vid;
1999 1952
2000 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2001 if (!dev_node) {
2002 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2003 if (!dev_node)
2004 return -ENODEV;
2005 }
2006
2007 pdev = of_find_device_by_node(dev_node);
2008 if (!pdev) {
2009 of_node_put(dev_node);
2010 return -ENODEV;
2011 }
2012
2013 priv = dev_get_drvdata(&pdev->dev);
2014 of_node_put(dev_node);
2015
2016 /*
2017 * If priv is NULL, it's probably because the caam driver wasn't
2018 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2019 */
2020 if (!priv) {
2021 err = -ENODEV;
2022 goto out_put_dev;
2023 }
2024
2025 /* 1953 /*
2026 * Register crypto algorithms the device supports. First, identify 1954 * Register crypto algorithms the device supports. First, identify
2027 * presence and attributes of MD block. 1955 * presence and attributes of MD block.
@@ -2042,10 +1970,8 @@ static int __init caam_algapi_hash_init(void)
2042 * Skip registration of any hashing algorithms if MD block 1970 * Skip registration of any hashing algorithms if MD block
2043 * is not present. 1971 * is not present.
2044 */ 1972 */
2045 if (!md_inst) { 1973 if (!md_inst)
2046 err = -ENODEV; 1974 return -ENODEV;
2047 goto out_put_dev;
2048 }
2049 1975
2050 /* Limit digest size based on LP256 */ 1976 /* Limit digest size based on LP256 */
2051 if (md_vid == CHA_VER_VID_MD_LP256) 1977 if (md_vid == CHA_VER_VID_MD_LP256)
@@ -2102,14 +2028,5 @@ static int __init caam_algapi_hash_init(void)
2102 list_add_tail(&t_alg->entry, &hash_list); 2028 list_add_tail(&t_alg->entry, &hash_list);
2103 } 2029 }
2104 2030
2105out_put_dev:
2106 put_device(&pdev->dev);
2107 return err; 2031 return err;
2108} 2032}
2109
2110module_init(caam_algapi_hash_init);
2111module_exit(caam_algapi_hash_exit);
2112
2113MODULE_LICENSE("GPL");
2114MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
2115MODULE_AUTHOR("Freescale Semiconductor - NMG");
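
The caamhash.c hunks above mostly replace "#ifdef DEBUG" guarded print_hex_dump(KERN_ERR, ...) and printk(KERN_ERR, ...) calls with print_hex_dump_debug() and dev_dbg(), which are already compiled out (or gated by dynamic debug) unless debugging is enabled, making the explicit preprocessor guards unnecessary. A minimal usage sketch, with a hypothetical helper name:

#include <linux/types.h>
#include <linux/printk.h>

/* dump_jobdesc_sketch() is hypothetical; only the print_hex_dump_debug() call is real. */
static void dump_jobdesc_sketch(const u32 *desc, size_t len_bytes)
{
	/*
	 * Emitted only when this file is built with -DDEBUG or when dynamic
	 * debug enables it at run time; otherwise it is a no-op, which is
	 * why the #ifdef DEBUG wrappers above could be dropped.
	 */
	print_hex_dump_debug("jobdesc: ", DUMP_PREFIX_ADDRESS, 16, 4,
			     desc, len_bytes, 1);
}
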
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index fe24485274e1..80574106af29 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,7 +3,7 @@
3 * caam - Freescale FSL CAAM support for Public Key Cryptography 3 * caam - Freescale FSL CAAM support for Public Key Cryptography
4 * 4 *
5 * Copyright 2016 Freescale Semiconductor, Inc. 5 * Copyright 2016 Freescale Semiconductor, Inc.
6 * Copyright 2018 NXP 6 * Copyright 2018-2019 NXP
7 * 7 *
8 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry 8 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
9 * all the desired key parameters, input and output pointers. 9 * all the desired key parameters, input and output pointers.
@@ -24,12 +24,18 @@
24 sizeof(struct rsa_priv_f2_pdb)) 24 sizeof(struct rsa_priv_f2_pdb))
25#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ 25#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
26 sizeof(struct rsa_priv_f3_pdb)) 26 sizeof(struct rsa_priv_f3_pdb))
27#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */
28
29/* buffer filled with zeros, used for padding */
30static u8 *zero_buffer;
27 31
28static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, 32static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
29 struct akcipher_request *req) 33 struct akcipher_request *req)
30{ 34{
35 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
36
31 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); 37 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
32 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 38 dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
33 39
34 if (edesc->sec4_sg_bytes) 40 if (edesc->sec4_sg_bytes)
35 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, 41 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
@@ -168,6 +174,13 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
168 akcipher_request_complete(req, err); 174 akcipher_request_complete(req, err);
169} 175}
170 176
177/**
178 * Count leading zeros, needed for stripping, from a given scatterlist
179 *
180 * @sgl : scatterlist to count zeros from
181 * @nbytes: number of zeros, in bytes, to strip
182 * @flags : operation flags
183 */
171static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, 184static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
172 unsigned int nbytes, 185 unsigned int nbytes,
173 unsigned int flags) 186 unsigned int flags)
@@ -187,7 +200,8 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
187 lzeros = 0; 200 lzeros = 0;
188 len = 0; 201 len = 0;
189 while (nbytes > 0) { 202 while (nbytes > 0) {
190 while (len && !*buff) { 203 /* do not strip more than given bytes */
204 while (len && !*buff && lzeros < nbytes) {
191 lzeros++; 205 lzeros++;
192 len--; 206 len--;
193 buff++; 207 buff++;
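
The added "lzeros < nbytes" bound means the loop strips at most the number of bytes the caller asked for; a later hunk passes req->src_len - key->n_sz, so inputs with extra leading zeros can no longer be trimmed below the RSA modulus size. A buffer-based stand-in for the same idea (the driver walks a scatterlist instead; names here are local to the sketch):

#include <linux/types.h>

static int count_leading_zeros_sketch(const u8 *buf, size_t len, size_t max_strip)
{
	size_t lzeros = 0;

	/* never strip more than max_strip bytes, even if more of them are zero */
	while (lzeros < len && lzeros < max_strip && !buf[lzeros])
		lzeros++;

	return lzeros;
}
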
@@ -218,6 +232,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
218 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 232 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
219 struct device *dev = ctx->dev; 233 struct device *dev = ctx->dev;
220 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); 234 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
235 struct caam_rsa_key *key = &ctx->key;
221 struct rsa_edesc *edesc; 236 struct rsa_edesc *edesc;
222 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 237 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
223 GFP_KERNEL : GFP_ATOMIC; 238 GFP_KERNEL : GFP_ATOMIC;
@@ -225,22 +240,45 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
225 int sgc; 240 int sgc;
226 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 241 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
227 int src_nents, dst_nents; 242 int src_nents, dst_nents;
243 unsigned int diff_size = 0;
228 int lzeros; 244 int lzeros;
229 245
230 lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); 246 if (req->src_len > key->n_sz) {
231 if (lzeros < 0) 247 /*
232 return ERR_PTR(lzeros); 248 * strip leading zeros and
233 249 * return the number of zeros to skip
234 req->src_len -= lzeros; 250 */
235 req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); 251 lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
252 key->n_sz, sg_flags);
253 if (lzeros < 0)
254 return ERR_PTR(lzeros);
255
256 req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
257 lzeros);
258 req_ctx->fixup_src_len = req->src_len - lzeros;
259 } else {
260 /*
261 * input src is less than the n key modulus,
262 * so there will be zero padding
263 */
264 diff_size = key->n_sz - req->src_len;
265 req_ctx->fixup_src = req->src;
266 req_ctx->fixup_src_len = req->src_len;
267 }
236 268
237 src_nents = sg_nents_for_len(req->src, req->src_len); 269 src_nents = sg_nents_for_len(req_ctx->fixup_src,
270 req_ctx->fixup_src_len);
238 dst_nents = sg_nents_for_len(req->dst, req->dst_len); 271 dst_nents = sg_nents_for_len(req->dst, req->dst_len);
239 272
240 if (src_nents > 1) 273 if (!diff_size && src_nents == 1)
241 sec4_sg_len = src_nents; 274 sec4_sg_len = 0; /* no need for an input hw s/g table */
275 else
276 sec4_sg_len = src_nents + !!diff_size;
277 sec4_sg_index = sec4_sg_len;
242 if (dst_nents > 1) 278 if (dst_nents > 1)
243 sec4_sg_len += dst_nents; 279 sec4_sg_len += pad_sg_nents(dst_nents);
280 else
281 sec4_sg_len = pad_sg_nents(sec4_sg_len);
244 282
245 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 283 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
246 284
@@ -250,7 +288,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
250 if (!edesc) 288 if (!edesc)
251 return ERR_PTR(-ENOMEM); 289 return ERR_PTR(-ENOMEM);
252 290
253 sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); 291 sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
254 if (unlikely(!sgc)) { 292 if (unlikely(!sgc)) {
255 dev_err(dev, "unable to map source\n"); 293 dev_err(dev, "unable to map source\n");
256 goto src_fail; 294 goto src_fail;
@@ -263,14 +301,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
263 } 301 }
264 302
265 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; 303 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
304 if (diff_size)
305 dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
306 0);
307
308 if (sec4_sg_index)
309 sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
310 edesc->sec4_sg + !!diff_size, 0);
266 311
267 sec4_sg_index = 0;
268 if (src_nents > 1) {
269 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
270 sec4_sg_index += src_nents;
271 }
272 if (dst_nents > 1) 312 if (dst_nents > 1)
273 sg_to_sec4_sg_last(req->dst, dst_nents, 313 sg_to_sec4_sg_last(req->dst, req->dst_len,
274 edesc->sec4_sg + sec4_sg_index, 0); 314 edesc->sec4_sg + sec4_sg_index, 0);
275 315
276 /* Save nents for later use in Job Descriptor */ 316 /* Save nents for later use in Job Descriptor */
@@ -289,12 +329,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
289 329
290 edesc->sec4_sg_bytes = sec4_sg_bytes; 330 edesc->sec4_sg_bytes = sec4_sg_bytes;
291 331
332 print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
333 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
334 edesc->sec4_sg_bytes, 1);
335
292 return edesc; 336 return edesc;
293 337
294sec4_sg_fail: 338sec4_sg_fail:
295 dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); 339 dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
296dst_fail: 340dst_fail:
297 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); 341 dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
298src_fail: 342src_fail:
299 kfree(edesc); 343 kfree(edesc);
300 return ERR_PTR(-ENOMEM); 344 return ERR_PTR(-ENOMEM);
@@ -304,6 +348,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
304 struct rsa_edesc *edesc) 348 struct rsa_edesc *edesc)
305{ 349{
306 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 350 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
351 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
307 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 352 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
308 struct caam_rsa_key *key = &ctx->key; 353 struct caam_rsa_key *key = &ctx->key;
309 struct device *dev = ctx->dev; 354 struct device *dev = ctx->dev;
@@ -328,7 +373,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
328 pdb->f_dma = edesc->sec4_sg_dma; 373 pdb->f_dma = edesc->sec4_sg_dma;
329 sec4_sg_index += edesc->src_nents; 374 sec4_sg_index += edesc->src_nents;
330 } else { 375 } else {
331 pdb->f_dma = sg_dma_address(req->src); 376 pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
332 } 377 }
333 378
334 if (edesc->dst_nents > 1) { 379 if (edesc->dst_nents > 1) {
@@ -340,7 +385,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
340 } 385 }
341 386
342 pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; 387 pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
343 pdb->f_len = req->src_len; 388 pdb->f_len = req_ctx->fixup_src_len;
344 389
345 return 0; 390 return 0;
346} 391}
@@ -373,7 +418,9 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
373 pdb->g_dma = edesc->sec4_sg_dma; 418 pdb->g_dma = edesc->sec4_sg_dma;
374 sec4_sg_index += edesc->src_nents; 419 sec4_sg_index += edesc->src_nents;
375 } else { 420 } else {
376 pdb->g_dma = sg_dma_address(req->src); 421 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
422
423 pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
377 } 424 }
378 425
379 if (edesc->dst_nents > 1) { 426 if (edesc->dst_nents > 1) {
@@ -436,7 +483,9 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
436 pdb->g_dma = edesc->sec4_sg_dma; 483 pdb->g_dma = edesc->sec4_sg_dma;
437 sec4_sg_index += edesc->src_nents; 484 sec4_sg_index += edesc->src_nents;
438 } else { 485 } else {
439 pdb->g_dma = sg_dma_address(req->src); 486 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
487
488 pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
440 } 489 }
441 490
442 if (edesc->dst_nents > 1) { 491 if (edesc->dst_nents > 1) {
@@ -523,7 +572,9 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
523 pdb->g_dma = edesc->sec4_sg_dma; 572 pdb->g_dma = edesc->sec4_sg_dma;
524 sec4_sg_index += edesc->src_nents; 573 sec4_sg_index += edesc->src_nents;
525 } else { 574 } else {
526 pdb->g_dma = sg_dma_address(req->src); 575 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
576
577 pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
527 } 578 }
528 579
529 if (edesc->dst_nents > 1) { 580 if (edesc->dst_nents > 1) {
@@ -978,6 +1029,15 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
978 return PTR_ERR(ctx->dev); 1029 return PTR_ERR(ctx->dev);
979 } 1030 }
980 1031
1032 ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
1033 CAAM_RSA_MAX_INPUT_SIZE - 1,
1034 DMA_TO_DEVICE);
1035 if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
1036 dev_err(ctx->dev, "unable to map padding\n");
1037 caam_jr_free(ctx->dev);
1038 return -ENOMEM;
1039 }
1040
981 return 0; 1041 return 0;
982} 1042}
983 1043
@@ -987,6 +1047,8 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
987 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 1047 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
988 struct caam_rsa_key *key = &ctx->key; 1048 struct caam_rsa_key *key = &ctx->key;
989 1049
1050 dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
1051 1, DMA_TO_DEVICE);
990 caam_rsa_free_key(key); 1052 caam_rsa_free_key(key);
991 caam_jr_free(ctx->dev); 1053 caam_jr_free(ctx->dev);
992} 1054}
@@ -1010,41 +1072,12 @@ static struct akcipher_alg caam_rsa = {
1010}; 1072};
1011 1073
1012/* Public Key Cryptography module initialization handler */ 1074/* Public Key Cryptography module initialization handler */
1013static int __init caam_pkc_init(void) 1075int caam_pkc_init(struct device *ctrldev)
1014{ 1076{
1015 struct device_node *dev_node; 1077 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1016 struct platform_device *pdev;
1017 struct device *ctrldev;
1018 struct caam_drv_private *priv;
1019 u32 pk_inst; 1078 u32 pk_inst;
1020 int err; 1079 int err;
1021 1080
1022 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1023 if (!dev_node) {
1024 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1025 if (!dev_node)
1026 return -ENODEV;
1027 }
1028
1029 pdev = of_find_device_by_node(dev_node);
1030 if (!pdev) {
1031 of_node_put(dev_node);
1032 return -ENODEV;
1033 }
1034
1035 ctrldev = &pdev->dev;
1036 priv = dev_get_drvdata(ctrldev);
1037 of_node_put(dev_node);
1038
1039 /*
1040 * If priv is NULL, it's probably because the caam driver wasn't
1041 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1042 */
1043 if (!priv) {
1044 err = -ENODEV;
1045 goto out_put_dev;
1046 }
1047
1048 /* Determine public key hardware accelerator presence. */ 1081 /* Determine public key hardware accelerator presence. */
1049 if (priv->era < 10) 1082 if (priv->era < 10)
1050 pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & 1083 pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
@@ -1053,31 +1086,29 @@ static int __init caam_pkc_init(void)
1053 pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; 1086 pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
1054 1087
1055 /* Do not register algorithms if PKHA is not present. */ 1088 /* Do not register algorithms if PKHA is not present. */
1056 if (!pk_inst) { 1089 if (!pk_inst)
1057 err = -ENODEV; 1090 return 0;
1058 goto out_put_dev; 1091
1059 } 1092 /* allocate zero buffer, used for padding input */
1093 zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
1094 GFP_KERNEL);
1095 if (!zero_buffer)
1096 return -ENOMEM;
1060 1097
1061 err = crypto_register_akcipher(&caam_rsa); 1098 err = crypto_register_akcipher(&caam_rsa);
1062 if (err) 1099 if (err) {
1100 kfree(zero_buffer);
1063 dev_warn(ctrldev, "%s alg registration failed\n", 1101 dev_warn(ctrldev, "%s alg registration failed\n",
1064 caam_rsa.base.cra_driver_name); 1102 caam_rsa.base.cra_driver_name);
1065 else 1103 } else {
1066 dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); 1104 dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
1105 }
1067 1106
1068out_put_dev:
1069 put_device(ctrldev);
1070 return err; 1107 return err;
1071} 1108}
1072 1109
1073static void __exit caam_pkc_exit(void) 1110void caam_pkc_exit(void)
1074{ 1111{
1112 kfree(zero_buffer);
1075 crypto_unregister_akcipher(&caam_rsa); 1113 crypto_unregister_akcipher(&caam_rsa);
1076} 1114}
1077
1078module_init(caam_pkc_init);
1079module_exit(caam_pkc_exit);
1080
1081MODULE_LICENSE("Dual BSD/GPL");
1082MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
1083MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 82645bcf8b27..2c488c9a3812 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -89,18 +89,25 @@ struct caam_rsa_key {
89 * caam_rsa_ctx - per session context. 89 * caam_rsa_ctx - per session context.
90 * @key : RSA key in DMA zone 90 * @key : RSA key in DMA zone
91 * @dev : device structure 91 * @dev : device structure
92 * @padding_dma : dma address of padding, for adding it to the input
92 */ 93 */
93struct caam_rsa_ctx { 94struct caam_rsa_ctx {
94 struct caam_rsa_key key; 95 struct caam_rsa_key key;
95 struct device *dev; 96 struct device *dev;
97 dma_addr_t padding_dma;
98
96}; 99};
97 100
98/** 101/**
99 * caam_rsa_req_ctx - per request context. 102 * caam_rsa_req_ctx - per request context.
100 * @src: input scatterlist (stripped of leading zeros) 103 * @src : input scatterlist (stripped of leading zeros)
104 * @fixup_src : input scatterlist (that might be stripped of leading zeros)
105 * @fixup_src_len : length of the fixup_src input scatterlist
101 */ 106 */
102struct caam_rsa_req_ctx { 107struct caam_rsa_req_ctx {
103 struct scatterlist src[2]; 108 struct scatterlist src[2];
109 struct scatterlist *fixup_src;
110 unsigned int fixup_src_len;
104}; 111};
105 112
106/** 113/**
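The new fixup_src/fixup_src_len fields let the driver hand the hardware a possibly-adjusted view of the input without touching the caller's req->src, and padding_dma keeps a pre-mapped run of zero bytes available for left-padding short inputs. A rough illustration of how these pieces fit together (assumed sizes, not code from the patch):

	/*
	 * Illustrative only: for an RSA key with n_sz = 256 and a 200-byte
	 * input, the descriptor needs 56 leading zero bytes. Rather than
	 * reallocating the request buffer, n_sz - src_len bytes of the
	 * pre-mapped zero buffer (ctx->padding_dma) can be chained ahead of
	 * req_ctx->fixup_src in the S/G table handed to the PKHA.
	 */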
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 95eb5402c59f..561bcb535184 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,7 +3,7 @@
3 * caam - Freescale FSL CAAM support for hw_random 3 * caam - Freescale FSL CAAM support for hw_random
4 * 4 *
5 * Copyright 2011 Freescale Semiconductor, Inc. 5 * Copyright 2011 Freescale Semiconductor, Inc.
6 * Copyright 2018 NXP 6 * Copyright 2018-2019 NXP
7 * 7 *
8 * Based on caamalg.c crypto API driver. 8 * Based on caamalg.c crypto API driver.
9 * 9 *
@@ -113,10 +113,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
113 /* Buffer refilled, invalidate cache */ 113 /* Buffer refilled, invalidate cache */
114 dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); 114 dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
115 115
116#ifdef DEBUG 116 print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
117 print_hex_dump(KERN_ERR, "rng refreshed buf@: ", 117 bd->buf, RN_BUF_SIZE, 1);
118 DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
119#endif
120} 118}
121 119
122static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) 120static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
@@ -209,10 +207,10 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
209 dev_err(jrdev, "unable to map shared descriptor\n"); 207 dev_err(jrdev, "unable to map shared descriptor\n");
210 return -ENOMEM; 208 return -ENOMEM;
211 } 209 }
212#ifdef DEBUG 210
213 print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, 211 print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
214 desc, desc_bytes(desc), 1); 212 desc, desc_bytes(desc), 1);
215#endif 213
216 return 0; 214 return 0;
217} 215}
218 216
@@ -233,10 +231,10 @@ static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
233 } 231 }
234 232
235 append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); 233 append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
236#ifdef DEBUG 234
237 print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, 235 print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
238 desc, desc_bytes(desc), 1); 236 desc, desc_bytes(desc), 1);
239#endif 237
240 return 0; 238 return 0;
241} 239}
242 240
@@ -296,47 +294,20 @@ static struct hwrng caam_rng = {
296 .read = caam_read, 294 .read = caam_read,
297}; 295};
298 296
299static void __exit caam_rng_exit(void) 297void caam_rng_exit(void)
300{ 298{
301 caam_jr_free(rng_ctx->jrdev); 299 caam_jr_free(rng_ctx->jrdev);
302 hwrng_unregister(&caam_rng); 300 hwrng_unregister(&caam_rng);
303 kfree(rng_ctx); 301 kfree(rng_ctx);
304} 302}
305 303
306static int __init caam_rng_init(void) 304int caam_rng_init(struct device *ctrldev)
307{ 305{
308 struct device *dev; 306 struct device *dev;
309 struct device_node *dev_node;
310 struct platform_device *pdev;
311 struct caam_drv_private *priv;
312 u32 rng_inst; 307 u32 rng_inst;
308 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
313 int err; 309 int err;
314 310
315 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
316 if (!dev_node) {
317 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
318 if (!dev_node)
319 return -ENODEV;
320 }
321
322 pdev = of_find_device_by_node(dev_node);
323 if (!pdev) {
324 of_node_put(dev_node);
325 return -ENODEV;
326 }
327
328 priv = dev_get_drvdata(&pdev->dev);
329 of_node_put(dev_node);
330
331 /*
332 * If priv is NULL, it's probably because the caam driver wasn't
333 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
334 */
335 if (!priv) {
336 err = -ENODEV;
337 goto out_put_dev;
338 }
339
340 /* Check for an instantiated RNG before registration */ 311 /* Check for an instantiated RNG before registration */
341 if (priv->era < 10) 312 if (priv->era < 10)
342 rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & 313 rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
@@ -344,16 +315,13 @@ static int __init caam_rng_init(void)
344 else 315 else
345 rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; 316 rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
346 317
347 if (!rng_inst) { 318 if (!rng_inst)
348 err = -ENODEV; 319 return 0;
349 goto out_put_dev;
350 }
351 320
352 dev = caam_jr_alloc(); 321 dev = caam_jr_alloc();
353 if (IS_ERR(dev)) { 322 if (IS_ERR(dev)) {
354 pr_err("Job Ring Device allocation for transform failed\n"); 323 pr_err("Job Ring Device allocation for transform failed\n");
355 err = PTR_ERR(dev); 324 return PTR_ERR(dev);
356 goto out_put_dev;
357 } 325 }
358 rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); 326 rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
359 if (!rng_ctx) { 327 if (!rng_ctx) {
@@ -364,7 +332,6 @@ static int __init caam_rng_init(void)
364 if (err) 332 if (err)
365 goto free_rng_ctx; 333 goto free_rng_ctx;
366 334
367 put_device(&pdev->dev);
368 dev_info(dev, "registering rng-caam\n"); 335 dev_info(dev, "registering rng-caam\n");
369 return hwrng_register(&caam_rng); 336 return hwrng_register(&caam_rng);
370 337
@@ -372,14 +339,5 @@ free_rng_ctx:
372 kfree(rng_ctx); 339 kfree(rng_ctx);
373free_caam_alloc: 340free_caam_alloc:
374 caam_jr_free(dev); 341 caam_jr_free(dev);
375out_put_dev:
376 put_device(&pdev->dev);
377 return err; 342 return err;
378} 343}
379
380module_init(caam_rng_init);
381module_exit(caam_rng_exit);
382
383MODULE_LICENSE("GPL");
384MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
385MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index fec39c35c877..4e43ca4d3656 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,7 +3,7 @@
3 * Controller-level driver, kernel property detection, initialization 3 * Controller-level driver, kernel property detection, initialization
4 * 4 *
5 * Copyright 2008-2012 Freescale Semiconductor, Inc. 5 * Copyright 2008-2012 Freescale Semiconductor, Inc.
6 * Copyright 2018 NXP 6 * Copyright 2018-2019 NXP
7 */ 7 */
8 8
9#include <linux/device.h> 9#include <linux/device.h>
@@ -323,8 +323,8 @@ static int caam_remove(struct platform_device *pdev)
323 of_platform_depopulate(ctrldev); 323 of_platform_depopulate(ctrldev);
324 324
325#ifdef CONFIG_CAAM_QI 325#ifdef CONFIG_CAAM_QI
326 if (ctrlpriv->qidev) 326 if (ctrlpriv->qi_init)
327 caam_qi_shutdown(ctrlpriv->qidev); 327 caam_qi_shutdown(ctrldev);
328#endif 328#endif
329 329
330 /* 330 /*
@@ -540,7 +540,8 @@ static int caam_probe(struct platform_device *pdev)
540 ctrlpriv->caam_ipg = clk; 540 ctrlpriv->caam_ipg = clk;
541 541
542 if (!of_machine_is_compatible("fsl,imx7d") && 542 if (!of_machine_is_compatible("fsl,imx7d") &&
543 !of_machine_is_compatible("fsl,imx7s")) { 543 !of_machine_is_compatible("fsl,imx7s") &&
544 !of_machine_is_compatible("fsl,imx7ulp")) {
544 clk = caam_drv_identify_clk(&pdev->dev, "mem"); 545 clk = caam_drv_identify_clk(&pdev->dev, "mem");
545 if (IS_ERR(clk)) { 546 if (IS_ERR(clk)) {
546 ret = PTR_ERR(clk); 547 ret = PTR_ERR(clk);
@@ -562,7 +563,8 @@ static int caam_probe(struct platform_device *pdev)
562 563
563 if (!of_machine_is_compatible("fsl,imx6ul") && 564 if (!of_machine_is_compatible("fsl,imx6ul") &&
564 !of_machine_is_compatible("fsl,imx7d") && 565 !of_machine_is_compatible("fsl,imx7d") &&
565 !of_machine_is_compatible("fsl,imx7s")) { 566 !of_machine_is_compatible("fsl,imx7s") &&
567 !of_machine_is_compatible("fsl,imx7ulp")) {
566 clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); 568 clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
567 if (IS_ERR(clk)) { 569 if (IS_ERR(clk)) {
568 ret = PTR_ERR(clk); 570 ret = PTR_ERR(clk);
@@ -702,12 +704,7 @@ static int caam_probe(struct platform_device *pdev)
702 } 704 }
703 705
704 ctrlpriv->era = caam_get_era(ctrl); 706 ctrlpriv->era = caam_get_era(ctrl);
705 707 ctrlpriv->domain = iommu_get_domain_for_dev(dev);
706 ret = of_platform_populate(nprop, caam_match, NULL, dev);
707 if (ret) {
708 dev_err(dev, "JR platform devices creation error\n");
709 goto iounmap_ctrl;
710 }
711 708
712#ifdef CONFIG_DEBUG_FS 709#ifdef CONFIG_DEBUG_FS
713 /* 710 /*
@@ -721,19 +718,6 @@ static int caam_probe(struct platform_device *pdev)
721 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); 718 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
722#endif 719#endif
723 720
724 ring = 0;
725 for_each_available_child_of_node(nprop, np)
726 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
727 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
728 ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
729 ((__force uint8_t *)ctrl +
730 (ring + JR_BLOCK_NUMBER) *
731 BLOCK_OFFSET
732 );
733 ctrlpriv->total_jobrs++;
734 ring++;
735 }
736
737 /* Check to see if (DPAA 1.x) QI present. If so, enable */ 721 /* Check to see if (DPAA 1.x) QI present. If so, enable */
738 ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); 722 ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
739 if (ctrlpriv->qi_present && !caam_dpaa2) { 723 if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -752,6 +736,25 @@ static int caam_probe(struct platform_device *pdev)
752#endif 736#endif
753 } 737 }
754 738
739 ret = of_platform_populate(nprop, caam_match, NULL, dev);
740 if (ret) {
741 dev_err(dev, "JR platform devices creation error\n");
742 goto shutdown_qi;
743 }
744
745 ring = 0;
746 for_each_available_child_of_node(nprop, np)
747 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
748 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
749 ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
750 ((__force uint8_t *)ctrl +
751 (ring + JR_BLOCK_NUMBER) *
752 BLOCK_OFFSET
753 );
754 ctrlpriv->total_jobrs++;
755 ring++;
756 }
757
755 /* If no QI and no rings specified, quit and go home */ 758 /* If no QI and no rings specified, quit and go home */
756 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { 759 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
757 dev_err(dev, "no queues configured, terminating\n"); 760 dev_err(dev, "no queues configured, terminating\n");
@@ -898,6 +901,11 @@ caam_remove:
898 caam_remove(pdev); 901 caam_remove(pdev);
899 return ret; 902 return ret;
900 903
904shutdown_qi:
905#ifdef CONFIG_CAAM_QI
906 if (ctrlpriv->qi_init)
907 caam_qi_shutdown(dev);
908#endif
901iounmap_ctrl: 909iounmap_ctrl:
902 iounmap(ctrl); 910 iounmap(ctrl);
903disable_caam_emi_slow: 911disable_caam_emi_slow:
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 2980b8ef1fb1..5988a26a2441 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -3,6 +3,7 @@
3 * caam descriptor construction helper functions 3 * caam descriptor construction helper functions
4 * 4 *
5 * Copyright 2008-2012 Freescale Semiconductor, Inc. 5 * Copyright 2008-2012 Freescale Semiconductor, Inc.
6 * Copyright 2019 NXP
6 */ 7 */
7 8
8#ifndef DESC_CONSTR_H 9#ifndef DESC_CONSTR_H
@@ -37,6 +38,16 @@
37 38
38extern bool caam_little_end; 39extern bool caam_little_end;
39 40
41/*
42 * HW fetches 4 S/G table entries at a time, irrespective of how many entries
43 * are in the table. It's SW's responsibility to make sure these accesses
44 * do not have side effects.
45 */
46static inline int pad_sg_nents(int sg_nents)
47{
48 return ALIGN(sg_nents, 4);
49}
50
40static inline int desc_len(u32 * const desc) 51static inline int desc_len(u32 * const desc)
41{ 52{
42 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; 53 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
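pad_sg_nents() simply rounds the entry count up to the next multiple of 4, so the extra entries the hardware may prefetch always fall inside memory owned by the table. A minimal sketch of a caller sizing its table with it (illustrative, not taken from the patch):

	int nents = 5;                          /* entries actually filled in */
	int padded = pad_sg_nents(nents);       /* ALIGN(5, 4) == 8 */
	size_t tbl_bytes = padded * sizeof(struct sec4_sg_entry);
	/* allocate tbl_bytes; only the first nents entries are written,
	 * the trailing pad entries just keep the 4-entry HW fetch in bounds */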
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 4da844e4b61d..4f0d45865aa2 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -13,7 +13,7 @@
13#ifdef DEBUG 13#ifdef DEBUG
14#include <linux/highmem.h> 14#include <linux/highmem.h>
15 15
16void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 16void caam_dump_sg(const char *prefix_str, int prefix_type,
17 int rowsize, int groupsize, struct scatterlist *sg, 17 int rowsize, int groupsize, struct scatterlist *sg,
18 size_t tlen, bool ascii) 18 size_t tlen, bool ascii)
19{ 19{
@@ -35,15 +35,15 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
35 35
36 buf = it_page + it->offset; 36 buf = it_page + it->offset;
37 len = min_t(size_t, tlen, it->length); 37 len = min_t(size_t, tlen, it->length);
38 print_hex_dump(level, prefix_str, prefix_type, rowsize, 38 print_hex_dump_debug(prefix_str, prefix_type, rowsize,
39 groupsize, buf, len, ascii); 39 groupsize, buf, len, ascii);
40 tlen -= len; 40 tlen -= len;
41 41
42 kunmap_atomic(it_page); 42 kunmap_atomic(it_page);
43 } 43 }
44} 44}
45#else 45#else
46void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 46void caam_dump_sg(const char *prefix_str, int prefix_type,
47 int rowsize, int groupsize, struct scatterlist *sg, 47 int rowsize, int groupsize, struct scatterlist *sg,
48 size_t tlen, bool ascii) 48 size_t tlen, bool ascii)
49{} 49{}
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 8c6b83e02a70..d9726e66edbf 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -17,7 +17,7 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
17#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) 17#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
18#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) 18#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
19 19
20void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 20void caam_dump_sg(const char *prefix_str, int prefix_type,
21 int rowsize, int groupsize, struct scatterlist *sg, 21 int rowsize, int groupsize, struct scatterlist *sg,
22 size_t tlen, bool ascii); 22 size_t tlen, bool ascii);
23 23
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 3392615dc91b..6af84bbc612c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -4,7 +4,7 @@
4 * Private/internal definitions between modules 4 * Private/internal definitions between modules
5 * 5 *
6 * Copyright 2008-2011 Freescale Semiconductor, Inc. 6 * Copyright 2008-2011 Freescale Semiconductor, Inc.
7 * 7 * Copyright 2019 NXP
8 */ 8 */
9 9
10#ifndef INTERN_H 10#ifndef INTERN_H
@@ -63,10 +63,6 @@ struct caam_drv_private_jr {
63 * Driver-private storage for a single CAAM block instance 63 * Driver-private storage for a single CAAM block instance
64 */ 64 */
65struct caam_drv_private { 65struct caam_drv_private {
66#ifdef CONFIG_CAAM_QI
67 struct device *qidev;
68#endif
69
70 /* Physical-presence section */ 66 /* Physical-presence section */
71 struct caam_ctrl __iomem *ctrl; /* controller region */ 67 struct caam_ctrl __iomem *ctrl; /* controller region */
72 struct caam_deco __iomem *deco; /* DECO/CCB views */ 68 struct caam_deco __iomem *deco; /* DECO/CCB views */
@@ -74,12 +70,17 @@ struct caam_drv_private {
74 struct caam_queue_if __iomem *qi; /* QI control region */ 70 struct caam_queue_if __iomem *qi; /* QI control region */
75 struct caam_job_ring __iomem *jr[4]; /* JobR's register space */ 71 struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
76 72
73 struct iommu_domain *domain;
74
77 /* 75 /*
78 * Detected geometry block. Filled in from device tree if powerpc, 76 * Detected geometry block. Filled in from device tree if powerpc,
79 * or from register-based version detection code 77 * or from register-based version detection code
80 */ 78 */
81 u8 total_jobrs; /* Total Job Rings in device */ 79 u8 total_jobrs; /* Total Job Rings in device */
82 u8 qi_present; /* Nonzero if QI present in device */ 80 u8 qi_present; /* Nonzero if QI present in device */
81#ifdef CONFIG_CAAM_QI
82 u8 qi_init; /* Nonzero if QI has been initialized */
83#endif
83 u8 mc_en; /* Nonzero if MC f/w is active */ 84 u8 mc_en; /* Nonzero if MC f/w is active */
84 int secvio_irq; /* Security violation interrupt number */ 85 int secvio_irq; /* Security violation interrupt number */
85 int virt_en; /* Virtualization enabled in CAAM */ 86 int virt_en; /* Virtualization enabled in CAAM */
@@ -107,8 +108,95 @@ struct caam_drv_private {
107#endif 108#endif
108}; 109};
109 110
110void caam_jr_algapi_init(struct device *dev); 111#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
111void caam_jr_algapi_remove(struct device *dev); 112
113int caam_algapi_init(struct device *dev);
114void caam_algapi_exit(void);
115
116#else
117
118static inline int caam_algapi_init(struct device *dev)
119{
120 return 0;
121}
122
123static inline void caam_algapi_exit(void)
124{
125}
126
127#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
128
129#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
130
131int caam_algapi_hash_init(struct device *dev);
132void caam_algapi_hash_exit(void);
133
134#else
135
136static inline int caam_algapi_hash_init(struct device *dev)
137{
138 return 0;
139}
140
141static inline void caam_algapi_hash_exit(void)
142{
143}
144
145#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
146
147#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
148
149int caam_pkc_init(struct device *dev);
150void caam_pkc_exit(void);
151
152#else
153
154static inline int caam_pkc_init(struct device *dev)
155{
156 return 0;
157}
158
159static inline void caam_pkc_exit(void)
160{
161}
162
163#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
164
165#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
166
167int caam_rng_init(struct device *dev);
168void caam_rng_exit(void);
169
170#else
171
172static inline int caam_rng_init(struct device *dev)
173{
174 return 0;
175}
176
177static inline void caam_rng_exit(void)
178{
179}
180
181#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
182
183#ifdef CONFIG_CAAM_QI
184
185int caam_qi_algapi_init(struct device *dev);
186void caam_qi_algapi_exit(void);
187
188#else
189
190static inline int caam_qi_algapi_init(struct device *dev)
191{
192 return 0;
193}
194
195static inline void caam_qi_algapi_exit(void)
196{
197}
198
199#endif /* CONFIG_CAAM_QI */
112 200
113#ifdef CONFIG_DEBUG_FS 201#ifdef CONFIG_DEBUG_FS
114static int caam_debugfs_u64_get(void *data, u64 *val) 202static int caam_debugfs_u64_get(void *data, u64 *val)
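Each sub-API now exports an init/exit pair, with static inline no-op stubs when the corresponding Kconfig option is disabled, so callers never need their own ifdefs. Illustratively (hypothetical config, call as made from the job-ring driver below):

	/* with CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API=n this resolves to the
	 * inline stub above and quietly returns 0 */
	caam_pkc_init(dev);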
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1de2562d0982..cea811fed320 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -4,6 +4,7 @@
4 * JobR backend functionality 4 * JobR backend functionality
5 * 5 *
6 * Copyright 2008-2012 Freescale Semiconductor, Inc. 6 * Copyright 2008-2012 Freescale Semiconductor, Inc.
7 * Copyright 2019 NXP
7 */ 8 */
8 9
9#include <linux/of_irq.h> 10#include <linux/of_irq.h>
@@ -23,6 +24,43 @@ struct jr_driver_data {
23} ____cacheline_aligned; 24} ____cacheline_aligned;
24 25
25static struct jr_driver_data driver_data; 26static struct jr_driver_data driver_data;
27static DEFINE_MUTEX(algs_lock);
28static unsigned int active_devs;
29
30static void register_algs(struct device *dev)
31{
32 mutex_lock(&algs_lock);
33
34 if (++active_devs != 1)
35 goto algs_unlock;
36
37 caam_algapi_init(dev);
38 caam_algapi_hash_init(dev);
39 caam_pkc_init(dev);
40 caam_rng_init(dev);
41 caam_qi_algapi_init(dev);
42
43algs_unlock:
44 mutex_unlock(&algs_lock);
45}
46
47static void unregister_algs(void)
48{
49 mutex_lock(&algs_lock);
50
51 if (--active_devs != 0)
52 goto algs_unlock;
53
54 caam_qi_algapi_exit();
55
56 caam_rng_exit();
57 caam_pkc_exit();
58 caam_algapi_hash_exit();
59 caam_algapi_exit();
60
61algs_unlock:
62 mutex_unlock(&algs_lock);
63}
26 64
27static int caam_reset_hw_jr(struct device *dev) 65static int caam_reset_hw_jr(struct device *dev)
28{ 66{
@@ -109,6 +147,9 @@ static int caam_jr_remove(struct platform_device *pdev)
109 return -EBUSY; 147 return -EBUSY;
110 } 148 }
111 149
150 /* Unregister JR-based RNG & crypto algorithms */
151 unregister_algs();
152
112 /* Remove the node from Physical JobR list maintained by driver */ 153 /* Remove the node from Physical JobR list maintained by driver */
113 spin_lock(&driver_data.jr_alloc_lock); 154 spin_lock(&driver_data.jr_alloc_lock);
114 list_del(&jrpriv->list_node); 155 list_del(&jrpriv->list_node);
@@ -541,6 +582,8 @@ static int caam_jr_probe(struct platform_device *pdev)
541 582
542 atomic_set(&jrpriv->tfm_count, 0); 583 atomic_set(&jrpriv->tfm_count, 0);
543 584
585 register_algs(jrdev->parent);
586
544 return 0; 587 return 0;
545} 588}
546 589
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8d0713fae6ac..48dd3536060d 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -16,9 +16,7 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
16{ 16{
17 struct split_key_result *res = context; 17 struct split_key_result *res = context;
18 18
19#ifdef DEBUG 19 dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20 dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
21#endif
22 20
23 if (err) 21 if (err)
24 caam_jr_strstatus(dev, err); 22 caam_jr_strstatus(dev, err);
@@ -55,12 +53,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
55 adata->keylen_pad = split_key_pad_len(adata->algtype & 53 adata->keylen_pad = split_key_pad_len(adata->algtype &
56 OP_ALG_ALGSEL_MASK); 54 OP_ALG_ALGSEL_MASK);
57 55
58#ifdef DEBUG 56 dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
59 dev_err(jrdev, "split keylen %d split keylen padded %d\n",
60 adata->keylen, adata->keylen_pad); 57 adata->keylen, adata->keylen_pad);
61 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 58 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
62 DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); 59 DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
63#endif
64 60
65 if (adata->keylen_pad > max_keylen) 61 if (adata->keylen_pad > max_keylen)
66 return -EINVAL; 62 return -EINVAL;
@@ -102,10 +98,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
102 append_fifo_store(desc, dma_addr, adata->keylen, 98 append_fifo_store(desc, dma_addr, adata->keylen,
103 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); 99 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
104 100
105#ifdef DEBUG 101 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
106 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 102 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
107 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 103 1);
108#endif
109 104
110 result.err = 0; 105 result.err = 0;
111 init_completion(&result.completion); 106 init_completion(&result.completion);
@@ -115,11 +110,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
115 /* in progress */ 110 /* in progress */
116 wait_for_completion(&result.completion); 111 wait_for_completion(&result.completion);
117 ret = result.err; 112 ret = result.err;
118#ifdef DEBUG 113
119 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 114 print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
120 DUMP_PREFIX_ADDRESS, 16, 4, key_out, 115 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
121 adata->keylen_pad, 1); 116 adata->keylen_pad, 1);
122#endif
123 } 117 }
124 118
125 dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); 119 dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 9f08f84cca59..0fe618e3804a 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -4,7 +4,7 @@
4 * Queue Interface backend functionality 4 * Queue Interface backend functionality
5 * 5 *
6 * Copyright 2013-2016 Freescale Semiconductor, Inc. 6 * Copyright 2013-2016 Freescale Semiconductor, Inc.
7 * Copyright 2016-2017 NXP 7 * Copyright 2016-2017, 2019 NXP
8 */ 8 */
9 9
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
@@ -18,6 +18,7 @@
18#include "desc_constr.h" 18#include "desc_constr.h"
19 19
20#define PREHDR_RSLS_SHIFT 31 20#define PREHDR_RSLS_SHIFT 31
21#define PREHDR_ABS BIT(25)
21 22
22/* 23/*
23 * Use a reasonable backlog of frames (per CPU) as congestion threshold, 24 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
@@ -58,11 +59,9 @@ static DEFINE_PER_CPU(int, last_cpu);
58/* 59/*
59 * caam_qi_priv - CAAM QI backend private params 60 * caam_qi_priv - CAAM QI backend private params
60 * @cgr: QMan congestion group 61 * @cgr: QMan congestion group
61 * @qi_pdev: platform device for QI backend
62 */ 62 */
63struct caam_qi_priv { 63struct caam_qi_priv {
64 struct qman_cgr cgr; 64 struct qman_cgr cgr;
65 struct platform_device *qi_pdev;
66}; 65};
67 66
68static struct caam_qi_priv qipriv ____cacheline_aligned; 67static struct caam_qi_priv qipriv ____cacheline_aligned;
@@ -95,6 +94,16 @@ static u64 times_congested;
95 */ 94 */
96static struct kmem_cache *qi_cache; 95static struct kmem_cache *qi_cache;
97 96
97static void *caam_iova_to_virt(struct iommu_domain *domain,
98 dma_addr_t iova_addr)
99{
100 phys_addr_t phys_addr;
101
102 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
103
104 return phys_to_virt(phys_addr);
105}
106
98int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) 107int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
99{ 108{
100 struct qm_fd fd; 109 struct qm_fd fd;
@@ -135,6 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
135 const struct qm_fd *fd; 144 const struct qm_fd *fd;
136 struct caam_drv_req *drv_req; 145 struct caam_drv_req *drv_req;
137 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); 146 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
147 struct caam_drv_private *priv = dev_get_drvdata(qidev);
138 148
139 fd = &msg->ern.fd; 149 fd = &msg->ern.fd;
140 150
@@ -143,7 +153,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
143 return; 153 return;
144 } 154 }
145 155
146 drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); 156 drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
147 if (!drv_req) { 157 if (!drv_req) {
148 dev_err(qidev, 158 dev_err(qidev,
149 "Can't find original request for CAAM response\n"); 159 "Can't find original request for CAAM response\n");
@@ -346,6 +356,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
346 */ 356 */
347 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | 357 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
348 num_words); 358 num_words);
359 drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
349 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); 360 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
350 dma_sync_single_for_device(qidev, drv_ctx->context_a, 361 dma_sync_single_for_device(qidev, drv_ctx->context_a,
351 sizeof(drv_ctx->sh_desc) + 362 sizeof(drv_ctx->sh_desc) +
@@ -401,6 +412,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
401 */ 412 */
402 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | 413 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
403 num_words); 414 num_words);
415 drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
404 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); 416 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
405 size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); 417 size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
406 hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, 418 hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
@@ -488,7 +500,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
488void caam_qi_shutdown(struct device *qidev) 500void caam_qi_shutdown(struct device *qidev)
489{ 501{
490 int i; 502 int i;
491 struct caam_qi_priv *priv = dev_get_drvdata(qidev); 503 struct caam_qi_priv *priv = &qipriv;
492 const cpumask_t *cpus = qman_affine_cpus(); 504 const cpumask_t *cpus = qman_affine_cpus();
493 505
494 for_each_cpu(i, cpus) { 506 for_each_cpu(i, cpus) {
@@ -506,8 +518,6 @@ void caam_qi_shutdown(struct device *qidev)
506 qman_release_cgrid(priv->cgr.cgrid); 518 qman_release_cgrid(priv->cgr.cgrid);
507 519
508 kmem_cache_destroy(qi_cache); 520 kmem_cache_destroy(qi_cache);
509
510 platform_device_unregister(priv->qi_pdev);
511} 521}
512 522
513static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) 523static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -550,6 +560,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
550 struct caam_drv_req *drv_req; 560 struct caam_drv_req *drv_req;
551 const struct qm_fd *fd; 561 const struct qm_fd *fd;
552 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); 562 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
563 struct caam_drv_private *priv = dev_get_drvdata(qidev);
553 u32 status; 564 u32 status;
554 565
555 if (caam_qi_napi_schedule(p, caam_napi)) 566 if (caam_qi_napi_schedule(p, caam_napi))
@@ -572,7 +583,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
572 return qman_cb_dqrr_consume; 583 return qman_cb_dqrr_consume;
573 } 584 }
574 585
575 drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); 586 drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
576 if (unlikely(!drv_req)) { 587 if (unlikely(!drv_req)) {
577 dev_err(qidev, 588 dev_err(qidev,
578 "Can't find original request for caam response\n"); 589 "Can't find original request for caam response\n");
@@ -692,33 +703,17 @@ static void free_rsp_fqs(void)
692int caam_qi_init(struct platform_device *caam_pdev) 703int caam_qi_init(struct platform_device *caam_pdev)
693{ 704{
694 int err, i; 705 int err, i;
695 struct platform_device *qi_pdev;
696 struct device *ctrldev = &caam_pdev->dev, *qidev; 706 struct device *ctrldev = &caam_pdev->dev, *qidev;
697 struct caam_drv_private *ctrlpriv; 707 struct caam_drv_private *ctrlpriv;
698 const cpumask_t *cpus = qman_affine_cpus(); 708 const cpumask_t *cpus = qman_affine_cpus();
699 static struct platform_device_info qi_pdev_info = {
700 .name = "caam_qi",
701 .id = PLATFORM_DEVID_NONE
702 };
703
704 qi_pdev_info.parent = ctrldev;
705 qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
706 qi_pdev = platform_device_register_full(&qi_pdev_info);
707 if (IS_ERR(qi_pdev))
708 return PTR_ERR(qi_pdev);
709 set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
710 709
711 ctrlpriv = dev_get_drvdata(ctrldev); 710 ctrlpriv = dev_get_drvdata(ctrldev);
712 qidev = &qi_pdev->dev; 711 qidev = ctrldev;
713
714 qipriv.qi_pdev = qi_pdev;
715 dev_set_drvdata(qidev, &qipriv);
716 712
717 /* Initialize the congestion detection */ 713 /* Initialize the congestion detection */
718 err = init_cgr(qidev); 714 err = init_cgr(qidev);
719 if (err) { 715 if (err) {
720 dev_err(qidev, "CGR initialization failed: %d\n", err); 716 dev_err(qidev, "CGR initialization failed: %d\n", err);
721 platform_device_unregister(qi_pdev);
722 return err; 717 return err;
723 } 718 }
724 719
@@ -727,7 +722,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
727 if (err) { 722 if (err) {
728 dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); 723 dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
729 free_rsp_fqs(); 724 free_rsp_fqs();
730 platform_device_unregister(qi_pdev);
731 return err; 725 return err;
732 } 726 }
733 727
@@ -750,15 +744,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
750 napi_enable(irqtask); 744 napi_enable(irqtask);
751 } 745 }
752 746
753 /* Hook up QI device to parent controlling caam device */
754 ctrlpriv->qidev = qidev;
755
756 qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, 747 qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
757 SLAB_CACHE_DMA, NULL); 748 SLAB_CACHE_DMA, NULL);
758 if (!qi_cache) { 749 if (!qi_cache) {
759 dev_err(qidev, "Can't allocate CAAM cache\n"); 750 dev_err(qidev, "Can't allocate CAAM cache\n");
760 free_rsp_fqs(); 751 free_rsp_fqs();
761 platform_device_unregister(qi_pdev);
762 return -ENOMEM; 752 return -ENOMEM;
763 } 753 }
764 754
@@ -766,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
766 debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, 756 debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
767 &times_congested, &caam_fops_u64_ro); 757 &times_congested, &caam_fops_u64_ro);
768#endif 758#endif
759
760 ctrlpriv->qi_init = 1;
769 dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); 761 dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
770 return 0; 762 return 0;
771} 763}
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index b3e1aaaeffea..d56cc7efbc13 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
54 * but does not have final bit; instead, returns last entry 54 * but does not have final bit; instead, returns last entry
55 */ 55 */
56static inline struct qm_sg_entry * 56static inline struct qm_sg_entry *
57sg_to_qm_sg(struct scatterlist *sg, int sg_count, 57sg_to_qm_sg(struct scatterlist *sg, int len,
58 struct qm_sg_entry *qm_sg_ptr, u16 offset) 58 struct qm_sg_entry *qm_sg_ptr, u16 offset)
59{ 59{
60 while (sg_count && sg) { 60 int ent_len;
61 dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), 61
62 sg_dma_len(sg), offset); 62 while (len) {
63 ent_len = min_t(int, sg_dma_len(sg), len);
64
65 dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
66 offset);
63 qm_sg_ptr++; 67 qm_sg_ptr++;
64 sg = sg_next(sg); 68 sg = sg_next(sg);
65 sg_count--; 69 len -= ent_len;
66 } 70 }
67 return qm_sg_ptr - 1; 71 return qm_sg_ptr - 1;
68} 72}
@@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
71 * convert scatterlist to h/w link table format 75 * convert scatterlist to h/w link table format
72 * scatterlist must have been previously dma mapped 76 * scatterlist must have been previously dma mapped
73 */ 77 */
74static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, 78static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
75 struct qm_sg_entry *qm_sg_ptr, u16 offset) 79 struct qm_sg_entry *qm_sg_ptr, u16 offset)
76{ 80{
77 qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); 81 qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
78 qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr)); 82 qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
79} 83}
80 84
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index c9378402a5f8..b8b737d2b0ea 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -25,15 +25,19 @@ static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
25 * but does not have final bit; instead, returns last entry 25 * but does not have final bit; instead, returns last entry
26 */ 26 */
27static inline struct dpaa2_sg_entry * 27static inline struct dpaa2_sg_entry *
28sg_to_qm_sg(struct scatterlist *sg, int sg_count, 28sg_to_qm_sg(struct scatterlist *sg, int len,
29 struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) 29 struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
30{ 30{
31 while (sg_count && sg) { 31 int ent_len;
32 dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), 32
33 sg_dma_len(sg), offset); 33 while (len) {
34 ent_len = min_t(int, sg_dma_len(sg), len);
35
36 dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
37 offset);
34 qm_sg_ptr++; 38 qm_sg_ptr++;
35 sg = sg_next(sg); 39 sg = sg_next(sg);
36 sg_count--; 40 len -= ent_len;
37 } 41 }
38 return qm_sg_ptr - 1; 42 return qm_sg_ptr - 1;
39} 43}
@@ -42,11 +46,11 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
42 * convert scatterlist to h/w link table format 46 * convert scatterlist to h/w link table format
43 * scatterlist must have been previously dma mapped 47 * scatterlist must have been previously dma mapped
44 */ 48 */
45static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, 49static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
46 struct dpaa2_sg_entry *qm_sg_ptr, 50 struct dpaa2_sg_entry *qm_sg_ptr,
47 u16 offset) 51 u16 offset)
48{ 52{
49 qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); 53 qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
50 dpaa2_sg_set_final(qm_sg_ptr, true); 54 dpaa2_sg_set_final(qm_sg_ptr, true);
51} 55}
52 56
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index dbfa9fce33e0..07e1ee99273b 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -35,11 +35,9 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
35 sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & 35 sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
36 SEC4_SG_OFFSET_MASK); 36 SEC4_SG_OFFSET_MASK);
37 } 37 }
38#ifdef DEBUG 38
39 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", 39 print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4,
40 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, 40 sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1);
41 sizeof(struct sec4_sg_entry), 1);
42#endif
43} 41}
44 42
45/* 43/*
@@ -47,15 +45,19 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
47 * but does not have final bit; instead, returns last entry 45 * but does not have final bit; instead, returns last entry
48 */ 46 */
49static inline struct sec4_sg_entry * 47static inline struct sec4_sg_entry *
50sg_to_sec4_sg(struct scatterlist *sg, int sg_count, 48sg_to_sec4_sg(struct scatterlist *sg, int len,
51 struct sec4_sg_entry *sec4_sg_ptr, u16 offset) 49 struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
52{ 50{
53 while (sg_count) { 51 int ent_len;
54 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), 52
55 sg_dma_len(sg), offset); 53 while (len) {
54 ent_len = min_t(int, sg_dma_len(sg), len);
55
56 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
57 offset);
56 sec4_sg_ptr++; 58 sec4_sg_ptr++;
57 sg = sg_next(sg); 59 sg = sg_next(sg);
58 sg_count--; 60 len -= ent_len;
59 } 61 }
60 return sec4_sg_ptr - 1; 62 return sec4_sg_ptr - 1;
61} 63}
@@ -72,11 +74,11 @@ static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
72 * convert scatterlist to h/w link table format 74 * convert scatterlist to h/w link table format
73 * scatterlist must have been previously dma mapped 75 * scatterlist must have been previously dma mapped
74 */ 76 */
75static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, 77static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
76 struct sec4_sg_entry *sec4_sg_ptr, 78 struct sec4_sg_entry *sec4_sg_ptr,
77 u16 offset) 79 u16 offset)
78{ 80{
79 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); 81 sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
80 sg_to_sec4_set_last(sec4_sg_ptr); 82 sg_to_sec4_set_last(sec4_sg_ptr);
81} 83}
82 84
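All three S/G conversion headers now walk the scatterlist by total byte length rather than by entry count, clamping the final hardware entry when the request ends mid-segment. An illustrative call (made-up sizes, not from the patch):

	/* src was DMA-mapped as two 64-byte segments, but only 100 bytes
	 * belong to this request */
	sg_to_sec4_sg_last(src, 100, sec4_sg_ptr, 0);
	/* yields entries of 64 and 36 bytes with the final bit set on the
	 * second; the old sg_count-based walk would have described all
	 * 128 mapped bytes */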
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index e9f4704494fb..ff3cb1f8f2b6 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -7,7 +7,6 @@
7#include <crypto/aes.h> 7#include <crypto/aes.h>
8#include <crypto/algapi.h> 8#include <crypto/algapi.h>
9#include <crypto/authenc.h> 9#include <crypto/authenc.h>
10#include <crypto/crypto_wq.h>
11#include <crypto/des.h> 10#include <crypto/des.h>
12#include <crypto/xts.h> 11#include <crypto/xts.h>
13#include <linux/crypto.h> 12#include <linux/crypto.h>
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
index f177b79bbab0..09c4cf2513fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NITROX_DEBUGFS_H 2#ifndef __NITROX_DEBUGFS_H
3#define __NITROX_DEBUGFS_H 3#define __NITROX_DEBUGFS_H
4 4
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.h b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
index 5008399775a9..7c93d0282174 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NITROX_MBX_H 2#ifndef __NITROX_MBX_H
3#define __NITROX_MBX_H 3#define __NITROX_MBX_H
4 4
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index ea3d6de55ff6..58c6dddfc5e1 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -2,7 +2,7 @@
2/* 2/*
3 * AMD Cryptographic Coprocessor (CCP) AES crypto API support 3 * AMD Cryptographic Coprocessor (CCP) AES crypto API support
4 * 4 *
5 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 5 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
6 * 6 *
7 * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 * Author: Tom Lendacky <thomas.lendacky@amd.com>
8 */ 8 */
@@ -76,8 +76,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
76 return -EINVAL; 76 return -EINVAL;
77 77
78 if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) || 78 if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
79 (ctx->u.aes.mode == CCP_AES_MODE_CBC) || 79 (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
80 (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
81 (req->nbytes & (AES_BLOCK_SIZE - 1))) 80 (req->nbytes & (AES_BLOCK_SIZE - 1)))
82 return -EINVAL; 81 return -EINVAL;
83 82
@@ -288,7 +287,7 @@ static struct ccp_aes_def aes_algs[] = {
288 .version = CCP_VERSION(3, 0), 287 .version = CCP_VERSION(3, 0),
289 .name = "cfb(aes)", 288 .name = "cfb(aes)",
290 .driver_name = "cfb-aes-ccp", 289 .driver_name = "cfb-aes-ccp",
291 .blocksize = AES_BLOCK_SIZE, 290 .blocksize = 1,
292 .ivsize = AES_BLOCK_SIZE, 291 .ivsize = AES_BLOCK_SIZE,
293 .alg_defaults = &ccp_aes_defaults, 292 .alg_defaults = &ccp_aes_defaults,
294 }, 293 },
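CFB is stream-oriented, so the algorithm now advertises a 1-byte block size and the AES_BLOCK_SIZE alignment check above no longer applies to it. Illustratively (assumed request size, not from the patch):

	/* with .blocksize = 1 a cfb-aes-ccp request of, say, 5 bytes is
	 * accepted, while ECB/CBC still require req->nbytes to be a
	 * multiple of AES_BLOCK_SIZE (16) */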
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cc3e96c4f5fb..f79eede71c62 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -32,56 +32,62 @@ struct ccp_tasklet_data {
32}; 32};
33 33
34/* Human-readable error strings */ 34/* Human-readable error strings */
35#define CCP_MAX_ERROR_CODE 64
35static char *ccp_error_codes[] = { 36static char *ccp_error_codes[] = {
36 "", 37 "",
37 "ERR 01: ILLEGAL_ENGINE", 38 "ILLEGAL_ENGINE",
38 "ERR 02: ILLEGAL_KEY_ID", 39 "ILLEGAL_KEY_ID",
39 "ERR 03: ILLEGAL_FUNCTION_TYPE", 40 "ILLEGAL_FUNCTION_TYPE",
40 "ERR 04: ILLEGAL_FUNCTION_MODE", 41 "ILLEGAL_FUNCTION_MODE",
41 "ERR 05: ILLEGAL_FUNCTION_ENCRYPT", 42 "ILLEGAL_FUNCTION_ENCRYPT",
42 "ERR 06: ILLEGAL_FUNCTION_SIZE", 43 "ILLEGAL_FUNCTION_SIZE",
43 "ERR 07: Zlib_MISSING_INIT_EOM", 44 "Zlib_MISSING_INIT_EOM",
44 "ERR 08: ILLEGAL_FUNCTION_RSVD", 45 "ILLEGAL_FUNCTION_RSVD",
45 "ERR 09: ILLEGAL_BUFFER_LENGTH", 46 "ILLEGAL_BUFFER_LENGTH",
46 "ERR 10: VLSB_FAULT", 47 "VLSB_FAULT",
47 "ERR 11: ILLEGAL_MEM_ADDR", 48 "ILLEGAL_MEM_ADDR",
48 "ERR 12: ILLEGAL_MEM_SEL", 49 "ILLEGAL_MEM_SEL",
49 "ERR 13: ILLEGAL_CONTEXT_ID", 50 "ILLEGAL_CONTEXT_ID",
50 "ERR 14: ILLEGAL_KEY_ADDR", 51 "ILLEGAL_KEY_ADDR",
51 "ERR 15: 0xF Reserved", 52 "0xF Reserved",
52 "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE", 53 "Zlib_ILLEGAL_MULTI_QUEUE",
53 "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE", 54 "Zlib_ILLEGAL_JOBID_CHANGE",
54 "ERR 18: CMD_TIMEOUT", 55 "CMD_TIMEOUT",
55 "ERR 19: IDMA0_AXI_SLVERR", 56 "IDMA0_AXI_SLVERR",
56 "ERR 20: IDMA0_AXI_DECERR", 57 "IDMA0_AXI_DECERR",
57 "ERR 21: 0x15 Reserved", 58 "0x15 Reserved",
58 "ERR 22: IDMA1_AXI_SLAVE_FAULT", 59 "IDMA1_AXI_SLAVE_FAULT",
59 "ERR 23: IDMA1_AIXI_DECERR", 60 "IDMA1_AIXI_DECERR",
60 "ERR 24: 0x18 Reserved", 61 "0x18 Reserved",
61 "ERR 25: ZLIBVHB_AXI_SLVERR", 62 "ZLIBVHB_AXI_SLVERR",
62 "ERR 26: ZLIBVHB_AXI_DECERR", 63 "ZLIBVHB_AXI_DECERR",
63 "ERR 27: 0x1B Reserved", 64 "0x1B Reserved",
64 "ERR 27: ZLIB_UNEXPECTED_EOM", 65 "ZLIB_UNEXPECTED_EOM",
65 "ERR 27: ZLIB_EXTRA_DATA", 66 "ZLIB_EXTRA_DATA",
66 "ERR 30: ZLIB_BTYPE", 67 "ZLIB_BTYPE",
67 "ERR 31: ZLIB_UNDEFINED_SYMBOL", 68 "ZLIB_UNDEFINED_SYMBOL",
68 "ERR 32: ZLIB_UNDEFINED_DISTANCE_S", 69 "ZLIB_UNDEFINED_DISTANCE_S",
69 "ERR 33: ZLIB_CODE_LENGTH_SYMBOL", 70 "ZLIB_CODE_LENGTH_SYMBOL",
70 "ERR 34: ZLIB _VHB_ILLEGAL_FETCH", 71 "ZLIB _VHB_ILLEGAL_FETCH",
71 "ERR 35: ZLIB_UNCOMPRESSED_LEN", 72 "ZLIB_UNCOMPRESSED_LEN",
72 "ERR 36: ZLIB_LIMIT_REACHED", 73 "ZLIB_LIMIT_REACHED",
73 "ERR 37: ZLIB_CHECKSUM_MISMATCH0", 74 "ZLIB_CHECKSUM_MISMATCH0",
74 "ERR 38: ODMA0_AXI_SLVERR", 75 "ODMA0_AXI_SLVERR",
75 "ERR 39: ODMA0_AXI_DECERR", 76 "ODMA0_AXI_DECERR",
76 "ERR 40: 0x28 Reserved", 77 "0x28 Reserved",
77 "ERR 41: ODMA1_AXI_SLVERR", 78 "ODMA1_AXI_SLVERR",
78 "ERR 42: ODMA1_AXI_DECERR", 79 "ODMA1_AXI_DECERR",
79 "ERR 43: LSB_PARITY_ERR",
80}; 80};
81 81
82void ccp_log_error(struct ccp_device *d, int e) 82void ccp_log_error(struct ccp_device *d, unsigned int e)
83{ 83{
84 dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e); 84 if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
85 return;
86
87 if (e < ARRAY_SIZE(ccp_error_codes))
88 dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
89 else
90 dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
85} 91}
86 92
87/* List of CCPs, CCP count, read-write access lock, and access functions 93/* List of CCPs, CCP count, read-write access lock, and access functions
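Error strings no longer embed their own "ERR NN:" prefix; the index is printed by the caller and bounds-checked against CCP_MAX_ERROR_CODE. For example (output shape only, as produced by the new ccp_log_error() above):

	/* ccp_log_error(ccp, 20) logs roughly:
	 *     CCP error 20: IDMA0_AXI_DECERR
	 * an in-range code past the table prints "Unknown Error", and
	 * anything >= CCP_MAX_ERROR_CODE (64) only triggers a WARN */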
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 90523a069bff..5e624920fd99 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -629,7 +629,7 @@ struct ccp5_desc {
629void ccp_add_device(struct ccp_device *ccp); 629void ccp_add_device(struct ccp_device *ccp);
630void ccp_del_device(struct ccp_device *ccp); 630void ccp_del_device(struct ccp_device *ccp);
631 631
632extern void ccp_log_error(struct ccp_device *, int); 632extern void ccp_log_error(struct ccp_device *, unsigned int);
633 633
634struct ccp_device *ccp_alloc_struct(struct sp_device *sp); 634struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
635bool ccp_queues_suspended(struct ccp_device *ccp); 635bool ccp_queues_suspended(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index db8de89d990f..866b2e05ca77 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -2,7 +2,7 @@
2/* 2/*
3 * AMD Cryptographic Coprocessor (CCP) driver 3 * AMD Cryptographic Coprocessor (CCP) driver
4 * 4 *
5 * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. 5 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
6 * 6 *
7 * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 * Author: Tom Lendacky <thomas.lendacky@amd.com>
8 * Author: Gary R Hook <gary.hook@amd.com> 8 * Author: Gary R Hook <gary.hook@amd.com>
@@ -890,8 +890,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
890 return -EINVAL; 890 return -EINVAL;
891 891
892 if (((aes->mode == CCP_AES_MODE_ECB) || 892 if (((aes->mode == CCP_AES_MODE_ECB) ||
893 (aes->mode == CCP_AES_MODE_CBC) || 893 (aes->mode == CCP_AES_MODE_CBC)) &&
894 (aes->mode == CCP_AES_MODE_CFB)) &&
895 (aes->src_len & (AES_BLOCK_SIZE - 1))) 894 (aes->src_len & (AES_BLOCK_SIZE - 1)))
896 return -EINVAL; 895 return -EINVAL;
897 896
@@ -1264,6 +1263,9 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1264 int ret; 1263 int ret;
1265 1264
1266 /* Error checks */ 1265 /* Error checks */
1266 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
1267 return -EINVAL;
1268
1267 if (!cmd_q->ccp->vdata->perform->des3) 1269 if (!cmd_q->ccp->vdata->perform->des3)
1268 return -EINVAL; 1270 return -EINVAL;
1269 1271
@@ -1346,8 +1348,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1346 * passthru option to convert from big endian to little endian. 1348 * passthru option to convert from big endian to little endian.
1347 */ 1349 */
1348 if (des3->mode != CCP_DES3_MODE_ECB) { 1350 if (des3->mode != CCP_DES3_MODE_ECB) {
1349 u32 load_mode;
1350
1351 op.sb_ctx = cmd_q->sb_ctx; 1351 op.sb_ctx = cmd_q->sb_ctx;
1352 1352
1353 ret = ccp_init_dm_workarea(&ctx, cmd_q, 1353 ret = ccp_init_dm_workarea(&ctx, cmd_q,
@@ -1363,12 +1363,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1363 if (ret) 1363 if (ret)
1364 goto e_ctx; 1364 goto e_ctx;
1365 1365
1366 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
1367 load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
1368 else
1369 load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
1370 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1366 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1371 load_mode); 1367 CCP_PASSTHRU_BYTESWAP_256BIT);
1372 if (ret) { 1368 if (ret) {
1373 cmd->engine_error = cmd_q->cmd_error; 1369 cmd->engine_error = cmd_q->cmd_error;
1374 goto e_ctx; 1370 goto e_ctx;
@@ -1430,10 +1426,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1430 } 1426 }
1431 1427
1432 /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ 1428 /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
1433 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
1434 dm_offset = CCP_SB_BYTES - des3->iv_len;
1435 else
1436 dm_offset = 0;
1437 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, 1429 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
1438 DES3_EDE_BLOCK_SIZE); 1430 DES3_EDE_BLOCK_SIZE);
1439 } 1431 }
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 86ac7b443355..980aa04b655b 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -48,6 +48,7 @@ struct cc_hw_data {
48}; 48};
49 49
50#define CC_NUM_IDRS 4 50#define CC_NUM_IDRS 4
51#define CC_HW_RESET_LOOP_COUNT 10
51 52
52/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */ 53/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
53static const u32 pidr_0124_offsets[CC_NUM_IDRS] = { 54static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
@@ -133,6 +134,9 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
133 u32 imr; 134 u32 imr;
134 135
135 /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */ 136 /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
 137	/* if the driver is suspended, return; probably a shared interrupt */
138 if (cc_pm_is_dev_suspended(dev))
139 return IRQ_NONE;
136 140
137 /* read the interrupt status */ 141 /* read the interrupt status */
138 irr = cc_ioread(drvdata, CC_REG(HOST_IRR)); 142 irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -188,6 +192,31 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
188 return IRQ_HANDLED; 192 return IRQ_HANDLED;
189} 193}
190 194
195bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
196{
197 unsigned int val;
198 unsigned int i;
199
 200	/* 712/710/63 have no reset completion indication, always return true */
201 if (drvdata->hw_rev <= CC_HW_REV_712)
202 return true;
203
204 for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
205 /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
206 * completed and device is fully functional
207 */
208 val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
209 if (val & CC_NVM_IS_IDLE_MASK) {
 210 /* hw indicates reset completed */
211 return true;
212 }
 213 /* allow other processes to be scheduled on the processor */
214 schedule();
215 }
216 /* reset not completed */
217 return false;
218}
219
191int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe) 220int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
192{ 221{
193 unsigned int val, cache_params; 222 unsigned int val, cache_params;
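
The new cc_wait_for_reset_completion() simply polls the NVM_IS_IDLE status a bounded number of times, yielding the CPU between reads, and reports failure if the bit never sets. A minimal standalone sketch of that bounded-poll pattern, with the register read stubbed out (names and values here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define RESET_LOOP_COUNT 10       /* illustrative, mirrors CC_HW_RESET_LOOP_COUNT */
#define IS_IDLE_MASK     0x1u

/* Stand-in for the MMIO read the driver does with cc_ioread(). */
static unsigned int read_idle_status(void)
{
	static int polls;
	return (++polls >= 3) ? IS_IDLE_MASK : 0;   /* pretend reset finishes on the 3rd poll */
}

static bool wait_for_reset_completion(void)
{
	for (int i = 0; i < RESET_LOOP_COUNT; i++) {
		if (read_idle_status() & IS_IDLE_MASK)
			return true;          /* hardware reports reset completed */
		/* the driver calls schedule() here to let other tasks run */
	}
	return false;                         /* gave up waiting */
}

int main(void)
{
	printf("reset %s\n", wait_for_reset_completion() ? "completed" : "timed out");
	return 0;
}
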
@@ -315,15 +344,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
315 return new_drvdata->irq; 344 return new_drvdata->irq;
316 } 345 }
317 346
318 rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
319 IRQF_SHARED, "ccree", new_drvdata);
320 if (rc) {
321 dev_err(dev, "Could not register to interrupt %d\n",
322 new_drvdata->irq);
323 return rc;
324 }
325 dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
326
327 init_completion(&new_drvdata->hw_queue_avail); 347 init_completion(&new_drvdata->hw_queue_avail);
328 348
329 if (!plat_dev->dev.dma_mask) 349 if (!plat_dev->dev.dma_mask)
@@ -352,6 +372,11 @@ static int init_cc_resources(struct platform_device *plat_dev)
352 372
353 new_drvdata->sec_disabled = cc_sec_disable; 373 new_drvdata->sec_disabled = cc_sec_disable;
354 374
 375 /* wait for Cryptocell reset completion */
376 if (!cc_wait_for_reset_completion(new_drvdata)) {
377 dev_err(dev, "Cryptocell reset not completed");
378 }
379
355 if (hw_rev->rev <= CC_HW_REV_712) { 380 if (hw_rev->rev <= CC_HW_REV_712) {
356 /* Verify correct mapping */ 381 /* Verify correct mapping */
357 val = cc_ioread(new_drvdata, new_drvdata->sig_offset); 382 val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
@@ -383,6 +408,24 @@ static int init_cc_resources(struct platform_device *plat_dev)
383 } 408 }
384 sig_cidr = val; 409 sig_cidr = val;
385 410
411 /* Check HW engine configuration */
412 val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
413 switch (val) {
414 case CC_PINS_FULL:
415 /* This is fine */
416 break;
417 case CC_PINS_SLIM:
418 if (new_drvdata->std_bodies & CC_STD_NIST) {
419 dev_warn(dev, "703 mode forced due to HW configuration.\n");
420 new_drvdata->std_bodies = CC_STD_OSCCA;
421 }
422 break;
423 default:
424 dev_err(dev, "Unsupported engines configration.\n");
425 rc = -EINVAL;
426 goto post_clk_err;
427 }
428
386 /* Check security disable state */ 429 /* Check security disable state */
387 val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED)); 430 val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
388 val &= CC_SECURITY_DISABLED_MASK; 431 val &= CC_SECURITY_DISABLED_MASK;
@@ -401,6 +444,15 @@ static int init_cc_resources(struct platform_device *plat_dev)
401 /* Display HW versions */ 444 /* Display HW versions */
402 dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n", 445 dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
403 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION); 446 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
447 /* register the driver isr function */
448 rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
449 IRQF_SHARED, "ccree", new_drvdata);
450 if (rc) {
451 dev_err(dev, "Could not register to interrupt %d\n",
452 new_drvdata->irq);
453 goto post_clk_err;
454 }
455 dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
404 456
405 rc = init_cc_regs(new_drvdata, true); 457 rc = init_cc_regs(new_drvdata, true);
406 if (rc) { 458 if (rc) {
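
Probe now also inspects the HOST_REMOVE_INPUT_PINS register: the "full" engine set is accepted as-is, the "slim" layout forces the OSCCA-only standards mode, and any other value aborts the probe. A small standalone model of that decision (the constants below are illustrative stand-ins for CC_PINS_FULL/CC_PINS_SLIM, not register documentation):

#include <stdio.h>

#define PINS_FULL 0x00            /* illustrative stand-in for CC_PINS_FULL */
#define PINS_SLIM 0x9F            /* illustrative stand-in for CC_PINS_SLIM */

enum std_body { STD_NIST = 0x1, STD_OSCCA = 0x2 };

/* Model of the engine-configuration check added to init_cc_resources(). */
static int check_engine_config(unsigned int pins, unsigned int *std_bodies)
{
	switch (pins) {
	case PINS_FULL:
		return 0;                         /* full engine set: nothing to do */
	case PINS_SLIM:
		if (*std_bodies & STD_NIST) {
			*std_bodies = STD_OSCCA;  /* slim parts only carry the OSCCA engines */
			printf("703 mode forced by HW configuration\n");
		}
		return 0;
	default:
		return -22;                       /* unknown engine set: -EINVAL, refuse to probe */
	}
}

int main(void)
{
	unsigned int std = STD_NIST;
	return check_engine_config(PINS_SLIM, &std) ? 1 : 0;
}
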
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index b76181335c08..7cd99380bf1f 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -53,6 +53,9 @@ enum cc_std_body {
53 53
54#define CC_COHERENT_CACHE_PARAMS 0xEEE 54#define CC_COHERENT_CACHE_PARAMS 0xEEE
55 55
56#define CC_PINS_FULL 0x0
57#define CC_PINS_SLIM 0x9F
58
56/* Maximum DMA mask supported by IP */ 59/* Maximum DMA mask supported by IP */
57#define DMA_BIT_MASK_LEN 48 60#define DMA_BIT_MASK_LEN 48
58 61
@@ -67,6 +70,8 @@ enum cc_std_body {
67 70
68#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT) 71#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
69 72
73#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
74
70#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \ 75#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
71 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \ 76 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
72 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT) 77 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
@@ -216,6 +221,7 @@ static inline void dump_byte_array(const char *name, const u8 *the_array,
216 __dump_byte_array(name, the_array, size); 221 __dump_byte_array(name, the_array, size);
217} 222}
218 223
224bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
219int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe); 225int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
220void fini_cc_regs(struct cc_drvdata *drvdata); 226void fini_cc_regs(struct cc_drvdata *drvdata);
221int cc_clk_on(struct cc_drvdata *drvdata); 227int cc_clk_on(struct cc_drvdata *drvdata);
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index d0764147573f..efe3e1d8b87b 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -114,6 +114,9 @@
114#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL 114#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
115#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL 115#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
116#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL 116#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
117#define CC_NVM_IS_IDLE_REG_OFFSET 0x0A10UL
118#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
119#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
117#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL 120#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
118#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL 121#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
119#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL 122#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
@@ -203,6 +206,23 @@
203#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL 206#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
204#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL 207#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
205#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL 208#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
209#define CC_HOST_REMOVE_INPUT_PINS_REG_OFFSET 0x0A7CUL
210#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SHIFT 0x0UL
211#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SIZE 0x1UL
212#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SHIFT 0x1UL
213#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SIZE 0x1UL
214#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SHIFT 0x2UL
215#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SIZE 0x1UL
216#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SHIFT 0x3UL
217#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SIZE 0x1UL
218#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SHIFT 0x4UL
219#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SIZE 0x1UL
220#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SHIFT 0x5UL
221#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SIZE 0x1UL
222#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SHIFT 0x6UL
223#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SIZE 0x1UL
224#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SHIFT 0x7UL
225#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SIZE 0x1UL
206// -------------------------------------- 226// --------------------------------------
207// BLOCK: ID_REGISTERS 227// BLOCK: ID_REGISTERS
208// -------------------------------------- 228// --------------------------------------
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 2dad9c9543c6..899a52f05b7a 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -49,6 +49,11 @@ int cc_pm_resume(struct device *dev)
49 dev_err(dev, "failed getting clock back on. We're toast.\n"); 49 dev_err(dev, "failed getting clock back on. We're toast.\n");
50 return rc; 50 return rc;
51 } 51 }
 52 /* wait for Cryptocell reset completion */
53 if (!cc_wait_for_reset_completion(drvdata)) {
54 dev_err(dev, "Cryptocell reset not completed");
55 return -EBUSY;
56 }
52 57
53 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); 58 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
54 rc = init_cc_regs(drvdata, false); 59 rc = init_cc_regs(drvdata, false);
@@ -101,6 +106,12 @@ int cc_pm_put_suspend(struct device *dev)
101 return rc; 106 return rc;
102} 107}
103 108
109bool cc_pm_is_dev_suspended(struct device *dev)
110{
111 /* check device state using runtime api */
112 return pm_runtime_suspended(dev);
113}
114
104int cc_pm_init(struct cc_drvdata *drvdata) 115int cc_pm_init(struct cc_drvdata *drvdata)
105{ 116{
106 struct device *dev = drvdata_to_dev(drvdata); 117 struct device *dev = drvdata_to_dev(drvdata);
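
cc_pm_is_dev_suspended() is a thin wrapper around pm_runtime_suspended(), and the ISR uses it to return IRQ_NONE when the device is powered down, which matters because the interrupt line is shared. A standalone model of that guard, with the suspend check and the real work stubbed out (illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

/* Stand-ins for pm_runtime_suspended() and the real interrupt handling. */
static bool device_is_suspended(void) { return false; }
static void process_interrupt(void)   { puts("processing interrupt"); }

/* Shared-line ISR: a suspended device cannot have raised the interrupt,
 * so report IRQ_NONE and let the other handlers on the line have a look. */
static enum irqreturn isr(void)
{
	if (device_is_suspended())
		return IRQ_NONE;

	process_interrupt();
	return IRQ_HANDLED;
}

int main(void)
{
	printf("isr -> %s\n", isr() == IRQ_HANDLED ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}
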
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 6190cdba5dad..a7d98a5da2e1 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -22,6 +22,7 @@ int cc_pm_suspend(struct device *dev);
22int cc_pm_resume(struct device *dev); 22int cc_pm_resume(struct device *dev);
23int cc_pm_get(struct device *dev); 23int cc_pm_get(struct device *dev);
24int cc_pm_put_suspend(struct device *dev); 24int cc_pm_put_suspend(struct device *dev);
25bool cc_pm_is_dev_suspended(struct device *dev);
25 26
26#else 27#else
27 28
@@ -54,6 +55,12 @@ static inline int cc_pm_put_suspend(struct device *dev)
54 return 0; 55 return 0;
55} 56}
56 57
58static inline bool cc_pm_is_dev_suspended(struct device *dev)
59{
 60 /* if PM is not supported, the device is never suspended */
61 return false;
62}
63
57#endif 64#endif
58 65
59#endif /*__POWER_MGR_H__*/ 66#endif /*__POWER_MGR_H__*/
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 2d2f186674ba..4d9063a8b10b 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2016-2017 Hisilicon Limited. */ 2/* Copyright (c) 2016-2017 Hisilicon Limited. */
3 3
4#ifndef _SEC_DRV_H_ 4#ifndef _SEC_DRV_H_
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 86c699c14f84..df43a2c6933b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -398,6 +398,12 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
398 398
399 /* Processing Engine configuration */ 399 /* Processing Engine configuration */
400 400
401 /* Token & context configuration */
402 val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
403 EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
404 EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
405 writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
406
401 /* H/W capabilities selection */ 407 /* H/W capabilities selection */
402 val = EIP197_FUNCTION_RSVD; 408 val = EIP197_FUNCTION_RSVD;
403 val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; 409 val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
@@ -589,9 +595,9 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
589 if (rdesc->result_data.error_code & 0x407f) { 595 if (rdesc->result_data.error_code & 0x407f) {
590 /* Fatal error (bits 0-7, 14) */ 596 /* Fatal error (bits 0-7, 14) */
591 dev_err(priv->dev, 597 dev_err(priv->dev,
592 "cipher: result: result descriptor error (%d)\n", 598 "cipher: result: result descriptor error (0x%x)\n",
593 rdesc->result_data.error_code); 599 rdesc->result_data.error_code);
594 return -EIO; 600 return -EINVAL;
595 } else if (rdesc->result_data.error_code == BIT(9)) { 601 } else if (rdesc->result_data.error_code == BIT(9)) {
596 /* Authentication failed */ 602 /* Authentication failed */
597 return -EBADMSG; 603 return -EBADMSG;
@@ -720,11 +726,10 @@ handle_results:
720 } 726 }
721 727
722acknowledge: 728acknowledge:
723 if (i) { 729 if (i)
724 writel(EIP197_xDR_PROC_xD_PKT(i) | 730 writel(EIP197_xDR_PROC_xD_PKT(i) |
725 EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset), 731 EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
726 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); 732 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
727 }
728 733
729 /* If the number of requests overflowed the counter, try to proceed more 734 /* If the number of requests overflowed the counter, try to proceed more
730 * requests. 735 * requests.
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 65624a81f0fd..e0c202f33674 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -118,6 +118,7 @@
118#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) 118#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
119#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) 119#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
120#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) 120#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
121#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n)))
121#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) 122#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
122#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) 123#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
123#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) 124#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
@@ -249,6 +250,11 @@
249#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0) 250#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0)
250#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1) 251#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1)
251 252
253/* EIP197_PE_EIP96_TOKEN_CTRL */
254#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16)
255#define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19)
256#define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20)
257
252/* EIP197_PE_EIP96_FUNCTION_EN */ 258/* EIP197_PE_EIP96_FUNCTION_EN */
253#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) 259#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23))
254#define EIP197_PROTOCOL_HASH_ONLY BIT(0) 260#define EIP197_PROTOCOL_HASH_ONLY BIT(0)
@@ -333,6 +339,7 @@ struct safexcel_context_record {
333#define CONTEXT_CONTROL_IV3 BIT(8) 339#define CONTEXT_CONTROL_IV3 BIT(8)
334#define CONTEXT_CONTROL_DIGEST_CNT BIT(9) 340#define CONTEXT_CONTROL_DIGEST_CNT BIT(9)
335#define CONTEXT_CONTROL_COUNTER_MODE BIT(10) 341#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
342#define CONTEXT_CONTROL_CRYPTO_STORE BIT(12)
336#define CONTEXT_CONTROL_HASH_STORE BIT(19) 343#define CONTEXT_CONTROL_HASH_STORE BIT(19)
337 344
338/* The hash counter given to the engine in the context has a granularity of 345/* The hash counter given to the engine in the context has a granularity of
@@ -425,6 +432,10 @@ struct safexcel_token {
425 432
426#define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16) 433#define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16)
427 434
435#define EIP197_TOKEN_CTX_OFFSET(x) (x)
436#define EIP197_TOKEN_DIRECTION_EXTERNAL BIT(11)
437#define EIP197_TOKEN_EXEC_IF_SUCCESSFUL (0x1 << 12)
438
428#define EIP197_TOKEN_STAT_LAST_HASH BIT(0) 439#define EIP197_TOKEN_STAT_LAST_HASH BIT(0)
429#define EIP197_TOKEN_STAT_LAST_PACKET BIT(1) 440#define EIP197_TOKEN_STAT_LAST_PACKET BIT(1)
430#define EIP197_TOKEN_OPCODE_DIRECTION 0x0 441#define EIP197_TOKEN_OPCODE_DIRECTION 0x0
@@ -432,6 +443,7 @@ struct safexcel_token {
432#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT 443#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT
433#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 444#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4
434#define EIP197_TOKEN_OPCODE_VERIFY 0xd 445#define EIP197_TOKEN_OPCODE_VERIFY 0xd
446#define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe
435#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) 447#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0)
436 448
437static inline void eip197_noop_token(struct safexcel_token *token) 449static inline void eip197_noop_token(struct safexcel_token *token)
@@ -442,6 +454,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
442 454
443/* Instructions */ 455/* Instructions */
444#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c 456#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c
457#define EIP197_TOKEN_INS_ORIGIN_IV0 0x14
458#define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5)
445#define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) 459#define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5)
446#define EIP197_TOKEN_INS_TYPE_HASH BIT(6) 460#define EIP197_TOKEN_INS_TYPE_HASH BIT(6)
447#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) 461#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7)
@@ -468,6 +482,7 @@ struct safexcel_control_data_desc {
468 482
469#define EIP197_OPTION_MAGIC_VALUE BIT(0) 483#define EIP197_OPTION_MAGIC_VALUE BIT(0)
470#define EIP197_OPTION_64BIT_CTX BIT(1) 484#define EIP197_OPTION_64BIT_CTX BIT(1)
485#define EIP197_OPTION_RC_AUTO (0x2 << 3)
471#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8) 486#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
472#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10) 487#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
473#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9) 488#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
@@ -629,7 +644,7 @@ struct safexcel_ahash_export_state {
629 u32 digest; 644 u32 digest;
630 645
631 u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; 646 u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
632 u8 cache[SHA512_BLOCK_SIZE]; 647 u8 cache[SHA512_BLOCK_SIZE << 1];
633}; 648};
634 649
635/* 650/*
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index de4be10b172f..8cdbdbe35681 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
51 51
52struct safexcel_cipher_req { 52struct safexcel_cipher_req {
53 enum safexcel_cipher_direction direction; 53 enum safexcel_cipher_direction direction;
54 /* Number of result descriptors associated to the request */
55 unsigned int rdescs;
54 bool needs_inv; 56 bool needs_inv;
55}; 57};
56 58
@@ -59,27 +61,26 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
59 u32 length) 61 u32 length)
60{ 62{
61 struct safexcel_token *token; 63 struct safexcel_token *token;
62 unsigned offset = 0; 64 u32 offset = 0, block_sz = 0;
63 65
64 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { 66 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
65 switch (ctx->alg) { 67 switch (ctx->alg) {
66 case SAFEXCEL_DES: 68 case SAFEXCEL_DES:
67 offset = DES_BLOCK_SIZE / sizeof(u32); 69 block_sz = DES_BLOCK_SIZE;
68 memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
69 cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; 70 cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
70 break; 71 break;
71 case SAFEXCEL_3DES: 72 case SAFEXCEL_3DES:
72 offset = DES3_EDE_BLOCK_SIZE / sizeof(u32); 73 block_sz = DES3_EDE_BLOCK_SIZE;
73 memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
74 cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; 74 cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
75 break; 75 break;
76
77 case SAFEXCEL_AES: 76 case SAFEXCEL_AES:
78 offset = AES_BLOCK_SIZE / sizeof(u32); 77 block_sz = AES_BLOCK_SIZE;
79 memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
80 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; 78 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
81 break; 79 break;
82 } 80 }
81
82 offset = block_sz / sizeof(u32);
83 memcpy(cdesc->control_data.token, iv, block_sz);
83 } 84 }
84 85
85 token = (struct safexcel_token *)(cdesc->control_data.token + offset); 86 token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -91,6 +92,25 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
91 token[0].instructions = EIP197_TOKEN_INS_LAST | 92 token[0].instructions = EIP197_TOKEN_INS_LAST |
92 EIP197_TOKEN_INS_TYPE_CRYTO | 93 EIP197_TOKEN_INS_TYPE_CRYTO |
93 EIP197_TOKEN_INS_TYPE_OUTPUT; 94 EIP197_TOKEN_INS_TYPE_OUTPUT;
95
96 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
97 u32 last = (EIP197_MAX_TOKENS - 1) - offset;
98
99 token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS;
100 token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL |
101 EIP197_TOKEN_EXEC_IF_SUCCESSFUL|
102 EIP197_TOKEN_CTX_OFFSET(0x2);
103 token[last].stat = EIP197_TOKEN_STAT_LAST_HASH |
104 EIP197_TOKEN_STAT_LAST_PACKET;
105 token[last].instructions =
106 EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) |
107 EIP197_TOKEN_INS_ORIGIN_IV0;
108
109 /* Store the updated IV values back in the internal context
110 * registers.
111 */
112 cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE;
113 }
94} 114}
95 115
96static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, 116static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -333,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
333 353
334 *ret = 0; 354 *ret = 0;
335 355
336 do { 356 if (unlikely(!sreq->rdescs))
357 return 0;
358
359 while (sreq->rdescs--) {
337 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); 360 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
338 if (IS_ERR(rdesc)) { 361 if (IS_ERR(rdesc)) {
339 dev_err(priv->dev, 362 dev_err(priv->dev,
@@ -346,21 +369,15 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
346 *ret = safexcel_rdesc_check_errors(priv, rdesc); 369 *ret = safexcel_rdesc_check_errors(priv, rdesc);
347 370
348 ndesc++; 371 ndesc++;
349 } while (!rdesc->last_seg); 372 }
350 373
351 safexcel_complete(priv, ring); 374 safexcel_complete(priv, ring);
352 375
353 if (src == dst) { 376 if (src == dst) {
354 dma_unmap_sg(priv->dev, src, 377 dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
355 sg_nents_for_len(src, cryptlen),
356 DMA_BIDIRECTIONAL);
357 } else { 378 } else {
358 dma_unmap_sg(priv->dev, src, 379 dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
359 sg_nents_for_len(src, cryptlen), 380 dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
360 DMA_TO_DEVICE);
361 dma_unmap_sg(priv->dev, dst,
362 sg_nents_for_len(dst, cryptlen),
363 DMA_FROM_DEVICE);
364 } 381 }
365 382
366 *should_complete = true; 383 *should_complete = true;
@@ -385,26 +402,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
385 int i, ret = 0; 402 int i, ret = 0;
386 403
387 if (src == dst) { 404 if (src == dst) {
388 nr_src = dma_map_sg(priv->dev, src, 405 nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
389 sg_nents_for_len(src, totlen),
390 DMA_BIDIRECTIONAL); 406 DMA_BIDIRECTIONAL);
391 nr_dst = nr_src; 407 nr_dst = nr_src;
392 if (!nr_src) 408 if (!nr_src)
393 return -EINVAL; 409 return -EINVAL;
394 } else { 410 } else {
395 nr_src = dma_map_sg(priv->dev, src, 411 nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
396 sg_nents_for_len(src, totlen),
397 DMA_TO_DEVICE); 412 DMA_TO_DEVICE);
398 if (!nr_src) 413 if (!nr_src)
399 return -EINVAL; 414 return -EINVAL;
400 415
401 nr_dst = dma_map_sg(priv->dev, dst, 416 nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
402 sg_nents_for_len(dst, totlen),
403 DMA_FROM_DEVICE); 417 DMA_FROM_DEVICE);
404 if (!nr_dst) { 418 if (!nr_dst) {
405 dma_unmap_sg(priv->dev, src, 419 dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
406 sg_nents_for_len(src, totlen),
407 DMA_TO_DEVICE);
408 return -EINVAL; 420 return -EINVAL;
409 } 421 }
410 } 422 }
@@ -454,7 +466,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
454 466
455 /* result descriptors */ 467 /* result descriptors */
456 for_each_sg(dst, sg, nr_dst, i) { 468 for_each_sg(dst, sg, nr_dst, i) {
457 bool first = !i, last = (i == nr_dst - 1); 469 bool first = !i, last = sg_is_last(sg);
458 u32 len = sg_dma_len(sg); 470 u32 len = sg_dma_len(sg);
459 471
460 rdesc = safexcel_add_rdesc(priv, ring, first, last, 472 rdesc = safexcel_add_rdesc(priv, ring, first, last,
@@ -483,16 +495,10 @@ cdesc_rollback:
483 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); 495 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
484 496
485 if (src == dst) { 497 if (src == dst) {
486 dma_unmap_sg(priv->dev, src, 498 dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
487 sg_nents_for_len(src, totlen),
488 DMA_BIDIRECTIONAL);
489 } else { 499 } else {
490 dma_unmap_sg(priv->dev, src, 500 dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
491 sg_nents_for_len(src, totlen), 501 dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
492 DMA_TO_DEVICE);
493 dma_unmap_sg(priv->dev, dst,
494 sg_nents_for_len(dst, totlen),
495 DMA_FROM_DEVICE);
496 } 502 }
497 503
498 return ret; 504 return ret;
@@ -501,6 +507,7 @@ cdesc_rollback:
501static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, 507static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
502 int ring, 508 int ring,
503 struct crypto_async_request *base, 509 struct crypto_async_request *base,
510 struct safexcel_cipher_req *sreq,
504 bool *should_complete, int *ret) 511 bool *should_complete, int *ret)
505{ 512{
506 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); 513 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
@@ -509,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
509 516
510 *ret = 0; 517 *ret = 0;
511 518
512 do { 519 if (unlikely(!sreq->rdescs))
520 return 0;
521
522 while (sreq->rdescs--) {
513 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); 523 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
514 if (IS_ERR(rdesc)) { 524 if (IS_ERR(rdesc)) {
515 dev_err(priv->dev, 525 dev_err(priv->dev,
@@ -522,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
522 *ret = safexcel_rdesc_check_errors(priv, rdesc); 532 *ret = safexcel_rdesc_check_errors(priv, rdesc);
523 533
524 ndesc++; 534 ndesc++;
525 } while (!rdesc->last_seg); 535 }
526 536
527 safexcel_complete(priv, ring); 537 safexcel_complete(priv, ring);
528 538
@@ -560,16 +570,35 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
560{ 570{
561 struct skcipher_request *req = skcipher_request_cast(async); 571 struct skcipher_request *req = skcipher_request_cast(async);
562 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); 572 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
573 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm);
563 int err; 574 int err;
564 575
565 if (sreq->needs_inv) { 576 if (sreq->needs_inv) {
566 sreq->needs_inv = false; 577 sreq->needs_inv = false;
567 err = safexcel_handle_inv_result(priv, ring, async, 578 err = safexcel_handle_inv_result(priv, ring, async, sreq,
568 should_complete, ret); 579 should_complete, ret);
569 } else { 580 } else {
570 err = safexcel_handle_req_result(priv, ring, async, req->src, 581 err = safexcel_handle_req_result(priv, ring, async, req->src,
571 req->dst, req->cryptlen, sreq, 582 req->dst, req->cryptlen, sreq,
572 should_complete, ret); 583 should_complete, ret);
584
585 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
586 u32 block_sz = 0;
587
588 switch (ctx->alg) {
589 case SAFEXCEL_DES:
590 block_sz = DES_BLOCK_SIZE;
591 break;
592 case SAFEXCEL_3DES:
593 block_sz = DES3_EDE_BLOCK_SIZE;
594 break;
595 case SAFEXCEL_AES:
596 block_sz = AES_BLOCK_SIZE;
597 break;
598 }
599
600 memcpy(req->iv, ctx->base.ctxr->data, block_sz);
601 }
573 } 602 }
574 603
575 return err; 604 return err;
@@ -587,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
587 616
588 if (sreq->needs_inv) { 617 if (sreq->needs_inv) {
589 sreq->needs_inv = false; 618 sreq->needs_inv = false;
590 err = safexcel_handle_inv_result(priv, ring, async, 619 err = safexcel_handle_inv_result(priv, ring, async, sreq,
591 should_complete, ret); 620 should_complete, ret);
592 } else { 621 } else {
593 err = safexcel_handle_req_result(priv, ring, async, req->src, 622 err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -633,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
633 ret = safexcel_send_req(async, ring, sreq, req->src, 662 ret = safexcel_send_req(async, ring, sreq, req->src,
634 req->dst, req->cryptlen, 0, 0, req->iv, 663 req->dst, req->cryptlen, 0, 0, req->iv,
635 commands, results); 664 commands, results);
665
666 sreq->rdescs = *results;
636 return ret; 667 return ret;
637} 668}
638 669
@@ -655,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
655 req->cryptlen, req->assoclen, 686 req->cryptlen, req->assoclen,
656 crypto_aead_authsize(tfm), req->iv, 687 crypto_aead_authsize(tfm), req->iv,
657 commands, results); 688 commands, results);
689 sreq->rdescs = *results;
658 return ret; 690 return ret;
659} 691}
660 692
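
Requests now record how many result descriptors they produced (sreq->rdescs is set from *results in the send path) and the completion handlers consume exactly that many ring entries, instead of walking until a last_seg flag. A minimal standalone model of that counted producer/consumer pattern (ring handling is stubbed; names are illustrative):

#include <stdio.h>

struct cipher_req {
	unsigned int rdescs;    /* result descriptors queued for this request */
};

/* Send path: remember how many result descriptors were generated. */
static void send_req(struct cipher_req *req, unsigned int results)
{
	req->rdescs = results;
}

/* Completion path: pop exactly rdescs descriptors, no more, no less. */
static unsigned int handle_result(struct cipher_req *req)
{
	unsigned int ndesc = 0;

	if (!req->rdescs)
		return 0;               /* nothing was queued for this request */

	while (req->rdescs--)
		ndesc++;                /* stand-in for fetching and checking one descriptor */

	return ndesc;
}

int main(void)
{
	struct cipher_req req;

	send_req(&req, 3);
	printf("consumed %u result descriptors\n", handle_result(&req));
	return 0;
}
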
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index ac9282c1a5ec..a80a5e757b1f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -41,19 +41,21 @@ struct safexcel_ahash_req {
41 u64 len[2]; 41 u64 len[2];
42 u64 processed[2]; 42 u64 processed[2];
43 43
44 u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); 44 u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
45 dma_addr_t cache_dma; 45 dma_addr_t cache_dma;
46 unsigned int cache_sz; 46 unsigned int cache_sz;
47 47
48 u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); 48 u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
49}; 49};
50 50
51static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) 51static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
52{ 52{
53 if (req->len[1] > req->processed[1]) 53 u64 len, processed;
54 return 0xffffffff - (req->len[0] - req->processed[0]);
55 54
56 return req->len[0] - req->processed[0]; 55 len = (0xffffffff * req->len[1]) + req->len[0];
56 processed = (0xffffffff * req->processed[1]) + req->processed[0];
57
58 return len - processed;
57} 59}
58 60
59static void safexcel_hash_token(struct safexcel_command_desc *cdesc, 61static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
@@ -87,6 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
87 cdesc->control_data.control0 |= ctx->alg; 89 cdesc->control_data.control0 |= ctx->alg;
88 cdesc->control_data.control0 |= req->digest; 90 cdesc->control_data.control0 |= req->digest;
89 91
92 if (!req->finish)
93 cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
94
90 if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { 95 if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
91 if (req->processed[0] || req->processed[1]) { 96 if (req->processed[0] || req->processed[1]) {
92 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) 97 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
@@ -105,9 +110,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
105 cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; 110 cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
106 } 111 }
107 112
108 if (!req->finish)
109 cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
110
111 /* 113 /*
112 * Copy the input digest if needed, and setup the context 114 * Copy the input digest if needed, and setup the context
113 * fields. Do this now as we need it to setup the first command 115 * fields. Do this now as we need it to setup the first command
@@ -183,6 +185,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
183 dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz, 185 dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
184 DMA_TO_DEVICE); 186 DMA_TO_DEVICE);
185 sreq->cache_dma = 0; 187 sreq->cache_dma = 0;
188 sreq->cache_sz = 0;
186 } 189 }
187 190
188 if (sreq->finish) 191 if (sreq->finish)
@@ -209,11 +212,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
209 struct safexcel_command_desc *cdesc, *first_cdesc = NULL; 212 struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
210 struct safexcel_result_desc *rdesc; 213 struct safexcel_result_desc *rdesc;
211 struct scatterlist *sg; 214 struct scatterlist *sg;
212 int i, extra, n_cdesc = 0, ret = 0; 215 int i, extra = 0, n_cdesc = 0, ret = 0;
213 u64 queued, len, cache_len; 216 u64 queued, len, cache_len, cache_max;
217
218 cache_max = crypto_ahash_blocksize(ahash);
219 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
220 cache_max <<= 1;
214 221
215 queued = len = safexcel_queued_len(req); 222 queued = len = safexcel_queued_len(req);
216 if (queued <= crypto_ahash_blocksize(ahash)) 223 if (queued <= cache_max)
217 cache_len = queued; 224 cache_len = queued;
218 else 225 else
219 cache_len = queued - areq->nbytes; 226 cache_len = queued - areq->nbytes;
@@ -223,26 +230,23 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
223 * fit into full blocks, cache it for the next send() call. 230 * fit into full blocks, cache it for the next send() call.
224 */ 231 */
225 extra = queued & (crypto_ahash_blocksize(ahash) - 1); 232 extra = queued & (crypto_ahash_blocksize(ahash) - 1);
233
234 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
235 extra < crypto_ahash_blocksize(ahash))
236 extra += crypto_ahash_blocksize(ahash);
237
238 /* If this is not the last request and the queued data
239 * is a multiple of a block, cache the last one for now.
240 */
226 if (!extra) 241 if (!extra)
227 /* If this is not the last request and the queued data
228 * is a multiple of a block, cache the last one for now.
229 */
230 extra = crypto_ahash_blocksize(ahash); 242 extra = crypto_ahash_blocksize(ahash);
231 243
232 if (extra) { 244 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
233 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 245 req->cache_next, extra,
234 req->cache_next, extra, 246 areq->nbytes - extra);
235 areq->nbytes - extra);
236
237 queued -= extra;
238 len -= extra;
239 247
240 if (!queued) { 248 queued -= extra;
241 *commands = 0; 249 len -= extra;
242 *results = 0;
243 return 0;
244 }
245 }
246 } 250 }
247 251
248 /* Add a command descriptor for the cached data, if any */ 252 /* Add a command descriptor for the cached data, if any */
@@ -269,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
269 } 273 }
270 274
271 /* Now handle the current ahash request buffer(s) */ 275 /* Now handle the current ahash request buffer(s) */
272 req->nents = dma_map_sg(priv->dev, areq->src, 276 req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
273 sg_nents_for_len(areq->src, areq->nbytes),
274 DMA_TO_DEVICE); 277 DMA_TO_DEVICE);
275 if (!req->nents) { 278 if (!req->nents) {
276 ret = -ENOMEM; 279 ret = -ENOMEM;
@@ -345,6 +348,7 @@ unmap_cache:
345 if (req->cache_dma) { 348 if (req->cache_dma) {
346 dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz, 349 dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
347 DMA_TO_DEVICE); 350 DMA_TO_DEVICE);
351 req->cache_dma = 0;
348 req->cache_sz = 0; 352 req->cache_sz = 0;
349 } 353 }
350 354
@@ -486,7 +490,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
486 struct safexcel_inv_result result = {}; 490 struct safexcel_inv_result result = {};
487 int ring = ctx->base.ring; 491 int ring = ctx->base.ring;
488 492
489 memset(req, 0, sizeof(struct ahash_request)); 493 memset(req, 0, EIP197_AHASH_REQ_SIZE);
490 494
491 /* create invalidation request */ 495 /* create invalidation request */
492 init_completion(&result.completion); 496 init_completion(&result.completion);
@@ -519,10 +523,9 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
519/* safexcel_ahash_cache: cache data until at least one request can be sent to 523/* safexcel_ahash_cache: cache data until at least one request can be sent to
520 * the engine, aka. when there is at least 1 block size in the pipe. 524 * the engine, aka. when there is at least 1 block size in the pipe.
521 */ 525 */
522static int safexcel_ahash_cache(struct ahash_request *areq) 526static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
523{ 527{
524 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 528 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
525 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
526 u64 queued, cache_len; 529 u64 queued, cache_len;
527 530
528 /* queued: everything accepted by the driver which will be handled by 531 /* queued: everything accepted by the driver which will be handled by
@@ -539,7 +542,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
 539 * In case there aren't enough bytes to proceed (less than a 542
540 * block size), cache the data until we have enough. 543 * block size), cache the data until we have enough.
541 */ 544 */
542 if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) { 545 if (cache_len + areq->nbytes <= cache_max) {
543 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 546 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
544 req->cache + cache_len, 547 req->cache + cache_len,
545 areq->nbytes, 0); 548 areq->nbytes, 0);
@@ -599,6 +602,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
599{ 602{
600 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 603 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
601 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 604 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
605 u32 cache_max;
602 606
603 /* If the request is 0 length, do nothing */ 607 /* If the request is 0 length, do nothing */
604 if (!areq->nbytes) 608 if (!areq->nbytes)
@@ -608,7 +612,11 @@ static int safexcel_ahash_update(struct ahash_request *areq)
608 if (req->len[0] < areq->nbytes) 612 if (req->len[0] < areq->nbytes)
609 req->len[1]++; 613 req->len[1]++;
610 614
611 safexcel_ahash_cache(areq); 615 cache_max = crypto_ahash_blocksize(ahash);
616 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
617 cache_max <<= 1;
618
619 safexcel_ahash_cache(areq, cache_max);
612 620
613 /* 621 /*
614 * We're not doing partial updates when performing an hmac request. 622 * We're not doing partial updates when performing an hmac request.
@@ -621,7 +629,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
621 return safexcel_ahash_enqueue(areq); 629 return safexcel_ahash_enqueue(areq);
622 630
623 if (!req->last_req && 631 if (!req->last_req &&
624 safexcel_queued_len(req) > crypto_ahash_blocksize(ahash)) 632 safexcel_queued_len(req) > cache_max)
625 return safexcel_ahash_enqueue(areq); 633 return safexcel_ahash_enqueue(areq);
626 634
627 return 0; 635 return 0;
@@ -678,6 +686,11 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
678 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 686 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
679 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 687 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
680 struct safexcel_ahash_export_state *export = out; 688 struct safexcel_ahash_export_state *export = out;
689 u32 cache_sz;
690
691 cache_sz = crypto_ahash_blocksize(ahash);
692 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
693 cache_sz <<= 1;
681 694
682 export->len[0] = req->len[0]; 695 export->len[0] = req->len[0];
683 export->len[1] = req->len[1]; 696 export->len[1] = req->len[1];
@@ -687,7 +700,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
687 export->digest = req->digest; 700 export->digest = req->digest;
688 701
689 memcpy(export->state, req->state, req->state_sz); 702 memcpy(export->state, req->state, req->state_sz);
690 memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); 703 memcpy(export->cache, req->cache, cache_sz);
691 704
692 return 0; 705 return 0;
693} 706}
@@ -697,12 +710,17 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
697 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 710 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
698 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 711 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
699 const struct safexcel_ahash_export_state *export = in; 712 const struct safexcel_ahash_export_state *export = in;
713 u32 cache_sz;
700 int ret; 714 int ret;
701 715
702 ret = crypto_ahash_init(areq); 716 ret = crypto_ahash_init(areq);
703 if (ret) 717 if (ret)
704 return ret; 718 return ret;
705 719
720 cache_sz = crypto_ahash_blocksize(ahash);
721 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
722 cache_sz <<= 1;
723
706 req->len[0] = export->len[0]; 724 req->len[0] = export->len[0];
707 req->len[1] = export->len[1]; 725 req->len[1] = export->len[1];
708 req->processed[0] = export->processed[0]; 726 req->processed[0] = export->processed[0];
@@ -710,7 +728,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
710 728
711 req->digest = export->digest; 729 req->digest = export->digest;
712 730
713 memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash)); 731 memcpy(req->cache, export->cache, cache_sz);
714 memcpy(req->state, export->state, req->state_sz); 732 memcpy(req->state, export->state, req->state_sz);
715 733
716 return 0; 734 return 0;
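
Because an HMAC request may have to hold back a full extra block until the final hash can be issued, every buffer that used to be sized to one block (req->cache, cache_next, the export state) is now doubled, and the "how much can we cache" limit is likewise doubled whenever the digest type is HMAC. A small standalone model of that sizing rule (block size chosen only for illustration):

#include <stdio.h>

#define BLOCK_SIZE 64u            /* illustrative; SHA-256 block size */

enum digest_type { DIGEST_PRECOMPUTED, DIGEST_HMAC };

/* Cache limit used throughout the hash path after this change. */
static unsigned int cache_max(unsigned int block_size, enum digest_type d)
{
	unsigned int max = block_size;

	if (d == DIGEST_HMAC)
		max <<= 1;                /* HMAC may need to keep one extra block around */

	return max;
}

int main(void)
{
	printf("plain: %u bytes, hmac: %u bytes\n",
	       cache_max(BLOCK_SIZE, DIGEST_PRECOMPUTED),
	       cache_max(BLOCK_SIZE, DIGEST_HMAC));
	return 0;
}
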
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index eb75fa684876..142bc3f5c45c 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -145,6 +145,9 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
145 (lower_32_bits(context) & GENMASK(31, 2)) >> 2; 145 (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
146 cdesc->control_data.context_hi = upper_32_bits(context); 146 cdesc->control_data.context_hi = upper_32_bits(context);
147 147
148 if (priv->version == EIP197B || priv->version == EIP197D)
149 cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
150
148 /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ 151 /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
149 cdesc->control_data.refresh = 2; 152 cdesc->control_data.refresh = 2;
150 153
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index e5cf3a59c420..acedafe3fa98 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -100,7 +100,7 @@ struct buffer_desc {
100 u16 pkt_len; 100 u16 pkt_len;
101 u16 buf_len; 101 u16 buf_len;
102#endif 102#endif
103 u32 phys_addr; 103 dma_addr_t phys_addr;
104 u32 __reserved[4]; 104 u32 __reserved[4];
105 struct buffer_desc *next; 105 struct buffer_desc *next;
106 enum dma_data_direction dir; 106 enum dma_data_direction dir;
@@ -117,9 +117,9 @@ struct crypt_ctl {
117 u8 mode; /* NPE_OP_* operation mode */ 117 u8 mode; /* NPE_OP_* operation mode */
118#endif 118#endif
119 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */ 119 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
120 u32 icv_rev_aes; /* icv or rev aes */ 120 dma_addr_t icv_rev_aes; /* icv or rev aes */
121 u32 src_buf; 121 dma_addr_t src_buf;
122 u32 dst_buf; 122 dma_addr_t dst_buf;
123#ifdef __ARMEB__ 123#ifdef __ARMEB__
124 u16 auth_offs; /* Authentication start offset */ 124 u16 auth_offs; /* Authentication start offset */
125 u16 auth_len; /* Authentication data length */ 125 u16 auth_len; /* Authentication data length */
@@ -320,7 +320,8 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
320 } 320 }
321} 321}
322 322
323static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) 323static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
324 dma_addr_t phys)
324{ 325{
325 while (buf) { 326 while (buf) {
326 struct buffer_desc *buf1; 327 struct buffer_desc *buf1;
@@ -602,7 +603,7 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
602 struct buffer_desc *buf; 603 struct buffer_desc *buf;
603 int i; 604 int i;
604 u8 *pad; 605 u8 *pad;
605 u32 pad_phys, buf_phys; 606 dma_addr_t pad_phys, buf_phys;
606 607
607 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN); 608 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
608 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys); 609 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
@@ -787,7 +788,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
787 for (; nbytes > 0; sg = sg_next(sg)) { 788 for (; nbytes > 0; sg = sg_next(sg)) {
788 unsigned len = min(nbytes, sg->length); 789 unsigned len = min(nbytes, sg->length);
789 struct buffer_desc *next_buf; 790 struct buffer_desc *next_buf;
790 u32 next_buf_phys; 791 dma_addr_t next_buf_phys;
791 void *ptr; 792 void *ptr;
792 793
793 nbytes -= len; 794 nbytes -= len;
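
The ixp4xx change is purely about types: DMA handles that were stored in u32 fields are now dma_addr_t, so nothing is silently truncated on configurations where DMA addresses are wider than 32 bits. A standalone illustration of why the narrower type was a problem (dma_addr_t is modelled here as a 64-bit integer and the address is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;      /* modelling a 64-bit DMA address space */

struct buffer_desc {
	uint16_t pkt_len;
	uint16_t buf_len;
	dma_addr_t phys_addr;     /* was u32: would truncate addresses above 4 GiB */
};

int main(void)
{
	dma_addr_t handle = 0x100000001000ULL;    /* hypothetical bus address above 4 GiB */
	uint32_t as_u32 = (uint32_t)handle;

	printf("dma_addr_t: 0x%" PRIx64 ", stored in u32: 0x%" PRIx32 "\n",
	       (uint64_t)handle, as_u32);
	return 0;
}
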
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index bdc4c42d3ac8..f1fa637cb029 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -986,8 +986,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
986 struct device *dev = &pdev->dev; 986 struct device *dev = &pdev->dev;
987 struct dcp *sdcp = NULL; 987 struct dcp *sdcp = NULL;
988 int i, ret; 988 int i, ret;
989
990 struct resource *iores;
991 int dcp_vmi_irq, dcp_irq; 989 int dcp_vmi_irq, dcp_irq;
992 990
993 if (global_sdcp) { 991 if (global_sdcp) {
@@ -995,7 +993,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
995 return -ENODEV; 993 return -ENODEV;
996 } 994 }
997 995
998 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
999 dcp_vmi_irq = platform_get_irq(pdev, 0); 996 dcp_vmi_irq = platform_get_irq(pdev, 0);
1000 if (dcp_vmi_irq < 0) { 997 if (dcp_vmi_irq < 0) {
1001 dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq); 998 dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
@@ -1013,7 +1010,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
1013 return -ENOMEM; 1010 return -ENOMEM;
1014 1011
1015 sdcp->dev = dev; 1012 sdcp->dev = dev;
1016 sdcp->base = devm_ioremap_resource(dev, iores); 1013 sdcp->base = devm_platform_ioremap_resource(pdev, 0);
1017 if (IS_ERR(sdcp->base)) 1014 if (IS_ERR(sdcp->base))
1018 return PTR_ERR(sdcp->base); 1015 return PTR_ERR(sdcp->base);
1019 1016
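
The mxs-dcp probe no longer fetches the memory resource by hand: devm_platform_ioremap_resource() combines platform_get_resource() and devm_ioremap_resource() in a single call. A hedged, kernel-context sketch of the resulting probe shape (not a standalone program; the rest of the driver's probe work is omitted):

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* one call replaces platform_get_resource() + devm_ioremap_resource() */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... set up IRQs, clocks, etc. and register the device ... */
	return 0;
}
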
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 4acbc47973e9..e78ff5c65ed6 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -27,8 +27,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
27#define WORKMEM_ALIGN (CRB_ALIGN) 27#define WORKMEM_ALIGN (CRB_ALIGN)
28#define CSB_WAIT_MAX (5000) /* ms */ 28#define CSB_WAIT_MAX (5000) /* ms */
29#define VAS_RETRIES (10) 29#define VAS_RETRIES (10)
30/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
31#define MAX_CREDITS_PER_RXFIFO (1024)
32 30
33struct nx842_workmem { 31struct nx842_workmem {
34 /* Below fields must be properly aligned */ 32 /* Below fields must be properly aligned */
@@ -812,7 +810,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
812 rxattr.lnotify_lpid = lpid; 810 rxattr.lnotify_lpid = lpid;
813 rxattr.lnotify_pid = pid; 811 rxattr.lnotify_pid = pid;
814 rxattr.lnotify_tid = tid; 812 rxattr.lnotify_tid = tid;
815 rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO; 813 /*
 814 * Maximum RX window credits cannot be more than #CRBs in
 815 * RxFIFO. Otherwise, the system can checkstop if the RxFIFO overruns.
816 */
817 rxattr.wcreds_max = fifo_size / CRB_SIZE;
816 818
817 /* 819 /*
 818 * Open a VAS receive window which is used to configure RxFIFO 820
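
The receive-window credit limit is now derived from the FIFO geometry rather than a fixed 1024: granting more credits than the RxFIFO has room for CRBs is what risked the overrun/checkstop described in the comment above. A standalone model of that sizing (the CRB and FIFO sizes below are illustrative, not the P9 NX values):

#include <stdio.h>

#define CRB_SIZE 128u             /* illustrative coprocessor request block size */

/* Never hand out more credits than the RxFIFO can hold CRBs. */
static unsigned int max_rx_credits(unsigned int fifo_size)
{
	return fifo_size / CRB_SIZE;
}

int main(void)
{
	unsigned int fifo_size = 32u * 1024u;     /* illustrative RxFIFO size */

	printf("wcreds_max = %u\n", max_rx_credits(fifo_size));
	return 0;
}
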
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 428c273a1ab6..28817880c76d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -569,9 +569,7 @@ static int nx_register_algs(void)
569 569
570 memset(&nx_driver.stats, 0, sizeof(struct nx_stats)); 570 memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
571 571
572 rc = NX_DEBUGFS_INIT(&nx_driver); 572 NX_DEBUGFS_INIT(&nx_driver);
573 if (rc)
574 goto out;
575 573
576 nx_driver.of.status = NX_OKAY; 574 nx_driver.of.status = NX_OKAY;
577 575
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index c3e54af18645..c6b5a3be02be 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -76,20 +76,12 @@ struct nx_stats {
76 atomic_t last_error_pid; 76 atomic_t last_error_pid;
77}; 77};
78 78
79struct nx_debugfs {
80 struct dentry *dfs_root;
81 struct dentry *dfs_aes_ops, *dfs_aes_bytes;
82 struct dentry *dfs_sha256_ops, *dfs_sha256_bytes;
83 struct dentry *dfs_sha512_ops, *dfs_sha512_bytes;
84 struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid;
85};
86
87struct nx_crypto_driver { 79struct nx_crypto_driver {
88 struct nx_stats stats; 80 struct nx_stats stats;
89 struct nx_of of; 81 struct nx_of of;
90 struct vio_dev *viodev; 82 struct vio_dev *viodev;
91 struct vio_driver viodriver; 83 struct vio_driver viodriver;
92 struct nx_debugfs dfs; 84 struct dentry *dfs_root;
93}; 85};
94 86
95#define NX_GCM4106_NONCE_LEN (4) 87#define NX_GCM4106_NONCE_LEN (4)
@@ -177,7 +169,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
177#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv) 169#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
178#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv) 170#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv)
179 171
180int nx_debugfs_init(struct nx_crypto_driver *); 172void nx_debugfs_init(struct nx_crypto_driver *);
181void nx_debugfs_fini(struct nx_crypto_driver *); 173void nx_debugfs_fini(struct nx_crypto_driver *);
182#else 174#else
183#define NX_DEBUGFS_INIT(drv) (0) 175#define NX_DEBUGFS_INIT(drv) (0)
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index 03e4f0363c6a..e0d44a5512ab 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -30,62 +30,37 @@
30 * Documentation/ABI/testing/debugfs-pfo-nx-crypto 30 * Documentation/ABI/testing/debugfs-pfo-nx-crypto
31 */ 31 */
32 32
33int nx_debugfs_init(struct nx_crypto_driver *drv) 33void nx_debugfs_init(struct nx_crypto_driver *drv)
34{ 34{
35 struct nx_debugfs *dfs = &drv->dfs; 35 struct dentry *root;
36 36
37 dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL); 37 root = debugfs_create_dir(NX_NAME, NULL);
38 drv->dfs_root = root;
38 39
39 dfs->dfs_aes_ops = 40 debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
40 debugfs_create_u32("aes_ops", 41 root, (u32 *)&drv->stats.aes_ops);
41 S_IRUSR | S_IRGRP | S_IROTH, 42 debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
42 dfs->dfs_root, (u32 *)&drv->stats.aes_ops); 43 root, (u32 *)&drv->stats.sha256_ops);
43 dfs->dfs_sha256_ops = 44 debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
44 debugfs_create_u32("sha256_ops", 45 root, (u32 *)&drv->stats.sha512_ops);
45 S_IRUSR | S_IRGRP | S_IROTH, 46 debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
46 dfs->dfs_root, 47 root, (u64 *)&drv->stats.aes_bytes);
47 (u32 *)&drv->stats.sha256_ops); 48 debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
48 dfs->dfs_sha512_ops = 49 root, (u64 *)&drv->stats.sha256_bytes);
49 debugfs_create_u32("sha512_ops", 50 debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
50 S_IRUSR | S_IRGRP | S_IROTH, 51 root, (u64 *)&drv->stats.sha512_bytes);
51 dfs->dfs_root, 52 debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
52 (u32 *)&drv->stats.sha512_ops); 53 root, (u32 *)&drv->stats.errors);
53 dfs->dfs_aes_bytes = 54 debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
54 debugfs_create_u64("aes_bytes", 55 root, (u32 *)&drv->stats.last_error);
55 S_IRUSR | S_IRGRP | S_IROTH, 56 debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
56 dfs->dfs_root, 57 root, (u32 *)&drv->stats.last_error_pid);
57 (u64 *)&drv->stats.aes_bytes);
58 dfs->dfs_sha256_bytes =
59 debugfs_create_u64("sha256_bytes",
60 S_IRUSR | S_IRGRP | S_IROTH,
61 dfs->dfs_root,
62 (u64 *)&drv->stats.sha256_bytes);
63 dfs->dfs_sha512_bytes =
64 debugfs_create_u64("sha512_bytes",
65 S_IRUSR | S_IRGRP | S_IROTH,
66 dfs->dfs_root,
67 (u64 *)&drv->stats.sha512_bytes);
68 dfs->dfs_errors =
69 debugfs_create_u32("errors",
70 S_IRUSR | S_IRGRP | S_IROTH,
71 dfs->dfs_root, (u32 *)&drv->stats.errors);
72 dfs->dfs_last_error =
73 debugfs_create_u32("last_error",
74 S_IRUSR | S_IRGRP | S_IROTH,
75 dfs->dfs_root,
76 (u32 *)&drv->stats.last_error);
77 dfs->dfs_last_error_pid =
78 debugfs_create_u32("last_error_pid",
79 S_IRUSR | S_IRGRP | S_IROTH,
80 dfs->dfs_root,
81 (u32 *)&drv->stats.last_error_pid);
82 return 0;
83} 58}
84 59
85void 60void
86nx_debugfs_fini(struct nx_crypto_driver *drv) 61nx_debugfs_fini(struct nx_crypto_driver *drv)
87{ 62{
88 debugfs_remove_recursive(drv->dfs.dfs_root); 63 debugfs_remove_recursive(drv->dfs_root);
89} 64}
90 65
91#endif 66#endif
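
The debugfs rework relies on two properties of the API: individual file dentries never need to be kept (removal is recursive from the root), and the creation calls are not meant to be error-checked, which is why nx_debugfs_init() can drop its return value entirely. A hedged kernel-context sketch of the resulting shape (types and names are illustrative, not the nx driver's):

struct example_stats {
	u32 ops;
	u64 bytes;
};

static struct dentry *example_debugfs_init(struct example_stats *st)
{
	struct dentry *root = debugfs_create_dir("example", NULL);

	/* no need to keep or check the per-file dentries */
	debugfs_create_u32("ops", 0444, root, &st->ops);
	debugfs_create_u64("bytes", 0444, root, &st->bytes);

	return root;	/* hand to debugfs_remove_recursive() on teardown */
}
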
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index c8d401646902..b50eb55f8f57 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -131,7 +131,6 @@ struct qat_alg_ablkcipher_ctx {
131 struct icp_qat_fw_la_bulk_req dec_fw_req; 131 struct icp_qat_fw_la_bulk_req dec_fw_req;
132 struct qat_crypto_instance *inst; 132 struct qat_crypto_instance *inst;
133 struct crypto_tfm *tfm; 133 struct crypto_tfm *tfm;
134 spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
135}; 134};
136 135
137static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) 136static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -223,6 +222,9 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
223 return -EFAULT; 222 return -EFAULT;
224 223
225 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); 224 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
225 if (offset < 0)
226 return -EFAULT;
227
226 hash_state_out = (__be32 *)(hash->sha.state1 + offset); 228 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
227 hash512_state_out = (__be64 *)hash_state_out; 229 hash512_state_out = (__be64 *)hash_state_out;
228 230
@@ -253,7 +255,24 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
253 return 0; 255 return 0;
254} 256}
255 257
256static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) 258static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
259{
260 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
261 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
262 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
263 ICP_QAT_FW_LA_UPDATE_STATE);
264}
265
266static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
267{
268 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
269 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
270 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
271 ICP_QAT_FW_LA_NO_UPDATE_STATE);
272}
273
274static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
275 int aead)
257{ 276{
258 header->hdr_flags = 277 header->hdr_flags =
259 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); 278 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -263,12 +282,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
263 QAT_COMN_PTR_TYPE_SGL); 282 QAT_COMN_PTR_TYPE_SGL);
264 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, 283 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
265 ICP_QAT_FW_LA_PARTIAL_NONE); 284 ICP_QAT_FW_LA_PARTIAL_NONE);
266 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, 285 if (aead)
267 ICP_QAT_FW_CIPH_IV_16BYTE_DATA); 286 qat_alg_init_hdr_no_iv_updt(header);
287 else
288 qat_alg_init_hdr_iv_updt(header);
268 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, 289 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
269 ICP_QAT_FW_LA_NO_PROTO); 290 ICP_QAT_FW_LA_NO_PROTO);
270 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
271 ICP_QAT_FW_LA_NO_UPDATE_STATE);
272} 291}
273 292
274static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, 293static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
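The hunk above replaces the single block of IV flag assignments with two helpers chosen by a new aead argument: AEAD sessions keep the 16-byte inline IV with no state update, while plain ablkcipher sessions switch to a 64-bit IV pointer plus UPDATE_STATE so the firmware can hand back the next IV. A minimal userspace sketch of the same select-a-helper shape; the flag values below are invented stand-ins, not the real ICP_QAT_FW_* constants:

    /* Sketch only: hypothetical flag bits standing in for ICP_QAT_FW_*. */
    #include <stdint.h>
    #include <stdio.h>

    #define IV_64BIT_PTR   (1u << 0)   /* hypothetical */
    #define UPDATE_STATE   (1u << 1)   /* hypothetical */

    static void init_hdr_iv_updt(uint32_t *flags)
    {
            *flags |= IV_64BIT_PTR | UPDATE_STATE;   /* cipher-only path */
    }

    static void init_hdr_no_iv_updt(uint32_t *flags)
    {
            /* AEAD path: inline 16-byte IV, no state update */
    }

    static void init_common_hdr(uint32_t *flags, int aead)
    {
            *flags = 0;
            if (aead)
                    init_hdr_no_iv_updt(flags);
            else
                    init_hdr_iv_updt(flags);
    }

    int main(void)
    {
            uint32_t f;

            init_common_hdr(&f, 1);
            printf("aead flags:   %#x\n", f);
            init_common_hdr(&f, 0);
            printf("cipher flags: %#x\n", f);
            return 0;
    }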
@@ -303,7 +322,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
303 return -EFAULT; 322 return -EFAULT;
304 323
305 /* Request setup */ 324 /* Request setup */
306 qat_alg_init_common_hdr(header); 325 qat_alg_init_common_hdr(header, 1);
307 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; 326 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
308 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, 327 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
309 ICP_QAT_FW_LA_DIGEST_IN_BUFFER); 328 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -390,7 +409,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
390 return -EFAULT; 409 return -EFAULT;
391 410
392 /* Request setup */ 411 /* Request setup */
393 qat_alg_init_common_hdr(header); 412 qat_alg_init_common_hdr(header, 1);
394 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; 413 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
395 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, 414 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
396 ICP_QAT_FW_LA_DIGEST_IN_BUFFER); 415 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -454,7 +473,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
454 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl; 473 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
455 474
456 memcpy(cd->aes.key, key, keylen); 475 memcpy(cd->aes.key, key, keylen);
457 qat_alg_init_common_hdr(header); 476 qat_alg_init_common_hdr(header, 0);
458 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; 477 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
459 cd_pars->u.s.content_desc_params_sz = 478 cd_pars->u.s.content_desc_params_sz =
460 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3; 479 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
@@ -576,45 +595,52 @@ bad_key:
576 return -EINVAL; 595 return -EINVAL;
577} 596}
578 597
579static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, 598static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
599 unsigned int keylen)
600{
601 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
602
603 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
604 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
605 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
606 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
607
608 return qat_alg_aead_init_sessions(tfm, key, keylen,
609 ICP_QAT_HW_CIPHER_CBC_MODE);
610}
611
612static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
580 unsigned int keylen) 613 unsigned int keylen)
581{ 614{
582 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); 615 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
616 struct qat_crypto_instance *inst = NULL;
617 int node = get_current_node();
583 struct device *dev; 618 struct device *dev;
619 int ret;
584 620
585 if (ctx->enc_cd) { 621 inst = qat_crypto_get_instance_node(node);
586 /* rekeying */ 622 if (!inst)
587 dev = &GET_DEV(ctx->inst->accel_dev); 623 return -EINVAL;
588 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); 624 dev = &GET_DEV(inst->accel_dev);
589 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); 625 ctx->inst = inst;
590 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); 626 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
591 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); 627 &ctx->enc_cd_paddr,
592 } else { 628 GFP_ATOMIC);
593 /* new key */ 629 if (!ctx->enc_cd) {
594 int node = get_current_node(); 630 ret = -ENOMEM;
595 struct qat_crypto_instance *inst = 631 goto out_free_inst;
596 qat_crypto_get_instance_node(node); 632 }
597 if (!inst) { 633 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
598 return -EINVAL; 634 &ctx->dec_cd_paddr,
599 } 635 GFP_ATOMIC);
600 636 if (!ctx->dec_cd) {
601 dev = &GET_DEV(inst->accel_dev); 637 ret = -ENOMEM;
602 ctx->inst = inst; 638 goto out_free_enc;
603 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
604 &ctx->enc_cd_paddr,
605 GFP_ATOMIC);
606 if (!ctx->enc_cd) {
607 return -ENOMEM;
608 }
609 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
610 &ctx->dec_cd_paddr,
611 GFP_ATOMIC);
612 if (!ctx->dec_cd) {
613 goto out_free_enc;
614 }
615 } 639 }
616 if (qat_alg_aead_init_sessions(tfm, key, keylen, 640
617 ICP_QAT_HW_CIPHER_CBC_MODE)) 641 ret = qat_alg_aead_init_sessions(tfm, key, keylen,
642 ICP_QAT_HW_CIPHER_CBC_MODE);
643 if (ret)
618 goto out_free_all; 644 goto out_free_all;
619 645
620 return 0; 646 return 0;
@@ -629,7 +655,21 @@ out_free_enc:
629 dma_free_coherent(dev, sizeof(struct qat_alg_cd), 655 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
630 ctx->enc_cd, ctx->enc_cd_paddr); 656 ctx->enc_cd, ctx->enc_cd_paddr);
631 ctx->enc_cd = NULL; 657 ctx->enc_cd = NULL;
632 return -ENOMEM; 658out_free_inst:
659 ctx->inst = NULL;
660 qat_crypto_put_instance(inst);
661 return ret;
662}
663
664static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
665 unsigned int keylen)
666{
667 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
668
669 if (ctx->enc_cd)
670 return qat_alg_aead_rekey(tfm, key, keylen);
671 else
672 return qat_alg_aead_newkey(tfm, key, keylen);
633} 673}
634 674
635static void qat_alg_free_bufl(struct qat_crypto_instance *inst, 675static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
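The setkey rework above splits one branch-heavy function into qat_alg_aead_rekey() (descriptors already allocated, just clear and re-derive the session) and qat_alg_aead_newkey() (grab a crypto instance, allocate the DMA-coherent descriptors, unwind everything on failure), leaving qat_alg_aead_setkey() as a dispatcher on ctx->enc_cd. A toy userspace version of the same shape, with invented names and plain calloc() standing in for the driver's DMA-coherent allocations:

    /* Illustrative only; toy_ctx/calloc stand in for the driver's DMA state. */
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    struct toy_ctx {
            unsigned char *enc_cd;   /* NULL until the first setkey */
            unsigned char *dec_cd;
            size_t cd_size;
    };

    static int toy_rekey(struct toy_ctx *ctx, const unsigned char *key, size_t keylen)
    {
            memset(ctx->enc_cd, 0, ctx->cd_size);   /* reuse existing buffers */
            memset(ctx->dec_cd, 0, ctx->cd_size);
            /* ... re-derive session material from key/keylen ... */
            (void)key; (void)keylen;
            return 0;
    }

    static int toy_newkey(struct toy_ctx *ctx, const unsigned char *key, size_t keylen)
    {
            ctx->enc_cd = calloc(1, ctx->cd_size);
            if (!ctx->enc_cd)
                    return -ENOMEM;
            ctx->dec_cd = calloc(1, ctx->cd_size);
            if (!ctx->dec_cd) {
                    free(ctx->enc_cd);      /* unwind in reverse order */
                    ctx->enc_cd = NULL;
                    return -ENOMEM;
            }
            return toy_rekey(ctx, key, keylen);
    }

    static int toy_setkey(struct toy_ctx *ctx, const unsigned char *key, size_t keylen)
    {
            return ctx->enc_cd ? toy_rekey(ctx, key, keylen)
                               : toy_newkey(ctx, key, keylen);
    }

The driver version additionally releases the acquired instance with qat_crypto_put_instance() on the error path, which the old single-function form never did.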
@@ -677,8 +717,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
677 dma_addr_t blp; 717 dma_addr_t blp;
678 dma_addr_t bloutp = 0; 718 dma_addr_t bloutp = 0;
679 struct scatterlist *sg; 719 struct scatterlist *sg;
680 size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + 720 size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
681 ((1 + n) * sizeof(struct qat_alg_buf));
682 721
683 if (unlikely(!n)) 722 if (unlikely(!n))
684 return -EINVAL; 723 return -EINVAL;
@@ -715,8 +754,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
715 struct qat_alg_buf *bufers; 754 struct qat_alg_buf *bufers;
716 755
717 n = sg_nents(sglout); 756 n = sg_nents(sglout);
718 sz_out = sizeof(struct qat_alg_buf_list) + 757 sz_out = struct_size(buflout, bufers, n + 1);
719 ((1 + n) * sizeof(struct qat_alg_buf));
720 sg_nctr = 0; 758 sg_nctr = 0;
721 buflout = kzalloc_node(sz_out, GFP_ATOMIC, 759 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
722 dev_to_node(&GET_DEV(inst->accel_dev))); 760 dev_to_node(&GET_DEV(inst->accel_dev)));
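Both buffer-list size computations now use struct_size() from include/linux/overflow.h, which folds sizeof(struct) + count * sizeof(element) for a flexible-array member and saturates to SIZE_MAX on overflow rather than silently wrapping. A userspace approximation (without the saturating overflow check) showing that the old open-coded math and the helper agree:

    #include <stddef.h>
    #include <stdio.h>

    struct buf { unsigned long addr; unsigned int len; };

    struct buf_list {
            unsigned int num_bufs;
            struct buf bufs[];              /* flexible array member */
    };

    /* Rough stand-in for the kernel's struct_size(); the real macro also
     * checks for multiplication/addition overflow and returns SIZE_MAX. */
    #define TOY_STRUCT_SIZE(ptr, member, n) \
            (sizeof(*(ptr)) + (n) * sizeof((ptr)->member[0]))

    int main(void)
    {
            struct buf_list *bl = NULL;
            size_t n = 4;

            printf("open-coded: %zu\n", sizeof(struct buf_list) + n * sizeof(struct buf));
            printf("helper:     %zu\n", TOY_STRUCT_SIZE(bl, bufs, n));
            return 0;
    }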
@@ -801,11 +839,17 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
801 struct qat_crypto_instance *inst = ctx->inst; 839 struct qat_crypto_instance *inst = ctx->inst;
802 struct ablkcipher_request *areq = qat_req->ablkcipher_req; 840 struct ablkcipher_request *areq = qat_req->ablkcipher_req;
803 uint8_t stat_filed = qat_resp->comn_resp.comn_status; 841 uint8_t stat_filed = qat_resp->comn_resp.comn_status;
842 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
804 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); 843 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
805 844
806 qat_alg_free_bufl(inst, qat_req); 845 qat_alg_free_bufl(inst, qat_req);
807 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) 846 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
808 res = -EINVAL; 847 res = -EINVAL;
848
849 memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
850 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
851 qat_req->iv_paddr);
852
809 areq->base.complete(&areq->base, res); 853 areq->base.complete(&areq->base, res);
810} 854}
811 855
@@ -905,50 +949,49 @@ static int qat_alg_aead_enc(struct aead_request *areq)
905 return -EINPROGRESS; 949 return -EINPROGRESS;
906} 950}
907 951
908static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 952static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
953 const u8 *key, unsigned int keylen,
954 int mode)
955{
956 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
957 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
958 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
959 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
960
961 return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
962}
963
964static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
909 const u8 *key, unsigned int keylen, 965 const u8 *key, unsigned int keylen,
910 int mode) 966 int mode)
911{ 967{
912 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); 968 struct qat_crypto_instance *inst = NULL;
913 struct device *dev; 969 struct device *dev;
970 int node = get_current_node();
971 int ret;
914 972
915 spin_lock(&ctx->lock); 973 inst = qat_crypto_get_instance_node(node);
916 if (ctx->enc_cd) { 974 if (!inst)
917 /* rekeying */ 975 return -EINVAL;
918 dev = &GET_DEV(ctx->inst->accel_dev); 976 dev = &GET_DEV(inst->accel_dev);
919 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); 977 ctx->inst = inst;
920 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); 978 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
921 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); 979 &ctx->enc_cd_paddr,
922 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); 980 GFP_ATOMIC);
923 } else { 981 if (!ctx->enc_cd) {
924 /* new key */ 982 ret = -ENOMEM;
925 int node = get_current_node(); 983 goto out_free_instance;
926 struct qat_crypto_instance *inst = 984 }
927 qat_crypto_get_instance_node(node); 985 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
928 if (!inst) { 986 &ctx->dec_cd_paddr,
929 spin_unlock(&ctx->lock); 987 GFP_ATOMIC);
930 return -EINVAL; 988 if (!ctx->dec_cd) {
931 } 989 ret = -ENOMEM;
932 990 goto out_free_enc;
933 dev = &GET_DEV(inst->accel_dev);
934 ctx->inst = inst;
935 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
936 &ctx->enc_cd_paddr,
937 GFP_ATOMIC);
938 if (!ctx->enc_cd) {
939 spin_unlock(&ctx->lock);
940 return -ENOMEM;
941 }
942 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
943 &ctx->dec_cd_paddr,
944 GFP_ATOMIC);
945 if (!ctx->dec_cd) {
946 spin_unlock(&ctx->lock);
947 goto out_free_enc;
948 }
949 } 991 }
950 spin_unlock(&ctx->lock); 992
951 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode)) 993 ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
994 if (ret)
952 goto out_free_all; 995 goto out_free_all;
953 996
954 return 0; 997 return 0;
@@ -963,7 +1006,22 @@ out_free_enc:
963 dma_free_coherent(dev, sizeof(*ctx->enc_cd), 1006 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
964 ctx->enc_cd, ctx->enc_cd_paddr); 1007 ctx->enc_cd, ctx->enc_cd_paddr);
965 ctx->enc_cd = NULL; 1008 ctx->enc_cd = NULL;
966 return -ENOMEM; 1009out_free_instance:
1010 ctx->inst = NULL;
1011 qat_crypto_put_instance(inst);
1012 return ret;
1013}
1014
1015static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
1016 const u8 *key, unsigned int keylen,
1017 int mode)
1018{
1019 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
1020
1021 if (ctx->enc_cd)
1022 return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
1023 else
1024 return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
967} 1025}
968 1026
969static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm, 1027static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
@@ -995,11 +1053,23 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
995 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); 1053 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
996 struct icp_qat_fw_la_cipher_req_params *cipher_param; 1054 struct icp_qat_fw_la_cipher_req_params *cipher_param;
997 struct icp_qat_fw_la_bulk_req *msg; 1055 struct icp_qat_fw_la_bulk_req *msg;
1056 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
998 int ret, ctr = 0; 1057 int ret, ctr = 0;
999 1058
1059 if (req->nbytes == 0)
1060 return 0;
1061
1062 qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1063 &qat_req->iv_paddr, GFP_ATOMIC);
1064 if (!qat_req->iv)
1065 return -ENOMEM;
1066
1000 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); 1067 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1001 if (unlikely(ret)) 1068 if (unlikely(ret)) {
1069 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1070 qat_req->iv_paddr);
1002 return ret; 1071 return ret;
1072 }
1003 1073
1004 msg = &qat_req->req; 1074 msg = &qat_req->req;
1005 *msg = ctx->enc_fw_req; 1075 *msg = ctx->enc_fw_req;
@@ -1012,18 +1082,29 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
1012 cipher_param = (void *)&qat_req->req.serv_specif_rqpars; 1082 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1013 cipher_param->cipher_length = req->nbytes; 1083 cipher_param->cipher_length = req->nbytes;
1014 cipher_param->cipher_offset = 0; 1084 cipher_param->cipher_offset = 0;
1015 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); 1085 cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1086 memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
1016 do { 1087 do {
1017 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); 1088 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1018 } while (ret == -EAGAIN && ctr++ < 10); 1089 } while (ret == -EAGAIN && ctr++ < 10);
1019 1090
1020 if (ret == -EAGAIN) { 1091 if (ret == -EAGAIN) {
1021 qat_alg_free_bufl(ctx->inst, qat_req); 1092 qat_alg_free_bufl(ctx->inst, qat_req);
1093 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1094 qat_req->iv_paddr);
1022 return -EBUSY; 1095 return -EBUSY;
1023 } 1096 }
1024 return -EINPROGRESS; 1097 return -EINPROGRESS;
1025} 1098}
1026 1099
1100static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
1101{
1102 if (req->nbytes % AES_BLOCK_SIZE != 0)
1103 return -EINVAL;
1104
1105 return qat_alg_ablkcipher_encrypt(req);
1106}
1107
1027static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) 1108static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1028{ 1109{
1029 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); 1110 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
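The encrypt path (and, in the next hunk, the decrypt path) gains a qat_alg_ablkcipher_blk_encrypt() wrapper that short-circuits zero-length requests and rejects lengths that are not a multiple of AES_BLOCK_SIZE. Only the CBC and XTS algorithm entries later switch to the _blk_ wrappers; the AES-CTR entry keeps the unchecked handlers and has its cra_blocksize changed to 1, since CTR behaves as a stream cipher. The length rule itself, as a trivial standalone check:

    #include <errno.h>
    #include <stdio.h>

    #define AES_BLOCK_SIZE 16

    /* Mirrors the new _blk_ wrappers: empty requests succeed trivially,
     * partial blocks are rejected before anything is submitted. */
    static int blk_len_ok(unsigned int nbytes)
    {
            if (nbytes == 0)
                    return 0;
            if (nbytes % AES_BLOCK_SIZE)
                    return -EINVAL;
            return 1;       /* would go on to build and submit the request */
    }

    int main(void)
    {
            printf("%d %d %d\n", blk_len_ok(0), blk_len_ok(15), blk_len_ok(32));
            return 0;
    }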
@@ -1032,11 +1113,23 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1032 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); 1113 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1033 struct icp_qat_fw_la_cipher_req_params *cipher_param; 1114 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1034 struct icp_qat_fw_la_bulk_req *msg; 1115 struct icp_qat_fw_la_bulk_req *msg;
1116 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1035 int ret, ctr = 0; 1117 int ret, ctr = 0;
1036 1118
1119 if (req->nbytes == 0)
1120 return 0;
1121
1122 qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1123 &qat_req->iv_paddr, GFP_ATOMIC);
1124 if (!qat_req->iv)
1125 return -ENOMEM;
1126
1037 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); 1127 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1038 if (unlikely(ret)) 1128 if (unlikely(ret)) {
1129 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1130 qat_req->iv_paddr);
1039 return ret; 1131 return ret;
1132 }
1040 1133
1041 msg = &qat_req->req; 1134 msg = &qat_req->req;
1042 *msg = ctx->dec_fw_req; 1135 *msg = ctx->dec_fw_req;
@@ -1049,18 +1142,28 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1049 cipher_param = (void *)&qat_req->req.serv_specif_rqpars; 1142 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1050 cipher_param->cipher_length = req->nbytes; 1143 cipher_param->cipher_length = req->nbytes;
1051 cipher_param->cipher_offset = 0; 1144 cipher_param->cipher_offset = 0;
1052 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); 1145 cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1146 memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
1053 do { 1147 do {
1054 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); 1148 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1055 } while (ret == -EAGAIN && ctr++ < 10); 1149 } while (ret == -EAGAIN && ctr++ < 10);
1056 1150
1057 if (ret == -EAGAIN) { 1151 if (ret == -EAGAIN) {
1058 qat_alg_free_bufl(ctx->inst, qat_req); 1152 qat_alg_free_bufl(ctx->inst, qat_req);
1153 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1154 qat_req->iv_paddr);
1059 return -EBUSY; 1155 return -EBUSY;
1060 } 1156 }
1061 return -EINPROGRESS; 1157 return -EINPROGRESS;
1062} 1158}
1063 1159
1160static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
1161{
1162 if (req->nbytes % AES_BLOCK_SIZE != 0)
1163 return -EINVAL;
1164
1165 return qat_alg_ablkcipher_decrypt(req);
1166}
1064static int qat_alg_aead_init(struct crypto_aead *tfm, 1167static int qat_alg_aead_init(struct crypto_aead *tfm,
1065 enum icp_qat_hw_auth_algo hash, 1168 enum icp_qat_hw_auth_algo hash,
1066 const char *hash_name) 1169 const char *hash_name)
@@ -1119,7 +1222,6 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1119{ 1222{
1120 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 1223 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1121 1224
1122 spin_lock_init(&ctx->lock);
1123 tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request); 1225 tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
1124 ctx->tfm = tfm; 1226 ctx->tfm = tfm;
1125 return 0; 1227 return 0;
@@ -1221,8 +1323,8 @@ static struct crypto_alg qat_algs[] = { {
1221 .cra_u = { 1323 .cra_u = {
1222 .ablkcipher = { 1324 .ablkcipher = {
1223 .setkey = qat_alg_ablkcipher_cbc_setkey, 1325 .setkey = qat_alg_ablkcipher_cbc_setkey,
1224 .decrypt = qat_alg_ablkcipher_decrypt, 1326 .decrypt = qat_alg_ablkcipher_blk_decrypt,
1225 .encrypt = qat_alg_ablkcipher_encrypt, 1327 .encrypt = qat_alg_ablkcipher_blk_encrypt,
1226 .min_keysize = AES_MIN_KEY_SIZE, 1328 .min_keysize = AES_MIN_KEY_SIZE,
1227 .max_keysize = AES_MAX_KEY_SIZE, 1329 .max_keysize = AES_MAX_KEY_SIZE,
1228 .ivsize = AES_BLOCK_SIZE, 1330 .ivsize = AES_BLOCK_SIZE,
@@ -1233,7 +1335,7 @@ static struct crypto_alg qat_algs[] = { {
1233 .cra_driver_name = "qat_aes_ctr", 1335 .cra_driver_name = "qat_aes_ctr",
1234 .cra_priority = 4001, 1336 .cra_priority = 4001,
1235 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 1337 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1236 .cra_blocksize = AES_BLOCK_SIZE, 1338 .cra_blocksize = 1,
1237 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), 1339 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1238 .cra_alignmask = 0, 1340 .cra_alignmask = 0,
1239 .cra_type = &crypto_ablkcipher_type, 1341 .cra_type = &crypto_ablkcipher_type,
@@ -1265,8 +1367,8 @@ static struct crypto_alg qat_algs[] = { {
1265 .cra_u = { 1367 .cra_u = {
1266 .ablkcipher = { 1368 .ablkcipher = {
1267 .setkey = qat_alg_ablkcipher_xts_setkey, 1369 .setkey = qat_alg_ablkcipher_xts_setkey,
1268 .decrypt = qat_alg_ablkcipher_decrypt, 1370 .decrypt = qat_alg_ablkcipher_blk_decrypt,
1269 .encrypt = qat_alg_ablkcipher_encrypt, 1371 .encrypt = qat_alg_ablkcipher_blk_encrypt,
1270 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1372 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1271 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1373 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1272 .ivsize = AES_BLOCK_SIZE, 1374 .ivsize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index dc0273fe3620..c77a80020cde 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -88,6 +88,8 @@ struct qat_crypto_request {
88 struct qat_crypto_request_buffs buf; 88 struct qat_crypto_request_buffs buf;
89 void (*cb)(struct icp_qat_fw_la_resp *resp, 89 void (*cb)(struct icp_qat_fw_la_resp *resp,
90 struct qat_crypto_request *req); 90 struct qat_crypto_request *req);
91 void *iv;
92 dma_addr_t iv_paddr;
91}; 93};
92 94
93#endif 95#endif
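The two new qat_crypto_request fields are the other half of this change: instead of copying the IV into the firmware message (cipher_IV_array), each request now carries a per-request DMA-coherent IV buffer whose bus address is placed in cipher_IV_ptr, and the completion callback copies the updated IV back into req->info so a chained request sees the next IV. A condensed sketch of that round trip, kernel-context only (not a standalone program), assembled from the calls visible in the hunks above with error handling trimmed:

    /* Sketch of the per-request IV life cycle. */
    qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
                                     &qat_req->iv_paddr, GFP_ATOMIC);
    if (!qat_req->iv)
            return -ENOMEM;

    memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);          /* IV in */
    cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;     /* bus address to FW */

    /* ... submit; later, in the completion callback ... */
    memcpy(req->info, qat_req->iv, AES_BLOCK_SIZE);          /* updated IV out */
    dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, qat_req->iv_paddr);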
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 6b498a90181e..b0b8e3d48aef 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1384,7 +1384,6 @@ MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1384static int sahara_probe(struct platform_device *pdev) 1384static int sahara_probe(struct platform_device *pdev)
1385{ 1385{
1386 struct sahara_dev *dev; 1386 struct sahara_dev *dev;
1387 struct resource *res;
1388 u32 version; 1387 u32 version;
1389 int irq; 1388 int irq;
1390 int err; 1389 int err;
@@ -1398,8 +1397,7 @@ static int sahara_probe(struct platform_device *pdev)
1398 platform_set_drvdata(pdev, dev); 1397 platform_set_drvdata(pdev, dev);
1399 1398
1400 /* Get the base address */ 1399 /* Get the base address */
1401 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1400 dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1402 dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
1403 if (IS_ERR(dev->regs_base)) 1401 if (IS_ERR(dev->regs_base))
1404 return PTR_ERR(dev->regs_base); 1402 return PTR_ERR(dev->regs_base);
1405 1403
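The sahara change is a pure convenience conversion: devm_platform_ioremap_resource(pdev, index) wraps the platform_get_resource() + devm_ioremap_resource() pair into a single call with the same IS_ERR()/PTR_ERR() error convention. In a probe routine the before/after looks roughly like this (kernel-context sketch, not standalone):

    /* old */
    res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);

    /* new */
    base = devm_platform_ioremap_resource(pdev, 0);

    if (IS_ERR(base))
            return PTR_ERR(base);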
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index ce77e38c77e0..518e0e0b11a9 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,4 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o 2obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
3obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o 3obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
4obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o 4obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 440c9f1bd006..440c9f1bd006 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 29519d1c403f..23061f2bc74b 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -349,7 +349,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
349 return -ETIMEDOUT; 349 return -ETIMEDOUT;
350 350
351 if ((hdev->flags & HASH_FLAGS_HMAC) && 351 if ((hdev->flags & HASH_FLAGS_HMAC) &&
352 (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) { 352 (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
353 hdev->flags |= HASH_FLAGS_HMAC_KEY; 353 hdev->flags |= HASH_FLAGS_HMAC_KEY;
354 stm32_hash_write_key(hdev); 354 stm32_hash_write_key(hdev);
355 if (stm32_hash_wait_busy(hdev)) 355 if (stm32_hash_wait_busy(hdev))
@@ -447,8 +447,8 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
447 447
448 dma_async_issue_pending(hdev->dma_lch); 448 dma_async_issue_pending(hdev->dma_lch);
449 449
450 if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion, 450 if (!wait_for_completion_timeout(&hdev->dma_completion,
451 msecs_to_jiffies(100))) 451 msecs_to_jiffies(100)))
452 err = -ETIMEDOUT; 452 err = -ETIMEDOUT;
453 453
454 if (dma_async_is_tx_complete(hdev->dma_lch, cookie, 454 if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
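Two distinct fixes land in stm32-hash.c. The first corrects a classic bitmask slip: hdev->flags & ~HASH_FLAGS_HMAC_KEY is true whenever any other flag happens to be set, so the HMAC key was rewritten on every pass, whereas the intended test !(hdev->flags & HASH_FLAGS_HMAC_KEY) is true only while the key has not been written yet. The second switches to the non-interruptible wait for DMA completion, so a pending signal can no longer abort the 100 ms wait early and let the driver carry on before the DMA has actually finished. The flag mix-up is easy to reproduce in a few lines of userspace C (the bit values below are toy stand-ins for the HASH_FLAGS_* definitions):

    #include <stdio.h>

    #define HMAC      (1u << 0)   /* toy stand-ins */
    #define HMAC_KEY  (1u << 1)
    #define FINAL     (1u << 2)

    int main(void)
    {
            unsigned int flags = HMAC | FINAL;   /* key not written yet */

            /* buggy test: true because other bits are set, key state irrelevant */
            printf("flags & ~HMAC_KEY   -> %u\n", !!(flags & ~HMAC_KEY));

            flags |= HMAC_KEY;                   /* key already written */

            /* buggy test: still true, so the key would be written again */
            printf("flags & ~HMAC_KEY   -> %u\n", !!(flags & ~HMAC_KEY));
            /* fixed test: false once HMAC_KEY is set */
            printf("!(flags & HMAC_KEY) -> %u\n", !(flags & HMAC_KEY));
            return 0;
    }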
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 7b0c42882830..4ab14d58e85b 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -12,7 +12,7 @@
12 */ 12 */
13#include "sun4i-ss.h" 13#include "sun4i-ss.h"
14 14
15static int sun4i_ss_opti_poll(struct skcipher_request *areq) 15static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
16{ 16{
17 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 17 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
18 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); 18 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
@@ -114,6 +114,29 @@ release_ss:
114 return err; 114 return err;
115} 115}
116 116
117
118static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
119{
120 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
121 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
122 struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
123 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
124 int err;
125
126 skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
127 skcipher_request_set_callback(subreq, areq->base.flags, NULL,
128 NULL);
129 skcipher_request_set_crypt(subreq, areq->src, areq->dst,
130 areq->cryptlen, areq->iv);
131 if (ctx->mode & SS_DECRYPTION)
132 err = crypto_skcipher_decrypt(subreq);
133 else
134 err = crypto_skcipher_encrypt(subreq);
135 skcipher_request_zero(subreq);
136
137 return err;
138}
139
117/* Generic function that support SG with size not multiple of 4 */ 140/* Generic function that support SG with size not multiple of 4 */
118static int sun4i_ss_cipher_poll(struct skcipher_request *areq) 141static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
119{ 142{
@@ -140,8 +163,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
140 unsigned int todo; 163 unsigned int todo;
141 struct sg_mapping_iter mi, mo; 164 struct sg_mapping_iter mi, mo;
142 unsigned int oi, oo; /* offset for in and out */ 165 unsigned int oi, oo; /* offset for in and out */
143 char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
144 char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
145 unsigned int ob = 0; /* offset in buf */ 166 unsigned int ob = 0; /* offset in buf */
146 unsigned int obo = 0; /* offset in bufo*/ 167 unsigned int obo = 0; /* offset in bufo*/
147 unsigned int obl = 0; /* length of data in bufo */ 168 unsigned int obl = 0; /* length of data in bufo */
@@ -178,20 +199,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
178 if (no_chunk == 1 && !need_fallback) 199 if (no_chunk == 1 && !need_fallback)
179 return sun4i_ss_opti_poll(areq); 200 return sun4i_ss_opti_poll(areq);
180 201
181 if (need_fallback) { 202 if (need_fallback)
182 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); 203 return sun4i_ss_cipher_poll_fallback(areq);
183 skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
184 skcipher_request_set_callback(subreq, areq->base.flags, NULL,
185 NULL);
186 skcipher_request_set_crypt(subreq, areq->src, areq->dst,
187 areq->cryptlen, areq->iv);
188 if (ctx->mode & SS_DECRYPTION)
189 err = crypto_skcipher_decrypt(subreq);
190 else
191 err = crypto_skcipher_encrypt(subreq);
192 skcipher_request_zero(subreq);
193 return err;
194 }
195 204
196 spin_lock_irqsave(&ss->slock, flags); 205 spin_lock_irqsave(&ss->slock, flags);
197 206
@@ -224,6 +233,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
224 233
225 while (oleft) { 234 while (oleft) {
226 if (ileft) { 235 if (ileft) {
236 char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
237
227 /* 238 /*
228 * todo is the number of consecutive 4byte word that we 239 * todo is the number of consecutive 4byte word that we
229 * can read from current SG 240 * can read from current SG
@@ -281,6 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
281 oo = 0; 292 oo = 0;
282 } 293 }
283 } else { 294 } else {
295 char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
296
284 /* 297 /*
285 * read obl bytes in bufo, we read at maximum for 298 * read obl bytes in bufo, we read at maximum for
286 * emptying the device 299 * emptying the device
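The sun4i-ss rework is about stack footprint: the fallback path becomes its own noinline_for_stack helper, and the two large linearisation buffers (4 * SS_RX_MAX and 4 * SS_TX_MAX bytes) move into the narrower scopes that actually use them, so the worst-case frame of sun4i_ss_cipher_poll() shrinks. noinline_for_stack expands to plain noinline; the point is to keep a big frame out of the caller. The general idea in standalone C, with a made-up buffer size:

    #include <string.h>
    #include <stdio.h>

    #define BIG 4096   /* stand-in for 4 * SS_RX_MAX etc. */

    /* The large buffer lives only in this frame; because the helper is not
     * inlined, the caller's frame stays small on the common path that never
     * needs the buffer. */
    static __attribute__((noinline)) int slow_path(const char *src, size_t len)
    {
            char buf[BIG];

            memcpy(buf, src, len < sizeof(buf) ? len : sizeof(buf));
            return (unsigned char)buf[0];
    }

    static int fast_path(const char *src)
    {
            return (unsigned char)src[0];   /* no big buffer here */
    }

    int main(void)
    {
            const char msg[] = "hello";

            printf("%d %d\n", fast_path(msg), slow_path(msg, sizeof(msg)));
            return 0;
    }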
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index fbc7bf9d7380..c9d686a0e805 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -265,11 +265,11 @@ static int init_device(struct device *dev)
265 * callback must check err and feedback in descriptor header 265 * callback must check err and feedback in descriptor header
266 * for device processing status. 266 * for device processing status.
267 */ 267 */
268int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, 268static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269 void (*callback)(struct device *dev, 269 void (*callback)(struct device *dev,
270 struct talitos_desc *desc, 270 struct talitos_desc *desc,
271 void *context, int error), 271 void *context, int error),
272 void *context) 272 void *context)
273{ 273{
274 struct talitos_private *priv = dev_get_drvdata(dev); 274 struct talitos_private *priv = dev_get_drvdata(dev);
275 struct talitos_request *request; 275 struct talitos_request *request;
@@ -319,7 +319,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
319 319
320 return -EINPROGRESS; 320 return -EINPROGRESS;
321} 321}
322EXPORT_SYMBOL(talitos_submit); 322
323static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
324{
325 struct talitos_edesc *edesc;
326
327 if (!is_sec1)
328 return request->desc->hdr;
329
330 if (!request->desc->next_desc)
331 return request->desc->hdr1;
332
333 edesc = container_of(request->desc, struct talitos_edesc, desc);
334
335 return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
336}
323 337
324/* 338/*
325 * process what was done, notify callback of error if not 339 * process what was done, notify callback of error if not
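get_request_hdr() recovers the software-extended descriptor from the embedded hardware descriptor with container_of(), then reads the header of the second descriptor that SEC1 chains at edesc->buf + edesc->dma_len. container_of(ptr, type, member) is plain pointer arithmetic; a standalone illustration (the kernel macro additionally type-checks the pointer, which this simplified version omits):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct desc { unsigned int hdr; };

    struct edesc {
            int dma_len;
            struct desc desc;        /* embedded h/w descriptor */
    };

    int main(void)
    {
            struct edesc e = { .dma_len = 64, .desc = { .hdr = 0xdeadbeef } };
            struct desc *d = &e.desc;                         /* what the driver has */
            struct edesc *back = container_of(d, struct edesc, desc);

            printf("dma_len=%d hdr=%#x\n", back->dma_len, back->desc.hdr);
            return 0;
    }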
@@ -342,12 +356,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
342 356
343 /* descriptors with their done bits set don't get the error */ 357 /* descriptors with their done bits set don't get the error */
344 rmb(); 358 rmb();
345 if (!is_sec1) 359 hdr = get_request_hdr(request, is_sec1);
346 hdr = request->desc->hdr;
347 else if (request->desc->next_desc)
348 hdr = (request->desc + 1)->hdr1;
349 else
350 hdr = request->desc->hdr1;
351 360
352 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) 361 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
353 status = 0; 362 status = 0;
@@ -477,8 +486,14 @@ static u32 current_desc_hdr(struct device *dev, int ch)
477 } 486 }
478 } 487 }
479 488
480 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) 489 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
481 return (priv->chan[ch].fifo[iter].desc + 1)->hdr; 490 struct talitos_edesc *edesc;
491
492 edesc = container_of(priv->chan[ch].fifo[iter].desc,
493 struct talitos_edesc, desc);
494 return ((struct talitos_desc *)
495 (edesc->buf + edesc->dma_len))->hdr;
496 }
482 497
483 return priv->chan[ch].fifo[iter].desc->hdr; 498 return priv->chan[ch].fifo[iter].desc->hdr;
484} 499}
@@ -824,7 +839,11 @@ static void talitos_unregister_rng(struct device *dev)
824 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP 839 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
825 */ 840 */
826#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) 841#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
842#ifdef CONFIG_CRYPTO_DEV_TALITOS2
827#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) 843#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
844#else
845#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
846#endif
828#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 847#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
829 848
830struct talitos_ctx { 849struct talitos_ctx {
@@ -948,36 +967,6 @@ badkey:
948 goto out; 967 goto out;
949} 968}
950 969
951/*
952 * talitos_edesc - s/w-extended descriptor
953 * @src_nents: number of segments in input scatterlist
954 * @dst_nents: number of segments in output scatterlist
955 * @icv_ool: whether ICV is out-of-line
956 * @iv_dma: dma address of iv for checking continuity and link table
957 * @dma_len: length of dma mapped link_tbl space
958 * @dma_link_tbl: bus physical address of link_tbl/buf
959 * @desc: h/w descriptor
960 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
961 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
962 *
963 * if decrypting (with authcheck), or either one of src_nents or dst_nents
964 * is greater than 1, an integrity check value is concatenated to the end
965 * of link_tbl data
966 */
967struct talitos_edesc {
968 int src_nents;
969 int dst_nents;
970 bool icv_ool;
971 dma_addr_t iv_dma;
972 int dma_len;
973 dma_addr_t dma_link_tbl;
974 struct talitos_desc desc;
975 union {
976 struct talitos_ptr link_tbl[0];
977 u8 buf[0];
978 };
979};
980
981static void talitos_sg_unmap(struct device *dev, 970static void talitos_sg_unmap(struct device *dev,
982 struct talitos_edesc *edesc, 971 struct talitos_edesc *edesc,
983 struct scatterlist *src, 972 struct scatterlist *src,
@@ -1008,11 +997,13 @@ static void talitos_sg_unmap(struct device *dev,
1008 997
1009static void ipsec_esp_unmap(struct device *dev, 998static void ipsec_esp_unmap(struct device *dev,
1010 struct talitos_edesc *edesc, 999 struct talitos_edesc *edesc,
1011 struct aead_request *areq) 1000 struct aead_request *areq, bool encrypt)
1012{ 1001{
1013 struct crypto_aead *aead = crypto_aead_reqtfm(areq); 1002 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1014 struct talitos_ctx *ctx = crypto_aead_ctx(aead); 1003 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1015 unsigned int ivsize = crypto_aead_ivsize(aead); 1004 unsigned int ivsize = crypto_aead_ivsize(aead);
1005 unsigned int authsize = crypto_aead_authsize(aead);
1006 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1016 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP; 1007 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
1017 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3]; 1008 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1018 1009
@@ -1021,8 +1012,8 @@ static void ipsec_esp_unmap(struct device *dev,
1021 DMA_FROM_DEVICE); 1012 DMA_FROM_DEVICE);
1022 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE); 1013 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1023 1014
1024 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 1015 talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1025 areq->assoclen); 1016 cryptlen + authsize, areq->assoclen);
1026 1017
1027 if (edesc->dma_len) 1018 if (edesc->dma_len)
1028 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 1019 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
@@ -1032,7 +1023,7 @@ static void ipsec_esp_unmap(struct device *dev,
1032 unsigned int dst_nents = edesc->dst_nents ? : 1; 1023 unsigned int dst_nents = edesc->dst_nents ? : 1;
1033 1024
1034 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, 1025 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1035 areq->assoclen + areq->cryptlen - ivsize); 1026 areq->assoclen + cryptlen - ivsize);
1036 } 1027 }
1037} 1028}
1038 1029
@@ -1043,31 +1034,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
1043 struct talitos_desc *desc, void *context, 1034 struct talitos_desc *desc, void *context,
1044 int err) 1035 int err)
1045{ 1036{
1046 struct talitos_private *priv = dev_get_drvdata(dev);
1047 bool is_sec1 = has_ftr_sec1(priv);
1048 struct aead_request *areq = context; 1037 struct aead_request *areq = context;
1049 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1038 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1050 unsigned int authsize = crypto_aead_authsize(authenc);
1051 unsigned int ivsize = crypto_aead_ivsize(authenc); 1039 unsigned int ivsize = crypto_aead_ivsize(authenc);
1052 struct talitos_edesc *edesc; 1040 struct talitos_edesc *edesc;
1053 struct scatterlist *sg;
1054 void *icvdata;
1055 1041
1056 edesc = container_of(desc, struct talitos_edesc, desc); 1042 edesc = container_of(desc, struct talitos_edesc, desc);
1057 1043
1058 ipsec_esp_unmap(dev, edesc, areq); 1044 ipsec_esp_unmap(dev, edesc, areq, true);
1059
1060 /* copy the generated ICV to dst */
1061 if (edesc->icv_ool) {
1062 if (is_sec1)
1063 icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1064 else
1065 icvdata = &edesc->link_tbl[edesc->src_nents +
1066 edesc->dst_nents + 2];
1067 sg = sg_last(areq->dst, edesc->dst_nents);
1068 memcpy((char *)sg_virt(sg) + sg->length - authsize,
1069 icvdata, authsize);
1070 }
1071 1045
1072 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); 1046 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1073 1047
@@ -1084,32 +1058,16 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1084 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 1058 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1085 unsigned int authsize = crypto_aead_authsize(authenc); 1059 unsigned int authsize = crypto_aead_authsize(authenc);
1086 struct talitos_edesc *edesc; 1060 struct talitos_edesc *edesc;
1087 struct scatterlist *sg;
1088 char *oicv, *icv; 1061 char *oicv, *icv;
1089 struct talitos_private *priv = dev_get_drvdata(dev);
1090 bool is_sec1 = has_ftr_sec1(priv);
1091 1062
1092 edesc = container_of(desc, struct talitos_edesc, desc); 1063 edesc = container_of(desc, struct talitos_edesc, desc);
1093 1064
1094 ipsec_esp_unmap(dev, edesc, req); 1065 ipsec_esp_unmap(dev, edesc, req, false);
1095 1066
1096 if (!err) { 1067 if (!err) {
1097 /* auth check */ 1068 /* auth check */
1098 sg = sg_last(req->dst, edesc->dst_nents ? : 1); 1069 oicv = edesc->buf + edesc->dma_len;
1099 icv = (char *)sg_virt(sg) + sg->length - authsize; 1070 icv = oicv - authsize;
1100
1101 if (edesc->dma_len) {
1102 if (is_sec1)
1103 oicv = (char *)&edesc->dma_link_tbl +
1104 req->assoclen + req->cryptlen;
1105 else
1106 oicv = (char *)
1107 &edesc->link_tbl[edesc->src_nents +
1108 edesc->dst_nents + 2];
1109 if (edesc->icv_ool)
1110 icv = oicv + authsize;
1111 } else
1112 oicv = (char *)&edesc->link_tbl[0];
1113 1071
1114 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0; 1072 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1115 } 1073 }
@@ -1128,7 +1086,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1128 1086
1129 edesc = container_of(desc, struct talitos_edesc, desc); 1087 edesc = container_of(desc, struct talitos_edesc, desc);
1130 1088
1131 ipsec_esp_unmap(dev, edesc, req); 1089 ipsec_esp_unmap(dev, edesc, req, false);
1132 1090
1133 /* check ICV auth status */ 1091 /* check ICV auth status */
1134 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != 1092 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
@@ -1145,11 +1103,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1145 * stop at cryptlen bytes 1103 * stop at cryptlen bytes
1146 */ 1104 */
1147static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, 1105static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1148 unsigned int offset, int cryptlen, 1106 unsigned int offset, int datalen, int elen,
1149 struct talitos_ptr *link_tbl_ptr) 1107 struct talitos_ptr *link_tbl_ptr)
1150{ 1108{
1151 int n_sg = sg_count; 1109 int n_sg = elen ? sg_count + 1 : sg_count;
1152 int count = 0; 1110 int count = 0;
1111 int cryptlen = datalen + elen;
1153 1112
1154 while (cryptlen && sg && n_sg--) { 1113 while (cryptlen && sg && n_sg--) {
1155 unsigned int len = sg_dma_len(sg); 1114 unsigned int len = sg_dma_len(sg);
@@ -1164,11 +1123,20 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1164 if (len > cryptlen) 1123 if (len > cryptlen)
1165 len = cryptlen; 1124 len = cryptlen;
1166 1125
1126 if (datalen > 0 && len > datalen) {
1127 to_talitos_ptr(link_tbl_ptr + count,
1128 sg_dma_address(sg) + offset, datalen, 0);
1129 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1130 count++;
1131 len -= datalen;
1132 offset += datalen;
1133 }
1167 to_talitos_ptr(link_tbl_ptr + count, 1134 to_talitos_ptr(link_tbl_ptr + count,
1168 sg_dma_address(sg) + offset, len, 0); 1135 sg_dma_address(sg) + offset, len, 0);
1169 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); 1136 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1170 count++; 1137 count++;
1171 cryptlen -= len; 1138 cryptlen -= len;
1139 datalen -= len;
1172 offset = 0; 1140 offset = 0;
1173 1141
1174next: 1142next:
@@ -1178,7 +1146,7 @@ next:
1178 /* tag end of link table */ 1146 /* tag end of link table */
1179 if (count > 0) 1147 if (count > 0)
1180 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1, 1148 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1181 DESC_PTR_LNKTBL_RETURN, 0); 1149 DESC_PTR_LNKTBL_RET, 0);
1182 1150
1183 return count; 1151 return count;
1184} 1152}
@@ -1186,7 +1154,8 @@ next:
1186static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, 1154static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1187 unsigned int len, struct talitos_edesc *edesc, 1155 unsigned int len, struct talitos_edesc *edesc,
1188 struct talitos_ptr *ptr, int sg_count, 1156 struct talitos_ptr *ptr, int sg_count,
1189 unsigned int offset, int tbl_off, int elen) 1157 unsigned int offset, int tbl_off, int elen,
1158 bool force)
1190{ 1159{
1191 struct talitos_private *priv = dev_get_drvdata(dev); 1160 struct talitos_private *priv = dev_get_drvdata(dev);
1192 bool is_sec1 = has_ftr_sec1(priv); 1161 bool is_sec1 = has_ftr_sec1(priv);
@@ -1196,7 +1165,7 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1196 return 1; 1165 return 1;
1197 } 1166 }
1198 to_talitos_ptr_ext_set(ptr, elen, is_sec1); 1167 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1199 if (sg_count == 1) { 1168 if (sg_count == 1 && !force) {
1200 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); 1169 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1201 return sg_count; 1170 return sg_count;
1202 } 1171 }
@@ -1204,9 +1173,9 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1204 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); 1173 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1205 return sg_count; 1174 return sg_count;
1206 } 1175 }
1207 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen, 1176 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1208 &edesc->link_tbl[tbl_off]); 1177 &edesc->link_tbl[tbl_off]);
1209 if (sg_count == 1) { 1178 if (sg_count == 1 && !force) {
1210 /* Only one segment now, so no link tbl needed*/ 1179 /* Only one segment now, so no link tbl needed*/
1211 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1); 1180 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1212 return sg_count; 1181 return sg_count;
@@ -1224,13 +1193,14 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1224 unsigned int offset, int tbl_off) 1193 unsigned int offset, int tbl_off)
1225{ 1194{
1226 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset, 1195 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1227 tbl_off, 0); 1196 tbl_off, 0, false);
1228} 1197}
1229 1198
1230/* 1199/*
1231 * fill in and submit ipsec_esp descriptor 1200 * fill in and submit ipsec_esp descriptor
1232 */ 1201 */
1233static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, 1202static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1203 bool encrypt,
1234 void (*callback)(struct device *dev, 1204 void (*callback)(struct device *dev,
1235 struct talitos_desc *desc, 1205 struct talitos_desc *desc,
1236 void *context, int error)) 1206 void *context, int error))
@@ -1240,7 +1210,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1240 struct talitos_ctx *ctx = crypto_aead_ctx(aead); 1210 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1241 struct device *dev = ctx->dev; 1211 struct device *dev = ctx->dev;
1242 struct talitos_desc *desc = &edesc->desc; 1212 struct talitos_desc *desc = &edesc->desc;
1243 unsigned int cryptlen = areq->cryptlen; 1213 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1244 unsigned int ivsize = crypto_aead_ivsize(aead); 1214 unsigned int ivsize = crypto_aead_ivsize(aead);
1245 int tbl_off = 0; 1215 int tbl_off = 0;
1246 int sg_count, ret; 1216 int sg_count, ret;
@@ -1251,6 +1221,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1251 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP; 1221 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1252 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3]; 1222 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1253 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2]; 1223 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1224 dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1254 1225
1255 /* hmac key */ 1226 /* hmac key */
1256 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1); 1227 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
@@ -1290,7 +1261,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1290 elen = authsize; 1261 elen = authsize;
1291 1262
1292 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], 1263 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1293 sg_count, areq->assoclen, tbl_off, elen); 1264 sg_count, areq->assoclen, tbl_off, elen,
1265 false);
1294 1266
1295 if (ret > 1) { 1267 if (ret > 1) {
1296 tbl_off += ret; 1268 tbl_off += ret;
@@ -1304,55 +1276,32 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1304 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); 1276 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1305 } 1277 }
1306 1278
1307 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], 1279 if (is_ipsec_esp && encrypt)
1308 sg_count, areq->assoclen, tbl_off); 1280 elen = authsize;
1309 1281 else
1310 if (is_ipsec_esp) 1282 elen = 0;
1311 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); 1283 ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1312 1284 sg_count, areq->assoclen, tbl_off, elen,
1313 /* ICV data */ 1285 is_ipsec_esp && !encrypt);
1314 if (ret > 1) { 1286 tbl_off += ret;
1315 tbl_off += ret;
1316 edesc->icv_ool = true;
1317 sync_needed = true;
1318
1319 if (is_ipsec_esp) {
1320 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1321 int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1322 sizeof(struct talitos_ptr) + authsize;
1323
1324 /* Add an entry to the link table for ICV data */
1325 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1326 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1327 is_sec1);
1328 1287
1329 /* icv data follows link tables */ 1288 if (!encrypt && is_ipsec_esp) {
1330 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, 1289 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1331 authsize, is_sec1);
1332 } else {
1333 dma_addr_t addr = edesc->dma_link_tbl;
1334 1290
1335 if (is_sec1) 1291 /* Add an entry to the link table for ICV data */
1336 addr += areq->assoclen + cryptlen; 1292 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1337 else 1293 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1338 addr += sizeof(struct talitos_ptr) * tbl_off;
1339 1294
1340 to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1); 1295 /* icv data follows link tables */
1341 } 1296 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1297 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1298 sync_needed = true;
1299 } else if (!encrypt) {
1300 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1301 sync_needed = true;
1342 } else if (!is_ipsec_esp) { 1302 } else if (!is_ipsec_esp) {
1343 ret = talitos_sg_map(dev, areq->dst, authsize, edesc, 1303 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1344 &desc->ptr[6], sg_count, areq->assoclen + 1304 sg_count, areq->assoclen + cryptlen, tbl_off);
1345 cryptlen,
1346 tbl_off);
1347 if (ret > 1) {
1348 tbl_off += ret;
1349 edesc->icv_ool = true;
1350 sync_needed = true;
1351 } else {
1352 edesc->icv_ool = false;
1353 }
1354 } else {
1355 edesc->icv_ool = false;
1356 } 1305 }
1357 1306
1358 /* iv out */ 1307 /* iv out */
@@ -1367,7 +1316,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1367 1316
1368 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1317 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1369 if (ret != -EINPROGRESS) { 1318 if (ret != -EINPROGRESS) {
1370 ipsec_esp_unmap(dev, edesc, areq); 1319 ipsec_esp_unmap(dev, edesc, areq, encrypt);
1371 kfree(edesc); 1320 kfree(edesc);
1372 } 1321 }
1373 return ret; 1322 return ret;
@@ -1435,18 +1384,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1435 * and space for two sets of ICVs (stashed and generated) 1384 * and space for two sets of ICVs (stashed and generated)
1436 */ 1385 */
1437 alloc_len = sizeof(struct talitos_edesc); 1386 alloc_len = sizeof(struct talitos_edesc);
1438 if (src_nents || dst_nents) { 1387 if (src_nents || dst_nents || !encrypt) {
1439 if (is_sec1) 1388 if (is_sec1)
1440 dma_len = (src_nents ? src_len : 0) + 1389 dma_len = (src_nents ? src_len : 0) +
1441 (dst_nents ? dst_len : 0); 1390 (dst_nents ? dst_len : 0) + authsize;
1442 else 1391 else
1443 dma_len = (src_nents + dst_nents + 2) * 1392 dma_len = (src_nents + dst_nents + 2) *
1444 sizeof(struct talitos_ptr) + authsize * 2; 1393 sizeof(struct talitos_ptr) + authsize;
1445 alloc_len += dma_len; 1394 alloc_len += dma_len;
1446 } else { 1395 } else {
1447 dma_len = 0; 1396 dma_len = 0;
1448 alloc_len += icv_stashing ? authsize : 0;
1449 } 1397 }
1398 alloc_len += icv_stashing ? authsize : 0;
1450 1399
1451 /* if its a ahash, add space for a second desc next to the first one */ 1400 /* if its a ahash, add space for a second desc next to the first one */
1452 if (is_sec1 && !dst) 1401 if (is_sec1 && !dst)
@@ -1466,15 +1415,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1466 edesc->dst_nents = dst_nents; 1415 edesc->dst_nents = dst_nents;
1467 edesc->iv_dma = iv_dma; 1416 edesc->iv_dma = iv_dma;
1468 edesc->dma_len = dma_len; 1417 edesc->dma_len = dma_len;
1469 if (dma_len) { 1418 if (dma_len)
1470 void *addr = &edesc->link_tbl[0]; 1419 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1471
1472 if (is_sec1 && !dst)
1473 addr += sizeof(struct talitos_desc);
1474 edesc->dma_link_tbl = dma_map_single(dev, addr,
1475 edesc->dma_len, 1420 edesc->dma_len,
1476 DMA_BIDIRECTIONAL); 1421 DMA_BIDIRECTIONAL);
1477 } 1422
1478 return edesc; 1423 return edesc;
1479} 1424}
1480 1425
@@ -1485,9 +1430,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1485 unsigned int authsize = crypto_aead_authsize(authenc); 1430 unsigned int authsize = crypto_aead_authsize(authenc);
1486 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1431 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1487 unsigned int ivsize = crypto_aead_ivsize(authenc); 1432 unsigned int ivsize = crypto_aead_ivsize(authenc);
1433 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1488 1434
1489 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 1435 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1490 iv, areq->assoclen, areq->cryptlen, 1436 iv, areq->assoclen, cryptlen,
1491 authsize, ivsize, icv_stashing, 1437 authsize, ivsize, icv_stashing,
1492 areq->base.flags, encrypt); 1438 areq->base.flags, encrypt);
1493} 1439}
@@ -1506,7 +1452,7 @@ static int aead_encrypt(struct aead_request *req)
1506 /* set encrypt */ 1452 /* set encrypt */
1507 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; 1453 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1508 1454
1509 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done); 1455 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1510} 1456}
1511 1457
1512static int aead_decrypt(struct aead_request *req) 1458static int aead_decrypt(struct aead_request *req)
@@ -1516,17 +1462,15 @@ static int aead_decrypt(struct aead_request *req)
1516 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1462 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1517 struct talitos_private *priv = dev_get_drvdata(ctx->dev); 1463 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1518 struct talitos_edesc *edesc; 1464 struct talitos_edesc *edesc;
1519 struct scatterlist *sg;
1520 void *icvdata; 1465 void *icvdata;
1521 1466
1522 req->cryptlen -= authsize;
1523
1524 /* allocate extended descriptor */ 1467 /* allocate extended descriptor */
1525 edesc = aead_edesc_alloc(req, req->iv, 1, false); 1468 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1526 if (IS_ERR(edesc)) 1469 if (IS_ERR(edesc))
1527 return PTR_ERR(edesc); 1470 return PTR_ERR(edesc);
1528 1471
1529 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && 1472 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1473 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1530 ((!edesc->src_nents && !edesc->dst_nents) || 1474 ((!edesc->src_nents && !edesc->dst_nents) ||
1531 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { 1475 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1532 1476
@@ -1537,24 +1481,20 @@ static int aead_decrypt(struct aead_request *req)
1537 1481
1538 /* reset integrity check result bits */ 1482 /* reset integrity check result bits */
1539 1483
1540 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); 1484 return ipsec_esp(edesc, req, false,
1485 ipsec_esp_decrypt_hwauth_done);
1541 } 1486 }
1542 1487
1543 /* Have to check the ICV with software */ 1488 /* Have to check the ICV with software */
1544 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; 1489 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1545 1490
1546 /* stash incoming ICV for later cmp with ICV generated by the h/w */ 1491 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1547 if (edesc->dma_len) 1492 icvdata = edesc->buf + edesc->dma_len;
1548 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1549 edesc->dst_nents + 2];
1550 else
1551 icvdata = &edesc->link_tbl[0];
1552 1493
1553 sg = sg_last(req->src, edesc->src_nents ? : 1); 1494 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1495 req->assoclen + req->cryptlen - authsize);
1554 1496
1555 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize); 1497 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1556
1557 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1558} 1498}
1559 1499
1560static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, 1500static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1605,6 +1545,18 @@ static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1605 return ablkcipher_setkey(cipher, key, keylen); 1545 return ablkcipher_setkey(cipher, key, keylen);
1606} 1546}
1607 1547
1548static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1549 const u8 *key, unsigned int keylen)
1550{
1551 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1552 keylen == AES_KEYSIZE_256)
1553 return ablkcipher_setkey(cipher, key, keylen);
1554
1555 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1556
1557 return -EINVAL;
1558}
1559
1608static void common_nonsnoop_unmap(struct device *dev, 1560static void common_nonsnoop_unmap(struct device *dev,
1609 struct talitos_edesc *edesc, 1561 struct talitos_edesc *edesc,
1610 struct ablkcipher_request *areq) 1562 struct ablkcipher_request *areq)
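ablkcipher_aes_setkey() now rejects anything that is not a valid AES key length before handing off to the generic ablkcipher_setkey(), setting CRYPTO_TFM_RES_BAD_KEY_LEN on failure; previously the generic path only checked against TALITOS_MAX_KEY_SIZE, so odd lengths slipped through, presumably one of the fuzz-test failures called out in the merge description. The length check itself, standalone:

    #include <errno.h>
    #include <stdio.h>

    #define AES_KEYSIZE_128 16
    #define AES_KEYSIZE_192 24
    #define AES_KEYSIZE_256 32

    static int aes_keylen_ok(unsigned int keylen)
    {
            return (keylen == AES_KEYSIZE_128 ||
                    keylen == AES_KEYSIZE_192 ||
                    keylen == AES_KEYSIZE_256) ? 0 : -EINVAL;
    }

    int main(void)
    {
            printf("%d %d %d\n", aes_keylen_ok(16), aes_keylen_ok(20), aes_keylen_ok(32));
            return 0;
    }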
@@ -1624,11 +1576,15 @@ static void ablkcipher_done(struct device *dev,
1624 int err) 1576 int err)
1625{ 1577{
1626 struct ablkcipher_request *areq = context; 1578 struct ablkcipher_request *areq = context;
1579 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1580 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1581 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1627 struct talitos_edesc *edesc; 1582 struct talitos_edesc *edesc;
1628 1583
1629 edesc = container_of(desc, struct talitos_edesc, desc); 1584 edesc = container_of(desc, struct talitos_edesc, desc);
1630 1585
1631 common_nonsnoop_unmap(dev, edesc, areq); 1586 common_nonsnoop_unmap(dev, edesc, areq);
1587 memcpy(areq->info, ctx->iv, ivsize);
1632 1588
1633 kfree(edesc); 1589 kfree(edesc);
1634 1590
@@ -1723,6 +1679,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1723 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1679 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1724 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1680 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1725 struct talitos_edesc *edesc; 1681 struct talitos_edesc *edesc;
1682 unsigned int blocksize =
1683 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1684
1685 if (!areq->nbytes)
1686 return 0;
1687
1688 if (areq->nbytes % blocksize)
1689 return -EINVAL;
1726 1690
1727 /* allocate extended descriptor */ 1691 /* allocate extended descriptor */
1728 edesc = ablkcipher_edesc_alloc(areq, true); 1692 edesc = ablkcipher_edesc_alloc(areq, true);
@@ -1740,6 +1704,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1740 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1704 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1741 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1705 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1742 struct talitos_edesc *edesc; 1706 struct talitos_edesc *edesc;
1707 unsigned int blocksize =
1708 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1709
1710 if (!areq->nbytes)
1711 return 0;
1712
1713 if (areq->nbytes % blocksize)
1714 return -EINVAL;
1743 1715
1744 /* allocate extended descriptor */ 1716 /* allocate extended descriptor */
1745 edesc = ablkcipher_edesc_alloc(areq, false); 1717 edesc = ablkcipher_edesc_alloc(areq, false);
@@ -1759,14 +1731,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
1759 struct talitos_private *priv = dev_get_drvdata(dev); 1731 struct talitos_private *priv = dev_get_drvdata(dev);
1760 bool is_sec1 = has_ftr_sec1(priv); 1732 bool is_sec1 = has_ftr_sec1(priv);
1761 struct talitos_desc *desc = &edesc->desc; 1733 struct talitos_desc *desc = &edesc->desc;
1762 struct talitos_desc *desc2 = desc + 1; 1734 struct talitos_desc *desc2 = (struct talitos_desc *)
1735 (edesc->buf + edesc->dma_len);
1763 1736
1764 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1737 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1765 if (desc->next_desc && 1738 if (desc->next_desc &&
1766 desc->ptr[5].ptr != desc2->ptr[5].ptr) 1739 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1767 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); 1740 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1768 1741
1769 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); 1742 if (req_ctx->psrc)
1743 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1770 1744
1771 /* When using hashctx-in, must unmap it. */ 1745 /* When using hashctx-in, must unmap it. */
1772 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) 1746 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@ -1833,7 +1807,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1833 1807
1834static int common_nonsnoop_hash(struct talitos_edesc *edesc, 1808static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1835 struct ahash_request *areq, unsigned int length, 1809 struct ahash_request *areq, unsigned int length,
1836 unsigned int offset,
1837 void (*callback) (struct device *dev, 1810 void (*callback) (struct device *dev,
1838 struct talitos_desc *desc, 1811 struct talitos_desc *desc,
1839 void *context, int error)) 1812 void *context, int error))
@@ -1872,9 +1845,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1872 1845
1873 sg_count = edesc->src_nents ?: 1; 1846 sg_count = edesc->src_nents ?: 1;
1874 if (is_sec1 && sg_count > 1) 1847 if (is_sec1 && sg_count > 1)
1875 sg_pcopy_to_buffer(req_ctx->psrc, sg_count, 1848 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1876 edesc->buf + sizeof(struct talitos_desc),
1877 length, req_ctx->nbuf);
1878 else if (length) 1849 else if (length)
1879 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, 1850 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1880 DMA_TO_DEVICE); 1851 DMA_TO_DEVICE);
@@ -1887,7 +1858,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1887 DMA_TO_DEVICE); 1858 DMA_TO_DEVICE);
1888 } else { 1859 } else {
1889 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1860 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1890 &desc->ptr[3], sg_count, offset, 0); 1861 &desc->ptr[3], sg_count, 0, 0);
1891 if (sg_count > 1) 1862 if (sg_count > 1)
1892 sync_needed = true; 1863 sync_needed = true;
1893 } 1864 }
@@ -1911,7 +1882,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1911 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); 1882 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1912 1883
1913 if (is_sec1 && req_ctx->nbuf && length) { 1884 if (is_sec1 && req_ctx->nbuf && length) {
1914 struct talitos_desc *desc2 = desc + 1; 1885 struct talitos_desc *desc2 = (struct talitos_desc *)
1886 (edesc->buf + edesc->dma_len);
1915 dma_addr_t next_desc; 1887 dma_addr_t next_desc;
1916 1888
1917 memset(desc2, 0, sizeof(*desc2)); 1889 memset(desc2, 0, sizeof(*desc2));
@@ -1932,7 +1904,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1932 DMA_TO_DEVICE); 1904 DMA_TO_DEVICE);
1933 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); 1905 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1934 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1906 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1935 &desc2->ptr[3], sg_count, offset, 0); 1907 &desc2->ptr[3], sg_count, 0, 0);
1936 if (sg_count > 1) 1908 if (sg_count > 1)
1937 sync_needed = true; 1909 sync_needed = true;
1938 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); 1910 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
@@ -2043,7 +2015,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2043 struct device *dev = ctx->dev; 2015 struct device *dev = ctx->dev;
2044 struct talitos_private *priv = dev_get_drvdata(dev); 2016 struct talitos_private *priv = dev_get_drvdata(dev);
2045 bool is_sec1 = has_ftr_sec1(priv); 2017 bool is_sec1 = has_ftr_sec1(priv);
2046 int offset = 0;
2047 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; 2018 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2048 2019
2049 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { 2020 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
@@ -2083,6 +2054,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2083 sg_chain(req_ctx->bufsl, 2, areq->src); 2054 sg_chain(req_ctx->bufsl, 2, areq->src);
2084 req_ctx->psrc = req_ctx->bufsl; 2055 req_ctx->psrc = req_ctx->bufsl;
2085 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { 2056 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2057 int offset;
2058
2086 if (nbytes_to_hash > blocksize) 2059 if (nbytes_to_hash > blocksize)
2087 offset = blocksize - req_ctx->nbuf; 2060 offset = blocksize - req_ctx->nbuf;
2088 else 2061 else
@@ -2095,7 +2068,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2095 sg_copy_to_buffer(areq->src, nents, 2068 sg_copy_to_buffer(areq->src, nents,
2096 ctx_buf + req_ctx->nbuf, offset); 2069 ctx_buf + req_ctx->nbuf, offset);
2097 req_ctx->nbuf += offset; 2070 req_ctx->nbuf += offset;
2098 req_ctx->psrc = areq->src; 2071 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2072 offset);
2099 } else 2073 } else
2100 req_ctx->psrc = areq->src; 2074 req_ctx->psrc = areq->src;
2101 2075
@@ -2135,8 +2109,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2135 if (ctx->keylen && (req_ctx->first || req_ctx->last)) 2109 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2136 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; 2110 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2137 2111
2138 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset, 2112 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2139 ahash_done);
2140} 2113}
2141 2114
2142static int ahash_update(struct ahash_request *areq) 2115static int ahash_update(struct ahash_request *areq)
@@ -2339,7 +2312,7 @@ static struct talitos_alg_template driver_algs[] = {
2339 .base = { 2312 .base = {
2340 .cra_name = "authenc(hmac(sha1),cbc(aes))", 2313 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2341 .cra_driver_name = "authenc-hmac-sha1-" 2314 .cra_driver_name = "authenc-hmac-sha1-"
2342 "cbc-aes-talitos", 2315 "cbc-aes-talitos-hsna",
2343 .cra_blocksize = AES_BLOCK_SIZE, 2316 .cra_blocksize = AES_BLOCK_SIZE,
2344 .cra_flags = CRYPTO_ALG_ASYNC, 2317 .cra_flags = CRYPTO_ALG_ASYNC,
2345 }, 2318 },
@@ -2384,7 +2357,7 @@ static struct talitos_alg_template driver_algs[] = {
2384 .cra_name = "authenc(hmac(sha1)," 2357 .cra_name = "authenc(hmac(sha1),"
2385 "cbc(des3_ede))", 2358 "cbc(des3_ede))",
2386 .cra_driver_name = "authenc-hmac-sha1-" 2359 .cra_driver_name = "authenc-hmac-sha1-"
2387 "cbc-3des-talitos", 2360 "cbc-3des-talitos-hsna",
2388 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2361 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2389 .cra_flags = CRYPTO_ALG_ASYNC, 2362 .cra_flags = CRYPTO_ALG_ASYNC,
2390 }, 2363 },
@@ -2427,7 +2400,7 @@ static struct talitos_alg_template driver_algs[] = {
2427 .base = { 2400 .base = {
2428 .cra_name = "authenc(hmac(sha224),cbc(aes))", 2401 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2429 .cra_driver_name = "authenc-hmac-sha224-" 2402 .cra_driver_name = "authenc-hmac-sha224-"
2430 "cbc-aes-talitos", 2403 "cbc-aes-talitos-hsna",
2431 .cra_blocksize = AES_BLOCK_SIZE, 2404 .cra_blocksize = AES_BLOCK_SIZE,
2432 .cra_flags = CRYPTO_ALG_ASYNC, 2405 .cra_flags = CRYPTO_ALG_ASYNC,
2433 }, 2406 },
@@ -2472,7 +2445,7 @@ static struct talitos_alg_template driver_algs[] = {
2472 .cra_name = "authenc(hmac(sha224)," 2445 .cra_name = "authenc(hmac(sha224),"
2473 "cbc(des3_ede))", 2446 "cbc(des3_ede))",
2474 .cra_driver_name = "authenc-hmac-sha224-" 2447 .cra_driver_name = "authenc-hmac-sha224-"
2475 "cbc-3des-talitos", 2448 "cbc-3des-talitos-hsna",
2476 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2449 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2477 .cra_flags = CRYPTO_ALG_ASYNC, 2450 .cra_flags = CRYPTO_ALG_ASYNC,
2478 }, 2451 },
@@ -2515,7 +2488,7 @@ static struct talitos_alg_template driver_algs[] = {
2515 .base = { 2488 .base = {
2516 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2489 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2517 .cra_driver_name = "authenc-hmac-sha256-" 2490 .cra_driver_name = "authenc-hmac-sha256-"
2518 "cbc-aes-talitos", 2491 "cbc-aes-talitos-hsna",
2519 .cra_blocksize = AES_BLOCK_SIZE, 2492 .cra_blocksize = AES_BLOCK_SIZE,
2520 .cra_flags = CRYPTO_ALG_ASYNC, 2493 .cra_flags = CRYPTO_ALG_ASYNC,
2521 }, 2494 },
@@ -2560,7 +2533,7 @@ static struct talitos_alg_template driver_algs[] = {
2560 .cra_name = "authenc(hmac(sha256)," 2533 .cra_name = "authenc(hmac(sha256),"
2561 "cbc(des3_ede))", 2534 "cbc(des3_ede))",
2562 .cra_driver_name = "authenc-hmac-sha256-" 2535 .cra_driver_name = "authenc-hmac-sha256-"
2563 "cbc-3des-talitos", 2536 "cbc-3des-talitos-hsna",
2564 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2537 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2565 .cra_flags = CRYPTO_ALG_ASYNC, 2538 .cra_flags = CRYPTO_ALG_ASYNC,
2566 }, 2539 },
@@ -2689,7 +2662,7 @@ static struct talitos_alg_template driver_algs[] = {
2689 .base = { 2662 .base = {
2690 .cra_name = "authenc(hmac(md5),cbc(aes))", 2663 .cra_name = "authenc(hmac(md5),cbc(aes))",
2691 .cra_driver_name = "authenc-hmac-md5-" 2664 .cra_driver_name = "authenc-hmac-md5-"
2692 "cbc-aes-talitos", 2665 "cbc-aes-talitos-hsna",
2693 .cra_blocksize = AES_BLOCK_SIZE, 2666 .cra_blocksize = AES_BLOCK_SIZE,
2694 .cra_flags = CRYPTO_ALG_ASYNC, 2667 .cra_flags = CRYPTO_ALG_ASYNC,
2695 }, 2668 },
@@ -2732,7 +2705,7 @@ static struct talitos_alg_template driver_algs[] = {
2732 .base = { 2705 .base = {
2733 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 2706 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2734 .cra_driver_name = "authenc-hmac-md5-" 2707 .cra_driver_name = "authenc-hmac-md5-"
2735 "cbc-3des-talitos", 2708 "cbc-3des-talitos-hsna",
2736 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2709 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2737 .cra_flags = CRYPTO_ALG_ASYNC, 2710 .cra_flags = CRYPTO_ALG_ASYNC,
2738 }, 2711 },
@@ -2760,7 +2733,7 @@ static struct talitos_alg_template driver_algs[] = {
2760 .cra_ablkcipher = { 2733 .cra_ablkcipher = {
2761 .min_keysize = AES_MIN_KEY_SIZE, 2734 .min_keysize = AES_MIN_KEY_SIZE,
2762 .max_keysize = AES_MAX_KEY_SIZE, 2735 .max_keysize = AES_MAX_KEY_SIZE,
2763 .ivsize = AES_BLOCK_SIZE, 2736 .setkey = ablkcipher_aes_setkey,
2764 } 2737 }
2765 }, 2738 },
2766 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2739 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2777,6 +2750,7 @@ static struct talitos_alg_template driver_algs[] = {
2777 .min_keysize = AES_MIN_KEY_SIZE, 2750 .min_keysize = AES_MIN_KEY_SIZE,
2778 .max_keysize = AES_MAX_KEY_SIZE, 2751 .max_keysize = AES_MAX_KEY_SIZE,
2779 .ivsize = AES_BLOCK_SIZE, 2752 .ivsize = AES_BLOCK_SIZE,
2753 .setkey = ablkcipher_aes_setkey,
2780 } 2754 }
2781 }, 2755 },
2782 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2756 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2787,13 +2761,14 @@ static struct talitos_alg_template driver_algs[] = {
2787 .alg.crypto = { 2761 .alg.crypto = {
2788 .cra_name = "ctr(aes)", 2762 .cra_name = "ctr(aes)",
2789 .cra_driver_name = "ctr-aes-talitos", 2763 .cra_driver_name = "ctr-aes-talitos",
2790 .cra_blocksize = AES_BLOCK_SIZE, 2764 .cra_blocksize = 1,
2791 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 2765 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2792 CRYPTO_ALG_ASYNC, 2766 CRYPTO_ALG_ASYNC,
2793 .cra_ablkcipher = { 2767 .cra_ablkcipher = {
2794 .min_keysize = AES_MIN_KEY_SIZE, 2768 .min_keysize = AES_MIN_KEY_SIZE,
2795 .max_keysize = AES_MAX_KEY_SIZE, 2769 .max_keysize = AES_MAX_KEY_SIZE,
2796 .ivsize = AES_BLOCK_SIZE, 2770 .ivsize = AES_BLOCK_SIZE,
2771 .setkey = ablkcipher_aes_setkey,
2797 } 2772 }
2798 }, 2773 },
2799 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | 2774 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
@@ -2810,7 +2785,6 @@ static struct talitos_alg_template driver_algs[] = {
2810 .cra_ablkcipher = { 2785 .cra_ablkcipher = {
2811 .min_keysize = DES_KEY_SIZE, 2786 .min_keysize = DES_KEY_SIZE,
2812 .max_keysize = DES_KEY_SIZE, 2787 .max_keysize = DES_KEY_SIZE,
2813 .ivsize = DES_BLOCK_SIZE,
2814 .setkey = ablkcipher_des_setkey, 2788 .setkey = ablkcipher_des_setkey,
2815 } 2789 }
2816 }, 2790 },
@@ -2845,7 +2819,6 @@ static struct talitos_alg_template driver_algs[] = {
2845 .cra_ablkcipher = { 2819 .cra_ablkcipher = {
2846 .min_keysize = DES3_EDE_KEY_SIZE, 2820 .min_keysize = DES3_EDE_KEY_SIZE,
2847 .max_keysize = DES3_EDE_KEY_SIZE, 2821 .max_keysize = DES3_EDE_KEY_SIZE,
2848 .ivsize = DES3_EDE_BLOCK_SIZE,
2849 .setkey = ablkcipher_des3_setkey, 2822 .setkey = ablkcipher_des3_setkey,
2850 } 2823 }
2851 }, 2824 },
@@ -3270,7 +3243,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3270 alg->cra_priority = t_alg->algt.priority; 3243 alg->cra_priority = t_alg->algt.priority;
3271 else 3244 else
3272 alg->cra_priority = TALITOS_CRA_PRIORITY; 3245 alg->cra_priority = TALITOS_CRA_PRIORITY;
3273 alg->cra_alignmask = 0; 3246 if (has_ftr_sec1(priv))
3247 alg->cra_alignmask = 3;
3248 else
3249 alg->cra_alignmask = 0;
3274 alg->cra_ctxsize = sizeof(struct talitos_ctx); 3250 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3275 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; 3251 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3276 3252
@@ -3418,7 +3394,7 @@ static int talitos_probe(struct platform_device *ofdev)
3418 if (err) 3394 if (err)
3419 goto err_out; 3395 goto err_out;
3420 3396
3421 if (of_device_is_compatible(np, "fsl,sec1.0")) { 3397 if (has_ftr_sec1(priv)) {
3422 if (priv->num_channels == 1) 3398 if (priv->num_channels == 1)
3423 tasklet_init(&priv->done_task[0], talitos1_done_ch0, 3399 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3424 (unsigned long)dev); 3400 (unsigned long)dev);
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index a65a63e0d6c1..1469b956948a 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,31 +1,8 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 * Freescale SEC (talitos) device register and descriptor header defines 3 * Freescale SEC (talitos) device register and descriptor header defines
3 * 4 *
4 * Copyright (c) 2006-2011 Freescale Semiconductor, Inc. 5 * Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 */ 6 */
30 7
31#define TALITOS_TIMEOUT 100000 8#define TALITOS_TIMEOUT 100000
@@ -65,6 +42,34 @@ struct talitos_desc {
65 42
66#define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) 43#define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32))
67 44
45/*
46 * talitos_edesc - s/w-extended descriptor
47 * @src_nents: number of segments in input scatterlist
48 * @dst_nents: number of segments in output scatterlist
49 * @iv_dma: dma address of iv for checking continuity and link table
50 * @dma_len: length of dma mapped link_tbl space
51 * @dma_link_tbl: bus physical address of link_tbl/buf
52 * @desc: h/w descriptor
53 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 54 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
55 *
 56 * if decrypting (with authcheck), or if either src_nents or dst_nents
 57 * is greater than 1, an integrity check value is concatenated to the end
 58 * of the link_tbl data
59 */
60struct talitos_edesc {
61 int src_nents;
62 int dst_nents;
63 dma_addr_t iv_dma;
64 int dma_len;
65 dma_addr_t dma_link_tbl;
66 struct talitos_desc desc;
67 union {
68 struct talitos_ptr link_tbl[0];
69 u8 buf[0];
70 };
71};
72
68/** 73/**
69 * talitos_request - descriptor submission request 74 * talitos_request - descriptor submission request
70 * @desc: descriptor pointer (kernel virtual) 75 * @desc: descriptor pointer (kernel virtual)
@@ -150,12 +155,6 @@ struct talitos_private {
150 bool rng_registered; 155 bool rng_registered;
151}; 156};
152 157
153extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
154 void (*callback)(struct device *dev,
155 struct talitos_desc *desc,
156 void *context, int error),
157 void *context);
158
159/* .features flag */ 158/* .features flag */
160#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 159#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
161#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 160#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
@@ -170,13 +169,11 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
170 */ 169 */
171static inline bool has_ftr_sec1(struct talitos_private *priv) 170static inline bool has_ftr_sec1(struct talitos_private *priv)
172{ 171{
173#if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2) 172 if (IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1) &&
174 return priv->features & TALITOS_FTR_SEC1 ? true : false; 173 IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS2))
175#elif defined(CONFIG_CRYPTO_DEV_TALITOS1) 174 return priv->features & TALITOS_FTR_SEC1;
176 return true; 175
177#else 176 return IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1);
178 return false;
179#endif
180} 177}
181 178
182/* 179/*
@@ -412,5 +409,5 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
412 409
413/* link table extent field bits */ 410/* link table extent field bits */
414#define DESC_PTR_LNKTBL_JUMP 0x80 411#define DESC_PTR_LNKTBL_JUMP 0x80
415#define DESC_PTR_LNKTBL_RETURN 0x02 412#define DESC_PTR_LNKTBL_RET 0x02
416#define DESC_PTR_LNKTBL_NEXT 0x01 413#define DESC_PTR_LNKTBL_NEXT 0x01
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c7e515a1bc97..d88084447f1c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -7,64 +7,52 @@
7 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> 7 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
8 */ 8 */
9 9
10#include <linux/types.h>
11#include <linux/err.h>
12#include <linux/crypto.h>
13#include <linux/delay.h>
14#include <asm/simd.h> 10#include <asm/simd.h>
15#include <asm/switch_to.h> 11#include <asm/switch_to.h>
16#include <crypto/aes.h> 12#include <crypto/aes.h>
17#include <crypto/internal/simd.h> 13#include <crypto/internal/simd.h>
18#include <crypto/scatterwalk.h> 14#include <crypto/internal/skcipher.h>
19#include <crypto/skcipher.h>
20 15
21#include "aesp8-ppc.h" 16#include "aesp8-ppc.h"
22 17
23struct p8_aes_cbc_ctx { 18struct p8_aes_cbc_ctx {
24 struct crypto_sync_skcipher *fallback; 19 struct crypto_skcipher *fallback;
25 struct aes_key enc_key; 20 struct aes_key enc_key;
26 struct aes_key dec_key; 21 struct aes_key dec_key;
27}; 22};
28 23
29static int p8_aes_cbc_init(struct crypto_tfm *tfm) 24static int p8_aes_cbc_init(struct crypto_skcipher *tfm)
30{ 25{
31 const char *alg = crypto_tfm_alg_name(tfm); 26 struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
32 struct crypto_sync_skcipher *fallback; 27 struct crypto_skcipher *fallback;
33 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
34
35 fallback = crypto_alloc_sync_skcipher(alg, 0,
36 CRYPTO_ALG_NEED_FALLBACK);
37 28
29 fallback = crypto_alloc_skcipher("cbc(aes)", 0,
30 CRYPTO_ALG_NEED_FALLBACK |
31 CRYPTO_ALG_ASYNC);
38 if (IS_ERR(fallback)) { 32 if (IS_ERR(fallback)) {
39 printk(KERN_ERR 33 pr_err("Failed to allocate cbc(aes) fallback: %ld\n",
40 "Failed to allocate transformation for '%s': %ld\n", 34 PTR_ERR(fallback));
41 alg, PTR_ERR(fallback));
42 return PTR_ERR(fallback); 35 return PTR_ERR(fallback);
43 } 36 }
44 37
45 crypto_sync_skcipher_set_flags( 38 crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
46 fallback, 39 crypto_skcipher_reqsize(fallback));
47 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
48 ctx->fallback = fallback; 40 ctx->fallback = fallback;
49
50 return 0; 41 return 0;
51} 42}
52 43
53static void p8_aes_cbc_exit(struct crypto_tfm *tfm) 44static void p8_aes_cbc_exit(struct crypto_skcipher *tfm)
54{ 45{
55 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 46 struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
56 47
57 if (ctx->fallback) { 48 crypto_free_skcipher(ctx->fallback);
58 crypto_free_sync_skcipher(ctx->fallback);
59 ctx->fallback = NULL;
60 }
61} 49}
62 50
63static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, 51static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
64 unsigned int keylen) 52 unsigned int keylen)
65{ 53{
54 struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
66 int ret; 55 int ret;
67 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
68 56
69 preempt_disable(); 57 preempt_disable();
70 pagefault_disable(); 58 pagefault_disable();
@@ -75,108 +63,71 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
75 pagefault_enable(); 63 pagefault_enable();
76 preempt_enable(); 64 preempt_enable();
77 65
78 ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 66 ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
79 67
80 return ret ? -EINVAL : 0; 68 return ret ? -EINVAL : 0;
81} 69}
82 70
83static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, 71static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
84 struct scatterlist *dst,
85 struct scatterlist *src, unsigned int nbytes)
86{ 72{
73 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
74 const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
75 struct skcipher_walk walk;
76 unsigned int nbytes;
87 int ret; 77 int ret;
88 struct blkcipher_walk walk;
89 struct p8_aes_cbc_ctx *ctx =
90 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
91 78
92 if (!crypto_simd_usable()) { 79 if (!crypto_simd_usable()) {
93 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 80 struct skcipher_request *subreq = skcipher_request_ctx(req);
94 skcipher_request_set_sync_tfm(req, ctx->fallback); 81
95 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 82 *subreq = *req;
96 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 83 skcipher_request_set_tfm(subreq, ctx->fallback);
97 ret = crypto_skcipher_encrypt(req); 84 return enc ? crypto_skcipher_encrypt(subreq) :
98 skcipher_request_zero(req); 85 crypto_skcipher_decrypt(subreq);
99 } else {
100 blkcipher_walk_init(&walk, dst, src, nbytes);
101 ret = blkcipher_walk_virt(desc, &walk);
102 while ((nbytes = walk.nbytes)) {
103 preempt_disable();
104 pagefault_disable();
105 enable_kernel_vsx();
106 aes_p8_cbc_encrypt(walk.src.virt.addr,
107 walk.dst.virt.addr,
108 nbytes & AES_BLOCK_MASK,
109 &ctx->enc_key, walk.iv, 1);
110 disable_kernel_vsx();
111 pagefault_enable();
112 preempt_enable();
113
114 nbytes &= AES_BLOCK_SIZE - 1;
115 ret = blkcipher_walk_done(desc, &walk, nbytes);
116 }
117 } 86 }
118 87
88 ret = skcipher_walk_virt(&walk, req, false);
89 while ((nbytes = walk.nbytes) != 0) {
90 preempt_disable();
91 pagefault_disable();
92 enable_kernel_vsx();
93 aes_p8_cbc_encrypt(walk.src.virt.addr,
94 walk.dst.virt.addr,
95 round_down(nbytes, AES_BLOCK_SIZE),
96 enc ? &ctx->enc_key : &ctx->dec_key,
97 walk.iv, enc);
98 disable_kernel_vsx();
99 pagefault_enable();
100 preempt_enable();
101
102 ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
103 }
119 return ret; 104 return ret;
120} 105}
121 106
122static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, 107static int p8_aes_cbc_encrypt(struct skcipher_request *req)
123 struct scatterlist *dst,
124 struct scatterlist *src, unsigned int nbytes)
125{ 108{
126 int ret; 109 return p8_aes_cbc_crypt(req, 1);
127 struct blkcipher_walk walk;
128 struct p8_aes_cbc_ctx *ctx =
129 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
130
131 if (!crypto_simd_usable()) {
132 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
133 skcipher_request_set_sync_tfm(req, ctx->fallback);
134 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
135 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
136 ret = crypto_skcipher_decrypt(req);
137 skcipher_request_zero(req);
138 } else {
139 blkcipher_walk_init(&walk, dst, src, nbytes);
140 ret = blkcipher_walk_virt(desc, &walk);
141 while ((nbytes = walk.nbytes)) {
142 preempt_disable();
143 pagefault_disable();
144 enable_kernel_vsx();
145 aes_p8_cbc_encrypt(walk.src.virt.addr,
146 walk.dst.virt.addr,
147 nbytes & AES_BLOCK_MASK,
148 &ctx->dec_key, walk.iv, 0);
149 disable_kernel_vsx();
150 pagefault_enable();
151 preempt_enable();
152
153 nbytes &= AES_BLOCK_SIZE - 1;
154 ret = blkcipher_walk_done(desc, &walk, nbytes);
155 }
156 }
157
158 return ret;
159} 110}
160 111
112static int p8_aes_cbc_decrypt(struct skcipher_request *req)
113{
114 return p8_aes_cbc_crypt(req, 0);
115}
161 116
162struct crypto_alg p8_aes_cbc_alg = { 117struct skcipher_alg p8_aes_cbc_alg = {
163 .cra_name = "cbc(aes)", 118 .base.cra_name = "cbc(aes)",
164 .cra_driver_name = "p8_aes_cbc", 119 .base.cra_driver_name = "p8_aes_cbc",
165 .cra_module = THIS_MODULE, 120 .base.cra_module = THIS_MODULE,
166 .cra_priority = 2000, 121 .base.cra_priority = 2000,
167 .cra_type = &crypto_blkcipher_type, 122 .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
168 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, 123 .base.cra_blocksize = AES_BLOCK_SIZE,
169 .cra_alignmask = 0, 124 .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
170 .cra_blocksize = AES_BLOCK_SIZE, 125 .setkey = p8_aes_cbc_setkey,
171 .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), 126 .encrypt = p8_aes_cbc_encrypt,
172 .cra_init = p8_aes_cbc_init, 127 .decrypt = p8_aes_cbc_decrypt,
173 .cra_exit = p8_aes_cbc_exit, 128 .init = p8_aes_cbc_init,
174 .cra_blkcipher = { 129 .exit = p8_aes_cbc_exit,
175 .ivsize = AES_BLOCK_SIZE, 130 .min_keysize = AES_MIN_KEY_SIZE,
176 .min_keysize = AES_MIN_KEY_SIZE, 131 .max_keysize = AES_MAX_KEY_SIZE,
177 .max_keysize = AES_MAX_KEY_SIZE, 132 .ivsize = AES_BLOCK_SIZE,
178 .setkey = p8_aes_cbc_setkey,
179 .encrypt = p8_aes_cbc_encrypt,
180 .decrypt = p8_aes_cbc_decrypt,
181 },
182}; 133};
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index dd017ef42fa9..79ba062ee1c1 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -7,62 +7,51 @@
7 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> 7 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
8 */ 8 */
9 9
10#include <linux/types.h>
11#include <linux/err.h>
12#include <linux/crypto.h>
13#include <linux/delay.h>
14#include <asm/simd.h> 10#include <asm/simd.h>
15#include <asm/switch_to.h> 11#include <asm/switch_to.h>
16#include <crypto/aes.h> 12#include <crypto/aes.h>
17#include <crypto/internal/simd.h> 13#include <crypto/internal/simd.h>
18#include <crypto/scatterwalk.h> 14#include <crypto/internal/skcipher.h>
19#include <crypto/skcipher.h>
20 15
21#include "aesp8-ppc.h" 16#include "aesp8-ppc.h"
22 17
23struct p8_aes_ctr_ctx { 18struct p8_aes_ctr_ctx {
24 struct crypto_sync_skcipher *fallback; 19 struct crypto_skcipher *fallback;
25 struct aes_key enc_key; 20 struct aes_key enc_key;
26}; 21};
27 22
28static int p8_aes_ctr_init(struct crypto_tfm *tfm) 23static int p8_aes_ctr_init(struct crypto_skcipher *tfm)
29{ 24{
30 const char *alg = crypto_tfm_alg_name(tfm); 25 struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
31 struct crypto_sync_skcipher *fallback; 26 struct crypto_skcipher *fallback;
32 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
33 27
34 fallback = crypto_alloc_sync_skcipher(alg, 0, 28 fallback = crypto_alloc_skcipher("ctr(aes)", 0,
35 CRYPTO_ALG_NEED_FALLBACK); 29 CRYPTO_ALG_NEED_FALLBACK |
30 CRYPTO_ALG_ASYNC);
36 if (IS_ERR(fallback)) { 31 if (IS_ERR(fallback)) {
37 printk(KERN_ERR 32 pr_err("Failed to allocate ctr(aes) fallback: %ld\n",
38 "Failed to allocate transformation for '%s': %ld\n", 33 PTR_ERR(fallback));
39 alg, PTR_ERR(fallback));
40 return PTR_ERR(fallback); 34 return PTR_ERR(fallback);
41 } 35 }
42 36
43 crypto_sync_skcipher_set_flags( 37 crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
44 fallback, 38 crypto_skcipher_reqsize(fallback));
45 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
46 ctx->fallback = fallback; 39 ctx->fallback = fallback;
47
48 return 0; 40 return 0;
49} 41}
50 42
51static void p8_aes_ctr_exit(struct crypto_tfm *tfm) 43static void p8_aes_ctr_exit(struct crypto_skcipher *tfm)
52{ 44{
53 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); 45 struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
54 46
55 if (ctx->fallback) { 47 crypto_free_skcipher(ctx->fallback);
56 crypto_free_sync_skcipher(ctx->fallback);
57 ctx->fallback = NULL;
58 }
59} 48}
60 49
61static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, 50static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
62 unsigned int keylen) 51 unsigned int keylen)
63{ 52{
53 struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
64 int ret; 54 int ret;
65 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
66 55
67 preempt_disable(); 56 preempt_disable();
68 pagefault_disable(); 57 pagefault_disable();
@@ -72,13 +61,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
72 pagefault_enable(); 61 pagefault_enable();
73 preempt_enable(); 62 preempt_enable();
74 63
75 ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 64 ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
76 65
77 return ret ? -EINVAL : 0; 66 return ret ? -EINVAL : 0;
78} 67}
79 68
80static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, 69static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
81 struct blkcipher_walk *walk) 70 struct skcipher_walk *walk)
82{ 71{
83 u8 *ctrblk = walk->iv; 72 u8 *ctrblk = walk->iv;
84 u8 keystream[AES_BLOCK_SIZE]; 73 u8 keystream[AES_BLOCK_SIZE];
@@ -98,77 +87,63 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
98 crypto_inc(ctrblk, AES_BLOCK_SIZE); 87 crypto_inc(ctrblk, AES_BLOCK_SIZE);
99} 88}
100 89
101static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, 90static int p8_aes_ctr_crypt(struct skcipher_request *req)
102 struct scatterlist *dst,
103 struct scatterlist *src, unsigned int nbytes)
104{ 91{
92 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
93 const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
94 struct skcipher_walk walk;
95 unsigned int nbytes;
105 int ret; 96 int ret;
106 u64 inc;
107 struct blkcipher_walk walk;
108 struct p8_aes_ctr_ctx *ctx =
109 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
110 97
111 if (!crypto_simd_usable()) { 98 if (!crypto_simd_usable()) {
112 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 99 struct skcipher_request *subreq = skcipher_request_ctx(req);
113 skcipher_request_set_sync_tfm(req, ctx->fallback); 100
114 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 101 *subreq = *req;
115 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 102 skcipher_request_set_tfm(subreq, ctx->fallback);
116 ret = crypto_skcipher_encrypt(req); 103 return crypto_skcipher_encrypt(subreq);
117 skcipher_request_zero(req);
118 } else {
119 blkcipher_walk_init(&walk, dst, src, nbytes);
120 ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
121 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
122 preempt_disable();
123 pagefault_disable();
124 enable_kernel_vsx();
125 aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
126 walk.dst.virt.addr,
127 (nbytes &
128 AES_BLOCK_MASK) /
129 AES_BLOCK_SIZE,
130 &ctx->enc_key,
131 walk.iv);
132 disable_kernel_vsx();
133 pagefault_enable();
134 preempt_enable();
135
136 /* We need to update IV mostly for last bytes/round */
137 inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
138 if (inc > 0)
139 while (inc--)
140 crypto_inc(walk.iv, AES_BLOCK_SIZE);
141
142 nbytes &= AES_BLOCK_SIZE - 1;
143 ret = blkcipher_walk_done(desc, &walk, nbytes);
144 }
145 if (walk.nbytes) {
146 p8_aes_ctr_final(ctx, &walk);
147 ret = blkcipher_walk_done(desc, &walk, 0);
148 }
149 } 104 }
150 105
106 ret = skcipher_walk_virt(&walk, req, false);
107 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
108 preempt_disable();
109 pagefault_disable();
110 enable_kernel_vsx();
111 aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
112 walk.dst.virt.addr,
113 nbytes / AES_BLOCK_SIZE,
114 &ctx->enc_key, walk.iv);
115 disable_kernel_vsx();
116 pagefault_enable();
117 preempt_enable();
118
119 do {
120 crypto_inc(walk.iv, AES_BLOCK_SIZE);
121 } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE);
122
123 ret = skcipher_walk_done(&walk, nbytes);
124 }
125 if (nbytes) {
126 p8_aes_ctr_final(ctx, &walk);
127 ret = skcipher_walk_done(&walk, 0);
128 }
151 return ret; 129 return ret;
152} 130}
153 131
154struct crypto_alg p8_aes_ctr_alg = { 132struct skcipher_alg p8_aes_ctr_alg = {
155 .cra_name = "ctr(aes)", 133 .base.cra_name = "ctr(aes)",
156 .cra_driver_name = "p8_aes_ctr", 134 .base.cra_driver_name = "p8_aes_ctr",
157 .cra_module = THIS_MODULE, 135 .base.cra_module = THIS_MODULE,
158 .cra_priority = 2000, 136 .base.cra_priority = 2000,
159 .cra_type = &crypto_blkcipher_type, 137 .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
160 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, 138 .base.cra_blocksize = 1,
161 .cra_alignmask = 0, 139 .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
162 .cra_blocksize = 1, 140 .setkey = p8_aes_ctr_setkey,
163 .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), 141 .encrypt = p8_aes_ctr_crypt,
164 .cra_init = p8_aes_ctr_init, 142 .decrypt = p8_aes_ctr_crypt,
165 .cra_exit = p8_aes_ctr_exit, 143 .init = p8_aes_ctr_init,
166 .cra_blkcipher = { 144 .exit = p8_aes_ctr_exit,
167 .ivsize = AES_BLOCK_SIZE, 145 .min_keysize = AES_MIN_KEY_SIZE,
168 .min_keysize = AES_MIN_KEY_SIZE, 146 .max_keysize = AES_MAX_KEY_SIZE,
169 .max_keysize = AES_MAX_KEY_SIZE, 147 .ivsize = AES_BLOCK_SIZE,
170 .setkey = p8_aes_ctr_setkey, 148 .chunksize = AES_BLOCK_SIZE,
171 .encrypt = p8_aes_ctr_crypt,
172 .decrypt = p8_aes_ctr_crypt,
173 },
174}; 149};
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 536167e737a0..49f7258045fa 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -7,67 +7,56 @@
7 * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> 7 * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
8 */ 8 */
9 9
10#include <linux/types.h>
11#include <linux/err.h>
12#include <linux/crypto.h>
13#include <linux/delay.h>
14#include <asm/simd.h> 10#include <asm/simd.h>
15#include <asm/switch_to.h> 11#include <asm/switch_to.h>
16#include <crypto/aes.h> 12#include <crypto/aes.h>
17#include <crypto/internal/simd.h> 13#include <crypto/internal/simd.h>
18#include <crypto/scatterwalk.h> 14#include <crypto/internal/skcipher.h>
19#include <crypto/xts.h> 15#include <crypto/xts.h>
20#include <crypto/skcipher.h>
21 16
22#include "aesp8-ppc.h" 17#include "aesp8-ppc.h"
23 18
24struct p8_aes_xts_ctx { 19struct p8_aes_xts_ctx {
25 struct crypto_sync_skcipher *fallback; 20 struct crypto_skcipher *fallback;
26 struct aes_key enc_key; 21 struct aes_key enc_key;
27 struct aes_key dec_key; 22 struct aes_key dec_key;
28 struct aes_key tweak_key; 23 struct aes_key tweak_key;
29}; 24};
30 25
31static int p8_aes_xts_init(struct crypto_tfm *tfm) 26static int p8_aes_xts_init(struct crypto_skcipher *tfm)
32{ 27{
33 const char *alg = crypto_tfm_alg_name(tfm); 28 struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
34 struct crypto_sync_skcipher *fallback; 29 struct crypto_skcipher *fallback;
35 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
36 30
37 fallback = crypto_alloc_sync_skcipher(alg, 0, 31 fallback = crypto_alloc_skcipher("xts(aes)", 0,
38 CRYPTO_ALG_NEED_FALLBACK); 32 CRYPTO_ALG_NEED_FALLBACK |
33 CRYPTO_ALG_ASYNC);
39 if (IS_ERR(fallback)) { 34 if (IS_ERR(fallback)) {
40 printk(KERN_ERR 35 pr_err("Failed to allocate xts(aes) fallback: %ld\n",
41 "Failed to allocate transformation for '%s': %ld\n", 36 PTR_ERR(fallback));
42 alg, PTR_ERR(fallback));
43 return PTR_ERR(fallback); 37 return PTR_ERR(fallback);
44 } 38 }
45 39
46 crypto_sync_skcipher_set_flags( 40 crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
47 fallback, 41 crypto_skcipher_reqsize(fallback));
48 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
49 ctx->fallback = fallback; 42 ctx->fallback = fallback;
50
51 return 0; 43 return 0;
52} 44}
53 45
54static void p8_aes_xts_exit(struct crypto_tfm *tfm) 46static void p8_aes_xts_exit(struct crypto_skcipher *tfm)
55{ 47{
56 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); 48 struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
57 49
58 if (ctx->fallback) { 50 crypto_free_skcipher(ctx->fallback);
59 crypto_free_sync_skcipher(ctx->fallback);
60 ctx->fallback = NULL;
61 }
62} 51}
63 52
64static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key, 53static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
65 unsigned int keylen) 54 unsigned int keylen)
66{ 55{
56 struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
67 int ret; 57 int ret;
68 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
69 58
70 ret = xts_check_key(tfm, key, keylen); 59 ret = xts_verify_key(tfm, key, keylen);
71 if (ret) 60 if (ret)
72 return ret; 61 return ret;
73 62
@@ -81,100 +70,90 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
81 pagefault_enable(); 70 pagefault_enable();
82 preempt_enable(); 71 preempt_enable();
83 72
84 ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 73 ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
85 74
86 return ret ? -EINVAL : 0; 75 return ret ? -EINVAL : 0;
87} 76}
88 77
89static int p8_aes_xts_crypt(struct blkcipher_desc *desc, 78static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
90 struct scatterlist *dst,
91 struct scatterlist *src,
92 unsigned int nbytes, int enc)
93{ 79{
94 int ret; 80 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
81 const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
82 struct skcipher_walk walk;
83 unsigned int nbytes;
95 u8 tweak[AES_BLOCK_SIZE]; 84 u8 tweak[AES_BLOCK_SIZE];
96 u8 *iv; 85 int ret;
97 struct blkcipher_walk walk;
98 struct p8_aes_xts_ctx *ctx =
99 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
100 86
101 if (!crypto_simd_usable()) { 87 if (!crypto_simd_usable()) {
102 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 88 struct skcipher_request *subreq = skcipher_request_ctx(req);
103 skcipher_request_set_sync_tfm(req, ctx->fallback); 89
104 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 90 *subreq = *req;
105 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 91 skcipher_request_set_tfm(subreq, ctx->fallback);
106 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); 92 return enc ? crypto_skcipher_encrypt(subreq) :
107 skcipher_request_zero(req); 93 crypto_skcipher_decrypt(subreq);
108 } else { 94 }
109 blkcipher_walk_init(&walk, dst, src, nbytes); 95
96 ret = skcipher_walk_virt(&walk, req, false);
97 if (ret)
98 return ret;
99
100 preempt_disable();
101 pagefault_disable();
102 enable_kernel_vsx();
110 103
111 ret = blkcipher_walk_virt(desc, &walk); 104 aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);
105
106 disable_kernel_vsx();
107 pagefault_enable();
108 preempt_enable();
112 109
110 while ((nbytes = walk.nbytes) != 0) {
113 preempt_disable(); 111 preempt_disable();
114 pagefault_disable(); 112 pagefault_disable();
115 enable_kernel_vsx(); 113 enable_kernel_vsx();
116 114 if (enc)
117 iv = walk.iv; 115 aes_p8_xts_encrypt(walk.src.virt.addr,
118 memset(tweak, 0, AES_BLOCK_SIZE); 116 walk.dst.virt.addr,
119 aes_p8_encrypt(iv, tweak, &ctx->tweak_key); 117 round_down(nbytes, AES_BLOCK_SIZE),
120 118 &ctx->enc_key, NULL, tweak);
119 else
120 aes_p8_xts_decrypt(walk.src.virt.addr,
121 walk.dst.virt.addr,
122 round_down(nbytes, AES_BLOCK_SIZE),
123 &ctx->dec_key, NULL, tweak);
121 disable_kernel_vsx(); 124 disable_kernel_vsx();
122 pagefault_enable(); 125 pagefault_enable();
123 preempt_enable(); 126 preempt_enable();
124 127
125 while ((nbytes = walk.nbytes)) { 128 ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
126 preempt_disable();
127 pagefault_disable();
128 enable_kernel_vsx();
129 if (enc)
130 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
131 nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
132 else
133 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
134 nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
135 disable_kernel_vsx();
136 pagefault_enable();
137 preempt_enable();
138
139 nbytes &= AES_BLOCK_SIZE - 1;
140 ret = blkcipher_walk_done(desc, &walk, nbytes);
141 }
142 } 129 }
143 return ret; 130 return ret;
144} 131}
145 132
146static int p8_aes_xts_encrypt(struct blkcipher_desc *desc, 133static int p8_aes_xts_encrypt(struct skcipher_request *req)
147 struct scatterlist *dst,
148 struct scatterlist *src, unsigned int nbytes)
149{ 134{
150 return p8_aes_xts_crypt(desc, dst, src, nbytes, 1); 135 return p8_aes_xts_crypt(req, 1);
151} 136}
152 137
153static int p8_aes_xts_decrypt(struct blkcipher_desc *desc, 138static int p8_aes_xts_decrypt(struct skcipher_request *req)
154 struct scatterlist *dst,
155 struct scatterlist *src, unsigned int nbytes)
156{ 139{
157 return p8_aes_xts_crypt(desc, dst, src, nbytes, 0); 140 return p8_aes_xts_crypt(req, 0);
158} 141}
159 142
160struct crypto_alg p8_aes_xts_alg = { 143struct skcipher_alg p8_aes_xts_alg = {
161 .cra_name = "xts(aes)", 144 .base.cra_name = "xts(aes)",
162 .cra_driver_name = "p8_aes_xts", 145 .base.cra_driver_name = "p8_aes_xts",
163 .cra_module = THIS_MODULE, 146 .base.cra_module = THIS_MODULE,
164 .cra_priority = 2000, 147 .base.cra_priority = 2000,
165 .cra_type = &crypto_blkcipher_type, 148 .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
166 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, 149 .base.cra_blocksize = AES_BLOCK_SIZE,
167 .cra_alignmask = 0, 150 .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
168 .cra_blocksize = AES_BLOCK_SIZE, 151 .setkey = p8_aes_xts_setkey,
169 .cra_ctxsize = sizeof(struct p8_aes_xts_ctx), 152 .encrypt = p8_aes_xts_encrypt,
170 .cra_init = p8_aes_xts_init, 153 .decrypt = p8_aes_xts_decrypt,
171 .cra_exit = p8_aes_xts_exit, 154 .init = p8_aes_xts_init,
172 .cra_blkcipher = { 155 .exit = p8_aes_xts_exit,
173 .ivsize = AES_BLOCK_SIZE, 156 .min_keysize = 2 * AES_MIN_KEY_SIZE,
174 .min_keysize = 2 * AES_MIN_KEY_SIZE, 157 .max_keysize = 2 * AES_MAX_KEY_SIZE,
175 .max_keysize = 2 * AES_MAX_KEY_SIZE, 158 .ivsize = AES_BLOCK_SIZE,
176 .setkey = p8_aes_xts_setkey,
177 .encrypt = p8_aes_xts_encrypt,
178 .decrypt = p8_aes_xts_decrypt,
179 }
180}; 159};
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 349646b73754..01774a4d26a2 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -2,8 +2,6 @@
2#include <linux/types.h> 2#include <linux/types.h>
3#include <crypto/aes.h> 3#include <crypto/aes.h>
4 4
5#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
6
7struct aes_key { 5struct aes_key {
8 u8 key[AES_MAX_KEYLENGTH]; 6 u8 key[AES_MAX_KEYLENGTH];
9 int rounds; 7 int rounds;
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 9c6b5c1d6a1a..db874367b602 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1286,6 +1286,24 @@ ___
1286 1286
1287######################################################################### 1287#########################################################################
1288{{{ # CTR procedure[s] # 1288{{{ # CTR procedure[s] #
1289
1290####################### WARNING: Here be dragons! #######################
1291#
1292# This code is written as 'ctr32', based on a 32-bit counter used
1293# upstream. The kernel does *not* use a 32-bit counter. The kernel uses
1294# a 128-bit counter.
1295#
1296# This leads to subtle changes from the upstream code: the counter
 1297# is incremented with vadduqm rather than vadduwm. This occurs in
1298# both the bulk (8 blocks at a time) path, and in the individual block
1299# path. Be aware of this when doing updates.
1300#
1301# See:
1302# 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug")
1303# 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword")
1304# https://github.com/openssl/openssl/pull/8942
1305#
1306#########################################################################
1289my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10)); 1307my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
1290my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3)); 1308my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
1291my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)= 1309my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
@@ -1357,7 +1375,7 @@ Loop_ctr32_enc:
1357 addi $idx,$idx,16 1375 addi $idx,$idx,16
1358 bdnz Loop_ctr32_enc 1376 bdnz Loop_ctr32_enc
1359 1377
1360 vadduqm $ivec,$ivec,$one 1378 vadduqm $ivec,$ivec,$one # Kernel change for 128-bit
1361 vmr $dat,$inptail 1379 vmr $dat,$inptail
1362 lvx $inptail,0,$inp 1380 lvx $inptail,0,$inp
1363 addi $inp,$inp,16 1381 addi $inp,$inp,16
@@ -1501,7 +1519,7 @@ Load_ctr32_enc_key:
1501 $SHL $len,$len,4 1519 $SHL $len,$len,4
1502 1520
1503 vadduqm $out1,$ivec,$one # counter values ... 1521 vadduqm $out1,$ivec,$one # counter values ...
1504 vadduqm $out2,$ivec,$two 1522 vadduqm $out2,$ivec,$two # (do all ctr adds as 128-bit)
1505 vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] 1523 vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
1506 le?li $idx,8 1524 le?li $idx,8
1507 vadduqm $out3,$out1,$two 1525 vadduqm $out3,$out1,$two
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 6c4c77f4e159..3e0335fb406c 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -15,54 +15,58 @@
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <asm/cputable.h> 16#include <asm/cputable.h>
17#include <crypto/internal/hash.h> 17#include <crypto/internal/hash.h>
18#include <crypto/internal/skcipher.h>
18 19
19extern struct shash_alg p8_ghash_alg; 20extern struct shash_alg p8_ghash_alg;
20extern struct crypto_alg p8_aes_alg; 21extern struct crypto_alg p8_aes_alg;
21extern struct crypto_alg p8_aes_cbc_alg; 22extern struct skcipher_alg p8_aes_cbc_alg;
22extern struct crypto_alg p8_aes_ctr_alg; 23extern struct skcipher_alg p8_aes_ctr_alg;
23extern struct crypto_alg p8_aes_xts_alg; 24extern struct skcipher_alg p8_aes_xts_alg;
24static struct crypto_alg *algs[] = {
25 &p8_aes_alg,
26 &p8_aes_cbc_alg,
27 &p8_aes_ctr_alg,
28 &p8_aes_xts_alg,
29 NULL,
30};
31 25
32static int __init p8_init(void) 26static int __init p8_init(void)
33{ 27{
34 int ret = 0; 28 int ret;
35 struct crypto_alg **alg_it;
36 29
37 for (alg_it = algs; *alg_it; alg_it++) { 30 ret = crypto_register_shash(&p8_ghash_alg);
38 ret = crypto_register_alg(*alg_it);
39 printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
40 (*alg_it)->cra_name, ret);
41 if (ret) {
42 for (alg_it--; alg_it >= algs; alg_it--)
43 crypto_unregister_alg(*alg_it);
44 break;
45 }
46 }
47 if (ret) 31 if (ret)
48 return ret; 32 goto err;
49 33
50 ret = crypto_register_shash(&p8_ghash_alg); 34 ret = crypto_register_alg(&p8_aes_alg);
51 if (ret) { 35 if (ret)
52 for (alg_it = algs; *alg_it; alg_it++) 36 goto err_unregister_ghash;
53 crypto_unregister_alg(*alg_it); 37
54 } 38 ret = crypto_register_skcipher(&p8_aes_cbc_alg);
39 if (ret)
40 goto err_unregister_aes;
41
42 ret = crypto_register_skcipher(&p8_aes_ctr_alg);
43 if (ret)
44 goto err_unregister_aes_cbc;
45
46 ret = crypto_register_skcipher(&p8_aes_xts_alg);
47 if (ret)
48 goto err_unregister_aes_ctr;
49
50 return 0;
51
52err_unregister_aes_ctr:
53 crypto_unregister_skcipher(&p8_aes_ctr_alg);
54err_unregister_aes_cbc:
55 crypto_unregister_skcipher(&p8_aes_cbc_alg);
56err_unregister_aes:
57 crypto_unregister_alg(&p8_aes_alg);
58err_unregister_ghash:
59 crypto_unregister_shash(&p8_ghash_alg);
60err:
55 return ret; 61 return ret;
56} 62}
57 63
58static void __exit p8_exit(void) 64static void __exit p8_exit(void)
59{ 65{
60 struct crypto_alg **alg_it; 66 crypto_unregister_skcipher(&p8_aes_xts_alg);
61 67 crypto_unregister_skcipher(&p8_aes_ctr_alg);
62 for (alg_it = algs; *alg_it; alg_it++) { 68 crypto_unregister_skcipher(&p8_aes_cbc_alg);
63 printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); 69 crypto_unregister_alg(&p8_aes_alg);
64 crypto_unregister_alg(*alg_it);
65 }
66 crypto_unregister_shash(&p8_ghash_alg); 70 crypto_unregister_shash(&p8_ghash_alg);
67} 71}
68 72
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index d84095591e45..f86065e16772 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -111,8 +111,7 @@ static int i2c_acpi_do_lookup(struct acpi_device *adev,
111 struct list_head resource_list; 111 struct list_head resource_list;
112 int ret; 112 int ret;
113 113
114 if (acpi_bus_get_status(adev) || !adev->status.present || 114 if (acpi_bus_get_status(adev) || !adev->status.present)
115 acpi_device_enumerated(adev))
116 return -EINVAL; 115 return -EINVAL;
117 116
118 if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0) 117 if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0)
@@ -147,6 +146,9 @@ static int i2c_acpi_get_info(struct acpi_device *adev,
147 lookup.info = info; 146 lookup.info = info;
148 lookup.index = -1; 147 lookup.index = -1;
149 148
149 if (acpi_device_enumerated(adev))
150 return -EINVAL;
151
150 ret = i2c_acpi_do_lookup(adev, &lookup); 152 ret = i2c_acpi_do_lookup(adev, &lookup);
151 if (ret) 153 if (ret)
152 return ret; 154 return ret;
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index bf395df3bb37..1a2e2f7629f3 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -87,8 +87,7 @@ config PPP_MPPE
87 depends on PPP 87 depends on PPP
88 select CRYPTO 88 select CRYPTO
89 select CRYPTO_SHA1 89 select CRYPTO_SHA1
90 select CRYPTO_ARC4 90 select CRYPTO_LIB_ARC4
91 select CRYPTO_ECB
92 ---help--- 91 ---help---
93 Support for the MPPE Encryption protocol, as employed by the 92 Support for the MPPE Encryption protocol, as employed by the
94 Microsoft Point-to-Point Tunneling Protocol. 93 Microsoft Point-to-Point Tunneling Protocol.
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 66c8e65f6872..bd3c80b0bc77 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -42,9 +42,10 @@
42 * deprecated in 2.6 42 * deprecated in 2.6
43 */ 43 */
44 44
45#include <crypto/arc4.h>
45#include <crypto/hash.h> 46#include <crypto/hash.h>
46#include <crypto/skcipher.h>
47#include <linux/err.h> 47#include <linux/err.h>
48#include <linux/fips.h>
48#include <linux/module.h> 49#include <linux/module.h>
49#include <linux/kernel.h> 50#include <linux/kernel.h>
50#include <linux/init.h> 51#include <linux/init.h>
@@ -66,13 +67,6 @@ MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
66MODULE_SOFTDEP("pre: arc4"); 67MODULE_SOFTDEP("pre: arc4");
67MODULE_VERSION("1.0.2"); 68MODULE_VERSION("1.0.2");
68 69
69static unsigned int
70setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
71{
72 sg_set_buf(sg, address, length);
73 return length;
74}
75
76#define SHA1_PAD_SIZE 40 70#define SHA1_PAD_SIZE 40
77 71
78/* 72/*
@@ -96,7 +90,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
96 * State for an MPPE (de)compressor. 90 * State for an MPPE (de)compressor.
97 */ 91 */
98struct ppp_mppe_state { 92struct ppp_mppe_state {
99 struct crypto_sync_skcipher *arc4; 93 struct arc4_ctx arc4;
100 struct shash_desc *sha1; 94 struct shash_desc *sha1;
101 unsigned char *sha1_digest; 95 unsigned char *sha1_digest;
102 unsigned char master_key[MPPE_MAX_KEY_LEN]; 96 unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -155,24 +149,11 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
155 */ 149 */
156static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) 150static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
157{ 151{
158 struct scatterlist sg_in[1], sg_out[1];
159 SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
160
161 skcipher_request_set_sync_tfm(req, state->arc4);
162 skcipher_request_set_callback(req, 0, NULL, NULL);
163
164 get_new_key_from_sha(state); 152 get_new_key_from_sha(state);
165 if (!initial_key) { 153 if (!initial_key) {
166 crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest, 154 arc4_setkey(&state->arc4, state->sha1_digest, state->keylen);
167 state->keylen); 155 arc4_crypt(&state->arc4, state->session_key, state->sha1_digest,
168 sg_init_table(sg_in, 1); 156 state->keylen);
169 sg_init_table(sg_out, 1);
170 setup_sg(sg_in, state->sha1_digest, state->keylen);
171 setup_sg(sg_out, state->session_key, state->keylen);
172 skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen,
173 NULL);
174 if (crypto_skcipher_encrypt(req))
175 printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
176 } else { 157 } else {
177 memcpy(state->session_key, state->sha1_digest, state->keylen); 158 memcpy(state->session_key, state->sha1_digest, state->keylen);
178 } 159 }
@@ -182,9 +163,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
182 state->session_key[1] = 0x26; 163 state->session_key[1] = 0x26;
183 state->session_key[2] = 0x9e; 164 state->session_key[2] = 0x9e;
184 } 165 }
185 crypto_sync_skcipher_setkey(state->arc4, state->session_key, 166 arc4_setkey(&state->arc4, state->session_key, state->keylen);
186 state->keylen);
187 skcipher_request_zero(req);
188} 167}
189 168
190/* 169/*
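Condensed, the rewritten mppe_rekey() from the two hunks above now reads roughly as follows (sketch reconstructed from the new side of the diff; the 40-bit session key salting that sits between the hunks is elided):

    static void mppe_rekey(struct ppp_mppe_state *state, int initial_key)
    {
            get_new_key_from_sha(state);
            if (!initial_key) {
                    arc4_setkey(&state->arc4, state->sha1_digest, state->keylen);
                    arc4_crypt(&state->arc4, state->session_key,
                               state->sha1_digest, state->keylen);
            } else {
                    memcpy(state->session_key, state->sha1_digest, state->keylen);
            }
            /* ... 40-bit key salting elided (outside these hunks) ... */
            arc4_setkey(&state->arc4, state->session_key, state->keylen);
    }

There is no on-stack request to zero and no warning path: both library calls operate directly on the context and the supplied buffers.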
@@ -197,7 +176,8 @@ static void *mppe_alloc(unsigned char *options, int optlen)
197 unsigned int digestsize; 176 unsigned int digestsize;
198 177
199 if (optlen != CILEN_MPPE + sizeof(state->master_key) || 178 if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
200 options[0] != CI_MPPE || options[1] != CILEN_MPPE) 179 options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
180 fips_enabled)
201 goto out; 181 goto out;
202 182
203 state = kzalloc(sizeof(*state), GFP_KERNEL); 183 state = kzalloc(sizeof(*state), GFP_KERNEL);
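MPPE is built on RC4, which is not an approved cipher under FIPS, so the patch refuses to set up the compressor when fips_enabled is set; the same gate is added to the module init further down. The shape of the check, as a minimal sketch with a hypothetical function name:

    #include <linux/fips.h>

    static int __init example_init(void)
    {
            if (fips_enabled)       /* RC4-based MPPE is unusable in FIPS mode */
                    return -ENODEV;
            return 0;
    }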
@@ -205,12 +185,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
205 goto out; 185 goto out;
206 186
207 187
208 state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
209 if (IS_ERR(state->arc4)) {
210 state->arc4 = NULL;
211 goto out_free;
212 }
213
214 shash = crypto_alloc_shash("sha1", 0, 0); 188 shash = crypto_alloc_shash("sha1", 0, 0);
215 if (IS_ERR(shash)) 189 if (IS_ERR(shash))
216 goto out_free; 190 goto out_free;
@@ -251,7 +225,6 @@ out_free:
251 crypto_free_shash(state->sha1->tfm); 225 crypto_free_shash(state->sha1->tfm);
252 kzfree(state->sha1); 226 kzfree(state->sha1);
253 } 227 }
254 crypto_free_sync_skcipher(state->arc4);
255 kfree(state); 228 kfree(state);
256out: 229out:
257 return NULL; 230 return NULL;
@@ -267,8 +240,7 @@ static void mppe_free(void *arg)
267 kfree(state->sha1_digest); 240 kfree(state->sha1_digest);
268 crypto_free_shash(state->sha1->tfm); 241 crypto_free_shash(state->sha1->tfm);
269 kzfree(state->sha1); 242 kzfree(state->sha1);
270 crypto_free_sync_skcipher(state->arc4); 243 kzfree(state);
271 kfree(state);
272 } 244 }
273} 245}
274 246
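The free path loses the crypto_free_sync_skcipher() call, and because the embedded arc4_ctx carries the expanded RC4 state derived from the session key, the whole structure is now treated as key material and released with kzfree(), which zeroes the allocation before freeing, rather than plain kfree(). In outline:

    /* before: release the tfm, then free the state */
    crypto_free_sync_skcipher(state->arc4);
    kfree(state);

    /* after: no tfm, and the state itself holds key material */
    kzfree(state);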
@@ -367,10 +339,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
367 int isize, int osize) 339 int isize, int osize)
368{ 340{
369 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; 341 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
370 SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
371 int proto; 342 int proto;
372 int err;
373 struct scatterlist sg_in[1], sg_out[1];
374 343
375 /* 344 /*
376 * Check that the protocol is in the range we handle. 345 * Check that the protocol is in the range we handle.
@@ -421,21 +390,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
421 ibuf += 2; /* skip to proto field */ 390 ibuf += 2; /* skip to proto field */
422 isize -= 2; 391 isize -= 2;
423 392
424 /* Encrypt packet */ 393 arc4_crypt(&state->arc4, obuf, ibuf, isize);
425 sg_init_table(sg_in, 1);
426 sg_init_table(sg_out, 1);
427 setup_sg(sg_in, ibuf, isize);
428 setup_sg(sg_out, obuf, osize);
429
430 skcipher_request_set_sync_tfm(req, state->arc4);
431 skcipher_request_set_callback(req, 0, NULL, NULL);
432 skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
433 err = crypto_skcipher_encrypt(req);
434 skcipher_request_zero(req);
435 if (err) {
436 printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
437 return -1;
438 }
439 394
440 state->stats.unc_bytes += isize; 395 state->stats.unc_bytes += isize;
441 state->stats.unc_packets++; 396 state->stats.unc_packets++;
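On the transmit path the whole scatterlist and request dance collapses into one call. The MPPE buffers are plain linear kernel memory, and ARC4 is a byte stream cipher, so the output is the same length as the input and the operation cannot fail, which is why the error branch and its debug printk disappear. For contrast, the retired sequence versus its replacement (condensed from the removed lines; setup_sg() was just a wrapper around sg_set_buf()):

    /* retired: sync skcipher over scatterlists, with error handling */
    SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
    struct scatterlist sg_in[1], sg_out[1];

    sg_init_table(sg_in, 1);
    sg_init_table(sg_out, 1);
    sg_set_buf(sg_in, ibuf, isize);
    sg_set_buf(sg_out, obuf, osize);
    skcipher_request_set_sync_tfm(req, state->arc4);
    skcipher_request_set_callback(req, 0, NULL, NULL);
    skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
    err = crypto_skcipher_encrypt(req);
    skcipher_request_zero(req);
    if (err)
            return -1;      /* encryption failed */

    /* replacement: one direct library call */
    arc4_crypt(&state->arc4, obuf, ibuf, isize);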
@@ -481,10 +436,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
481 int osize) 436 int osize)
482{ 437{
483 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; 438 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
484 SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
485 unsigned ccount; 439 unsigned ccount;
486 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; 440 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
487 struct scatterlist sg_in[1], sg_out[1];
488 441
489 if (isize <= PPP_HDRLEN + MPPE_OVHD) { 442 if (isize <= PPP_HDRLEN + MPPE_OVHD) {
490 if (state->debug) 443 if (state->debug)
@@ -611,19 +564,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
611 * Decrypt the first byte in order to check if it is 564 * Decrypt the first byte in order to check if it is
612 * a compressed or uncompressed protocol field. 565 * a compressed or uncompressed protocol field.
613 */ 566 */
614 sg_init_table(sg_in, 1); 567 arc4_crypt(&state->arc4, obuf, ibuf, 1);
615 sg_init_table(sg_out, 1);
616 setup_sg(sg_in, ibuf, 1);
617 setup_sg(sg_out, obuf, 1);
618
619 skcipher_request_set_sync_tfm(req, state->arc4);
620 skcipher_request_set_callback(req, 0, NULL, NULL);
621 skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
622 if (crypto_skcipher_decrypt(req)) {
623 printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
624 osize = DECOMP_ERROR;
625 goto out_zap_req;
626 }
627 568
628 /* 569 /*
629 * Do PFC decompression. 570 * Do PFC decompression.
@@ -638,14 +579,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
638 } 579 }
639 580
640 /* And finally, decrypt the rest of the packet. */ 581 /* And finally, decrypt the rest of the packet. */
641 setup_sg(sg_in, ibuf + 1, isize - 1); 582 arc4_crypt(&state->arc4, obuf + 1, ibuf + 1, isize - 1);
642 setup_sg(sg_out, obuf + 1, osize - 1);
643 skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL);
644 if (crypto_skcipher_decrypt(req)) {
645 printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
646 osize = DECOMP_ERROR;
647 goto out_zap_req;
648 }
649 583
650 state->stats.unc_bytes += osize; 584 state->stats.unc_bytes += osize;
651 state->stats.unc_packets++; 585 state->stats.unc_packets++;
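mppe_decompress() still decrypts in two steps, one byte to inspect the protocol field and then the remainder, and that is only correct because arc4_crypt() advances the keystream position (x, y and the permuted S array) inside the context between calls. A small self-contained illustration of that property (hypothetical buffers, not MPPE code):

    #include <crypto/arc4.h>

    struct arc4_ctx ctx;
    u8 key[16] = { 0 };     /* session key bytes */
    u8 in[64]  = { 0 };     /* plaintext */
    u8 whole[64], split[64];

    arc4_setkey(&ctx, key, sizeof(key));
    arc4_crypt(&ctx, whole, in, sizeof(in));

    arc4_setkey(&ctx, key, sizeof(key));                  /* restart keystream */
    arc4_crypt(&ctx, split, in, 1);                       /* first byte ...    */
    arc4_crypt(&ctx, split + 1, in + 1, sizeof(in) - 1);  /* ... then the rest */

    /* whole[] and split[] now hold identical ciphertext */

Splitting the buffer across calls yields the same bytes as a single call over the whole packet, mirroring what the old code did when it fed the same packet to the skcipher as two separate requests.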
@@ -655,8 +589,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
655 /* good packet credit */ 589 /* good packet credit */
656 state->sanity_errors >>= 1; 590 state->sanity_errors >>= 1;
657 591
658out_zap_req:
659 skcipher_request_zero(req);
660 return osize; 592 return osize;
661 593
662sanity_error: 594sanity_error:
@@ -729,8 +661,7 @@ static struct compressor ppp_mppe = {
729static int __init ppp_mppe_init(void) 661static int __init ppp_mppe_init(void)
730{ 662{
731 int answer; 663 int answer;
732 if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && 664 if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))
733 crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)))
734 return -ENODEV; 665 return -ENODEV;
735 666
736 sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); 667 sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);