author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-07 17:31:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-07 17:31:18 -0400
commit		6be48f2940af9ea8d93c23a0dd8e322672c92efd
tree		1bdc85a9d3fd0c19e108ea27a29a83ef2b44f5d0
parent		0ffb01d9def22f1954e99529b7e4ded497b2e88b
parent		68411521cc6055edc6274e03ab3210a5893533ba
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 3.12:
- Added MODULE_SOFTDEP to allow pre-loading of modules.
- Reinstated crct10dif driver using the module softdep feature.
- Allow via rng driver to be auto-loaded.
- Split large input data when necessary in nx.
- Handle zero length messages correctly for GCM/XCBC in nx.
- Handle SHA-2 chunks bigger than block size properly in nx.
- Handle unaligned lengths in omap-aes.
- Added SHA384/SHA512 to omap-sham.
- Added OMAP5/AM43XX SHAM support.
- Added OMAP4 TRNG support.
- Misc fixes"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (66 commits)
Reinstate "crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework"
hwrng: via - Add MODULE_DEVICE_TABLE
crypto: fcrypt - Fix bitoperation for compilation with clang
crypto: nx - fix SHA-2 for chunks bigger than block size
crypto: nx - fix GCM for zero length messages
crypto: nx - fix XCBC for zero length messages
crypto: nx - fix limits to sg lists for AES-CCM
crypto: nx - fix limits to sg lists for AES-XCBC
crypto: nx - fix limits to sg lists for AES-GCM
crypto: nx - fix limits to sg lists for AES-CTR
crypto: nx - fix limits to sg lists for AES-CBC
crypto: nx - fix limits to sg lists for AES-ECB
crypto: nx - add offset to nx_build_sg_lists()
padata - Register hotcpu notifier after initialization
padata - share code between CPU_ONLINE and CPU_DOWN_FAILED, same to CPU_DOWN_PREPARE and CPU_UP_CANCELED
hwrng: omap - reorder OMAP TRNG driver code
crypto: omap-sham - correct dma burst size
crypto: omap-sham - Enable Polling mode if DMA fails
crypto: tegra-aes - bitwise vs logical and
crypto: sahara - checking the wrong variable
...
56 files changed, 3405 insertions(+), 1364 deletions(-)
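The MODULE_SOFTDEP mechanism called out in the pull message is what makes the crct10dif reinstatement workable: a module can declare a soft pre-dependency so that modprobe loads providers of a crypto transform before the dependent module initializes. A minimal sketch of the annotation (this is the declaration the reinstated lib/crc-t10dif.c relies on; treat the exact placement as illustrative):

#include <linux/module.h>

/* Ask modprobe to load providers of the "crct10dif" transform before
 * this module's init runs, so crypto_alloc_shash("crct10dif", 0, 0)
 * can pick up an accelerated implementation if one is present. */
MODULE_SOFTDEP("pre: crct10dif");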
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 73ae7536a32b..5c5315ba129b 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -530,12 +530,12 @@ static int __init omap2_init_devices(void)
 		omap_init_mcspi();
 		omap_init_sham();
 		omap_init_aes();
+		omap_init_rng();
 	} else {
 		/* These can be removed when bindings are done */
 		omap_init_wl12xx_of();
 	}
 	omap_init_sti();
-	omap_init_rng();
 	omap_init_vout();
 
 	return 0;
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 6c63c358a7e6..7d6ba9db1be9 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
@@ -81,3 +82,4 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
 crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
 sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
 sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
+crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index 5cb86ccd4acb..c171dcbf192d 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -62,7 +62,7 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 }
 
 /* camellia sboxes */
-const u64 camellia_sp10011110[256] = {
+__visible const u64 camellia_sp10011110[256] = {
 	0x7000007070707000ULL, 0x8200008282828200ULL, 0x2c00002c2c2c2c00ULL,
 	0xec0000ecececec00ULL, 0xb30000b3b3b3b300ULL, 0x2700002727272700ULL,
 	0xc00000c0c0c0c000ULL, 0xe50000e5e5e5e500ULL, 0xe40000e4e4e4e400ULL,
@@ -151,7 +151,7 @@ const u64 camellia_sp10011110[256] = {
 	0x9e00009e9e9e9e00ULL,
 };
 
-const u64 camellia_sp22000222[256] = {
+__visible const u64 camellia_sp22000222[256] = {
 	0xe0e0000000e0e0e0ULL, 0x0505000000050505ULL, 0x5858000000585858ULL,
 	0xd9d9000000d9d9d9ULL, 0x6767000000676767ULL, 0x4e4e0000004e4e4eULL,
 	0x8181000000818181ULL, 0xcbcb000000cbcbcbULL, 0xc9c9000000c9c9c9ULL,
@@ -240,7 +240,7 @@ const u64 camellia_sp22000222[256] = {
 	0x3d3d0000003d3d3dULL,
 };
 
-const u64 camellia_sp03303033[256] = {
+__visible const u64 camellia_sp03303033[256] = {
 	0x0038380038003838ULL, 0x0041410041004141ULL, 0x0016160016001616ULL,
 	0x0076760076007676ULL, 0x00d9d900d900d9d9ULL, 0x0093930093009393ULL,
 	0x0060600060006060ULL, 0x00f2f200f200f2f2ULL, 0x0072720072007272ULL,
@@ -329,7 +329,7 @@ const u64 camellia_sp03303033[256] = {
 	0x004f4f004f004f4fULL,
 };
 
-const u64 camellia_sp00444404[256] = {
+__visible const u64 camellia_sp00444404[256] = {
 	0x0000707070700070ULL, 0x00002c2c2c2c002cULL, 0x0000b3b3b3b300b3ULL,
 	0x0000c0c0c0c000c0ULL, 0x0000e4e4e4e400e4ULL, 0x0000575757570057ULL,
 	0x0000eaeaeaea00eaULL, 0x0000aeaeaeae00aeULL, 0x0000232323230023ULL,
@@ -418,7 +418,7 @@ const u64 camellia_sp00444404[256] = {
 	0x00009e9e9e9e009eULL,
 };
 
-const u64 camellia_sp02220222[256] = {
+__visible const u64 camellia_sp02220222[256] = {
 	0x00e0e0e000e0e0e0ULL, 0x0005050500050505ULL, 0x0058585800585858ULL,
 	0x00d9d9d900d9d9d9ULL, 0x0067676700676767ULL, 0x004e4e4e004e4e4eULL,
 	0x0081818100818181ULL, 0x00cbcbcb00cbcbcbULL, 0x00c9c9c900c9c9c9ULL,
@@ -507,7 +507,7 @@ const u64 camellia_sp02220222[256] = {
 	0x003d3d3d003d3d3dULL,
 };
 
-const u64 camellia_sp30333033[256] = {
+__visible const u64 camellia_sp30333033[256] = {
 	0x3800383838003838ULL, 0x4100414141004141ULL, 0x1600161616001616ULL,
 	0x7600767676007676ULL, 0xd900d9d9d900d9d9ULL, 0x9300939393009393ULL,
 	0x6000606060006060ULL, 0xf200f2f2f200f2f2ULL, 0x7200727272007272ULL,
@@ -596,7 +596,7 @@ const u64 camellia_sp30333033[256] = {
 	0x4f004f4f4f004f4fULL,
 };
 
-const u64 camellia_sp44044404[256] = {
+__visible const u64 camellia_sp44044404[256] = {
 	0x7070007070700070ULL, 0x2c2c002c2c2c002cULL, 0xb3b300b3b3b300b3ULL,
 	0xc0c000c0c0c000c0ULL, 0xe4e400e4e4e400e4ULL, 0x5757005757570057ULL,
 	0xeaea00eaeaea00eaULL, 0xaeae00aeaeae00aeULL, 0x2323002323230023ULL,
@@ -685,7 +685,7 @@ const u64 camellia_sp44044404[256] = {
 	0x9e9e009e9e9e009eULL,
 };
 
-const u64 camellia_sp11101110[256] = {
+__visible const u64 camellia_sp11101110[256] = {
 	0x7070700070707000ULL, 0x8282820082828200ULL, 0x2c2c2c002c2c2c00ULL,
 	0xececec00ececec00ULL, 0xb3b3b300b3b3b300ULL, 0x2727270027272700ULL,
 	0xc0c0c000c0c0c000ULL, 0xe5e5e500e5e5e500ULL, 0xe4e4e400e4e4e400ULL,
@@ -828,8 +828,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
 	subRL[1] ^= (subRL[1] & ~subRL[9]) << 32;
 	/* modified for FLinv(kl2) */
-	dw = (subRL[1] & subRL[9]) >> 32,
+	dw = (subRL[1] & subRL[9]) >> 32;
 	subRL[1] ^= rol32(dw, 1);
 
 	/* round 8 */
 	subRL[11] ^= subRL[1];
@@ -840,8 +840,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
 	subRL[1] ^= (subRL[1] & ~subRL[17]) << 32;
 	/* modified for FLinv(kl4) */
-	dw = (subRL[1] & subRL[17]) >> 32,
+	dw = (subRL[1] & subRL[17]) >> 32;
 	subRL[1] ^= rol32(dw, 1);
 
 	/* round 14 */
 	subRL[19] ^= subRL[1];
@@ -859,8 +859,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	} else {
 		subRL[1] ^= (subRL[1] & ~subRL[25]) << 32;
 		/* modified for FLinv(kl6) */
-		dw = (subRL[1] & subRL[25]) >> 32,
+		dw = (subRL[1] & subRL[25]) >> 32;
 		subRL[1] ^= rol32(dw, 1);
 
 		/* round 20 */
 		subRL[27] ^= subRL[1];
@@ -882,8 +882,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
 		kw4 ^= (kw4 & ~subRL[24]) << 32;
 		/* modified for FL(kl5) */
-		dw = (kw4 & subRL[24]) >> 32,
+		dw = (kw4 & subRL[24]) >> 32;
 		kw4 ^= rol32(dw, 1);
 	}
 
 	/* round 17 */
@@ -895,8 +895,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
 	kw4 ^= (kw4 & ~subRL[16]) << 32;
 	/* modified for FL(kl3) */
-	dw = (kw4 & subRL[16]) >> 32,
+	dw = (kw4 & subRL[16]) >> 32;
 	kw4 ^= rol32(dw, 1);
 
 	/* round 11 */
 	subRL[14] ^= kw4;
@@ -907,8 +907,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
 	kw4 ^= (kw4 & ~subRL[8]) << 32;
 	/* modified for FL(kl1) */
-	dw = (kw4 & subRL[8]) >> 32,
+	dw = (kw4 & subRL[8]) >> 32;
 	kw4 ^= rol32(dw, 1);
 
 	/* round 5 */
 	subRL[6] ^= kw4;
@@ -928,8 +928,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(6, subRL[5] ^ subRL[7]);		/* round 5 */
 
 	tl = (subRL[10] >> 32) ^ (subRL[10] & ~subRL[8]);
-	dw = tl & (subRL[8] >> 32),			/* FL(kl1) */
+	dw = tl & (subRL[8] >> 32);			/* FL(kl1) */
 	tr = subRL[10] ^ rol32(dw, 1);
 	tt = (tr | ((u64)tl << 32));
 
 	SET_SUBKEY_LR(7, subRL[6] ^ tt);		/* round 6 */
@@ -937,8 +937,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(9, subRL[9]);			/* FLinv(kl2) */
 
 	tl = (subRL[7] >> 32) ^ (subRL[7] & ~subRL[9]);
-	dw = tl & (subRL[9] >> 32),			/* FLinv(kl2) */
+	dw = tl & (subRL[9] >> 32);			/* FLinv(kl2) */
 	tr = subRL[7] ^ rol32(dw, 1);
 	tt = (tr | ((u64)tl << 32));
 
 	SET_SUBKEY_LR(10, subRL[11] ^ tt);		/* round 7 */
@@ -948,8 +948,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(14, subRL[13] ^ subRL[15]);	/* round 11 */
 
 	tl = (subRL[18] >> 32) ^ (subRL[18] & ~subRL[16]);
-	dw = tl & (subRL[16] >> 32),			/* FL(kl3) */
+	dw = tl & (subRL[16] >> 32);			/* FL(kl3) */
 	tr = subRL[18] ^ rol32(dw, 1);
 	tt = (tr | ((u64)tl << 32));
 
 	SET_SUBKEY_LR(15, subRL[14] ^ tt);		/* round 12 */
@@ -957,8 +957,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 	SET_SUBKEY_LR(17, subRL[17]);			/* FLinv(kl4) */
 
 	tl = (subRL[15] >> 32) ^ (subRL[15] & ~subRL[17]);
-	dw = tl & (subRL[17] >> 32),			/* FLinv(kl4) */
+	dw = tl & (subRL[17] >> 32);			/* FLinv(kl4) */
 	tr = subRL[15] ^ rol32(dw, 1);
 	tt = (tr | ((u64)tl << 32));
 
 	SET_SUBKEY_LR(18, subRL[19] ^ tt);		/* round 13 */
@@ -972,8 +972,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 		SET_SUBKEY_LR(24, subRL[24] ^ subRL[23]);	/* kw3 */
 	} else {
 		tl = (subRL[26] >> 32) ^ (subRL[26] & ~subRL[24]);
-		dw = tl & (subRL[24] >> 32),		/* FL(kl5) */
+		dw = tl & (subRL[24] >> 32);		/* FL(kl5) */
 		tr = subRL[26] ^ rol32(dw, 1);
 		tt = (tr | ((u64)tl << 32));
 
 		SET_SUBKEY_LR(23, subRL[22] ^ tt);	/* round 18 */
@@ -981,8 +981,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 		SET_SUBKEY_LR(25, subRL[25]);		/* FLinv(kl6) */
 
 		tl = (subRL[23] >> 32) ^ (subRL[23] & ~subRL[25]);
-		dw = tl & (subRL[25] >> 32),		/* FLinv(kl6) */
+		dw = tl & (subRL[25] >> 32);		/* FLinv(kl6) */
 		tr = subRL[23] ^ rol32(dw, 1);
 		tt = (tr | ((u64)tl << 32));
 
 		SET_SUBKEY_LR(26, subRL[27] ^ tt);	/* round 19 */
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
new file mode 100644
index 000000000000..35e97569d05f
--- /dev/null
+++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S
@@ -0,0 +1,643 @@
+########################################################################
+# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
+#
+# Copyright (c) 2013, Intel Corporation
+#
+# Authors:
+#     Erdinc Ozturk <erdinc.ozturk@intel.com>
+#     Vinodh Gopal <vinodh.gopal@intel.com>
+#     James Guilford <james.guilford@intel.com>
+#     Tim Chen <tim.c.chen@linux.intel.com>
+#
+# This software is available to you under a choice of one of two
+# licenses.  You may choose to be licensed under the terms of the GNU
+# General Public License (GPL) Version 2, available from the file
+# COPYING in the main directory of this source tree, or the
+# OpenIB.org BSD license below:
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in the
+#   documentation and/or other materials provided with the
+#   distribution.
+#
+# * Neither the name of the Intel Corporation nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+#
+# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+########################################################################
+#	Function API:
+#	UINT16 crc_t10dif_pcl(
+#		UINT16 init_crc,          // initial CRC value, 16 bits
+#		const unsigned char *buf, // buffer pointer to calculate CRC on
+#		UINT64 len                // buffer length in bytes (64-bit data)
+#	);
+#
+#	Reference paper titled "Fast CRC Computation for Generic
+#	Polynomials Using PCLMULQDQ Instruction"
+#	URL: http://www.intel.com/content/dam/www/public/us/en/documents
+#	/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+#
+#
+
+#include <linux/linkage.h>
+
+.text
+
+#define	arg1 %rdi
+#define	arg2 %rsi
+#define	arg3 %rdx
+
+#define	arg1_low32 %edi
+
+ENTRY(crc_t10dif_pcl)
+.align 16
+
+	# adjust the 16-bit initial_crc value, scale it to 32 bits
+	shl	$16, arg1_low32
+
+	# Allocate Stack Space
+	mov	%rsp, %rcx
+	sub	$16*2, %rsp
+	# align stack to 16 byte boundary
+	and	$~(0x10 - 1), %rsp
+
+	# check if smaller than 256
+	cmp	$256, arg3
+
+	# for sizes less than 256, we can't fold 128B at a time...
+	jl	_less_than_128
+
+
+	# load the initial crc value
+	movd	arg1_low32, %xmm10	# initial crc
+
+	# crc value does not need to be byte-reflected, but it needs
+	# to be moved to the high part of the register.
+	# because data will be byte-reflected and will align with
+	# initial crc at correct place.
+	pslldq	$12, %xmm10
+
+	movdqa	SHUF_MASK(%rip), %xmm11
+	# receive the initial 128B data, xor the initial crc value
+	movdqu	16*0(arg2), %xmm0
+	movdqu	16*1(arg2), %xmm1
+	movdqu	16*2(arg2), %xmm2
+	movdqu	16*3(arg2), %xmm3
+	movdqu	16*4(arg2), %xmm4
+	movdqu	16*5(arg2), %xmm5
+	movdqu	16*6(arg2), %xmm6
+	movdqu	16*7(arg2), %xmm7
+
+	pshufb	%xmm11, %xmm0
+	# XOR the initial_crc value
+	pxor	%xmm10, %xmm0
+	pshufb	%xmm11, %xmm1
+	pshufb	%xmm11, %xmm2
+	pshufb	%xmm11, %xmm3
+	pshufb	%xmm11, %xmm4
+	pshufb	%xmm11, %xmm5
+	pshufb	%xmm11, %xmm6
+	pshufb	%xmm11, %xmm7
+
+	movdqa	rk3(%rip), %xmm10	# xmm10 has rk3 and rk4
+					# imm value of pclmulqdq instruction
+					# will determine which constant to use
+
+	#################################################################
+	# we subtract 256 instead of 128 to save one instruction from the loop
+	sub	$256, arg3
+
+	# at this section of the code, there is 128*x+y (0 <= y < 128) bytes of
+	# buffer. The _fold_64_B_loop will fold 128B at a time
+	# until we have 128+y bytes of buffer
+
+
+	# fold 128B at a time. This section of the code folds 8 xmm
+	# registers in parallel
+_fold_64_B_loop:
+
+	# update the buffer pointer
+	add	$128, arg2		# buf += 128
+
+	movdqu	16*0(arg2), %xmm9
+	movdqu	16*1(arg2), %xmm12
+	pshufb	%xmm11, %xmm9
+	pshufb	%xmm11, %xmm12
+	movdqa	%xmm0, %xmm8
+	movdqa	%xmm1, %xmm13
+	pclmulqdq	$0x0, %xmm10, %xmm0
+	pclmulqdq	$0x11, %xmm10, %xmm8
+	pclmulqdq	$0x0, %xmm10, %xmm1
+	pclmulqdq	$0x11, %xmm10, %xmm13
+	pxor	%xmm9, %xmm0
+	xorps	%xmm8, %xmm0
+	pxor	%xmm12, %xmm1
+	xorps	%xmm13, %xmm1
+
+	movdqu	16*2(arg2), %xmm9
+	movdqu	16*3(arg2), %xmm12
+	pshufb	%xmm11, %xmm9
+	pshufb	%xmm11, %xmm12
+	movdqa	%xmm2, %xmm8
+	movdqa	%xmm3, %xmm13
+	pclmulqdq	$0x0, %xmm10, %xmm2
+	pclmulqdq	$0x11, %xmm10, %xmm8
+	pclmulqdq	$0x0, %xmm10, %xmm3
+	pclmulqdq	$0x11, %xmm10, %xmm13
+	pxor	%xmm9, %xmm2
+	xorps	%xmm8, %xmm2
+	pxor	%xmm12, %xmm3
+	xorps	%xmm13, %xmm3
+
+	movdqu	16*4(arg2), %xmm9
+	movdqu	16*5(arg2), %xmm12
+	pshufb	%xmm11, %xmm9
+	pshufb	%xmm11, %xmm12
+	movdqa	%xmm4, %xmm8
+	movdqa	%xmm5, %xmm13
+	pclmulqdq	$0x0, %xmm10, %xmm4
+	pclmulqdq	$0x11, %xmm10, %xmm8
+	pclmulqdq	$0x0, %xmm10, %xmm5
+	pclmulqdq	$0x11, %xmm10, %xmm13
+	pxor	%xmm9, %xmm4
+	xorps	%xmm8, %xmm4
+	pxor	%xmm12, %xmm5
+	xorps	%xmm13, %xmm5
+
+	movdqu	16*6(arg2), %xmm9
+	movdqu	16*7(arg2), %xmm12
+	pshufb	%xmm11, %xmm9
+	pshufb	%xmm11, %xmm12
+	movdqa	%xmm6, %xmm8
+	movdqa	%xmm7, %xmm13
+	pclmulqdq	$0x0, %xmm10, %xmm6
+	pclmulqdq	$0x11, %xmm10, %xmm8
+	pclmulqdq	$0x0, %xmm10, %xmm7
+	pclmulqdq	$0x11, %xmm10, %xmm13
+	pxor	%xmm9, %xmm6
+	xorps	%xmm8, %xmm6
+	pxor	%xmm12, %xmm7
+	xorps	%xmm13, %xmm7
+
+	sub	$128, arg3
+
+	# check if there is another 128B in the buffer to be able to fold
+	jge	_fold_64_B_loop
+	##################################################################
+
+
+	add	$128, arg2
+	# at this point, the buffer pointer is pointing at the last y Bytes
+	# of the buffer, and the 128B of folded data is in 8 of the xmm
+	# registers: xmm0 through xmm7
+
+
+	# fold the 8 xmm registers to 1 xmm register with different constants
+
+	movdqa	rk9(%rip), %xmm10
+	movdqa	%xmm0, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm0
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	xorps	%xmm0, %xmm7
+
+	movdqa	rk11(%rip), %xmm10
+	movdqa	%xmm1, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm1
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	xorps	%xmm1, %xmm7
+
+	movdqa	rk13(%rip), %xmm10
+	movdqa	%xmm2, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm2
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	pxor	%xmm2, %xmm7
+
+	movdqa	rk15(%rip), %xmm10
+	movdqa	%xmm3, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm3
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	xorps	%xmm3, %xmm7
+
+	movdqa	rk17(%rip), %xmm10
+	movdqa	%xmm4, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm4
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	pxor	%xmm4, %xmm7
+
+	movdqa	rk19(%rip), %xmm10
+	movdqa	%xmm5, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm5
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	xorps	%xmm5, %xmm7
+
+	movdqa	rk1(%rip), %xmm10	# xmm10 has rk1 and rk2
+					# imm value of pclmulqdq instruction
+					# will determine which constant to use
+	movdqa	%xmm6, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm6
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	pxor	%xmm6, %xmm7
+
+
+	# instead of 128, we add 128-16 to the loop counter to save 1 instruction
+	# from the loop; instead of a cmp instruction, we use the negative
+	# flag with the jl instruction
+	add	$128-16, arg3
+	jl	_final_reduction_for_128
+
+	# now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7
+	# and the rest is in memory. We can fold 16 bytes at a time if y>=16
+	# continue folding 16B at a time
+
+_16B_reduction_loop:
+	movdqa	%xmm7, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm7
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	movdqu	(arg2), %xmm0
+	pshufb	%xmm11, %xmm0
+	pxor	%xmm0, %xmm7
+	add	$16, arg2
+	sub	$16, arg3
+	# instead of a cmp instruction, we utilize the flags with the
+	# jge instruction equivalent of: cmp arg3, 16-16
+	# check if there is any more 16B in the buffer to be able to fold
+	jge	_16B_reduction_loop
+
+	# now we have 16+z bytes left to reduce, where 0 <= z < 16.
+	# first, we reduce the data in the xmm7 register
+
+
+_final_reduction_for_128:
+	# check if any more data to fold. If not, compute the CRC of
+	# the final 128 bits
+	add	$16, arg3
+	je	_128_done
+
+	# here we are getting data that is less than 16 bytes.
+	# since we know that there was data before the pointer, we can
+	# offset the input pointer before the actual point, to receive
+	# exactly 16 bytes. after that the registers need to be adjusted.
+_get_last_two_xmms:
+	movdqa	%xmm7, %xmm2
+
+	movdqu	-16(arg2, arg3), %xmm1
+	pshufb	%xmm11, %xmm1
+
+	# get rid of the extra data that was loaded before
+	# load the shift constant
+	lea	pshufb_shf_table+16(%rip), %rax
+	sub	arg3, %rax
+	movdqu	(%rax), %xmm0
+
+	# shift xmm2 to the left by arg3 bytes
+	pshufb	%xmm0, %xmm2
+
+	# shift xmm7 to the right by 16-arg3 bytes
+	pxor	mask1(%rip), %xmm0
+	pshufb	%xmm0, %xmm7
+	pblendvb	%xmm2, %xmm1	# xmm0 is implicit
+
+	# fold 16 Bytes
+	movdqa	%xmm1, %xmm2
+	movdqa	%xmm7, %xmm8
+	pclmulqdq	$0x11, %xmm10, %xmm7
+	pclmulqdq	$0x0, %xmm10, %xmm8
+	pxor	%xmm8, %xmm7
+	pxor	%xmm2, %xmm7
+
+_128_done:
+	# compute crc of a 128-bit value
+	movdqa	rk5(%rip), %xmm10	# rk5 and rk6 in xmm10
+	movdqa	%xmm7, %xmm0
+
+	# 64b fold
+	pclmulqdq	$0x1, %xmm10, %xmm7
+	pslldq	$8, %xmm0
+	pxor	%xmm0, %xmm7
+
+	# 32b fold
+	movdqa	%xmm7, %xmm0
+
+	pand	mask2(%rip), %xmm0
+
+	psrldq	$12, %xmm7
+	pclmulqdq	$0x10, %xmm10, %xmm7
+	pxor	%xmm0, %xmm7
+
+	# barrett reduction
+_barrett:
+	movdqa	rk7(%rip), %xmm10	# rk7 and rk8 in xmm10
+	movdqa	%xmm7, %xmm0
+	pclmulqdq	$0x01, %xmm10, %xmm7
+	pslldq	$4, %xmm7
+	pclmulqdq	$0x11, %xmm10, %xmm7
+
+	pslldq	$4, %xmm7
+	pxor	%xmm0, %xmm7
+	pextrd	$1, %xmm7, %eax
+
+_cleanup:
+	# scale the result back to 16 bits
+	shr	$16, %eax
+	mov	%rcx, %rsp
+	ret
+
+########################################################################
+
+.align 16
+_less_than_128:
+
+	# check if there is enough buffer to be able to fold 16B at a time
+	cmp	$32, arg3
+	jl	_less_than_32
+	movdqa	SHUF_MASK(%rip), %xmm11
+
+	# now if there is, load the constants
+	movdqa	rk1(%rip), %xmm10	# rk1 and rk2 in xmm10
+
+	movd	arg1_low32, %xmm0	# get the initial crc value
+	pslldq	$12, %xmm0		# align it to its correct place
+	movdqu	(arg2), %xmm7		# load the plaintext
+	pshufb	%xmm11, %xmm7		# byte-reflect the plaintext
+	pxor	%xmm0, %xmm7
+
+
+	# update the buffer pointer
+	add	$16, arg2
+
+	# update the counter. subtract 32 instead of 16 to save one
+	# instruction from the loop
+	sub	$32, arg3
+
+	jmp	_16B_reduction_loop
+
+
+.align 16
+_less_than_32:
+	# mov initial crc to the return value. this is necessary for
+	# zero-length buffers.
+	mov	arg1_low32, %eax
+	test	arg3, arg3
+	je	_cleanup
+
+	movdqa	SHUF_MASK(%rip), %xmm11
+
+	movd	arg1_low32, %xmm0	# get the initial crc value
+	pslldq	$12, %xmm0		# align it to its correct place
+
+	cmp	$16, arg3
+	je	_exact_16_left
+	jl	_less_than_16_left
+
+	movdqu	(arg2), %xmm7		# load the plaintext
+	pshufb	%xmm11, %xmm7		# byte-reflect the plaintext
+	pxor	%xmm0, %xmm7		# xor the initial crc value
+	add	$16, arg2
+	sub	$16, arg3
+	movdqa	rk1(%rip), %xmm10	# rk1 and rk2 in xmm10
+	jmp	_get_last_two_xmms
+
+
+.align 16
+_less_than_16_left:
+	# use stack space to load data less than 16 bytes, zero-out
+	# the 16B in memory first.
+
+	pxor	%xmm1, %xmm1
+	mov	%rsp, %r11
+	movdqa	%xmm1, (%r11)
+
+	cmp	$4, arg3
+	jl	_only_less_than_4
+
+	# backup the counter value
+	mov	arg3, %r9
+	cmp	$8, arg3
+	jl	_less_than_8_left
+
+	# load 8 Bytes
+	mov	(arg2), %rax
+	mov	%rax, (%r11)
+	add	$8, %r11
+	sub	$8, arg3
+	add	$8, arg2
+_less_than_8_left:
+
+	cmp	$4, arg3
+	jl	_less_than_4_left
+
+	# load 4 Bytes
+	mov	(arg2), %eax
+	mov	%eax, (%r11)
+	add	$4, %r11
+	sub	$4, arg3
+	add	$4, arg2
+_less_than_4_left:
+
+	cmp	$2, arg3
+	jl	_less_than_2_left
+
+	# load 2 Bytes
+	mov	(arg2), %ax
+	mov	%ax, (%r11)
+	add	$2, %r11
+	sub	$2, arg3
+	add	$2, arg2
+_less_than_2_left:
+	cmp	$1, arg3
+	jl	_zero_left
+
+	# load 1 Byte
+	mov	(arg2), %al
+	mov	%al, (%r11)
+_zero_left:
+	movdqa	(%rsp), %xmm7
+	pshufb	%xmm11, %xmm7
+	pxor	%xmm0, %xmm7	# xor the initial crc value
+
+	# shl r9, 4
+	lea	pshufb_shf_table+16(%rip), %rax
+	sub	%r9, %rax
+	movdqu	(%rax), %xmm0
+	pxor	mask1(%rip), %xmm0
+
+	pshufb	%xmm0, %xmm7
+	jmp	_128_done
+
+.align 16
+_exact_16_left:
+	movdqu	(arg2), %xmm7
+	pshufb	%xmm11, %xmm7
+	pxor	%xmm0, %xmm7	# xor the initial crc value
+
+	jmp	_128_done
+
+_only_less_than_4:
+	cmp	$3, arg3
+	jl	_only_less_than_3
+
+	# load 3 Bytes
+	mov	(arg2), %al
+	mov	%al, (%r11)
+
+	mov	1(arg2), %al
+	mov	%al, 1(%r11)
+
+	mov	2(arg2), %al
+	mov	%al, 2(%r11)
+
+	movdqa	(%rsp), %xmm7
+	pshufb	%xmm11, %xmm7
+	pxor	%xmm0, %xmm7	# xor the initial crc value
+
+	psrldq	$5, %xmm7
+
+	jmp	_barrett
+_only_less_than_3:
+	cmp	$2, arg3
+	jl	_only_less_than_2
+
+	# load 2 Bytes
+	mov	(arg2), %al
+	mov	%al, (%r11)
+
+	mov	1(arg2), %al
+	mov	%al, 1(%r11)
+
+	movdqa	(%rsp), %xmm7
+	pshufb	%xmm11, %xmm7
+	pxor	%xmm0, %xmm7	# xor the initial crc value
+
+	psrldq	$6, %xmm7
+
+	jmp	_barrett
+_only_less_than_2:
+
+	# load 1 Byte
+	mov	(arg2), %al
+	mov	%al, (%r11)
+
+	movdqa	(%rsp), %xmm7
+	pshufb	%xmm11, %xmm7
+	pxor	%xmm0, %xmm7	# xor the initial crc value
+
+	psrldq	$7, %xmm7
+
+	jmp	_barrett
+
+ENDPROC(crc_t10dif_pcl)
+
+.data
+
+# precomputed constants
+# these constants are precomputed from the poly:
+# 0x8bb70000 (0x8bb7 scaled to 32 bits)
+.align 16
+# Q = 0x18BB70000
+# rk1 = 2^(32*3) mod Q << 32
+# rk2 = 2^(32*5) mod Q << 32
+# rk3 = 2^(32*15) mod Q << 32
+# rk4 = 2^(32*17) mod Q << 32
+# rk5 = 2^(32*3) mod Q << 32
+# rk6 = 2^(32*2) mod Q << 32
+# rk7 = floor(2^64/Q)
+# rk8 = Q
+rk1:
+.quad 0x2d56000000000000
+rk2:
+.quad 0x06df000000000000
+rk3:
+.quad 0x9d9d000000000000
+rk4:
+.quad 0x7cf5000000000000
+rk5:
+.quad 0x2d56000000000000
+rk6:
+.quad 0x1368000000000000
+rk7:
+.quad 0x00000001f65a57f8
+rk8:
+.quad 0x000000018bb70000
+
+rk9:
+.quad 0xceae000000000000
+rk10:
+.quad 0xbfd6000000000000
+rk11:
+.quad 0x1e16000000000000
+rk12:
+.quad 0x713c000000000000
+rk13:
+.quad 0xf7f9000000000000
+rk14:
+.quad 0x80a6000000000000
+rk15:
+.quad 0x044c000000000000
+rk16:
+.quad 0xe658000000000000
+rk17:
+.quad 0xad18000000000000
+rk18:
+.quad 0xa497000000000000
+rk19:
+.quad 0x6ee3000000000000
+rk20:
+.quad 0xe7b5000000000000
+
+
+
+mask1:
+.octa 0x80808080808080808080808080808080
+mask2:
+.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
+
+SHUF_MASK:
+.octa 0x000102030405060708090A0B0C0D0E0F
+
+pshufb_shf_table:
+# use these values for shift constants for the pshufb instruction
+# different alignments result in values as shown:
+#	DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
+#	DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2
+#	DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3
+#	DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
+#	DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
+#	DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
+#	DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9  (16-7) / shr7
+#	DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8  (16-8) / shr8
+#	DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7  (16-9) / shr9
+#	DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6  (16-10) / shr10
+#	DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5  (16-11) / shr11
+#	DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4  (16-12) / shr12
+#	DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3  (16-13) / shr13
+#	DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2  (16-14) / shr14
+#	DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1  (16-15) / shr15
+.octa 0x8f8e8d8c8b8a89888786858483828100
+.octa 0x000e0d0c0b0a09080706050403020100
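The rk1-rk8 comments above define each constant as a power of x reduced modulo Q = 0x18BB70000 and shifted left 32 bits. As a sanity check of that stated definition, here is a small user-space C sketch (not part of the patch) that recomputes rk1 and rk2 by repeated multiply-by-x reduction over GF(2):

#include <stdint.h>
#include <stdio.h>

/* Compute x^e mod Q(x) over GF(2), with Q = 0x18BB70000 (degree 32). */
static uint64_t xpow_mod(unsigned int e)
{
	uint64_t r = 1;			/* the polynomial "1" */

	while (e--) {
		r <<= 1;		/* multiply by x */
		if (r & (1ULL << 32))	/* degree reached 32: reduce by Q */
			r ^= 0x18BB70000ULL;
	}
	return r;			/* remainder, degree < 32 */
}

int main(void)
{
	/* per the comments above: rk1 = 2^(32*3) mod Q << 32, etc. */
	printf("rk1 = %#018llx\n", (unsigned long long)(xpow_mod(32 * 3) << 32));
	printf("rk2 = %#018llx\n", (unsigned long long)(xpow_mod(32 * 5) << 32));
	return 0;
}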
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
new file mode 100644
index 000000000000..7845d7fd54c0
--- /dev/null
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -0,0 +1,151 @@
+/*
+ * Cryptographic API.
+ *
+ * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-t10dif.h>
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/i387.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+
+asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
+				size_t len);
+
+struct chksum_desc_ctx {
+	__u16 crc;
+};
+
+/*
+ * Steps through buffer one byte at a time, calculates reflected
+ * crc using table.
+ */
+
+static int chksum_init(struct shash_desc *desc)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->crc = 0;
+
+	return 0;
+}
+
+static int chksum_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	if (irq_fpu_usable()) {
+		kernel_fpu_begin();
+		ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
+		kernel_fpu_end();
+	} else
+		ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
+	return 0;
+}
+
+static int chksum_final(struct shash_desc *desc, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	*(__u16 *)out = ctx->crc;
+	return 0;
+}
+
+
+static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
+			  u8 *out)
+{
+	if (irq_fpu_usable()) {
+		kernel_fpu_begin();
+		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+		kernel_fpu_end();
+	} else
+		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+	return 0;
+}
+
+static int chksum_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksum_finup(&ctx->crc, data, len, out);
+}
+
+static int chksum_digest(struct shash_desc *desc, const u8 *data,
+			 unsigned int length, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksum_finup(&ctx->crc, data, length, out);
+}
+
+static struct shash_alg alg = {
+	.digestsize		= CRC_T10DIF_DIGEST_SIZE,
+	.init			= chksum_init,
+	.update			= chksum_update,
+	.final			= chksum_final,
+	.finup			= chksum_finup,
+	.digest			= chksum_digest,
+	.descsize		= sizeof(struct chksum_desc_ctx),
+	.base			= {
+		.cra_name		= "crct10dif",
+		.cra_driver_name	= "crct10dif-pclmul",
+		.cra_priority		= 200,
+		.cra_blocksize		= CRC_T10DIF_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	}
+};
+
+static const struct x86_cpu_id crct10dif_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
+
+static int __init crct10dif_intel_mod_init(void)
+{
+	if (!x86_match_cpu(crct10dif_cpu_id))
+		return -ENODEV;
+
+	return crypto_register_shash(&alg);
+}
+
+static void __exit crct10dif_intel_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(crct10dif_intel_mod_init);
+module_exit(crct10dif_intel_mod_fini);
+
+MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
+MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("crct10dif");
+MODULE_ALIAS("crct10dif-pclmul");
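Once this module (or the generic fallback) is registered, callers reach it through the regular shash interface rather than calling crc_t10dif_pcl() directly. A minimal sketch of such a caller, using the 3.12-era API in which struct shash_desc still carried a flags field (the helper name here is hypothetical):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Compute a T10 DIF CRC over a buffer through the "crct10dif" shash;
 * the crypto core selects crct10dif-pclmul (priority 200) when the CPU
 * supports PCLMULQDQ, and the generic implementation otherwise. */
static int crct10dif_digest(const u8 *data, unsigned int len, __u16 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("crct10dif", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_digest(desc, data, len, (u8 *)out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}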
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
index 7ea79c5fa1f2..492b29802f57 100644
--- a/arch/x86/include/asm/xor_avx.h
+++ b/arch/x86/include/asm/xor_avx.h
@@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = {
 
 #define AVX_XOR_SPEED \
 do { \
-	if (cpu_has_avx) \
+	if (cpu_has_avx && cpu_has_osxsave) \
 		xor_speed(&xor_block_avx); \
 } while (0)
 
 #define AVX_SELECT(FASTEST) \
-	(cpu_has_avx ? &xor_block_avx : FASTEST)
+	(cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
 
 #else
 
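The xor_avx.h change above is a correctness fix: the AVX CPUID bit only says the CPU has the instructions, while the OSXSAVE bit says the OS has enabled XSAVE state management for the extended registers; both must hold before touching ymm state. A user-space C sketch of the same check (a complete check would additionally verify XCR0 bits 1 and 2 via xgetbv):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__get_cpuid(1, &eax, &ebx, &ecx, &edx);

	int avx     = (ecx >> 28) & 1;	/* CPUID.1:ECX bit 28 - AVX */
	int osxsave = (ecx >> 27) & 1;	/* CPUID.1:ECX bit 27 - OSXSAVE */

	printf("avx=%d osxsave=%d usable=%d\n", avx, osxsave, avx && osxsave);
	return 0;
}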
diff --git a/crypto/Kconfig b/crypto/Kconfig
index aca01164f002..69ce573f1224 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -376,6 +376,25 @@ config CRYPTO_CRC32_PCLMUL
 	  which will enable any routine to use the CRC-32-IEEE 802.3 checksum
 	  and gain better performance as compared with the table implementation.
 
+config CRYPTO_CRCT10DIF
+	tristate "CRCT10DIF algorithm"
+	select CRYPTO_HASH
+	help
+	  CRC T10 Data Integrity Field computation is being cast as
+	  a crypto transform.  This allows for faster CRC T10 DIF
+	  transforms to be used if they are available.
+
+config CRYPTO_CRCT10DIF_PCLMUL
+	tristate "CRCT10DIF PCLMULQDQ hardware acceleration"
+	depends on X86 && 64BIT && CRC_T10DIF
+	select CRYPTO_HASH
+	help
+	  For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
+	  CRC T10 DIF computation can be hardware accelerated using the
+	  PCLMULQDQ instruction.  This option will create the
+	  'crct10dif-pclmul' module, which is faster when computing the
+	  crct10dif checksum as compared with the generic table implementation.
+
 config CRYPTO_GHASH
 	tristate "GHASH digest algorithm"
 	select CRYPTO_GF128MUL
diff --git a/crypto/Makefile b/crypto/Makefile
index 2ba0df2f908f..2d5ed08a239f 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 47f2e5c71759..fd0d6b454975 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -62,7 +62,7 @@ static inline u8 byte(const u32 x, const unsigned n)
 
 static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
 
-const u32 crypto_ft_tab[4][256] = {
+__visible const u32 crypto_ft_tab[4][256] = {
 	{
 		0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
 		0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
@@ -326,7 +326,7 @@ const u32 crypto_ft_tab[4][256] = {
 	}
 };
 
-const u32 crypto_fl_tab[4][256] = {
+__visible const u32 crypto_fl_tab[4][256] = {
 	{
 		0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
 		0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -590,7 +590,7 @@ const u32 crypto_fl_tab[4][256] = {
 	}
 };
 
-const u32 crypto_it_tab[4][256] = {
+__visible const u32 crypto_it_tab[4][256] = {
 	{
 		0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
 		0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
@@ -854,7 +854,7 @@ const u32 crypto_it_tab[4][256] = {
 	}
 };
 
-const u32 crypto_il_tab[4][256] = {
+__visible const u32 crypto_il_tab[4][256] = {
 	{
 		0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
 		0x00000030, 0x00000036, 0x000000a5, 0x00000038,
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 75efa2052305..26bcd7a2d6b4 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -388,8 +388,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	/* round 6 */
 	subL[7] ^= subL[1]; subR[7] ^= subR[1];
 	subL[1] ^= subR[1] & ~subR[9];
-	dw = subL[1] & subL[9],
+	dw = subL[1] & subL[9];
 	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
 	/* round 8 */
 	subL[11] ^= subL[1]; subR[11] ^= subR[1];
 	/* round 10 */
@@ -397,8 +397,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	/* round 12 */
 	subL[15] ^= subL[1]; subR[15] ^= subR[1];
 	subL[1] ^= subR[1] & ~subR[17];
-	dw = subL[1] & subL[17],
+	dw = subL[1] & subL[17];
 	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
 	/* round 14 */
 	subL[19] ^= subL[1]; subR[19] ^= subR[1];
 	/* round 16 */
@@ -413,8 +413,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 		kw4l = subL[25]; kw4r = subR[25];
 	} else {
 		subL[1] ^= subR[1] & ~subR[25];
-		dw = subL[1] & subL[25],
+		dw = subL[1] & subL[25];
 		subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
 		/* round 20 */
 		subL[27] ^= subL[1]; subR[27] ^= subR[1];
 		/* round 22 */
@@ -433,8 +433,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 		/* round 19 */
 		subL[26] ^= kw4l; subR[26] ^= kw4r;
 		kw4l ^= kw4r & ~subR[24];
-		dw = kw4l & subL[24],
+		dw = kw4l & subL[24];
 		kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
 	}
 	/* round 17 */
 	subL[22] ^= kw4l; subR[22] ^= kw4r;
@@ -443,8 +443,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	/* round 13 */
 	subL[18] ^= kw4l; subR[18] ^= kw4r;
 	kw4l ^= kw4r & ~subR[16];
-	dw = kw4l & subL[16],
+	dw = kw4l & subL[16];
 	kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
 	/* round 11 */
 	subL[14] ^= kw4l; subR[14] ^= kw4r;
 	/* round 9 */
@@ -452,8 +452,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	/* round 7 */
 	subL[10] ^= kw4l; subR[10] ^= kw4r;
 	kw4l ^= kw4r & ~subR[8];
-	dw = kw4l & subL[8],
+	dw = kw4l & subL[8];
 	kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
 	/* round 5 */
 	subL[6] ^= kw4l; subR[6] ^= kw4r;
 	/* round 3 */
@@ -477,8 +477,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */
 	SUBKEY_R(6) = subR[5] ^ subR[7];
 	tl = subL[10] ^ (subR[10] & ~subR[8]);
-	dw = tl & subL[8],  /* FL(kl1) */
+	dw = tl & subL[8];  /* FL(kl1) */
 	tr = subR[10] ^ rol32(dw, 1);
 	SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
 	SUBKEY_R(7) = subR[6] ^ tr;
 	SUBKEY_L(8) = subL[8]; /* FL(kl1) */
@@ -486,8 +486,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */
 	SUBKEY_R(9) = subR[9];
 	tl = subL[7] ^ (subR[7] & ~subR[9]);
-	dw = tl & subL[9],  /* FLinv(kl2) */
+	dw = tl & subL[9];  /* FLinv(kl2) */
 	tr = subR[7] ^ rol32(dw, 1);
 	SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
 	SUBKEY_R(10) = tr ^ subR[11];
 	SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
@@ -499,8 +499,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */
 	SUBKEY_R(14) = subR[13] ^ subR[15];
 	tl = subL[18] ^ (subR[18] & ~subR[16]);
-	dw = tl & subL[16], /* FL(kl3) */
+	dw = tl & subL[16]; /* FL(kl3) */
 	tr = subR[18] ^ rol32(dw, 1);
 	SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
 	SUBKEY_R(15) = subR[14] ^ tr;
 	SUBKEY_L(16) = subL[16]; /* FL(kl3) */
@@ -508,8 +508,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */
 	SUBKEY_R(17) = subR[17];
 	tl = subL[15] ^ (subR[15] & ~subR[17]);
-	dw = tl & subL[17], /* FLinv(kl4) */
+	dw = tl & subL[17]; /* FLinv(kl4) */
 	tr = subR[15] ^ rol32(dw, 1);
 	SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
 	SUBKEY_R(18) = tr ^ subR[19];
 	SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
@@ -527,8 +527,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 		SUBKEY_R(24) = subR[24] ^ subR[23];
 	} else {
 		tl = subL[26] ^ (subR[26] & ~subR[24]);
-		dw = tl & subL[24], /* FL(kl5) */
+		dw = tl & subL[24]; /* FL(kl5) */
 		tr = subR[26] ^ rol32(dw, 1);
 		SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
 		SUBKEY_R(23) = subR[22] ^ tr;
 		SUBKEY_L(24) = subL[24]; /* FL(kl5) */
@@ -536,8 +536,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 		SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */
 		SUBKEY_R(25) = subR[25];
 		tl = subL[23] ^ (subR[23] & ~subR[25]);
-		dw = tl & subL[25], /* FLinv(kl6) */
+		dw = tl & subL[25]; /* FLinv(kl6) */
 		tr = subR[23] ^ rol32(dw, 1);
 		SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
 		SUBKEY_R(26) = tr ^ subR[27];
543 | SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ | 543 | SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ |
diff --git a/crypto/cast_common.c b/crypto/cast_common.c index a15f523d5f56..117dd8250f27 100644 --- a/crypto/cast_common.c +++ b/crypto/cast_common.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <crypto/cast_common.h> | 16 | #include <crypto/cast_common.h> |
17 | 17 | ||
18 | const u32 cast_s1[256] = { | 18 | __visible const u32 cast_s1[256] = { |
19 | 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, | 19 | 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, |
20 | 0x9c004dd3, 0x6003e540, 0xcf9fc949, | 20 | 0x9c004dd3, 0x6003e540, 0xcf9fc949, |
21 | 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, | 21 | 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, |
@@ -83,7 +83,7 @@ const u32 cast_s1[256] = { | |||
83 | }; | 83 | }; |
84 | EXPORT_SYMBOL_GPL(cast_s1); | 84 | EXPORT_SYMBOL_GPL(cast_s1); |
85 | 85 | ||
86 | const u32 cast_s2[256] = { | 86 | __visible const u32 cast_s2[256] = { |
87 | 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, | 87 | 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, |
88 | 0xeec5207a, 0x55889c94, 0x72fc0651, | 88 | 0xeec5207a, 0x55889c94, 0x72fc0651, |
89 | 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, | 89 | 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, |
@@ -151,7 +151,7 @@ const u32 cast_s2[256] = { | |||
151 | }; | 151 | }; |
152 | EXPORT_SYMBOL_GPL(cast_s2); | 152 | EXPORT_SYMBOL_GPL(cast_s2); |
153 | 153 | ||
154 | const u32 cast_s3[256] = { | 154 | __visible const u32 cast_s3[256] = { |
155 | 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, | 155 | 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, |
156 | 0x369fe44b, 0x8c1fc644, 0xaececa90, | 156 | 0x369fe44b, 0x8c1fc644, 0xaececa90, |
157 | 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, | 157 | 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, |
@@ -219,7 +219,7 @@ const u32 cast_s3[256] = { | |||
219 | }; | 219 | }; |
220 | EXPORT_SYMBOL_GPL(cast_s3); | 220 | EXPORT_SYMBOL_GPL(cast_s3); |
221 | 221 | ||
222 | const u32 cast_s4[256] = { | 222 | __visible const u32 cast_s4[256] = { |
223 | 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, | 223 | 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, |
224 | 0x64ad8c57, 0x85510443, 0xfa020ed1, | 224 | 0x64ad8c57, 0x85510443, 0xfa020ed1, |
225 | 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, | 225 | 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, |
diff --git a/crypto/crct10dif.c b/crypto/crct10dif.c new file mode 100644 index 000000000000..92aca96d6b98 --- /dev/null +++ b/crypto/crct10dif.c | |||
@@ -0,0 +1,178 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * T10 Data Integrity Field CRC16 Crypto Transform | ||
5 | * | ||
6 | * Copyright (c) 2007 Oracle Corporation. All rights reserved. | ||
7 | * Written by Martin K. Petersen <martin.petersen@oracle.com> | ||
8 | * Copyright (C) 2013 Intel Corporation | ||
9 | * Author: Tim Chen <tim.c.chen@linux.intel.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the Free | ||
13 | * Software Foundation; either version 2 of the License, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
17 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
19 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
20 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
21 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
22 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
23 | * SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/crc-t10dif.h> | ||
30 | #include <crypto/internal/hash.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/string.h> | ||
33 | #include <linux/kernel.h> | ||
34 | |||
35 | struct chksum_desc_ctx { | ||
36 | __u16 crc; | ||
37 | }; | ||
38 | |||
39 | /* Table generated using the following polynomial: | ||
40 | * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 | ||
41 | * gt: 0x8bb7 | ||
42 | */ | ||
43 | static const __u16 t10_dif_crc_table[256] = { | ||
44 | 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, | ||
45 | 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, | ||
46 | 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, | ||
47 | 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, | ||
48 | 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, | ||
49 | 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, | ||
50 | 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, | ||
51 | 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, | ||
52 | 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, | ||
53 | 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, | ||
54 | 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, | ||
55 | 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, | ||
56 | 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, | ||
57 | 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, | ||
58 | 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, | ||
59 | 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, | ||
60 | 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, | ||
61 | 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, | ||
62 | 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, | ||
63 | 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, | ||
64 | 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, | ||
65 | 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, | ||
66 | 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, | ||
67 | 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, | ||
68 | 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, | ||
69 | 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, | ||
70 | 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, | ||
71 | 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, | ||
72 | 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, | ||
73 | 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, | ||
74 | 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, | ||
75 | 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 | ||
76 | }; | ||
77 | |||
78 | __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len) | ||
79 | { | ||
80 | unsigned int i; | ||
81 | |||
82 | for (i = 0 ; i < len ; i++) | ||
83 | crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; | ||
84 | |||
85 | return crc; | ||
86 | } | ||
87 | EXPORT_SYMBOL(crc_t10dif_generic); | ||
88 | |||
89 | /* | ||
90 | * Steps through the buffer one byte at a time, calculating the | ||
91 | * CRC with the lookup table above. | ||
92 | */ | ||
93 | |||
94 | static int chksum_init(struct shash_desc *desc) | ||
95 | { | ||
96 | struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); | ||
97 | |||
98 | ctx->crc = 0; | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static int chksum_update(struct shash_desc *desc, const u8 *data, | ||
104 | unsigned int length) | ||
105 | { | ||
106 | struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); | ||
107 | |||
108 | ctx->crc = crc_t10dif_generic(ctx->crc, data, length); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int chksum_final(struct shash_desc *desc, u8 *out) | ||
113 | { | ||
114 | struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); | ||
115 | |||
116 | *(__u16 *)out = ctx->crc; | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, | ||
121 | u8 *out) | ||
122 | { | ||
123 | *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static int chksum_finup(struct shash_desc *desc, const u8 *data, | ||
128 | unsigned int len, u8 *out) | ||
129 | { | ||
130 | struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); | ||
131 | |||
132 | return __chksum_finup(&ctx->crc, data, len, out); | ||
133 | } | ||
134 | |||
135 | static int chksum_digest(struct shash_desc *desc, const u8 *data, | ||
136 | unsigned int length, u8 *out) | ||
137 | { | ||
138 | struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); | ||
139 | |||
140 | return __chksum_finup(&ctx->crc, data, length, out); | ||
141 | } | ||
142 | |||
143 | static struct shash_alg alg = { | ||
144 | .digestsize = CRC_T10DIF_DIGEST_SIZE, | ||
145 | .init = chksum_init, | ||
146 | .update = chksum_update, | ||
147 | .final = chksum_final, | ||
148 | .finup = chksum_finup, | ||
149 | .digest = chksum_digest, | ||
150 | .descsize = sizeof(struct chksum_desc_ctx), | ||
151 | .base = { | ||
152 | .cra_name = "crct10dif", | ||
153 | .cra_driver_name = "crct10dif-generic", | ||
154 | .cra_priority = 100, | ||
155 | .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, | ||
156 | .cra_module = THIS_MODULE, | ||
157 | } | ||
158 | }; | ||
159 | |||
160 | static int __init crct10dif_mod_init(void) | ||
161 | { | ||
162 | int ret; | ||
163 | |||
164 | ret = crypto_register_shash(&alg); | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | static void __exit crct10dif_mod_fini(void) | ||
169 | { | ||
170 | crypto_unregister_shash(&alg); | ||
171 | } | ||
172 | |||
173 | module_init(crct10dif_mod_init); | ||
174 | module_exit(crct10dif_mod_fini); | ||
175 | |||
176 | MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); | ||
177 | MODULE_DESCRIPTION("T10 DIF CRC calculation."); | ||
178 | MODULE_LICENSE("GPL"); | ||
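The transform above is a plain table-driven CRC16 over the T10 polynomial 0x8bb7, computed MSB-first with no reflection. As a sanity check, the following self-contained user-space sketch (an illustration, not part of the patch) regenerates the table from the polynomial and reproduces the "abc" vector that testmgr.h gains further down:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    static uint16_t tbl[256];

    /* rebuild the 256-entry table from the polynomial 0x8bb7 */
    static void gen_table(void)
    {
            unsigned int i, j;

            for (i = 0; i < 256; i++) {
                    uint16_t crc = (uint16_t)(i << 8);

                    for (j = 0; j < 8; j++)
                            crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
                                                 : (uint16_t)(crc << 1);
                    tbl[i] = crc;   /* tbl[1] == 0x8bb7, matching the table above */
            }
    }

    /* same byte-at-a-time loop as crc_t10dif_generic() */
    static uint16_t crc_t10dif(uint16_t crc, const unsigned char *buf, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    crc = (uint16_t)((crc << 8) ^ tbl[((crc >> 8) ^ buf[i]) & 0xff]);
            return crc;
    }

    int main(void)
    {
            gen_table();
            /* prints 443b, i.e. bytes 3b 44 on a little-endian host */
            printf("%04x\n", crc_t10dif(0, (const unsigned char *)"abc", 3));
            return 0;
    }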
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index 3b2cf569c684..021d7fec6bc8 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c | |||
@@ -110,7 +110,7 @@ static const __be32 sbox0[256] = { | |||
110 | }; | 110 | }; |
111 | 111 | ||
112 | #undef Z | 112 | #undef Z |
113 | #define Z(x) cpu_to_be32((x << 27) | (x >> 5)) | 113 | #define Z(x) cpu_to_be32(((x & 0x1f) << 27) | (x >> 5)) |
114 | static const __be32 sbox1[256] = { | 114 | static const __be32 sbox1[256] = { |
115 | Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e), | 115 | Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e), |
116 | Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85), | 116 | Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85), |
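The added mask is what makes the macro well defined: Z() places an 8-bit constant, rotated right by 5, into a 32-bit word, so only bits 0-4 of x may move up to bits 27-31. Without the mask, bits 5-7 are shifted past bit 31 of a signed int, which clang flags. A stand-alone sketch of the intended operation, with hypothetical names:

    #include <stdint.h>

    /* ror32(x, 5) restricted to 8-bit inputs:
     * bits 0..4 of x land in bits 27..31, bits 5..7 in bits 0..2 */
    static uint32_t ror5_byte(uint8_t x)
    {
            return ((uint32_t)(x & 0x1f) << 27) | ((uint32_t)x >> 5);
    }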
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 7281b8a93ad3..79ca2278c2a3 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c | |||
@@ -124,3 +124,25 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, | |||
124 | scatterwalk_done(&walk, out, 0); | 124 | scatterwalk_done(&walk, out, 0); |
125 | } | 125 | } |
126 | EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); | 126 | EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); |
127 | |||
128 | int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes) | ||
129 | { | ||
130 | int offset = 0, n = 0; | ||
131 | |||
132 | /* num_bytes is too small */ | ||
133 | if (num_bytes < sg->length) | ||
134 | return -1; | ||
135 | |||
136 | do { | ||
137 | offset += sg->length; | ||
138 | n++; | ||
139 | sg = scatterwalk_sg_next(sg); | ||
140 | |||
141 | /* num_bytes is too large */ | ||
142 | if (unlikely(!sg && (num_bytes < offset))) | ||
143 | return -1; | ||
144 | } while (sg && (num_bytes > offset)); | ||
145 | |||
146 | return n; | ||
147 | } | ||
148 | EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen); | ||
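The new helper walks a scatterlist and returns how many entries are needed to cover num_bytes, or -1 when the chain and the byte count do not line up. A minimal caller sketch (names hypothetical; the nx fixes in this series use it to bound their sg lists):

    int n = scatterwalk_bytes_sglen(req->src, nbytes);

    if (n < 0)
            return -EINVAL; /* sg chain does not match nbytes */
    /* only the first n entries need to be mapped and walked */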
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 66d254ce0d11..25a5934f0e50 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -1174,6 +1174,10 @@ static int do_test(int m) | |||
1174 | ret += tcrypt_test("ghash"); | 1174 | ret += tcrypt_test("ghash"); |
1175 | break; | 1175 | break; |
1176 | 1176 | ||
1177 | case 47: | ||
1178 | ret += tcrypt_test("crct10dif"); | ||
1179 | break; | ||
1180 | |||
1177 | case 100: | 1181 | case 100: |
1178 | ret += tcrypt_test("hmac(md5)"); | 1182 | ret += tcrypt_test("hmac(md5)"); |
1179 | break; | 1183 | break; |
@@ -1498,6 +1502,10 @@ static int do_test(int m) | |||
1498 | test_hash_speed("crc32c", sec, generic_hash_speed_template); | 1502 | test_hash_speed("crc32c", sec, generic_hash_speed_template); |
1499 | if (mode > 300 && mode < 400) break; | 1503 | if (mode > 300 && mode < 400) break; |
1500 | 1504 | ||
1505 | case 320: | ||
1506 | test_hash_speed("crct10dif", sec, generic_hash_speed_template); | ||
1507 | if (mode > 300 && mode < 400) break; | ||
1508 | |||
1501 | case 399: | 1509 | case 399: |
1502 | break; | 1510 | break; |
1503 | 1511 | ||
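With these hooks in place the usual tcrypt interface applies: loading the module with mode=47 runs the new crct10dif correctness vectors, and mode=320 runs the generic hash speed template against whichever crct10dif implementation currently has the highest priority.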
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index ecddf921a9db..e091ef6e1791 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -2046,6 +2046,16 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2046 | } | 2046 | } |
2047 | } | 2047 | } |
2048 | }, { | 2048 | }, { |
2049 | .alg = "crct10dif", | ||
2050 | .test = alg_test_hash, | ||
2051 | .fips_allowed = 1, | ||
2052 | .suite = { | ||
2053 | .hash = { | ||
2054 | .vecs = crct10dif_tv_template, | ||
2055 | .count = CRCT10DIF_TEST_VECTORS | ||
2056 | } | ||
2057 | } | ||
2058 | }, { | ||
2049 | .alg = "cryptd(__driver-cbc-aes-aesni)", | 2059 | .alg = "cryptd(__driver-cbc-aes-aesni)", |
2050 | .test = alg_test_null, | 2060 | .test = alg_test_null, |
2051 | .fips_allowed = 1, | 2061 | .fips_allowed = 1, |
@@ -3224,7 +3234,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | |||
3224 | if (i >= 0) | 3234 | if (i >= 0) |
3225 | rc |= alg_test_descs[i].test(alg_test_descs + i, driver, | 3235 | rc |= alg_test_descs[i].test(alg_test_descs + i, driver, |
3226 | type, mask); | 3236 | type, mask); |
3227 | if (j >= 0) | 3237 | if (j >= 0 && j != i) |
3228 | rc |= alg_test_descs[j].test(alg_test_descs + j, driver, | 3238 | rc |= alg_test_descs[j].test(alg_test_descs + j, driver, |
3229 | type, mask); | 3239 | type, mask); |
3230 | 3240 | ||
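The j != i guard matters for algorithms whose cra_name and cra_driver_name are identical: in that case both lookups resolve to the same descriptor, and without the guard a single alg_test() call would run the same test twice.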
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 1e701bc075b9..7d44aa3d6b44 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -450,6 +450,39 @@ static struct hash_testvec rmd320_tv_template[] = { | |||
450 | } | 450 | } |
451 | }; | 451 | }; |
452 | 452 | ||
453 | #define CRCT10DIF_TEST_VECTORS 3 | ||
454 | static struct hash_testvec crct10dif_tv_template[] = { | ||
455 | { | ||
456 | .plaintext = "abc", | ||
457 | .psize = 3, | ||
458 | #ifdef __LITTLE_ENDIAN | ||
459 | .digest = "\x3b\x44", | ||
460 | #else | ||
461 | .digest = "\x44\x3b", | ||
462 | #endif | ||
463 | }, { | ||
464 | .plaintext = "1234567890123456789012345678901234567890" | ||
465 | "123456789012345678901234567890123456789", | ||
466 | .psize = 79, | ||
467 | #ifdef __LITTLE_ENDIAN | ||
468 | .digest = "\x70\x4b", | ||
469 | #else | ||
470 | .digest = "\x4b\x70", | ||
471 | #endif | ||
472 | }, { | ||
473 | .plaintext = | ||
474 | "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd", | ||
475 | .psize = 56, | ||
476 | #ifdef __LITTLE_ENDIAN | ||
477 | .digest = "\xe3\x9c", | ||
478 | #else | ||
479 | .digest = "\x9c\xe3", | ||
480 | #endif | ||
481 | .np = 2, | ||
482 | .tap = { 28, 28 } | ||
483 | } | ||
484 | }; | ||
485 | |||
453 | /* | 486 | /* |
454 | * SHA1 test vectors from FIPS PUB 180-1 | 487 | * SHA1 test vectors from FIPS PUB 180-1
455 | * Long vector from CAVS 5.0 | 488 | * Long vector from CAVS 5.0 |
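Two details of the crct10dif vectors above are worth noting. The endian-conditional digests exist because chksum_final() stores the raw u16 in CPU byte order, so the expected bytes swap with the host. And the .np = 2 / .tap = { 28, 28 } annotation on the last vector feeds the 56-byte input as two 28-byte updates, exercising the update()/finup() path in addition to the one-shot digest() path.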
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 40a865449f35..0aa9d91daef5 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -153,12 +153,12 @@ config HW_RANDOM_IXP4XX | |||
153 | 153 | ||
154 | config HW_RANDOM_OMAP | 154 | config HW_RANDOM_OMAP |
155 | tristate "OMAP Random Number Generator support" | 155 | tristate "OMAP Random Number Generator support" |
156 | depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2) | 156 | depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2PLUS) |
157 | default HW_RANDOM | 157 | default HW_RANDOM |
158 | ---help--- | 158 | ---help--- |
159 | This driver provides kernel-side support for the Random Number | 159 | This driver provides kernel-side support for the Random Number |
160 | Generator hardware found on OMAP16xx and OMAP24xx multimedia | 160 | Generator hardware found on OMAP16xx, OMAP2/3/4/5 and AM33xx/AM43xx |
161 | processors. | 161 | multimedia processors. |
162 | 162 | ||
163 | To compile this driver as a module, choose M here: the | 163 | To compile this driver as a module, choose M here: the |
164 | module will be called omap-rng. | 164 | module will be called omap-rng. |
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 19a12ac64a9e..6a86b6f56af2 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c | |||
@@ -164,7 +164,9 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) | |||
164 | goto out; | 164 | goto out; |
165 | } | 165 | } |
166 | 166 | ||
167 | clk_prepare_enable(mxc_rng->clk); | 167 | err = clk_prepare_enable(mxc_rng->clk); |
168 | if (err) | ||
169 | goto out; | ||
168 | 170 | ||
169 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 171 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
170 | mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); | 172 | mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); |
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 6843ec87b98b..9b89ff4881de 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c | |||
@@ -24,57 +24,131 @@ | |||
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
27 | #include <linux/of.h> | ||
28 | #include <linux/of_device.h> | ||
29 | #include <linux/of_address.h> | ||
30 | #include <linux/interrupt.h> | ||
27 | 31 | ||
28 | #include <asm/io.h> | 32 | #include <asm/io.h> |
29 | 33 | ||
30 | #define RNG_OUT_REG 0x00 /* Output register */ | 34 | #define RNG_REG_STATUS_RDY (1 << 0) |
31 | #define RNG_STAT_REG 0x04 /* Status register | 35 | |
32 | [0] = STAT_BUSY */ | 36 | #define RNG_REG_INTACK_RDY_MASK (1 << 0) |
33 | #define RNG_ALARM_REG 0x24 /* Alarm register | 37 | #define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK (1 << 1) |
34 | [7:0] = ALARM_COUNTER */ | 38 | #define RNG_SHUTDOWN_OFLO_MASK (1 << 1) |
35 | #define RNG_CONFIG_REG 0x28 /* Configuration register | 39 | |
36 | [11:6] = RESET_COUNT | 40 | #define RNG_CONTROL_STARTUP_CYCLES_SHIFT 16 |
37 | [5:3] = RING2_DELAY | 41 | #define RNG_CONTROL_STARTUP_CYCLES_MASK (0xffff << 16) |
38 | [2:0] = RING1_DELAY */ | 42 | #define RNG_CONTROL_ENABLE_TRNG_SHIFT 10 |
39 | #define RNG_REV_REG 0x3c /* Revision register | 43 | #define RNG_CONTROL_ENABLE_TRNG_MASK (1 << 10) |
40 | [7:0] = REV_NB */ | 44 | |
41 | #define RNG_MASK_REG 0x40 /* Mask and reset register | 45 | #define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT 16 |
42 | [2] = IT_EN | 46 | #define RNG_CONFIG_MAX_REFIL_CYCLES_MASK (0xffff << 16) |
43 | [1] = SOFTRESET | 47 | #define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT 0 |
44 | [0] = AUTOIDLE */ | 48 | #define RNG_CONFIG_MIN_REFIL_CYCLES_MASK (0xff << 0) |
45 | #define RNG_SYSSTATUS 0x44 /* System status | 49 | |
46 | [0] = RESETDONE */ | 50 | #define RNG_CONTROL_STARTUP_CYCLES 0xff |
51 | #define RNG_CONFIG_MIN_REFIL_CYCLES 0x21 | ||
52 | #define RNG_CONFIG_MAX_REFIL_CYCLES 0x22 | ||
53 | |||
54 | #define RNG_ALARMCNT_ALARM_TH_SHIFT 0x0 | ||
55 | #define RNG_ALARMCNT_ALARM_TH_MASK (0xff << 0) | ||
56 | #define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT 16 | ||
57 | #define RNG_ALARMCNT_SHUTDOWN_TH_MASK (0x1f << 16) | ||
58 | #define RNG_ALARM_THRESHOLD 0xff | ||
59 | #define RNG_SHUTDOWN_THRESHOLD 0x4 | ||
60 | |||
61 | #define RNG_REG_FROENABLE_MASK 0xffffff | ||
62 | #define RNG_REG_FRODETUNE_MASK 0xffffff | ||
63 | |||
64 | #define OMAP2_RNG_OUTPUT_SIZE 0x4 | ||
65 | #define OMAP4_RNG_OUTPUT_SIZE 0x8 | ||
66 | |||
67 | enum { | ||
68 | RNG_OUTPUT_L_REG = 0, | ||
69 | RNG_OUTPUT_H_REG, | ||
70 | RNG_STATUS_REG, | ||
71 | RNG_INTMASK_REG, | ||
72 | RNG_INTACK_REG, | ||
73 | RNG_CONTROL_REG, | ||
74 | RNG_CONFIG_REG, | ||
75 | RNG_ALARMCNT_REG, | ||
76 | RNG_FROENABLE_REG, | ||
77 | RNG_FRODETUNE_REG, | ||
78 | RNG_ALARMMASK_REG, | ||
79 | RNG_ALARMSTOP_REG, | ||
80 | RNG_REV_REG, | ||
81 | RNG_SYSCONFIG_REG, | ||
82 | }; | ||
83 | |||
84 | static const u16 reg_map_omap2[] = { | ||
85 | [RNG_OUTPUT_L_REG] = 0x0, | ||
86 | [RNG_STATUS_REG] = 0x4, | ||
87 | [RNG_CONFIG_REG] = 0x28, | ||
88 | [RNG_REV_REG] = 0x3c, | ||
89 | [RNG_SYSCONFIG_REG] = 0x40, | ||
90 | }; | ||
47 | 91 | ||
92 | static const u16 reg_map_omap4[] = { | ||
93 | [RNG_OUTPUT_L_REG] = 0x0, | ||
94 | [RNG_OUTPUT_H_REG] = 0x4, | ||
95 | [RNG_STATUS_REG] = 0x8, | ||
96 | [RNG_INTMASK_REG] = 0xc, | ||
97 | [RNG_INTACK_REG] = 0x10, | ||
98 | [RNG_CONTROL_REG] = 0x14, | ||
99 | [RNG_CONFIG_REG] = 0x18, | ||
100 | [RNG_ALARMCNT_REG] = 0x1c, | ||
101 | [RNG_FROENABLE_REG] = 0x20, | ||
102 | [RNG_FRODETUNE_REG] = 0x24, | ||
103 | [RNG_ALARMMASK_REG] = 0x28, | ||
104 | [RNG_ALARMSTOP_REG] = 0x2c, | ||
105 | [RNG_REV_REG] = 0x1FE0, | ||
106 | [RNG_SYSCONFIG_REG] = 0x1FE4, | ||
107 | }; | ||
108 | |||
109 | struct omap_rng_dev; | ||
48 | /** | 110 | /** |
49 | * struct omap_rng_private_data - RNG IP block-specific data | 111 | * struct omap_rng_pdata - RNG IP block-specific data |
50 | * @base: virtual address of the beginning of the RNG IP block registers | 112 | * @regs: Pointer to the register offsets structure. |
51 | * @mem_res: struct resource * for the IP block registers physical memory | 113 | * @data_size: No. of bytes in RNG output. |
114 | * @data_present: Callback to determine if data is available. | ||
115 | * @init: Callback for IP specific initialization sequence. | ||
116 | * @cleanup: Callback for IP specific cleanup sequence. | ||
52 | */ | 117 | */ |
53 | struct omap_rng_private_data { | 118 | struct omap_rng_pdata { |
54 | void __iomem *base; | 119 | u16 *regs; |
55 | struct resource *mem_res; | 120 | u32 data_size; |
121 | u32 (*data_present)(struct omap_rng_dev *priv); | ||
122 | int (*init)(struct omap_rng_dev *priv); | ||
123 | void (*cleanup)(struct omap_rng_dev *priv); | ||
56 | }; | 124 | }; |
57 | 125 | ||
58 | static inline u32 omap_rng_read_reg(struct omap_rng_private_data *priv, int reg) | 126 | struct omap_rng_dev { |
127 | void __iomem *base; | ||
128 | struct device *dev; | ||
129 | const struct omap_rng_pdata *pdata; | ||
130 | }; | ||
131 | |||
132 | static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg) | ||
59 | { | 133 | { |
60 | return __raw_readl(priv->base + reg); | 134 | return __raw_readl(priv->base + priv->pdata->regs[reg]); |
61 | } | 135 | } |
62 | 136 | ||
63 | static inline void omap_rng_write_reg(struct omap_rng_private_data *priv, | 137 | static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg, |
64 | int reg, u32 val) | 138 | u32 val) |
65 | { | 139 | { |
66 | __raw_writel(val, priv->base + reg); | 140 | __raw_writel(val, priv->base + priv->pdata->regs[reg]); |
67 | } | 141 | } |
68 | 142 | ||
69 | static int omap_rng_data_present(struct hwrng *rng, int wait) | 143 | static int omap_rng_data_present(struct hwrng *rng, int wait) |
70 | { | 144 | { |
71 | struct omap_rng_private_data *priv; | 145 | struct omap_rng_dev *priv; |
72 | int data, i; | 146 | int data, i; |
73 | 147 | ||
74 | priv = (struct omap_rng_private_data *)rng->priv; | 148 | priv = (struct omap_rng_dev *)rng->priv; |
75 | 149 | ||
76 | for (i = 0; i < 20; i++) { | 150 | for (i = 0; i < 20; i++) { |
77 | data = omap_rng_read_reg(priv, RNG_STAT_REG) ? 0 : 1; | 151 | data = priv->pdata->data_present(priv); |
78 | if (data || !wait) | 152 | if (data || !wait) |
79 | break; | 153 | break; |
80 | /* RNG produces data fast enough (2+ MBit/sec, even | 154 | /* RNG produces data fast enough (2+ MBit/sec, even |
@@ -89,27 +163,212 @@ static int omap_rng_data_present(struct hwrng *rng, int wait) | |||
89 | 163 | ||
90 | static int omap_rng_data_read(struct hwrng *rng, u32 *data) | 164 | static int omap_rng_data_read(struct hwrng *rng, u32 *data) |
91 | { | 165 | { |
92 | struct omap_rng_private_data *priv; | 166 | struct omap_rng_dev *priv; |
167 | u32 data_size, i; | ||
168 | |||
169 | priv = (struct omap_rng_dev *)rng->priv; | ||
170 | data_size = priv->pdata->data_size; | ||
171 | |||
172 | for (i = 0; i < data_size / sizeof(u32); i++) | ||
173 | data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i); | ||
174 | |||
175 | if (priv->pdata->regs[RNG_INTACK_REG]) | ||
176 | omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK); | ||
177 | return data_size; | ||
178 | } | ||
179 | |||
180 | static int omap_rng_init(struct hwrng *rng) | ||
181 | { | ||
182 | struct omap_rng_dev *priv; | ||
93 | 183 | ||
94 | priv = (struct omap_rng_private_data *)rng->priv; | 184 | priv = (struct omap_rng_dev *)rng->priv; |
185 | return priv->pdata->init(priv); | ||
186 | } | ||
95 | 187 | ||
96 | *data = omap_rng_read_reg(priv, RNG_OUT_REG); | 188 | static void omap_rng_cleanup(struct hwrng *rng) |
189 | { | ||
190 | struct omap_rng_dev *priv; | ||
97 | 191 | ||
98 | return sizeof(u32); | 192 | priv = (struct omap_rng_dev *)rng->priv; |
193 | priv->pdata->cleanup(priv); | ||
99 | } | 194 | } |
100 | 195 | ||
101 | static struct hwrng omap_rng_ops = { | 196 | static struct hwrng omap_rng_ops = { |
102 | .name = "omap", | 197 | .name = "omap", |
103 | .data_present = omap_rng_data_present, | 198 | .data_present = omap_rng_data_present, |
104 | .data_read = omap_rng_data_read, | 199 | .data_read = omap_rng_data_read, |
200 | .init = omap_rng_init, | ||
201 | .cleanup = omap_rng_cleanup, | ||
202 | }; | ||
203 | |||
204 | static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv) | ||
205 | { | ||
206 | return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1; | ||
207 | } | ||
208 | |||
209 | static int omap2_rng_init(struct omap_rng_dev *priv) | ||
210 | { | ||
211 | omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1); | ||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static void omap2_rng_cleanup(struct omap_rng_dev *priv) | ||
216 | { | ||
217 | omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0); | ||
218 | } | ||
219 | |||
220 | static struct omap_rng_pdata omap2_rng_pdata = { | ||
221 | .regs = (u16 *)reg_map_omap2, | ||
222 | .data_size = OMAP2_RNG_OUTPUT_SIZE, | ||
223 | .data_present = omap2_rng_data_present, | ||
224 | .init = omap2_rng_init, | ||
225 | .cleanup = omap2_rng_cleanup, | ||
105 | }; | 226 | }; |
106 | 227 | ||
228 | #if defined(CONFIG_OF) | ||
229 | static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) | ||
230 | { | ||
231 | return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; | ||
232 | } | ||
233 | |||
234 | static int omap4_rng_init(struct omap_rng_dev *priv) | ||
235 | { | ||
236 | u32 val; | ||
237 | |||
238 | /* Return if RNG is already running. */ | ||
239 | if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK) | ||
240 | return 0; | ||
241 | |||
242 | val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT; | ||
243 | val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT; | ||
244 | omap_rng_write(priv, RNG_CONFIG_REG, val); | ||
245 | |||
246 | omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0); | ||
247 | omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK); | ||
248 | val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT; | ||
249 | val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT; | ||
250 | omap_rng_write(priv, RNG_ALARMCNT_REG, val); | ||
251 | |||
252 | val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT; | ||
253 | val |= RNG_CONTROL_ENABLE_TRNG_MASK; | ||
254 | omap_rng_write(priv, RNG_CONTROL_REG, val); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static void omap4_rng_cleanup(struct omap_rng_dev *priv) | ||
260 | { | ||
261 | int val; | ||
262 | |||
263 | val = omap_rng_read(priv, RNG_CONTROL_REG); | ||
264 | val &= ~RNG_CONTROL_ENABLE_TRNG_MASK; | ||
265 | omap_rng_write(priv, RNG_CONTROL_REG, val); | ||
266 | } | ||
267 | |||
268 | static irqreturn_t omap4_rng_irq(int irq, void *dev_id) | ||
269 | { | ||
270 | struct omap_rng_dev *priv = dev_id; | ||
271 | u32 fro_detune, fro_enable; | ||
272 | |||
273 | /* | ||
274 | * Interrupt raised by an FRO shutdown threshold; do the following: | ||
275 | * 1. Clear the alarm events. | ||
276 | * 2. De-tune the FROs which are shut down. | ||
277 | * 3. Re-enable the shut-down FROs. | ||
278 | */ | ||
279 | omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0); | ||
280 | omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0); | ||
281 | |||
282 | fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG); | ||
283 | fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK; | ||
284 | fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG); | ||
285 | fro_enable = RNG_REG_FROENABLE_MASK; | ||
286 | |||
287 | omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune); | ||
288 | omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable); | ||
289 | |||
290 | omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK); | ||
291 | |||
292 | return IRQ_HANDLED; | ||
293 | } | ||
294 | |||
295 | static struct omap_rng_pdata omap4_rng_pdata = { | ||
296 | .regs = (u16 *)reg_map_omap4, | ||
297 | .data_size = OMAP4_RNG_OUTPUT_SIZE, | ||
298 | .data_present = omap4_rng_data_present, | ||
299 | .init = omap4_rng_init, | ||
300 | .cleanup = omap4_rng_cleanup, | ||
301 | }; | ||
302 | |||
303 | static const struct of_device_id omap_rng_of_match[] = { | ||
304 | { | ||
305 | .compatible = "ti,omap2-rng", | ||
306 | .data = &omap2_rng_pdata, | ||
307 | }, | ||
308 | { | ||
309 | .compatible = "ti,omap4-rng", | ||
310 | .data = &omap4_rng_pdata, | ||
311 | }, | ||
312 | {}, | ||
313 | }; | ||
314 | MODULE_DEVICE_TABLE(of, omap_rng_of_match); | ||
315 | |||
316 | static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, | ||
317 | struct platform_device *pdev) | ||
318 | { | ||
319 | const struct of_device_id *match; | ||
320 | struct device *dev = &pdev->dev; | ||
321 | int irq, err; | ||
322 | |||
323 | match = of_match_device(of_match_ptr(omap_rng_of_match), dev); | ||
324 | if (!match) { | ||
325 | dev_err(dev, "no compatible OF match\n"); | ||
326 | return -EINVAL; | ||
327 | } | ||
328 | priv->pdata = match->data; | ||
329 | |||
330 | if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) { | ||
331 | irq = platform_get_irq(pdev, 0); | ||
332 | if (irq < 0) { | ||
333 | dev_err(dev, "%s: error getting IRQ resource - %d\n", | ||
334 | __func__, irq); | ||
335 | return irq; | ||
336 | } | ||
337 | |||
338 | err = devm_request_irq(dev, irq, omap4_rng_irq, | ||
339 | IRQF_TRIGGER_NONE, dev_name(dev), priv); | ||
340 | if (err) { | ||
341 | dev_err(dev, "unable to request irq %d, err = %d\n", | ||
342 | irq, err); | ||
343 | return err; | ||
344 | } | ||
345 | omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK); | ||
346 | } | ||
347 | return 0; | ||
348 | } | ||
349 | #else | ||
350 | static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng, | ||
351 | struct platform_device *pdev) | ||
352 | { | ||
353 | return -EINVAL; | ||
354 | } | ||
355 | #endif | ||
356 | |||
357 | static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng) | ||
358 | { | ||
359 | /* Only OMAP2/3 can be non-DT */ | ||
360 | omap_rng->pdata = &omap2_rng_pdata; | ||
361 | return 0; | ||
362 | } | ||
363 | |||
107 | static int omap_rng_probe(struct platform_device *pdev) | 364 | static int omap_rng_probe(struct platform_device *pdev) |
108 | { | 365 | { |
109 | struct omap_rng_private_data *priv; | 366 | struct omap_rng_dev *priv; |
367 | struct resource *res; | ||
368 | struct device *dev = &pdev->dev; | ||
110 | int ret; | 369 | int ret; |
111 | 370 | ||
112 | priv = kzalloc(sizeof(struct omap_rng_private_data), GFP_KERNEL); | 371 | priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL); |
113 | if (!priv) { | 372 | if (!priv) { |
114 | dev_err(&pdev->dev, "could not allocate memory\n"); | 373 | dev_err(&pdev->dev, "could not allocate memory\n"); |
115 | return -ENOMEM; | 374 | return -ENOMEM; |
@@ -117,26 +376,29 @@ static int omap_rng_probe(struct platform_device *pdev) | |||
117 | 376 | ||
118 | omap_rng_ops.priv = (unsigned long)priv; | 377 | omap_rng_ops.priv = (unsigned long)priv; |
119 | platform_set_drvdata(pdev, priv); | 378 | platform_set_drvdata(pdev, priv); |
379 | priv->dev = dev; | ||
120 | 380 | ||
121 | priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 381 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
122 | priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res); | 382 | priv->base = devm_ioremap_resource(dev, res); |
123 | if (IS_ERR(priv->base)) { | 383 | if (IS_ERR(priv->base)) { |
124 | ret = PTR_ERR(priv->base); | 384 | ret = PTR_ERR(priv->base); |
125 | goto err_ioremap; | 385 | goto err_ioremap; |
126 | } | 386 | } |
127 | platform_set_drvdata(pdev, priv); | ||
128 | 387 | ||
129 | pm_runtime_enable(&pdev->dev); | 388 | pm_runtime_enable(&pdev->dev); |
130 | pm_runtime_get_sync(&pdev->dev); | 389 | pm_runtime_get_sync(&pdev->dev); |
131 | 390 | ||
391 | ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : | ||
392 | get_omap_rng_device_details(priv); | ||
393 | if (ret) | ||
394 | goto err_ioremap; | ||
395 | |||
132 | ret = hwrng_register(&omap_rng_ops); | 396 | ret = hwrng_register(&omap_rng_ops); |
133 | if (ret) | 397 | if (ret) |
134 | goto err_register; | 398 | goto err_register; |
135 | 399 | ||
136 | dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n", | 400 | dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n", |
137 | omap_rng_read_reg(priv, RNG_REV_REG)); | 401 | omap_rng_read(priv, RNG_REV_REG)); |
138 | |||
139 | omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); | ||
140 | 402 | ||
141 | return 0; | 403 | return 0; |
142 | 404 | ||
@@ -144,26 +406,21 @@ err_register: | |||
144 | priv->base = NULL; | 406 | priv->base = NULL; |
145 | pm_runtime_disable(&pdev->dev); | 407 | pm_runtime_disable(&pdev->dev); |
146 | err_ioremap: | 408 | err_ioremap: |
147 | kfree(priv); | 409 | dev_err(dev, "initialization failed.\n"); |
148 | |||
149 | return ret; | 410 | return ret; |
150 | } | 411 | } |
151 | 412 | ||
152 | static int __exit omap_rng_remove(struct platform_device *pdev) | 413 | static int __exit omap_rng_remove(struct platform_device *pdev) |
153 | { | 414 | { |
154 | struct omap_rng_private_data *priv = platform_get_drvdata(pdev); | 415 | struct omap_rng_dev *priv = platform_get_drvdata(pdev); |
155 | 416 | ||
156 | hwrng_unregister(&omap_rng_ops); | 417 | hwrng_unregister(&omap_rng_ops); |
157 | 418 | ||
158 | omap_rng_write_reg(priv, RNG_MASK_REG, 0x0); | 419 | priv->pdata->cleanup(priv); |
159 | 420 | ||
160 | pm_runtime_put_sync(&pdev->dev); | 421 | pm_runtime_put_sync(&pdev->dev); |
161 | pm_runtime_disable(&pdev->dev); | 422 | pm_runtime_disable(&pdev->dev); |
162 | 423 | ||
163 | release_mem_region(priv->mem_res->start, resource_size(priv->mem_res)); | ||
164 | |||
165 | kfree(priv); | ||
166 | |||
167 | return 0; | 424 | return 0; |
168 | } | 425 | } |
169 | 426 | ||
@@ -171,9 +428,9 @@ static int __exit omap_rng_remove(struct platform_device *pdev) | |||
171 | 428 | ||
172 | static int omap_rng_suspend(struct device *dev) | 429 | static int omap_rng_suspend(struct device *dev) |
173 | { | 430 | { |
174 | struct omap_rng_private_data *priv = dev_get_drvdata(dev); | 431 | struct omap_rng_dev *priv = dev_get_drvdata(dev); |
175 | 432 | ||
176 | omap_rng_write_reg(priv, RNG_MASK_REG, 0x0); | 433 | priv->pdata->cleanup(priv); |
177 | pm_runtime_put_sync(dev); | 434 | pm_runtime_put_sync(dev); |
178 | 435 | ||
179 | return 0; | 436 | return 0; |
@@ -181,10 +438,10 @@ static int omap_rng_suspend(struct device *dev) | |||
181 | 438 | ||
182 | static int omap_rng_resume(struct device *dev) | 439 | static int omap_rng_resume(struct device *dev) |
183 | { | 440 | { |
184 | struct omap_rng_private_data *priv = dev_get_drvdata(dev); | 441 | struct omap_rng_dev *priv = dev_get_drvdata(dev); |
185 | 442 | ||
186 | pm_runtime_get_sync(dev); | 443 | pm_runtime_get_sync(dev); |
187 | omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); | 444 | priv->pdata->init(priv); |
188 | 445 | ||
189 | return 0; | 446 | return 0; |
190 | } | 447 | } |
@@ -198,31 +455,18 @@ static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume); | |||
198 | 455 | ||
199 | #endif | 456 | #endif |
200 | 457 | ||
201 | /* work with hotplug and coldplug */ | ||
202 | MODULE_ALIAS("platform:omap_rng"); | ||
203 | |||
204 | static struct platform_driver omap_rng_driver = { | 458 | static struct platform_driver omap_rng_driver = { |
205 | .driver = { | 459 | .driver = { |
206 | .name = "omap_rng", | 460 | .name = "omap_rng", |
207 | .owner = THIS_MODULE, | 461 | .owner = THIS_MODULE, |
208 | .pm = OMAP_RNG_PM, | 462 | .pm = OMAP_RNG_PM, |
463 | .of_match_table = of_match_ptr(omap_rng_of_match), | ||
209 | }, | 464 | }, |
210 | .probe = omap_rng_probe, | 465 | .probe = omap_rng_probe, |
211 | .remove = __exit_p(omap_rng_remove), | 466 | .remove = __exit_p(omap_rng_remove), |
212 | }; | 467 | }; |
213 | 468 | ||
214 | static int __init omap_rng_init(void) | 469 | module_platform_driver(omap_rng_driver); |
215 | { | 470 | MODULE_ALIAS("platform:omap_rng"); |
216 | return platform_driver_register(&omap_rng_driver); | ||
217 | } | ||
218 | |||
219 | static void __exit omap_rng_exit(void) | ||
220 | { | ||
221 | platform_driver_unregister(&omap_rng_driver); | ||
222 | } | ||
223 | |||
224 | module_init(omap_rng_init); | ||
225 | module_exit(omap_rng_exit); | ||
226 | |||
227 | MODULE_AUTHOR("Deepak Saxena (and others)"); | 471 | MODULE_AUTHOR("Deepak Saxena (and others)"); |
228 | MODULE_LICENSE("GPL"); | 472 | MODULE_LICENSE("GPL"); |
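The rewrite replaces hard-coded OMAP2 register offsets with a per-revision index-to-offset table, so one accessor serves both IP generations. A short sketch of the dispatch, using the reg_map tables defined above:

    /* the same logical register resolves to a different offset per SoC */
    u32 status = omap_rng_read(priv, RNG_STATUS_REG);
    /* OMAP2: __raw_readl(base + 0x4); OMAP4: __raw_readl(base + 0x8) */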
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c index 973b95113edf..3d4c2293c6f5 100644 --- a/drivers/char/hw_random/picoxcell-rng.c +++ b/drivers/char/hw_random/picoxcell-rng.c | |||
@@ -33,7 +33,7 @@ | |||
33 | 33 | ||
34 | static void __iomem *rng_base; | 34 | static void __iomem *rng_base; |
35 | static struct clk *rng_clk; | 35 | static struct clk *rng_clk; |
36 | struct device *rng_dev; | 36 | static struct device *rng_dev; |
37 | 37 | ||
38 | static inline u32 picoxcell_trng_read_csr(void) | 38 | static inline u32 picoxcell_trng_read_csr(void) |
39 | { | 39 | { |
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c index 00593c847cf0..09c5fbea2b93 100644 --- a/drivers/char/hw_random/tx4939-rng.c +++ b/drivers/char/hw_random/tx4939-rng.c | |||
@@ -110,12 +110,10 @@ static int __init tx4939_rng_probe(struct platform_device *dev) | |||
110 | struct resource *r; | 110 | struct resource *r; |
111 | int i; | 111 | int i; |
112 | 112 | ||
113 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
114 | if (!r) | ||
115 | return -EBUSY; | ||
116 | rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL); | 113 | rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL); |
117 | if (!rngdev) | 114 | if (!rngdev) |
118 | return -ENOMEM; | 115 | return -ENOMEM; |
116 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
119 | rngdev->base = devm_ioremap_resource(&dev->dev, r); | 117 | rngdev->base = devm_ioremap_resource(&dev->dev, r); |
120 | if (IS_ERR(rngdev->base)) | 118 | if (IS_ERR(rngdev->base)) |
121 | return PTR_ERR(rngdev->base); | 119 | return PTR_ERR(rngdev->base); |
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index d0387a84eec1..e737772ad69a 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/hw_random.h> | 30 | #include <linux/hw_random.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <asm/cpu_device_id.h> | ||
32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
33 | #include <asm/msr.h> | 34 | #include <asm/msr.h> |
34 | #include <asm/cpufeature.h> | 35 | #include <asm/cpufeature.h> |
@@ -220,5 +221,11 @@ static void __exit mod_exit(void) | |||
220 | module_init(mod_init); | 221 | module_init(mod_init); |
221 | module_exit(mod_exit); | 222 | module_exit(mod_exit); |
222 | 223 | ||
224 | static struct x86_cpu_id via_rng_cpu_id[] = { | ||
225 | X86_FEATURE_MATCH(X86_FEATURE_XSTORE), | ||
226 | {} | ||
227 | }; | ||
228 | |||
223 | MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); | 229 | MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock"); |
224 | MODULE_LICENSE("GPL"); | 230 | MODULE_LICENSE("GPL"); |
231 | MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); | ||
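The x86cpu device table is what makes the driver auto-loadable, per the pull request notes: the build generates a modalias from the X86_FEATURE_XSTORE match, so udev can load via-rng on any CPU that advertises the PadLock xstore feature instead of requiring a manual modprobe.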
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 8ff7c230d82e..f4fd837bcb82 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -242,17 +242,20 @@ config CRYPTO_DEV_PPC4XX | |||
242 | This option allows you to have support for AMCC crypto acceleration. | 242 | This option allows you to have support for AMCC crypto acceleration. |
243 | 243 | ||
244 | config CRYPTO_DEV_OMAP_SHAM | 244 | config CRYPTO_DEV_OMAP_SHAM |
245 | tristate "Support for OMAP SHA1/MD5 hw accelerator" | 245 | tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator" |
246 | depends on ARCH_OMAP2 || ARCH_OMAP3 | 246 | depends on ARCH_OMAP2PLUS |
247 | select CRYPTO_SHA1 | 247 | select CRYPTO_SHA1 |
248 | select CRYPTO_MD5 | 248 | select CRYPTO_MD5 |
249 | select CRYPTO_SHA256 | ||
250 | select CRYPTO_SHA512 | ||
251 | select CRYPTO_HMAC | ||
249 | help | 252 | help |
250 | OMAP processors have a SHA1/MD5 hw accelerator. Select this if you | 253 | OMAP processors have an MD5/SHA1/SHA2 hw accelerator. Select this if you
251 | want to use the OMAP module for SHA1/MD5 algorithms. | 254 | want to use the OMAP module for MD5/SHA1/SHA2 algorithms. |
252 | 255 | ||
253 | config CRYPTO_DEV_OMAP_AES | 256 | config CRYPTO_DEV_OMAP_AES |
254 | tristate "Support for OMAP AES hw engine" | 257 | tristate "Support for OMAP AES hw engine" |
255 | depends on ARCH_OMAP2 || ARCH_OMAP3 | 258 | depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS |
256 | select CRYPTO_AES | 259 | select CRYPTO_AES |
257 | select CRYPTO_BLKCIPHER2 | 260 | select CRYPTO_BLKCIPHER2 |
258 | help | 261 | help |
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index a33243c17b00..4afca3968773 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c | |||
@@ -32,10 +32,10 @@ | |||
32 | #include "crypto4xx_sa.h" | 32 | #include "crypto4xx_sa.h" |
33 | #include "crypto4xx_core.h" | 33 | #include "crypto4xx_core.h" |
34 | 34 | ||
35 | void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, | 35 | static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, |
36 | u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc, | 36 | u32 save_iv, u32 ld_h, u32 ld_iv, |
37 | u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op, | 37 | u32 hdr_proc, u32 h, u32 c, u32 pad_type, |
38 | u32 dir) | 38 | u32 op_grp, u32 op, u32 dir) |
39 | { | 39 | { |
40 | sa->sa_command_0.w = 0; | 40 | sa->sa_command_0.w = 0; |
41 | sa->sa_command_0.bf.save_hash_state = save_h; | 41 | sa->sa_command_0.bf.save_hash_state = save_h; |
@@ -52,9 +52,10 @@ void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, | |||
52 | sa->sa_command_0.bf.dir = dir; | 52 | sa->sa_command_0.bf.dir = dir; |
53 | } | 53 | } |
54 | 54 | ||
55 | void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc, | 55 | static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, |
56 | u32 cfb, u32 esn, u32 sn_mask, u32 mute, | 56 | u32 hmac_mc, u32 cfb, u32 esn, |
57 | u32 cp_pad, u32 cp_pay, u32 cp_hdr) | 57 | u32 sn_mask, u32 mute, u32 cp_pad, |
58 | u32 cp_pay, u32 cp_hdr) | ||
58 | { | 59 | { |
59 | sa->sa_command_1.w = 0; | 60 | sa->sa_command_1.w = 0; |
60 | sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2; | 61 | sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2; |
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index b44091c47f75..ca89f6b84b06 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
@@ -98,3 +98,11 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API | |||
98 | 98 | ||
99 | To compile this as a module, choose M here: the module | 99 | To compile this as a module, choose M here: the module |
100 | will be called caamrng. | 100 | will be called caamrng. |
101 | |||
102 | config CRYPTO_DEV_FSL_CAAM_DEBUG | ||
103 | bool "Enable debug output in CAAM driver" | ||
104 | depends on CRYPTO_DEV_FSL_CAAM | ||
105 | default n | ||
106 | help | ||
107 | Selecting this will enable printing of various debug | ||
108 | information in the CAAM driver. | ||
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index b1eb44838db5..d56bd0ec65d8 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile | |||
@@ -1,6 +1,9 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the CAAM backend and dependent components | 2 | # Makefile for the CAAM backend and dependent components |
3 | # | 3 | # |
4 | ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y) | ||
5 | EXTRA_CFLAGS := -DDEBUG | ||
6 | endif | ||
4 | 7 | ||
5 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o | 8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
6 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o | 9 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
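Since EXTRA_CFLAGS is set for the whole directory, enabling CRYPTO_DEV_FSL_CAAM_DEBUG compiles every CAAM object with -DDEBUG, which in turn activates the #ifdef DEBUG print_hex_dump() blocks visible in caamalg.c below.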
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index bf416a8391a7..7c63b72ecd75 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -65,8 +65,6 @@ | |||
65 | #define CAAM_MAX_IV_LENGTH 16 | 65 | #define CAAM_MAX_IV_LENGTH 16 |
66 | 66 | ||
67 | /* length of descriptors text */ | 67 | /* length of descriptors text */ |
68 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) | ||
69 | |||
70 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) | 68 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) |
71 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) | 69 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) |
72 | #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) | 70 | #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) |
@@ -84,8 +82,6 @@ | |||
84 | 82 | ||
85 | #ifdef DEBUG | 83 | #ifdef DEBUG |
86 | /* for print_hex_dumps with line references */ | 84 | /* for print_hex_dumps with line references */ |
87 | #define xstr(s) str(s) | ||
88 | #define str(s) #s | ||
89 | #define debug(format, arg...) printk(format, arg) | 85 | #define debug(format, arg...) printk(format, arg) |
90 | #else | 86 | #else |
91 | #define debug(format, arg...) | 87 | #define debug(format, arg...) |
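The removed xstr()/str() pair duplicated a generic kernel helper; the replacement relies on <linux/stringify.h>, whose two-level expansion lets __LINE__ expand to its numeric value before being stringized:

    /* from include/linux/stringify.h */
    #define __stringify_1(x...)     #x
    #define __stringify(x...)       __stringify_1(x)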
@@ -285,7 +281,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
285 | return -ENOMEM; | 281 | return -ENOMEM; |
286 | } | 282 | } |
287 | #ifdef DEBUG | 283 | #ifdef DEBUG |
288 | print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ", | 284 | print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ", |
289 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 285 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
290 | desc_bytes(desc), 1); | 286 | desc_bytes(desc), 1); |
291 | #endif | 287 | #endif |
@@ -353,7 +349,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
353 | return -ENOMEM; | 349 | return -ENOMEM; |
354 | } | 350 | } |
355 | #ifdef DEBUG | 351 | #ifdef DEBUG |
356 | print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ", | 352 | print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ", |
357 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 353 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
358 | desc_bytes(desc), 1); | 354 | desc_bytes(desc), 1); |
359 | #endif | 355 | #endif |
@@ -436,7 +432,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
436 | return -ENOMEM; | 432 | return -ENOMEM; |
437 | } | 433 | } |
438 | #ifdef DEBUG | 434 | #ifdef DEBUG |
439 | print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ", | 435 | print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ", |
440 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 436 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
441 | desc_bytes(desc), 1); | 437 | desc_bytes(desc), 1); |
442 | #endif | 438 | #endif |
@@ -500,7 +496,7 @@ static int aead_setkey(struct crypto_aead *aead, | |||
500 | keylen, enckeylen, authkeylen); | 496 | keylen, enckeylen, authkeylen); |
501 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | 497 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", |
502 | ctx->split_key_len, ctx->split_key_pad_len); | 498 | ctx->split_key_len, ctx->split_key_pad_len); |
503 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | 499 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", |
504 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 500 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
505 | #endif | 501 | #endif |
506 | 502 | ||
@@ -519,7 +515,7 @@ static int aead_setkey(struct crypto_aead *aead, | |||
519 | return -ENOMEM; | 515 | return -ENOMEM; |
520 | } | 516 | } |
521 | #ifdef DEBUG | 517 | #ifdef DEBUG |
522 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | 518 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
523 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 519 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
524 | ctx->split_key_pad_len + enckeylen, 1); | 520 | ctx->split_key_pad_len + enckeylen, 1); |
525 | #endif | 521 | #endif |
@@ -549,7 +545,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
549 | u32 *desc; | 545 | u32 *desc; |
550 | 546 | ||
551 | #ifdef DEBUG | 547 | #ifdef DEBUG |
552 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | 548 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", |
553 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 549 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
554 | #endif | 550 | #endif |
555 | 551 | ||
@@ -598,7 +594,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
598 | return -ENOMEM; | 594 | return -ENOMEM; |
599 | } | 595 | } |
600 | #ifdef DEBUG | 596 | #ifdef DEBUG |
601 | print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ", | 597 | print_hex_dump(KERN_ERR, |
598 | "ablkcipher enc shdesc@"__stringify(__LINE__)": ", | ||
602 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 599 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
603 | desc_bytes(desc), 1); | 600 | desc_bytes(desc), 1); |
604 | #endif | 601 | #endif |
@@ -643,7 +640,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
643 | } | 640 | } |
644 | 641 | ||
645 | #ifdef DEBUG | 642 | #ifdef DEBUG |
646 | print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ", | 643 | print_hex_dump(KERN_ERR, |
644 | "ablkcipher dec shdesc@"__stringify(__LINE__)": ", | ||
647 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 645 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
648 | desc_bytes(desc), 1); | 646 | desc_bytes(desc), 1); |
649 | #endif | 647 | #endif |
@@ -780,13 +778,13 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
780 | aead_unmap(jrdev, edesc, req); | 778 | aead_unmap(jrdev, edesc, req); |
781 | 779 | ||
782 | #ifdef DEBUG | 780 | #ifdef DEBUG |
783 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | 781 | print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", |
784 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), | 782 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), |
785 | req->assoclen , 1); | 783 | req->assoclen , 1); |
786 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | 784 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", |
787 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, | 785 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, |
788 | edesc->src_nents ? 100 : ivsize, 1); | 786 | edesc->src_nents ? 100 : ivsize, 1); |
789 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | 787 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
790 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 788 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
791 | edesc->src_nents ? 100 : req->cryptlen + | 789 | edesc->src_nents ? 100 : req->cryptlen + |
792 | ctx->authsize + 4, 1); | 790 | ctx->authsize + 4, 1); |
@@ -814,10 +812,10 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
814 | offsetof(struct aead_edesc, hw_desc)); | 812 | offsetof(struct aead_edesc, hw_desc)); |
815 | 813 | ||
816 | #ifdef DEBUG | 814 | #ifdef DEBUG |
817 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | 815 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", |
818 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 816 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
819 | ivsize, 1); | 817 | ivsize, 1); |
820 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | 818 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
821 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), | 819 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), |
822 | req->cryptlen, 1); | 820 | req->cryptlen, 1); |
823 | #endif | 821 | #endif |
@@ -837,7 +835,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
837 | err = -EBADMSG; | 835 | err = -EBADMSG; |
838 | 836 | ||
839 | #ifdef DEBUG | 837 | #ifdef DEBUG |
840 | print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", | 838 | print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ", |
841 | DUMP_PREFIX_ADDRESS, 16, 4, | 839 | DUMP_PREFIX_ADDRESS, 16, 4, |
842 | ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), | 840 | ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), |
843 | sizeof(struct iphdr) + req->assoclen + | 841 | sizeof(struct iphdr) + req->assoclen + |
@@ -845,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
845 | ctx->authsize + 36, 1); | 843 | ctx->authsize + 36, 1); |
846 | if (!err && edesc->sec4_sg_bytes) { | 844 | if (!err && edesc->sec4_sg_bytes) { |
847 | struct scatterlist *sg = sg_last(req->src, edesc->src_nents); | 845 | struct scatterlist *sg = sg_last(req->src, edesc->src_nents); |
848 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", | 846 | print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ", |
849 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), | 847 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), |
850 | sg->length + ctx->authsize + 16, 1); | 848 | sg->length + ctx->authsize + 16, 1); |
851 | } | 849 | } |
@@ -878,10 +876,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
878 | } | 876 | } |
879 | 877 | ||
880 | #ifdef DEBUG | 878 | #ifdef DEBUG |
881 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | 879 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", |
882 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 880 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
883 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 881 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
884 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | 882 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
885 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 883 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
886 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); | 884 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); |
887 | #endif | 885 | #endif |
@@ -913,10 +911,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
913 | } | 911 | } |
914 | 912 | ||
915 | #ifdef DEBUG | 913 | #ifdef DEBUG |
916 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | 914 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", |
917 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 915 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
918 | ivsize, 1); | 916 | ivsize, 1); |
919 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | 917 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
920 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 918 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
921 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); | 919 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); |
922 | #endif | 920 | #endif |
@@ -947,16 +945,16 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
947 | #ifdef DEBUG | 945 | #ifdef DEBUG |
948 | debug("assoclen %d cryptlen %d authsize %d\n", | 946 | debug("assoclen %d cryptlen %d authsize %d\n", |
949 | req->assoclen, req->cryptlen, authsize); | 947 | req->assoclen, req->cryptlen, authsize); |
950 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | 948 | print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", |
951 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), | 949 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), |
952 | req->assoclen , 1); | 950 | req->assoclen , 1); |
953 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | 951 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", |
954 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 952 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
955 | edesc->src_nents ? 100 : ivsize, 1); | 953 | edesc->src_nents ? 100 : ivsize, 1); |
956 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", | 954 | print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", |
957 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 955 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
958 | edesc->src_nents ? 100 : req->cryptlen, 1); | 956 | edesc->src_nents ? 100 : req->cryptlen, 1); |
959 | print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", | 957 | print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", |
960 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, | 958 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, |
961 | desc_bytes(sh_desc), 1); | 959 | desc_bytes(sh_desc), 1); |
962 | #endif | 960 | #endif |
@@ -1025,15 +1023,15 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
1025 | #ifdef DEBUG | 1023 | #ifdef DEBUG |
1026 | debug("assoclen %d cryptlen %d authsize %d\n", | 1024 | debug("assoclen %d cryptlen %d authsize %d\n", |
1027 | req->assoclen, req->cryptlen, authsize); | 1025 | req->assoclen, req->cryptlen, authsize); |
1028 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | 1026 | print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", |
1029 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), | 1027 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), |
1030 | req->assoclen , 1); | 1028 | req->assoclen , 1); |
1031 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | 1029 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", |
1032 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); | 1030 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); |
1033 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", | 1031 | print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", |
1034 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 1032 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
1035 | edesc->src_nents > 1 ? 100 : req->cryptlen, 1); | 1033 | edesc->src_nents > 1 ? 100 : req->cryptlen, 1); |
1036 | print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", | 1034 | print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", |
1037 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, | 1035 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, |
1038 | desc_bytes(sh_desc), 1); | 1036 | desc_bytes(sh_desc), 1); |
1039 | #endif | 1037 | #endif |
@@ -1086,10 +1084,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
1086 | int len, sec4_sg_index = 0; | 1084 | int len, sec4_sg_index = 0; |
1087 | 1085 | ||
1088 | #ifdef DEBUG | 1086 | #ifdef DEBUG |
1089 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | 1087 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", |
1090 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 1088 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
1091 | ivsize, 1); | 1089 | ivsize, 1); |
1092 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", | 1090 | print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", |
1093 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 1091 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
1094 | edesc->src_nents ? 100 : req->nbytes, 1); | 1092 | edesc->src_nents ? 100 : req->nbytes, 1); |
1095 | #endif | 1093 | #endif |
@@ -1247,7 +1245,7 @@ static int aead_encrypt(struct aead_request *req) | |||
1247 | init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, | 1245 | init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, |
1248 | all_contig, true); | 1246 | all_contig, true); |
1249 | #ifdef DEBUG | 1247 | #ifdef DEBUG |
1250 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", | 1248 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", |
1251 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1249 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1252 | desc_bytes(edesc->hw_desc), 1); | 1250 | desc_bytes(edesc->hw_desc), 1); |
1253 | #endif | 1251 | #endif |
@@ -1281,7 +1279,7 @@ static int aead_decrypt(struct aead_request *req) | |||
1281 | return PTR_ERR(edesc); | 1279 | return PTR_ERR(edesc); |
1282 | 1280 | ||
1283 | #ifdef DEBUG | 1281 | #ifdef DEBUG |
1284 | print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ", | 1282 | print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", |
1285 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 1283 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
1286 | req->cryptlen, 1); | 1284 | req->cryptlen, 1); |
1287 | #endif | 1285 | #endif |
@@ -1290,7 +1288,7 @@ static int aead_decrypt(struct aead_request *req) | |||
1290 | init_aead_job(ctx->sh_desc_dec, | 1288 | init_aead_job(ctx->sh_desc_dec, |
1291 | ctx->sh_desc_dec_dma, edesc, req, all_contig, false); | 1289 | ctx->sh_desc_dec_dma, edesc, req, all_contig, false); |
1292 | #ifdef DEBUG | 1290 | #ifdef DEBUG |
1293 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", | 1291 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", |
1294 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1292 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1295 | desc_bytes(edesc->hw_desc), 1); | 1293 | desc_bytes(edesc->hw_desc), 1); |
1296 | #endif | 1294 | #endif |
@@ -1437,7 +1435,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) | |||
1437 | return PTR_ERR(edesc); | 1435 | return PTR_ERR(edesc); |
1438 | 1436 | ||
1439 | #ifdef DEBUG | 1437 | #ifdef DEBUG |
1440 | print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", | 1438 | print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ", |
1441 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 1439 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
1442 | req->cryptlen, 1); | 1440 | req->cryptlen, 1); |
1443 | #endif | 1441 | #endif |
@@ -1446,7 +1444,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) | |||
1446 | init_aead_giv_job(ctx->sh_desc_givenc, | 1444 | init_aead_giv_job(ctx->sh_desc_givenc, |
1447 | ctx->sh_desc_givenc_dma, edesc, req, contig); | 1445 | ctx->sh_desc_givenc_dma, edesc, req, contig); |
1448 | #ifdef DEBUG | 1446 | #ifdef DEBUG |
1449 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", | 1447 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", |
1450 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1448 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1451 | desc_bytes(edesc->hw_desc), 1); | 1449 | desc_bytes(edesc->hw_desc), 1); |
1452 | #endif | 1450 | #endif |
@@ -1546,7 +1544,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
1546 | edesc->iv_dma = iv_dma; | 1544 | edesc->iv_dma = iv_dma; |
1547 | 1545 | ||
1548 | #ifdef DEBUG | 1546 | #ifdef DEBUG |
1549 | print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ", | 1547 | print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", |
1550 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, | 1548 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, |
1551 | sec4_sg_bytes, 1); | 1549 | sec4_sg_bytes, 1); |
1552 | #endif | 1550 | #endif |
@@ -1575,7 +1573,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req) | |||
1575 | init_ablkcipher_job(ctx->sh_desc_enc, | 1573 | init_ablkcipher_job(ctx->sh_desc_enc, |
1576 | ctx->sh_desc_enc_dma, edesc, req, iv_contig); | 1574 | ctx->sh_desc_enc_dma, edesc, req, iv_contig); |
1577 | #ifdef DEBUG | 1575 | #ifdef DEBUG |
1578 | print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", | 1576 | print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", |
1579 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1577 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1580 | desc_bytes(edesc->hw_desc), 1); | 1578 | desc_bytes(edesc->hw_desc), 1); |
1581 | #endif | 1579 | #endif |
@@ -1613,7 +1611,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) | |||
1613 | ctx->sh_desc_dec_dma, edesc, req, iv_contig); | 1611 | ctx->sh_desc_dec_dma, edesc, req, iv_contig); |
1614 | desc = edesc->hw_desc; | 1612 | desc = edesc->hw_desc; |
1615 | #ifdef DEBUG | 1613 | #ifdef DEBUG |
1616 | print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", | 1614 | print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", |
1617 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1615 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1618 | desc_bytes(edesc->hw_desc), 1); | 1616 | desc_bytes(edesc->hw_desc), 1); |
1619 | #endif | 1617 | #endif |
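The caamalg.c hunks above swap the driver-local xstr()/str() stringification pair for the kernel-wide __stringify() helper. The two-level expansion is what makes the trick work: a single-level #x would yield the literal string "__LINE__" rather than the line number. A minimal sketch of the mechanism, matching the definitions in include/linux/stringify.h:

        #define __stringify_1(x...)     #x                  /* plain stringification */
        #define __stringify(x...)       __stringify_1(x)    /* expand arguments first */

        /*
         * "key in @" __stringify(__LINE__) ": " therefore becomes
         * "key in @" "503" ": ", and C concatenates adjacent string
         * literals at compile time, so tagging each hex dump with its
         * source line costs nothing at run time.
         */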
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 84573b4d6f92..e732bd962e98 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -72,8 +72,6 @@ | |||
72 | #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE | 72 | #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE |
73 | 73 | ||
74 | /* length of descriptors text */ | 74 | /* length of descriptors text */ |
75 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) | ||
76 | |||
77 | #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) | 75 | #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) |
78 | #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) | 76 | #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) |
79 | #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) | 77 | #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) |
@@ -91,8 +89,6 @@ | |||
91 | 89 | ||
92 | #ifdef DEBUG | 90 | #ifdef DEBUG |
93 | /* for print_hex_dumps with line references */ | 91 | /* for print_hex_dumps with line references */ |
94 | #define xstr(s) str(s) | ||
95 | #define str(s) #s | ||
96 | #define debug(format, arg...) printk(format, arg) | 92 | #define debug(format, arg...) printk(format, arg) |
97 | #else | 93 | #else |
98 | #define debug(format, arg...) | 94 | #define debug(format, arg...) |
@@ -331,7 +327,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
331 | return -ENOMEM; | 327 | return -ENOMEM; |
332 | } | 328 | } |
333 | #ifdef DEBUG | 329 | #ifdef DEBUG |
334 | print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ", | 330 | print_hex_dump(KERN_ERR, |
331 | "ahash update shdesc@"__stringify(__LINE__)": ", | ||
335 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 332 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
336 | #endif | 333 | #endif |
337 | 334 | ||
@@ -349,7 +346,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
349 | return -ENOMEM; | 346 | return -ENOMEM; |
350 | } | 347 | } |
351 | #ifdef DEBUG | 348 | #ifdef DEBUG |
352 | print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ", | 349 | print_hex_dump(KERN_ERR, |
350 | "ahash update first shdesc@"__stringify(__LINE__)": ", | ||
353 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 351 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
354 | #endif | 352 | #endif |
355 | 353 | ||
@@ -366,7 +364,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
366 | return -ENOMEM; | 364 | return -ENOMEM; |
367 | } | 365 | } |
368 | #ifdef DEBUG | 366 | #ifdef DEBUG |
369 | print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ", | 367 | print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", |
370 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 368 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
371 | desc_bytes(desc), 1); | 369 | desc_bytes(desc), 1); |
372 | #endif | 370 | #endif |
@@ -384,7 +382,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
384 | return -ENOMEM; | 382 | return -ENOMEM; |
385 | } | 383 | } |
386 | #ifdef DEBUG | 384 | #ifdef DEBUG |
387 | print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ", | 385 | print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ", |
388 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 386 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
389 | desc_bytes(desc), 1); | 387 | desc_bytes(desc), 1); |
390 | #endif | 388 | #endif |
@@ -403,7 +401,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
403 | return -ENOMEM; | 401 | return -ENOMEM; |
404 | } | 402 | } |
405 | #ifdef DEBUG | 403 | #ifdef DEBUG |
406 | print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ", | 404 | print_hex_dump(KERN_ERR, |
405 | "ahash digest shdesc@"__stringify(__LINE__)": ", | ||
407 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 406 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
408 | desc_bytes(desc), 1); | 407 | desc_bytes(desc), 1); |
409 | #endif | 408 | #endif |
@@ -464,9 +463,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | |||
464 | LDST_SRCDST_BYTE_CONTEXT); | 463 | LDST_SRCDST_BYTE_CONTEXT); |
465 | 464 | ||
466 | #ifdef DEBUG | 465 | #ifdef DEBUG |
467 | print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ", | 466 | print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", |
468 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); | 467 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); |
469 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 468 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
470 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 469 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
471 | #endif | 470 | #endif |
472 | 471 | ||
@@ -479,7 +478,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | |||
479 | wait_for_completion_interruptible(&result.completion); | 478 | wait_for_completion_interruptible(&result.completion); |
480 | ret = result.err; | 479 | ret = result.err; |
481 | #ifdef DEBUG | 480 | #ifdef DEBUG |
482 | print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ", | 481 | print_hex_dump(KERN_ERR, |
482 | "digested key@"__stringify(__LINE__)": ", | ||
483 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, | 483 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, |
484 | digestsize, 1); | 484 | digestsize, 1); |
485 | #endif | 485 | #endif |
@@ -530,7 +530,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
530 | #ifdef DEBUG | 530 | #ifdef DEBUG |
531 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | 531 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", |
532 | ctx->split_key_len, ctx->split_key_pad_len); | 532 | ctx->split_key_len, ctx->split_key_pad_len); |
533 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | 533 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", |
534 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 534 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
535 | #endif | 535 | #endif |
536 | 536 | ||
@@ -545,7 +545,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
545 | return -ENOMEM; | 545 | return -ENOMEM; |
546 | } | 546 | } |
547 | #ifdef DEBUG | 547 | #ifdef DEBUG |
548 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | 548 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
549 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 549 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
550 | ctx->split_key_pad_len, 1); | 550 | ctx->split_key_pad_len, 1); |
551 | #endif | 551 | #endif |
@@ -638,11 +638,11 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | |||
638 | kfree(edesc); | 638 | kfree(edesc); |
639 | 639 | ||
640 | #ifdef DEBUG | 640 | #ifdef DEBUG |
641 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | 641 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", |
642 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 642 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
643 | ctx->ctx_len, 1); | 643 | ctx->ctx_len, 1); |
644 | if (req->result) | 644 | if (req->result) |
645 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | 645 | print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", |
646 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 646 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
647 | digestsize, 1); | 647 | digestsize, 1); |
648 | #endif | 648 | #endif |
@@ -676,11 +676,11 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | |||
676 | kfree(edesc); | 676 | kfree(edesc); |
677 | 677 | ||
678 | #ifdef DEBUG | 678 | #ifdef DEBUG |
679 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | 679 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", |
680 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 680 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
681 | ctx->ctx_len, 1); | 681 | ctx->ctx_len, 1); |
682 | if (req->result) | 682 | if (req->result) |
683 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | 683 | print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", |
684 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 684 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
685 | digestsize, 1); | 685 | digestsize, 1); |
686 | #endif | 686 | #endif |
@@ -714,11 +714,11 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
714 | kfree(edesc); | 714 | kfree(edesc); |
715 | 715 | ||
716 | #ifdef DEBUG | 716 | #ifdef DEBUG |
717 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | 717 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", |
718 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 718 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
719 | ctx->ctx_len, 1); | 719 | ctx->ctx_len, 1); |
720 | if (req->result) | 720 | if (req->result) |
721 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | 721 | print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", |
722 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 722 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
723 | digestsize, 1); | 723 | digestsize, 1); |
724 | #endif | 724 | #endif |
@@ -752,11 +752,11 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
752 | kfree(edesc); | 752 | kfree(edesc); |
753 | 753 | ||
754 | #ifdef DEBUG | 754 | #ifdef DEBUG |
755 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | 755 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", |
756 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 756 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
757 | ctx->ctx_len, 1); | 757 | ctx->ctx_len, 1); |
758 | if (req->result) | 758 | if (req->result) |
759 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | 759 | print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", |
760 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 760 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
761 | digestsize, 1); | 761 | digestsize, 1); |
762 | #endif | 762 | #endif |
@@ -852,7 +852,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
852 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); | 852 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); |
853 | 853 | ||
854 | #ifdef DEBUG | 854 | #ifdef DEBUG |
855 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 855 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
856 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 856 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
857 | desc_bytes(desc), 1); | 857 | desc_bytes(desc), 1); |
858 | #endif | 858 | #endif |
@@ -871,9 +871,9 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
871 | *next_buflen = last_buflen; | 871 | *next_buflen = last_buflen; |
872 | } | 872 | } |
873 | #ifdef DEBUG | 873 | #ifdef DEBUG |
874 | print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", | 874 | print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", |
875 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | 875 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); |
876 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | 876 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", |
877 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 877 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, |
878 | *next_buflen, 1); | 878 | *next_buflen, 1); |
879 | #endif | 879 | #endif |
@@ -937,7 +937,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
937 | digestsize); | 937 | digestsize); |
938 | 938 | ||
939 | #ifdef DEBUG | 939 | #ifdef DEBUG |
940 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 940 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
941 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 941 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
942 | #endif | 942 | #endif |
943 | 943 | ||
@@ -1016,7 +1016,7 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
1016 | digestsize); | 1016 | digestsize); |
1017 | 1017 | ||
1018 | #ifdef DEBUG | 1018 | #ifdef DEBUG |
1019 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 1019 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
1020 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1020 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
1021 | #endif | 1021 | #endif |
1022 | 1022 | ||
@@ -1086,7 +1086,7 @@ static int ahash_digest(struct ahash_request *req) | |||
1086 | digestsize); | 1086 | digestsize); |
1087 | 1087 | ||
1088 | #ifdef DEBUG | 1088 | #ifdef DEBUG |
1089 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 1089 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
1090 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1090 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
1091 | #endif | 1091 | #endif |
1092 | 1092 | ||
@@ -1140,7 +1140,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
1140 | edesc->src_nents = 0; | 1140 | edesc->src_nents = 0; |
1141 | 1141 | ||
1142 | #ifdef DEBUG | 1142 | #ifdef DEBUG |
1143 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 1143 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
1144 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1144 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
1145 | #endif | 1145 | #endif |
1146 | 1146 | ||
@@ -1228,7 +1228,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1228 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | 1228 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); |
1229 | 1229 | ||
1230 | #ifdef DEBUG | 1230 | #ifdef DEBUG |
1231 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 1231 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
1232 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 1232 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
1233 | desc_bytes(desc), 1); | 1233 | desc_bytes(desc), 1); |
1234 | #endif | 1234 | #endif |
@@ -1250,9 +1250,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1250 | *next_buflen = 0; | 1250 | *next_buflen = 0; |
1251 | } | 1251 | } |
1252 | #ifdef DEBUG | 1252 | #ifdef DEBUG |
1253 | print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", | 1253 | print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", |
1254 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | 1254 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); |
1255 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | 1255 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", |
1256 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 1256 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, |
1257 | *next_buflen, 1); | 1257 | *next_buflen, 1); |
1258 | #endif | 1258 | #endif |
@@ -1321,7 +1321,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
1321 | digestsize); | 1321 | digestsize); |
1322 | 1322 | ||
1323 | #ifdef DEBUG | 1323 | #ifdef DEBUG |
1324 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 1324 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
1325 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1325 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
1326 | #endif | 1326 | #endif |
1327 | 1327 | ||
@@ -1414,7 +1414,7 @@ static int ahash_update_first(struct ahash_request *req) | |||
1414 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | 1414 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); |
1415 | 1415 | ||
1416 | #ifdef DEBUG | 1416 | #ifdef DEBUG |
1417 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 1417 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
1418 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 1418 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
1419 | desc_bytes(desc), 1); | 1419 | desc_bytes(desc), 1); |
1420 | #endif | 1420 | #endif |
@@ -1438,7 +1438,7 @@ static int ahash_update_first(struct ahash_request *req) | |||
1438 | sg_copy(next_buf, req->src, req->nbytes); | 1438 | sg_copy(next_buf, req->src, req->nbytes); |
1439 | } | 1439 | } |
1440 | #ifdef DEBUG | 1440 | #ifdef DEBUG |
1441 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | 1441 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", |
1442 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 1442 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, |
1443 | *next_buflen, 1); | 1443 | *next_buflen, 1); |
1444 | #endif | 1444 | #endif |
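caamhash.c gets the same xstr() to __stringify() conversion, with a few print_hex_dump() calls re-wrapped to keep lines short, and loses its private DESC_JOB_IO_LEN copy (moved to desc_constr.h below). Every one of these debug sites shares the same idiom; a self-contained sketch (dump_state() and its prefix string are illustrative, not driver symbols; needs <linux/printk.h> and <linux/stringify.h>):

        #ifdef DEBUG
        /* 16 bytes per row, grouped as 32-bit words, each row prefixed
         * with the buffer address, ASCII column enabled */
        static void dump_state(const void *buf, size_t len)
        {
                print_hex_dump(KERN_ERR, "state@" __stringify(__LINE__) ": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, buf, len, 1);
        }
        #endif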
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index f5d6deced1cb..b010d42a1803 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -75,55 +75,53 @@ static void build_instantiation_desc(u32 *desc) | |||
75 | OP_ALG_RNG4_SK); | 75 | OP_ALG_RNG4_SK); |
76 | } | 76 | } |
77 | 77 | ||
78 | struct instantiate_result { | 78 | static int instantiate_rng(struct device *ctrldev) |
79 | struct completion completion; | ||
80 | int err; | ||
81 | }; | ||
82 | |||
83 | static void rng4_init_done(struct device *dev, u32 *desc, u32 err, | ||
84 | void *context) | ||
85 | { | ||
86 | struct instantiate_result *instantiation = context; | ||
87 | |||
88 | if (err) { | ||
89 | char tmp[CAAM_ERROR_STR_MAX]; | ||
90 | |||
91 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
92 | } | ||
93 | |||
94 | instantiation->err = err; | ||
95 | complete(&instantiation->completion); | ||
96 | } | ||
97 | |||
98 | static int instantiate_rng(struct device *jrdev) | ||
99 | { | 79 | { |
100 | struct instantiate_result instantiation; | 80 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
101 | 81 | struct caam_full __iomem *topregs; | |
102 | dma_addr_t desc_dma; | 82 | unsigned int timeout = 100000; |
103 | u32 *desc; | 83 | u32 *desc; |
104 | int ret; | 84 | int i, ret = 0; |
105 | 85 | ||
106 | desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); | 86 | desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); |
107 | if (!desc) { | 87 | if (!desc) { |
108 | dev_err(jrdev, "cannot allocate RNG init descriptor memory\n"); | 88 | dev_err(ctrldev, "can't allocate RNG init descriptor memory\n"); |
109 | return -ENOMEM; | 89 | return -ENOMEM; |
110 | } | 90 | } |
111 | |||
112 | build_instantiation_desc(desc); | 91 | build_instantiation_desc(desc); |
113 | desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); | 92 | |
114 | init_completion(&instantiation.completion); | 93 | /* Set the bit to request direct access to DECO0 */ |
115 | ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation); | 94 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; |
116 | if (!ret) { | 95 | setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); |
117 | wait_for_completion_interruptible(&instantiation.completion); | 96 | |
118 | ret = instantiation.err; | 97 | while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) && |
119 | if (ret) | 98 | --timeout) |
120 | dev_err(jrdev, "unable to instantiate RNG\n"); | 99 | cpu_relax(); |
100 | |||
101 | if (!timeout) { | ||
102 | dev_err(ctrldev, "failed to acquire DECO 0\n"); | ||
103 | ret = -EIO; | ||
104 | goto out; | ||
121 | } | 105 | } |
122 | 106 | ||
123 | dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE); | 107 | for (i = 0; i < desc_len(desc); i++) |
108 | topregs->deco.descbuf[i] = *(desc + i); | ||
124 | 109 | ||
125 | kfree(desc); | 110 | wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR); |
126 | 111 | ||
112 | timeout = 10000000; | ||
113 | while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && | ||
114 | --timeout) | ||
115 | cpu_relax(); | ||
116 | |||
117 | if (!timeout) { | ||
118 | dev_err(ctrldev, "failed to instantiate RNG\n"); | ||
119 | ret = -EIO; | ||
120 | } | ||
121 | |||
122 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); | ||
123 | out: | ||
124 | kfree(desc); | ||
127 | return ret; | 125 | return ret; |
128 | } | 126 | } |
129 | 127 | ||
@@ -303,7 +301,7 @@ static int caam_probe(struct platform_device *pdev) | |||
303 | if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && | 301 | if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && |
304 | !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { | 302 | !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { |
305 | kick_trng(pdev); | 303 | kick_trng(pdev); |
306 | ret = instantiate_rng(ctrlpriv->jrdev[0]); | 304 | ret = instantiate_rng(dev); |
307 | if (ret) { | 305 | if (ret) { |
308 | caam_remove(pdev); | 306 | caam_remove(pdev); |
309 | return ret; | 307 | return ret; |
@@ -315,9 +313,6 @@ static int caam_probe(struct platform_device *pdev) | |||
315 | 313 | ||
316 | /* NOTE: RTIC detection ought to go here, around Si time */ | 314 | /* NOTE: RTIC detection ought to go here, around Si time */ |
317 | 315 | ||
318 | /* Initialize queue allocator lock */ | ||
319 | spin_lock_init(&ctrlpriv->jr_alloc_lock); | ||
320 | |||
321 | caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id); | 316 | caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id); |
322 | 317 | ||
323 | /* Report "alive" for developer to see */ | 318 | /* Report "alive" for developer to see */ |
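The ctrl.c rewrite runs the RNG4 instantiation descriptor on DECO0 directly instead of enqueueing it on a job ring: request the engine through DECORR_RQD0ENABLE, poll DECORR_DEN0 until access is granted, copy the descriptor words into descbuf, start it with DECO_JQCR_WHL | DECO_JQCR_FOUR, then poll desc_dbg until DECO_DBG_VALID clears. Both waits use the same bounded-polling idiom; a minimal sketch, with readl()/cpu_relax() standing in for the driver's rd_reg32() accessor and the helper name invented here:

        /* spin until (reg & mask) reaches the wanted state, or give up */
        static int poll_reg(void __iomem *reg, u32 mask, bool set,
                            unsigned int timeout)
        {
                while (!!(readl(reg) & mask) != set && --timeout)
                        cpu_relax();

                return timeout ? 0 : -EIO;
        }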
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index fe3bfd1b08ca..cd5f678847ce 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define CAAM_CMD_SZ sizeof(u32) | 10 | #define CAAM_CMD_SZ sizeof(u32) |
11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) | 11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) |
12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) | 12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) |
13 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) | ||
13 | 14 | ||
14 | #ifdef DEBUG | 15 | #ifdef DEBUG |
15 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ | 16 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ |
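With DESC_JOB_IO_LEN defined once here, caamalg.c and caamhash.c now size their descriptor allocations from the same constant. Worked out from the macros above (CAAM_CMD_SZ = sizeof(u32) = 4; CAAM_PTR_SZ = sizeof(dma_addr_t), which is 4 or 8 depending on the platform's DMA addressing):

        DESC_JOB_IO_LEN = 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ
                        = 5 * 4 + 3 * 4 = 32 bytes    (32-bit dma_addr_t)
                        = 5 * 4 + 3 * 8 = 44 bytes    (64-bit dma_addr_t)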
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index e4a16b741371..34c4b9f7fbfa 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
@@ -9,9 +9,6 @@ | |||
9 | #ifndef INTERN_H | 9 | #ifndef INTERN_H |
10 | #define INTERN_H | 10 | #define INTERN_H |
11 | 11 | ||
12 | #define JOBR_UNASSIGNED 0 | ||
13 | #define JOBR_ASSIGNED 1 | ||
14 | |||
15 | /* Currently comes from Kconfig param as a ^2 (driver-required) */ | 12 | /* Currently comes from Kconfig param as a ^2 (driver-required) */ |
16 | #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) | 13 | #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) |
17 | 14 | ||
@@ -46,7 +43,6 @@ struct caam_drv_private_jr { | |||
46 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | 43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
47 | struct tasklet_struct irqtask; | 44 | struct tasklet_struct irqtask; |
48 | int irq; /* One per queue */ | 45 | int irq; /* One per queue */ |
49 | int assign; /* busy/free */ | ||
50 | 46 | ||
51 | /* Job ring info */ | 47 | /* Job ring info */ |
52 | int ringsize; /* Size of rings (assume input = output) */ | 48 | int ringsize; /* Size of rings (assume input = output) */ |
@@ -68,7 +64,6 @@ struct caam_drv_private { | |||
68 | 64 | ||
69 | struct device *dev; | 65 | struct device *dev; |
70 | struct device **jrdev; /* Alloc'ed array per sub-device */ | 66 | struct device **jrdev; /* Alloc'ed array per sub-device */ |
71 | spinlock_t jr_alloc_lock; | ||
72 | struct platform_device *pdev; | 67 | struct platform_device *pdev; |
73 | 68 | ||
74 | /* Physical-presence section */ | 69 | /* Physical-presence section */ |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index b4aa773ecbc8..105ba4da6180 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -126,72 +126,6 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * caam_jr_register() - Alloc a ring for someone to use as needed. Returns | ||
130 | * an ordinal of the rings allocated, else returns -ENODEV if no rings | ||
131 | * are available. | ||
132 | * @ctrldev: points to the controller level dev (parent) that | ||
133 | * owns rings available for use. | ||
134 | * @dev: points to where a pointer to the newly allocated queue's | ||
135 | * dev can be written to if successful. | ||
136 | **/ | ||
137 | int caam_jr_register(struct device *ctrldev, struct device **rdev) | ||
138 | { | ||
139 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | ||
140 | struct caam_drv_private_jr *jrpriv = NULL; | ||
141 | int ring; | ||
142 | |||
143 | /* Lock, if free ring - assign, unlock */ | ||
144 | spin_lock(&ctrlpriv->jr_alloc_lock); | ||
145 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | ||
146 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); | ||
147 | if (jrpriv->assign == JOBR_UNASSIGNED) { | ||
148 | jrpriv->assign = JOBR_ASSIGNED; | ||
149 | *rdev = ctrlpriv->jrdev[ring]; | ||
150 | spin_unlock(&ctrlpriv->jr_alloc_lock); | ||
151 | return ring; | ||
152 | } | ||
153 | } | ||
154 | |||
155 | /* If assigned, write dev where caller needs it */ | ||
156 | spin_unlock(&ctrlpriv->jr_alloc_lock); | ||
157 | *rdev = NULL; | ||
158 | |||
159 | return -ENODEV; | ||
160 | } | ||
161 | EXPORT_SYMBOL(caam_jr_register); | ||
162 | |||
163 | /** | ||
164 | * caam_jr_deregister() - Deregister an API and release the queue. | ||
165 | * Returns 0 if OK, -EBUSY if queue still contains pending entries | ||
166 | * or unprocessed results at the time of the call | ||
167 | * @dev - points to the dev that identifies the queue to | ||
168 | * be released. | ||
169 | **/ | ||
170 | int caam_jr_deregister(struct device *rdev) | ||
171 | { | ||
172 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); | ||
173 | struct caam_drv_private *ctrlpriv; | ||
174 | |||
175 | /* Get the owning controller's private space */ | ||
176 | ctrlpriv = dev_get_drvdata(jrpriv->parentdev); | ||
177 | |||
178 | /* | ||
179 | * Make sure ring empty before release | ||
180 | */ | ||
181 | if (rd_reg32(&jrpriv->rregs->outring_used) || | ||
182 | (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH)) | ||
183 | return -EBUSY; | ||
184 | |||
185 | /* Release ring */ | ||
186 | spin_lock(&ctrlpriv->jr_alloc_lock); | ||
187 | jrpriv->assign = JOBR_UNASSIGNED; | ||
188 | spin_unlock(&ctrlpriv->jr_alloc_lock); | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | EXPORT_SYMBOL(caam_jr_deregister); | ||
193 | |||
194 | /** | ||
195 | * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, | 129 | * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, |
196 | * -EBUSY if the queue is full, -EIO if it cannot map the caller's | 130 | * -EBUSY if the queue is full, -EIO if it cannot map the caller's |
197 | * descriptor. | 131 | * descriptor. |
@@ -379,7 +313,6 @@ static int caam_jr_init(struct device *dev) | |||
379 | (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | | 313 | (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | |
380 | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); | 314 | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); |
381 | 315 | ||
382 | jrp->assign = JOBR_UNASSIGNED; | ||
383 | return 0; | 316 | return 0; |
384 | } | 317 | } |
385 | 318 | ||
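With caam_jr_register()/caam_jr_deregister() and the assign/jr_alloc_lock bookkeeping gone, job ring users submit through caam_jr_enqueue() alone and synchronize on the completion callback, the same shape hash_digest_key() and gen_split_key() already use. A sketch of that submit-and-wait pattern (struct sync_result and sync_done() are illustrative names, not driver symbols):

        struct sync_result {
                struct completion done;
                u32 status;
        };

        static void sync_done(struct device *dev, u32 *desc, u32 status,
                              void *context)
        {
                struct sync_result *res = context;

                res->status = status;
                complete(&res->done);
        }

        /*
         * caller:
         *      init_completion(&res.done);
         *      if (!caam_jr_enqueue(jrdev, desc, sync_done, &res))
         *              wait_for_completion_interruptible(&res.done);
         */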
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h index c23df395b622..9d8741a59037 100644 --- a/drivers/crypto/caam/jr.h +++ b/drivers/crypto/caam/jr.h | |||
@@ -8,8 +8,6 @@ | |||
8 | #define JR_H | 8 | #define JR_H |
9 | 9 | ||
10 | /* Prototypes for backend-level services exposed to APIs */ | 10 | /* Prototypes for backend-level services exposed to APIs */ |
11 | int caam_jr_register(struct device *ctrldev, struct device **rdev); | ||
12 | int caam_jr_deregister(struct device *rdev); | ||
13 | int caam_jr_enqueue(struct device *dev, u32 *desc, | 11 | int caam_jr_enqueue(struct device *dev, u32 *desc, |
14 | void (*cbk)(struct device *dev, u32 *desc, u32 status, | 12 | void (*cbk)(struct device *dev, u32 *desc, u32 status, |
15 | void *areq), | 13 | void *areq), |
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 87138d2adb5f..ea2e406610eb 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c | |||
@@ -95,9 +95,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | |||
95 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | 95 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); |
96 | 96 | ||
97 | #ifdef DEBUG | 97 | #ifdef DEBUG |
98 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | 98 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
99 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); | 99 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); |
100 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 100 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
101 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 101 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
102 | #endif | 102 | #endif |
103 | 103 | ||
@@ -110,7 +110,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | |||
110 | wait_for_completion_interruptible(&result.completion); | 110 | wait_for_completion_interruptible(&result.completion); |
111 | ret = result.err; | 111 | ret = result.err; |
112 | #ifdef DEBUG | 112 | #ifdef DEBUG |
113 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | 113 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
114 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, | 114 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, |
115 | split_key_pad_len, 1); | 115 | split_key_pad_len, 1); |
116 | #endif | 116 | #endif |
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index c09142fc13e3..4455396918de 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -341,6 +341,8 @@ struct caam_ctrl { | |||
341 | #define MCFGR_DMA_RESET 0x10000000 | 341 | #define MCFGR_DMA_RESET 0x10000000 |
342 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ | 342 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ |
343 | #define SCFGR_RDBENABLE 0x00000400 | 343 | #define SCFGR_RDBENABLE 0x00000400 |
344 | #define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ | ||
345 | #define DECORR_DEN0 0x00010000 /* DECO0 available for access*/ | ||
344 | 346 | ||
345 | /* AXI read cache control */ | 347 | /* AXI read cache control */ |
346 | #define MCFGR_ARCACHE_SHIFT 12 | 348 | #define MCFGR_ARCACHE_SHIFT 12 |
@@ -703,9 +705,16 @@ struct caam_deco { | |||
703 | struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */ | 705 | struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */ |
704 | u32 rsvd29[48]; | 706 | u32 rsvd29[48]; |
705 | u32 descbuf[64]; /* DxDESB - Descriptor buffer */ | 707 | u32 descbuf[64]; /* DxDESB - Descriptor buffer */ |
706 | u32 rsvd30[320]; | 708 | u32 rscvd30[193]; |
709 | u32 desc_dbg; /* DxDDR - DECO Debug Register */ | ||
710 | u32 rsvd31[126]; | ||
707 | }; | 711 | }; |
708 | 712 | ||
713 | /* DECO DBG Register Valid Bit*/ | ||
714 | #define DECO_DBG_VALID 0x80000000 | ||
715 | #define DECO_JQCR_WHL 0x20000000 | ||
716 | #define DECO_JQCR_FOUR 0x10000000 | ||
717 | |||
709 | /* | 718 | /* |
710 | * Current top-level view of memory map is: | 719 | * Current top-level view of memory map is: |
711 | * | 720 | * |
@@ -733,6 +742,7 @@ struct caam_full { | |||
733 | u64 rsvd[512]; | 742 | u64 rsvd[512]; |
734 | struct caam_assurance assure; | 743 | struct caam_assurance assure; |
735 | struct caam_queue_if qi; | 744 | struct caam_queue_if qi; |
745 | struct caam_deco deco; | ||
736 | }; | 746 | }; |
737 | 747 | ||
738 | #endif /* REGS_H */ | 748 | #endif /* REGS_H */ |
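The regs.h change carves the DECO debug register out of what used to be an undifferentiated reserved block: 193 + 1 + 126 = 320 words, so every offset after descbuf in struct caam_deco is preserved, and appending the deco member at the tail of struct caam_full leaves the earlier members in place. A build-time guard one could add for that invariant (a sketch, not part of this patch; BUILD_BUG_ON must sit in function scope, e.g. an init path):

        /* the split must cover exactly the old 320 reserved words */
        BUILD_BUG_ON(193 + 1 + 126 != 320);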
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index 35d483f8db66..cc00b52306ba 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c | |||
@@ -70,35 +70,52 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, | |||
70 | { | 70 | { |
71 | struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); | 71 | struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); |
72 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 72 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
73 | unsigned long irq_flags; | ||
74 | unsigned int processed = 0, to_process; | ||
75 | u32 max_sg_len; | ||
73 | int rc; | 76 | int rc; |
74 | 77 | ||
75 | if (nbytes > nx_ctx->ap->databytelen) | 78 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
76 | return -EINVAL; | 79 | |
80 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), | ||
81 | nx_ctx->ap->sglen); | ||
77 | 82 | ||
78 | if (enc) | 83 | if (enc) |
79 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; | 84 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; |
80 | else | 85 | else |
81 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; | 86 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; |
82 | 87 | ||
83 | rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, | 88 | do { |
84 | csbcpb->cpb.aes_cbc.iv); | 89 | to_process = min_t(u64, nbytes - processed, |
85 | if (rc) | 90 | nx_ctx->ap->databytelen); |
86 | goto out; | 91 | to_process = min_t(u64, to_process, |
87 | 92 | NX_PAGE_SIZE * (max_sg_len - 1)); | |
88 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | 93 | to_process = to_process & ~(AES_BLOCK_SIZE - 1); |
89 | rc = -EINVAL; | 94 | |
90 | goto out; | 95 | rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, |
91 | } | 96 | processed, csbcpb->cpb.aes_cbc.iv); |
92 | 97 | if (rc) | |
93 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 98 | goto out; |
94 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 99 | |
95 | if (rc) | 100 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { |
96 | goto out; | 101 | rc = -EINVAL; |
97 | 102 | goto out; | |
98 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 103 | } |
99 | atomic64_add(csbcpb->csb.processed_byte_count, | 104 | |
100 | &(nx_ctx->stats->aes_bytes)); | 105 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, |
106 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
107 | if (rc) | ||
108 | goto out; | ||
109 | |||
110 | memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); | ||
111 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
112 | atomic64_add(csbcpb->csb.processed_byte_count, | ||
113 | &(nx_ctx->stats->aes_bytes)); | ||
114 | |||
115 | processed += to_process; | ||
116 | } while (processed < nbytes); | ||
101 | out: | 117 | out: |
118 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
102 | return rc; | 119 | return rc; |
103 | } | 120 | } |
104 | 121 | ||
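The nx-aes-cbc.c conversion turns one oversized request into a loop of coprocessor-sized passes: each pass is clamped to the hardware's databytelen limit, to what the available scatter-gather entries can map, and down to a whole number of AES blocks, with the chaining value (cv) copied back into desc->info so the next pass continues the CBC chain. The clamp, as a hypothetical helper (nx_chunk() is not a driver symbol; NX_PAGE_SIZE and AES_BLOCK_SIZE are the driver's and crypto API's constants):

        static u64 nx_chunk(u64 remaining, u64 databytelen, u32 max_sg_len)
        {
                u64 n = min_t(u64, remaining, databytelen);

                /* bound by the sg list, less one entry of headroom */
                n = min_t(u64, n, (u64)NX_PAGE_SIZE * (max_sg_len - 1));

                /* CBC must be fed whole blocks */
                return n & ~(u64)(AES_BLOCK_SIZE - 1);
        }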
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index ef5eae6d1400..5ecd4c2414aa 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c | |||
@@ -179,13 +179,26 @@ static int generate_pat(u8 *iv, | |||
179 | struct nx_sg *nx_insg = nx_ctx->in_sg; | 179 | struct nx_sg *nx_insg = nx_ctx->in_sg; |
180 | struct nx_sg *nx_outsg = nx_ctx->out_sg; | 180 | struct nx_sg *nx_outsg = nx_ctx->out_sg; |
181 | unsigned int iauth_len = 0; | 181 | unsigned int iauth_len = 0; |
182 | struct vio_pfo_op *op = NULL; | ||
183 | u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; | 182 | u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; |
184 | int rc; | 183 | int rc; |
185 | 184 | ||
186 | /* zero the ctr value */ | 185 | /* zero the ctr value */ |
187 | memset(iv + 15 - iv[0], 0, iv[0] + 1); | 186 | memset(iv + 15 - iv[0], 0, iv[0] + 1); |
188 | 187 | ||
188 | /* page 78 of nx_wb.pdf has, | ||
189 | * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes | ||
190 | * in length. If a full message is used, the AES CCA implementation | ||
191 | * restricts the maximum AAD length to 2^32 -1 bytes. | ||
192 | * If partial messages are used, the implementation supports | ||
193 | * 2^64 -1 bytes maximum AAD length. | ||
194 | * | ||
195 | * However, in the cryptoapi's aead_request structure, | ||
196 | * assoclen is an unsigned int, thus it cannot hold a length | ||
197 | * value greater than 2^32 - 1. | ||
198 | * Thus the AAD is further constrained by this and is never | ||
199 | * greater than 2^32. | ||
200 | */ | ||
201 | |||
189 | if (!req->assoclen) { | 202 | if (!req->assoclen) { |
190 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; | 203 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; |
191 | } else if (req->assoclen <= 14) { | 204 | } else if (req->assoclen <= 14) { |
@@ -195,7 +208,46 @@ static int generate_pat(u8 *iv, | |||
195 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; | 208 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; |
196 | b1 = nx_ctx->priv.ccm.iauth_tag; | 209 | b1 = nx_ctx->priv.ccm.iauth_tag; |
197 | iauth_len = req->assoclen; | 210 | iauth_len = req->assoclen; |
211 | } else if (req->assoclen <= 65280) { | ||
212 | /* if associated data is less than (2^16 - 2^8), we construct | ||
213 | * B1 differently and feed in the associated data to a CCA | ||
214 | * operation */ | ||
215 | b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; | ||
216 | b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; | ||
217 | iauth_len = 14; | ||
218 | } else { | ||
219 | b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; | ||
220 | b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; | ||
221 | iauth_len = 10; | ||
222 | } | ||
223 | |||
224 | /* generate B0 */ | ||
225 | rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); | ||
226 | if (rc) | ||
227 | return rc; | ||
228 | |||
229 | /* generate B1: | ||
230 | * add control info for associated data | ||
231 | * RFC 3610 and NIST Special Publication 800-38C | ||
232 | */ | ||
233 | if (b1) { | ||
234 | memset(b1, 0, 16); | ||
235 | if (req->assoclen <= 65280) { | ||
236 | *(u16 *)b1 = (u16)req->assoclen; | ||
237 | scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, | ||
238 | iauth_len, SCATTERWALK_FROM_SG); | ||
239 | } else { | ||
240 | *(u16 *)b1 = (u16)(0xfffe); | ||
241 | *(u32 *)&b1[2] = (u32)req->assoclen; | ||
242 | scatterwalk_map_and_copy(b1 + 6, req->assoc, 0, | ||
243 | iauth_len, SCATTERWALK_FROM_SG); | ||
244 | } | ||
245 | } | ||
198 | 246 | ||
247 | /* now copy any remaining AAD to scatterlist and call nx... */ | ||
248 | if (!req->assoclen) { | ||
249 | return rc; | ||
250 | } else if (req->assoclen <= 14) { | ||
199 | nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen); | 251 | nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen); |
200 | nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16, | 252 | nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16, |
201 | nx_ctx->ap->sglen); | 253 | nx_ctx->ap->sglen); |
@@ -210,56 +262,74 @@ static int generate_pat(u8 *iv, | |||
210 | NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT; | 262 | NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT; |
211 | NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE; | 263 | NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE; |
212 | 264 | ||
213 | op = &nx_ctx->op; | ||
214 | result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac; | 265 | result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac; |
215 | } else if (req->assoclen <= 65280) { | ||
216 | /* if associated data is less than (2^16 - 2^8), we construct | ||
217 | * B1 differently and feed in the associated data to a CCA | ||
218 | * operation */ | ||
219 | b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; | ||
220 | b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; | ||
221 | iauth_len = 14; | ||
222 | 266 | ||
223 | /* remaining assoc data must have scatterlist built for it */ | 267 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, |
224 | nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, | 268 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); |
225 | req->assoc, iauth_len, | 269 | if (rc) |
226 | req->assoclen - iauth_len); | 270 | return rc; |
227 | nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * | 271 | |
228 | sizeof(struct nx_sg); | 272 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
273 | atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); | ||
229 | 274 | ||
230 | op = &nx_ctx->op_aead; | ||
231 | result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; | ||
232 | } else { | 275 | } else { |
233 | /* if associated data is less than (2^32), we construct B1 | 276 | u32 max_sg_len; |
234 | * differently yet again and feed in the associated data to a | 277 | unsigned int processed = 0, to_process; |
235 | * CCA operation */ | 278 | |
236 | pr_err("associated data len is %u bytes (returning -EINVAL)\n", | 279 | /* page_limit: number of sg entries that fit on one page */ |
237 | req->assoclen); | 280 | max_sg_len = min_t(u32, |
238 | rc = -EINVAL; | 281 | nx_driver.of.max_sg_len/sizeof(struct nx_sg), |
239 | } | 282 | nx_ctx->ap->sglen); |
283 | |||
284 | processed += iauth_len; | ||
285 | |||
286 | do { | ||
287 | to_process = min_t(u32, req->assoclen - processed, | ||
288 | nx_ctx->ap->databytelen); | ||
289 | to_process = min_t(u64, to_process, | ||
290 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
291 | |||
292 | if ((to_process + processed) < req->assoclen) { | ||
293 | NX_CPB_FDM(nx_ctx->csbcpb_aead) |= | ||
294 | NX_FDM_INTERMEDIATE; | ||
295 | } else { | ||
296 | NX_CPB_FDM(nx_ctx->csbcpb_aead) &= | ||
297 | ~NX_FDM_INTERMEDIATE; | ||
298 | } | ||
299 | |||
300 | nx_insg = nx_walk_and_build(nx_ctx->in_sg, | ||
301 | nx_ctx->ap->sglen, | ||
302 | req->assoc, processed, | ||
303 | to_process); | ||
304 | |||
305 | nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * | ||
306 | sizeof(struct nx_sg); | ||
240 | 307 | ||
241 | rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); | 308 | result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; |
242 | if (rc) | ||
243 | goto done; | ||
244 | 309 | ||
245 | if (b1) { | 310 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, |
246 | memset(b1, 0, 16); | 311 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); |
247 | *(u16 *)b1 = (u16)req->assoclen; | 312 | if (rc) |
313 | return rc; | ||
248 | 314 | ||
249 | scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, | 315 | memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0, |
250 | iauth_len, SCATTERWALK_FROM_SG); | 316 | nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0, |
317 | AES_BLOCK_SIZE); | ||
251 | 318 | ||
252 | rc = nx_hcall_sync(nx_ctx, op, | 319 | NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION; |
253 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
254 | if (rc) | ||
255 | goto done; | ||
256 | 320 | ||
257 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 321 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
258 | atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); | 322 | atomic64_add(req->assoclen, |
323 | &(nx_ctx->stats->aes_bytes)); | ||
259 | 324 | ||
260 | memcpy(out, result, AES_BLOCK_SIZE); | 325 | processed += to_process; |
326 | } while (processed < req->assoclen); | ||
327 | |||
328 | result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; | ||
261 | } | 329 | } |
262 | done: | 330 | |
331 | memcpy(out, result, AES_BLOCK_SIZE); | ||
332 | |||
263 | return rc; | 333 | return rc; |
264 | } | 334 | } |
265 | 335 | ||
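The reworked generate_pat() above builds B0 and the B1 header up front, then walks the associated data through the CCA operation in bounded chunks, chaining intermediate results via the continuation flag. The B1 header itself follows RFC 3610 / NIST SP 800-38C: for AAD shorter than 2^16 - 2^8 bytes, two big-endian length octets; beyond that, the 0xff 0xfe marker plus a four-octet length. A portable sketch of that header (ccm_aad_hdr() is an invented name; the driver writes the fields with raw casts, which produces the same bytes on the big-endian Power hardware it targets):

        #include <asm/unaligned.h>

        static unsigned int ccm_aad_hdr(u8 *b1, u32 assoclen)
        {
                if (assoclen < 0xff00) {        /* 2^16 - 2^8 */
                        put_unaligned_be16(assoclen, b1);
                        return 2;               /* AAD follows at b1[2] */
                }

                put_unaligned_be16(0xfffe, b1);
                put_unaligned_be32(assoclen, b1 + 2);
                return 6;                       /* AAD follows at b1[6] */
        }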
@@ -271,10 +341,12 @@ static int ccm_nx_decrypt(struct aead_request *req, | |||
271 | unsigned int nbytes = req->cryptlen; | 341 | unsigned int nbytes = req->cryptlen; |
272 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); | 342 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); |
273 | struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; | 343 | struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; |
344 | unsigned long irq_flags; | ||
345 | unsigned int processed = 0, to_process; | ||
346 | u32 max_sg_len; | ||
274 | int rc = -1; | 347 | int rc = -1; |
275 | 348 | ||
276 | if (nbytes > nx_ctx->ap->databytelen) | 349 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
277 | return -EINVAL; | ||
278 | 350 | ||
279 | nbytes -= authsize; | 351 | nbytes -= authsize; |
280 | 352 | ||
@@ -288,26 +360,61 @@ static int ccm_nx_decrypt(struct aead_request *req, | |||
288 | if (rc) | 360 | if (rc) |
289 | goto out; | 361 | goto out; |
290 | 362 | ||
291 | rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, | 363 | /* page_limit: number of sg entries that fit on one page */ |
292 | csbcpb->cpb.aes_ccm.iv_or_ctr); | 364 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), |
293 | if (rc) | 365 | nx_ctx->ap->sglen); |
294 | goto out; | ||
295 | 366 | ||
296 | NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; | 367 | do { |
297 | NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE; | 368 | |
369 | /* to_process: the AES_BLOCK_SIZE data chunk to process in this | ||
370 | * update. This value is bound by sg list limits. | ||
371 | */ | ||
372 | to_process = min_t(u64, nbytes - processed, | ||
373 | nx_ctx->ap->databytelen); | ||
374 | to_process = min_t(u64, to_process, | ||
375 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
376 | |||
377 | if ((to_process + processed) < nbytes) | ||
378 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | ||
379 | else | ||
380 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; | ||
381 | |||
382 | NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; | ||
383 | |||
384 | rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, | ||
385 | to_process, processed, | ||
386 | csbcpb->cpb.aes_ccm.iv_or_ctr); | ||
387 | if (rc) | ||
388 | goto out; | ||
298 | 389 | ||
299 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 390 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, |
300 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 391 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); |
301 | if (rc) | 392 | if (rc) |
302 | goto out; | 393 | goto out; |
303 | 394 | ||
304 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 395 | /* on partial completion, copy the state below back |
305 | atomic64_add(csbcpb->csb.processed_byte_count, | 396 | * in for the next pass through the loop. |
306 | &(nx_ctx->stats->aes_bytes)); | 397 | */ |
398 | memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); | ||
399 | memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, | ||
400 | csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); | ||
401 | memcpy(csbcpb->cpb.aes_ccm.in_s0, | ||
402 | csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); | ||
403 | |||
404 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | ||
405 | |||
406 | /* update stats */ | ||
407 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
408 | atomic64_add(csbcpb->csb.processed_byte_count, | ||
409 | &(nx_ctx->stats->aes_bytes)); | ||
410 | |||
411 | processed += to_process; | ||
412 | } while (processed < nbytes); | ||
307 | 413 | ||
308 | rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, | 414 | rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, |
309 | authsize) ? -EBADMSG : 0; | 415 | authsize) ? -EBADMSG : 0; |
310 | out: | 416 | out: |
417 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
311 | return rc; | 418 | return rc; |
312 | } | 419 | } |
313 | 420 | ||
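Between chunks, the decrypt loop above (and the encrypt path in the next hunk) threads three pieces of hardware output back into the next request: the running counter (via desc->info), the partial MAC, and S0. A stand-in struct to picture the carry; the field names echo the CPB layout, but this is not the real csbcpb.

    #include <stdint.h>
    #include <string.h>

    #define AES_BLOCK_SIZE 16

    /* Stand-in for the aes_ccm CPB fields involved in the carry. */
    struct ccm_chunk_state {
        uint8_t iv_or_ctr[AES_BLOCK_SIZE];      /* next chunk's counter in  */
        uint8_t out_ctr[AES_BLOCK_SIZE];        /* this chunk's counter out */
        uint8_t in_pat_or_b0[AES_BLOCK_SIZE];   /* next chunk's partial MAC */
        uint8_t out_pat_or_mac[AES_BLOCK_SIZE]; /* this chunk's partial MAC */
        uint8_t in_s0[AES_BLOCK_SIZE];
        uint8_t out_s0[AES_BLOCK_SIZE];
    };

    /* After each partial completion, outputs become the next inputs. */
    static void ccm_carry(struct ccm_chunk_state *s)
    {
        memcpy(s->iv_or_ctr,    s->out_ctr,        AES_BLOCK_SIZE);
        memcpy(s->in_pat_or_b0, s->out_pat_or_mac, AES_BLOCK_SIZE);
        memcpy(s->in_s0,        s->out_s0,         AES_BLOCK_SIZE);
    }

    int main(void)
    {
        struct ccm_chunk_state s = { .out_ctr = { 1 } };
        ccm_carry(&s);            /* NX_FDM_CONTINUATION is set after this */
        return s.iv_or_ctr[0] == 1 ? 0 : 1;
    }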
@@ -318,38 +425,76 @@ static int ccm_nx_encrypt(struct aead_request *req, | |||
318 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 425 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
319 | unsigned int nbytes = req->cryptlen; | 426 | unsigned int nbytes = req->cryptlen; |
320 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); | 427 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); |
428 | unsigned long irq_flags; | ||
429 | unsigned int processed = 0, to_process; | ||
430 | u32 max_sg_len; | ||
321 | int rc = -1; | 431 | int rc = -1; |
322 | 432 | ||
323 | if (nbytes > nx_ctx->ap->databytelen) | 433 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
324 | return -EINVAL; | ||
325 | 434 | ||
326 | rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, | 435 | rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, |
327 | csbcpb->cpb.aes_ccm.in_pat_or_b0); | 436 | csbcpb->cpb.aes_ccm.in_pat_or_b0); |
328 | if (rc) | 437 | if (rc) |
329 | goto out; | 438 | goto out; |
330 | 439 | ||
331 | rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, | 440 | /* page_limit: number of sg entries that fit on one page */ |
332 | csbcpb->cpb.aes_ccm.iv_or_ctr); | 441 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), |
333 | if (rc) | 442 | nx_ctx->ap->sglen); |
334 | goto out; | 443 | |
444 | do { | ||
445 | /* to_process: the AES_BLOCK_SIZE data chunk to process in this ||
446 | * update. This value is bound by sg list limits. | ||
447 | */ | ||
448 | to_process = min_t(u64, nbytes - processed, | ||
449 | nx_ctx->ap->databytelen); | ||
450 | to_process = min_t(u64, to_process, | ||
451 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
452 | |||
453 | if ((to_process + processed) < nbytes) | ||
454 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | ||
455 | else | ||
456 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; | ||
457 | |||
458 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; | ||
459 | |||
460 | rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, | ||
461 | to_process, processed, | ||
462 | csbcpb->cpb.aes_ccm.iv_or_ctr); | ||
463 | if (rc) | ||
464 | goto out; | ||
335 | 465 | ||
336 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; | 466 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, |
337 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; | 467 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); |
468 | if (rc) | ||
469 | goto out; | ||
338 | 470 | ||
339 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 471 | /* on partial completion, copy the state below back |
340 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 472 | * in for the next pass through the loop. |
341 | if (rc) | 473 | */ |
342 | goto out; | 474 | memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); |
475 | memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, | ||
476 | csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); | ||
477 | memcpy(csbcpb->cpb.aes_ccm.in_s0, | ||
478 | csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); | ||
343 | 479 | ||
344 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 480 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; |
345 | atomic64_add(csbcpb->csb.processed_byte_count, | 481 | |
346 | &(nx_ctx->stats->aes_bytes)); | 482 | /* update stats */ |
483 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
484 | atomic64_add(csbcpb->csb.processed_byte_count, | ||
485 | &(nx_ctx->stats->aes_bytes)); | ||
486 | |||
487 | processed += to_process; | ||
488 | |||
489 | } while (processed < nbytes); | ||
347 | 490 | ||
348 | /* copy out the auth tag */ | 491 | /* copy out the auth tag */ |
349 | scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, | 492 | scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, |
350 | req->dst, nbytes, authsize, | 493 | req->dst, nbytes, authsize, |
351 | SCATTERWALK_TO_SG); | 494 | SCATTERWALK_TO_SG); |
495 | |||
352 | out: | 496 | out: |
497 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
353 | return rc; | 498 | return rc; |
354 | } | 499 | } |
355 | 500 | ||
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index b6286f14680b..a37d009dc75c 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c | |||
@@ -88,30 +88,48 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc, | |||
88 | { | 88 | { |
89 | struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); | 89 | struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); |
90 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 90 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
91 | unsigned long irq_flags; | ||
92 | unsigned int processed = 0, to_process; | ||
93 | u32 max_sg_len; | ||
91 | int rc; | 94 | int rc; |
92 | 95 | ||
93 | if (nbytes > nx_ctx->ap->databytelen) | 96 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
94 | return -EINVAL; | ||
95 | 97 | ||
96 | rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, | 98 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), |
97 | csbcpb->cpb.aes_ctr.iv); | 99 | nx_ctx->ap->sglen); |
98 | if (rc) | ||
99 | goto out; | ||
100 | 100 | ||
101 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | 101 | do { |
102 | rc = -EINVAL; | 102 | to_process = min_t(u64, nbytes - processed, |
103 | goto out; | 103 | nx_ctx->ap->databytelen); |
104 | } | 104 | to_process = min_t(u64, to_process, |
105 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
106 | to_process = to_process & ~(AES_BLOCK_SIZE - 1); | ||
107 | |||
108 | rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, | ||
109 | processed, csbcpb->cpb.aes_ctr.iv); | ||
110 | if (rc) | ||
111 | goto out; | ||
112 | |||
113 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | ||
114 | rc = -EINVAL; | ||
115 | goto out; | ||
116 | } | ||
117 | |||
118 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | ||
119 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
120 | if (rc) | ||
121 | goto out; | ||
122 | |||
123 | memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); | ||
105 | 124 | ||
106 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 125 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
107 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 126 | atomic64_add(csbcpb->csb.processed_byte_count, |
108 | if (rc) | 127 | &(nx_ctx->stats->aes_bytes)); |
109 | goto out; | ||
110 | 128 | ||
111 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 129 | processed += to_process; |
112 | atomic64_add(csbcpb->csb.processed_byte_count, | 130 | } while (processed < nbytes); |
113 | &(nx_ctx->stats->aes_bytes)); | ||
114 | out: | 131 | out: |
132 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
115 | return rc; | 133 | return rc; |
116 | } | 134 | } |
117 | 135 | ||
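The CTR loop here (and the ECB loop in the next file) also masks each chunk down to a whole number of AES blocks before building the sg lists; the mask is the usual power-of-two round-down.

    #include <assert.h>
    #include <stdint.h>

    #define AES_BLOCK_SIZE 16u

    /* to_process & ~(AES_BLOCK_SIZE - 1): round down to a block multiple. */
    static uint64_t round_down_to_block(uint64_t n)
    {
        return n & ~(uint64_t)(AES_BLOCK_SIZE - 1);
    }

    int main(void)
    {
        assert(round_down_to_block(16)   == 16);
        assert(round_down_to_block(31)   == 16);
        assert(round_down_to_block(4095) == 4080);
        return 0;
    }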
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 7bbc9a81da21..85a8d23cf29d 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c | |||
@@ -70,34 +70,52 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc, | |||
70 | { | 70 | { |
71 | struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); | 71 | struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); |
72 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 72 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
73 | unsigned long irq_flags; | ||
74 | unsigned int processed = 0, to_process; | ||
75 | u32 max_sg_len; | ||
73 | int rc; | 76 | int rc; |
74 | 77 | ||
75 | if (nbytes > nx_ctx->ap->databytelen) | 78 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
76 | return -EINVAL; | 79 | |
80 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), | ||
81 | nx_ctx->ap->sglen); | ||
77 | 82 | ||
78 | if (enc) | 83 | if (enc) |
79 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; | 84 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; |
80 | else | 85 | else |
81 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; | 86 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; |
82 | 87 | ||
83 | rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL); | 88 | do { |
84 | if (rc) | 89 | to_process = min_t(u64, nbytes - processed, |
85 | goto out; | 90 | nx_ctx->ap->databytelen); |
91 | to_process = min_t(u64, to_process, | ||
92 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
93 | to_process = to_process & ~(AES_BLOCK_SIZE - 1); | ||
86 | 94 | ||
87 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | 95 | rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process, |
88 | rc = -EINVAL; | 96 | processed, NULL); |
89 | goto out; | 97 | if (rc) |
90 | } | 98 | goto out; |
99 | |||
100 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | ||
101 | rc = -EINVAL; | ||
102 | goto out; | ||
103 | } | ||
104 | |||
105 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | ||
106 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
107 | if (rc) | ||
108 | goto out; | ||
109 | |||
110 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
111 | atomic64_add(csbcpb->csb.processed_byte_count, | ||
112 | &(nx_ctx->stats->aes_bytes)); | ||
91 | 113 | ||
92 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 114 | processed += to_process; |
93 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 115 | } while (processed < nbytes); |
94 | if (rc) | ||
95 | goto out; | ||
96 | 116 | ||
97 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
98 | atomic64_add(csbcpb->csb.processed_byte_count, | ||
99 | &(nx_ctx->stats->aes_bytes)); | ||
100 | out: | 117 | out: |
118 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
101 | return rc; | 119 | return rc; |
102 | } | 120 | } |
103 | 121 | ||
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 6cca6c392b00..025d9a8d5b19 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c | |||
@@ -125,38 +125,187 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, | |||
125 | struct aead_request *req, | 125 | struct aead_request *req, |
126 | u8 *out) | 126 | u8 *out) |
127 | { | 127 | { |
128 | int rc; | ||
128 | struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; | 129 | struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; |
129 | int rc = -EINVAL; | ||
130 | struct scatter_walk walk; | 130 | struct scatter_walk walk; |
131 | struct nx_sg *nx_sg = nx_ctx->in_sg; | 131 | struct nx_sg *nx_sg = nx_ctx->in_sg; |
132 | unsigned int nbytes = req->assoclen; | ||
133 | unsigned int processed = 0, to_process; | ||
134 | u32 max_sg_len; | ||
132 | 135 | ||
133 | if (req->assoclen > nx_ctx->ap->databytelen) | 136 | if (nbytes <= AES_BLOCK_SIZE) { |
134 | goto out; | ||
135 | |||
136 | if (req->assoclen <= AES_BLOCK_SIZE) { | ||
137 | scatterwalk_start(&walk, req->assoc); | 137 | scatterwalk_start(&walk, req->assoc); |
138 | scatterwalk_copychunks(out, &walk, req->assoclen, | 138 | scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); |
139 | SCATTERWALK_FROM_SG); | ||
140 | scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); | 139 | scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); |
141 | 140 | return 0; | |
142 | rc = 0; | ||
143 | goto out; | ||
144 | } | 141 | } |
145 | 142 | ||
146 | nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0, | 143 | NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION; |
147 | req->assoclen); | 144 | |
148 | nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); | 145 | /* page_limit: number of sg entries that fit on one page */ |
146 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), | ||
147 | nx_ctx->ap->sglen); | ||
148 | |||
149 | do { | ||
150 | /* | ||
151 | * to_process: the data chunk to process in this update. | ||
152 | * This value is bound by sg list limits. | ||
153 | */ | ||
154 | to_process = min_t(u64, nbytes - processed, | ||
155 | nx_ctx->ap->databytelen); | ||
156 | to_process = min_t(u64, to_process, | ||
157 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
158 | |||
159 | if ((to_process + processed) < nbytes) | ||
160 | NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; | ||
161 | else | ||
162 | NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE; | ||
163 | |||
164 | nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen, | ||
165 | req->assoc, processed, to_process); | ||
166 | nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) | ||
167 | * sizeof(struct nx_sg); | ||
168 | |||
169 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, | ||
170 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
171 | if (rc) | ||
172 | return rc; | ||
173 | |||
174 | memcpy(csbcpb_aead->cpb.aes_gca.in_pat, | ||
175 | csbcpb_aead->cpb.aes_gca.out_pat, | ||
176 | AES_BLOCK_SIZE); | ||
177 | NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION; | ||
178 | |||
179 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
180 | atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); | ||
181 | |||
182 | processed += to_process; | ||
183 | } while (processed < nbytes); | ||
184 | |||
185 | memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); | ||
186 | |||
187 | return rc; | ||
188 | } | ||
189 | |||
190 | static int gmac(struct aead_request *req, struct blkcipher_desc *desc) | ||
191 | { | ||
192 | int rc; | ||
193 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | ||
194 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | ||
195 | struct nx_sg *nx_sg; | ||
196 | unsigned int nbytes = req->assoclen; | ||
197 | unsigned int processed = 0, to_process; | ||
198 | u32 max_sg_len; | ||
199 | |||
200 | /* Set GMAC mode */ | ||
201 | csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC; | ||
202 | |||
203 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; | ||
204 | |||
205 | /* page_limit: number of sg entries that fit on one page */ | ||
206 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), | ||
207 | nx_ctx->ap->sglen); | ||
208 | |||
209 | /* Copy IV */ | ||
210 | memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE); | ||
211 | |||
212 | do { | ||
213 | /* | ||
214 | * to_process: the data chunk to process in this update. | ||
215 | * This value is bound by sg list limits. | ||
216 | */ | ||
217 | to_process = min_t(u64, nbytes - processed, | ||
218 | nx_ctx->ap->databytelen); | ||
219 | to_process = min_t(u64, to_process, | ||
220 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
221 | |||
222 | if ((to_process + processed) < nbytes) | ||
223 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | ||
224 | else | ||
225 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; | ||
226 | |||
227 | nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen, | ||
228 | req->assoc, processed, to_process); | ||
229 | nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg) | ||
230 | * sizeof(struct nx_sg); | ||
231 | |||
232 | csbcpb->cpb.aes_gcm.bit_length_data = 0; | ||
233 | csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes; | ||
234 | |||
235 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | ||
236 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
237 | if (rc) | ||
238 | goto out; | ||
239 | |||
240 | memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, | ||
241 | csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); | ||
242 | memcpy(csbcpb->cpb.aes_gcm.in_s0, | ||
243 | csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); | ||
244 | |||
245 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | ||
246 | |||
247 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
248 | atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); | ||
249 | |||
250 | processed += to_process; | ||
251 | } while (processed < nbytes); | ||
252 | |||
253 | out: | ||
254 | /* Restore GCM mode */ | ||
255 | csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; | ||
256 | return rc; | ||
257 | } | ||
258 | |||
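gmac() is GCM run with an empty plaintext: bit_length_data stays 0 while bit_length_aad carries the full AAD size in bits. A trivial sketch of the two CPB length fields it programs; the struct here is a stand-in, not the real CPB.

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for the two aes_gcm CPB length fields gmac() programs. */
    struct gcm_bit_lengths {
        uint64_t data;  /* ciphertext/plaintext bits: 0 for pure GMAC */
        uint64_t aad;   /* associated data bits: 8 * assoclen         */
    };

    static struct gcm_bit_lengths gmac_lengths(uint64_t assoclen)
    {
        struct gcm_bit_lengths l = { .data = 0, .aad = 8 * assoclen };
        return l;
    }

    int main(void)
    {
        assert(gmac_lengths(20).aad == 160);
        return 0;
    }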
259 | static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, | ||
260 | int enc) | ||
261 | { | ||
262 | int rc; | ||
263 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | ||
264 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | ||
265 | char out[AES_BLOCK_SIZE]; | ||
266 | struct nx_sg *in_sg, *out_sg; | ||
267 | |||
268 | /* For scenarios where the input message is zero length, AES CTR mode | ||
269 | * may be used. Set the source data to be a single block (16B) of all | ||
270 | * zeros, and set the input IV value to be the same as the GMAC IV | ||
271 | * value. - nx_wb 4.8.1.3 */ | ||
272 | |||
273 | /* Change to ECB mode */ | ||
274 | csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; | ||
275 | memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key, | ||
276 | sizeof(csbcpb->cpb.aes_ecb.key)); | ||
277 | if (enc) | ||
278 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; | ||
279 | else | ||
280 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; | ||
281 | |||
282 | /* Encrypt the counter/IV */ | ||
283 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info, | ||
284 | AES_BLOCK_SIZE, nx_ctx->ap->sglen); | ||
285 | out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out), | ||
286 | nx_ctx->ap->sglen); | ||
287 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); | ||
288 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); | ||
149 | 289 | ||
150 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, | 290 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, |
151 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 291 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); |
152 | if (rc) | 292 | if (rc) |
153 | goto out; | 293 | goto out; |
154 | |||
155 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 294 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
156 | atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); | ||
157 | 295 | ||
158 | memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); | 296 | /* Copy out the auth tag */ |
297 | memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out, | ||
298 | crypto_aead_authsize(crypto_aead_reqtfm(req))); | ||
159 | out: | 299 | out: |
300 | /* Restore GCM mode */ ||
301 | csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; | ||
302 | |||
303 | /* | ||
304 | * The ECB key uses the same region as the GCM AAD and counter, so it ||
305 | * is safe to just fill it with zeroes. ||
306 | */ | ||
307 | memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key)); | ||
308 | |||
160 | return rc; | 309 | return rc; |
161 | } | 310 | } |
162 | 311 | ||
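What gcm_empty() relies on: with no ciphertext and no AAD, the GHASH term is zero, so the GCM tag collapses to E_K(J0), a single ECB encryption of the initial counter block. A sketch of that reduction; aes_block_fn and stub_cipher are assumed placeholders for a real single-block AES primitive, not actual APIs.

    #include <stdint.h>
    #include <string.h>

    #define AES_BLOCK_SIZE 16

    /* Placeholder for a single-block AES-128 encryption primitive. */
    typedef void (*aes_block_fn)(const uint8_t key[16],
                                 const uint8_t in[AES_BLOCK_SIZE],
                                 uint8_t out[AES_BLOCK_SIZE]);

    /* Zero-length, zero-AAD GCM: tag = E_K(J0), where J0 is the 12-byte IV
     * followed by a 32-bit big-endian counter of 1 (cf. the driver setting
     * the word at NX_GCM_CTR_OFFSET to 1). */
    static void gcm_empty_tag(aes_block_fn aes, const uint8_t key[16],
                              const uint8_t iv[12], uint8_t tag[AES_BLOCK_SIZE])
    {
        uint8_t j0[AES_BLOCK_SIZE] = { 0 };

        memcpy(j0, iv, 12);
        j0[15] = 1;
        aes(key, j0, tag);
    }

    /* Dummy stand-in cipher for demonstration only (NOT AES). */
    static void stub_cipher(const uint8_t key[16], const uint8_t in[16],
                            uint8_t out[16])
    {
        for (int i = 0; i < 16; i++)
            out[i] = in[i] ^ key[i];
    }

    int main(void)
    {
        uint8_t key[16] = { 0 }, iv[12] = { 0 }, tag[16];
        gcm_empty_tag(stub_cipher, key, iv, tag);
        return tag[15] == 1 ? 0 : 1;
    }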
@@ -166,88 +315,104 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) | |||
166 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 315 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
167 | struct blkcipher_desc desc; | 316 | struct blkcipher_desc desc; |
168 | unsigned int nbytes = req->cryptlen; | 317 | unsigned int nbytes = req->cryptlen; |
318 | unsigned int processed = 0, to_process; | ||
319 | unsigned long irq_flags; | ||
320 | u32 max_sg_len; | ||
169 | int rc = -EINVAL; | 321 | int rc = -EINVAL; |
170 | 322 | ||
171 | if (nbytes > nx_ctx->ap->databytelen) | 323 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
172 | goto out; | ||
173 | 324 | ||
174 | desc.info = nx_ctx->priv.gcm.iv; | 325 | desc.info = nx_ctx->priv.gcm.iv; |
175 | /* initialize the counter */ | 326 | /* initialize the counter */ |
176 | *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; | 327 | *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; |
177 | 328 | ||
178 | /* For scenarios where the input message is zero length, AES CTR mode | ||
179 | * may be used. Set the source data to be a single block (16B) of all | ||
180 | * zeros, and set the input IV value to be the same as the GMAC IV | ||
181 | * value. - nx_wb 4.8.1.3 */ | ||
182 | if (nbytes == 0) { | 329 | if (nbytes == 0) { |
183 | char src[AES_BLOCK_SIZE] = {}; | 330 | if (req->assoclen == 0) |
184 | struct scatterlist sg; | 331 | rc = gcm_empty(req, &desc, enc); |
185 | 332 | else | |
186 | desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); | 333 | rc = gmac(req, &desc); |
187 | if (IS_ERR(desc.tfm)) { | 334 | if (rc) |
188 | rc = -ENOMEM; | ||
189 | goto out; | 335 | goto out; |
190 | } | ||
191 | |||
192 | crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key, | ||
193 | NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 : | ||
194 | NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32); | ||
195 | |||
196 | sg_init_one(&sg, src, AES_BLOCK_SIZE); | ||
197 | if (enc) | ||
198 | crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg, | ||
199 | AES_BLOCK_SIZE); | ||
200 | else | 336 | else |
201 | crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg, | 337 | goto mac; |
202 | AES_BLOCK_SIZE); | ||
203 | crypto_free_blkcipher(desc.tfm); | ||
204 | |||
205 | rc = 0; | ||
206 | goto out; | ||
207 | } | 338 | } |
208 | 339 | ||
209 | desc.tfm = (struct crypto_blkcipher *)req->base.tfm; | 340 | /* Process associated data */ |
210 | |||
211 | csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; | 341 | csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; |
212 | |||
213 | if (req->assoclen) { | 342 | if (req->assoclen) { |
214 | rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); | 343 | rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); |
215 | if (rc) | 344 | if (rc) |
216 | goto out; | 345 | goto out; |
217 | } | 346 | } |
218 | 347 | ||
219 | if (enc) | 348 | /* Set flags for encryption */ |
349 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; | ||
350 | if (enc) { | ||
220 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; | 351 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; |
221 | else | 352 | } else { |
353 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; | ||
222 | nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); | 354 | nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); |
355 | } | ||
223 | 356 | ||
224 | csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; | 357 | /* page_limit: number of sg entries that fit on one page */ |
358 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), | ||
359 | nx_ctx->ap->sglen); | ||
360 | |||
361 | do { | ||
362 | /* | ||
363 | * to_process: the data chunk to process in this update. | ||
364 | * This value is bound by sg list limits. | ||
365 | */ | ||
366 | to_process = min_t(u64, nbytes - processed, | ||
367 | nx_ctx->ap->databytelen); | ||
368 | to_process = min_t(u64, to_process, | ||
369 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
370 | |||
371 | if ((to_process + processed) < nbytes) | ||
372 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | ||
373 | else | ||
374 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; | ||
225 | 375 | ||
226 | rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes, | 376 | csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; |
227 | csbcpb->cpb.aes_gcm.iv_or_cnt); | 377 | desc.tfm = (struct crypto_blkcipher *) req->base.tfm; |
228 | if (rc) | 378 | rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, |
229 | goto out; | 379 | req->src, to_process, processed, |
380 | csbcpb->cpb.aes_gcm.iv_or_cnt); | ||
381 | if (rc) | ||
382 | goto out; | ||
230 | 383 | ||
231 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 384 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, |
232 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 385 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); |
233 | if (rc) | 386 | if (rc) |
234 | goto out; | 387 | goto out; |
235 | 388 | ||
236 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 389 | memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE); |
237 | atomic64_add(csbcpb->csb.processed_byte_count, | 390 | memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, |
238 | &(nx_ctx->stats->aes_bytes)); | 391 | csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); |
392 | memcpy(csbcpb->cpb.aes_gcm.in_s0, | ||
393 | csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); | ||
394 | |||
395 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | ||
396 | |||
397 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
398 | atomic64_add(csbcpb->csb.processed_byte_count, | ||
399 | &(nx_ctx->stats->aes_bytes)); | ||
400 | |||
401 | processed += to_process; | ||
402 | } while (processed < nbytes); | ||
239 | 403 | ||
404 | mac: | ||
240 | if (enc) { | 405 | if (enc) { |
241 | /* copy out the auth tag */ | 406 | /* copy out the auth tag */ |
242 | scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, | 407 | scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, |
243 | req->dst, nbytes, | 408 | req->dst, nbytes, |
244 | crypto_aead_authsize(crypto_aead_reqtfm(req)), | 409 | crypto_aead_authsize(crypto_aead_reqtfm(req)), |
245 | SCATTERWALK_TO_SG); | 410 | SCATTERWALK_TO_SG); |
246 | } else if (req->assoclen) { | 411 | } else { |
247 | u8 *itag = nx_ctx->priv.gcm.iauth_tag; | 412 | u8 *itag = nx_ctx->priv.gcm.iauth_tag; |
248 | u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; | 413 | u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; |
249 | 414 | ||
250 | scatterwalk_map_and_copy(itag, req->dst, nbytes, | 415 | scatterwalk_map_and_copy(itag, req->src, nbytes, |
251 | crypto_aead_authsize(crypto_aead_reqtfm(req)), | 416 | crypto_aead_authsize(crypto_aead_reqtfm(req)), |
252 | SCATTERWALK_FROM_SG); | 417 | SCATTERWALK_FROM_SG); |
253 | rc = memcmp(itag, otag, | 418 | rc = memcmp(itag, otag, |
@@ -255,6 +420,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) | |||
255 | -EBADMSG : 0; | 420 | -EBADMSG : 0; |
256 | } | 421 | } |
257 | out: | 422 | out: |
423 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
258 | return rc; | 424 | return rc; |
259 | } | 425 | } |
260 | 426 | ||
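The nbytes == 0 branch added above leaves the GCM entry point with three paths (note the decrypt side now also reads the tag from req->src and verifies it even without AAD). The dispatch reduces to the following.

    #include <assert.h>
    #include <stddef.h>

    enum gcm_path { GCM_EMPTY, GCM_GMAC, GCM_FULL };

    /* Mirrors the zero-length dispatch in gcm_aes_nx_crypt(). */
    static enum gcm_path pick_path(size_t cryptlen, size_t assoclen)
    {
        if (cryptlen == 0)
            return assoclen == 0 ? GCM_EMPTY : GCM_GMAC;
        return GCM_FULL;
    }

    int main(void)
    {
        assert(pick_path(0, 0)   == GCM_EMPTY); /* tag = E_K(J0)           */
        assert(pick_path(0, 16)  == GCM_GMAC);  /* AAD-only authentication */
        assert(pick_path(64, 16) == GCM_FULL);  /* chunked encrypt/decrypt */
        return 0;
    }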
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 93923e4628c0..03c4bf57d066 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c | |||
@@ -56,6 +56,77 @@ static int nx_xcbc_set_key(struct crypto_shash *desc, | |||
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
58 | 58 | ||
59 | /* | ||
60 | * Based on RFC 3566, for a zero-length message: | ||
61 | * | ||
62 | * n = 1 | ||
63 | * K1 = E(K, 0x01010101010101010101010101010101) | ||
64 | * K3 = E(K, 0x03030303030303030303030303030303) | ||
65 | * E[0] = 0x00000000000000000000000000000000 | ||
66 | * M[1] = 0x80000000000000000000000000000000 (0 length message with padding) | ||
67 | * E[1] = E(K1, M[1] ^ E[0] ^ K3) ||
68 | * Tag = E[1] ||
69 | */ | ||
70 | static int nx_xcbc_empty(struct shash_desc *desc, u8 *out) | ||
71 | { | ||
72 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | ||
73 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | ||
74 | struct nx_sg *in_sg, *out_sg; | ||
75 | u8 keys[2][AES_BLOCK_SIZE]; | ||
76 | u8 key[32]; | ||
77 | int rc = 0; | ||
78 | |||
79 | /* Change to ECB mode */ | ||
80 | csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; | ||
81 | memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE); | ||
82 | memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE); | ||
83 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; | ||
84 | |||
85 | /* K1 and K3 base patterns */ | ||
86 | memset(keys[0], 0x01, sizeof(keys[0])); | ||
87 | memset(keys[1], 0x03, sizeof(keys[1])); | ||
88 | |||
89 | /* Generate K1 and K3 encrypting the patterns */ | ||
90 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys), | ||
91 | nx_ctx->ap->sglen); | ||
92 | out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys), | ||
93 | nx_ctx->ap->sglen); | ||
94 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); | ||
95 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); | ||
96 | |||
97 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | ||
98 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
99 | if (rc) | ||
100 | goto out; | ||
101 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
102 | |||
103 | /* XOR K3 with the padding for a 0 length message */ ||
104 | keys[1][0] ^= 0x80; | ||
105 | |||
106 | /* Encrypt the final result */ | ||
107 | memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE); | ||
108 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]), | ||
109 | nx_ctx->ap->sglen); | ||
110 | out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE, | ||
111 | nx_ctx->ap->sglen); | ||
112 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); | ||
113 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); | ||
114 | |||
115 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | ||
116 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
117 | if (rc) | ||
118 | goto out; | ||
119 | atomic_inc(&(nx_ctx->stats->aes_ops)); | ||
120 | |||
121 | out: | ||
122 | /* Restore XCBC mode */ | ||
123 | csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; | ||
124 | memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE); | ||
125 | NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; | ||
126 | |||
127 | return rc; | ||
128 | } | ||
129 | |||
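A userspace rendering of the RFC 3566 computation that nx_xcbc_empty() performs with two ECB trips to the coprocessor; aes_block_fn is an assumed placeholder for any single-block AES primitive.

    #include <stdint.h>
    #include <string.h>

    #define AES_BLOCK_SIZE 16

    /* Placeholder for a single-block AES-128 encryption primitive. */
    typedef void (*aes_block_fn)(const uint8_t key[16],
                                 const uint8_t in[AES_BLOCK_SIZE],
                                 uint8_t out[AES_BLOCK_SIZE]);

    /* RFC 3566, zero-length message:
     *   K1 = E(K, 0x01..01), K3 = E(K, 0x03..03)
     *   M[1] = 0x80 0x00..00 (10* padding), E[0] = 0
     *   Tag  = E(K1, M[1] ^ E[0] ^ K3)
     */
    static void xcbc_empty_tag(aes_block_fn aes, const uint8_t key[16],
                               uint8_t tag[AES_BLOCK_SIZE])
    {
        uint8_t pat[AES_BLOCK_SIZE], k1[AES_BLOCK_SIZE], k3[AES_BLOCK_SIZE];
        uint8_t m[AES_BLOCK_SIZE] = { 0x80 };

        memset(pat, 0x01, sizeof(pat));
        aes(key, pat, k1);                 /* derive K1                    */
        memset(pat, 0x03, sizeof(pat));
        aes(key, pat, k3);                 /* derive K3 (the driver does
                                              both in one two-block ECB)  */
        for (int i = 0; i < AES_BLOCK_SIZE; i++)
            m[i] ^= k3[i];                 /* M[1] ^ E[0] ^ K3, E[0] = 0  */
        aes(k1, m, tag);                   /* second ECB call             */
    }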
59 | static int nx_xcbc_init(struct shash_desc *desc) | 130 | static int nx_xcbc_init(struct shash_desc *desc) |
60 | { | 131 | { |
61 | struct xcbc_state *sctx = shash_desc_ctx(desc); | 132 | struct xcbc_state *sctx = shash_desc_ctx(desc); |
@@ -88,76 +159,99 @@ static int nx_xcbc_update(struct shash_desc *desc, | |||
88 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 159 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
89 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 160 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
90 | struct nx_sg *in_sg; | 161 | struct nx_sg *in_sg; |
91 | u32 to_process, leftover; | 162 | u32 to_process, leftover, total; |
163 | u32 max_sg_len; | ||
164 | unsigned long irq_flags; | ||
92 | int rc = 0; | 165 | int rc = 0; |
93 | 166 | ||
94 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { | 167 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
95 | /* we've hit the nx chip previously and we're updating again, | 168 | |
96 | * so copy over the partial digest */ | 169 | |
97 | memcpy(csbcpb->cpb.aes_xcbc.cv, | 170 | total = sctx->count + len; |
98 | csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); | ||
99 | } | ||
100 | 171 | ||
101 | /* 2 cases for total data len: | 172 | /* 2 cases for total data len: |
102 | * 1: <= AES_BLOCK_SIZE: copy into state, return 0 | 173 | * 1: <= AES_BLOCK_SIZE: copy into state, return 0 |
103 | * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover | 174 | * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover |
104 | */ | 175 | */ |
105 | if (len + sctx->count <= AES_BLOCK_SIZE) { | 176 | if (total <= AES_BLOCK_SIZE) { |
106 | memcpy(sctx->buffer + sctx->count, data, len); | 177 | memcpy(sctx->buffer + sctx->count, data, len); |
107 | sctx->count += len; | 178 | sctx->count += len; |
108 | goto out; | 179 | goto out; |
109 | } | 180 | } |
110 | 181 | ||
111 | /* to_process: the AES_BLOCK_SIZE data chunk to process in this | 182 | in_sg = nx_ctx->in_sg; |
112 | * update */ | 183 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), |
113 | to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1); | 184 | nx_ctx->ap->sglen); |
114 | leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1); | 185 | |
115 | 186 | do { | |
116 | /* the hardware will not accept a 0 byte operation for this algorithm | 187 | |
117 | * and the operation MUST be finalized to be correct. So if we happen | 188 | /* to_process: the AES_BLOCK_SIZE data chunk to process in this |
118 | * to get an update that falls on a block sized boundary, we must | 189 | * update */ |
119 | * save off the last block to finalize with later. */ | 190 | to_process = min_t(u64, total, nx_ctx->ap->databytelen); |
120 | if (!leftover) { | 191 | to_process = min_t(u64, to_process, |
121 | to_process -= AES_BLOCK_SIZE; | 192 | NX_PAGE_SIZE * (max_sg_len - 1)); |
122 | leftover = AES_BLOCK_SIZE; | 193 | to_process = to_process & ~(AES_BLOCK_SIZE - 1); |
123 | } | 194 | leftover = total - to_process; |
124 | 195 | ||
125 | if (sctx->count) { | 196 | /* the hardware will not accept a 0 byte operation for this |
126 | in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer, | 197 | * algorithm and the operation MUST be finalized to be correct. |
127 | sctx->count, nx_ctx->ap->sglen); | 198 | * So if we happen to get an update that falls on a block sized |
128 | in_sg = nx_build_sg_list(in_sg, (u8 *)data, | 199 | * boundary, we must save off the last block to finalize with |
129 | to_process - sctx->count, | 200 | * later. */ |
130 | nx_ctx->ap->sglen); | 201 | if (!leftover) { |
202 | to_process -= AES_BLOCK_SIZE; | ||
203 | leftover = AES_BLOCK_SIZE; | ||
204 | } | ||
205 | |||
206 | if (sctx->count) { | ||
207 | in_sg = nx_build_sg_list(nx_ctx->in_sg, | ||
208 | (u8 *) sctx->buffer, | ||
209 | sctx->count, | ||
210 | max_sg_len); | ||
211 | } | ||
212 | in_sg = nx_build_sg_list(in_sg, | ||
213 | (u8 *) data, | ||
214 | to_process - sctx->count, | ||
215 | max_sg_len); | ||
131 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * | 216 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * |
132 | sizeof(struct nx_sg); | 217 | sizeof(struct nx_sg); |
133 | } else { | ||
134 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process, | ||
135 | nx_ctx->ap->sglen); | ||
136 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * | ||
137 | sizeof(struct nx_sg); | ||
138 | } | ||
139 | 218 | ||
140 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | 219 | /* we've hit the nx chip previously and we're updating again, |
220 | * so copy over the partial digest */ | ||
221 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { | ||
222 | memcpy(csbcpb->cpb.aes_xcbc.cv, | ||
223 | csbcpb->cpb.aes_xcbc.out_cv_mac, | ||
224 | AES_BLOCK_SIZE); | ||
225 | } | ||
226 | |||
227 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | ||
228 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | ||
229 | rc = -EINVAL; | ||
230 | goto out; | ||
231 | } | ||
232 | |||
233 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | ||
234 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | ||
235 | if (rc) | ||
236 | goto out; | ||
141 | 237 | ||
142 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | 238 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
143 | rc = -EINVAL; | ||
144 | goto out; | ||
145 | } | ||
146 | 239 | ||
147 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 240 | /* everything after the first update is continuation */ |
148 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 241 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; |
149 | if (rc) | ||
150 | goto out; | ||
151 | 242 | ||
152 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 243 | total -= to_process; |
244 | data += to_process - sctx->count; | ||
245 | sctx->count = 0; | ||
246 | in_sg = nx_ctx->in_sg; | ||
247 | } while (leftover > AES_BLOCK_SIZE); | ||
153 | 248 | ||
154 | /* copy the leftover back into the state struct */ | 249 | /* copy the leftover back into the state struct */ |
155 | memcpy(sctx->buffer, data + len - leftover, leftover); | 250 | memcpy(sctx->buffer, data, leftover); |
156 | sctx->count = leftover; | 251 | sctx->count = leftover; |
157 | 252 | ||
158 | /* everything after the first update is continuation */ | ||
159 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | ||
160 | out: | 253 | out: |
254 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
161 | return rc; | 255 | return rc; |
162 | } | 256 | } |
163 | 257 | ||
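One subtlety the rewritten update loop preserves: XCBC must be finalized on real data, so an update that lands exactly on a block boundary holds the last block back for final(). The adjustment in isolation, with the databytelen and sg-list caps omitted:

    #include <assert.h>
    #include <stdint.h>

    #define AES_BLOCK_SIZE 16u

    /* Split a running total into what goes to hardware now and what is
     * buffered; never lets the buffered tail reach zero. */
    static void xcbc_split(uint64_t total, uint64_t *to_process,
                           uint64_t *leftover)
    {
        *to_process = total & ~(uint64_t)(AES_BLOCK_SIZE - 1);
        *leftover   = total - *to_process;
        if (*leftover == 0) {              /* block-aligned: keep one back */
            *to_process -= AES_BLOCK_SIZE;
            *leftover    = AES_BLOCK_SIZE;
        }
    }

    int main(void)
    {
        uint64_t tp, lo;
        xcbc_split(48, &tp, &lo);
        assert(tp == 32 && lo == 16);      /* aligned case                 */
        xcbc_split(50, &tp, &lo);
        assert(tp == 48 && lo == 2);       /* unaligned case               */
        return 0;
    }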
@@ -167,21 +261,23 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) | |||
167 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 261 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
168 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 262 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
169 | struct nx_sg *in_sg, *out_sg; | 263 | struct nx_sg *in_sg, *out_sg; |
264 | unsigned long irq_flags; | ||
170 | int rc = 0; | 265 | int rc = 0; |
171 | 266 | ||
267 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | ||
268 | |||
172 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { | 269 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { |
173 | /* we've hit the nx chip previously, now we're finalizing, | 270 | /* we've hit the nx chip previously, now we're finalizing, |
174 | * so copy over the partial digest */ | 271 | * so copy over the partial digest */ |
175 | memcpy(csbcpb->cpb.aes_xcbc.cv, | 272 | memcpy(csbcpb->cpb.aes_xcbc.cv, |
176 | csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); | 273 | csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); |
177 | } else if (sctx->count == 0) { | 274 | } else if (sctx->count == 0) { |
178 | /* we've never seen an update, so this is a 0 byte op. The | 275 | /* |
179 | * hardware cannot handle a 0 byte op, so just copy out the | 276 | * we've never seen an update, so this is a 0 byte op. The |
180 | * known 0 byte result. This is cheaper than allocating a | 277 | * hardware cannot handle a 0 byte op, so just use ECB to |
181 | * software context to do a 0 byte op */ | 278 | * generate the hash. |
182 | u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c, | 279 | */ |
183 | 0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 }; | 280 | rc = nx_xcbc_empty(desc, out); |
184 | memcpy(out, data, sizeof(data)); | ||
185 | goto out; | 281 | goto out; |
186 | } | 282 | } |
187 | 283 | ||
@@ -211,6 +307,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) | |||
211 | 307 | ||
212 | memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); | 308 | memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); |
213 | out: | 309 | out: |
310 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
214 | return rc; | 311 | return rc; |
215 | } | 312 | } |
216 | 313 | ||
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 67024f2f0b78..da0b24a7633f 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c | |||
@@ -55,71 +55,91 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, | |||
55 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 55 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
56 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 56 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
57 | struct nx_sg *in_sg; | 57 | struct nx_sg *in_sg; |
58 | u64 to_process, leftover; | 58 | u64 to_process, leftover, total; |
59 | u32 max_sg_len; | ||
60 | unsigned long irq_flags; | ||
59 | int rc = 0; | 61 | int rc = 0; |
60 | 62 | ||
61 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { | 63 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
62 | /* we've hit the nx chip previously and we're updating again, | ||
63 | * so copy over the partial digest */ | ||
64 | memcpy(csbcpb->cpb.sha256.input_partial_digest, | ||
65 | csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); | ||
66 | } | ||
67 | 64 | ||
68 | /* 2 cases for total data len: | 65 | /* 2 cases for total data len: |
69 | * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 | 66 | * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 |
70 | * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover | 67 | * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover |
71 | */ | 68 | */ |
72 | if (len + sctx->count < SHA256_BLOCK_SIZE) { | 69 | total = sctx->count + len; |
70 | if (total < SHA256_BLOCK_SIZE) { | ||
73 | memcpy(sctx->buf + sctx->count, data, len); | 71 | memcpy(sctx->buf + sctx->count, data, len); |
74 | sctx->count += len; | 72 | sctx->count += len; |
75 | goto out; | 73 | goto out; |
76 | } | 74 | } |
77 | 75 | ||
78 | /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this | 76 | in_sg = nx_ctx->in_sg; |
79 | * update */ | 77 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), |
80 | to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1); | 78 | nx_ctx->ap->sglen); |
81 | leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1); | 79 | |
82 | 80 | do { | |
83 | if (sctx->count) { | 81 | /* |
84 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, | 82 | * to_process: the SHA256_BLOCK_SIZE data chunk to process in |
85 | sctx->count, nx_ctx->ap->sglen); | 83 | * this update. This value is also restricted by the sg list |
86 | in_sg = nx_build_sg_list(in_sg, (u8 *)data, | 84 | * limits. |
85 | */ | ||
86 | to_process = min_t(u64, total, nx_ctx->ap->databytelen); | ||
87 | to_process = min_t(u64, to_process, | ||
88 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
89 | to_process = to_process & ~(SHA256_BLOCK_SIZE - 1); | ||
90 | leftover = total - to_process; | ||
91 | |||
92 | if (sctx->count) { | ||
93 | in_sg = nx_build_sg_list(nx_ctx->in_sg, | ||
94 | (u8 *) sctx->buf, | ||
95 | sctx->count, max_sg_len); | ||
96 | } | ||
97 | in_sg = nx_build_sg_list(in_sg, (u8 *) data, | ||
87 | to_process - sctx->count, | 98 | to_process - sctx->count, |
88 | nx_ctx->ap->sglen); | 99 | max_sg_len); |
89 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * | 100 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * |
90 | sizeof(struct nx_sg); | 101 | sizeof(struct nx_sg); |
91 | } else { | ||
92 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, | ||
93 | to_process, nx_ctx->ap->sglen); | ||
94 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * | ||
95 | sizeof(struct nx_sg); | ||
96 | } | ||
97 | 102 | ||
98 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | 103 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { |
104 | /* | ||
105 | * we've hit the nx chip previously and we're updating | ||
106 | * again, so copy over the partial digest. | ||
107 | */ | ||
108 | memcpy(csbcpb->cpb.sha256.input_partial_digest, | ||
109 | csbcpb->cpb.sha256.message_digest, | ||
110 | SHA256_DIGEST_SIZE); | ||
111 | } | ||
99 | 112 | ||
100 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | 113 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
101 | rc = -EINVAL; | 114 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { |
102 | goto out; | 115 | rc = -EINVAL; |
103 | } | 116 | goto out; |
117 | } | ||
104 | 118 | ||
105 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 119 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, |
106 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 120 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); |
107 | if (rc) | 121 | if (rc) |
108 | goto out; | 122 | goto out; |
109 | 123 | ||
110 | atomic_inc(&(nx_ctx->stats->sha256_ops)); | 124 | atomic_inc(&(nx_ctx->stats->sha256_ops)); |
125 | csbcpb->cpb.sha256.message_bit_length += (u64) | ||
126 | (csbcpb->cpb.sha256.spbc * 8); | ||
127 | |||
128 | /* everything after the first update is continuation */ | ||
129 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | ||
130 | |||
131 | total -= to_process; | ||
132 | data += to_process - sctx->count; | ||
133 | sctx->count = 0; | ||
134 | in_sg = nx_ctx->in_sg; | ||
135 | } while (leftover >= SHA256_BLOCK_SIZE); | ||
111 | 136 | ||
112 | /* copy the leftover back into the state struct */ | 137 | /* copy the leftover back into the state struct */ |
113 | if (leftover) | 138 | if (leftover) |
114 | memcpy(sctx->buf, data + len - leftover, leftover); | 139 | memcpy(sctx->buf, data, leftover); |
115 | sctx->count = leftover; | 140 | sctx->count = leftover; |
116 | |||
117 | csbcpb->cpb.sha256.message_bit_length += (u64) | ||
118 | (csbcpb->cpb.sha256.spbc * 8); | ||
119 | |||
120 | /* everything after the first update is continuation */ | ||
121 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | ||
122 | out: | 141 | out: |
142 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
123 | return rc; | 143 | return rc; |
124 | } | 144 | } |
125 | 145 | ||
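The loop's bookkeeping changed with the rewrite: data now advances each pass by the new bytes actually consumed (to_process minus what came from the buffer), which is why the final leftover copy is simply memcpy(sctx->buf, data, leftover). A worked instance of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    #define SHA256_BLOCK_SIZE 64u

    int main(void)
    {
        /* 10 bytes buffered from the previous update, 200 new bytes. */
        uint64_t buffered = 10, len = 200;
        uint64_t total      = buffered + len;                             /* 210 */
        uint64_t to_process = total & ~(uint64_t)(SHA256_BLOCK_SIZE - 1); /* 192 */
        uint64_t leftover   = total - to_process;                         /*  18 */
        uint64_t advance    = to_process - buffered;                      /* 182 */

        assert(buffered + advance == to_process); /* buffer + new = hw bytes */
        assert(advance + leftover == len);        /* all new bytes accounted */
        return 0;
    }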
@@ -129,8 +149,13 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) | |||
129 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 149 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
130 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 150 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
131 | struct nx_sg *in_sg, *out_sg; | 151 | struct nx_sg *in_sg, *out_sg; |
152 | u32 max_sg_len; | ||
153 | unsigned long irq_flags; | ||
132 | int rc; | 154 | int rc; |
133 | 155 | ||
156 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | ||
157 | |||
158 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); | ||
134 | 159 | ||
135 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { | 160 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { |
136 | /* we've hit the nx chip previously, now we're finalizing, | 161 | /* we've hit the nx chip previously, now we're finalizing, |
@@ -146,9 +171,9 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) | |||
146 | csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8); | 171 | csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8); |
147 | 172 | ||
148 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, | 173 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, |
149 | sctx->count, nx_ctx->ap->sglen); | 174 | sctx->count, max_sg_len); |
150 | out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE, | 175 | out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE, |
151 | nx_ctx->ap->sglen); | 176 | max_sg_len); |
152 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); | 177 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); |
153 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); | 178 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |
154 | 179 | ||
@@ -168,6 +193,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) | |||
168 | &(nx_ctx->stats->sha256_bytes)); | 193 | &(nx_ctx->stats->sha256_bytes)); |
169 | memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); | 194 | memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); |
170 | out: | 195 | out: |
196 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
171 | return rc; | 197 | return rc; |
172 | } | 198 | } |
173 | 199 | ||
@@ -177,6 +203,9 @@ static int nx_sha256_export(struct shash_desc *desc, void *out) | |||
177 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 203 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
178 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 204 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
179 | struct sha256_state *octx = out; | 205 | struct sha256_state *octx = out; |
206 | unsigned long irq_flags; | ||
207 | |||
208 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | ||
180 | 209 | ||
181 | octx->count = sctx->count + | 210 | octx->count = sctx->count + |
182 | (csbcpb->cpb.sha256.message_bit_length / 8); | 211 | (csbcpb->cpb.sha256.message_bit_length / 8); |
@@ -199,6 +228,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out) | |||
199 | octx->state[7] = SHA256_H7; | 228 | octx->state[7] = SHA256_H7; |
200 | } | 229 | } |
201 | 230 | ||
231 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
202 | return 0; | 232 | return 0; |
203 | } | 233 | } |
204 | 234 | ||
@@ -208,6 +238,9 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in) | |||
208 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 238 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
209 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 239 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
210 | const struct sha256_state *ictx = in; | 240 | const struct sha256_state *ictx = in; |
241 | unsigned long irq_flags; | ||
242 | |||
243 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | ||
211 | 244 | ||
212 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); | 245 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); |
213 | 246 | ||
@@ -222,6 +255,7 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in) | |||
222 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | 255 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
223 | } | 256 | } |
224 | 257 | ||
258 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
225 | return 0; | 259 | return 0; |
226 | } | 260 | } |
227 | 261 | ||
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index 08eee1122349..4ae5b0f221d5 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c | |||
@@ -55,73 +55,93 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, | |||
55 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 55 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
56 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 56 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
57 | struct nx_sg *in_sg; | 57 | struct nx_sg *in_sg; |
58 | u64 to_process, leftover, spbc_bits; | 58 | u64 to_process, leftover, total, spbc_bits; |
59 | u32 max_sg_len; | ||
60 | unsigned long irq_flags; | ||
59 | int rc = 0; | 61 | int rc = 0; |
60 | 62 | ||
61 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { | 63 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
62 | /* we've hit the nx chip previously and we're updating again, | ||
63 | * so copy over the partial digest */ | ||
64 | memcpy(csbcpb->cpb.sha512.input_partial_digest, | ||
65 | csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); | ||
66 | } | ||
67 | 64 | ||
68 | /* 2 cases for total data len: | 65 | /* 2 cases for total data len: |
69 | * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0 | 66 | * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 |
70 | * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover | 67 | * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover |
71 | */ | 68 | */ |
72 | if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) { | 69 | total = sctx->count[0] + len; |
70 | if (total < SHA512_BLOCK_SIZE) { | ||
73 | memcpy(sctx->buf + sctx->count[0], data, len); | 71 | memcpy(sctx->buf + sctx->count[0], data, len); |
74 | sctx->count[0] += len; | 72 | sctx->count[0] += len; |
75 | goto out; | 73 | goto out; |
76 | } | 74 | } |
77 | 75 | ||
78 | /* to_process: the SHA512_BLOCK_SIZE data chunk to process in this | 76 | in_sg = nx_ctx->in_sg; |
79 | * update */ | 77 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg), |
80 | to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1); | 78 | nx_ctx->ap->sglen); |
81 | leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1); | 79 | |
82 | 80 | do { | |
83 | if (sctx->count[0]) { | 81 | /* |
84 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, | 82 | * to_process: the SHA512_BLOCK_SIZE data chunk to process in |
85 | sctx->count[0], nx_ctx->ap->sglen); | 83 | * this update. This value is also restricted by the sg list |
86 | in_sg = nx_build_sg_list(in_sg, (u8 *)data, | 84 | * limits. |
85 | */ | ||
86 | to_process = min_t(u64, total, nx_ctx->ap->databytelen); | ||
87 | to_process = min_t(u64, to_process, | ||
88 | NX_PAGE_SIZE * (max_sg_len - 1)); | ||
89 | to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); | ||
90 | leftover = total - to_process; | ||
91 | |||
92 | if (sctx->count[0]) { | ||
93 | in_sg = nx_build_sg_list(nx_ctx->in_sg, | ||
94 | (u8 *) sctx->buf, | ||
95 | sctx->count[0], max_sg_len); | ||
96 | } | ||
97 | in_sg = nx_build_sg_list(in_sg, (u8 *) data, | ||
87 | to_process - sctx->count[0], | 98 | to_process - sctx->count[0], |
88 | nx_ctx->ap->sglen); | 99 | max_sg_len); |
89 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * | ||
90 | sizeof(struct nx_sg); | ||
91 | } else { | ||
92 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, | ||
93 | to_process, nx_ctx->ap->sglen); | ||
94 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * | 100 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * |
95 | sizeof(struct nx_sg); | 101 | sizeof(struct nx_sg); |
96 | } | ||
97 | 102 | ||
98 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | 103 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { |
104 | /* | ||
105 | * we've hit the nx chip previously and we're updating | ||
106 | * again, so copy over the partial digest. | ||
107 | */ | ||
108 | memcpy(csbcpb->cpb.sha512.input_partial_digest, | ||
109 | csbcpb->cpb.sha512.message_digest, | ||
110 | SHA512_DIGEST_SIZE); | ||
111 | } | ||
99 | 112 | ||
100 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { | 113 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
101 | rc = -EINVAL; | 114 | if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { |
102 | goto out; | 115 | rc = -EINVAL; |
103 | } | 116 | goto out; |
104 | 117 | } | |
105 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, | 118 | |
106 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); | 119 | rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, |
107 | if (rc) | 120 | desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); |
108 | goto out; | 121 | if (rc) |
122 | goto out; | ||
123 | |||
124 | atomic_inc(&(nx_ctx->stats->sha512_ops)); | ||
125 | spbc_bits = csbcpb->cpb.sha512.spbc * 8; | ||
126 | csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits; | ||
127 | if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits) | ||
128 | csbcpb->cpb.sha512.message_bit_length_hi++; | ||
129 | |||
130 | /* everything after the first update is continuation */ | ||
131 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | ||
109 | 132 | ||
110 | atomic_inc(&(nx_ctx->stats->sha512_ops)); | 133 | total -= to_process; |
134 | data += to_process - sctx->count[0]; | ||
135 | sctx->count[0] = 0; | ||
136 | in_sg = nx_ctx->in_sg; | ||
137 | } while (leftover >= SHA512_BLOCK_SIZE); | ||
111 | 138 | ||
112 | /* copy the leftover back into the state struct */ | 139 | /* copy the leftover back into the state struct */ |
113 | if (leftover) | 140 | if (leftover) |
114 | memcpy(sctx->buf, data + len - leftover, leftover); | 141 | memcpy(sctx->buf, data, leftover); |
115 | sctx->count[0] = leftover; | 142 | sctx->count[0] = leftover; |
116 | |||
117 | spbc_bits = csbcpb->cpb.sha512.spbc * 8; | ||
118 | csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits; | ||
119 | if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits) | ||
120 | csbcpb->cpb.sha512.message_bit_length_hi++; | ||
121 | |||
122 | /* everything after the first update is continuation */ | ||
123 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | ||
124 | out: | 143 | out: |
144 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
125 | return rc; | 145 | return rc; |
126 | } | 146 | } |
127 | 147 | ||
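SHA-512 tracks its message bit length as a 128-bit lo/hi pair in the CPB. The carry test the loop performs after each chunk is exact, because unsigned wraparound shows up as the sum landing below the addend:

    #include <assert.h>
    #include <stdint.h>

    /* message_bit_length_lo += bits; carry into _hi on wraparound. */
    static void add_message_bits(uint64_t *lo, uint64_t *hi, uint64_t bits)
    {
        *lo += bits;
        if (*lo < bits)    /* wrapped modulo 2^64 */
            (*hi)++;
    }

    int main(void)
    {
        uint64_t lo = UINT64_MAX - 7, hi = 0;
        add_message_bits(&lo, &hi, 16);
        assert(lo == 8 && hi == 1);
        return 0;
    }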
@@ -131,9 +151,15 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) | |||
131 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 151 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
132 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 152 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
133 | struct nx_sg *in_sg, *out_sg; | 153 | struct nx_sg *in_sg, *out_sg; |
154 | u32 max_sg_len; | ||
134 | u64 count0; | 155 | u64 count0; |
156 | unsigned long irq_flags; | ||
135 | int rc; | 157 | int rc; |
136 | 158 | ||
159 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | ||
160 | |||
161 | max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); | ||
162 | |||
137 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { | 163 | if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { |
138 | /* we've hit the nx chip previously, now we're finalizing, | 164 | /* we've hit the nx chip previously, now we're finalizing, |
139 | * so copy over the partial digest */ | 165 | * so copy over the partial digest */ |
@@ -152,9 +178,9 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) | |||
152 | csbcpb->cpb.sha512.message_bit_length_hi++; | 178 | csbcpb->cpb.sha512.message_bit_length_hi++; |
153 | 179 | ||
154 | in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0], | 180 | in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0], |
155 | nx_ctx->ap->sglen); | 181 | max_sg_len); |
156 | out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE, | 182 | out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE, |
157 | nx_ctx->ap->sglen); | 183 | max_sg_len); |
158 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); | 184 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); |
159 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); | 185 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |
160 | 186 | ||
@@ -174,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) | |||
174 | 200 | ||
175 | memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); | 201 | memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); |
176 | out: | 202 | out: |
203 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
177 | return rc; | 204 | return rc; |
178 | } | 205 | } |
179 | 206 | ||
@@ -183,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out) | |||
183 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 210 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
184 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 211 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
185 | struct sha512_state *octx = out; | 212 | struct sha512_state *octx = out; |
213 | unsigned long irq_flags; | ||
214 | |||
215 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | ||
186 | 216 | ||
187 | /* move message_bit_length (128 bits) into count and convert its value | 217 | /* move message_bit_length (128 bits) into count and convert its value |
188 | * to bytes */ | 218 | * to bytes */ |
@@ -214,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out) | |||
214 | octx->state[7] = SHA512_H7; | 244 | octx->state[7] = SHA512_H7; |
215 | } | 245 | } |
216 | 246 | ||
247 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
217 | return 0; | 248 | return 0; |
218 | } | 249 | } |
219 | 250 | ||
@@ -223,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in) | |||
223 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 254 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
224 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 255 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
225 | const struct sha512_state *ictx = in; | 256 | const struct sha512_state *ictx = in; |
257 | unsigned long irq_flags; | ||
258 | |||
259 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | ||
226 | 260 | ||
227 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); | 261 | memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); |
228 | sctx->count[0] = ictx->count[0] & 0x3f; | 262 | sctx->count[0] = ictx->count[0] & 0x3f; |
@@ -240,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in) | |||
240 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | 274 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
241 | } | 275 | } |
242 | 276 | ||
277 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | ||
243 | return 0; | 278 | return 0; |
244 | } | 279 | } |
245 | 280 | ||
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index bbdab6e5ccf0..5533fe31c90d 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c | |||
@@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx, | |||
61 | 61 | ||
62 | do { | 62 | do { |
63 | rc = vio_h_cop_sync(viodev, op); | 63 | rc = vio_h_cop_sync(viodev, op); |
64 | } while ((rc == -EBUSY && !may_sleep && retries--) || | 64 | } while (rc == -EBUSY && !may_sleep && retries--); |
65 | (rc == -EBUSY && may_sleep && cond_resched())); | ||
66 | 65 | ||
67 | if (rc) { | 66 | if (rc) { |
68 | dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " | 67 | dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " |
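The old retry condition also looped in the may_sleep case for as long as cond_resched() reported that it actually rescheduled, tying "retry the hcall" to an unrelated scheduler signal (and exiting early when there was nothing to yield to). The fix keeps only the bounded busy retry; the surviving idiom, with try_op() as a hypothetical stand-in for vio_h_cop_sync():

	#include <errno.h>

	static int try_op(void)                 /* hypothetical stand-in */
	{
		return -EBUSY;
	}

	static int bounded_retry(void)
	{
		int retries = 10;
		int rc;

		do {
			rc = try_op();
		} while (rc == -EBUSY && retries--);

		return rc;              /* caller sees -EBUSY once the budget runs out */
	}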
@@ -114,13 +113,29 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, | |||
114 | * have been described (or @sgmax elements have been written), the | 113 | * have been described (or @sgmax elements have been written), the |
115 | * loop ends. min_t is used to ensure @end_addr falls on the same page | 114 | * loop ends. min_t is used to ensure @end_addr falls on the same page |
116 | * as sg_addr, if not, we need to create another nx_sg element for the | 115 | * as sg_addr, if not, we need to create another nx_sg element for the |
117 | * data on the next page */ | 116 | * data on the next page. |
117 | * | ||
118 | * Also when using vmalloc'ed data, every time that a system page | ||
119 | * boundary is crossed the physical address needs to be re-calculated. | ||
120 | */ | ||
118 | for (sg = sg_head; sg_len < len; sg++) { | 121 | for (sg = sg_head; sg_len < len; sg++) { |
122 | u64 next_page; | ||
123 | |||
119 | sg->addr = sg_addr; | 124 | sg->addr = sg_addr; |
120 | sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr); | 125 | sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), |
121 | sg->len = sg_addr - sg->addr; | 126 | end_addr); |
127 | |||
128 | next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE; | ||
129 | sg->len = min_t(u64, sg_addr, next_page) - sg->addr; | ||
122 | sg_len += sg->len; | 130 | sg_len += sg->len; |
123 | 131 | ||
132 | if (sg_addr >= next_page && | ||
133 | is_vmalloc_addr(start_addr + sg_len)) { | ||
134 | sg_addr = page_to_phys(vmalloc_to_page( | ||
135 | start_addr + sg_len)); | ||
136 | end_addr = sg_addr + len - sg_len; | ||
137 | } | ||
138 | |||
124 | if ((sg - sg_head) == sgmax) { | 139 | if ((sg - sg_head) == sgmax) { |
125 | pr_err("nx: scatter/gather list overflow, pid: %d\n", | 140 | pr_err("nx: scatter/gather list overflow, pid: %d\n", |
126 | current->pid); | 141 | current->pid); |
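The new comment and the is_vmalloc_addr() branch encode a single rule: vmalloc memory is only virtually contiguous, so a physically addressed scatter/gather element must never cross a system page boundary, and the physical base has to be looked up again at every crossing. A userspace model of that segmenting loop, with lookup_phys() standing in for the vmalloc_to_page()/page_to_phys() pair:

	#include <stdint.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	struct seg {
		uint64_t phys;
		size_t len;
	};

	/* stand-in for vmalloc_to_page() + page_to_phys(); identity-mapped here */
	static uint64_t lookup_phys(const uint8_t *virt)
	{
		return (uint64_t)(uintptr_t)virt;
	}

	/* split [virt, virt+len) into page-bounded segments, re-deriving the
	 * physical base at every page crossing, mirroring the fixed loop above */
	static size_t build_segs(struct seg *segs, size_t max_segs,
				 const uint8_t *virt, size_t len)
	{
		size_t n = 0, done = 0;

		while (done < len && n < max_segs) {
			uint64_t phys = lookup_phys(virt + done);
			uint64_t next_page = (phys & PAGE_MASK) + PAGE_SIZE;
			size_t chunk = next_page - phys;

			if (chunk > len - done)
				chunk = len - done;

			segs[n].phys = phys;
			segs[n].len = chunk;
			n++;
			done += chunk;
		}

		return n;               /* number of elements written */
	}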
@@ -196,6 +211,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, | |||
196 | * @dst: destination scatterlist | 211 | * @dst: destination scatterlist |
197 | * @src: source scatterlist | 212 | * @src: source scatterlist |
198 | * @nbytes: length of data described in the scatterlists | 213 | * @nbytes: length of data described in the scatterlists |
214 | * @offset: number of bytes to fast-forward past at the beginning of | ||
215 | * scatterlists. | ||
199 | * @iv: destination for the iv data, if the algorithm requires it | 216 | * @iv: destination for the iv data, if the algorithm requires it |
200 | * | 217 | * |
201 | * This is common code shared by all the AES algorithms. It uses the block | 218 | * This is common code shared by all the AES algorithms. It uses the block |
@@ -207,6 +224,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, | |||
207 | struct scatterlist *dst, | 224 | struct scatterlist *dst, |
208 | struct scatterlist *src, | 225 | struct scatterlist *src, |
209 | unsigned int nbytes, | 226 | unsigned int nbytes, |
227 | unsigned int offset, | ||
210 | u8 *iv) | 228 | u8 *iv) |
211 | { | 229 | { |
212 | struct nx_sg *nx_insg = nx_ctx->in_sg; | 230 | struct nx_sg *nx_insg = nx_ctx->in_sg; |
@@ -215,8 +233,10 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, | |||
215 | if (iv) | 233 | if (iv) |
216 | memcpy(iv, desc->info, AES_BLOCK_SIZE); | 234 | memcpy(iv, desc->info, AES_BLOCK_SIZE); |
217 | 235 | ||
218 | nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes); | 236 | nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, |
219 | nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes); | 237 | offset, nbytes); |
238 | nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, | ||
239 | offset, nbytes); | ||
220 | 240 | ||
221 | /* these lengths should be negative, which will indicate to phyp that | 241 | /* these lengths should be negative, which will indicate to phyp that |
222 | * the input and output parameters are scatterlists, not linear | 242 | * the input and output parameters are scatterlists, not linear |
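The new @offset parameter lets a caller resume a large request mid-stream: both scatterlists are fast-forwarded past the bytes already handled before the element lists are built. A hypothetical caller shape (not taken from this patch; max_chunk and the hcall step are assumed) would chunk a request like this:

	/* fragment only: nx_ctx, desc, dst, src, iv and max_chunk set up elsewhere */
	unsigned int processed = 0;

	while (processed < nbytes) {
		unsigned int to_process = min(nbytes - processed, max_chunk);
		int rc;

		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
				       processed, iv);  /* offset = bytes done */
		if (rc)
			break;

		/* ... issue the hcall for this chunk ... */
		processed += to_process;
	}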
@@ -235,6 +255,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, | |||
235 | */ | 255 | */ |
236 | void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) | 256 | void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) |
237 | { | 257 | { |
258 | spin_lock_init(&nx_ctx->lock); | ||
238 | memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); | 259 | memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); |
239 | nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; | 260 | nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; |
240 | 261 | ||
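spin_lock_init() here pairs with the lock field added to struct nx_crypto_ctx in nx.h below: every update/final/export/import path in the nx hashes now brackets its use of the shared csbcpb state with spin_lock_irqsave()/spin_unlock_irqrestore(). A compressed userspace model of that invariant, with a pthread mutex in place of the spinlock:

	#include <pthread.h>
	#include <string.h>

	struct model_ctx {
		pthread_mutex_t lock;           /* models nx_ctx->lock */
		unsigned char digest[64];       /* models the shared csbcpb state */
	};

	/* every operation that reads or writes shared context state holds the
	 * lock for its whole duration, so concurrent update/final/export calls
	 * cannot interleave on the same transform */
	static void model_export(struct model_ctx *c, unsigned char *out)
	{
		pthread_mutex_lock(&c->lock);
		memcpy(out, c->digest, sizeof(c->digest));
		pthread_mutex_unlock(&c->lock);
	}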
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 3232b182dd28..befda07ca1da 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h | |||
@@ -117,6 +117,7 @@ struct nx_ctr_priv { | |||
117 | }; | 117 | }; |
118 | 118 | ||
119 | struct nx_crypto_ctx { | 119 | struct nx_crypto_ctx { |
120 | spinlock_t lock; /* synchronize access to the context */ | ||
120 | void *kmem; /* unaligned, kmalloc'd buffer */ | 121 | void *kmem; /* unaligned, kmalloc'd buffer */ |
121 | size_t kmem_len; /* length of kmem */ | 122 | size_t kmem_len; /* length of kmem */ |
122 | struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */ | 123 | struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */ |
@@ -155,7 +156,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, | |||
155 | struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32); | 156 | struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32); |
156 | int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, | 157 | int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, |
157 | struct scatterlist *, struct scatterlist *, unsigned int, | 158 | struct scatterlist *, struct scatterlist *, unsigned int, |
158 | u8 *); | 159 | unsigned int, u8 *); |
159 | struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, | 160 | struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, |
160 | struct scatterlist *, unsigned int, | 161 | struct scatterlist *, unsigned int, |
161 | unsigned int); | 162 | unsigned int); |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 5f7980586850..ce791c2f81f7 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -13,7 +13,9 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define pr_fmt(fmt) "%s: " fmt, __func__ | 16 | #define pr_fmt(fmt) "%20s: " fmt, __func__ |
17 | #define prn(num) pr_debug(#num "=%d\n", num) | ||
18 | #define prx(num) pr_debug(#num "=%x\n", num) | ||
17 | 19 | ||
18 | #include <linux/err.h> | 20 | #include <linux/err.h> |
19 | #include <linux/module.h> | 21 | #include <linux/module.h> |
@@ -38,6 +40,8 @@ | |||
38 | #define DST_MAXBURST 4 | 40 | #define DST_MAXBURST 4 |
39 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) | 41 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) |
40 | 42 | ||
43 | #define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) | ||
44 | |||
41 | /* OMAP TRM gives bitfields as start:end, where start is the higher bit | 45 | /* OMAP TRM gives bitfields as start:end, where start is the higher bit |
42 | number. For example 7:0 */ | 46 | number. For example 7:0 */ |
43 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) | 47 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) |
@@ -74,6 +78,10 @@ | |||
74 | 78 | ||
75 | #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) | 79 | #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) |
76 | 80 | ||
81 | #define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs) | ||
82 | #define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs) | ||
83 | #define AES_REG_IRQ_DATA_IN BIT(1) | ||
84 | #define AES_REG_IRQ_DATA_OUT BIT(2) | ||
77 | #define DEFAULT_TIMEOUT (5*HZ) | 85 | #define DEFAULT_TIMEOUT (5*HZ) |
78 | 86 | ||
79 | #define FLAGS_MODE_MASK 0x000f | 87 | #define FLAGS_MODE_MASK 0x000f |
@@ -86,6 +94,8 @@ | |||
86 | #define FLAGS_FAST BIT(5) | 94 | #define FLAGS_FAST BIT(5) |
87 | #define FLAGS_BUSY BIT(6) | 95 | #define FLAGS_BUSY BIT(6) |
88 | 96 | ||
97 | #define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2) | ||
98 | |||
89 | struct omap_aes_ctx { | 99 | struct omap_aes_ctx { |
90 | struct omap_aes_dev *dd; | 100 | struct omap_aes_dev *dd; |
91 | 101 | ||
@@ -119,6 +129,8 @@ struct omap_aes_pdata { | |||
119 | u32 data_ofs; | 129 | u32 data_ofs; |
120 | u32 rev_ofs; | 130 | u32 rev_ofs; |
121 | u32 mask_ofs; | 131 | u32 mask_ofs; |
132 | u32 irq_enable_ofs; | ||
133 | u32 irq_status_ofs; | ||
122 | 134 | ||
123 | u32 dma_enable_in; | 135 | u32 dma_enable_in; |
124 | u32 dma_enable_out; | 136 | u32 dma_enable_out; |
@@ -146,25 +158,32 @@ struct omap_aes_dev { | |||
146 | struct tasklet_struct queue_task; | 158 | struct tasklet_struct queue_task; |
147 | 159 | ||
148 | struct ablkcipher_request *req; | 160 | struct ablkcipher_request *req; |
161 | |||
162 | /* | ||
163 | * total is used by PIO mode for bookkeeping, so introduce | ||
164 | * total_save, as we need it to calculate page_order | ||
165 | */ | ||
149 | size_t total; | 166 | size_t total; |
167 | size_t total_save; | ||
168 | |||
150 | struct scatterlist *in_sg; | 169 | struct scatterlist *in_sg; |
151 | struct scatterlist in_sgl; | ||
152 | size_t in_offset; | ||
153 | struct scatterlist *out_sg; | 170 | struct scatterlist *out_sg; |
171 | |||
172 | /* Buffers for copying for unaligned cases */ | ||
173 | struct scatterlist in_sgl; | ||
154 | struct scatterlist out_sgl; | 174 | struct scatterlist out_sgl; |
155 | size_t out_offset; | 175 | struct scatterlist *orig_out; |
176 | int sgs_copied; | ||
156 | 177 | ||
157 | size_t buflen; | 178 | struct scatter_walk in_walk; |
158 | void *buf_in; | 179 | struct scatter_walk out_walk; |
159 | size_t dma_size; | ||
160 | int dma_in; | 180 | int dma_in; |
161 | struct dma_chan *dma_lch_in; | 181 | struct dma_chan *dma_lch_in; |
162 | dma_addr_t dma_addr_in; | ||
163 | void *buf_out; | ||
164 | int dma_out; | 182 | int dma_out; |
165 | struct dma_chan *dma_lch_out; | 183 | struct dma_chan *dma_lch_out; |
166 | dma_addr_t dma_addr_out; | 184 | int in_sg_len; |
167 | 185 | int out_sg_len; | |
186 | int pio_only; | ||
168 | const struct omap_aes_pdata *pdata; | 187 | const struct omap_aes_pdata *pdata; |
169 | }; | 188 | }; |
170 | 189 | ||
@@ -172,16 +191,36 @@ struct omap_aes_dev { | |||
172 | static LIST_HEAD(dev_list); | 191 | static LIST_HEAD(dev_list); |
173 | static DEFINE_SPINLOCK(list_lock); | 192 | static DEFINE_SPINLOCK(list_lock); |
174 | 193 | ||
194 | #ifdef DEBUG | ||
195 | #define omap_aes_read(dd, offset) \ | ||
196 | ({ \ | ||
197 | int _read_ret; \ | ||
198 | _read_ret = __raw_readl(dd->io_base + offset); \ | ||
199 | pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n", \ | ||
200 | offset, _read_ret); \ | ||
201 | _read_ret; \ | ||
202 | }) | ||
203 | #else | ||
175 | static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) | 204 | static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) |
176 | { | 205 | { |
177 | return __raw_readl(dd->io_base + offset); | 206 | return __raw_readl(dd->io_base + offset); |
178 | } | 207 | } |
208 | #endif | ||
179 | 209 | ||
210 | #ifdef DEBUG | ||
211 | #define omap_aes_write(dd, offset, value) \ | ||
212 | do { \ | ||
213 | pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \ | ||
214 | offset, value); \ | ||
215 | __raw_writel(value, dd->io_base + offset); \ | ||
216 | } while (0) | ||
217 | #else | ||
180 | static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, | 218 | static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, |
181 | u32 value) | 219 | u32 value) |
182 | { | 220 | { |
183 | __raw_writel(value, dd->io_base + offset); | 221 | __raw_writel(value, dd->io_base + offset); |
184 | } | 222 | } |
223 | #endif | ||
185 | 224 | ||
186 | static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset, | 225 | static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset, |
187 | u32 value, u32 mask) | 226 | u32 value, u32 mask) |
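The DEBUG variant of omap_aes_read() is a GNU statement expression: the ({ ... }) block both logs the access and yields _read_ret as the value of the whole macro, so it remains a drop-in replacement for the inline function. A tiny demo of the same construct (names are illustrative; requires GCC/Clang, as the kernel does):

	#include <stdio.h>

	static unsigned int regs[16];           /* fake register file */

	#define reg_read(off)                                            \
	({                                                               \
		unsigned int _read_ret = regs[(off) / 4];                \
		printf("reg_read(" #off "=%#x) = %#x\n",                 \
		       (unsigned int)(off), _read_ret);                  \
		_read_ret;        /* value of the whole expression */    \
	})

	int main(void)
	{
		regs[1] = 0xcafe;
		return reg_read(4) != 0xcafe;   /* prints, then compares the value */
	}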
@@ -323,33 +362,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd) | |||
323 | dd->dma_lch_out = NULL; | 362 | dd->dma_lch_out = NULL; |
324 | dd->dma_lch_in = NULL; | 363 | dd->dma_lch_in = NULL; |
325 | 364 | ||
326 | dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); | ||
327 | dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); | ||
328 | dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE; | ||
329 | dd->buflen &= ~(AES_BLOCK_SIZE - 1); | ||
330 | |||
331 | if (!dd->buf_in || !dd->buf_out) { | ||
332 | dev_err(dd->dev, "unable to alloc pages.\n"); | ||
333 | goto err_alloc; | ||
334 | } | ||
335 | |||
336 | /* MAP here */ | ||
337 | dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen, | ||
338 | DMA_TO_DEVICE); | ||
339 | if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { | ||
340 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
341 | err = -EINVAL; | ||
342 | goto err_map_in; | ||
343 | } | ||
344 | |||
345 | dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen, | ||
346 | DMA_FROM_DEVICE); | ||
347 | if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { | ||
348 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
349 | err = -EINVAL; | ||
350 | goto err_map_out; | ||
351 | } | ||
352 | |||
353 | dma_cap_zero(mask); | 365 | dma_cap_zero(mask); |
354 | dma_cap_set(DMA_SLAVE, mask); | 366 | dma_cap_set(DMA_SLAVE, mask); |
355 | 367 | ||
@@ -376,14 +388,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd) | |||
376 | err_dma_out: | 388 | err_dma_out: |
377 | dma_release_channel(dd->dma_lch_in); | 389 | dma_release_channel(dd->dma_lch_in); |
378 | err_dma_in: | 390 | err_dma_in: |
379 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | ||
380 | DMA_FROM_DEVICE); | ||
381 | err_map_out: | ||
382 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); | ||
383 | err_map_in: | ||
384 | free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); | ||
385 | free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); | ||
386 | err_alloc: | ||
387 | if (err) | 391 | if (err) |
388 | pr_err("error: %d\n", err); | 392 | pr_err("error: %d\n", err); |
389 | return err; | 393 | return err; |
@@ -393,11 +397,6 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) | |||
393 | { | 397 | { |
394 | dma_release_channel(dd->dma_lch_out); | 398 | dma_release_channel(dd->dma_lch_out); |
395 | dma_release_channel(dd->dma_lch_in); | 399 | dma_release_channel(dd->dma_lch_in); |
396 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | ||
397 | DMA_FROM_DEVICE); | ||
398 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); | ||
399 | free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); | ||
400 | free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); | ||
401 | } | 400 | } |
402 | 401 | ||
403 | static void sg_copy_buf(void *buf, struct scatterlist *sg, | 402 | static void sg_copy_buf(void *buf, struct scatterlist *sg, |
@@ -414,59 +413,27 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg, | |||
414 | scatterwalk_done(&walk, out, 0); | 413 | scatterwalk_done(&walk, out, 0); |
415 | } | 414 | } |
416 | 415 | ||
417 | static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, | ||
418 | size_t buflen, size_t total, int out) | ||
419 | { | ||
420 | unsigned int count, off = 0; | ||
421 | |||
422 | while (buflen && total) { | ||
423 | count = min((*sg)->length - *offset, total); | ||
424 | count = min(count, buflen); | ||
425 | |||
426 | if (!count) | ||
427 | return off; | ||
428 | |||
429 | /* | ||
430 | * buflen and total are AES_BLOCK_SIZE size aligned, | ||
431 | * so count should be also aligned | ||
432 | */ | ||
433 | |||
434 | sg_copy_buf(buf + off, *sg, *offset, count, out); | ||
435 | |||
436 | off += count; | ||
437 | buflen -= count; | ||
438 | *offset += count; | ||
439 | total -= count; | ||
440 | |||
441 | if (*offset == (*sg)->length) { | ||
442 | *sg = sg_next(*sg); | ||
443 | if (*sg) | ||
444 | *offset = 0; | ||
445 | else | ||
446 | total = 0; | ||
447 | } | ||
448 | } | ||
449 | |||
450 | return off; | ||
451 | } | ||
452 | |||
453 | static int omap_aes_crypt_dma(struct crypto_tfm *tfm, | 416 | static int omap_aes_crypt_dma(struct crypto_tfm *tfm, |
454 | struct scatterlist *in_sg, struct scatterlist *out_sg) | 417 | struct scatterlist *in_sg, struct scatterlist *out_sg, |
418 | int in_sg_len, int out_sg_len) | ||
455 | { | 419 | { |
456 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 420 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
457 | struct omap_aes_dev *dd = ctx->dd; | 421 | struct omap_aes_dev *dd = ctx->dd; |
458 | struct dma_async_tx_descriptor *tx_in, *tx_out; | 422 | struct dma_async_tx_descriptor *tx_in, *tx_out; |
459 | struct dma_slave_config cfg; | 423 | struct dma_slave_config cfg; |
460 | dma_addr_t dma_addr_in = sg_dma_address(in_sg); | 424 | int ret; |
461 | int ret, length = sg_dma_len(in_sg); | ||
462 | 425 | ||
463 | pr_debug("len: %d\n", length); | 426 | if (dd->pio_only) { |
427 | scatterwalk_start(&dd->in_walk, dd->in_sg); | ||
428 | scatterwalk_start(&dd->out_walk, dd->out_sg); | ||
464 | 429 | ||
465 | dd->dma_size = length; | 430 | /* Enable DATAIN interrupt and let it take |
431 | care of the rest */ | ||
432 | omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2); | ||
433 | return 0; | ||
434 | } | ||
466 | 435 | ||
467 | if (!(dd->flags & FLAGS_FAST)) | 436 | dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE); |
468 | dma_sync_single_for_device(dd->dev, dma_addr_in, length, | ||
469 | DMA_TO_DEVICE); | ||
470 | 437 | ||
471 | memset(&cfg, 0, sizeof(cfg)); | 438 | memset(&cfg, 0, sizeof(cfg)); |
472 | 439 | ||
@@ -485,7 +452,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, | |||
485 | return ret; | 452 | return ret; |
486 | } | 453 | } |
487 | 454 | ||
488 | tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1, | 455 | tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len, |
489 | DMA_MEM_TO_DEV, | 456 | DMA_MEM_TO_DEV, |
490 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 457 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
491 | if (!tx_in) { | 458 | if (!tx_in) { |
@@ -504,7 +471,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, | |||
504 | return ret; | 471 | return ret; |
505 | } | 472 | } |
506 | 473 | ||
507 | tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1, | 474 | tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len, |
508 | DMA_DEV_TO_MEM, | 475 | DMA_DEV_TO_MEM, |
509 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 476 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
510 | if (!tx_out) { | 477 | if (!tx_out) { |
@@ -522,7 +489,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, | |||
522 | dma_async_issue_pending(dd->dma_lch_out); | 489 | dma_async_issue_pending(dd->dma_lch_out); |
523 | 490 | ||
524 | /* start DMA */ | 491 | /* start DMA */ |
525 | dd->pdata->trigger(dd, length); | 492 | dd->pdata->trigger(dd, dd->total); |
526 | 493 | ||
527 | return 0; | 494 | return 0; |
528 | } | 495 | } |
@@ -531,93 +498,32 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) | |||
531 | { | 498 | { |
532 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( | 499 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( |
533 | crypto_ablkcipher_reqtfm(dd->req)); | 500 | crypto_ablkcipher_reqtfm(dd->req)); |
534 | int err, fast = 0, in, out; | 501 | int err; |
535 | size_t count; | ||
536 | dma_addr_t addr_in, addr_out; | ||
537 | struct scatterlist *in_sg, *out_sg; | ||
538 | int len32; | ||
539 | 502 | ||
540 | pr_debug("total: %d\n", dd->total); | 503 | pr_debug("total: %d\n", dd->total); |
541 | 504 | ||
542 | if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { | 505 | if (!dd->pio_only) { |
543 | /* check for alignment */ | 506 | err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, |
544 | in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); | 507 | DMA_TO_DEVICE); |
545 | out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)); | ||
546 | |||
547 | fast = in && out; | ||
548 | } | ||
549 | |||
550 | if (fast) { | ||
551 | count = min(dd->total, sg_dma_len(dd->in_sg)); | ||
552 | count = min(count, sg_dma_len(dd->out_sg)); | ||
553 | |||
554 | if (count != dd->total) { | ||
555 | pr_err("request length != buffer length\n"); | ||
556 | return -EINVAL; | ||
557 | } | ||
558 | |||
559 | pr_debug("fast\n"); | ||
560 | |||
561 | err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
562 | if (!err) { | 508 | if (!err) { |
563 | dev_err(dd->dev, "dma_map_sg() error\n"); | 509 | dev_err(dd->dev, "dma_map_sg() error\n"); |
564 | return -EINVAL; | 510 | return -EINVAL; |
565 | } | 511 | } |
566 | 512 | ||
567 | err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | 513 | err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, |
514 | DMA_FROM_DEVICE); | ||
568 | if (!err) { | 515 | if (!err) { |
569 | dev_err(dd->dev, "dma_map_sg() error\n"); | 516 | dev_err(dd->dev, "dma_map_sg() error\n"); |
570 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
571 | return -EINVAL; | 517 | return -EINVAL; |
572 | } | 518 | } |
573 | |||
574 | addr_in = sg_dma_address(dd->in_sg); | ||
575 | addr_out = sg_dma_address(dd->out_sg); | ||
576 | |||
577 | in_sg = dd->in_sg; | ||
578 | out_sg = dd->out_sg; | ||
579 | |||
580 | dd->flags |= FLAGS_FAST; | ||
581 | |||
582 | } else { | ||
583 | /* use cache buffers */ | ||
584 | count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in, | ||
585 | dd->buflen, dd->total, 0); | ||
586 | |||
587 | len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN; | ||
588 | |||
589 | /* | ||
590 | * The data going into the AES module has been copied | ||
591 | * to a local buffer and the data coming out will go | ||
592 | * into a local buffer so set up local SG entries for | ||
593 | * both. | ||
594 | */ | ||
595 | sg_init_table(&dd->in_sgl, 1); | ||
596 | dd->in_sgl.offset = dd->in_offset; | ||
597 | sg_dma_len(&dd->in_sgl) = len32; | ||
598 | sg_dma_address(&dd->in_sgl) = dd->dma_addr_in; | ||
599 | |||
600 | sg_init_table(&dd->out_sgl, 1); | ||
601 | dd->out_sgl.offset = dd->out_offset; | ||
602 | sg_dma_len(&dd->out_sgl) = len32; | ||
603 | sg_dma_address(&dd->out_sgl) = dd->dma_addr_out; | ||
604 | |||
605 | in_sg = &dd->in_sgl; | ||
606 | out_sg = &dd->out_sgl; | ||
607 | |||
608 | addr_in = dd->dma_addr_in; | ||
609 | addr_out = dd->dma_addr_out; | ||
610 | |||
611 | dd->flags &= ~FLAGS_FAST; | ||
612 | |||
613 | } | 519 | } |
614 | 520 | ||
615 | dd->total -= count; | 521 | err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len, |
616 | 522 | dd->out_sg_len); | |
617 | err = omap_aes_crypt_dma(tfm, in_sg, out_sg); | 523 | if (err && !dd->pio_only) { |
618 | if (err) { | 524 | dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); |
619 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | 525 | dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, |
620 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); | 526 | DMA_FROM_DEVICE); |
621 | } | 527 | } |
622 | 528 | ||
623 | return err; | 529 | return err; |
@@ -637,7 +543,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) | |||
637 | static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | 543 | static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) |
638 | { | 544 | { |
639 | int err = 0; | 545 | int err = 0; |
640 | size_t count; | ||
641 | 546 | ||
642 | pr_debug("total: %d\n", dd->total); | 547 | pr_debug("total: %d\n", dd->total); |
643 | 548 | ||
@@ -646,23 +551,49 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | |||
646 | dmaengine_terminate_all(dd->dma_lch_in); | 551 | dmaengine_terminate_all(dd->dma_lch_in); |
647 | dmaengine_terminate_all(dd->dma_lch_out); | 552 | dmaengine_terminate_all(dd->dma_lch_out); |
648 | 553 | ||
649 | if (dd->flags & FLAGS_FAST) { | 554 | return err; |
650 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | 555 | } |
651 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | 556 | |
652 | } else { | 557 | int omap_aes_check_aligned(struct scatterlist *sg) |
653 | dma_sync_single_for_device(dd->dev, dd->dma_addr_out, | 558 | { |
654 | dd->dma_size, DMA_FROM_DEVICE); | 559 | while (sg) { |
655 | 560 | if (!IS_ALIGNED(sg->offset, 4)) | |
656 | /* copy data */ | 561 | return -1; |
657 | count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out, | 562 | if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) |
658 | dd->buflen, dd->dma_size, 1); | 563 | return -1; |
659 | if (count != dd->dma_size) { | 564 | sg = sg_next(sg); |
660 | err = -EINVAL; | ||
661 | pr_err("not all data converted: %u\n", count); | ||
662 | } | ||
663 | } | 565 | } |
566 | return 0; | ||
567 | } | ||
664 | 568 | ||
665 | return err; | 569 | int omap_aes_copy_sgs(struct omap_aes_dev *dd) |
570 | { | ||
571 | void *buf_in, *buf_out; | ||
572 | int pages; | ||
573 | |||
574 | pages = get_order(dd->total); | ||
575 | |||
576 | buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
577 | buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
578 | |||
579 | if (!buf_in || !buf_out) { | ||
580 | pr_err("Couldn't allocated pages for unaligned cases.\n"); | ||
581 | return -1; | ||
582 | } | ||
583 | |||
584 | dd->orig_out = dd->out_sg; | ||
585 | |||
586 | sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); | ||
587 | |||
588 | sg_init_table(&dd->in_sgl, 1); | ||
589 | sg_set_buf(&dd->in_sgl, buf_in, dd->total); | ||
590 | dd->in_sg = &dd->in_sgl; | ||
591 | |||
592 | sg_init_table(&dd->out_sgl, 1); | ||
593 | sg_set_buf(&dd->out_sgl, buf_out, dd->total); | ||
594 | dd->out_sg = &dd->out_sgl; | ||
595 | |||
596 | return 0; | ||
666 | } | 597 | } |
667 | 598 | ||
668 | static int omap_aes_handle_queue(struct omap_aes_dev *dd, | 599 | static int omap_aes_handle_queue(struct omap_aes_dev *dd, |
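omap_aes_check_aligned() and omap_aes_copy_sgs() form the new unaligned fallback: if any segment has a non-word-aligned offset or a length that is not a whole number of AES blocks, the request is gathered into one contiguous bounce buffer, processed there, and scattered back to the original destination in done_task below. A userspace model of the check-then-bounce decision:

	#include <stdlib.h>
	#include <string.h>

	#define AES_BLOCK_SIZE 16

	struct seg {
		size_t offset;                  /* models sg->offset */
		size_t length;                  /* models sg->length */
	};

	static int segs_aligned(const struct seg *s, int n)
	{
		for (int i = 0; i < n; i++)
			if ((s[i].offset & 3) || (s[i].length % AES_BLOCK_SIZE))
				return 0;       /* hardware/DMA rules violated */
		return 1;
	}

	static unsigned char *maybe_bounce(const struct seg *s, int n,
					   const unsigned char *flat, size_t total)
	{
		unsigned char *buf;

		if (segs_aligned(s, n))
			return NULL;            /* use the caller's buffers directly */

		buf = malloc(total);            /* bounce buffer for the whole request */
		if (buf)
			memcpy(buf, flat, total); /* gather; scatter back on completion */
		return buf;
	}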
@@ -698,11 +629,23 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, | |||
698 | /* assign new request to device */ | 629 | /* assign new request to device */ |
699 | dd->req = req; | 630 | dd->req = req; |
700 | dd->total = req->nbytes; | 631 | dd->total = req->nbytes; |
701 | dd->in_offset = 0; | 632 | dd->total_save = req->nbytes; |
702 | dd->in_sg = req->src; | 633 | dd->in_sg = req->src; |
703 | dd->out_offset = 0; | ||
704 | dd->out_sg = req->dst; | 634 | dd->out_sg = req->dst; |
705 | 635 | ||
636 | if (omap_aes_check_aligned(dd->in_sg) || | ||
637 | omap_aes_check_aligned(dd->out_sg)) { | ||
638 | if (omap_aes_copy_sgs(dd)) | ||
639 | pr_err("Failed to copy SGs for unaligned cases\n"); | ||
640 | dd->sgs_copied = 1; | ||
641 | } else { | ||
642 | dd->sgs_copied = 0; | ||
643 | } | ||
644 | |||
645 | dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); | ||
646 | dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); | ||
647 | BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); | ||
648 | |||
706 | rctx = ablkcipher_request_ctx(req); | 649 | rctx = ablkcipher_request_ctx(req); |
707 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | 650 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); |
708 | rctx->mode &= FLAGS_MODE_MASK; | 651 | rctx->mode &= FLAGS_MODE_MASK; |
@@ -726,21 +669,32 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, | |||
726 | static void omap_aes_done_task(unsigned long data) | 669 | static void omap_aes_done_task(unsigned long data) |
727 | { | 670 | { |
728 | struct omap_aes_dev *dd = (struct omap_aes_dev *)data; | 671 | struct omap_aes_dev *dd = (struct omap_aes_dev *)data; |
729 | int err; | 672 | void *buf_in, *buf_out; |
730 | 673 | int pages; | |
731 | pr_debug("enter\n"); | 674 | |
675 | pr_debug("enter done_task\n"); | ||
676 | |||
677 | if (!dd->pio_only) { | ||
678 | dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len, | ||
679 | DMA_FROM_DEVICE); | ||
680 | dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); | ||
681 | dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, | ||
682 | DMA_FROM_DEVICE); | ||
683 | omap_aes_crypt_dma_stop(dd); | ||
684 | } | ||
732 | 685 | ||
733 | err = omap_aes_crypt_dma_stop(dd); | 686 | if (dd->sgs_copied) { |
687 | buf_in = sg_virt(&dd->in_sgl); | ||
688 | buf_out = sg_virt(&dd->out_sgl); | ||
734 | 689 | ||
735 | err = dd->err ? : err; | 690 | sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); |
736 | 691 | ||
737 | if (dd->total && !err) { | 692 | pages = get_order(dd->total_save); |
738 | err = omap_aes_crypt_dma_start(dd); | 693 | free_pages((unsigned long)buf_in, pages); |
739 | if (!err) | 694 | free_pages((unsigned long)buf_out, pages); |
740 | return; /* DMA started. Not finishing. */ | ||
741 | } | 695 | } |
742 | 696 | ||
743 | omap_aes_finish_req(dd, err); | 697 | omap_aes_finish_req(dd, 0); |
744 | omap_aes_handle_queue(dd, NULL); | 698 | omap_aes_handle_queue(dd, NULL); |
745 | 699 | ||
746 | pr_debug("exit\n"); | 700 | pr_debug("exit\n"); |
@@ -1002,6 +956,8 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = { | |||
1002 | .data_ofs = 0x60, | 956 | .data_ofs = 0x60, |
1003 | .rev_ofs = 0x80, | 957 | .rev_ofs = 0x80, |
1004 | .mask_ofs = 0x84, | 958 | .mask_ofs = 0x84, |
959 | .irq_status_ofs = 0x8c, | ||
960 | .irq_enable_ofs = 0x90, | ||
1005 | .dma_enable_in = BIT(5), | 961 | .dma_enable_in = BIT(5), |
1006 | .dma_enable_out = BIT(6), | 962 | .dma_enable_out = BIT(6), |
1007 | .major_mask = 0x0700, | 963 | .major_mask = 0x0700, |
@@ -1010,6 +966,90 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = { | |||
1010 | .minor_shift = 0, | 966 | .minor_shift = 0, |
1011 | }; | 967 | }; |
1012 | 968 | ||
969 | static irqreturn_t omap_aes_irq(int irq, void *dev_id) | ||
970 | { | ||
971 | struct omap_aes_dev *dd = dev_id; | ||
972 | u32 status, i; | ||
973 | u32 *src, *dst; | ||
974 | |||
975 | status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd)); | ||
976 | if (status & AES_REG_IRQ_DATA_IN) { | ||
977 | omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0); | ||
978 | |||
979 | BUG_ON(!dd->in_sg); | ||
980 | |||
981 | BUG_ON(_calc_walked(in) > dd->in_sg->length); | ||
982 | |||
983 | src = sg_virt(dd->in_sg) + _calc_walked(in); | ||
984 | |||
985 | for (i = 0; i < AES_BLOCK_WORDS; i++) { | ||
986 | omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src); | ||
987 | |||
988 | scatterwalk_advance(&dd->in_walk, 4); | ||
989 | if (dd->in_sg->length == _calc_walked(in)) { | ||
990 | dd->in_sg = scatterwalk_sg_next(dd->in_sg); | ||
991 | if (dd->in_sg) { | ||
992 | scatterwalk_start(&dd->in_walk, | ||
993 | dd->in_sg); | ||
994 | src = sg_virt(dd->in_sg) + | ||
995 | _calc_walked(in); | ||
996 | } | ||
997 | } else { | ||
998 | src++; | ||
999 | } | ||
1000 | } | ||
1001 | |||
1002 | /* Clear IRQ status */ | ||
1003 | status &= ~AES_REG_IRQ_DATA_IN; | ||
1004 | omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status); | ||
1005 | |||
1006 | /* Enable DATA_OUT interrupt */ | ||
1007 | omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4); | ||
1008 | |||
1009 | } else if (status & AES_REG_IRQ_DATA_OUT) { | ||
1010 | omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0); | ||
1011 | |||
1012 | BUG_ON(!dd->out_sg); | ||
1013 | |||
1014 | BUG_ON(_calc_walked(out) > dd->out_sg->length); | ||
1015 | |||
1016 | dst = sg_virt(dd->out_sg) + _calc_walked(out); | ||
1017 | |||
1018 | for (i = 0; i < AES_BLOCK_WORDS; i++) { | ||
1019 | *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i)); | ||
1020 | scatterwalk_advance(&dd->out_walk, 4); | ||
1021 | if (dd->out_sg->length == _calc_walked(out)) { | ||
1022 | dd->out_sg = scatterwalk_sg_next(dd->out_sg); | ||
1023 | if (dd->out_sg) { | ||
1024 | scatterwalk_start(&dd->out_walk, | ||
1025 | dd->out_sg); | ||
1026 | dst = sg_virt(dd->out_sg) + | ||
1027 | _calc_walked(out); | ||
1028 | } | ||
1029 | } else { | ||
1030 | dst++; | ||
1031 | } | ||
1032 | } | ||
1033 | |||
1034 | dd->total -= AES_BLOCK_SIZE; | ||
1035 | |||
1036 | BUG_ON(dd->total < 0); | ||
1037 | |||
1038 | /* Clear IRQ status */ | ||
1039 | status &= ~AES_REG_IRQ_DATA_OUT; | ||
1040 | omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status); | ||
1041 | |||
1042 | if (!dd->total) | ||
1043 | /* All bytes read! */ | ||
1044 | tasklet_schedule(&dd->done_task); | ||
1045 | else | ||
1046 | /* Enable DATA_IN interrupt for next block */ | ||
1047 | omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2); | ||
1048 | } | ||
1049 | |||
1050 | return IRQ_HANDLED; | ||
1051 | } | ||
1052 | |||
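The PIO interrupt handler is a two-state ping-pong: DATA_IN fires, one four-word block is written to the data registers and DATA_OUT is armed; DATA_OUT fires, one block is read back, total is decremented, and either the done tasklet is scheduled or DATA_IN is re-armed for the next block. Condensed to a sketch (the block and register helpers are hypothetical stand-ins for the loops above):

	#include <stddef.h>

	/* one interrupt == one direction of one 16-byte block */
	enum irq_bit { DATA_IN = 1 << 1, DATA_OUT = 1 << 2 };

	static void write_block(void) {}        /* feed 4 words to the data regs */
	static void read_block(void) {}         /* drain 4 words from the data regs */
	static void irq_enable(unsigned int m) { (void)m; }
	static void schedule_done(void) {}

	static void pio_irq(unsigned int status, size_t *total)
	{
		if (status & DATA_IN) {
			irq_enable(0);
			write_block();
			irq_enable(DATA_OUT);   /* wait for the result block */
		} else if (status & DATA_OUT) {
			irq_enable(0);
			read_block();
			*total -= 16;
			if (*total == 0)
				schedule_done();
			else
				irq_enable(DATA_IN);  /* next block */
		}
	}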
1013 | static const struct of_device_id omap_aes_of_match[] = { | 1053 | static const struct of_device_id omap_aes_of_match[] = { |
1014 | { | 1054 | { |
1015 | .compatible = "ti,omap2-aes", | 1055 | .compatible = "ti,omap2-aes", |
@@ -1115,10 +1155,10 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
1115 | struct omap_aes_dev *dd; | 1155 | struct omap_aes_dev *dd; |
1116 | struct crypto_alg *algp; | 1156 | struct crypto_alg *algp; |
1117 | struct resource res; | 1157 | struct resource res; |
1118 | int err = -ENOMEM, i, j; | 1158 | int err = -ENOMEM, i, j, irq = -1; |
1119 | u32 reg; | 1159 | u32 reg; |
1120 | 1160 | ||
1121 | dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL); | 1161 | dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL); |
1122 | if (dd == NULL) { | 1162 | if (dd == NULL) { |
1123 | dev_err(dev, "unable to alloc data struct.\n"); | 1163 | dev_err(dev, "unable to alloc data struct.\n"); |
1124 | goto err_data; | 1164 | goto err_data; |
@@ -1158,8 +1198,23 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
1158 | tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); | 1198 | tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); |
1159 | 1199 | ||
1160 | err = omap_aes_dma_init(dd); | 1200 | err = omap_aes_dma_init(dd); |
1161 | if (err) | 1201 | if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) { |
1162 | goto err_dma; | 1202 | dd->pio_only = 1; |
1203 | |||
1204 | irq = platform_get_irq(pdev, 0); | ||
1205 | if (irq < 0) { | ||
1206 | dev_err(dev, "can't get IRQ resource\n"); | ||
1207 | goto err_irq; | ||
1208 | } | ||
1209 | |||
1210 | err = devm_request_irq(dev, irq, omap_aes_irq, 0, | ||
1211 | dev_name(dev), dd); | ||
1212 | if (err) { | ||
1213 | dev_err(dev, "Unable to grab omap-aes IRQ\n"); | ||
1214 | goto err_irq; | ||
1215 | } | ||
1216 | } | ||
1217 | |||
1163 | 1218 | ||
1164 | INIT_LIST_HEAD(&dd->list); | 1219 | INIT_LIST_HEAD(&dd->list); |
1165 | spin_lock(&list_lock); | 1220 | spin_lock(&list_lock); |
@@ -1187,13 +1242,13 @@ err_algs: | |||
1187 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) | 1242 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
1188 | crypto_unregister_alg( | 1243 | crypto_unregister_alg( |
1189 | &dd->pdata->algs_info[i].algs_list[j]); | 1244 | &dd->pdata->algs_info[i].algs_list[j]); |
1190 | omap_aes_dma_cleanup(dd); | 1245 | if (!dd->pio_only) |
1191 | err_dma: | 1246 | omap_aes_dma_cleanup(dd); |
1247 | err_irq: | ||
1192 | tasklet_kill(&dd->done_task); | 1248 | tasklet_kill(&dd->done_task); |
1193 | tasklet_kill(&dd->queue_task); | 1249 | tasklet_kill(&dd->queue_task); |
1194 | pm_runtime_disable(dev); | 1250 | pm_runtime_disable(dev); |
1195 | err_res: | 1251 | err_res: |
1196 | kfree(dd); | ||
1197 | dd = NULL; | 1252 | dd = NULL; |
1198 | err_data: | 1253 | err_data: |
1199 | dev_err(dev, "initialization failed.\n"); | 1254 | dev_err(dev, "initialization failed.\n"); |
@@ -1221,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev) | |||
1221 | tasklet_kill(&dd->queue_task); | 1276 | tasklet_kill(&dd->queue_task); |
1222 | omap_aes_dma_cleanup(dd); | 1277 | omap_aes_dma_cleanup(dd); |
1223 | pm_runtime_disable(dd->dev); | 1278 | pm_runtime_disable(dd->dev); |
1224 | kfree(dd); | ||
1225 | dd = NULL; | 1279 | dd = NULL; |
1226 | 1280 | ||
1227 | return 0; | 1281 | return 0; |
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4bb67652c200..8bdde57f6bb1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -44,17 +44,13 @@ | |||
44 | #include <crypto/hash.h> | 44 | #include <crypto/hash.h> |
45 | #include <crypto/internal/hash.h> | 45 | #include <crypto/internal/hash.h> |
46 | 46 | ||
47 | #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE | ||
48 | #define MD5_DIGEST_SIZE 16 | 47 | #define MD5_DIGEST_SIZE 16 |
49 | 48 | ||
50 | #define DST_MAXBURST 16 | ||
51 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) | ||
52 | |||
53 | #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) | 49 | #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) |
54 | #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) | 50 | #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) |
55 | #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) | 51 | #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) |
56 | 52 | ||
57 | #define SHA_REG_ODIGEST(x) (0x00 + ((x) * 0x04)) | 53 | #define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + (x * 0x04)) |
58 | 54 | ||
59 | #define SHA_REG_CTRL 0x18 | 55 | #define SHA_REG_CTRL 0x18 |
60 | #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) | 56 | #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) |
@@ -75,18 +71,21 @@ | |||
75 | #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) | 71 | #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) |
76 | #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) | 72 | #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) |
77 | 73 | ||
78 | #define SHA_REG_MODE 0x44 | 74 | #define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs) |
79 | #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7) | 75 | #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7) |
80 | #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5) | 76 | #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5) |
81 | #define SHA_REG_MODE_CLOSE_HASH (1 << 4) | 77 | #define SHA_REG_MODE_CLOSE_HASH (1 << 4) |
82 | #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3) | 78 | #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3) |
83 | #define SHA_REG_MODE_ALGO_MASK (3 << 1) | ||
84 | #define SHA_REG_MODE_ALGO_MD5_128 (0 << 1) | ||
85 | #define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1) | ||
86 | #define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1) | ||
87 | #define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1) | ||
88 | 79 | ||
89 | #define SHA_REG_LENGTH 0x48 | 80 | #define SHA_REG_MODE_ALGO_MASK (7 << 0) |
81 | #define SHA_REG_MODE_ALGO_MD5_128 (0 << 1) | ||
82 | #define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1) | ||
83 | #define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1) | ||
84 | #define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1) | ||
85 | #define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0) | ||
86 | #define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0) | ||
87 | |||
88 | #define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs) | ||
90 | 89 | ||
91 | #define SHA_REG_IRQSTATUS 0x118 | 90 | #define SHA_REG_IRQSTATUS 0x118 |
92 | #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3) | 91 | #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3) |
@@ -117,18 +116,16 @@ | |||
117 | #define FLAGS_SG 17 | 116 | #define FLAGS_SG 17 |
118 | 117 | ||
119 | #define FLAGS_MODE_SHIFT 18 | 118 | #define FLAGS_MODE_SHIFT 18 |
120 | #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK \ | 119 | #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT) |
121 | << (FLAGS_MODE_SHIFT - 1)) | 120 | #define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT) |
122 | #define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 \ | 121 | #define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT) |
123 | << (FLAGS_MODE_SHIFT - 1)) | 122 | #define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT) |
124 | #define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 \ | 123 | #define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT) |
125 | << (FLAGS_MODE_SHIFT - 1)) | 124 | #define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT) |
126 | #define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 \ | 125 | #define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT) |
127 | << (FLAGS_MODE_SHIFT - 1)) | 126 | |
128 | #define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 \ | 127 | #define FLAGS_HMAC 21 |
129 | << (FLAGS_MODE_SHIFT - 1)) | 128 | #define FLAGS_ERROR 22 |
130 | #define FLAGS_HMAC 20 | ||
131 | #define FLAGS_ERROR 21 | ||
132 | 129 | ||
133 | #define OP_UPDATE 1 | 130 | #define OP_UPDATE 1 |
134 | #define OP_FINAL 2 | 131 | #define OP_FINAL 2 |
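The mode-flag rework is what makes SHA-384/512 representable: the algorithm field in the mode register grows from bits 2:1 (mask 3 << 1) to bits 2:0 (mask 7 << 0), so the driver flags now store it shifted by FLAGS_MODE_SHIFT directly instead of by FLAGS_MODE_SHIFT - 1. The round trip, as a small sketch:

	#define ALGO_MASK   0x7u                /* SHA_REG_MODE_ALGO_MASK, bits 2:0 */
	#define MODE_SHIFT  18                  /* FLAGS_MODE_SHIFT */

	/* algorithm encoding -> driver flags (bits 20:18) */
	static unsigned long algo_to_flags(unsigned int algo)
	{
		return (unsigned long)(algo & ALGO_MASK) << MODE_SHIFT;
	}

	/* driver flags -> value programmed into the mode register */
	static unsigned int flags_to_mode(unsigned long flags)
	{
		return (flags >> MODE_SHIFT) & ALGO_MASK;
	}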
@@ -145,7 +142,7 @@ struct omap_sham_reqctx { | |||
145 | unsigned long flags; | 142 | unsigned long flags; |
146 | unsigned long op; | 143 | unsigned long op; |
147 | 144 | ||
148 | u8 digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED; | 145 | u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED; |
149 | size_t digcnt; | 146 | size_t digcnt; |
150 | size_t bufcnt; | 147 | size_t bufcnt; |
151 | size_t buflen; | 148 | size_t buflen; |
@@ -162,8 +159,8 @@ struct omap_sham_reqctx { | |||
162 | 159 | ||
163 | struct omap_sham_hmac_ctx { | 160 | struct omap_sham_hmac_ctx { |
164 | struct crypto_shash *shash; | 161 | struct crypto_shash *shash; |
165 | u8 ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; | 162 | u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED; |
166 | u8 opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; | 163 | u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED; |
167 | }; | 164 | }; |
168 | 165 | ||
169 | struct omap_sham_ctx { | 166 | struct omap_sham_ctx { |
@@ -205,6 +202,8 @@ struct omap_sham_pdata { | |||
205 | u32 rev_ofs; | 202 | u32 rev_ofs; |
206 | u32 mask_ofs; | 203 | u32 mask_ofs; |
207 | u32 sysstatus_ofs; | 204 | u32 sysstatus_ofs; |
205 | u32 mode_ofs; | ||
206 | u32 length_ofs; | ||
208 | 207 | ||
209 | u32 major_mask; | 208 | u32 major_mask; |
210 | u32 major_shift; | 209 | u32 major_shift; |
@@ -223,6 +222,7 @@ struct omap_sham_dev { | |||
223 | unsigned int dma; | 222 | unsigned int dma; |
224 | struct dma_chan *dma_lch; | 223 | struct dma_chan *dma_lch; |
225 | struct tasklet_struct done_task; | 224 | struct tasklet_struct done_task; |
225 | u8 polling_mode; | ||
226 | 226 | ||
227 | unsigned long flags; | 227 | unsigned long flags; |
228 | struct crypto_queue queue; | 228 | struct crypto_queue queue; |
@@ -306,9 +306,9 @@ static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out) | |||
306 | for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { | 306 | for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { |
307 | if (out) | 307 | if (out) |
308 | opad[i] = omap_sham_read(dd, | 308 | opad[i] = omap_sham_read(dd, |
309 | SHA_REG_ODIGEST(i)); | 309 | SHA_REG_ODIGEST(dd, i)); |
310 | else | 310 | else |
311 | omap_sham_write(dd, SHA_REG_ODIGEST(i), | 311 | omap_sham_write(dd, SHA_REG_ODIGEST(dd, i), |
312 | opad[i]); | 312 | opad[i]); |
313 | } | 313 | } |
314 | } | 314 | } |
@@ -342,6 +342,12 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) | |||
342 | case FLAGS_MODE_SHA256: | 342 | case FLAGS_MODE_SHA256: |
343 | d = SHA256_DIGEST_SIZE / sizeof(u32); | 343 | d = SHA256_DIGEST_SIZE / sizeof(u32); |
344 | break; | 344 | break; |
345 | case FLAGS_MODE_SHA384: | ||
346 | d = SHA384_DIGEST_SIZE / sizeof(u32); | ||
347 | break; | ||
348 | case FLAGS_MODE_SHA512: | ||
349 | d = SHA512_DIGEST_SIZE / sizeof(u32); | ||
350 | break; | ||
345 | default: | 351 | default: |
346 | d = 0; | 352 | d = 0; |
347 | } | 353 | } |
@@ -404,6 +410,30 @@ static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd) | |||
404 | return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); | 410 | return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); |
405 | } | 411 | } |
406 | 412 | ||
413 | static int get_block_size(struct omap_sham_reqctx *ctx) | ||
414 | { | ||
415 | int d; | ||
416 | |||
417 | switch (ctx->flags & FLAGS_MODE_MASK) { | ||
418 | case FLAGS_MODE_MD5: | ||
419 | case FLAGS_MODE_SHA1: | ||
420 | d = SHA1_BLOCK_SIZE; | ||
421 | break; | ||
422 | case FLAGS_MODE_SHA224: | ||
423 | case FLAGS_MODE_SHA256: | ||
424 | d = SHA256_BLOCK_SIZE; | ||
425 | break; | ||
426 | case FLAGS_MODE_SHA384: | ||
427 | case FLAGS_MODE_SHA512: | ||
428 | d = SHA512_BLOCK_SIZE; | ||
429 | break; | ||
430 | default: | ||
431 | d = 0; | ||
432 | } | ||
433 | |||
434 | return d; | ||
435 | } | ||
436 | |||
407 | static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, | 437 | static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, |
408 | u32 *value, int count) | 438 | u32 *value, int count) |
409 | { | 439 | { |
@@ -422,20 +452,24 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, | |||
422 | * CLOSE_HASH only for the last one. Note that flags mode bits | 452 | * CLOSE_HASH only for the last one. Note that flags mode bits |
423 | * correspond to algorithm encoding in mode register. | 453 | * correspond to algorithm encoding in mode register. |
424 | */ | 454 | */ |
425 | val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1); | 455 | val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT); |
426 | if (!ctx->digcnt) { | 456 | if (!ctx->digcnt) { |
427 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); | 457 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); |
428 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); | 458 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); |
429 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 459 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
460 | int bs, nr_dr; | ||
430 | 461 | ||
431 | val |= SHA_REG_MODE_ALGO_CONSTANT; | 462 | val |= SHA_REG_MODE_ALGO_CONSTANT; |
432 | 463 | ||
433 | if (ctx->flags & BIT(FLAGS_HMAC)) { | 464 | if (ctx->flags & BIT(FLAGS_HMAC)) { |
465 | bs = get_block_size(ctx); | ||
466 | nr_dr = bs / (2 * sizeof(u32)); | ||
434 | val |= SHA_REG_MODE_HMAC_KEY_PROC; | 467 | val |= SHA_REG_MODE_HMAC_KEY_PROC; |
435 | omap_sham_write_n(dd, SHA_REG_ODIGEST(0), | 468 | omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0), |
436 | (u32 *)bctx->ipad, | 469 | (u32 *)bctx->ipad, nr_dr); |
437 | SHA1_BLOCK_SIZE / sizeof(u32)); | 470 | omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0), |
438 | ctx->digcnt += SHA1_BLOCK_SIZE; | 471 | (u32 *)bctx->ipad + nr_dr, nr_dr); |
472 | ctx->digcnt += bs; | ||
439 | } | 473 | } |
440 | } | 474 | } |
441 | 475 | ||
@@ -451,7 +485,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, | |||
451 | SHA_REG_MODE_HMAC_KEY_PROC; | 485 | SHA_REG_MODE_HMAC_KEY_PROC; |
452 | 486 | ||
453 | dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); | 487 | dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); |
454 | omap_sham_write_mask(dd, SHA_REG_MODE, val, mask); | 488 | omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask); |
455 | omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); | 489 | omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); |
456 | omap_sham_write_mask(dd, SHA_REG_MASK(dd), | 490 | omap_sham_write_mask(dd, SHA_REG_MASK(dd), |
457 | SHA_REG_MASK_IT_EN | | 491 | SHA_REG_MASK_IT_EN | |
@@ -461,7 +495,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, | |||
461 | 495 | ||
462 | static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) | 496 | static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) |
463 | { | 497 | { |
464 | omap_sham_write(dd, SHA_REG_LENGTH, length); | 498 | omap_sham_write(dd, SHA_REG_LENGTH(dd), length); |
465 | } | 499 | } |
466 | 500 | ||
467 | static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) | 501 | static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) |
@@ -474,7 +508,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
474 | size_t length, int final) | 508 | size_t length, int final) |
475 | { | 509 | { |
476 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 510 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
477 | int count, len32; | 511 | int count, len32, bs32, offset = 0; |
478 | const u32 *buffer = (const u32 *)buf; | 512 | const u32 *buffer = (const u32 *)buf; |
479 | 513 | ||
480 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", | 514 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", |
@@ -486,18 +520,23 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
486 | /* should be non-zero before next lines to disable clocks later */ | 520 | /* should be non-zero before next lines to disable clocks later */ |
487 | ctx->digcnt += length; | 521 | ctx->digcnt += length; |
488 | 522 | ||
489 | if (dd->pdata->poll_irq(dd)) | ||
490 | return -ETIMEDOUT; | ||
491 | |||
492 | if (final) | 523 | if (final) |
493 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ | 524 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ |
494 | 525 | ||
495 | set_bit(FLAGS_CPU, &dd->flags); | 526 | set_bit(FLAGS_CPU, &dd->flags); |
496 | 527 | ||
497 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | 528 | len32 = DIV_ROUND_UP(length, sizeof(u32)); |
529 | bs32 = get_block_size(ctx) / sizeof(u32); | ||
498 | 530 | ||
499 | for (count = 0; count < len32; count++) | 531 | while (len32) { |
500 | omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]); | 532 | if (dd->pdata->poll_irq(dd)) |
533 | return -ETIMEDOUT; | ||
534 | |||
535 | for (count = 0; count < min(len32, bs32); count++, offset++) | ||
536 | omap_sham_write(dd, SHA_REG_DIN(dd, count), | ||
537 | buffer[offset]); | ||
538 | len32 -= min(len32, bs32); | ||
539 | } | ||
501 | 540 | ||
502 | return -EINPROGRESS; | 541 | return -EINPROGRESS; |
503 | } | 542 | } |
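omap_sham_xmit_cpu() now refuses to push more than one block between readiness checks: poll_irq() gates every bs32-word group instead of being consulted once up front, which keeps the polling fallback correct when the input FIFO only holds one block. The loop shape, modeled in userspace with stand-in helpers:

	#include <stddef.h>

	static int wait_input_ready(void)       /* stand-in for dd->pdata->poll_irq() */
	{
		return 0;                       /* 0 = ready, nonzero = timeout */
	}

	static void write_word(unsigned int w)  /* stand-in for the DIN register write */
	{
		(void)w;
	}

	static int feed_blockwise(const unsigned int *words, size_t len32, size_t bs32)
	{
		size_t offset = 0;

		while (len32) {
			size_t n = len32 < bs32 ? len32 : bs32;

			if (wait_input_ready())
				return -1;      /* -ETIMEDOUT in the driver */

			for (size_t i = 0; i < n; i++)
				write_word(words[offset++]);
			len32 -= n;
		}

		return 0;
	}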
@@ -516,7 +555,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
516 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 555 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
517 | struct dma_async_tx_descriptor *tx; | 556 | struct dma_async_tx_descriptor *tx; |
518 | struct dma_slave_config cfg; | 557 | struct dma_slave_config cfg; |
519 | int len32, ret; | 558 | int len32, ret, dma_min = get_block_size(ctx); |
520 | 559 | ||
521 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", | 560 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", |
522 | ctx->digcnt, length, final); | 561 | ctx->digcnt, length, final); |
@@ -525,7 +564,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
525 | 564 | ||
526 | cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); | 565 | cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); |
527 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 566 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
528 | cfg.dst_maxburst = DST_MAXBURST; | 567 | cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES; |
529 | 568 | ||
530 | ret = dmaengine_slave_config(dd->dma_lch, &cfg); | 569 | ret = dmaengine_slave_config(dd->dma_lch, &cfg); |
531 | if (ret) { | 570 | if (ret) { |
@@ -533,7 +572,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
533 | return ret; | 572 | return ret; |
534 | } | 573 | } |
535 | 574 | ||
536 | len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN; | 575 | len32 = DIV_ROUND_UP(length, dma_min) * dma_min; |
537 | 576 | ||
538 | if (is_sg) { | 577 | if (is_sg) { |
539 | /* | 578 | /* |
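The DMA burst is now derived from the algorithm's block size instead of the fixed DST_MAXBURST: dst_maxburst is the block size divided by the 4-byte bus width, so the 64-byte-block algorithms (MD5, SHA-1, SHA-224/256) burst 16 words and SHA-384/512 with their 128-byte blocks burst 32. Spelled out:

	/* dst_maxburst in 32-bit words, from the block size in bytes
	 * (DMA_SLAVE_BUSWIDTH_4_BYTES == 4) */
	static int maxburst_words(int block_size)
	{
		return block_size / 4;          /* 64 -> 16, 128 -> 32 */
	}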
@@ -666,14 +705,14 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | |||
666 | /* Start address alignment */ | 705 | /* Start address alignment */ |
667 | #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) | 706 | #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) |
668 | /* SHA1 block size alignment */ | 707 | /* SHA1 block size alignment */ |
669 | #define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) | 708 | #define SG_SA(sg, bs) (IS_ALIGNED(sg->length, bs)) |
670 | 709 | ||
671 | static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | 710 | static int omap_sham_update_dma_start(struct omap_sham_dev *dd) |
672 | { | 711 | { |
673 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 712 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
674 | unsigned int length, final, tail; | 713 | unsigned int length, final, tail; |
675 | struct scatterlist *sg; | 714 | struct scatterlist *sg; |
676 | int ret; | 715 | int ret, bs; |
677 | 716 | ||
678 | if (!ctx->total) | 717 | if (!ctx->total) |
679 | return 0; | 718 | return 0; |
@@ -687,30 +726,31 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
687 | * the dmaengine infrastructure will calculate that it needs | 726 | * the dmaengine infrastructure will calculate that it needs |
688 | * to transfer 0 frames which ultimately fails. | 727 | * to transfer 0 frames which ultimately fails. |
689 | */ | 728 | */ |
690 | if (ctx->total < (DST_MAXBURST * sizeof(u32))) | 729 | if (ctx->total < get_block_size(ctx)) |
691 | return omap_sham_update_dma_slow(dd); | 730 | return omap_sham_update_dma_slow(dd); |
692 | 731 | ||
693 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", | 732 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", |
694 | ctx->digcnt, ctx->bufcnt, ctx->total); | 733 | ctx->digcnt, ctx->bufcnt, ctx->total); |
695 | 734 | ||
696 | sg = ctx->sg; | 735 | sg = ctx->sg; |
736 | bs = get_block_size(ctx); | ||
697 | 737 | ||
698 | if (!SG_AA(sg)) | 738 | if (!SG_AA(sg)) |
699 | return omap_sham_update_dma_slow(dd); | 739 | return omap_sham_update_dma_slow(dd); |
700 | 740 | ||
701 | if (!sg_is_last(sg) && !SG_SA(sg)) | 741 | if (!sg_is_last(sg) && !SG_SA(sg, bs)) |
702 | /* size is not SHA1_BLOCK_SIZE aligned */ | 742 | /* size is not BLOCK_SIZE aligned */ |
703 | return omap_sham_update_dma_slow(dd); | 743 | return omap_sham_update_dma_slow(dd); |
704 | 744 | ||
705 | length = min(ctx->total, sg->length); | 745 | length = min(ctx->total, sg->length); |
706 | 746 | ||
707 | if (sg_is_last(sg)) { | 747 | if (sg_is_last(sg)) { |
708 | if (!(ctx->flags & BIT(FLAGS_FINUP))) { | 748 | if (!(ctx->flags & BIT(FLAGS_FINUP))) { |
709 | /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ | 749 | /* not last sg must be BLOCK_SIZE aligned */ |
710 | tail = length & (SHA1_MD5_BLOCK_SIZE - 1); | 750 | tail = length & (bs - 1); |
711 | /* without finup() we need one block to close hash */ | 751 | /* without finup() we need one block to close hash */ |
712 | if (!tail) | 752 | if (!tail) |
713 | tail = SHA1_MD5_BLOCK_SIZE; | 753 | tail = bs; |
714 | length -= tail; | 754 | length -= tail; |
715 | } | 755 | } |
716 | } | 756 | } |
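
[Review note] update_dma_start now gates the fast path on the generalized SG_SA(sg, bs) macro instead of a hard-coded SHA1_MD5_BLOCK_SIZE. A standalone model of the two checks, with the scatterlist fields simplified to a plain struct:

    #include <stdio.h>
    #include <stdbool.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    struct sg { unsigned offset, length; bool last; };

    /* Mirror of the gating: word-aligned start, block-aligned size. */
    static bool can_use_fast_dma(const struct sg *sg, unsigned bs)
    {
        if (!IS_ALIGNED(sg->offset, sizeof(unsigned)))   /* SG_AA */
            return false;
        if (!sg->last && !IS_ALIGNED(sg->length, bs))    /* SG_SA */
            return false;
        return true;
    }

    int main(void)
    {
        struct sg a = { .offset = 0, .length = 128, .last = false };
        struct sg b = { .offset = 2, .length = 128, .last = false };
        printf("a: %s, b: %s\n",
               can_use_fast_dma(&a, 64) ? "fast" : "slow",
               can_use_fast_dma(&b, 64) ? "fast" : "slow");
        return 0;
    }

Anything that fails either test falls back to omap_sham_update_dma_slow(), which buffers the data into an aligned bounce buffer first.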
@@ -737,13 +777,22 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
737 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) | 777 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) |
738 | { | 778 | { |
739 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 779 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
740 | int bufcnt; | 780 | int bufcnt, final; |
781 | |||
782 | if (!ctx->total) | ||
783 | return 0; | ||
741 | 784 | ||
742 | omap_sham_append_sg(ctx); | 785 | omap_sham_append_sg(ctx); |
786 | |||
787 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; | ||
788 | |||
789 | dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n", | ||
790 | ctx->bufcnt, ctx->digcnt, final); | ||
791 | |||
743 | bufcnt = ctx->bufcnt; | 792 | bufcnt = ctx->bufcnt; |
744 | ctx->bufcnt = 0; | 793 | ctx->bufcnt = 0; |
745 | 794 | ||
746 | return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); | 795 | return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final); |
747 | } | 796 | } |
748 | 797 | ||
749 | static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | 798 | static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) |
@@ -773,6 +822,7 @@ static int omap_sham_init(struct ahash_request *req) | |||
773 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); | 822 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); |
774 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 823 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
775 | struct omap_sham_dev *dd = NULL, *tmp; | 824 | struct omap_sham_dev *dd = NULL, *tmp; |
825 | int bs = 0; | ||
776 | 826 | ||
777 | spin_lock_bh(&sham.lock); | 827 | spin_lock_bh(&sham.lock); |
778 | if (!tctx->dd) { | 828 | if (!tctx->dd) { |
@@ -796,15 +846,27 @@ static int omap_sham_init(struct ahash_request *req) | |||
796 | switch (crypto_ahash_digestsize(tfm)) { | 846 | switch (crypto_ahash_digestsize(tfm)) { |
797 | case MD5_DIGEST_SIZE: | 847 | case MD5_DIGEST_SIZE: |
798 | ctx->flags |= FLAGS_MODE_MD5; | 848 | ctx->flags |= FLAGS_MODE_MD5; |
849 | bs = SHA1_BLOCK_SIZE; | ||
799 | break; | 850 | break; |
800 | case SHA1_DIGEST_SIZE: | 851 | case SHA1_DIGEST_SIZE: |
801 | ctx->flags |= FLAGS_MODE_SHA1; | 852 | ctx->flags |= FLAGS_MODE_SHA1; |
853 | bs = SHA1_BLOCK_SIZE; | ||
802 | break; | 854 | break; |
803 | case SHA224_DIGEST_SIZE: | 855 | case SHA224_DIGEST_SIZE: |
804 | ctx->flags |= FLAGS_MODE_SHA224; | 856 | ctx->flags |= FLAGS_MODE_SHA224; |
857 | bs = SHA224_BLOCK_SIZE; | ||
805 | break; | 858 | break; |
806 | case SHA256_DIGEST_SIZE: | 859 | case SHA256_DIGEST_SIZE: |
807 | ctx->flags |= FLAGS_MODE_SHA256; | 860 | ctx->flags |= FLAGS_MODE_SHA256; |
861 | bs = SHA256_BLOCK_SIZE; | ||
862 | break; | ||
863 | case SHA384_DIGEST_SIZE: | ||
864 | ctx->flags |= FLAGS_MODE_SHA384; | ||
865 | bs = SHA384_BLOCK_SIZE; | ||
866 | break; | ||
867 | case SHA512_DIGEST_SIZE: | ||
868 | ctx->flags |= FLAGS_MODE_SHA512; | ||
869 | bs = SHA512_BLOCK_SIZE; | ||
808 | break; | 870 | break; |
809 | } | 871 | } |
810 | 872 | ||
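
[Review note] The init switch now records a block size alongside each mode because SHA-384/512 hash 128-byte blocks while MD5/SHA-1/224/256 use 64 bytes. A compact standalone version of the same mapping, keyed on digest size exactly as the driver does:

    #include <stdio.h>

    /* Block sizes per FIPS 180-4 / RFC 1321; digest size identifies the mode. */
    static int block_size_for_digest(int digest_size)
    {
        switch (digest_size) {
        case 16: /* MD5 */
        case 20: /* SHA-1 */
        case 28: /* SHA-224 */
        case 32: /* SHA-256 */
            return 64;
        case 48: /* SHA-384 */
        case 64: /* SHA-512 */
            return 128;
        default:
            return -1;
        }
    }

    int main(void)
    {
        printf("sha512 block = %d bytes\n", block_size_for_digest(64));
        return 0;
    }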
@@ -816,8 +878,8 @@ static int omap_sham_init(struct ahash_request *req) | |||
816 | if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { | 878 | if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { |
817 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 879 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
818 | 880 | ||
819 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); | 881 | memcpy(ctx->buffer, bctx->ipad, bs); |
820 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; | 882 | ctx->bufcnt = bs; |
821 | } | 883 | } |
822 | 884 | ||
823 | ctx->flags |= BIT(FLAGS_HMAC); | 885 | ctx->flags |= BIT(FLAGS_HMAC); |
@@ -853,8 +915,11 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) | |||
853 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 915 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
854 | int err = 0, use_dma = 1; | 916 | int err = 0, use_dma = 1; |
855 | 917 | ||
856 | if (ctx->bufcnt <= DMA_MIN) | 918 | if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode) |
857 | /* faster to handle last block with cpu */ | 919 | /* |
920 | * faster to handle last block with cpu or | ||
921 | * use cpu when dma is not present. | ||
922 | */ | ||
858 | use_dma = 0; | 923 | use_dma = 0; |
859 | 924 | ||
860 | if (use_dma) | 925 | if (use_dma) |
@@ -1006,6 +1071,8 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) | |||
1006 | static int omap_sham_update(struct ahash_request *req) | 1071 | static int omap_sham_update(struct ahash_request *req) |
1007 | { | 1072 | { |
1008 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 1073 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
1074 | struct omap_sham_dev *dd = ctx->dd; | ||
1075 | int bs = get_block_size(ctx); | ||
1009 | 1076 | ||
1010 | if (!req->nbytes) | 1077 | if (!req->nbytes) |
1011 | return 0; | 1078 | return 0; |
@@ -1023,10 +1090,12 @@ static int omap_sham_update(struct ahash_request *req) | |||
1023 | */ | 1090 | */ |
1024 | omap_sham_append_sg(ctx); | 1091 | omap_sham_append_sg(ctx); |
1025 | return 0; | 1092 | return 0; |
1026 | } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { | 1093 | } else if ((ctx->bufcnt + ctx->total <= bs) || |
1094 | dd->polling_mode) { | ||
1027 | /* | 1095 | /* |
1028 | * faster to use CPU for short transfers | 1096 | * faster to use CPU for short transfers or |
1029 | */ | 1097 | * use cpu when dma is not present. |
1098 | */ | ||
1030 | ctx->flags |= BIT(FLAGS_CPU); | 1099 | ctx->flags |= BIT(FLAGS_CPU); |
1031 | } | 1100 | } |
1032 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { | 1101 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { |
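
[Review note] Both the update and final paths now make the same CPU-versus-DMA call: anything that fits in one block, or any device running without a DMA channel, stays on the CPU. A small model of that decision — polling_mode here names the new device flag set when channel allocation fails:

    #include <stdio.h>
    #include <stdbool.h>

    /* Short transfers (<= one block) or a DMA-less device use the CPU path. */
    static bool use_cpu_path(unsigned bufcnt, unsigned total, unsigned bs,
                             bool polling_mode)
    {
        return (bufcnt + total <= bs) || polling_mode;
    }

    int main(void)
    {
        printf("%d %d\n",
               use_cpu_path(0, 60, 64, false),    /* 1: fits in one block */
               use_cpu_path(0, 4096, 64, false)); /* 0: worth a DMA setup */
        return 0;
    }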
@@ -1214,6 +1283,16 @@ static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) | |||
1214 | return omap_sham_cra_init_alg(tfm, "md5"); | 1283 | return omap_sham_cra_init_alg(tfm, "md5"); |
1215 | } | 1284 | } |
1216 | 1285 | ||
1286 | static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm) | ||
1287 | { | ||
1288 | return omap_sham_cra_init_alg(tfm, "sha384"); | ||
1289 | } | ||
1290 | |||
1291 | static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm) | ||
1292 | { | ||
1293 | return omap_sham_cra_init_alg(tfm, "sha512"); | ||
1294 | } | ||
1295 | |||
1217 | static void omap_sham_cra_exit(struct crypto_tfm *tfm) | 1296 | static void omap_sham_cra_exit(struct crypto_tfm *tfm) |
1218 | { | 1297 | { |
1219 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); | 1298 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); |
@@ -1422,6 +1501,101 @@ static struct ahash_alg algs_sha224_sha256[] = { | |||
1422 | }, | 1501 | }, |
1423 | }; | 1502 | }; |
1424 | 1503 | ||
1504 | static struct ahash_alg algs_sha384_sha512[] = { | ||
1505 | { | ||
1506 | .init = omap_sham_init, | ||
1507 | .update = omap_sham_update, | ||
1508 | .final = omap_sham_final, | ||
1509 | .finup = omap_sham_finup, | ||
1510 | .digest = omap_sham_digest, | ||
1511 | .halg.digestsize = SHA384_DIGEST_SIZE, | ||
1512 | .halg.base = { | ||
1513 | .cra_name = "sha384", | ||
1514 | .cra_driver_name = "omap-sha384", | ||
1515 | .cra_priority = 100, | ||
1516 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1517 | CRYPTO_ALG_ASYNC | | ||
1518 | CRYPTO_ALG_NEED_FALLBACK, | ||
1519 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
1520 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | ||
1521 | .cra_alignmask = 0, | ||
1522 | .cra_module = THIS_MODULE, | ||
1523 | .cra_init = omap_sham_cra_init, | ||
1524 | .cra_exit = omap_sham_cra_exit, | ||
1525 | } | ||
1526 | }, | ||
1527 | { | ||
1528 | .init = omap_sham_init, | ||
1529 | .update = omap_sham_update, | ||
1530 | .final = omap_sham_final, | ||
1531 | .finup = omap_sham_finup, | ||
1532 | .digest = omap_sham_digest, | ||
1533 | .halg.digestsize = SHA512_DIGEST_SIZE, | ||
1534 | .halg.base = { | ||
1535 | .cra_name = "sha512", | ||
1536 | .cra_driver_name = "omap-sha512", | ||
1537 | .cra_priority = 100, | ||
1538 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1539 | CRYPTO_ALG_ASYNC | | ||
1540 | CRYPTO_ALG_NEED_FALLBACK, | ||
1541 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
1542 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | ||
1543 | .cra_alignmask = 0, | ||
1544 | .cra_module = THIS_MODULE, | ||
1545 | .cra_init = omap_sham_cra_init, | ||
1546 | .cra_exit = omap_sham_cra_exit, | ||
1547 | } | ||
1548 | }, | ||
1549 | { | ||
1550 | .init = omap_sham_init, | ||
1551 | .update = omap_sham_update, | ||
1552 | .final = omap_sham_final, | ||
1553 | .finup = omap_sham_finup, | ||
1554 | .digest = omap_sham_digest, | ||
1555 | .setkey = omap_sham_setkey, | ||
1556 | .halg.digestsize = SHA384_DIGEST_SIZE, | ||
1557 | .halg.base = { | ||
1558 | .cra_name = "hmac(sha384)", | ||
1559 | .cra_driver_name = "omap-hmac-sha384", | ||
1560 | .cra_priority = 100, | ||
1561 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1562 | CRYPTO_ALG_ASYNC | | ||
1563 | CRYPTO_ALG_NEED_FALLBACK, | ||
1564 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
1565 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | ||
1566 | sizeof(struct omap_sham_hmac_ctx), | ||
1567 | .cra_alignmask = OMAP_ALIGN_MASK, | ||
1568 | .cra_module = THIS_MODULE, | ||
1569 | .cra_init = omap_sham_cra_sha384_init, | ||
1570 | .cra_exit = omap_sham_cra_exit, | ||
1571 | } | ||
1572 | }, | ||
1573 | { | ||
1574 | .init = omap_sham_init, | ||
1575 | .update = omap_sham_update, | ||
1576 | .final = omap_sham_final, | ||
1577 | .finup = omap_sham_finup, | ||
1578 | .digest = omap_sham_digest, | ||
1579 | .setkey = omap_sham_setkey, | ||
1580 | .halg.digestsize = SHA512_DIGEST_SIZE, | ||
1581 | .halg.base = { | ||
1582 | .cra_name = "hmac(sha512)", | ||
1583 | .cra_driver_name = "omap-hmac-sha512", | ||
1584 | .cra_priority = 100, | ||
1585 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
1586 | CRYPTO_ALG_ASYNC | | ||
1587 | CRYPTO_ALG_NEED_FALLBACK, | ||
1588 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
1589 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | ||
1590 | sizeof(struct omap_sham_hmac_ctx), | ||
1591 | .cra_alignmask = OMAP_ALIGN_MASK, | ||
1592 | .cra_module = THIS_MODULE, | ||
1593 | .cra_init = omap_sham_cra_sha512_init, | ||
1594 | .cra_exit = omap_sham_cra_exit, | ||
1595 | } | ||
1596 | }, | ||
1597 | }; | ||
1598 | |||
1425 | static void omap_sham_done_task(unsigned long data) | 1599 | static void omap_sham_done_task(unsigned long data) |
1426 | { | 1600 | { |
1427 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | 1601 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; |
@@ -1433,8 +1607,12 @@ static void omap_sham_done_task(unsigned long data) | |||
1433 | } | 1607 | } |
1434 | 1608 | ||
1435 | if (test_bit(FLAGS_CPU, &dd->flags)) { | 1609 | if (test_bit(FLAGS_CPU, &dd->flags)) { |
1436 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) | 1610 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { |
1437 | goto finish; | 1611 | /* hash or semi-hash ready */ |
1612 | err = omap_sham_update_cpu(dd); | ||
1613 | if (err != -EINPROGRESS) | ||
1614 | goto finish; | ||
1615 | } | ||
1438 | } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { | 1616 | } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { |
1439 | if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { | 1617 | if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { |
1440 | omap_sham_update_dma_stop(dd); | 1618 | omap_sham_update_dma_stop(dd); |
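
[Review note] With the up-front poll removed from xmit_cpu, the completion tasklet now re-enters omap_sham_update_cpu() on each OUTPUT_READY event and only falls through to finish once it stops returning -EINPROGRESS. A simplified standalone sketch of that drive loop — update_cpu here is a toy stand-in that consumes one block per event:

    #include <stdio.h>

    #define EINPROGRESS 115

    /* Toy stand-in: returns -EINPROGRESS while block-sized chunks remain. */
    static int update_cpu(int *blocks_left)
    {
        if (*blocks_left > 0) {
            (*blocks_left)--;
            return -EINPROGRESS;
        }
        return 0; /* digest complete */
    }

    int main(void)
    {
        int blocks = 3, err;

        /* Each output-ready event pushes one more block into the engine. */
        do {
            err = update_cpu(&blocks);
            printf("event handled, err=%d\n", err);
        } while (err == -EINPROGRESS);
        return 0;
    }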
@@ -1548,11 +1726,54 @@ static const struct omap_sham_pdata omap_sham_pdata_omap4 = { | |||
1548 | .poll_irq = omap_sham_poll_irq_omap4, | 1726 | .poll_irq = omap_sham_poll_irq_omap4, |
1549 | .intr_hdlr = omap_sham_irq_omap4, | 1727 | .intr_hdlr = omap_sham_irq_omap4, |
1550 | .idigest_ofs = 0x020, | 1728 | .idigest_ofs = 0x020, |
1729 | .odigest_ofs = 0x0, | ||
1551 | .din_ofs = 0x080, | 1730 | .din_ofs = 0x080, |
1552 | .digcnt_ofs = 0x040, | 1731 | .digcnt_ofs = 0x040, |
1553 | .rev_ofs = 0x100, | 1732 | .rev_ofs = 0x100, |
1554 | .mask_ofs = 0x110, | 1733 | .mask_ofs = 0x110, |
1555 | .sysstatus_ofs = 0x114, | 1734 | .sysstatus_ofs = 0x114, |
1735 | .mode_ofs = 0x44, | ||
1736 | .length_ofs = 0x48, | ||
1737 | .major_mask = 0x0700, | ||
1738 | .major_shift = 8, | ||
1739 | .minor_mask = 0x003f, | ||
1740 | .minor_shift = 0, | ||
1741 | }; | ||
1742 | |||
1743 | static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = { | ||
1744 | { | ||
1745 | .algs_list = algs_sha1_md5, | ||
1746 | .size = ARRAY_SIZE(algs_sha1_md5), | ||
1747 | }, | ||
1748 | { | ||
1749 | .algs_list = algs_sha224_sha256, | ||
1750 | .size = ARRAY_SIZE(algs_sha224_sha256), | ||
1751 | }, | ||
1752 | { | ||
1753 | .algs_list = algs_sha384_sha512, | ||
1754 | .size = ARRAY_SIZE(algs_sha384_sha512), | ||
1755 | }, | ||
1756 | }; | ||
1757 | |||
1758 | static const struct omap_sham_pdata omap_sham_pdata_omap5 = { | ||
1759 | .algs_info = omap_sham_algs_info_omap5, | ||
1760 | .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5), | ||
1761 | .flags = BIT(FLAGS_AUTO_XOR), | ||
1762 | .digest_size = SHA512_DIGEST_SIZE, | ||
1763 | .copy_hash = omap_sham_copy_hash_omap4, | ||
1764 | .write_ctrl = omap_sham_write_ctrl_omap4, | ||
1765 | .trigger = omap_sham_trigger_omap4, | ||
1766 | .poll_irq = omap_sham_poll_irq_omap4, | ||
1767 | .intr_hdlr = omap_sham_irq_omap4, | ||
1768 | .idigest_ofs = 0x240, | ||
1769 | .odigest_ofs = 0x200, | ||
1770 | .din_ofs = 0x080, | ||
1771 | .digcnt_ofs = 0x280, | ||
1772 | .rev_ofs = 0x100, | ||
1773 | .mask_ofs = 0x110, | ||
1774 | .sysstatus_ofs = 0x114, | ||
1775 | .mode_ofs = 0x284, | ||
1776 | .length_ofs = 0x288, | ||
1556 | .major_mask = 0x0700, | 1777 | .major_mask = 0x0700, |
1557 | .major_shift = 8, | 1778 | .major_shift = 8, |
1558 | .minor_mask = 0x003f, | 1779 | .minor_mask = 0x003f, |
@@ -1568,6 +1789,10 @@ static const struct of_device_id omap_sham_of_match[] = { | |||
1568 | .compatible = "ti,omap4-sham", | 1789 | .compatible = "ti,omap4-sham", |
1569 | .data = &omap_sham_pdata_omap4, | 1790 | .data = &omap_sham_pdata_omap4, |
1570 | }, | 1791 | }, |
1792 | { | ||
1793 | .compatible = "ti,omap5-sham", | ||
1794 | .data = &omap_sham_pdata_omap5, | ||
1795 | }, | ||
1571 | {}, | 1796 | {}, |
1572 | }; | 1797 | }; |
1573 | MODULE_DEVICE_TABLE(of, omap_sham_of_match); | 1798 | MODULE_DEVICE_TABLE(of, omap_sham_of_match); |
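
[Review note] The new "ti,omap5-sham" entry works through the standard of_device_id mechanism: the match's .data pointer hands the probe routine the per-SoC register layout. A hedged kernel-style sketch of the pattern — my_pdata_* and my_probe are illustrative names, and the real pdata structs carry far more than an int:

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    /* Illustrative per-SoC data; the driver's pdata structs are richer. */
    static const int my_pdata_omap4 = 4;
    static const int my_pdata_omap5 = 5;

    static const struct of_device_id my_of_match[] = {
            { .compatible = "ti,omap4-sham", .data = &my_pdata_omap4 },
            { .compatible = "ti,omap5-sham", .data = &my_pdata_omap5 },
            {},
    };
    MODULE_DEVICE_TABLE(of, my_of_match);

    static int my_probe(struct platform_device *pdev)
    {
            const struct of_device_id *match;

            match = of_match_device(my_of_match, &pdev->dev);
            if (!match)
                    return -ENODEV;
            /* match->data is &my_pdata_omap4 or &my_pdata_omap5 here */
            return 0;
    }

MODULE_DEVICE_TABLE is also what lets udev auto-load the module when a matching device-tree node is present.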
@@ -1667,7 +1892,7 @@ static int omap_sham_probe(struct platform_device *pdev) | |||
1667 | int err, i, j; | 1892 | int err, i, j; |
1668 | u32 rev; | 1893 | u32 rev; |
1669 | 1894 | ||
1670 | dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); | 1895 | dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL); |
1671 | if (dd == NULL) { | 1896 | if (dd == NULL) { |
1672 | dev_err(dev, "unable to alloc data struct.\n"); | 1897 | dev_err(dev, "unable to alloc data struct.\n"); |
1673 | err = -ENOMEM; | 1898 | err = -ENOMEM; |
@@ -1684,20 +1909,21 @@ static int omap_sham_probe(struct platform_device *pdev) | |||
1684 | err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) : | 1909 | err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) : |
1685 | omap_sham_get_res_pdev(dd, pdev, &res); | 1910 | omap_sham_get_res_pdev(dd, pdev, &res); |
1686 | if (err) | 1911 | if (err) |
1687 | goto res_err; | 1912 | goto data_err; |
1688 | 1913 | ||
1689 | dd->io_base = devm_ioremap_resource(dev, &res); | 1914 | dd->io_base = devm_ioremap_resource(dev, &res); |
1690 | if (IS_ERR(dd->io_base)) { | 1915 | if (IS_ERR(dd->io_base)) { |
1691 | err = PTR_ERR(dd->io_base); | 1916 | err = PTR_ERR(dd->io_base); |
1692 | goto res_err; | 1917 | goto data_err; |
1693 | } | 1918 | } |
1694 | dd->phys_base = res.start; | 1919 | dd->phys_base = res.start; |
1695 | 1920 | ||
1696 | err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW, | 1921 | err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr, |
1697 | dev_name(dev), dd); | 1922 | IRQF_TRIGGER_NONE, dev_name(dev), dd); |
1698 | if (err) { | 1923 | if (err) { |
1699 | dev_err(dev, "unable to request irq.\n"); | 1924 | dev_err(dev, "unable to request irq %d, err = %d\n", |
1700 | goto res_err; | 1925 | dd->irq, err); |
1926 | goto data_err; | ||
1701 | } | 1927 | } |
1702 | 1928 | ||
1703 | dma_cap_zero(mask); | 1929 | dma_cap_zero(mask); |
@@ -1706,10 +1932,8 @@ static int omap_sham_probe(struct platform_device *pdev) | |||
1706 | dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, | 1932 | dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, |
1707 | &dd->dma, dev, "rx"); | 1933 | &dd->dma, dev, "rx"); |
1708 | if (!dd->dma_lch) { | 1934 | if (!dd->dma_lch) { |
1709 | dev_err(dev, "unable to obtain RX DMA engine channel %u\n", | 1935 | dd->polling_mode = 1; |
1710 | dd->dma); | 1936 | dev_dbg(dev, "using polling mode instead of dma\n"); |
1711 | err = -ENXIO; | ||
1712 | goto dma_err; | ||
1713 | } | 1937 | } |
1714 | 1938 | ||
1715 | dd->flags |= dd->pdata->flags; | 1939 | dd->flags |= dd->pdata->flags; |
@@ -1747,11 +1971,6 @@ err_algs: | |||
1747 | &dd->pdata->algs_info[i].algs_list[j]); | 1971 | &dd->pdata->algs_info[i].algs_list[j]); |
1748 | pm_runtime_disable(dev); | 1972 | pm_runtime_disable(dev); |
1749 | dma_release_channel(dd->dma_lch); | 1973 | dma_release_channel(dd->dma_lch); |
1750 | dma_err: | ||
1751 | free_irq(dd->irq, dd); | ||
1752 | res_err: | ||
1753 | kfree(dd); | ||
1754 | dd = NULL; | ||
1755 | data_err: | 1974 | data_err: |
1756 | dev_err(dev, "initialization failed.\n"); | 1975 | dev_err(dev, "initialization failed.\n"); |
1757 | 1976 | ||
@@ -1776,9 +1995,6 @@ static int omap_sham_remove(struct platform_device *pdev) | |||
1776 | tasklet_kill(&dd->done_task); | 1995 | tasklet_kill(&dd->done_task); |
1777 | pm_runtime_disable(&pdev->dev); | 1996 | pm_runtime_disable(&pdev->dev); |
1778 | dma_release_channel(dd->dma_lch); | 1997 | dma_release_channel(dd->dma_lch); |
1779 | free_irq(dd->irq, dd); | ||
1780 | kfree(dd); | ||
1781 | dd = NULL; | ||
1782 | 1998 | ||
1783 | return 0; | 1999 | return 0; |
1784 | } | 2000 | } |
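
[Review note] The omap-sham probe/remove changes above are a straight conversion to managed (devm_*) resources: once the allocation, ioremap, and IRQ are device-managed, the dma_err/res_err unwind labels and the manual free_irq/kfree in remove() become dead code and can go. A hedged kernel-style sketch of the resulting shape — my_probe and my_irq_handler are illustrative names:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int my_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct resource *res;
            void __iomem *base;
            void *priv;
            int irq, err;

            priv = devm_kzalloc(dev, 64, GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;
            err = devm_request_irq(dev, irq, my_irq_handler,
                                   IRQF_TRIGGER_NONE, dev_name(dev), priv);
            if (err)
                    return err;

            /* All of the above is released automatically on failure/detach. */
            return 0;
    }

Note the same hunk also downgrades a missing DMA channel from a fatal -ENXIO to dd->polling_mode = 1, which is what the CPU-path changes earlier in the file exist to support.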
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index c3dc1c04a5df..d7bb8bac36e9 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
@@ -417,7 +417,7 @@ static void sahara_aes_done_task(unsigned long data) | |||
417 | dev->req->base.complete(&dev->req->base, dev->error); | 417 | dev->req->base.complete(&dev->req->base, dev->error); |
418 | } | 418 | } |
419 | 419 | ||
420 | void sahara_watchdog(unsigned long data) | 420 | static void sahara_watchdog(unsigned long data) |
421 | { | 421 | { |
422 | struct sahara_dev *dev = (struct sahara_dev *)data; | 422 | struct sahara_dev *dev = (struct sahara_dev *)data; |
423 | unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); | 423 | unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); |
@@ -955,7 +955,7 @@ static int sahara_probe(struct platform_device *pdev) | |||
955 | dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, | 955 | dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, |
956 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), | 956 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), |
957 | &dev->hw_phys_link[0], GFP_KERNEL); | 957 | &dev->hw_phys_link[0], GFP_KERNEL); |
958 | if (!dev->hw_link) { | 958 | if (!dev->hw_link[0]) { |
959 | dev_err(&pdev->dev, "Could not allocate hw links\n"); | 959 | dev_err(&pdev->dev, "Could not allocate hw links\n"); |
960 | err = -ENOMEM; | 960 | err = -ENOMEM; |
961 | goto err_link; | 961 | goto err_link; |
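
[Review note] The sahara fix is the classic array-member NULL check: hw_link is an array field, so !dev->hw_link tests the (always non-NULL) decayed array address instead of the element the allocator just filled in. A tiny standalone demonstration of why the old test could never fire:

    #include <stdio.h>
    #include <stdlib.h>

    struct dev { int *hw_link[8]; };

    int main(void)
    {
        struct dev d = { { 0 } };

        d.hw_link[0] = malloc(sizeof(int));

        /* An array field decays to a non-NULL pointer: always prints 0. */
        printf("!d.hw_link    -> %d\n", !d.hw_link);
        /* The element returned by the allocator is what must be checked. */
        printf("!d.hw_link[0] -> %d\n", !d.hw_link[0]);

        free(d.hw_link[0]);
        return 0;
    }

Modern compilers flag the first test with an "address will always evaluate to true" warning, which is how bugs of this shape are usually caught.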
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c index 85ea7525fa36..2d58da972ae2 100644 --- a/drivers/crypto/tegra-aes.c +++ b/drivers/crypto/tegra-aes.c | |||
@@ -275,7 +275,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr, | |||
275 | value = aes_readl(dd, TEGRA_AES_INTR_STATUS); | 275 | value = aes_readl(dd, TEGRA_AES_INTR_STATUS); |
276 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; | 276 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; |
277 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; | 277 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; |
278 | } while (eng_busy & (!icq_empty)); | 278 | } while (eng_busy && !icq_empty); |
279 | aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR); | 279 | aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR); |
280 | } | 280 | } |
281 | 281 | ||
@@ -365,7 +365,7 @@ static int aes_set_key(struct tegra_aes_dev *dd) | |||
365 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; | 365 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; |
366 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; | 366 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; |
367 | dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD; | 367 | dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD; |
368 | } while (eng_busy & (!icq_empty) & dma_busy); | 368 | } while (eng_busy && !icq_empty && dma_busy); |
369 | 369 | ||
370 | /* settable command to get key into internal registers */ | 370 | /* settable command to get key into internal registers */ |
371 | value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT | | 371 | value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT | |
@@ -379,7 +379,7 @@ static int aes_set_key(struct tegra_aes_dev *dd) | |||
379 | value = aes_readl(dd, TEGRA_AES_INTR_STATUS); | 379 | value = aes_readl(dd, TEGRA_AES_INTR_STATUS); |
380 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; | 380 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; |
381 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; | 381 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; |
382 | } while (eng_busy & (!icq_empty)); | 382 | } while (eng_busy && !icq_empty); |
383 | 383 | ||
384 | return 0; | 384 | return 0; |
385 | } | 385 | } |
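
[Review note] The three tegra-aes hunks fix the same bitwise-versus-logical mixup: eng_busy & (!icq_empty) ANDs a multi-bit status field with a 0/1 logical value, so a busy flag in any bit other than bit 0 makes the wait loop exit early. A standalone demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned eng_busy = 0x80;  /* busy flag in a high bit of the status */
        unsigned icq_empty = 0;    /* command queue not yet empty */

        /* Bitwise: 0x80 & 1 == 0, so the wait loop would wrongly stop. */
        printf("bitwise: %u\n", eng_busy & (!icq_empty));
        /* Logical: still busy and not empty, so keep spinning. */
        printf("logical: %d\n", eng_busy && !icq_empty);
        return 0;
    }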
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 33693d966b6a..1c73f4fbc252 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c | |||
@@ -11,6 +11,8 @@ | |||
11 | * License terms: GNU General Public License (GPL) version 2 | 11 | * License terms: GNU General Public License (GPL) version 2 |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define pr_fmt(fmt) "hashX hashX: " fmt | ||
15 | |||
14 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
15 | #include <linux/device.h> | 17 | #include <linux/device.h> |
16 | #include <linux/err.h> | 18 | #include <linux/err.h> |
@@ -35,8 +37,6 @@ | |||
35 | 37 | ||
36 | #include "hash_alg.h" | 38 | #include "hash_alg.h" |
37 | 39 | ||
38 | #define DEV_DBG_NAME "hashX hashX:" | ||
39 | |||
40 | static int hash_mode; | 40 | static int hash_mode; |
41 | module_param(hash_mode, int, 0); | 41 | module_param(hash_mode, int, 0); |
42 | MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); | 42 | MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); |
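
[Review note] Replacing the hand-rolled DEV_DBG_NAME prefix with pr_fmt lets every pr_* call in the file pick up the prefix automatically; the define has to appear before the first include that pulls in printk.h. A hedged kernel-style sketch — demo_init is an illustrative name, and pr_debug output appears only with DEBUG or dynamic debug enabled:

    /* pr_fmt must be defined before linux/kernel.h (which pulls in printk.h). */
    #define pr_fmt(fmt) "hashX hashX: " fmt

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init demo_init(void)
    {
            /* with debugging enabled, emits:
             * "hashX hashX: demo_init: DMA mode for data size >= 4"
             */
            pr_debug("%s: DMA mode for data size >= %d\n", __func__, 4);
            return 0;
    }
    module_init(demo_init);
    MODULE_LICENSE("GPL");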
@@ -44,13 +44,13 @@ MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); | |||
44 | /** | 44 | /** |
45 | * Pre-calculated empty message digests. | 45 | * Pre-calculated empty message digests. |
46 | */ | 46 | */ |
47 | static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { | 47 | static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { |
48 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, | 48 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, |
49 | 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, | 49 | 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, |
50 | 0xaf, 0xd8, 0x07, 0x09 | 50 | 0xaf, 0xd8, 0x07, 0x09 |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { | 53 | static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { |
54 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, | 54 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, |
55 | 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, | 55 | 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, |
56 | 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, | 56 | 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, |
@@ -58,14 +58,14 @@ static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { | |||
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* HMAC-SHA1, no key */ | 60 | /* HMAC-SHA1, no key */ |
61 | static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { | 61 | static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { |
62 | 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, | 62 | 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, |
63 | 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, | 63 | 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, |
64 | 0x70, 0x69, 0x0e, 0x1d | 64 | 0x70, 0x69, 0x0e, 0x1d |
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* HMAC-SHA256, no key */ | 67 | /* HMAC-SHA256, no key */ |
68 | static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { | 68 | static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { |
69 | 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, | 69 | 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, |
70 | 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, | 70 | 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, |
71 | 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, | 71 | 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, |
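
[Review note] Constifying the precomputed empty-message digests moves them into read-only data and turns any accidental write into a compile error; the driver only ever memcpy()s out of them. A minimal standalone example of the same change — the array below is truncated to its first four bytes for illustration, not the full digest:

    #include <stdio.h>
    #include <string.h>

    /* SHA-1 of the empty message, as in the driver (first 4 bytes shown). */
    static const unsigned char zero_message_hash_sha1[20] = {
        0xda, 0x39, 0xa3, 0xee, /* ... */
    };

    int main(void)
    {
        unsigned char out[20];

        memcpy(out, zero_message_hash_sha1, sizeof(out)); /* reads are fine */
        /* zero_message_hash_sha1[0] = 0;  <- would now fail to compile */
        printf("first byte: 0x%02x\n", out[0]);
        return 0;
    }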
@@ -97,7 +97,7 @@ static struct hash_driver_data driver_data; | |||
97 | * | 97 | * |
98 | */ | 98 | */ |
99 | static void hash_messagepad(struct hash_device_data *device_data, | 99 | static void hash_messagepad(struct hash_device_data *device_data, |
100 | const u32 *message, u8 index_bytes); | 100 | const u32 *message, u8 index_bytes); |
101 | 101 | ||
102 | /** | 102 | /** |
103 | * release_hash_device - Releases a previously allocated hash device. | 103 | * release_hash_device - Releases a previously allocated hash device. |
@@ -119,7 +119,7 @@ static void release_hash_device(struct hash_device_data *device_data) | |||
119 | } | 119 | } |
120 | 120 | ||
121 | static void hash_dma_setup_channel(struct hash_device_data *device_data, | 121 | static void hash_dma_setup_channel(struct hash_device_data *device_data, |
122 | struct device *dev) | 122 | struct device *dev) |
123 | { | 123 | { |
124 | struct hash_platform_data *platform_data = dev->platform_data; | 124 | struct hash_platform_data *platform_data = dev->platform_data; |
125 | struct dma_slave_config conf = { | 125 | struct dma_slave_config conf = { |
@@ -127,7 +127,7 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, | |||
127 | .dst_addr = device_data->phybase + HASH_DMA_FIFO, | 127 | .dst_addr = device_data->phybase + HASH_DMA_FIFO, |
128 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, | 128 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, |
129 | .dst_maxburst = 16, | 129 | .dst_maxburst = 16, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | dma_cap_zero(device_data->dma.mask); | 132 | dma_cap_zero(device_data->dma.mask); |
133 | dma_cap_set(DMA_SLAVE, device_data->dma.mask); | 133 | dma_cap_set(DMA_SLAVE, device_data->dma.mask); |
@@ -135,8 +135,8 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, | |||
135 | device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; | 135 | device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; |
136 | device_data->dma.chan_mem2hash = | 136 | device_data->dma.chan_mem2hash = |
137 | dma_request_channel(device_data->dma.mask, | 137 | dma_request_channel(device_data->dma.mask, |
138 | platform_data->dma_filter, | 138 | platform_data->dma_filter, |
139 | device_data->dma.cfg_mem2hash); | 139 | device_data->dma.cfg_mem2hash); |
140 | 140 | ||
141 | dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); | 141 | dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); |
142 | 142 | ||
@@ -145,21 +145,21 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data, | |||
145 | 145 | ||
146 | static void hash_dma_callback(void *data) | 146 | static void hash_dma_callback(void *data) |
147 | { | 147 | { |
148 | struct hash_ctx *ctx = (struct hash_ctx *) data; | 148 | struct hash_ctx *ctx = data; |
149 | 149 | ||
150 | complete(&ctx->device->dma.complete); | 150 | complete(&ctx->device->dma.complete); |
151 | } | 151 | } |
152 | 152 | ||
153 | static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, | 153 | static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, |
154 | int len, enum dma_data_direction direction) | 154 | int len, enum dma_data_direction direction) |
155 | { | 155 | { |
156 | struct dma_async_tx_descriptor *desc = NULL; | 156 | struct dma_async_tx_descriptor *desc = NULL; |
157 | struct dma_chan *channel = NULL; | 157 | struct dma_chan *channel = NULL; |
158 | dma_cookie_t cookie; | 158 | dma_cookie_t cookie; |
159 | 159 | ||
160 | if (direction != DMA_TO_DEVICE) { | 160 | if (direction != DMA_TO_DEVICE) { |
161 | dev_err(ctx->device->dev, "[%s] Invalid DMA direction", | 161 | dev_err(ctx->device->dev, "%s: Invalid DMA direction\n", |
162 | __func__); | 162 | __func__); |
163 | return -EFAULT; | 163 | return -EFAULT; |
164 | } | 164 | } |
165 | 165 | ||
@@ -172,20 +172,19 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, | |||
172 | direction); | 172 | direction); |
173 | 173 | ||
174 | if (!ctx->device->dma.sg_len) { | 174 | if (!ctx->device->dma.sg_len) { |
175 | dev_err(ctx->device->dev, | 175 | dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n", |
176 | "[%s]: Could not map the sg list (TO_DEVICE)", | 176 | __func__); |
177 | __func__); | ||
178 | return -EFAULT; | 177 | return -EFAULT; |
179 | } | 178 | } |
180 | 179 | ||
181 | dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " | 180 | dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n", |
182 | "(TO_DEVICE)", __func__); | 181 | __func__); |
183 | desc = dmaengine_prep_slave_sg(channel, | 182 | desc = dmaengine_prep_slave_sg(channel, |
184 | ctx->device->dma.sg, ctx->device->dma.sg_len, | 183 | ctx->device->dma.sg, ctx->device->dma.sg_len, |
185 | direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); | 184 | direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); |
186 | if (!desc) { | 185 | if (!desc) { |
187 | dev_err(ctx->device->dev, | 186 | dev_err(ctx->device->dev, |
188 | "[%s]: device_prep_slave_sg() failed!", __func__); | 187 | "%s: device_prep_slave_sg() failed!\n", __func__); |
189 | return -EFAULT; | 188 | return -EFAULT; |
190 | } | 189 | } |
191 | 190 | ||
@@ -205,17 +204,16 @@ static void hash_dma_done(struct hash_ctx *ctx) | |||
205 | chan = ctx->device->dma.chan_mem2hash; | 204 | chan = ctx->device->dma.chan_mem2hash; |
206 | dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); | 205 | dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); |
207 | dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, | 206 | dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, |
208 | ctx->device->dma.sg_len, DMA_TO_DEVICE); | 207 | ctx->device->dma.sg_len, DMA_TO_DEVICE); |
209 | |||
210 | } | 208 | } |
211 | 209 | ||
212 | static int hash_dma_write(struct hash_ctx *ctx, | 210 | static int hash_dma_write(struct hash_ctx *ctx, |
213 | struct scatterlist *sg, int len) | 211 | struct scatterlist *sg, int len) |
214 | { | 212 | { |
215 | int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); | 213 | int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); |
216 | if (error) { | 214 | if (error) { |
217 | dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() " | 215 | dev_dbg(ctx->device->dev, |
218 | "failed", __func__); | 216 | "%s: hash_set_dma_transfer() failed\n", __func__); |
219 | return error; | 217 | return error; |
220 | } | 218 | } |
221 | 219 | ||
@@ -245,19 +243,18 @@ static int get_empty_message_digest( | |||
245 | if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { | 243 | if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { |
246 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { | 244 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { |
247 | memcpy(zero_hash, &zero_message_hash_sha1[0], | 245 | memcpy(zero_hash, &zero_message_hash_sha1[0], |
248 | SHA1_DIGEST_SIZE); | 246 | SHA1_DIGEST_SIZE); |
249 | *zero_hash_size = SHA1_DIGEST_SIZE; | 247 | *zero_hash_size = SHA1_DIGEST_SIZE; |
250 | *zero_digest = true; | 248 | *zero_digest = true; |
251 | } else if (HASH_ALGO_SHA256 == | 249 | } else if (HASH_ALGO_SHA256 == |
252 | ctx->config.algorithm) { | 250 | ctx->config.algorithm) { |
253 | memcpy(zero_hash, &zero_message_hash_sha256[0], | 251 | memcpy(zero_hash, &zero_message_hash_sha256[0], |
254 | SHA256_DIGEST_SIZE); | 252 | SHA256_DIGEST_SIZE); |
255 | *zero_hash_size = SHA256_DIGEST_SIZE; | 253 | *zero_hash_size = SHA256_DIGEST_SIZE; |
256 | *zero_digest = true; | 254 | *zero_digest = true; |
257 | } else { | 255 | } else { |
258 | dev_err(device_data->dev, "[%s] " | 256 | dev_err(device_data->dev, "%s: Incorrect algorithm!\n", |
259 | "Incorrect algorithm!" | 257 | __func__); |
260 | , __func__); | ||
261 | ret = -EINVAL; | 258 | ret = -EINVAL; |
262 | goto out; | 259 | goto out; |
263 | } | 260 | } |
@@ -265,25 +262,24 @@ static int get_empty_message_digest( | |||
265 | if (!ctx->keylen) { | 262 | if (!ctx->keylen) { |
266 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { | 263 | if (HASH_ALGO_SHA1 == ctx->config.algorithm) { |
267 | memcpy(zero_hash, &zero_message_hmac_sha1[0], | 264 | memcpy(zero_hash, &zero_message_hmac_sha1[0], |
268 | SHA1_DIGEST_SIZE); | 265 | SHA1_DIGEST_SIZE); |
269 | *zero_hash_size = SHA1_DIGEST_SIZE; | 266 | *zero_hash_size = SHA1_DIGEST_SIZE; |
270 | *zero_digest = true; | 267 | *zero_digest = true; |
271 | } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { | 268 | } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { |
272 | memcpy(zero_hash, &zero_message_hmac_sha256[0], | 269 | memcpy(zero_hash, &zero_message_hmac_sha256[0], |
273 | SHA256_DIGEST_SIZE); | 270 | SHA256_DIGEST_SIZE); |
274 | *zero_hash_size = SHA256_DIGEST_SIZE; | 271 | *zero_hash_size = SHA256_DIGEST_SIZE; |
275 | *zero_digest = true; | 272 | *zero_digest = true; |
276 | } else { | 273 | } else { |
277 | dev_err(device_data->dev, "[%s] " | 274 | dev_err(device_data->dev, "%s: Incorrect algorithm!\n", |
278 | "Incorrect algorithm!" | 275 | __func__); |
279 | , __func__); | ||
280 | ret = -EINVAL; | 276 | ret = -EINVAL; |
281 | goto out; | 277 | goto out; |
282 | } | 278 | } |
283 | } else { | 279 | } else { |
284 | dev_dbg(device_data->dev, "[%s] Continue hash " | 280 | dev_dbg(device_data->dev, |
285 | "calculation, since hmac key available", | 281 | "%s: Continue hash calculation, since hmac key available\n", |
286 | __func__); | 282 | __func__); |
287 | } | 283 | } |
288 | } | 284 | } |
289 | out: | 285 | out: |
@@ -299,9 +295,8 @@ out: | |||
299 | * This function request for disabling power (regulator) and clock, | 295 | * This function request for disabling power (regulator) and clock, |
300 | * and could also save current hw state. | 296 | * and could also save current hw state. |
301 | */ | 297 | */ |
302 | static int hash_disable_power( | 298 | static int hash_disable_power(struct hash_device_data *device_data, |
303 | struct hash_device_data *device_data, | 299 | bool save_device_state) |
304 | bool save_device_state) | ||
305 | { | 300 | { |
306 | int ret = 0; | 301 | int ret = 0; |
307 | struct device *dev = device_data->dev; | 302 | struct device *dev = device_data->dev; |
@@ -319,7 +314,7 @@ static int hash_disable_power( | |||
319 | clk_disable(device_data->clk); | 314 | clk_disable(device_data->clk); |
320 | ret = regulator_disable(device_data->regulator); | 315 | ret = regulator_disable(device_data->regulator); |
321 | if (ret) | 316 | if (ret) |
322 | dev_err(dev, "[%s] regulator_disable() failed!", __func__); | 317 | dev_err(dev, "%s: regulator_disable() failed!\n", __func__); |
323 | 318 | ||
324 | device_data->power_state = false; | 319 | device_data->power_state = false; |
325 | 320 | ||
@@ -337,9 +332,8 @@ out: | |||
337 | * This function request for enabling power (regulator) and clock, | 332 | * This function request for enabling power (regulator) and clock, |
338 | * and could also restore a previously saved hw state. | 333 | * and could also restore a previously saved hw state. |
339 | */ | 334 | */ |
340 | static int hash_enable_power( | 335 | static int hash_enable_power(struct hash_device_data *device_data, |
341 | struct hash_device_data *device_data, | 336 | bool restore_device_state) |
342 | bool restore_device_state) | ||
343 | { | 337 | { |
344 | int ret = 0; | 338 | int ret = 0; |
345 | struct device *dev = device_data->dev; | 339 | struct device *dev = device_data->dev; |
@@ -348,14 +342,13 @@ static int hash_enable_power( | |||
348 | if (!device_data->power_state) { | 342 | if (!device_data->power_state) { |
349 | ret = regulator_enable(device_data->regulator); | 343 | ret = regulator_enable(device_data->regulator); |
350 | if (ret) { | 344 | if (ret) { |
351 | dev_err(dev, "[%s]: regulator_enable() failed!", | 345 | dev_err(dev, "%s: regulator_enable() failed!\n", |
352 | __func__); | 346 | __func__); |
353 | goto out; | 347 | goto out; |
354 | } | 348 | } |
355 | ret = clk_enable(device_data->clk); | 349 | ret = clk_enable(device_data->clk); |
356 | if (ret) { | 350 | if (ret) { |
357 | dev_err(dev, "[%s]: clk_enable() failed!", | 351 | dev_err(dev, "%s: clk_enable() failed!\n", __func__); |
358 | __func__); | ||
359 | ret = regulator_disable( | 352 | ret = regulator_disable( |
360 | device_data->regulator); | 353 | device_data->regulator); |
361 | goto out; | 354 | goto out; |
@@ -366,8 +359,7 @@ static int hash_enable_power( | |||
366 | if (device_data->restore_dev_state) { | 359 | if (device_data->restore_dev_state) { |
367 | if (restore_device_state) { | 360 | if (restore_device_state) { |
368 | device_data->restore_dev_state = false; | 361 | device_data->restore_dev_state = false; |
369 | hash_resume_state(device_data, | 362 | hash_resume_state(device_data, &device_data->state); |
370 | &device_data->state); | ||
371 | } | 363 | } |
372 | } | 364 | } |
373 | out: | 365 | out: |
@@ -447,7 +439,7 @@ static int hash_get_device_data(struct hash_ctx *ctx, | |||
447 | * spec or due to a bug in the hw. | 439 | * spec or due to a bug in the hw. |
448 | */ | 440 | */ |
449 | static void hash_hw_write_key(struct hash_device_data *device_data, | 441 | static void hash_hw_write_key(struct hash_device_data *device_data, |
450 | const u8 *key, unsigned int keylen) | 442 | const u8 *key, unsigned int keylen) |
451 | { | 443 | { |
452 | u32 word = 0; | 444 | u32 word = 0; |
453 | int nwords = 1; | 445 | int nwords = 1; |
@@ -491,14 +483,14 @@ static void hash_hw_write_key(struct hash_device_data *device_data, | |||
491 | * calculation. | 483 | * calculation. |
492 | */ | 484 | */ |
493 | static int init_hash_hw(struct hash_device_data *device_data, | 485 | static int init_hash_hw(struct hash_device_data *device_data, |
494 | struct hash_ctx *ctx) | 486 | struct hash_ctx *ctx) |
495 | { | 487 | { |
496 | int ret = 0; | 488 | int ret = 0; |
497 | 489 | ||
498 | ret = hash_setconfiguration(device_data, &ctx->config); | 490 | ret = hash_setconfiguration(device_data, &ctx->config); |
499 | if (ret) { | 491 | if (ret) { |
500 | dev_err(device_data->dev, "[%s] hash_setconfiguration() " | 492 | dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n", |
501 | "failed!", __func__); | 493 | __func__); |
502 | return ret; | 494 | return ret; |
503 | } | 495 | } |
504 | 496 | ||
@@ -528,9 +520,8 @@ static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) | |||
528 | size -= sg->length; | 520 | size -= sg->length; |
529 | 521 | ||
530 | /* hash_set_dma_transfer will align last nent */ | 522 | /* hash_set_dma_transfer will align last nent */ |
531 | if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) | 523 | if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) || |
532 | || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && | 524 | (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0)) |
533 | size > 0)) | ||
534 | aligned_data = false; | 525 | aligned_data = false; |
535 | 526 | ||
536 | sg = sg_next(sg); | 527 | sg = sg_next(sg); |
@@ -585,21 +576,17 @@ static int hash_init(struct ahash_request *req) | |||
585 | if (req->nbytes < HASH_DMA_ALIGN_SIZE) { | 576 | if (req->nbytes < HASH_DMA_ALIGN_SIZE) { |
586 | req_ctx->dma_mode = false; /* Don't use DMA */ | 577 | req_ctx->dma_mode = false; /* Don't use DMA */ |
587 | 578 | ||
588 | pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct " | 579 | pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n", |
589 | "to CPU mode for data size < %d", | 580 | __func__, HASH_DMA_ALIGN_SIZE); |
590 | __func__, HASH_DMA_ALIGN_SIZE); | ||
591 | } else { | 581 | } else { |
592 | if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && | 582 | if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && |
593 | hash_dma_valid_data(req->src, | 583 | hash_dma_valid_data(req->src, req->nbytes)) { |
594 | req->nbytes)) { | ||
595 | req_ctx->dma_mode = true; | 584 | req_ctx->dma_mode = true; |
596 | } else { | 585 | } else { |
597 | req_ctx->dma_mode = false; | 586 | req_ctx->dma_mode = false; |
598 | pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use" | 587 | pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n", |
599 | " CPU mode for datalength < %d" | 588 | __func__, |
600 | " or non-aligned data, except " | 589 | HASH_DMA_PERFORMANCE_MIN_SIZE); |
601 | "in last nent", __func__, | ||
602 | HASH_DMA_PERFORMANCE_MIN_SIZE); | ||
603 | } | 590 | } |
604 | } | 591 | } |
605 | } | 592 | } |
@@ -614,9 +601,8 @@ static int hash_init(struct ahash_request *req) | |||
614 | * the HASH hardware. | 601 | * the HASH hardware. |
615 | * | 602 | * |
616 | */ | 603 | */ |
617 | static void hash_processblock( | 604 | static void hash_processblock(struct hash_device_data *device_data, |
618 | struct hash_device_data *device_data, | 605 | const u32 *message, int length) |
619 | const u32 *message, int length) | ||
620 | { | 606 | { |
621 | int len = length / HASH_BYTES_PER_WORD; | 607 | int len = length / HASH_BYTES_PER_WORD; |
622 | /* | 608 | /* |
@@ -641,7 +627,7 @@ static void hash_processblock( | |||
641 | * | 627 | * |
642 | */ | 628 | */ |
643 | static void hash_messagepad(struct hash_device_data *device_data, | 629 | static void hash_messagepad(struct hash_device_data *device_data, |
644 | const u32 *message, u8 index_bytes) | 630 | const u32 *message, u8 index_bytes) |
645 | { | 631 | { |
646 | int nwords = 1; | 632 | int nwords = 1; |
647 | 633 | ||
@@ -666,15 +652,13 @@ static void hash_messagepad(struct hash_device_data *device_data, | |||
666 | 652 | ||
667 | /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ | 653 | /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ |
668 | HASH_SET_NBLW(index_bytes * 8); | 654 | HASH_SET_NBLW(index_bytes * 8); |
669 | dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, | 655 | dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n", |
670 | readl_relaxed(&device_data->base->din), | 656 | __func__, readl_relaxed(&device_data->base->din), |
671 | (int)(readl_relaxed(&device_data->base->str) & | 657 | readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); |
672 | HASH_STR_NBLW_MASK)); | ||
673 | HASH_SET_DCAL; | 658 | HASH_SET_DCAL; |
674 | dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", | 659 | dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n", |
675 | __func__, readl_relaxed(&device_data->base->din), | 660 | __func__, readl_relaxed(&device_data->base->din), |
676 | (int)(readl_relaxed(&device_data->base->str) & | 661 | readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); |
677 | HASH_STR_NBLW_MASK)); | ||
678 | 662 | ||
679 | while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) | 663 | while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) |
680 | cpu_relax(); | 664 | cpu_relax(); |
@@ -704,7 +688,7 @@ static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr) | |||
704 | * @config: Pointer to a configuration structure. | 688 | * @config: Pointer to a configuration structure. |
705 | */ | 689 | */ |
706 | int hash_setconfiguration(struct hash_device_data *device_data, | 690 | int hash_setconfiguration(struct hash_device_data *device_data, |
707 | struct hash_config *config) | 691 | struct hash_config *config) |
708 | { | 692 | { |
709 | int ret = 0; | 693 | int ret = 0; |
710 | 694 | ||
@@ -731,8 +715,8 @@ int hash_setconfiguration(struct hash_device_data *device_data, | |||
731 | break; | 715 | break; |
732 | 716 | ||
733 | default: | 717 | default: |
734 | dev_err(device_data->dev, "[%s] Incorrect algorithm.", | 718 | dev_err(device_data->dev, "%s: Incorrect algorithm\n", |
735 | __func__); | 719 | __func__); |
736 | return -EPERM; | 720 | return -EPERM; |
737 | } | 721 | } |
738 | 722 | ||
@@ -744,23 +728,22 @@ int hash_setconfiguration(struct hash_device_data *device_data, | |||
744 | HASH_CLEAR_BITS(&device_data->base->cr, | 728 | HASH_CLEAR_BITS(&device_data->base->cr, |
745 | HASH_CR_MODE_MASK); | 729 | HASH_CR_MODE_MASK); |
746 | else if (HASH_OPER_MODE_HMAC == config->oper_mode) { | 730 | else if (HASH_OPER_MODE_HMAC == config->oper_mode) { |
747 | HASH_SET_BITS(&device_data->base->cr, | 731 | HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK); |
748 | HASH_CR_MODE_MASK); | ||
749 | if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { | 732 | if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { |
750 | /* Truncate key to blocksize */ | 733 | /* Truncate key to blocksize */ |
751 | dev_dbg(device_data->dev, "[%s] LKEY set", __func__); | 734 | dev_dbg(device_data->dev, "%s: LKEY set\n", __func__); |
752 | HASH_SET_BITS(&device_data->base->cr, | 735 | HASH_SET_BITS(&device_data->base->cr, |
753 | HASH_CR_LKEY_MASK); | 736 | HASH_CR_LKEY_MASK); |
754 | } else { | 737 | } else { |
755 | dev_dbg(device_data->dev, "[%s] LKEY cleared", | 738 | dev_dbg(device_data->dev, "%s: LKEY cleared\n", |
756 | __func__); | 739 | __func__); |
757 | HASH_CLEAR_BITS(&device_data->base->cr, | 740 | HASH_CLEAR_BITS(&device_data->base->cr, |
758 | HASH_CR_LKEY_MASK); | 741 | HASH_CR_LKEY_MASK); |
759 | } | 742 | } |
760 | } else { /* Wrong hash mode */ | 743 | } else { /* Wrong hash mode */ |
761 | ret = -EPERM; | 744 | ret = -EPERM; |
762 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | 745 | dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", |
763 | __func__); | 746 | __func__); |
764 | } | 747 | } |
765 | return ret; | 748 | return ret; |
766 | } | 749 | } |
@@ -793,8 +776,9 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) | |||
793 | } | 776 | } |
794 | 777 | ||
795 | static int hash_process_data(struct hash_device_data *device_data, | 778 | static int hash_process_data(struct hash_device_data *device_data, |
796 | struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, | 779 | struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, |
797 | int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) | 780 | int msg_length, u8 *data_buffer, u8 *buffer, |
781 | u8 *index) | ||
798 | { | 782 | { |
799 | int ret = 0; | 783 | int ret = 0; |
800 | u32 count; | 784 | u32 count; |
@@ -809,24 +793,23 @@ static int hash_process_data(struct hash_device_data *device_data, | |||
809 | msg_length = 0; | 793 | msg_length = 0; |
810 | } else { | 794 | } else { |
811 | if (req_ctx->updated) { | 795 | if (req_ctx->updated) { |
812 | |||
813 | ret = hash_resume_state(device_data, | 796 | ret = hash_resume_state(device_data, |
814 | &device_data->state); | 797 | &device_data->state); |
815 | memmove(req_ctx->state.buffer, | 798 | memmove(req_ctx->state.buffer, |
816 | device_data->state.buffer, | 799 | device_data->state.buffer, |
817 | HASH_BLOCK_SIZE / sizeof(u32)); | 800 | HASH_BLOCK_SIZE / sizeof(u32)); |
818 | if (ret) { | 801 | if (ret) { |
819 | dev_err(device_data->dev, "[%s] " | 802 | dev_err(device_data->dev, |
820 | "hash_resume_state()" | 803 | "%s: hash_resume_state() failed!\n", |
821 | " failed!", __func__); | 804 | __func__); |
822 | goto out; | 805 | goto out; |
823 | } | 806 | } |
824 | } else { | 807 | } else { |
825 | ret = init_hash_hw(device_data, ctx); | 808 | ret = init_hash_hw(device_data, ctx); |
826 | if (ret) { | 809 | if (ret) { |
827 | dev_err(device_data->dev, "[%s] " | 810 | dev_err(device_data->dev, |
828 | "init_hash_hw()" | 811 | "%s: init_hash_hw() failed!\n", |
829 | " failed!", __func__); | 812 | __func__); |
830 | goto out; | 813 | goto out; |
831 | } | 814 | } |
832 | req_ctx->updated = 1; | 815 | req_ctx->updated = 1; |
@@ -838,22 +821,21 @@ static int hash_process_data(struct hash_device_data *device_data, | |||
838 | * HW peripheral, otherwise we first copy data | 821 | * HW peripheral, otherwise we first copy data |
839 | * to a local buffer | 822 | * to a local buffer |
840 | */ | 823 | */ |
841 | if ((0 == (((u32)data_buffer) % 4)) | 824 | if ((0 == (((u32)data_buffer) % 4)) && |
842 | && (0 == *index)) | 825 | (0 == *index)) |
843 | hash_processblock(device_data, | 826 | hash_processblock(device_data, |
844 | (const u32 *) | 827 | (const u32 *)data_buffer, |
845 | data_buffer, HASH_BLOCK_SIZE); | 828 | HASH_BLOCK_SIZE); |
846 | else { | 829 | else { |
847 | for (count = 0; count < | 830 | for (count = 0; |
848 | (u32)(HASH_BLOCK_SIZE - | 831 | count < (u32)(HASH_BLOCK_SIZE - *index); |
849 | *index); | 832 | count++) { |
850 | count++) { | ||
851 | buffer[*index + count] = | 833 | buffer[*index + count] = |
852 | *(data_buffer + count); | 834 | *(data_buffer + count); |
853 | } | 835 | } |
854 | hash_processblock(device_data, | 836 | hash_processblock(device_data, |
855 | (const u32 *)buffer, | 837 | (const u32 *)buffer, |
856 | HASH_BLOCK_SIZE); | 838 | HASH_BLOCK_SIZE); |
857 | } | 839 | } |
858 | hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); | 840 | hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); |
859 | data_buffer += (HASH_BLOCK_SIZE - *index); | 841 | data_buffer += (HASH_BLOCK_SIZE - *index); |
@@ -865,12 +847,11 @@ static int hash_process_data(struct hash_device_data *device_data, | |||
865 | &device_data->state); | 847 | &device_data->state); |
866 | 848 | ||
867 | memmove(device_data->state.buffer, | 849 | memmove(device_data->state.buffer, |
868 | req_ctx->state.buffer, | 850 | req_ctx->state.buffer, |
869 | HASH_BLOCK_SIZE / sizeof(u32)); | 851 | HASH_BLOCK_SIZE / sizeof(u32)); |
870 | if (ret) { | 852 | if (ret) { |
871 | dev_err(device_data->dev, "[%s] " | 853 | dev_err(device_data->dev, "%s: hash_save_state() failed!\n", |
872 | "hash_save_state()" | 854 | __func__); |
873 | " failed!", __func__); | ||
874 | goto out; | 855 | goto out; |
875 | } | 856 | } |
876 | } | 857 | } |
@@ -898,25 +879,24 @@ static int hash_dma_final(struct ahash_request *req) | |||
898 | if (ret) | 879 | if (ret) |
899 | return ret; | 880 | return ret; |
900 | 881 | ||
901 | dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); | 882 | dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); |
902 | 883 | ||
903 | if (req_ctx->updated) { | 884 | if (req_ctx->updated) { |
904 | ret = hash_resume_state(device_data, &device_data->state); | 885 | ret = hash_resume_state(device_data, &device_data->state); |
905 | 886 | ||
906 | if (ret) { | 887 | if (ret) { |
907 | dev_err(device_data->dev, "[%s] hash_resume_state() " | 888 | dev_err(device_data->dev, "%s: hash_resume_state() failed!\n", |
908 | "failed!", __func__); | 889 | __func__); |
909 | goto out; | 890 | goto out; |
910 | } | 891 | } |
911 | |||
912 | } | 892 | } |
913 | 893 | ||
914 | if (!req_ctx->updated) { | 894 | if (!req_ctx->updated) { |
915 | ret = hash_setconfiguration(device_data, &ctx->config); | 895 | ret = hash_setconfiguration(device_data, &ctx->config); |
916 | if (ret) { | 896 | if (ret) { |
917 | dev_err(device_data->dev, "[%s] " | 897 | dev_err(device_data->dev, |
918 | "hash_setconfiguration() failed!", | 898 | "%s: hash_setconfiguration() failed!\n", |
919 | __func__); | 899 | __func__); |
920 | goto out; | 900 | goto out; |
921 | } | 901 | } |
922 | 902 | ||
@@ -926,9 +906,9 @@ static int hash_dma_final(struct ahash_request *req) | |||
926 | HASH_CR_DMAE_MASK); | 906 | HASH_CR_DMAE_MASK); |
927 | } else { | 907 | } else { |
928 | HASH_SET_BITS(&device_data->base->cr, | 908 | HASH_SET_BITS(&device_data->base->cr, |
929 | HASH_CR_DMAE_MASK); | 909 | HASH_CR_DMAE_MASK); |
930 | HASH_SET_BITS(&device_data->base->cr, | 910 | HASH_SET_BITS(&device_data->base->cr, |
931 | HASH_CR_PRIVN_MASK); | 911 | HASH_CR_PRIVN_MASK); |
932 | } | 912 | } |
933 | 913 | ||
934 | HASH_INITIALIZE; | 914 | HASH_INITIALIZE; |
@@ -944,16 +924,16 @@ static int hash_dma_final(struct ahash_request *req) | |||
944 | /* Store the nents in the dma struct. */ | 924 | /* Store the nents in the dma struct. */ |
945 | ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); | 925 | ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); |
946 | if (!ctx->device->dma.nents) { | 926 | if (!ctx->device->dma.nents) { |
947 | dev_err(device_data->dev, "[%s] " | 927 | dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n", |
948 | "ctx->device->dma.nents = 0", __func__); | 928 | __func__); |
949 | ret = ctx->device->dma.nents; | 929 | ret = ctx->device->dma.nents; |
950 | goto out; | 930 | goto out; |
951 | } | 931 | } |
952 | 932 | ||
953 | bytes_written = hash_dma_write(ctx, req->src, req->nbytes); | 933 | bytes_written = hash_dma_write(ctx, req->src, req->nbytes); |
954 | if (bytes_written != req->nbytes) { | 934 | if (bytes_written != req->nbytes) { |
955 | dev_err(device_data->dev, "[%s] " | 935 | dev_err(device_data->dev, "%s: hash_dma_write() failed!\n", |
956 | "hash_dma_write() failed!", __func__); | 936 | __func__); |
957 | ret = bytes_written; | 937 | ret = bytes_written; |
958 | goto out; | 938 | goto out; |
959 | } | 939 | } |
@@ -968,8 +948,8 @@ static int hash_dma_final(struct ahash_request *req) | |||
968 | unsigned int keylen = ctx->keylen; | 948 | unsigned int keylen = ctx->keylen; |
969 | u8 *key = ctx->key; | 949 | u8 *key = ctx->key; |
970 | 950 | ||
971 | dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, | 951 | dev_dbg(device_data->dev, "%s: keylen: %d\n", |
972 | ctx->keylen); | 952 | __func__, ctx->keylen); |
973 | hash_hw_write_key(device_data, key, keylen); | 953 | hash_hw_write_key(device_data, key, keylen); |
974 | } | 954 | } |
975 | 955 | ||
@@ -1004,14 +984,14 @@ static int hash_hw_final(struct ahash_request *req) | |||
1004 | if (ret) | 984 | if (ret) |
1005 | return ret; | 985 | return ret; |
1006 | 986 | ||
1007 | dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); | 987 | dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); |
1008 | 988 | ||
1009 | if (req_ctx->updated) { | 989 | if (req_ctx->updated) { |
1010 | ret = hash_resume_state(device_data, &device_data->state); | 990 | ret = hash_resume_state(device_data, &device_data->state); |
1011 | 991 | ||
1012 | if (ret) { | 992 | if (ret) { |
1013 | dev_err(device_data->dev, "[%s] hash_resume_state() " | 993 | dev_err(device_data->dev, |
1014 | "failed!", __func__); | 994 | "%s: hash_resume_state() failed!\n", __func__); |
1015 | goto out; | 995 | goto out; |
1016 | } | 996 | } |
1017 | } else if (req->nbytes == 0 && ctx->keylen == 0) { | 997 | } else if (req->nbytes == 0 && ctx->keylen == 0) { |
@@ -1025,31 +1005,33 @@ static int hash_hw_final(struct ahash_request *req) | |||
1025 | ret = get_empty_message_digest(device_data, &zero_hash[0], | 1005 | ret = get_empty_message_digest(device_data, &zero_hash[0], |
1026 | &zero_hash_size, &zero_digest); | 1006 | &zero_hash_size, &zero_digest); |
1027 | if (!ret && likely(zero_hash_size == ctx->digestsize) && | 1007 | if (!ret && likely(zero_hash_size == ctx->digestsize) && |
1028 | zero_digest) { | 1008 | zero_digest) { |
1029 | memcpy(req->result, &zero_hash[0], ctx->digestsize); | 1009 | memcpy(req->result, &zero_hash[0], ctx->digestsize); |
1030 | goto out; | 1010 | goto out; |
1031 | } else if (!ret && !zero_digest) { | 1011 | } else if (!ret && !zero_digest) { |
1032 | dev_dbg(device_data->dev, "[%s] HMAC zero msg with " | 1012 | dev_dbg(device_data->dev, |
1033 | "key, continue...", __func__); | 1013 | "%s: HMAC zero msg with key, continue...\n", |
1014 | __func__); | ||
1034 | } else { | 1015 | } else { |
1035 | dev_err(device_data->dev, "[%s] ret=%d, or wrong " | 1016 | dev_err(device_data->dev, |
1036 | "digest size? %s", __func__, ret, | 1017 | "%s: ret=%d, or wrong digest size? %s\n", |
1037 | (zero_hash_size == ctx->digestsize) ? | 1018 | __func__, ret, |
1038 | "true" : "false"); | 1019 | zero_hash_size == ctx->digestsize ? |
1020 | "true" : "false"); | ||
1039 | /* Return error */ | 1021 | /* Return error */ |
1040 | goto out; | 1022 | goto out; |
1041 | } | 1023 | } |
1042 | } else if (req->nbytes == 0 && ctx->keylen > 0) { | 1024 | } else if (req->nbytes == 0 && ctx->keylen > 0) { |
1043 | dev_err(device_data->dev, "[%s] Empty message with " | 1025 | dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n", |
1044 | "keylength > 0, NOT supported.", __func__); | 1026 | __func__); |
1045 | goto out; | 1027 | goto out; |
1046 | } | 1028 | } |
1047 | 1029 | ||
1048 | if (!req_ctx->updated) { | 1030 | if (!req_ctx->updated) { |
1049 | ret = init_hash_hw(device_data, ctx); | 1031 | ret = init_hash_hw(device_data, ctx); |
1050 | if (ret) { | 1032 | if (ret) { |
1051 | dev_err(device_data->dev, "[%s] init_hash_hw() " | 1033 | dev_err(device_data->dev, |
1052 | "failed!", __func__); | 1034 | "%s: init_hash_hw() failed!\n", __func__); |
1053 | goto out; | 1035 | goto out; |
1054 | } | 1036 | } |
1055 | } | 1037 | } |
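The hunk above is where hash_hw_final() short-circuits zero-length requests: a keyless digest of the empty message is answered from the precomputed digests behind get_empty_message_digest(), while an empty message with keylen > 0 is rejected as unsupported by the hardware. A condensed sketch of that control flow, using hypothetical stand-in types rather than the driver's real request/context structures:

#include <string.h>
#include <errno.h>

/* Hypothetical stand-ins for the driver's request/context types. */
struct sketch_req { unsigned int nbytes; unsigned char *result; };
struct sketch_ctx { unsigned int keylen; unsigned int digestsize; };

static int finalize_empty(struct sketch_req *req, struct sketch_ctx *ctx,
			  const unsigned char *zero_hash)
{
	if (req->nbytes == 0 && ctx->keylen == 0) {
		/* Digest of "" is precomputed: no hardware round trip. */
		memcpy(req->result, zero_hash, ctx->digestsize);
		return 0;
	}
	if (req->nbytes == 0 && ctx->keylen > 0)
		return -EPERM;	/* empty HMAC message: not supported */
	return 1;		/* non-empty: take the normal hw path */
}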
@@ -1067,8 +1049,8 @@ static int hash_hw_final(struct ahash_request *req) | |||
1067 | unsigned int keylen = ctx->keylen; | 1049 | unsigned int keylen = ctx->keylen; |
1068 | u8 *key = ctx->key; | 1050 | u8 *key = ctx->key; |
1069 | 1051 | ||
1070 | dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, | 1052 | dev_dbg(device_data->dev, "%s: keylen: %d\n", |
1071 | ctx->keylen); | 1053 | __func__, ctx->keylen); |
1072 | hash_hw_write_key(device_data, key, keylen); | 1054 | hash_hw_write_key(device_data, key, keylen); |
1073 | } | 1055 | } |
1074 | 1056 | ||
@@ -1115,10 +1097,8 @@ int hash_hw_update(struct ahash_request *req) | |||
1115 | /* Check if ctx->state.length + msg_length | 1097 | /* Check if ctx->state.length + msg_length |
1116 | overflows */ | 1098 | overflows */ |
1117 | if (msg_length > (req_ctx->state.length.low_word + msg_length) && | 1099 | if (msg_length > (req_ctx->state.length.low_word + msg_length) && |
1118 | HASH_HIGH_WORD_MAX_VAL == | 1100 | HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) { |
1119 | req_ctx->state.length.high_word) { | 1101 | pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__); |
1120 | pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!", | ||
1121 | __func__); | ||
1122 | return -EPERM; | 1102 | return -EPERM; |
1123 | } | 1103 | } |
1124 | 1104 | ||
@@ -1133,8 +1113,8 @@ int hash_hw_update(struct ahash_request *req) | |||
1133 | data_buffer, buffer, &index); | 1113 | data_buffer, buffer, &index); |
1134 | 1114 | ||
1135 | if (ret) { | 1115 | if (ret) { |
1136 | dev_err(device_data->dev, "[%s] hash_internal_hw_" | 1116 | dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n", |
1137 | "update() failed!", __func__); | 1117 | __func__); |
1138 | goto out; | 1118 | goto out; |
1139 | } | 1119 | } |
1140 | 1120 | ||
@@ -1142,9 +1122,8 @@ int hash_hw_update(struct ahash_request *req) | |||
1142 | } | 1122 | } |
1143 | 1123 | ||
1144 | req_ctx->state.index = index; | 1124 | req_ctx->state.index = index; |
1145 | dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d))", | 1125 | dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n", |
1146 | __func__, req_ctx->state.index, | 1126 | __func__, req_ctx->state.index, req_ctx->state.bit_index); |
1147 | req_ctx->state.bit_index); | ||
1148 | 1127 | ||
1149 | out: | 1128 | out: |
1150 | release_hash_device(device_data); | 1129 | release_hash_device(device_data); |
@@ -1158,23 +1137,23 @@ out: | |||
1158 | * @device_state: The state to be restored in the hash hardware | 1137 | * @device_state: The state to be restored in the hash hardware |
1159 | */ | 1138 | */ |
1160 | int hash_resume_state(struct hash_device_data *device_data, | 1139 | int hash_resume_state(struct hash_device_data *device_data, |
1161 | const struct hash_state *device_state) | 1140 | const struct hash_state *device_state) |
1162 | { | 1141 | { |
1163 | u32 temp_cr; | 1142 | u32 temp_cr; |
1164 | s32 count; | 1143 | s32 count; |
1165 | int hash_mode = HASH_OPER_MODE_HASH; | 1144 | int hash_mode = HASH_OPER_MODE_HASH; |
1166 | 1145 | ||
1167 | if (NULL == device_state) { | 1146 | if (NULL == device_state) { |
1168 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | 1147 | dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", |
1169 | __func__); | 1148 | __func__); |
1170 | return -EPERM; | 1149 | return -EPERM; |
1171 | } | 1150 | } |
1172 | 1151 | ||
1173 | /* Check correctness of index and length members */ | 1152 | /* Check correctness of index and length members */ |
1174 | if (device_state->index > HASH_BLOCK_SIZE | 1153 | if (device_state->index > HASH_BLOCK_SIZE || |
1175 | || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { | 1154 | (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { |
1176 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | 1155 | dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", |
1177 | __func__); | 1156 | __func__); |
1178 | return -EPERM; | 1157 | return -EPERM; |
1179 | } | 1158 | } |
1180 | 1159 | ||
@@ -1198,7 +1177,7 @@ int hash_resume_state(struct hash_device_data *device_data, | |||
1198 | break; | 1177 | break; |
1199 | 1178 | ||
1200 | writel_relaxed(device_state->csr[count], | 1179 | writel_relaxed(device_state->csr[count], |
1201 | &device_data->base->csrx[count]); | 1180 | &device_data->base->csrx[count]); |
1202 | } | 1181 | } |
1203 | 1182 | ||
1204 | writel_relaxed(device_state->csfull, &device_data->base->csfull); | 1183 | writel_relaxed(device_state->csfull, &device_data->base->csfull); |
@@ -1216,15 +1195,15 @@ int hash_resume_state(struct hash_device_data *device_data, | |||
1216 | * @device_state: The structure where the hardware state should be saved. | 1195 | * @device_state: The structure where the hardware state should be saved. |
1217 | */ | 1196 | */ |
1218 | int hash_save_state(struct hash_device_data *device_data, | 1197 | int hash_save_state(struct hash_device_data *device_data, |
1219 | struct hash_state *device_state) | 1198 | struct hash_state *device_state) |
1220 | { | 1199 | { |
1221 | u32 temp_cr; | 1200 | u32 temp_cr; |
1222 | u32 count; | 1201 | u32 count; |
1223 | int hash_mode = HASH_OPER_MODE_HASH; | 1202 | int hash_mode = HASH_OPER_MODE_HASH; |
1224 | 1203 | ||
1225 | if (NULL == device_state) { | 1204 | if (NULL == device_state) { |
1226 | dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", | 1205 | dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", |
1227 | __func__); | 1206 | __func__); |
1228 | return -ENOTSUPP; | 1207 | return -ENOTSUPP; |
1229 | } | 1208 | } |
1230 | 1209 | ||
@@ -1270,20 +1249,18 @@ int hash_save_state(struct hash_device_data *device_data, | |||
1270 | int hash_check_hw(struct hash_device_data *device_data) | 1249 | int hash_check_hw(struct hash_device_data *device_data) |
1271 | { | 1250 | { |
1272 | /* Checking Peripheral Ids */ | 1251 | /* Checking Peripheral Ids */ |
1273 | if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) | 1252 | if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) && |
1274 | && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) | 1253 | HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) && |
1275 | && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) | 1254 | HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) && |
1276 | && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) | 1255 | HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) && |
1277 | && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) | 1256 | HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) && |
1278 | && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) | 1257 | HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) && |
1279 | && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) | 1258 | HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) && |
1280 | && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3) | 1259 | HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) { |
1281 | ) { | ||
1282 | return 0; | 1260 | return 0; |
1283 | } | 1261 | } |
1284 | 1262 | ||
1285 | dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", | 1263 | dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__); |
1286 | __func__); | ||
1287 | return -ENOTSUPP; | 1264 | return -ENOTSUPP; |
1288 | } | 1265 | } |
1289 | 1266 | ||
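The hash_resume_state() and hash_check_hw() hunks above also apply a checkpatch style rule seen throughout this patch: boolean operators move from the start of a continuation line to the end of the preceding one, and hash_check_hw()'s stray lone ") {" line is folded back into the condition. A minimal before/after sketch with generic identifiers, not the driver's:

/* Before (removed lines): the operator led the continuation line:
 *
 *	if (id0 == want0
 *	    && id1 == want1
 *	    ) {
 */

/* After: operators trail, and the brace joins the last condition. */
static int ids_match(unsigned int id0, unsigned int id1,
		     unsigned int want0, unsigned int want1)
{
	if (id0 == want0 &&
	    id1 == want1)
		return 0;
	return -1;
}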
@@ -1294,14 +1271,14 @@ int hash_check_hw(struct hash_device_data *device_data) | |||
1294 | * @algorithm: The algorithm in use. | 1271 | * @algorithm: The algorithm in use. |
1295 | */ | 1272 | */ |
1296 | void hash_get_digest(struct hash_device_data *device_data, | 1273 | void hash_get_digest(struct hash_device_data *device_data, |
1297 | u8 *digest, int algorithm) | 1274 | u8 *digest, int algorithm) |
1298 | { | 1275 | { |
1299 | u32 temp_hx_val, count; | 1276 | u32 temp_hx_val, count; |
1300 | int loop_ctr; | 1277 | int loop_ctr; |
1301 | 1278 | ||
1302 | if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { | 1279 | if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { |
1303 | dev_err(device_data->dev, "[%s] Incorrect algorithm %d", | 1280 | dev_err(device_data->dev, "%s: Incorrect algorithm %d\n", |
1304 | __func__, algorithm); | 1281 | __func__, algorithm); |
1305 | return; | 1282 | return; |
1306 | } | 1283 | } |
1307 | 1284 | ||
@@ -1310,8 +1287,8 @@ void hash_get_digest(struct hash_device_data *device_data, | |||
1310 | else | 1287 | else |
1311 | loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); | 1288 | loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); |
1312 | 1289 | ||
1313 | dev_dbg(device_data->dev, "[%s] digest array:(0x%x)", | 1290 | dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n", |
1314 | __func__, (u32) digest); | 1291 | __func__, (u32) digest); |
1315 | 1292 | ||
1316 | /* Copy result into digest array */ | 1293 | /* Copy result into digest array */ |
1317 | for (count = 0; count < loop_ctr; count++) { | 1294 | for (count = 0; count < loop_ctr; count++) { |
@@ -1337,8 +1314,7 @@ static int ahash_update(struct ahash_request *req) | |||
1337 | /* Skip update for DMA, all data will be passed to DMA in final */ | 1314 | /* Skip update for DMA, all data will be passed to DMA in final */ |
1338 | 1315 | ||
1339 | if (ret) { | 1316 | if (ret) { |
1340 | pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", | 1317 | pr_err("%s: hash_hw_update() failed!\n", __func__); |
1341 | __func__); | ||
1342 | } | 1318 | } |
1343 | 1319 | ||
1344 | return ret; | 1320 | return ret; |
@@ -1353,7 +1329,7 @@ static int ahash_final(struct ahash_request *req) | |||
1353 | int ret = 0; | 1329 | int ret = 0; |
1354 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); | 1330 | struct hash_req_ctx *req_ctx = ahash_request_ctx(req); |
1355 | 1331 | ||
1356 | pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); | 1332 | pr_debug("%s: data size: %d\n", __func__, req->nbytes); |
1357 | 1333 | ||
1358 | if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) | 1334 | if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) |
1359 | ret = hash_dma_final(req); | 1335 | ret = hash_dma_final(req); |
@@ -1361,15 +1337,14 @@ static int ahash_final(struct ahash_request *req) | |||
1361 | ret = hash_hw_final(req); | 1337 | ret = hash_hw_final(req); |
1362 | 1338 | ||
1363 | if (ret) { | 1339 | if (ret) { |
1364 | pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed", | 1340 | pr_err("%s: hash_hw/dma_final() failed\n", __func__); |
1365 | __func__); | ||
1366 | } | 1341 | } |
1367 | 1342 | ||
1368 | return ret; | 1343 | return ret; |
1369 | } | 1344 | } |
1370 | 1345 | ||
1371 | static int hash_setkey(struct crypto_ahash *tfm, | 1346 | static int hash_setkey(struct crypto_ahash *tfm, |
1372 | const u8 *key, unsigned int keylen, int alg) | 1347 | const u8 *key, unsigned int keylen, int alg) |
1373 | { | 1348 | { |
1374 | int ret = 0; | 1349 | int ret = 0; |
1375 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); | 1350 | struct hash_ctx *ctx = crypto_ahash_ctx(tfm); |
@@ -1379,8 +1354,8 @@ static int hash_setkey(struct crypto_ahash *tfm, | |||
1379 | */ | 1354 | */ |
1380 | ctx->key = kmemdup(key, keylen, GFP_KERNEL); | 1355 | ctx->key = kmemdup(key, keylen, GFP_KERNEL); |
1381 | if (!ctx->key) { | 1356 | if (!ctx->key) { |
1382 | pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key " | 1357 | pr_err("%s: Failed to allocate ctx->key for %d\n", |
1383 | "for %d\n", __func__, alg); | 1358 | __func__, alg); |
1384 | return -ENOMEM; | 1359 | return -ENOMEM; |
1385 | } | 1360 | } |
1386 | ctx->keylen = keylen; | 1361 | ctx->keylen = keylen; |
@@ -1501,13 +1476,13 @@ out: | |||
1501 | } | 1476 | } |
1502 | 1477 | ||
1503 | static int hmac_sha1_setkey(struct crypto_ahash *tfm, | 1478 | static int hmac_sha1_setkey(struct crypto_ahash *tfm, |
1504 | const u8 *key, unsigned int keylen) | 1479 | const u8 *key, unsigned int keylen) |
1505 | { | 1480 | { |
1506 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); | 1481 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); |
1507 | } | 1482 | } |
1508 | 1483 | ||
1509 | static int hmac_sha256_setkey(struct crypto_ahash *tfm, | 1484 | static int hmac_sha256_setkey(struct crypto_ahash *tfm, |
1510 | const u8 *key, unsigned int keylen) | 1485 | const u8 *key, unsigned int keylen) |
1511 | { | 1486 | { |
1512 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); | 1487 | return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); |
1513 | } | 1488 | } |
@@ -1528,7 +1503,7 @@ static int hash_cra_init(struct crypto_tfm *tfm) | |||
1528 | hash); | 1503 | hash); |
1529 | 1504 | ||
1530 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 1505 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
1531 | sizeof(struct hash_req_ctx)); | 1506 | sizeof(struct hash_req_ctx)); |
1532 | 1507 | ||
1533 | ctx->config.data_format = HASH_DATA_8_BITS; | 1508 | ctx->config.data_format = HASH_DATA_8_BITS; |
1534 | ctx->config.algorithm = hash_alg->conf.algorithm; | 1509 | ctx->config.algorithm = hash_alg->conf.algorithm; |
@@ -1541,98 +1516,97 @@ static int hash_cra_init(struct crypto_tfm *tfm) | |||
1541 | 1516 | ||
1542 | static struct hash_algo_template hash_algs[] = { | 1517 | static struct hash_algo_template hash_algs[] = { |
1543 | { | 1518 | { |
1544 | .conf.algorithm = HASH_ALGO_SHA1, | 1519 | .conf.algorithm = HASH_ALGO_SHA1, |
1545 | .conf.oper_mode = HASH_OPER_MODE_HASH, | 1520 | .conf.oper_mode = HASH_OPER_MODE_HASH, |
1546 | .hash = { | 1521 | .hash = { |
1547 | .init = hash_init, | 1522 | .init = hash_init, |
1548 | .update = ahash_update, | 1523 | .update = ahash_update, |
1549 | .final = ahash_final, | 1524 | .final = ahash_final, |
1550 | .digest = ahash_sha1_digest, | 1525 | .digest = ahash_sha1_digest, |
1551 | .halg.digestsize = SHA1_DIGEST_SIZE, | 1526 | .halg.digestsize = SHA1_DIGEST_SIZE, |
1552 | .halg.statesize = sizeof(struct hash_ctx), | 1527 | .halg.statesize = sizeof(struct hash_ctx), |
1553 | .halg.base = { | 1528 | .halg.base = { |
1554 | .cra_name = "sha1", | 1529 | .cra_name = "sha1", |
1555 | .cra_driver_name = "sha1-ux500", | 1530 | .cra_driver_name = "sha1-ux500", |
1556 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1531 | .cra_flags = (CRYPTO_ALG_TYPE_AHASH | |
1557 | CRYPTO_ALG_ASYNC, | 1532 | CRYPTO_ALG_ASYNC), |
1558 | .cra_blocksize = SHA1_BLOCK_SIZE, | 1533 | .cra_blocksize = SHA1_BLOCK_SIZE, |
1559 | .cra_ctxsize = sizeof(struct hash_ctx), | 1534 | .cra_ctxsize = sizeof(struct hash_ctx), |
1560 | .cra_init = hash_cra_init, | 1535 | .cra_init = hash_cra_init, |
1561 | .cra_module = THIS_MODULE, | 1536 | .cra_module = THIS_MODULE, |
1562 | } | 1537 | } |
1563 | } | 1538 | } |
1564 | }, | 1539 | }, |
1565 | { | 1540 | { |
1566 | .conf.algorithm = HASH_ALGO_SHA256, | 1541 | .conf.algorithm = HASH_ALGO_SHA256, |
1567 | .conf.oper_mode = HASH_OPER_MODE_HASH, | 1542 | .conf.oper_mode = HASH_OPER_MODE_HASH, |
1568 | .hash = { | 1543 | .hash = { |
1569 | .init = hash_init, | 1544 | .init = hash_init, |
1570 | .update = ahash_update, | 1545 | .update = ahash_update, |
1571 | .final = ahash_final, | 1546 | .final = ahash_final, |
1572 | .digest = ahash_sha256_digest, | 1547 | .digest = ahash_sha256_digest, |
1573 | .halg.digestsize = SHA256_DIGEST_SIZE, | 1548 | .halg.digestsize = SHA256_DIGEST_SIZE, |
1574 | .halg.statesize = sizeof(struct hash_ctx), | 1549 | .halg.statesize = sizeof(struct hash_ctx), |
1575 | .halg.base = { | 1550 | .halg.base = { |
1576 | .cra_name = "sha256", | 1551 | .cra_name = "sha256", |
1577 | .cra_driver_name = "sha256-ux500", | 1552 | .cra_driver_name = "sha256-ux500", |
1578 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1553 | .cra_flags = (CRYPTO_ALG_TYPE_AHASH | |
1579 | CRYPTO_ALG_ASYNC, | 1554 | CRYPTO_ALG_ASYNC), |
1580 | .cra_blocksize = SHA256_BLOCK_SIZE, | 1555 | .cra_blocksize = SHA256_BLOCK_SIZE, |
1581 | .cra_ctxsize = sizeof(struct hash_ctx), | 1556 | .cra_ctxsize = sizeof(struct hash_ctx), |
1582 | .cra_type = &crypto_ahash_type, | 1557 | .cra_type = &crypto_ahash_type, |
1583 | .cra_init = hash_cra_init, | 1558 | .cra_init = hash_cra_init, |
1584 | .cra_module = THIS_MODULE, | 1559 | .cra_module = THIS_MODULE, |
1585 | } | ||
1586 | } | 1560 | } |
1587 | 1561 | } | |
1588 | }, | 1562 | }, |
1589 | { | 1563 | { |
1590 | .conf.algorithm = HASH_ALGO_SHA1, | 1564 | .conf.algorithm = HASH_ALGO_SHA1, |
1591 | .conf.oper_mode = HASH_OPER_MODE_HMAC, | 1565 | .conf.oper_mode = HASH_OPER_MODE_HMAC, |
1592 | .hash = { | 1566 | .hash = { |
1593 | .init = hash_init, | 1567 | .init = hash_init, |
1594 | .update = ahash_update, | 1568 | .update = ahash_update, |
1595 | .final = ahash_final, | 1569 | .final = ahash_final, |
1596 | .digest = hmac_sha1_digest, | 1570 | .digest = hmac_sha1_digest, |
1597 | .setkey = hmac_sha1_setkey, | 1571 | .setkey = hmac_sha1_setkey, |
1598 | .halg.digestsize = SHA1_DIGEST_SIZE, | 1572 | .halg.digestsize = SHA1_DIGEST_SIZE, |
1599 | .halg.statesize = sizeof(struct hash_ctx), | 1573 | .halg.statesize = sizeof(struct hash_ctx), |
1600 | .halg.base = { | 1574 | .halg.base = { |
1601 | .cra_name = "hmac(sha1)", | 1575 | .cra_name = "hmac(sha1)", |
1602 | .cra_driver_name = "hmac-sha1-ux500", | 1576 | .cra_driver_name = "hmac-sha1-ux500", |
1603 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1577 | .cra_flags = (CRYPTO_ALG_TYPE_AHASH | |
1604 | CRYPTO_ALG_ASYNC, | 1578 | CRYPTO_ALG_ASYNC), |
1605 | .cra_blocksize = SHA1_BLOCK_SIZE, | 1579 | .cra_blocksize = SHA1_BLOCK_SIZE, |
1606 | .cra_ctxsize = sizeof(struct hash_ctx), | 1580 | .cra_ctxsize = sizeof(struct hash_ctx), |
1607 | .cra_type = &crypto_ahash_type, | 1581 | .cra_type = &crypto_ahash_type, |
1608 | .cra_init = hash_cra_init, | 1582 | .cra_init = hash_cra_init, |
1609 | .cra_module = THIS_MODULE, | 1583 | .cra_module = THIS_MODULE, |
1610 | } | ||
1611 | } | 1584 | } |
1585 | } | ||
1612 | }, | 1586 | }, |
1613 | { | 1587 | { |
1614 | .conf.algorithm = HASH_ALGO_SHA256, | 1588 | .conf.algorithm = HASH_ALGO_SHA256, |
1615 | .conf.oper_mode = HASH_OPER_MODE_HMAC, | 1589 | .conf.oper_mode = HASH_OPER_MODE_HMAC, |
1616 | .hash = { | 1590 | .hash = { |
1617 | .init = hash_init, | 1591 | .init = hash_init, |
1618 | .update = ahash_update, | 1592 | .update = ahash_update, |
1619 | .final = ahash_final, | 1593 | .final = ahash_final, |
1620 | .digest = hmac_sha256_digest, | 1594 | .digest = hmac_sha256_digest, |
1621 | .setkey = hmac_sha256_setkey, | 1595 | .setkey = hmac_sha256_setkey, |
1622 | .halg.digestsize = SHA256_DIGEST_SIZE, | 1596 | .halg.digestsize = SHA256_DIGEST_SIZE, |
1623 | .halg.statesize = sizeof(struct hash_ctx), | 1597 | .halg.statesize = sizeof(struct hash_ctx), |
1624 | .halg.base = { | 1598 | .halg.base = { |
1625 | .cra_name = "hmac(sha256)", | 1599 | .cra_name = "hmac(sha256)", |
1626 | .cra_driver_name = "hmac-sha256-ux500", | 1600 | .cra_driver_name = "hmac-sha256-ux500", |
1627 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1601 | .cra_flags = (CRYPTO_ALG_TYPE_AHASH | |
1628 | CRYPTO_ALG_ASYNC, | 1602 | CRYPTO_ALG_ASYNC), |
1629 | .cra_blocksize = SHA256_BLOCK_SIZE, | 1603 | .cra_blocksize = SHA256_BLOCK_SIZE, |
1630 | .cra_ctxsize = sizeof(struct hash_ctx), | 1604 | .cra_ctxsize = sizeof(struct hash_ctx), |
1631 | .cra_type = &crypto_ahash_type, | 1605 | .cra_type = &crypto_ahash_type, |
1632 | .cra_init = hash_cra_init, | 1606 | .cra_init = hash_cra_init, |
1633 | .cra_module = THIS_MODULE, | 1607 | .cra_module = THIS_MODULE, |
1634 | } | ||
1635 | } | 1608 | } |
1609 | } | ||
1636 | } | 1610 | } |
1637 | }; | 1611 | }; |
1638 | 1612 | ||
@@ -1649,7 +1623,7 @@ static int ahash_algs_register_all(struct hash_device_data *device_data) | |||
1649 | ret = crypto_register_ahash(&hash_algs[i].hash); | 1623 | ret = crypto_register_ahash(&hash_algs[i].hash); |
1650 | if (ret) { | 1624 | if (ret) { |
1651 | count = i; | 1625 | count = i; |
1652 | dev_err(device_data->dev, "[%s] alg registration failed", | 1626 | dev_err(device_data->dev, "%s: alg registration failed\n", |
1653 | hash_algs[i].hash.halg.base.cra_driver_name); | 1627 | hash_algs[i].hash.halg.base.cra_driver_name); |
1654 | goto unreg; | 1628 | goto unreg; |
1655 | } | 1629 | } |
@@ -1683,9 +1657,8 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1683 | struct hash_device_data *device_data; | 1657 | struct hash_device_data *device_data; |
1684 | struct device *dev = &pdev->dev; | 1658 | struct device *dev = &pdev->dev; |
1685 | 1659 | ||
1686 | device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); | 1660 | device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC); |
1687 | if (!device_data) { | 1661 | if (!device_data) { |
1688 | dev_dbg(dev, "[%s] kzalloc() failed!", __func__); | ||
1689 | ret = -ENOMEM; | 1662 | ret = -ENOMEM; |
1690 | goto out; | 1663 | goto out; |
1691 | } | 1664 | } |
@@ -1695,14 +1668,14 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1695 | 1668 | ||
1696 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1669 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1697 | if (!res) { | 1670 | if (!res) { |
1698 | dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__); | 1671 | dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__); |
1699 | ret = -ENODEV; | 1672 | ret = -ENODEV; |
1700 | goto out_kfree; | 1673 | goto out_kfree; |
1701 | } | 1674 | } |
1702 | 1675 | ||
1703 | res = request_mem_region(res->start, resource_size(res), pdev->name); | 1676 | res = request_mem_region(res->start, resource_size(res), pdev->name); |
1704 | if (res == NULL) { | 1677 | if (res == NULL) { |
1705 | dev_dbg(dev, "[%s] request_mem_region() failed!", __func__); | 1678 | dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__); |
1706 | ret = -EBUSY; | 1679 | ret = -EBUSY; |
1707 | goto out_kfree; | 1680 | goto out_kfree; |
1708 | } | 1681 | } |
@@ -1710,8 +1683,7 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1710 | device_data->phybase = res->start; | 1683 | device_data->phybase = res->start; |
1711 | device_data->base = ioremap(res->start, resource_size(res)); | 1684 | device_data->base = ioremap(res->start, resource_size(res)); |
1712 | if (!device_data->base) { | 1685 | if (!device_data->base) { |
1713 | dev_err(dev, "[%s] ioremap() failed!", | 1686 | dev_err(dev, "%s: ioremap() failed!\n", __func__); |
1714 | __func__); | ||
1715 | ret = -ENOMEM; | 1687 | ret = -ENOMEM; |
1716 | goto out_free_mem; | 1688 | goto out_free_mem; |
1717 | } | 1689 | } |
@@ -1721,7 +1693,7 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1721 | /* Enable power for HASH1 hardware block */ | 1693 | /* Enable power for HASH1 hardware block */ |
1722 | device_data->regulator = regulator_get(dev, "v-ape"); | 1694 | device_data->regulator = regulator_get(dev, "v-ape"); |
1723 | if (IS_ERR(device_data->regulator)) { | 1695 | if (IS_ERR(device_data->regulator)) { |
1724 | dev_err(dev, "[%s] regulator_get() failed!", __func__); | 1696 | dev_err(dev, "%s: regulator_get() failed!\n", __func__); |
1725 | ret = PTR_ERR(device_data->regulator); | 1697 | ret = PTR_ERR(device_data->regulator); |
1726 | device_data->regulator = NULL; | 1698 | device_data->regulator = NULL; |
1727 | goto out_unmap; | 1699 | goto out_unmap; |
@@ -1730,27 +1702,27 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1730 | /* Enable the clock for HASH1 hardware block */ | 1702 | /* Enable the clock for HASH1 hardware block */ |
1731 | device_data->clk = clk_get(dev, NULL); | 1703 | device_data->clk = clk_get(dev, NULL); |
1732 | if (IS_ERR(device_data->clk)) { | 1704 | if (IS_ERR(device_data->clk)) { |
1733 | dev_err(dev, "[%s] clk_get() failed!", __func__); | 1705 | dev_err(dev, "%s: clk_get() failed!\n", __func__); |
1734 | ret = PTR_ERR(device_data->clk); | 1706 | ret = PTR_ERR(device_data->clk); |
1735 | goto out_regulator; | 1707 | goto out_regulator; |
1736 | } | 1708 | } |
1737 | 1709 | ||
1738 | ret = clk_prepare(device_data->clk); | 1710 | ret = clk_prepare(device_data->clk); |
1739 | if (ret) { | 1711 | if (ret) { |
1740 | dev_err(dev, "[%s] clk_prepare() failed!", __func__); | 1712 | dev_err(dev, "%s: clk_prepare() failed!\n", __func__); |
1741 | goto out_clk; | 1713 | goto out_clk; |
1742 | } | 1714 | } |
1743 | 1715 | ||
1744 | /* Enable device power (and clock) */ | 1716 | /* Enable device power (and clock) */ |
1745 | ret = hash_enable_power(device_data, false); | 1717 | ret = hash_enable_power(device_data, false); |
1746 | if (ret) { | 1718 | if (ret) { |
1747 | dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); | 1719 | dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); |
1748 | goto out_clk_unprepare; | 1720 | goto out_clk_unprepare; |
1749 | } | 1721 | } |
1750 | 1722 | ||
1751 | ret = hash_check_hw(device_data); | 1723 | ret = hash_check_hw(device_data); |
1752 | if (ret) { | 1724 | if (ret) { |
1753 | dev_err(dev, "[%s] hash_check_hw() failed!", __func__); | 1725 | dev_err(dev, "%s: hash_check_hw() failed!\n", __func__); |
1754 | goto out_power; | 1726 | goto out_power; |
1755 | } | 1727 | } |
1756 | 1728 | ||
@@ -1766,8 +1738,8 @@ static int ux500_hash_probe(struct platform_device *pdev) | |||
1766 | 1738 | ||
1767 | ret = ahash_algs_register_all(device_data); | 1739 | ret = ahash_algs_register_all(device_data); |
1768 | if (ret) { | 1740 | if (ret) { |
1769 | dev_err(dev, "[%s] ahash_algs_register_all() " | 1741 | dev_err(dev, "%s: ahash_algs_register_all() failed!\n", |
1770 | "failed!", __func__); | 1742 | __func__); |
1771 | goto out_power; | 1743 | goto out_power; |
1772 | } | 1744 | } |
1773 | 1745 | ||
@@ -1810,8 +1782,7 @@ static int ux500_hash_remove(struct platform_device *pdev) | |||
1810 | 1782 | ||
1811 | device_data = platform_get_drvdata(pdev); | 1783 | device_data = platform_get_drvdata(pdev); |
1812 | if (!device_data) { | 1784 | if (!device_data) { |
1813 | dev_err(dev, "[%s]: platform_get_drvdata() failed!", | 1785 | dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); |
1814 | __func__); | ||
1815 | return -ENOMEM; | 1786 | return -ENOMEM; |
1816 | } | 1787 | } |
1817 | 1788 | ||
@@ -1841,7 +1812,7 @@ static int ux500_hash_remove(struct platform_device *pdev) | |||
1841 | ahash_algs_unregister_all(device_data); | 1812 | ahash_algs_unregister_all(device_data); |
1842 | 1813 | ||
1843 | if (hash_disable_power(device_data, false)) | 1814 | if (hash_disable_power(device_data, false)) |
1844 | dev_err(dev, "[%s]: hash_disable_power() failed", | 1815 | dev_err(dev, "%s: hash_disable_power() failed\n", |
1845 | __func__); | 1816 | __func__); |
1846 | 1817 | ||
1847 | clk_unprepare(device_data->clk); | 1818 | clk_unprepare(device_data->clk); |
@@ -1870,8 +1841,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) | |||
1870 | 1841 | ||
1871 | device_data = platform_get_drvdata(pdev); | 1842 | device_data = platform_get_drvdata(pdev); |
1872 | if (!device_data) { | 1843 | if (!device_data) { |
1873 | dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", | 1844 | dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n", |
1874 | __func__); | 1845 | __func__); |
1875 | return; | 1846 | return; |
1876 | } | 1847 | } |
1877 | 1848 | ||
@@ -1880,8 +1851,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) | |||
1880 | /* current_ctx allocates a device, NULL = unallocated */ | 1851 | /* current_ctx allocates a device, NULL = unallocated */ |
1881 | if (!device_data->current_ctx) { | 1852 | if (!device_data->current_ctx) { |
1882 | if (down_trylock(&driver_data.device_allocation)) | 1853 | if (down_trylock(&driver_data.device_allocation)) |
1883 | dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" | 1854 | dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n", |
1884 | "Shutting down anyway...", __func__); | 1855 | __func__); |
1885 | /** | 1856 | /** |
1886 | * (Allocate the device) | 1857 | * (Allocate the device) |
1887 | * Need to set this to non-null (dummy) value, | 1858 | * Need to set this to non-null (dummy) value, |
@@ -1906,8 +1877,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev) | |||
1906 | release_mem_region(res->start, resource_size(res)); | 1877 | release_mem_region(res->start, resource_size(res)); |
1907 | 1878 | ||
1908 | if (hash_disable_power(device_data, false)) | 1879 | if (hash_disable_power(device_data, false)) |
1909 | dev_err(&pdev->dev, "[%s] hash_disable_power() failed", | 1880 | dev_err(&pdev->dev, "%s: hash_disable_power() failed\n", |
1910 | __func__); | 1881 | __func__); |
1911 | } | 1882 | } |
1912 | 1883 | ||
1913 | /** | 1884 | /** |
@@ -1922,7 +1893,7 @@ static int ux500_hash_suspend(struct device *dev) | |||
1922 | 1893 | ||
1923 | device_data = dev_get_drvdata(dev); | 1894 | device_data = dev_get_drvdata(dev); |
1924 | if (!device_data) { | 1895 | if (!device_data) { |
1925 | dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); | 1896 | dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); |
1926 | return -ENOMEM; | 1897 | return -ENOMEM; |
1927 | } | 1898 | } |
1928 | 1899 | ||
@@ -1933,15 +1904,16 @@ static int ux500_hash_suspend(struct device *dev) | |||
1933 | 1904 | ||
1934 | if (device_data->current_ctx == ++temp_ctx) { | 1905 | if (device_data->current_ctx == ++temp_ctx) { |
1935 | if (down_interruptible(&driver_data.device_allocation)) | 1906 | if (down_interruptible(&driver_data.device_allocation)) |
1936 | dev_dbg(dev, "[%s]: down_interruptible() failed", | 1907 | dev_dbg(dev, "%s: down_interruptible() failed\n", |
1937 | __func__); | 1908 | __func__); |
1938 | ret = hash_disable_power(device_data, false); | 1909 | ret = hash_disable_power(device_data, false); |
1939 | 1910 | ||
1940 | } else | 1911 | } else { |
1941 | ret = hash_disable_power(device_data, true); | 1912 | ret = hash_disable_power(device_data, true); |
1913 | } | ||
1942 | 1914 | ||
1943 | if (ret) | 1915 | if (ret) |
1944 | dev_err(dev, "[%s]: hash_disable_power()", __func__); | 1916 | dev_err(dev, "%s: hash_disable_power()\n", __func__); |
1945 | 1917 | ||
1946 | return ret; | 1918 | return ret; |
1947 | } | 1919 | } |
@@ -1958,7 +1930,7 @@ static int ux500_hash_resume(struct device *dev) | |||
1958 | 1930 | ||
1959 | device_data = dev_get_drvdata(dev); | 1931 | device_data = dev_get_drvdata(dev); |
1960 | if (!device_data) { | 1932 | if (!device_data) { |
1961 | dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); | 1933 | dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__); |
1962 | return -ENOMEM; | 1934 | return -ENOMEM; |
1963 | } | 1935 | } |
1964 | 1936 | ||
@@ -1973,7 +1945,7 @@ static int ux500_hash_resume(struct device *dev) | |||
1973 | ret = hash_enable_power(device_data, true); | 1945 | ret = hash_enable_power(device_data, true); |
1974 | 1946 | ||
1975 | if (ret) | 1947 | if (ret) |
1976 | dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); | 1948 | dev_err(dev, "%s: hash_enable_power() failed!\n", __func__); |
1977 | 1949 | ||
1978 | return ret; | 1950 | return ret; |
1979 | } | 1951 | } |
@@ -1981,8 +1953,8 @@ static int ux500_hash_resume(struct device *dev) | |||
1981 | static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); | 1953 | static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); |
1982 | 1954 | ||
1983 | static const struct of_device_id ux500_hash_match[] = { | 1955 | static const struct of_device_id ux500_hash_match[] = { |
1984 | { .compatible = "stericsson,ux500-hash" }, | 1956 | { .compatible = "stericsson,ux500-hash" }, |
1985 | { }, | 1957 | { }, |
1986 | }; | 1958 | }; |
1987 | 1959 | ||
1988 | static struct platform_driver hash_driver = { | 1960 | static struct platform_driver hash_driver = { |
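Taken together, the ux500_hash.c hunks above are a mechanical cleanup: every dev_dbg()/dev_err()/pr_err() message drops the bracketed "[%s]" prefix and the string splicing across lines in favour of a single "%s: ...\n" format string, which keeps messages greppable and adds the missing trailing newlines; the DEV_DBG_NAME prefix on pr_* calls goes away for the same reason. Along the way the cra_flags initializers gain parentheses around the OR'd flags, the redundant dev_dbg() after a failed kzalloc() is dropped (the allocator already complains), and the allocation switches to sizeof(*device_data). The logging pattern, side by side, in a throwaway helper (the function name is illustrative):

#include <linux/device.h>

static void report_resume_failure(struct device *dev)
{
	/*
	 * Old form, as removed above:
	 *	dev_err(dev, "[%s] hash_resume_state() "
	 *		"failed!", __func__);
	 * New form: one format string, "%s: " prefix, explicit newline.
	 */
	dev_err(dev, "%s: hash_resume_state() failed!\n", __func__);
}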
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 3744d2a642df..13621cc8cf4c 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h | |||
@@ -113,4 +113,6 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more); | |||
113 | void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, | 113 | void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, |
114 | unsigned int start, unsigned int nbytes, int out); | 114 | unsigned int start, unsigned int nbytes, int out); |
115 | 115 | ||
116 | int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes); | ||
117 | |||
116 | #endif /* _CRYPTO_SCATTERWALK_H */ | 118 | #endif /* _CRYPTO_SCATTERWALK_H */ |
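The one API addition to scatterwalk is scatterwalk_bytes_sglen(), which the nx "fix limits to sg lists" patches in this series use to learn how many scatterlist entries a byte count spans before building a bounded hardware sg list; a negative return signals that the list cannot satisfy the request. A hedged usage sketch, where the 32-entry cap is an illustrative stand-in for a driver's real hardware limit:

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

static int sg_list_fits_hw(struct scatterlist *sg, int nbytes)
{
	int nents = scatterwalk_bytes_sglen(sg, nbytes);

	if (nents < 0)		/* list cannot cover nbytes */
		return -EINVAL;
	if (nents > 32)		/* hypothetical hardware sg-entry cap */
		return -EFBIG;
	return 0;
}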
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index a9c96d865ee7..b3cb71f0d3b0 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h | |||
@@ -3,6 +3,10 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | #define CRC_T10DIF_DIGEST_SIZE 2 | ||
7 | #define CRC_T10DIF_BLOCK_SIZE 1 | ||
8 | |||
9 | __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len); | ||
6 | __u16 crc_t10dif(unsigned char const *, size_t); | 10 | __u16 crc_t10dif(unsigned char const *, size_t); |
7 | 11 | ||
8 | #endif | 12 | #endif |
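crc-t10dif.h now describes the CRC in crypto-layer terms: a 2-byte digest over a 1-byte block, with crc_t10dif_generic() exposing the old table-driven routine so the reinstated "crct10dif" transform can fall back to it when no accelerated provider is loaded. Its first argument is the running CRC, so data can be fed in pieces; a small sketch with a hypothetical helper:

#include <linux/crc-t10dif.h>

/* Feeding the buffer in two chunks must match a single whole-buffer
 * call, because the crc argument carries the running state forward. */
static __u16 crc_in_pieces(const unsigned char *buf, size_t len)
{
	size_t half = len / 2;
	__u16 crc = crc_t10dif_generic(0, buf, half);

	return crc_t10dif_generic(crc, buf + half, len - half);
}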
diff --git a/kernel/padata.c b/kernel/padata.c index 072f4ee4eb89..07af2c95dcfe 100644 --- a/kernel/padata.c +++ b/kernel/padata.c | |||
@@ -846,6 +846,8 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
846 | switch (action) { | 846 | switch (action) { |
847 | case CPU_ONLINE: | 847 | case CPU_ONLINE: |
848 | case CPU_ONLINE_FROZEN: | 848 | case CPU_ONLINE_FROZEN: |
849 | case CPU_DOWN_FAILED: | ||
850 | case CPU_DOWN_FAILED_FROZEN: | ||
849 | if (!pinst_has_cpu(pinst, cpu)) | 851 | if (!pinst_has_cpu(pinst, cpu)) |
850 | break; | 852 | break; |
851 | mutex_lock(&pinst->lock); | 853 | mutex_lock(&pinst->lock); |
@@ -857,6 +859,8 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
857 | 859 | ||
858 | case CPU_DOWN_PREPARE: | 860 | case CPU_DOWN_PREPARE: |
859 | case CPU_DOWN_PREPARE_FROZEN: | 861 | case CPU_DOWN_PREPARE_FROZEN: |
862 | case CPU_UP_CANCELED: | ||
863 | case CPU_UP_CANCELED_FROZEN: | ||
860 | if (!pinst_has_cpu(pinst, cpu)) | 864 | if (!pinst_has_cpu(pinst, cpu)) |
861 | break; | 865 | break; |
862 | mutex_lock(&pinst->lock); | 866 | mutex_lock(&pinst->lock); |
@@ -865,22 +869,6 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
865 | if (err) | 869 | if (err) |
866 | return notifier_from_errno(err); | 870 | return notifier_from_errno(err); |
867 | break; | 871 | break; |
868 | |||
869 | case CPU_UP_CANCELED: | ||
870 | case CPU_UP_CANCELED_FROZEN: | ||
871 | if (!pinst_has_cpu(pinst, cpu)) | ||
872 | break; | ||
873 | mutex_lock(&pinst->lock); | ||
874 | __padata_remove_cpu(pinst, cpu); | ||
875 | mutex_unlock(&pinst->lock); | ||
876 | |||
877 | case CPU_DOWN_FAILED: | ||
878 | case CPU_DOWN_FAILED_FROZEN: | ||
879 | if (!pinst_has_cpu(pinst, cpu)) | ||
880 | break; | ||
881 | mutex_lock(&pinst->lock); | ||
882 | __padata_add_cpu(pinst, cpu); | ||
883 | mutex_unlock(&pinst->lock); | ||
884 | } | 872 | } |
885 | 873 | ||
886 | return NOTIFY_OK; | 874 | return NOTIFY_OK; |
@@ -1086,18 +1074,18 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq, | |||
1086 | 1074 | ||
1087 | pinst->flags = 0; | 1075 | pinst->flags = 0; |
1088 | 1076 | ||
1089 | #ifdef CONFIG_HOTPLUG_CPU | ||
1090 | pinst->cpu_notifier.notifier_call = padata_cpu_callback; | ||
1091 | pinst->cpu_notifier.priority = 0; | ||
1092 | register_hotcpu_notifier(&pinst->cpu_notifier); | ||
1093 | #endif | ||
1094 | |||
1095 | put_online_cpus(); | 1077 | put_online_cpus(); |
1096 | 1078 | ||
1097 | BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); | 1079 | BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); |
1098 | kobject_init(&pinst->kobj, &padata_attr_type); | 1080 | kobject_init(&pinst->kobj, &padata_attr_type); |
1099 | mutex_init(&pinst->lock); | 1081 | mutex_init(&pinst->lock); |
1100 | 1082 | ||
1083 | #ifdef CONFIG_HOTPLUG_CPU | ||
1084 | pinst->cpu_notifier.notifier_call = padata_cpu_callback; | ||
1085 | pinst->cpu_notifier.priority = 0; | ||
1086 | register_hotcpu_notifier(&pinst->cpu_notifier); | ||
1087 | #endif | ||
1088 | |||
1101 | return pinst; | 1089 | return pinst; |
1102 | 1090 | ||
1103 | err_free_masks: | 1091 | err_free_masks: |
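Two distinct fixes land in padata.c. First, the hotplug callback folds CPU_DOWN_FAILED into the CPU_ONLINE add path and CPU_UP_CANCELED into the CPU_DOWN_PREPARE remove path; the removed version handled them separately, and its CPU_UP_CANCELED case even fell through into CPU_DOWN_FAILED without a break, re-adding the CPU it had just removed. Second, padata_alloc() now registers the notifier only after the instance's mutex, kobject and notifier head are initialized, so a hotplug event can no longer observe a half-constructed pinst. Reconstructed from the hunks above, the merged switch in padata_cpu_callback() reduces to:

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		/* CPU is (still) usable: add it to the parallel masks. */
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		/* CPU is going away (or never arrived): remove it. */
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;
	}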
diff --git a/lib/Kconfig b/lib/Kconfig index 65561716c16c..b3c8be0da17f 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -76,6 +76,8 @@ config CRC16 | |||
76 | 76 | ||
77 | config CRC_T10DIF | 77 | config CRC_T10DIF |
78 | tristate "CRC calculation for the T10 Data Integrity Field" | 78 | tristate "CRC calculation for the T10 Data Integrity Field" |
79 | select CRYPTO | ||
80 | select CRYPTO_CRCT10DIF | ||
79 | help | 81 | help |
80 | This option is only needed if a module that's not in the | 82 | This option is only needed if a module that's not in the |
81 | kernel tree needs to calculate CRC checks for use with the | 83 | kernel tree needs to calculate CRC checks for use with the |
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index fbbd66ed86cd..43bc5b071f96 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c | |||
@@ -11,57 +11,45 @@ | |||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/crc-t10dif.h> | 13 | #include <linux/crc-t10dif.h> |
14 | #include <linux/err.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <crypto/hash.h> | ||
14 | 17 | ||
15 | /* Table generated using the following polynomial: | 18 | static struct crypto_shash *crct10dif_tfm;
16 | * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 | ||
17 | * gt: 0x8bb7 | ||
18 | */ | ||
19 | static const __u16 t10_dif_crc_table[256] = { | ||
20 | 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, | ||
21 | 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, | ||
22 | 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, | ||
23 | 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, | ||
24 | 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, | ||
25 | 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, | ||
26 | 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, | ||
27 | 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, | ||
28 | 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, | ||
29 | 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, | ||
30 | 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, | ||
31 | 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, | ||
32 | 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, | ||
33 | 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, | ||
34 | 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, | ||
35 | 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, | ||
36 | 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, | ||
37 | 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, | ||
38 | 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, | ||
39 | 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, | ||
40 | 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, | ||
41 | 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, | ||
42 | 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, | ||
43 | 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, | ||
44 | 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, | ||
45 | 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, | ||
46 | 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, | ||
47 | 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, | ||
48 | 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, | ||
49 | 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, | ||
50 | 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, | ||
51 | 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 | ||
52 | }; | ||
53 | 19 | ||
54 | __u16 crc_t10dif(const unsigned char *buffer, size_t len) | 20 | __u16 crc_t10dif(const unsigned char *buffer, size_t len) |
55 | { | 21 | { |
56 | __u16 crc = 0; | 22 | struct { |
57 | unsigned int i; | 23 | struct shash_desc shash; |
24 | char ctx[2]; | ||
25 | } desc; | ||
26 | int err; | ||
27 | |||
28 | desc.shash.tfm = crct10dif_tfm; | ||
29 | desc.shash.flags = 0; | ||
30 | *(__u16 *)desc.ctx = 0; | ||
58 | 31 | ||
59 | for (i = 0 ; i < len ; i++) | 32 | err = crypto_shash_update(&desc.shash, buffer, len); |
60 | crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; | 33 | BUG_ON(err); |
61 | 34 | ||
62 | return crc; | 35 | return *(__u16 *)desc.ctx; |
63 | } | 36 | } |
64 | EXPORT_SYMBOL(crc_t10dif); | 37 | EXPORT_SYMBOL(crc_t10dif); |
65 | 38 | ||
39 | static int __init crc_t10dif_mod_init(void) | ||
40 | { | ||
41 | crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); | ||
42 | return PTR_RET(crct10dif_tfm); | ||
43 | } | ||
44 | |||
45 | static void __exit crc_t10dif_mod_fini(void) | ||
46 | { | ||
47 | crypto_free_shash(crct10dif_tfm); | ||
48 | } | ||
49 | |||
50 | module_init(crc_t10dif_mod_init); | ||
51 | module_exit(crc_t10dif_mod_fini); | ||
52 | |||
66 | MODULE_DESCRIPTION("T10 DIF CRC calculation"); | 53 | MODULE_DESCRIPTION("T10 DIF CRC calculation"); |
67 | MODULE_LICENSE("GPL"); | 54 | MODULE_LICENSE("GPL"); |
55 | MODULE_SOFTDEP("pre: crct10dif"); | ||
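This is the library half of the reinstated crct10dif work: lib/crc-t10dif.c drops its private lookup table and drives the "crct10dif" shash instead, so an accelerated provider is used transparently when one is registered, while the table lives on as crc_t10dif_generic() behind the generic transform. Note the one-shot descriptor trick visible above: the 2-byte CRC state sits directly in desc.ctx on the stack, is zeroed by hand instead of via init(), and is read back after update() without a final(), which works because this transform's internal state is the digest. The new selects of CRYPTO and CRYPTO_CRCT10DIF in lib/Kconfig, plus MODULE_SOFTDEP("pre: crct10dif"), ensure a provider exists before the first call. Callers see no change; a minimal usage sketch (the 512-byte buffer is illustrative):

#include <linux/crc-t10dif.h>

static __u16 crc_of_sector(const unsigned char *sector)
{
	/* Same API as the old table-driven version; now backed by
	 * whichever "crct10dif" transform the crypto layer selected. */
	return crc_t10dif(sector, 512);
}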