68 files changed, 14289 insertions, 1842 deletions
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 977127368a7d..ef6cedd52e3c 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c | |||
| @@ -183,6 +183,13 @@ static struct clk adc_op_clk = { | |||
| 183 | .rate_hz = 13200000, | 183 | .rate_hz = 13200000, |
| 184 | }; | 184 | }; |
| 185 | 185 | ||
| 186 | /* AES/TDES/SHA clock - Only for sam9m11/sam9g46 */ ||
| 187 | static struct clk aestdessha_clk = { | ||
| 188 | .name = "aestdessha_clk", | ||
| 189 | .pmc_mask = 1 << AT91SAM9G45_ID_AESTDESSHA, | ||
| 190 | .type = CLK_TYPE_PERIPHERAL, | ||
| 191 | }; | ||
| 192 | |||
| 186 | static struct clk *periph_clocks[] __initdata = { | 193 | static struct clk *periph_clocks[] __initdata = { |
| 187 | &pioA_clk, | 194 | &pioA_clk, |
| 188 | &pioB_clk, | 195 | &pioB_clk, |
| @@ -212,6 +219,7 @@ static struct clk *periph_clocks[] __initdata = { | |||
| 212 | &udphs_clk, | 219 | &udphs_clk, |
| 213 | &mmc1_clk, | 220 | &mmc1_clk, |
| 214 | &adc_op_clk, | 221 | &adc_op_clk, |
| 222 | &aestdessha_clk, | ||
| 215 | // irq0 | 223 | // irq0 |
| 216 | }; | 224 | }; |
| 217 | 225 | ||
| @@ -232,6 +240,9 @@ static struct clk_lookup periph_clocks_lookups[] = { | |||
| 232 | CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), | 240 | CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), |
| 233 | CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), | 241 | CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), |
| 234 | CLKDEV_CON_DEV_ID(NULL, "atmel-trng", &trng_clk), | 242 | CLKDEV_CON_DEV_ID(NULL, "atmel-trng", &trng_clk), |
| 243 | CLKDEV_CON_DEV_ID(NULL, "atmel_sha", &aestdessha_clk), | ||
| 244 | CLKDEV_CON_DEV_ID(NULL, "atmel_tdes", &aestdessha_clk), | ||
| 245 | CLKDEV_CON_DEV_ID(NULL, "atmel_aes", &aestdessha_clk), | ||
| 235 | /* more usart lookup table for DT entries */ | 246 | /* more usart lookup table for DT entries */ |
| 236 | CLKDEV_CON_DEV_ID("usart", "ffffee00.serial", &mck), | 247 | CLKDEV_CON_DEV_ID("usart", "ffffee00.serial", &mck), |
| 237 | CLKDEV_CON_DEV_ID("usart", "fff8c000.serial", &usart0_clk), | 248 | CLKDEV_CON_DEV_ID("usart", "fff8c000.serial", &usart0_clk), |
| @@ -388,7 +399,7 @@ static unsigned int at91sam9g45_default_irq_priority[NR_AIC_IRQS] __initdata = { | |||
| 388 | 3, /* Ethernet */ | 399 | 3, /* Ethernet */ |
| 389 | 0, /* Image Sensor Interface */ | 400 | 0, /* Image Sensor Interface */ |
| 390 | 2, /* USB Device High speed port */ | 401 | 2, /* USB Device High speed port */ |
| 391 | 0, | 402 | 0, /* AESTDESSHA Crypto HW Accelerators */ |
| 392 | 0, /* Multimedia Card Interface 1 */ | 403 | 0, /* Multimedia Card Interface 1 */ |
| 393 | 0, | 404 | 0, |
| 394 | 0, /* Advanced Interrupt Controller (IRQ0) */ | 405 | 0, /* Advanced Interrupt Controller (IRQ0) */ |
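
The three CLKDEV_CON_DEV_ID(NULL, ..., &aestdessha_clk) entries above bind one peripheral clock to three device names, so each crypto driver can look the clock up independently while the PMC gates a single clock bit. A minimal sketch of the consuming side, assuming the clk API of this kernel generation (the function name is illustrative, not taken from the real driver):

        #include <linux/clk.h>
        #include <linux/platform_device.h>

        static int atmel_sha_clk_example(struct platform_device *pdev)
        {
                struct clk *iclk;

                /* NULL con_id + dev name "atmel_sha" matches the lookup above */
                iclk = clk_get(&pdev->dev, NULL);
                if (IS_ERR(iclk))
                        return PTR_ERR(iclk);

                clk_enable(iclk);       /* ungates PMC bit AT91SAM9G45_ID_AESTDESSHA */
                /* ... program the peripheral ... */
                clk_disable(iclk);
                clk_put(iclk);
                return 0;
        }

Because all three lookups resolve to the same struct clk, the shared clock stays enabled as long as any of the three drivers holds an enable count.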
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 40fb79df2de0..06073996a382 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
| 19 | #include <linux/i2c-gpio.h> | 19 | #include <linux/i2c-gpio.h> |
| 20 | #include <linux/atmel-mci.h> | 20 | #include <linux/atmel-mci.h> |
| 21 | #include <linux/platform_data/atmel-aes.h> | ||
| 21 | 22 | ||
| 22 | #include <linux/platform_data/at91_adc.h> | 23 | #include <linux/platform_data/at91_adc.h> |
| 23 | 24 | ||
| @@ -1830,6 +1831,130 @@ void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {} | |||
| 1830 | void __init at91_add_device_serial(void) {} | 1831 | void __init at91_add_device_serial(void) {} |
| 1831 | #endif | 1832 | #endif |
| 1832 | 1833 | ||
| 1834 | /* -------------------------------------------------------------------- | ||
| 1835 | * SHA1/SHA256 | ||
| 1836 | * -------------------------------------------------------------------- */ | ||
| 1837 | |||
| 1838 | #if defined(CONFIG_CRYPTO_DEV_ATMEL_SHA) || defined(CONFIG_CRYPTO_DEV_ATMEL_SHA_MODULE) | ||
| 1839 | static struct resource sha_resources[] = { | ||
| 1840 | [0] = { ||
| 1841 | .start = AT91SAM9G45_BASE_SHA, | ||
| 1842 | .end = AT91SAM9G45_BASE_SHA + SZ_16K - 1, | ||
| 1843 | .flags = IORESOURCE_MEM, | ||
| 1844 | }, | ||
| 1845 | [1] = { | ||
| 1846 | .start = AT91SAM9G45_ID_AESTDESSHA, | ||
| 1847 | .end = AT91SAM9G45_ID_AESTDESSHA, | ||
| 1848 | .flags = IORESOURCE_IRQ, | ||
| 1849 | }, | ||
| 1850 | }; | ||
| 1851 | |||
| 1852 | static struct platform_device at91sam9g45_sha_device = { | ||
| 1853 | .name = "atmel_sha", | ||
| 1854 | .id = -1, | ||
| 1855 | .resource = sha_resources, | ||
| 1856 | .num_resources = ARRAY_SIZE(sha_resources), | ||
| 1857 | }; | ||
| 1858 | |||
| 1859 | static void __init at91_add_device_sha(void) | ||
| 1860 | { | ||
| 1861 | platform_device_register(&at91sam9g45_sha_device); | ||
| 1862 | } | ||
| 1863 | #else | ||
| 1864 | static void __init at91_add_device_sha(void) {} | ||
| 1865 | #endif | ||
| 1866 | |||
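
The #if defined(CONFIG_...) || defined(CONFIG_..._MODULE) guard compiles the device registration in when the driver is built in or built as a module, and falls back to an empty stub otherwise. Kernels of this vintage already have a more compact equivalent in <linux/kconfig.h>, shown here for reference:

        #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_SHA)     /* true for both =y and =m */
        /* ... resources, platform device, at91_add_device_sha() ... */
        #else
        static void __init at91_add_device_sha(void) {}
        #endif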
| 1867 | /* -------------------------------------------------------------------- | ||
| 1868 | * DES/TDES | ||
| 1869 | * -------------------------------------------------------------------- */ | ||
| 1870 | |||
| 1871 | #if defined(CONFIG_CRYPTO_DEV_ATMEL_TDES) || defined(CONFIG_CRYPTO_DEV_ATMEL_TDES_MODULE) | ||
| 1872 | static struct resource tdes_resources[] = { | ||
| 1873 | [0] = { | ||
| 1874 | .start = AT91SAM9G45_BASE_TDES, | ||
| 1875 | .end = AT91SAM9G45_BASE_TDES + SZ_16K - 1, | ||
| 1876 | .flags = IORESOURCE_MEM, | ||
| 1877 | }, | ||
| 1878 | [1] = { | ||
| 1879 | .start = AT91SAM9G45_ID_AESTDESSHA, | ||
| 1880 | .end = AT91SAM9G45_ID_AESTDESSHA, | ||
| 1881 | .flags = IORESOURCE_IRQ, | ||
| 1882 | }, | ||
| 1883 | }; | ||
| 1884 | |||
| 1885 | static struct platform_device at91sam9g45_tdes_device = { | ||
| 1886 | .name = "atmel_tdes", | ||
| 1887 | .id = -1, | ||
| 1888 | .resource = tdes_resources, | ||
| 1889 | .num_resources = ARRAY_SIZE(tdes_resources), | ||
| 1890 | }; | ||
| 1891 | |||
| 1892 | static void __init at91_add_device_tdes(void) | ||
| 1893 | { | ||
| 1894 | platform_device_register(&at91sam9g45_tdes_device); | ||
| 1895 | } | ||
| 1896 | #else | ||
| 1897 | static void __init at91_add_device_tdes(void) {} | ||
| 1898 | #endif | ||
| 1899 | |||
| 1900 | /* -------------------------------------------------------------------- | ||
| 1901 | * AES | ||
| 1902 | * -------------------------------------------------------------------- */ | ||
| 1903 | |||
| 1904 | #if defined(CONFIG_CRYPTO_DEV_ATMEL_AES) || defined(CONFIG_CRYPTO_DEV_ATMEL_AES_MODULE) | ||
| 1905 | static struct aes_platform_data aes_data; | ||
| 1906 | static u64 aes_dmamask = DMA_BIT_MASK(32); | ||
| 1907 | |||
| 1908 | static struct resource aes_resources[] = { | ||
| 1909 | [0] = { | ||
| 1910 | .start = AT91SAM9G45_BASE_AES, | ||
| 1911 | .end = AT91SAM9G45_BASE_AES + SZ_16K - 1, | ||
| 1912 | .flags = IORESOURCE_MEM, | ||
| 1913 | }, | ||
| 1914 | [1] = { | ||
| 1915 | .start = AT91SAM9G45_ID_AESTDESSHA, | ||
| 1916 | .end = AT91SAM9G45_ID_AESTDESSHA, | ||
| 1917 | .flags = IORESOURCE_IRQ, | ||
| 1918 | }, | ||
| 1919 | }; | ||
| 1920 | |||
| 1921 | static struct platform_device at91sam9g45_aes_device = { | ||
| 1922 | .name = "atmel_aes", | ||
| 1923 | .id = -1, | ||
| 1924 | .dev = { | ||
| 1925 | .dma_mask = &aes_dmamask, | ||
| 1926 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
| 1927 | .platform_data = &aes_data, | ||
| 1928 | }, | ||
| 1929 | .resource = aes_resources, | ||
| 1930 | .num_resources = ARRAY_SIZE(aes_resources), | ||
| 1931 | }; | ||
| 1932 | |||
| 1933 | static void __init at91_add_device_aes(void) | ||
| 1934 | { | ||
| 1935 | struct at_dma_slave *atslave; | ||
| 1936 | struct aes_dma_data *alt_atslave; | ||
| 1937 | |||
| 1938 | alt_atslave = kzalloc(sizeof(struct aes_dma_data), GFP_KERNEL); | ||
| 1939 | |||
| 1940 | /* DMA TX slave channel configuration */ | ||
| 1941 | atslave = &alt_atslave->txdata; | ||
| 1942 | atslave->dma_dev = &at_hdmac_device.dev; | ||
| 1943 | atslave->cfg = ATC_FIFOCFG_ENOUGHSPACE | ATC_SRC_H2SEL_HW | | ||
| 1944 | ATC_SRC_PER(AT_DMA_ID_AES_RX); | ||
| 1945 | |||
| 1946 | /* DMA RX slave channel configuration */ | ||
| 1947 | atslave = &alt_atslave->rxdata; | ||
| 1948 | atslave->dma_dev = &at_hdmac_device.dev; | ||
| 1949 | atslave->cfg = ATC_FIFOCFG_ENOUGHSPACE | ATC_DST_H2SEL_HW | | ||
| 1950 | ATC_DST_PER(AT_DMA_ID_AES_TX); | ||
| 1951 | |||
| 1952 | aes_data.dma_slave = alt_atslave; | ||
| 1953 | platform_device_register(&at91sam9g45_aes_device); | ||
| 1954 | } | ||
| 1955 | #else | ||
| 1956 | static void __init at91_add_device_aes(void) {} | ||
| 1957 | #endif | ||
| 1833 | 1958 | ||
| 1834 | /* -------------------------------------------------------------------- */ | 1959 | /* -------------------------------------------------------------------- */ |
| 1835 | /* | 1960 | /* |
| @@ -1847,6 +1972,9 @@ static int __init at91_add_standard_devices(void) | |||
| 1847 | at91_add_device_trng(); | 1972 | at91_add_device_trng(); |
| 1848 | at91_add_device_watchdog(); | 1973 | at91_add_device_watchdog(); |
| 1849 | at91_add_device_tc(); | 1974 | at91_add_device_tc(); |
| 1975 | at91_add_device_sha(); | ||
| 1976 | at91_add_device_tdes(); | ||
| 1977 | at91_add_device_aes(); | ||
| 1850 | return 0; | 1978 | return 0; |
| 1851 | } | 1979 | } |
| 1852 | 1980 | ||
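
Each of the three sections above follows the same contract: one MEM resource carrying the controller's register window and one IRQ resource carrying the shared AESTDESSHA interrupt ID. A sketch of how a probe routine consumes them (names are illustrative; the real atmel_sha/atmel_tdes/atmel_aes probes differ in detail):

        static int example_crypto_probe(struct platform_device *pdev)
        {
                struct resource *res;
                void __iomem *io_base;
                int irq;

                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                if (!res)
                        return -ENODEV;

                irq = platform_get_irq(pdev, 0);  /* AT91SAM9G45_ID_AESTDESSHA */
                if (irq < 0)
                        return irq;

                io_base = ioremap(res->start, resource_size(res));
                if (!io_base)
                        return -ENOMEM;

                /* ... clk_get(), request_irq(), register algorithms ... */
                return 0;
        }

Since all three controllers sit behind one AIC line, a driver requesting the interrupt would need IRQF_SHARED if the others can be loaded at the same time.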
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45.h b/arch/arm/mach-at91/include/mach/at91sam9g45.h index 3a4da24d5911..8eba1021f533 100644 --- a/arch/arm/mach-at91/include/mach/at91sam9g45.h +++ b/arch/arm/mach-at91/include/mach/at91sam9g45.h | |||
| @@ -136,6 +136,8 @@ | |||
| 136 | #define AT_DMA_ID_SSC1_RX 8 | 136 | #define AT_DMA_ID_SSC1_RX 8 |
| 137 | #define AT_DMA_ID_AC97_TX 9 | 137 | #define AT_DMA_ID_AC97_TX 9 |
| 138 | #define AT_DMA_ID_AC97_RX 10 | 138 | #define AT_DMA_ID_AC97_RX 10 |
| 139 | #define AT_DMA_ID_AES_TX 11 | ||
| 140 | #define AT_DMA_ID_AES_RX 12 | ||
| 139 | #define AT_DMA_ID_MCI1 13 | 141 | #define AT_DMA_ID_MCI1 13 |
| 140 | 142 | ||
| 141 | #endif | 143 | #endif |
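
AT_DMA_ID_AES_TX/RX are at_hdmac handshake interface numbers; at91_add_device_aes() above folds them into the cfg words via ATC_SRC_PER()/ATC_DST_PER(). A driver then claims a matching channel by handing the prepared struct at_dma_slave to dmaengine through a filter callback, the usual at_hdmac contract of this era (a hedged sketch; identifiers other than the dmaengine API are illustrative):

        #include <linux/dmaengine.h>
        #include <mach/at_hdmac.h>      /* struct at_dma_slave */

        static bool at_dma_filter(struct dma_chan *chan, void *slave)
        {
                struct at_dma_slave *sl = slave;

                if (sl && sl->dma_dev == chan->device->dev) {
                        chan->private = sl;     /* at_hdmac reads cfg from here */
                        return true;
                }
                return false;
        }

        static struct dma_chan *example_request_rx(struct aes_platform_data *pdata)
        {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                return dma_request_channel(mask, at_dma_filter,
                                           &pdata->dma_slave->rxdata);
        }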
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 950d1f7a5a39..159e94f4b22a 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile | |||
| @@ -149,7 +149,6 @@ core-$(CONFIG_KVM) += arch/powerpc/kvm/ | |||
| 149 | core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/ | 149 | core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/ |
| 150 | 150 | ||
| 151 | drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ | 151 | drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ |
| 152 | drivers-$(CONFIG_CRYPTO_DEV_NX) += drivers/crypto/nx/ | ||
| 153 | 152 | ||
| 154 | # Default to zImage, override when needed | 153 | # Default to zImage, override when needed |
| 155 | all: zImage | 154 | all: zImage |
diff --git a/arch/s390/crypto/crypto_des.h b/arch/s390/crypto/crypto_des.h deleted file mode 100644 index 6210457ceebb..000000000000 --- a/arch/s390/crypto/crypto_des.h +++ /dev/null | |||
| @@ -1,18 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Cryptographic API. | ||
| 3 | * | ||
| 4 | * Function for checking keys for the DES and Triple DES Encryption ||
| 5 | * algorithms. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | #ifndef __CRYPTO_DES_H__ | ||
| 14 | #define __CRYPTO_DES_H__ | ||
| 15 | |||
| 16 | extern int crypto_des_check_key(const u8*, unsigned int, u32*); | ||
| 17 | |||
| 18 | #endif /*__CRYPTO_DES_H__*/ | ||
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index e191ac048b59..e908e5de82d3 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile | |||
| @@ -2,6 +2,9 @@ | |||
| 2 | # Arch-specific CryptoAPI modules. | 2 | # Arch-specific CryptoAPI modules. |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o | ||
| 6 | obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o | ||
| 7 | |||
| 5 | obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o | 8 | obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o |
| 6 | obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o | 9 | obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o |
| 7 | obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o | 10 | obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o |
| @@ -12,8 +15,10 @@ obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o | |||
| 12 | obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o | 15 | obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o |
| 13 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o | 16 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o |
| 14 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o | 17 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o |
| 18 | obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o | ||
| 15 | obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o | 19 | obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o |
| 16 | obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o | 20 | obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o |
| 21 | obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o | ||
| 17 | obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o | 22 | obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o |
| 18 | obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o | 23 | obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o |
| 19 | 24 | ||
| @@ -30,16 +35,11 @@ camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o | |||
| 30 | blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o | 35 | blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o |
| 31 | twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o | 36 | twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o |
| 32 | twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o | 37 | twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o |
| 38 | twofish-avx-x86_64-y := twofish-avx-x86_64-asm_64.o twofish_avx_glue.o | ||
| 33 | salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o | 39 | salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o |
| 34 | serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o | 40 | serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o |
| 41 | serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o | ||
| 35 | 42 | ||
| 36 | aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o | 43 | aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o |
| 37 | |||
| 38 | ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o | 44 | ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o |
| 39 | |||
| 40 | # enable AVX support only when $(AS) can actually assemble the instructions | ||
| 41 | ifeq ($(call as-instr,vpxor %xmm0$(comma)%xmm1$(comma)%xmm2,yes,no),yes) | ||
| 42 | AFLAGS_sha1_ssse3_asm.o += -DSHA1_ENABLE_AVX_SUPPORT | ||
| 43 | CFLAGS_sha1_ssse3_glue.o += -DSHA1_ENABLE_AVX_SUPPORT | ||
| 44 | endif | ||
| 45 | sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o | 45 | sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o |
diff --git a/arch/x86/crypto/ablk_helper.c b/arch/x86/crypto/ablk_helper.c new file mode 100644 index 000000000000..43282fe04a8b --- /dev/null +++ b/arch/x86/crypto/ablk_helper.c | |||
| @@ -0,0 +1,149 @@ | |||
| 1 | /* | ||
| 2 | * Shared async block cipher helpers | ||
| 3 | * | ||
| 4 | * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | ||
| 5 | * | ||
| 6 | * Based on aesni-intel_glue.c by: | ||
| 7 | * Copyright (C) 2008, Intel Corp. | ||
| 8 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or | ||
| 13 | * (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software | ||
| 22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
| 23 | * USA | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/kernel.h> | ||
| 28 | #include <linux/crypto.h> | ||
| 29 | #include <linux/init.h> | ||
| 30 | #include <linux/module.h> | ||
| 31 | #include <crypto/algapi.h> | ||
| 32 | #include <crypto/cryptd.h> | ||
| 33 | #include <asm/i387.h> | ||
| 34 | #include <asm/crypto/ablk_helper.h> | ||
| 35 | |||
| 36 | int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 37 | unsigned int key_len) | ||
| 38 | { | ||
| 39 | struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 40 | struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; | ||
| 41 | int err; | ||
| 42 | |||
| 43 | crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
| 44 | crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) | ||
| 45 | & CRYPTO_TFM_REQ_MASK); | ||
| 46 | err = crypto_ablkcipher_setkey(child, key, key_len); | ||
| 47 | crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) | ||
| 48 | & CRYPTO_TFM_RES_MASK); | ||
| 49 | return err; | ||
| 50 | } | ||
| 51 | EXPORT_SYMBOL_GPL(ablk_set_key); | ||
| 52 | |||
| 53 | int __ablk_encrypt(struct ablkcipher_request *req) | ||
| 54 | { | ||
| 55 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
| 56 | struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 57 | struct blkcipher_desc desc; | ||
| 58 | |||
| 59 | desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); | ||
| 60 | desc.info = req->info; | ||
| 61 | desc.flags = 0; | ||
| 62 | |||
| 63 | return crypto_blkcipher_crt(desc.tfm)->encrypt( | ||
| 64 | &desc, req->dst, req->src, req->nbytes); | ||
| 65 | } | ||
| 66 | EXPORT_SYMBOL_GPL(__ablk_encrypt); | ||
| 67 | |||
| 68 | int ablk_encrypt(struct ablkcipher_request *req) | ||
| 69 | { | ||
| 70 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
| 71 | struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 72 | |||
| 73 | if (!irq_fpu_usable()) { | ||
| 74 | struct ablkcipher_request *cryptd_req = | ||
| 75 | ablkcipher_request_ctx(req); | ||
| 76 | |||
| 77 | memcpy(cryptd_req, req, sizeof(*req)); | ||
| 78 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | ||
| 79 | |||
| 80 | return crypto_ablkcipher_encrypt(cryptd_req); | ||
| 81 | } else { | ||
| 82 | return __ablk_encrypt(req); | ||
| 83 | } | ||
| 84 | } | ||
| 85 | EXPORT_SYMBOL_GPL(ablk_encrypt); | ||
| 86 | |||
| 87 | int ablk_decrypt(struct ablkcipher_request *req) | ||
| 88 | { | ||
| 89 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
| 90 | struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 91 | |||
| 92 | if (!irq_fpu_usable()) { | ||
| 93 | struct ablkcipher_request *cryptd_req = | ||
| 94 | ablkcipher_request_ctx(req); | ||
| 95 | |||
| 96 | memcpy(cryptd_req, req, sizeof(*req)); | ||
| 97 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | ||
| 98 | |||
| 99 | return crypto_ablkcipher_decrypt(cryptd_req); | ||
| 100 | } else { | ||
| 101 | struct blkcipher_desc desc; | ||
| 102 | |||
| 103 | desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); | ||
| 104 | desc.info = req->info; | ||
| 105 | desc.flags = 0; | ||
| 106 | |||
| 107 | return crypto_blkcipher_crt(desc.tfm)->decrypt( | ||
| 108 | &desc, req->dst, req->src, req->nbytes); | ||
| 109 | } | ||
| 110 | } | ||
| 111 | EXPORT_SYMBOL_GPL(ablk_decrypt); | ||
| 112 | |||
| 113 | void ablk_exit(struct crypto_tfm *tfm) | ||
| 114 | { | ||
| 115 | struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 116 | |||
| 117 | cryptd_free_ablkcipher(ctx->cryptd_tfm); | ||
| 118 | } | ||
| 119 | EXPORT_SYMBOL_GPL(ablk_exit); | ||
| 120 | |||
| 121 | int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name) | ||
| 122 | { | ||
| 123 | struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 124 | struct cryptd_ablkcipher *cryptd_tfm; | ||
| 125 | |||
| 126 | cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); | ||
| 127 | if (IS_ERR(cryptd_tfm)) | ||
| 128 | return PTR_ERR(cryptd_tfm); | ||
| 129 | |||
| 130 | ctx->cryptd_tfm = cryptd_tfm; | ||
| 131 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + | ||
| 132 | crypto_ablkcipher_reqsize(&cryptd_tfm->base); | ||
| 133 | |||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | EXPORT_SYMBOL_GPL(ablk_init_common); | ||
| 137 | |||
| 138 | int ablk_init(struct crypto_tfm *tfm) | ||
| 139 | { | ||
| 140 | char drv_name[CRYPTO_MAX_ALG_NAME]; | ||
| 141 | |||
| 142 | snprintf(drv_name, sizeof(drv_name), "__driver-%s", | ||
| 143 | crypto_tfm_alg_driver_name(tfm)); | ||
| 144 | |||
| 145 | return ablk_init_common(tfm, drv_name); | ||
| 146 | } | ||
| 147 | EXPORT_SYMBOL_GPL(ablk_init); | ||
| 148 | |||
| 149 | MODULE_LICENSE("GPL"); | ||
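
With these helpers exported, every x86 cipher module that wraps an FPU-only implementation can declare its outward-facing async algorithm without duplicating the cryptd plumbing. A sketch of such a declaration for a placeholder cipher "foo" (modeled on the aesni entries this series converts; all foo-* names are hypothetical):

        static struct crypto_alg foo_cbc_alg = {
                .cra_name               = "cbc(foo)",
                .cra_driver_name        = "cbc-foo-avx",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 16,
                .cra_ctxsize            = sizeof(struct async_helper_ctx),
                .cra_alignmask          = 0,
                .cra_type               = &crypto_ablkcipher_type,
                .cra_module             = THIS_MODULE,
                .cra_init               = ablk_init,    /* looks up "__driver-cbc-foo-avx" */
                .cra_exit               = ablk_exit,
                .cra_u = {
                        .ablkcipher = {
                                .min_keysize    = 16,
                                .max_keysize    = 32,
                                .ivsize         = 16,
                                .setkey         = ablk_set_key,
                                .encrypt        = ablk_encrypt,
                                .decrypt        = ablk_decrypt,
                        },
                },
        };

ablk_init() derives the "__driver-" name from cra_driver_name, so the module must also register the internal synchronous "__driver-cbc-foo-avx" blkcipher that cryptd will drive.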
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c index 8efcf42a9d7e..59b37deb8c8d 100644 --- a/arch/x86/crypto/aes_glue.c +++ b/arch/x86/crypto/aes_glue.c | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
| 7 | #include <crypto/aes.h> | 7 | #include <crypto/aes.h> |
| 8 | #include <asm/aes.h> | 8 | #include <asm/crypto/aes.h> |
| 9 | 9 | ||
| 10 | asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); | 10 | asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); |
| 11 | asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); | 11 | asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index ac7f5cd019e8..34fdcff4d2c8 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
| @@ -30,7 +30,8 @@ | |||
| 30 | #include <crypto/ctr.h> | 30 | #include <crypto/ctr.h> |
| 31 | #include <asm/cpu_device_id.h> | 31 | #include <asm/cpu_device_id.h> |
| 32 | #include <asm/i387.h> | 32 | #include <asm/i387.h> |
| 33 | #include <asm/aes.h> | 33 | #include <asm/crypto/aes.h> |
| 34 | #include <asm/crypto/ablk_helper.h> | ||
| 34 | #include <crypto/scatterwalk.h> | 35 | #include <crypto/scatterwalk.h> |
| 35 | #include <crypto/internal/aead.h> | 36 | #include <crypto/internal/aead.h> |
| 36 | #include <linux/workqueue.h> | 37 | #include <linux/workqueue.h> |
| @@ -52,10 +53,6 @@ | |||
| 52 | #define HAS_XTS | 53 | #define HAS_XTS |
| 53 | #endif | 54 | #endif |
| 54 | 55 | ||
| 55 | struct async_aes_ctx { | ||
| 56 | struct cryptd_ablkcipher *cryptd_tfm; | ||
| 57 | }; | ||
| 58 | |||
| 59 | /* This data is stored at the end of the crypto_tfm struct. | 56 | /* This data is stored at the end of the crypto_tfm struct. |
| 60 | * It's a type of per "session" data storage location. | 57 | * It's a type of per "session" data storage location. |
| 61 | * This needs to be 16 byte aligned. | 58 | * This needs to be 16 byte aligned. |
| @@ -377,87 +374,6 @@ static int ctr_crypt(struct blkcipher_desc *desc, | |||
| 377 | } | 374 | } |
| 378 | #endif | 375 | #endif |
| 379 | 376 | ||
| 380 | static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 381 | unsigned int key_len) | ||
| 382 | { | ||
| 383 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 384 | struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; | ||
| 385 | int err; | ||
| 386 | |||
| 387 | crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
| 388 | crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) | ||
| 389 | & CRYPTO_TFM_REQ_MASK); | ||
| 390 | err = crypto_ablkcipher_setkey(child, key, key_len); | ||
| 391 | crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) | ||
| 392 | & CRYPTO_TFM_RES_MASK); | ||
| 393 | return err; | ||
| 394 | } | ||
| 395 | |||
| 396 | static int ablk_encrypt(struct ablkcipher_request *req) | ||
| 397 | { | ||
| 398 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
| 399 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 400 | |||
| 401 | if (!irq_fpu_usable()) { | ||
| 402 | struct ablkcipher_request *cryptd_req = | ||
| 403 | ablkcipher_request_ctx(req); | ||
| 404 | memcpy(cryptd_req, req, sizeof(*req)); | ||
| 405 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | ||
| 406 | return crypto_ablkcipher_encrypt(cryptd_req); | ||
| 407 | } else { | ||
| 408 | struct blkcipher_desc desc; | ||
| 409 | desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); | ||
| 410 | desc.info = req->info; | ||
| 411 | desc.flags = 0; | ||
| 412 | return crypto_blkcipher_crt(desc.tfm)->encrypt( | ||
| 413 | &desc, req->dst, req->src, req->nbytes); | ||
| 414 | } | ||
| 415 | } | ||
| 416 | |||
| 417 | static int ablk_decrypt(struct ablkcipher_request *req) | ||
| 418 | { | ||
| 419 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
| 420 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 421 | |||
| 422 | if (!irq_fpu_usable()) { | ||
| 423 | struct ablkcipher_request *cryptd_req = | ||
| 424 | ablkcipher_request_ctx(req); | ||
| 425 | memcpy(cryptd_req, req, sizeof(*req)); | ||
| 426 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | ||
| 427 | return crypto_ablkcipher_decrypt(cryptd_req); | ||
| 428 | } else { | ||
| 429 | struct blkcipher_desc desc; | ||
| 430 | desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); | ||
| 431 | desc.info = req->info; | ||
| 432 | desc.flags = 0; | ||
| 433 | return crypto_blkcipher_crt(desc.tfm)->decrypt( | ||
| 434 | &desc, req->dst, req->src, req->nbytes); | ||
| 435 | } | ||
| 436 | } | ||
| 437 | |||
| 438 | static void ablk_exit(struct crypto_tfm *tfm) | ||
| 439 | { | ||
| 440 | struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 441 | |||
| 442 | cryptd_free_ablkcipher(ctx->cryptd_tfm); | ||
| 443 | } | ||
| 444 | |||
| 445 | static int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name) | ||
| 446 | { | ||
| 447 | struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 448 | struct cryptd_ablkcipher *cryptd_tfm; | ||
| 449 | |||
| 450 | cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); | ||
| 451 | if (IS_ERR(cryptd_tfm)) | ||
| 452 | return PTR_ERR(cryptd_tfm); | ||
| 453 | |||
| 454 | ctx->cryptd_tfm = cryptd_tfm; | ||
| 455 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + | ||
| 456 | crypto_ablkcipher_reqsize(&cryptd_tfm->base); | ||
| 457 | |||
| 458 | return 0; | ||
| 459 | } | ||
| 460 | |||
| 461 | static int ablk_ecb_init(struct crypto_tfm *tfm) | 377 | static int ablk_ecb_init(struct crypto_tfm *tfm) |
| 462 | { | 378 | { |
| 463 | return ablk_init_common(tfm, "__driver-ecb-aes-aesni"); | 379 | return ablk_init_common(tfm, "__driver-ecb-aes-aesni"); |
| @@ -613,7 +529,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, | |||
| 613 | struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); | 529 | struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); |
| 614 | struct aesni_rfc4106_gcm_ctx *child_ctx = | 530 | struct aesni_rfc4106_gcm_ctx *child_ctx = |
| 615 | aesni_rfc4106_gcm_ctx_get(cryptd_child); | 531 | aesni_rfc4106_gcm_ctx_get(cryptd_child); |
| 616 | u8 *new_key_mem = NULL; | 532 | u8 *new_key_align, *new_key_mem = NULL; |
| 617 | 533 | ||
| 618 | if (key_len < 4) { | 534 | if (key_len < 4) { |
| 619 | crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 535 | crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| @@ -637,9 +553,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, | |||
| 637 | if (!new_key_mem) | 553 | if (!new_key_mem) |
| 638 | return -ENOMEM; | 554 | return -ENOMEM; |
| 639 | 555 | ||
| 640 | new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN); | 556 | new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN); |
| 641 | memcpy(new_key_mem, key, key_len); | 557 | memcpy(new_key_align, key, key_len); |
| 642 | key = new_key_mem; | 558 | key = new_key_align; |
| 643 | } | 559 | } |
| 644 | 560 | ||
| 645 | if (!irq_fpu_usable()) | 561 | if (!irq_fpu_usable()) |
| @@ -968,7 +884,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 968 | .cra_priority = 400, | 884 | .cra_priority = 400, |
| 969 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 885 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 970 | .cra_blocksize = AES_BLOCK_SIZE, | 886 | .cra_blocksize = AES_BLOCK_SIZE, |
| 971 | .cra_ctxsize = sizeof(struct async_aes_ctx), | 887 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 972 | .cra_alignmask = 0, | 888 | .cra_alignmask = 0, |
| 973 | .cra_type = &crypto_ablkcipher_type, | 889 | .cra_type = &crypto_ablkcipher_type, |
| 974 | .cra_module = THIS_MODULE, | 890 | .cra_module = THIS_MODULE, |
| @@ -989,7 +905,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 989 | .cra_priority = 400, | 905 | .cra_priority = 400, |
| 990 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 906 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 991 | .cra_blocksize = AES_BLOCK_SIZE, | 907 | .cra_blocksize = AES_BLOCK_SIZE, |
| 992 | .cra_ctxsize = sizeof(struct async_aes_ctx), | 908 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 993 | .cra_alignmask = 0, | 909 | .cra_alignmask = 0, |
| 994 | .cra_type = &crypto_ablkcipher_type, | 910 | .cra_type = &crypto_ablkcipher_type, |
| 995 | .cra_module = THIS_MODULE, | 911 | .cra_module = THIS_MODULE, |
| @@ -1033,7 +949,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 1033 | .cra_priority = 400, | 949 | .cra_priority = 400, |
| 1034 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 950 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 1035 | .cra_blocksize = 1, | 951 | .cra_blocksize = 1, |
| 1036 | .cra_ctxsize = sizeof(struct async_aes_ctx), | 952 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 1037 | .cra_alignmask = 0, | 953 | .cra_alignmask = 0, |
| 1038 | .cra_type = &crypto_ablkcipher_type, | 954 | .cra_type = &crypto_ablkcipher_type, |
| 1039 | .cra_module = THIS_MODULE, | 955 | .cra_module = THIS_MODULE, |
| @@ -1098,7 +1014,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 1098 | .cra_priority = 400, | 1014 | .cra_priority = 400, |
| 1099 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1015 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 1100 | .cra_blocksize = 1, | 1016 | .cra_blocksize = 1, |
| 1101 | .cra_ctxsize = sizeof(struct async_aes_ctx), | 1017 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 1102 | .cra_alignmask = 0, | 1018 | .cra_alignmask = 0, |
| 1103 | .cra_type = &crypto_ablkcipher_type, | 1019 | .cra_type = &crypto_ablkcipher_type, |
| 1104 | .cra_module = THIS_MODULE, | 1020 | .cra_module = THIS_MODULE, |
| @@ -1126,7 +1042,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 1126 | .cra_priority = 400, | 1042 | .cra_priority = 400, |
| 1127 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1043 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 1128 | .cra_blocksize = AES_BLOCK_SIZE, | 1044 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1129 | .cra_ctxsize = sizeof(struct async_aes_ctx), | 1045 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 1130 | .cra_alignmask = 0, | 1046 | .cra_alignmask = 0, |
| 1131 | .cra_type = &crypto_ablkcipher_type, | 1047 | .cra_type = &crypto_ablkcipher_type, |
| 1132 | .cra_module = THIS_MODULE, | 1048 | .cra_module = THIS_MODULE, |
| @@ -1150,7 +1066,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 1150 | .cra_priority = 400, | 1066 | .cra_priority = 400, |
| 1151 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1067 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 1152 | .cra_blocksize = AES_BLOCK_SIZE, | 1068 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1153 | .cra_ctxsize = sizeof(struct async_aes_ctx), | 1069 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 1154 | .cra_alignmask = 0, | 1070 | .cra_alignmask = 0, |
| 1155 | .cra_type = &crypto_ablkcipher_type, | 1071 | .cra_type = &crypto_ablkcipher_type, |
| 1156 | .cra_module = THIS_MODULE, | 1072 | .cra_module = THIS_MODULE, |
| @@ -1174,7 +1090,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 1174 | .cra_priority = 400, | 1090 | .cra_priority = 400, |
| 1175 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1091 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 1176 | .cra_blocksize = AES_BLOCK_SIZE, | 1092 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1177 | .cra_ctxsize = sizeof(struct async_aes_ctx), | 1093 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 1178 | .cra_alignmask = 0, | 1094 | .cra_alignmask = 0, |
| 1179 | .cra_type = &crypto_ablkcipher_type, | 1095 | .cra_type = &crypto_ablkcipher_type, |
| 1180 | .cra_module = THIS_MODULE, | 1096 | .cra_module = THIS_MODULE, |
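
Apart from the mechanical async_aes_ctx to async_helper_ctx conversion, this diff fixes a real bug in rfc4106_set_key(): the old code overwrote new_key_mem with its PTR_ALIGN()ed value, so the kfree(new_key_mem) later in the function could be handed a pointer kmalloc() never returned. The corrected idiom, reduced to its essentials (AESNI_ALIGN is 16 in this file; the helper name is illustrative):

        static int aligned_key_demo(const u8 *key, unsigned int key_len)
        {
                u8 *mem, *aligned;

                mem = kmalloc(key_len + AESNI_ALIGN - 1, GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;

                aligned = PTR_ALIGN(mem, AESNI_ALIGN); /* may point past mem */
                memcpy(aligned, key, key_len);
                /* ... use "aligned" for the FPU code ... */

                kfree(mem);     /* the original pointer, never "aligned" */
                return 0;
        }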
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index 3306dc0b139e..eeb2b3b743e9 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c | |||
| @@ -5,10 +5,6 @@ | |||
| 5 | * | 5 | * |
| 6 | * Camellia parts based on code by: | 6 | * Camellia parts based on code by: |
| 7 | * Copyright (C) 2006 NTT (Nippon Telegraph and Telephone Corporation) | 7 | * Copyright (C) 2006 NTT (Nippon Telegraph and Telephone Corporation) |
| 8 | * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: | ||
| 9 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 10 | * CTR part based on code (crypto/ctr.c) by: | ||
| 11 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> | ||
| 12 | * | 8 | * |
| 13 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 14 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
| @@ -34,9 +30,9 @@ | |||
| 34 | #include <linux/module.h> | 30 | #include <linux/module.h> |
| 35 | #include <linux/types.h> | 31 | #include <linux/types.h> |
| 36 | #include <crypto/algapi.h> | 32 | #include <crypto/algapi.h> |
| 37 | #include <crypto/b128ops.h> | ||
| 38 | #include <crypto/lrw.h> | 33 | #include <crypto/lrw.h> |
| 39 | #include <crypto/xts.h> | 34 | #include <crypto/xts.h> |
| 35 | #include <asm/crypto/glue_helper.h> | ||
| 40 | 36 | ||
| 41 | #define CAMELLIA_MIN_KEY_SIZE 16 | 37 | #define CAMELLIA_MIN_KEY_SIZE 16 |
| 42 | #define CAMELLIA_MAX_KEY_SIZE 32 | 38 | #define CAMELLIA_MAX_KEY_SIZE 32 |
| @@ -1312,307 +1308,128 @@ static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key, | |||
| 1312 | &tfm->crt_flags); | 1308 | &tfm->crt_flags); |
| 1313 | } | 1309 | } |
| 1314 | 1310 | ||
| 1315 | static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, | 1311 | static void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) |
| 1316 | void (*fn)(struct camellia_ctx *, u8 *, const u8 *), | ||
| 1317 | void (*fn_2way)(struct camellia_ctx *, u8 *, const u8 *)) | ||
| 1318 | { | 1312 | { |
| 1319 | struct camellia_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 1313 | u128 iv = *src; |
| 1320 | unsigned int bsize = CAMELLIA_BLOCK_SIZE; | ||
| 1321 | unsigned int nbytes; | ||
| 1322 | int err; | ||
| 1323 | |||
| 1324 | err = blkcipher_walk_virt(desc, walk); | ||
| 1325 | |||
| 1326 | while ((nbytes = walk->nbytes)) { | ||
| 1327 | u8 *wsrc = walk->src.virt.addr; | ||
| 1328 | u8 *wdst = walk->dst.virt.addr; | ||
| 1329 | |||
| 1330 | /* Process two block batch */ | ||
| 1331 | if (nbytes >= bsize * 2) { | ||
| 1332 | do { | ||
| 1333 | fn_2way(ctx, wdst, wsrc); | ||
| 1334 | |||
| 1335 | wsrc += bsize * 2; | ||
| 1336 | wdst += bsize * 2; | ||
| 1337 | nbytes -= bsize * 2; | ||
| 1338 | } while (nbytes >= bsize * 2); | ||
| 1339 | |||
| 1340 | if (nbytes < bsize) | ||
| 1341 | goto done; | ||
| 1342 | } | ||
| 1343 | |||
| 1344 | /* Handle leftovers */ | ||
| 1345 | do { | ||
| 1346 | fn(ctx, wdst, wsrc); | ||
| 1347 | |||
| 1348 | wsrc += bsize; | ||
| 1349 | wdst += bsize; | ||
| 1350 | nbytes -= bsize; | ||
| 1351 | } while (nbytes >= bsize); | ||
| 1352 | |||
| 1353 | done: | ||
| 1354 | err = blkcipher_walk_done(desc, walk, nbytes); | ||
| 1355 | } | ||
| 1356 | |||
| 1357 | return err; | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 1361 | struct scatterlist *src, unsigned int nbytes) | ||
| 1362 | { | ||
| 1363 | struct blkcipher_walk walk; | ||
| 1364 | |||
| 1365 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 1366 | return ecb_crypt(desc, &walk, camellia_enc_blk, camellia_enc_blk_2way); | ||
| 1367 | } | ||
| 1368 | 1314 | ||
| 1369 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 1315 | camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); |
| 1370 | struct scatterlist *src, unsigned int nbytes) | ||
| 1371 | { | ||
| 1372 | struct blkcipher_walk walk; | ||
| 1373 | |||
| 1374 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 1375 | return ecb_crypt(desc, &walk, camellia_dec_blk, camellia_dec_blk_2way); | ||
| 1376 | } | ||
| 1377 | 1316 | ||
| 1378 | static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, | 1317 | u128_xor(&dst[1], &dst[1], &iv); |
| 1379 | struct blkcipher_walk *walk) | ||
| 1380 | { | ||
| 1381 | struct camellia_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 1382 | unsigned int bsize = CAMELLIA_BLOCK_SIZE; | ||
| 1383 | unsigned int nbytes = walk->nbytes; | ||
| 1384 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 1385 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 1386 | u128 *iv = (u128 *)walk->iv; | ||
| 1387 | |||
| 1388 | do { | ||
| 1389 | u128_xor(dst, src, iv); | ||
| 1390 | camellia_enc_blk(ctx, (u8 *)dst, (u8 *)dst); | ||
| 1391 | iv = dst; | ||
| 1392 | |||
| 1393 | src += 1; | ||
| 1394 | dst += 1; | ||
| 1395 | nbytes -= bsize; | ||
| 1396 | } while (nbytes >= bsize); | ||
| 1397 | |||
| 1398 | u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv); | ||
| 1399 | return nbytes; | ||
| 1400 | } | 1318 | } |
| 1401 | 1319 | ||
| 1402 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 1320 | static void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv) |
| 1403 | struct scatterlist *src, unsigned int nbytes) | ||
| 1404 | { | 1321 | { |
| 1405 | struct blkcipher_walk walk; | 1322 | be128 ctrblk; |
| 1406 | int err; | ||
| 1407 | 1323 | ||
| 1408 | blkcipher_walk_init(&walk, dst, src, nbytes); | 1324 | if (dst != src) |
| 1409 | err = blkcipher_walk_virt(desc, &walk); | 1325 | *dst = *src; |
| 1410 | 1326 | ||
| 1411 | while ((nbytes = walk.nbytes)) { | 1327 | u128_to_be128(&ctrblk, iv); |
| 1412 | nbytes = __cbc_encrypt(desc, &walk); | 1328 | u128_inc(iv); |
| 1413 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 1414 | } | ||
| 1415 | 1329 | ||
| 1416 | return err; | 1330 | camellia_enc_blk_xor(ctx, (u8 *)dst, (u8 *)&ctrblk); |
| 1417 | } | 1331 | } |
| 1418 | 1332 | ||
| 1419 | static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, | 1333 | static void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, |
| 1420 | struct blkcipher_walk *walk) | 1334 | u128 *iv) |
| 1421 | { | 1335 | { |
| 1422 | struct camellia_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 1336 | be128 ctrblks[2]; |
| 1423 | unsigned int bsize = CAMELLIA_BLOCK_SIZE; | ||
| 1424 | unsigned int nbytes = walk->nbytes; | ||
| 1425 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 1426 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 1427 | u128 ivs[2 - 1]; | ||
| 1428 | u128 last_iv; | ||
| 1429 | 1337 | ||
| 1430 | /* Start of the last block. */ | 1338 | if (dst != src) { |
| 1431 | src += nbytes / bsize - 1; | 1339 | dst[0] = src[0]; |
| 1432 | dst += nbytes / bsize - 1; | 1340 | dst[1] = src[1]; |
| 1433 | |||
| 1434 | last_iv = *src; | ||
| 1435 | |||
| 1436 | /* Process two block batch */ | ||
| 1437 | if (nbytes >= bsize * 2) { | ||
| 1438 | do { | ||
| 1439 | nbytes -= bsize * (2 - 1); | ||
| 1440 | src -= 2 - 1; | ||
| 1441 | dst -= 2 - 1; | ||
| 1442 | |||
| 1443 | ivs[0] = src[0]; | ||
| 1444 | |||
| 1445 | camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); | ||
| 1446 | |||
| 1447 | u128_xor(dst + 1, dst + 1, ivs + 0); | ||
| 1448 | |||
| 1449 | nbytes -= bsize; | ||
| 1450 | if (nbytes < bsize) | ||
| 1451 | goto done; | ||
| 1452 | |||
| 1453 | u128_xor(dst, dst, src - 1); | ||
| 1454 | src -= 1; | ||
| 1455 | dst -= 1; | ||
| 1456 | } while (nbytes >= bsize * 2); | ||
| 1457 | |||
| 1458 | if (nbytes < bsize) | ||
| 1459 | goto done; | ||
| 1460 | } | 1341 | } |
| 1461 | 1342 | ||
| 1462 | /* Handle leftovers */ | 1343 | u128_to_be128(&ctrblks[0], iv); |
| 1463 | for (;;) { | 1344 | u128_inc(iv); |
| 1464 | camellia_dec_blk(ctx, (u8 *)dst, (u8 *)src); | 1345 | u128_to_be128(&ctrblks[1], iv); |
| 1465 | 1346 | u128_inc(iv); | |
| 1466 | nbytes -= bsize; | ||
| 1467 | if (nbytes < bsize) | ||
| 1468 | break; | ||
| 1469 | 1347 | ||
| 1470 | u128_xor(dst, dst, src - 1); | 1348 | camellia_enc_blk_xor_2way(ctx, (u8 *)dst, (u8 *)ctrblks); |
| 1471 | src -= 1; | ||
| 1472 | dst -= 1; | ||
| 1473 | } | ||
| 1474 | |||
| 1475 | done: | ||
| 1476 | u128_xor(dst, dst, (u128 *)walk->iv); | ||
| 1477 | *(u128 *)walk->iv = last_iv; | ||
| 1478 | |||
| 1479 | return nbytes; | ||
| 1480 | } | 1349 | } |
| 1481 | 1350 | ||
| 1482 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 1351 | static const struct common_glue_ctx camellia_enc = { |
| 1483 | struct scatterlist *src, unsigned int nbytes) | 1352 | .num_funcs = 2, |
| 1484 | { | 1353 | .fpu_blocks_limit = -1, |
| 1485 | struct blkcipher_walk walk; | 1354 | |
| 1486 | int err; | 1355 | .funcs = { { |
| 1487 | 1356 | .num_blocks = 2, | |
| 1488 | blkcipher_walk_init(&walk, dst, src, nbytes); | 1357 | .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } |
| 1489 | err = blkcipher_walk_virt(desc, &walk); | 1358 | }, { |
| 1359 | .num_blocks = 1, | ||
| 1360 | .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } | ||
| 1361 | } } | ||
| 1362 | }; | ||
| 1490 | 1363 | ||
| 1491 | while ((nbytes = walk.nbytes)) { | 1364 | static const struct common_glue_ctx camellia_ctr = { |
| 1492 | nbytes = __cbc_decrypt(desc, &walk); | 1365 | .num_funcs = 2, |
| 1493 | err = blkcipher_walk_done(desc, &walk, nbytes); | 1366 | .fpu_blocks_limit = -1, |
| 1494 | } | 1367 | |
| 1368 | .funcs = { { | ||
| 1369 | .num_blocks = 2, | ||
| 1370 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } | ||
| 1371 | }, { | ||
| 1372 | .num_blocks = 1, | ||
| 1373 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } | ||
| 1374 | } } | ||
| 1375 | }; | ||
| 1495 | 1376 | ||
| 1496 | return err; | 1377 | static const struct common_glue_ctx camellia_dec = { |
| 1497 | } | 1378 | .num_funcs = 2, |
| 1379 | .fpu_blocks_limit = -1, | ||
| 1380 | |||
| 1381 | .funcs = { { | ||
| 1382 | .num_blocks = 2, | ||
| 1383 | .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } | ||
| 1384 | }, { | ||
| 1385 | .num_blocks = 1, | ||
| 1386 | .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } | ||
| 1387 | } } | ||
| 1388 | }; | ||
| 1498 | 1389 | ||
| 1499 | static inline void u128_to_be128(be128 *dst, const u128 *src) | 1390 | static const struct common_glue_ctx camellia_dec_cbc = { |
| 1500 | { | 1391 | .num_funcs = 2, |
| 1501 | dst->a = cpu_to_be64(src->a); | 1392 | .fpu_blocks_limit = -1, |
| 1502 | dst->b = cpu_to_be64(src->b); | 1393 | |
| 1503 | } | 1394 | .funcs = { { |
| 1395 | .num_blocks = 2, | ||
| 1396 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } | ||
| 1397 | }, { | ||
| 1398 | .num_blocks = 1, | ||
| 1399 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } | ||
| 1400 | } } | ||
| 1401 | }; | ||
| 1504 | 1402 | ||
| 1505 | static inline void be128_to_u128(u128 *dst, const be128 *src) | 1403 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 1404 | struct scatterlist *src, unsigned int nbytes) | ||
| 1506 | { | 1405 | { |
| 1507 | dst->a = be64_to_cpu(src->a); | 1406 | return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes); |
| 1508 | dst->b = be64_to_cpu(src->b); | ||
| 1509 | } | 1407 | } |
| 1510 | 1408 | ||
| 1511 | static inline void u128_inc(u128 *i) | 1409 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 1410 | struct scatterlist *src, unsigned int nbytes) | ||
| 1512 | { | 1411 | { |
| 1513 | i->b++; | 1412 | return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes); |
| 1514 | if (!i->b) | ||
| 1515 | i->a++; | ||
| 1516 | } | 1413 | } |
| 1517 | 1414 | ||
| 1518 | static void ctr_crypt_final(struct blkcipher_desc *desc, | 1415 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 1519 | struct blkcipher_walk *walk) | 1416 | struct scatterlist *src, unsigned int nbytes) |
| 1520 | { | 1417 | { |
| 1521 | struct camellia_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 1418 | return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc, |
| 1522 | u8 keystream[CAMELLIA_BLOCK_SIZE]; | 1419 | dst, src, nbytes); |
| 1523 | u8 *src = walk->src.virt.addr; | ||
| 1524 | u8 *dst = walk->dst.virt.addr; | ||
| 1525 | unsigned int nbytes = walk->nbytes; | ||
| 1526 | u128 ctrblk; | ||
| 1527 | |||
| 1528 | memcpy(keystream, src, nbytes); | ||
| 1529 | camellia_enc_blk_xor(ctx, keystream, walk->iv); | ||
| 1530 | memcpy(dst, keystream, nbytes); | ||
| 1531 | |||
| 1532 | be128_to_u128(&ctrblk, (be128 *)walk->iv); | ||
| 1533 | u128_inc(&ctrblk); | ||
| 1534 | u128_to_be128((be128 *)walk->iv, &ctrblk); | ||
| 1535 | } | 1420 | } |
| 1536 | 1421 | ||
| 1537 | static unsigned int __ctr_crypt(struct blkcipher_desc *desc, | 1422 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 1538 | struct blkcipher_walk *walk) | 1423 | struct scatterlist *src, unsigned int nbytes) |
| 1539 | { | 1424 | { |
| 1540 | struct camellia_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 1425 | return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src, |
| 1541 | unsigned int bsize = CAMELLIA_BLOCK_SIZE; | 1426 | nbytes); |
| 1542 | unsigned int nbytes = walk->nbytes; | ||
| 1543 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 1544 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 1545 | u128 ctrblk; | ||
| 1546 | be128 ctrblocks[2]; | ||
| 1547 | |||
| 1548 | be128_to_u128(&ctrblk, (be128 *)walk->iv); | ||
| 1549 | |||
| 1550 | /* Process two block batch */ | ||
| 1551 | if (nbytes >= bsize * 2) { | ||
| 1552 | do { | ||
| 1553 | if (dst != src) { | ||
| 1554 | dst[0] = src[0]; | ||
| 1555 | dst[1] = src[1]; | ||
| 1556 | } | ||
| 1557 | |||
| 1558 | /* create ctrblks for parallel encrypt */ | ||
| 1559 | u128_to_be128(&ctrblocks[0], &ctrblk); | ||
| 1560 | u128_inc(&ctrblk); | ||
| 1561 | u128_to_be128(&ctrblocks[1], &ctrblk); | ||
| 1562 | u128_inc(&ctrblk); | ||
| 1563 | |||
| 1564 | camellia_enc_blk_xor_2way(ctx, (u8 *)dst, | ||
| 1565 | (u8 *)ctrblocks); | ||
| 1566 | |||
| 1567 | src += 2; | ||
| 1568 | dst += 2; | ||
| 1569 | nbytes -= bsize * 2; | ||
| 1570 | } while (nbytes >= bsize * 2); | ||
| 1571 | |||
| 1572 | if (nbytes < bsize) | ||
| 1573 | goto done; | ||
| 1574 | } | ||
| 1575 | |||
| 1576 | /* Handle leftovers */ | ||
| 1577 | do { | ||
| 1578 | if (dst != src) | ||
| 1579 | *dst = *src; | ||
| 1580 | |||
| 1581 | u128_to_be128(&ctrblocks[0], &ctrblk); | ||
| 1582 | u128_inc(&ctrblk); | ||
| 1583 | |||
| 1584 | camellia_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks); | ||
| 1585 | |||
| 1586 | src += 1; | ||
| 1587 | dst += 1; | ||
| 1588 | nbytes -= bsize; | ||
| 1589 | } while (nbytes >= bsize); | ||
| 1590 | |||
| 1591 | done: | ||
| 1592 | u128_to_be128((be128 *)walk->iv, &ctrblk); | ||
| 1593 | return nbytes; | ||
| 1594 | } | 1427 | } |
| 1595 | 1428 | ||
| 1596 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 1429 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 1597 | struct scatterlist *src, unsigned int nbytes) | 1430 | struct scatterlist *src, unsigned int nbytes) |
| 1598 | { | 1431 | { |
| 1599 | struct blkcipher_walk walk; | 1432 | return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes); |
| 1600 | int err; | ||
| 1601 | |||
| 1602 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 1603 | err = blkcipher_walk_virt_block(desc, &walk, CAMELLIA_BLOCK_SIZE); | ||
| 1604 | |||
| 1605 | while ((nbytes = walk.nbytes) >= CAMELLIA_BLOCK_SIZE) { | ||
| 1606 | nbytes = __ctr_crypt(desc, &walk); | ||
| 1607 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 1608 | } | ||
| 1609 | |||
| 1610 | if (walk.nbytes) { | ||
| 1611 | ctr_crypt_final(desc, &walk); | ||
| 1612 | err = blkcipher_walk_done(desc, &walk, 0); | ||
| 1613 | } | ||
| 1614 | |||
| 1615 | return err; | ||
| 1616 | } | 1433 | } |
| 1617 | 1434 | ||
| 1618 | static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) | 1435 | static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) |
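
The four common_glue_ctx tables replace roughly 180 net lines of hand-rolled ECB/CBC/CTR walkers: the shared loops in glue_helper.c walk funcs[] in order and fall through to narrower routines for the tail, so entries must be sorted by descending num_blocks and end with a 1-block routine. fpu_blocks_limit = -1 tells the glue code this is plain integer code that never needs kernel_fpu_begin(). A hypothetical table for a cipher that also had a 4-way routine (the example_* names are placeholders):

        static const struct common_glue_ctx example_enc = {
                .num_funcs = 3,
                .fpu_blocks_limit = -1, /* scalar implementation: no FPU gating */

                .funcs = { {
                        .num_blocks = 4,        /* widest batch first */
                        .fn_u = { .ecb = GLUE_FUNC_CAST(example_enc_blk_4way) }
                }, {
                        .num_blocks = 2,
                        .fn_u = { .ecb = GLUE_FUNC_CAST(example_enc_blk_2way) }
                }, {
                        .num_blocks = 1,        /* required single-block fallback */
                        .fn_u = { .ecb = GLUE_FUNC_CAST(example_enc_blk) }
                } }
        };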
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c new file mode 100644 index 000000000000..4854f0f31e4f --- /dev/null +++ b/arch/x86/crypto/glue_helper.c | |||
| @@ -0,0 +1,307 @@ | |||
| 1 | /* | ||
| 2 | * Shared glue code for 128bit block ciphers | ||
| 3 | * | ||
| 4 | * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | ||
| 5 | * | ||
| 6 | * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: | ||
| 7 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 8 | * CTR part based on code (crypto/ctr.c) by: | ||
| 9 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License as published by | ||
| 13 | * the Free Software Foundation; either version 2 of the License, or | ||
| 14 | * (at your option) any later version. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, | ||
| 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | * | ||
| 21 | * You should have received a copy of the GNU General Public License | ||
| 22 | * along with this program; if not, write to the Free Software | ||
| 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
| 24 | * USA | ||
| 25 | * | ||
| 26 | */ | ||
| 27 | |||
| 28 | #include <linux/module.h> | ||
| 29 | #include <crypto/b128ops.h> | ||
| 30 | #include <crypto/lrw.h> | ||
| 31 | #include <crypto/xts.h> | ||
| 32 | #include <asm/crypto/glue_helper.h> | ||
| 33 | #include <crypto/scatterwalk.h> | ||
| 34 | |||
| 35 | static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, | ||
| 36 | struct blkcipher_desc *desc, | ||
| 37 | struct blkcipher_walk *walk) | ||
| 38 | { | ||
| 39 | void *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 40 | const unsigned int bsize = 128 / 8; | ||
| 41 | unsigned int nbytes, i, func_bytes; | ||
| 42 | bool fpu_enabled = false; | ||
| 43 | int err; | ||
| 44 | |||
| 45 | err = blkcipher_walk_virt(desc, walk); | ||
| 46 | |||
| 47 | while ((nbytes = walk->nbytes)) { | ||
| 48 | u8 *wsrc = walk->src.virt.addr; | ||
| 49 | u8 *wdst = walk->dst.virt.addr; | ||
| 50 | |||
| 51 | fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, | ||
| 52 | desc, fpu_enabled, nbytes); | ||
| 53 | |||
| 54 | for (i = 0; i < gctx->num_funcs; i++) { | ||
| 55 | func_bytes = bsize * gctx->funcs[i].num_blocks; | ||
| 56 | |||
| 57 | /* Process multi-block batch */ | ||
| 58 | if (nbytes >= func_bytes) { | ||
| 59 | do { | ||
| 60 | gctx->funcs[i].fn_u.ecb(ctx, wdst, | ||
| 61 | wsrc); | ||
| 62 | |||
| 63 | wsrc += func_bytes; | ||
| 64 | wdst += func_bytes; | ||
| 65 | nbytes -= func_bytes; | ||
| 66 | } while (nbytes >= func_bytes); | ||
| 67 | |||
| 68 | if (nbytes < bsize) | ||
| 69 | goto done; | ||
| 70 | } | ||
| 71 | } | ||
| 72 | |||
| 73 | done: | ||
| 74 | err = blkcipher_walk_done(desc, walk, nbytes); | ||
| 75 | } | ||
| 76 | |||
| 77 | glue_fpu_end(fpu_enabled); | ||
| 78 | return err; | ||
| 79 | } | ||
| 80 | |||
| 81 | int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, | ||
| 82 | struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 83 | struct scatterlist *src, unsigned int nbytes) | ||
| 84 | { | ||
| 85 | struct blkcipher_walk walk; | ||
| 86 | |||
| 87 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 88 | return __glue_ecb_crypt_128bit(gctx, desc, &walk); | ||
| 89 | } | ||
| 90 | EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit); | ||
| 91 | |||
| 92 | static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn, | ||
| 93 | struct blkcipher_desc *desc, | ||
| 94 | struct blkcipher_walk *walk) | ||
| 95 | { | ||
| 96 | void *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 97 | const unsigned int bsize = 128 / 8; | ||
| 98 | unsigned int nbytes = walk->nbytes; | ||
| 99 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 100 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 101 | u128 *iv = (u128 *)walk->iv; | ||
| 102 | |||
| 103 | do { | ||
| 104 | u128_xor(dst, src, iv); | ||
| 105 | fn(ctx, (u8 *)dst, (u8 *)dst); | ||
| 106 | iv = dst; | ||
| 107 | |||
| 108 | src += 1; | ||
| 109 | dst += 1; | ||
| 110 | nbytes -= bsize; | ||
| 111 | } while (nbytes >= bsize); | ||
| 112 | |||
| 113 | u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv); | ||
| 114 | return nbytes; | ||
| 115 | } | ||
| 116 | |||
| 117 | int glue_cbc_encrypt_128bit(const common_glue_func_t fn, | ||
| 118 | struct blkcipher_desc *desc, | ||
| 119 | struct scatterlist *dst, | ||
| 120 | struct scatterlist *src, unsigned int nbytes) | ||
| 121 | { | ||
| 122 | struct blkcipher_walk walk; | ||
| 123 | int err; | ||
| 124 | |||
| 125 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 126 | err = blkcipher_walk_virt(desc, &walk); | ||
| 127 | |||
| 128 | while ((nbytes = walk.nbytes)) { | ||
| 129 | nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk); | ||
| 130 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 131 | } | ||
| 132 | |||
| 133 | return err; | ||
| 134 | } | ||
| 135 | EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit); | ||
| 136 | |||
| 137 | static unsigned int | ||
| 138 | __glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, | ||
| 139 | struct blkcipher_desc *desc, | ||
| 140 | struct blkcipher_walk *walk) | ||
| 141 | { | ||
| 142 | void *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 143 | const unsigned int bsize = 128 / 8; | ||
| 144 | unsigned int nbytes = walk->nbytes; | ||
| 145 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 146 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 147 | u128 last_iv; | ||
| 148 | unsigned int num_blocks, func_bytes; | ||
| 149 | unsigned int i; | ||
| 150 | |||
| 151 | /* Start of the last block. */ | ||
| 152 | src += nbytes / bsize - 1; | ||
| 153 | dst += nbytes / bsize - 1; | ||
| 154 | |||
| 155 | last_iv = *src; | ||
| 156 | |||
| 157 | for (i = 0; i < gctx->num_funcs; i++) { | ||
| 158 | num_blocks = gctx->funcs[i].num_blocks; | ||
| 159 | func_bytes = bsize * num_blocks; | ||
| 160 | |||
| 161 | /* Process multi-block batch */ | ||
| 162 | if (nbytes >= func_bytes) { | ||
| 163 | do { | ||
| 164 | nbytes -= func_bytes - bsize; | ||
| 165 | src -= num_blocks - 1; | ||
| 166 | dst -= num_blocks - 1; | ||
| 167 | |||
| 168 | gctx->funcs[i].fn_u.cbc(ctx, dst, src); | ||
| 169 | |||
| 170 | nbytes -= bsize; | ||
| 171 | if (nbytes < bsize) | ||
| 172 | goto done; | ||
| 173 | |||
| 174 | u128_xor(dst, dst, src - 1); | ||
| 175 | src -= 1; | ||
| 176 | dst -= 1; | ||
| 177 | } while (nbytes >= func_bytes); | ||
| 178 | |||
| 179 | if (nbytes < bsize) | ||
| 180 | goto done; | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | done: | ||
| 185 | u128_xor(dst, dst, (u128 *)walk->iv); | ||
| 186 | *(u128 *)walk->iv = last_iv; | ||
| 187 | |||
| 188 | return nbytes; | ||
| 189 | } | ||
| 190 | |||
| 191 | int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, | ||
| 192 | struct blkcipher_desc *desc, | ||
| 193 | struct scatterlist *dst, | ||
| 194 | struct scatterlist *src, unsigned int nbytes) | ||
| 195 | { | ||
| 196 | const unsigned int bsize = 128 / 8; | ||
| 197 | bool fpu_enabled = false; | ||
| 198 | struct blkcipher_walk walk; | ||
| 199 | int err; | ||
| 200 | |||
| 201 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 202 | err = blkcipher_walk_virt(desc, &walk); | ||
| 203 | |||
| 204 | while ((nbytes = walk.nbytes)) { | ||
| 205 | fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, | ||
| 206 | desc, fpu_enabled, nbytes); | ||
| 207 | nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); | ||
| 208 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 209 | } | ||
| 210 | |||
| 211 | glue_fpu_end(fpu_enabled); | ||
| 212 | return err; | ||
| 213 | } | ||
| 214 | EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); | ||
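
Decryption is where the batching pays off: P[i] = D(C[i]) ^ C[i-1] depends only on ciphertext, so any number of blocks can be decrypted at once. __glue_cbc_decrypt_128bit additionally supports in-place operation by walking from the last block backwards, saving last_iv up front because the final ciphertext block must become the next IV. A sketch of the parallel structure, assuming non-overlapping buffers and a hypothetical decrypt_block():

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define BSIZE 16

	void decrypt_block(uint8_t *out, const uint8_t *in);	/* hypothetical */

	static void cbc_decrypt(uint8_t *dst, const uint8_t *src,
				size_t nblocks, uint8_t iv[BSIZE])
	{
		size_t i, j;

		for (i = 0; i < nblocks; i++)	/* freely parallelizable */
			decrypt_block(dst + i * BSIZE, src + i * BSIZE);

		for (j = 0; j < BSIZE; j++)	/* first block uses the IV */
			dst[j] ^= iv[j];
		for (i = 1; i < nblocks; i++)
			for (j = 0; j < BSIZE; j++)
				dst[i * BSIZE + j] ^= src[(i - 1) * BSIZE + j];

		memcpy(iv, src + (nblocks - 1) * BSIZE, BSIZE);
	}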
| 215 | |||
| 216 | static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr, | ||
| 217 | struct blkcipher_desc *desc, | ||
| 218 | struct blkcipher_walk *walk) | ||
| 219 | { | ||
| 220 | void *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 221 | u8 *src = (u8 *)walk->src.virt.addr; | ||
| 222 | u8 *dst = (u8 *)walk->dst.virt.addr; | ||
| 223 | unsigned int nbytes = walk->nbytes; | ||
| 224 | u128 ctrblk; | ||
| 225 | u128 tmp; | ||
| 226 | |||
| 227 | be128_to_u128(&ctrblk, (be128 *)walk->iv); | ||
| 228 | |||
| 229 | memcpy(&tmp, src, nbytes); | ||
| 230 | fn_ctr(ctx, &tmp, &tmp, &ctrblk); | ||
| 231 | memcpy(dst, &tmp, nbytes); | ||
| 232 | |||
| 233 | u128_to_be128((be128 *)walk->iv, &ctrblk); | ||
| 234 | } | ||
| 236 | |||
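
glue_ctr_crypt_final_128bit above covers the tail shorter than one block: CTR is a stream mode, so the leftover bytes are XORed with a truncated keystream block, and staging through the full-size tmp lets fn_ctr always see a whole 128-bit block. The same idea in isolation, with a hypothetical keystream_block() hiding the encrypt-counter-and-increment step:

	#include <stddef.h>
	#include <stdint.h>

	#define BSIZE 16

	void keystream_block(uint8_t ks[BSIZE]);	/* hypothetical: E(ctr++) */

	static void ctr_final(uint8_t *dst, const uint8_t *src, size_t nbytes)
	{
		uint8_t ks[BSIZE];
		size_t i;

		keystream_block(ks);
		for (i = 0; i < nbytes; i++)	/* nbytes < BSIZE */
			dst[i] = src[i] ^ ks[i];
	}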
| 237 | static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, | ||
| 238 | struct blkcipher_desc *desc, | ||
| 239 | struct blkcipher_walk *walk) | ||
| 240 | { | ||
| 241 | const unsigned int bsize = 128 / 8; | ||
| 242 | void *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 243 | unsigned int nbytes = walk->nbytes; | ||
| 244 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 245 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 246 | u128 ctrblk; | ||
| 247 | unsigned int num_blocks, func_bytes; | ||
| 248 | unsigned int i; | ||
| 249 | |||
| 250 | be128_to_u128(&ctrblk, (be128 *)walk->iv); | ||
| 251 | |||
| 252 | /* Process multi-block batch */ | ||
| 253 | for (i = 0; i < gctx->num_funcs; i++) { | ||
| 254 | num_blocks = gctx->funcs[i].num_blocks; | ||
| 255 | func_bytes = bsize * num_blocks; | ||
| 256 | |||
| 257 | if (nbytes >= func_bytes) { | ||
| 258 | do { | ||
| 259 | gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk); | ||
| 260 | |||
| 261 | src += num_blocks; | ||
| 262 | dst += num_blocks; | ||
| 263 | nbytes -= func_bytes; | ||
| 264 | } while (nbytes >= func_bytes); | ||
| 265 | |||
| 266 | if (nbytes < bsize) | ||
| 267 | goto done; | ||
| 268 | } | ||
| 269 | } | ||
| 270 | |||
| 271 | done: | ||
| 272 | u128_to_be128((be128 *)walk->iv, &ctrblk); | ||
| 273 | return nbytes; | ||
| 274 | } | ||
| 275 | |||
| 276 | int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, | ||
| 277 | struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 278 | struct scatterlist *src, unsigned int nbytes) | ||
| 279 | { | ||
| 280 | const unsigned int bsize = 128 / 8; | ||
| 281 | bool fpu_enabled = false; | ||
| 282 | struct blkcipher_walk walk; | ||
| 283 | int err; | ||
| 284 | |||
| 285 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 286 | err = blkcipher_walk_virt_block(desc, &walk, bsize); | ||
| 287 | |||
| 288 | while ((nbytes = walk.nbytes) >= bsize) { | ||
| 289 | fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, | ||
| 290 | desc, fpu_enabled, nbytes); | ||
| 291 | nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); | ||
| 292 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 293 | } | ||
| 294 | |||
| 295 | glue_fpu_end(fpu_enabled); | ||
| 296 | |||
| 297 | if (walk.nbytes) { | ||
| 298 | glue_ctr_crypt_final_128bit( | ||
| 299 | gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); | ||
| 300 | err = blkcipher_walk_done(desc, &walk, 0); | ||
| 301 | } | ||
| 302 | |||
| 303 | return err; | ||
| 304 | } | ||
| 305 | EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit); | ||
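
The counter itself lives in walk->iv as a 128-bit big-endian integer, which is why every walk step is bracketed by be128_to_u128/u128_to_be128: the increment runs on native 64-bit halves and is converted back only at the edges. A sketch of the layout this assumes (field names mirror the kernel's u128, with a as the high half; u128_inc is assumed to carry like this, and storing back is the mirror image of the load):

	#include <stdint.h>

	struct u128 { uint64_t a, b; };		/* a = high, b = low */

	static uint64_t load_be64(const uint8_t *p)
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 8; i++)
			v = (v << 8) | p[i];
		return v;
	}

	static void u128_inc(struct u128 *x)
	{
		x->b++;
		if (x->b == 0)			/* carry out of the low half */
			x->a++;
	}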
| 306 | |||
| 307 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S new file mode 100644 index 000000000000..504106bf04a2 --- /dev/null +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S | |||
| @@ -0,0 +1,704 @@ | |||
| 1 | /* | ||
| 2 | * Serpent Cipher 8-way parallel algorithm (x86_64/AVX) | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Johannes Goetzfried | ||
| 5 | * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> | ||
| 6 | * | ||
| 7 | * Based on arch/x86/crypto/serpent-sse2-x86_64-asm_64.S by | ||
| 8 | * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or | ||
| 13 | * (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software | ||
| 22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
| 23 | * USA | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | |||
| 27 | .file "serpent-avx-x86_64-asm_64.S" | ||
| 28 | .text | ||
| 29 | |||
| 30 | #define CTX %rdi | ||
| 31 | |||
| 32 | /********************************************************************** | ||
| 33 | 8-way AVX serpent | ||
| 34 | **********************************************************************/ | ||
| 35 | #define RA1 %xmm0 | ||
| 36 | #define RB1 %xmm1 | ||
| 37 | #define RC1 %xmm2 | ||
| 38 | #define RD1 %xmm3 | ||
| 39 | #define RE1 %xmm4 | ||
| 40 | |||
| 41 | #define tp %xmm5 | ||
| 42 | |||
| 43 | #define RA2 %xmm6 | ||
| 44 | #define RB2 %xmm7 | ||
| 45 | #define RC2 %xmm8 | ||
| 46 | #define RD2 %xmm9 | ||
| 47 | #define RE2 %xmm10 | ||
| 48 | |||
| 49 | #define RNOT %xmm11 | ||
| 50 | |||
| 51 | #define RK0 %xmm12 | ||
| 52 | #define RK1 %xmm13 | ||
| 53 | #define RK2 %xmm14 | ||
| 54 | #define RK3 %xmm15 | ||
| 55 | |||
| 56 | |||
| 57 | #define S0_1(x0, x1, x2, x3, x4) \ | ||
| 58 | vpor x0, x3, tp; \ | ||
| 59 | vpxor x3, x0, x0; \ | ||
| 60 | vpxor x2, x3, x4; \ | ||
| 61 | vpxor RNOT, x4, x4; \ | ||
| 62 | vpxor x1, tp, x3; \ | ||
| 63 | vpand x0, x1, x1; \ | ||
| 64 | vpxor x4, x1, x1; \ | ||
| 65 | vpxor x0, x2, x2; | ||
| 66 | #define S0_2(x0, x1, x2, x3, x4) \ | ||
| 67 | vpxor x3, x0, x0; \ | ||
| 68 | vpor x0, x4, x4; \ | ||
| 69 | vpxor x2, x0, x0; \ | ||
| 70 | vpand x1, x2, x2; \ | ||
| 71 | vpxor x2, x3, x3; \ | ||
| 72 | vpxor RNOT, x1, x1; \ | ||
| 73 | vpxor x4, x2, x2; \ | ||
| 74 | vpxor x2, x1, x1; | ||
| 75 | |||
| 76 | #define S1_1(x0, x1, x2, x3, x4) \ | ||
| 77 | vpxor x0, x1, tp; \ | ||
| 78 | vpxor x3, x0, x0; \ | ||
| 79 | vpxor RNOT, x3, x3; \ | ||
| 80 | vpand tp, x1, x4; \ | ||
| 81 | vpor tp, x0, x0; \ | ||
| 82 | vpxor x2, x3, x3; \ | ||
| 83 | vpxor x3, x0, x0; \ | ||
| 84 | vpxor x3, tp, x1; | ||
| 85 | #define S1_2(x0, x1, x2, x3, x4) \ | ||
| 86 | vpxor x4, x3, x3; \ | ||
| 87 | vpor x4, x1, x1; \ | ||
| 88 | vpxor x2, x4, x4; \ | ||
| 89 | vpand x0, x2, x2; \ | ||
| 90 | vpxor x1, x2, x2; \ | ||
| 91 | vpor x0, x1, x1; \ | ||
| 92 | vpxor RNOT, x0, x0; \ | ||
| 93 | vpxor x2, x0, x0; \ | ||
| 94 | vpxor x1, x4, x4; | ||
| 95 | |||
| 96 | #define S2_1(x0, x1, x2, x3, x4) \ | ||
| 97 | vpxor RNOT, x3, x3; \ | ||
| 98 | vpxor x0, x1, x1; \ | ||
| 99 | vpand x2, x0, tp; \ | ||
| 100 | vpxor x3, tp, tp; \ | ||
| 101 | vpor x0, x3, x3; \ | ||
| 102 | vpxor x1, x2, x2; \ | ||
| 103 | vpxor x1, x3, x3; \ | ||
| 104 | vpand tp, x1, x1; | ||
| 105 | #define S2_2(x0, x1, x2, x3, x4) \ | ||
| 106 | vpxor x2, tp, tp; \ | ||
| 107 | vpand x3, x2, x2; \ | ||
| 108 | vpor x1, x3, x3; \ | ||
| 109 | vpxor RNOT, tp, tp; \ | ||
| 110 | vpxor tp, x3, x3; \ | ||
| 111 | vpxor tp, x0, x4; \ | ||
| 112 | vpxor x2, tp, x0; \ | ||
| 113 | vpor x2, x1, x1; | ||
| 114 | |||
| 115 | #define S3_1(x0, x1, x2, x3, x4) \ | ||
| 116 | vpxor x3, x1, tp; \ | ||
| 117 | vpor x0, x3, x3; \ | ||
| 118 | vpand x0, x1, x4; \ | ||
| 119 | vpxor x2, x0, x0; \ | ||
| 120 | vpxor tp, x2, x2; \ | ||
| 121 | vpand x3, tp, x1; \ | ||
| 122 | vpxor x3, x2, x2; \ | ||
| 123 | vpor x4, x0, x0; \ | ||
| 124 | vpxor x3, x4, x4; | ||
| 125 | #define S3_2(x0, x1, x2, x3, x4) \ | ||
| 126 | vpxor x0, x1, x1; \ | ||
| 127 | vpand x3, x0, x0; \ | ||
| 128 | vpand x4, x3, x3; \ | ||
| 129 | vpxor x2, x3, x3; \ | ||
| 130 | vpor x1, x4, x4; \ | ||
| 131 | vpand x1, x2, x2; \ | ||
| 132 | vpxor x3, x4, x4; \ | ||
| 133 | vpxor x3, x0, x0; \ | ||
| 134 | vpxor x2, x3, x3; | ||
| 135 | |||
| 136 | #define S4_1(x0, x1, x2, x3, x4) \ | ||
| 137 | vpand x0, x3, tp; \ | ||
| 138 | vpxor x3, x0, x0; \ | ||
| 139 | vpxor x2, tp, tp; \ | ||
| 140 | vpor x3, x2, x2; \ | ||
| 141 | vpxor x1, x0, x0; \ | ||
| 142 | vpxor tp, x3, x4; \ | ||
| 143 | vpor x0, x2, x2; \ | ||
| 144 | vpxor x1, x2, x2; | ||
| 145 | #define S4_2(x0, x1, x2, x3, x4) \ | ||
| 146 | vpand x0, x1, x1; \ | ||
| 147 | vpxor x4, x1, x1; \ | ||
| 148 | vpand x2, x4, x4; \ | ||
| 149 | vpxor tp, x2, x2; \ | ||
| 150 | vpxor x0, x4, x4; \ | ||
| 151 | vpor x1, tp, x3; \ | ||
| 152 | vpxor RNOT, x1, x1; \ | ||
| 153 | vpxor x0, x3, x3; | ||
| 154 | |||
| 155 | #define S5_1(x0, x1, x2, x3, x4) \ | ||
| 156 | vpor x0, x1, tp; \ | ||
| 157 | vpxor tp, x2, x2; \ | ||
| 158 | vpxor RNOT, x3, x3; \ | ||
| 159 | vpxor x0, x1, x4; \ | ||
| 160 | vpxor x2, x0, x0; \ | ||
| 161 | vpand x4, tp, x1; \ | ||
| 162 | vpor x3, x4, x4; \ | ||
| 163 | vpxor x0, x4, x4; | ||
| 164 | #define S5_2(x0, x1, x2, x3, x4) \ | ||
| 165 | vpand x3, x0, x0; \ | ||
| 166 | vpxor x3, x1, x1; \ | ||
| 167 | vpxor x2, x3, x3; \ | ||
| 168 | vpxor x1, x0, x0; \ | ||
| 169 | vpand x4, x2, x2; \ | ||
| 170 | vpxor x2, x1, x1; \ | ||
| 171 | vpand x0, x2, x2; \ | ||
| 172 | vpxor x2, x3, x3; | ||
| 173 | |||
| 174 | #define S6_1(x0, x1, x2, x3, x4) \ | ||
| 175 | vpxor x0, x3, x3; \ | ||
| 176 | vpxor x2, x1, tp; \ | ||
| 177 | vpxor x0, x2, x2; \ | ||
| 178 | vpand x3, x0, x0; \ | ||
| 179 | vpor x3, tp, tp; \ | ||
| 180 | vpxor RNOT, x1, x4; \ | ||
| 181 | vpxor tp, x0, x0; \ | ||
| 182 | vpxor x2, tp, x1; | ||
| 183 | #define S6_2(x0, x1, x2, x3, x4) \ | ||
| 184 | vpxor x4, x3, x3; \ | ||
| 185 | vpxor x0, x4, x4; \ | ||
| 186 | vpand x0, x2, x2; \ | ||
| 187 | vpxor x1, x4, x4; \ | ||
| 188 | vpxor x3, x2, x2; \ | ||
| 189 | vpand x1, x3, x3; \ | ||
| 190 | vpxor x0, x3, x3; \ | ||
| 191 | vpxor x2, x1, x1; | ||
| 192 | |||
| 193 | #define S7_1(x0, x1, x2, x3, x4) \ | ||
| 194 | vpxor RNOT, x1, tp; \ | ||
| 195 | vpxor RNOT, x0, x0; \ | ||
| 196 | vpand x2, tp, x1; \ | ||
| 197 | vpxor x3, x1, x1; \ | ||
| 198 | vpor tp, x3, x3; \ | ||
| 199 | vpxor x2, tp, x4; \ | ||
| 200 | vpxor x3, x2, x2; \ | ||
| 201 | vpxor x0, x3, x3; \ | ||
| 202 | vpor x1, x0, x0; | ||
| 203 | #define S7_2(x0, x1, x2, x3, x4) \ | ||
| 204 | vpand x0, x2, x2; \ | ||
| 205 | vpxor x4, x0, x0; \ | ||
| 206 | vpxor x3, x4, x4; \ | ||
| 207 | vpand x0, x3, x3; \ | ||
| 208 | vpxor x1, x4, x4; \ | ||
| 209 | vpxor x4, x2, x2; \ | ||
| 210 | vpxor x1, x3, x3; \ | ||
| 211 | vpor x0, x4, x4; \ | ||
| 212 | vpxor x1, x4, x4; | ||
| 213 | |||
| 214 | #define SI0_1(x0, x1, x2, x3, x4) \ | ||
| 215 | vpxor x0, x1, x1; \ | ||
| 216 | vpor x1, x3, tp; \ | ||
| 217 | vpxor x1, x3, x4; \ | ||
| 218 | vpxor RNOT, x0, x0; \ | ||
| 219 | vpxor tp, x2, x2; \ | ||
| 220 | vpxor x0, tp, x3; \ | ||
| 221 | vpand x1, x0, x0; \ | ||
| 222 | vpxor x2, x0, x0; | ||
| 223 | #define SI0_2(x0, x1, x2, x3, x4) \ | ||
| 224 | vpand x3, x2, x2; \ | ||
| 225 | vpxor x4, x3, x3; \ | ||
| 226 | vpxor x3, x2, x2; \ | ||
| 227 | vpxor x3, x1, x1; \ | ||
| 228 | vpand x0, x3, x3; \ | ||
| 229 | vpxor x0, x1, x1; \ | ||
| 230 | vpxor x2, x0, x0; \ | ||
| 231 | vpxor x3, x4, x4; | ||
| 232 | |||
| 233 | #define SI1_1(x0, x1, x2, x3, x4) \ | ||
| 234 | vpxor x3, x1, x1; \ | ||
| 235 | vpxor x2, x0, tp; \ | ||
| 236 | vpxor RNOT, x2, x2; \ | ||
| 237 | vpor x1, x0, x4; \ | ||
| 238 | vpxor x3, x4, x4; \ | ||
| 239 | vpand x1, x3, x3; \ | ||
| 240 | vpxor x2, x1, x1; \ | ||
| 241 | vpand x4, x2, x2; | ||
| 242 | #define SI1_2(x0, x1, x2, x3, x4) \ | ||
| 243 | vpxor x1, x4, x4; \ | ||
| 244 | vpor x3, x1, x1; \ | ||
| 245 | vpxor tp, x3, x3; \ | ||
| 246 | vpxor tp, x2, x2; \ | ||
| 247 | vpor x4, tp, x0; \ | ||
| 248 | vpxor x4, x2, x2; \ | ||
| 249 | vpxor x0, x1, x1; \ | ||
| 250 | vpxor x1, x4, x4; | ||
| 251 | |||
| 252 | #define SI2_1(x0, x1, x2, x3, x4) \ | ||
| 253 | vpxor x1, x2, x2; \ | ||
| 254 | vpxor RNOT, x3, tp; \ | ||
| 255 | vpor x2, tp, tp; \ | ||
| 256 | vpxor x3, x2, x2; \ | ||
| 257 | vpxor x0, x3, x4; \ | ||
| 258 | vpxor x1, tp, x3; \ | ||
| 259 | vpor x2, x1, x1; \ | ||
| 260 | vpxor x0, x2, x2; | ||
| 261 | #define SI2_2(x0, x1, x2, x3, x4) \ | ||
| 262 | vpxor x4, x1, x1; \ | ||
| 263 | vpor x3, x4, x4; \ | ||
| 264 | vpxor x3, x2, x2; \ | ||
| 265 | vpxor x2, x4, x4; \ | ||
| 266 | vpand x1, x2, x2; \ | ||
| 267 | vpxor x3, x2, x2; \ | ||
| 268 | vpxor x4, x3, x3; \ | ||
| 269 | vpxor x0, x4, x4; | ||
| 270 | |||
| 271 | #define SI3_1(x0, x1, x2, x3, x4) \ | ||
| 272 | vpxor x1, x2, x2; \ | ||
| 273 | vpand x2, x1, tp; \ | ||
| 274 | vpxor x0, tp, tp; \ | ||
| 275 | vpor x1, x0, x0; \ | ||
| 276 | vpxor x3, x1, x4; \ | ||
| 277 | vpxor x3, x0, x0; \ | ||
| 278 | vpor tp, x3, x3; \ | ||
| 279 | vpxor x2, tp, x1; | ||
| 280 | #define SI3_2(x0, x1, x2, x3, x4) \ | ||
| 281 | vpxor x3, x1, x1; \ | ||
| 282 | vpxor x2, x0, x0; \ | ||
| 283 | vpxor x3, x2, x2; \ | ||
| 284 | vpand x1, x3, x3; \ | ||
| 285 | vpxor x0, x1, x1; \ | ||
| 286 | vpand x2, x0, x0; \ | ||
| 287 | vpxor x3, x4, x4; \ | ||
| 288 | vpxor x0, x3, x3; \ | ||
| 289 | vpxor x1, x0, x0; | ||
| 290 | |||
| 291 | #define SI4_1(x0, x1, x2, x3, x4) \ | ||
| 292 | vpxor x3, x2, x2; \ | ||
| 293 | vpand x1, x0, tp; \ | ||
| 294 | vpxor x2, tp, tp; \ | ||
| 295 | vpor x3, x2, x2; \ | ||
| 296 | vpxor RNOT, x0, x4; \ | ||
| 297 | vpxor tp, x1, x1; \ | ||
| 298 | vpxor x2, tp, x0; \ | ||
| 299 | vpand x4, x2, x2; | ||
| 300 | #define SI4_2(x0, x1, x2, x3, x4) \ | ||
| 301 | vpxor x0, x2, x2; \ | ||
| 302 | vpor x4, x0, x0; \ | ||
| 303 | vpxor x3, x0, x0; \ | ||
| 304 | vpand x2, x3, x3; \ | ||
| 305 | vpxor x3, x4, x4; \ | ||
| 306 | vpxor x1, x3, x3; \ | ||
| 307 | vpand x0, x1, x1; \ | ||
| 308 | vpxor x1, x4, x4; \ | ||
| 309 | vpxor x3, x0, x0; | ||
| 310 | |||
| 311 | #define SI5_1(x0, x1, x2, x3, x4) \ | ||
| 312 | vpor x2, x1, tp; \ | ||
| 313 | vpxor x1, x2, x2; \ | ||
| 314 | vpxor x3, tp, tp; \ | ||
| 315 | vpand x1, x3, x3; \ | ||
| 316 | vpxor x3, x2, x2; \ | ||
| 317 | vpor x0, x3, x3; \ | ||
| 318 | vpxor RNOT, x0, x0; \ | ||
| 319 | vpxor x2, x3, x3; \ | ||
| 320 | vpor x0, x2, x2; | ||
| 321 | #define SI5_2(x0, x1, x2, x3, x4) \ | ||
| 322 | vpxor tp, x1, x4; \ | ||
| 323 | vpxor x4, x2, x2; \ | ||
| 324 | vpand x0, x4, x4; \ | ||
| 325 | vpxor tp, x0, x0; \ | ||
| 326 | vpxor x3, tp, x1; \ | ||
| 327 | vpand x2, x0, x0; \ | ||
| 328 | vpxor x3, x2, x2; \ | ||
| 329 | vpxor x2, x0, x0; \ | ||
| 330 | vpxor x4, x2, x2; \ | ||
| 331 | vpxor x3, x4, x4; | ||
| 332 | |||
| 333 | #define SI6_1(x0, x1, x2, x3, x4) \ | ||
| 334 | vpxor x2, x0, x0; \ | ||
| 335 | vpand x3, x0, tp; \ | ||
| 336 | vpxor x3, x2, x2; \ | ||
| 337 | vpxor x2, tp, tp; \ | ||
| 338 | vpxor x1, x3, x3; \ | ||
| 339 | vpor x0, x2, x2; \ | ||
| 340 | vpxor x3, x2, x2; \ | ||
| 341 | vpand tp, x3, x3; | ||
| 342 | #define SI6_2(x0, x1, x2, x3, x4) \ | ||
| 343 | vpxor RNOT, tp, tp; \ | ||
| 344 | vpxor x1, x3, x3; \ | ||
| 345 | vpand x2, x1, x1; \ | ||
| 346 | vpxor tp, x0, x4; \ | ||
| 347 | vpxor x4, x3, x3; \ | ||
| 348 | vpxor x2, x4, x4; \ | ||
| 349 | vpxor x1, tp, x0; \ | ||
| 350 | vpxor x0, x2, x2; | ||
| 351 | |||
| 352 | #define SI7_1(x0, x1, x2, x3, x4) \ | ||
| 353 | vpand x0, x3, tp; \ | ||
| 354 | vpxor x2, x0, x0; \ | ||
| 355 | vpor x3, x2, x2; \ | ||
| 356 | vpxor x1, x3, x4; \ | ||
| 357 | vpxor RNOT, x0, x0; \ | ||
| 358 | vpor tp, x1, x1; \ | ||
| 359 | vpxor x0, x4, x4; \ | ||
| 360 | vpand x2, x0, x0; \ | ||
| 361 | vpxor x1, x0, x0; | ||
| 362 | #define SI7_2(x0, x1, x2, x3, x4) \ | ||
| 363 | vpand x2, x1, x1; \ | ||
| 364 | vpxor x2, tp, x3; \ | ||
| 365 | vpxor x3, x4, x4; \ | ||
| 366 | vpand x3, x2, x2; \ | ||
| 367 | vpor x0, x3, x3; \ | ||
| 368 | vpxor x4, x1, x1; \ | ||
| 369 | vpxor x4, x3, x3; \ | ||
| 370 | vpand x0, x4, x4; \ | ||
| 371 | vpxor x2, x4, x4; | ||
| 372 | |||
| 373 | #define get_key(i, j, t) \ | ||
| 374 | vbroadcastss (4*(i)+(j))*4(CTX), t; | ||
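
get_key(i, j, t) treats the expanded key as a flat array of 32-bit words: word j of round subkey i sits at byte offset (4*i + j)*4 from CTX, and vbroadcastss replicates it into all four lanes of t, so a single vpxor keys four blocks at once. Serpent's schedule has 33 four-word subkeys, used at rounds 0 through 32. The addressing restated in C, with an illustrative struct:

	#include <stdint.h>

	/* Illustrative flat view: 33 subkeys x 4 words of 32 bits. */
	struct serpent_sched { uint32_t expkey[33 * 4]; };

	/* Word j (0..3) of subkey i (0..32): the (4*(i)+(j))*4(CTX)
	 * operand of get_key() above. */
	static inline uint32_t subkey_word(const struct serpent_sched *k,
					   unsigned int i, unsigned int j)
	{
		return k->expkey[4 * i + j];
	}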
| 375 | |||
| 376 | #define K2(x0, x1, x2, x3, x4, i) \ | ||
| 377 | get_key(i, 0, RK0); \ | ||
| 378 | get_key(i, 1, RK1); \ | ||
| 379 | get_key(i, 2, RK2); \ | ||
| 380 | get_key(i, 3, RK3); \ | ||
| 381 | vpxor RK0, x0 ## 1, x0 ## 1; \ | ||
| 382 | vpxor RK1, x1 ## 1, x1 ## 1; \ | ||
| 383 | vpxor RK2, x2 ## 1, x2 ## 1; \ | ||
| 384 | vpxor RK3, x3 ## 1, x3 ## 1; \ | ||
| 385 | vpxor RK0, x0 ## 2, x0 ## 2; \ | ||
| 386 | vpxor RK1, x1 ## 2, x1 ## 2; \ | ||
| 387 | vpxor RK2, x2 ## 2, x2 ## 2; \ | ||
| 388 | vpxor RK3, x3 ## 2, x3 ## 2; | ||
| 389 | |||
| 390 | #define LK2(x0, x1, x2, x3, x4, i) \ | ||
| 391 | vpslld $13, x0 ## 1, x4 ## 1; \ | ||
| 392 | vpsrld $(32 - 13), x0 ## 1, x0 ## 1; \ | ||
| 393 | vpor x4 ## 1, x0 ## 1, x0 ## 1; \ | ||
| 394 | vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ | ||
| 395 | vpslld $3, x2 ## 1, x4 ## 1; \ | ||
| 396 | vpsrld $(32 - 3), x2 ## 1, x2 ## 1; \ | ||
| 397 | vpor x4 ## 1, x2 ## 1, x2 ## 1; \ | ||
| 398 | vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ | ||
| 399 | vpslld $13, x0 ## 2, x4 ## 2; \ | ||
| 400 | vpsrld $(32 - 13), x0 ## 2, x0 ## 2; \ | ||
| 401 | vpor x4 ## 2, x0 ## 2, x0 ## 2; \ | ||
| 402 | vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ | ||
| 403 | vpslld $3, x2 ## 2, x4 ## 2; \ | ||
| 404 | vpsrld $(32 - 3), x2 ## 2, x2 ## 2; \ | ||
| 405 | vpor x4 ## 2, x2 ## 2, x2 ## 2; \ | ||
| 406 | vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ | ||
| 407 | vpslld $1, x1 ## 1, x4 ## 1; \ | ||
| 408 | vpsrld $(32 - 1), x1 ## 1, x1 ## 1; \ | ||
| 409 | vpor x4 ## 1, x1 ## 1, x1 ## 1; \ | ||
| 410 | vpslld $3, x0 ## 1, x4 ## 1; \ | ||
| 411 | vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ | ||
| 412 | vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ | ||
| 413 | get_key(i, 1, RK1); \ | ||
| 414 | vpslld $1, x1 ## 2, x4 ## 2; \ | ||
| 415 | vpsrld $(32 - 1), x1 ## 2, x1 ## 2; \ | ||
| 416 | vpor x4 ## 2, x1 ## 2, x1 ## 2; \ | ||
| 417 | vpslld $3, x0 ## 2, x4 ## 2; \ | ||
| 418 | vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ | ||
| 419 | vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ | ||
| 420 | get_key(i, 3, RK3); \ | ||
| 421 | vpslld $7, x3 ## 1, x4 ## 1; \ | ||
| 422 | vpsrld $(32 - 7), x3 ## 1, x3 ## 1; \ | ||
| 423 | vpor x4 ## 1, x3 ## 1, x3 ## 1; \ | ||
| 424 | vpslld $7, x1 ## 1, x4 ## 1; \ | ||
| 425 | vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ | ||
| 426 | vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ | ||
| 427 | vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ | ||
| 428 | vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ | ||
| 429 | get_key(i, 0, RK0); \ | ||
| 430 | vpslld $7, x3 ## 2, x4 ## 2; \ | ||
| 431 | vpsrld $(32 - 7), x3 ## 2, x3 ## 2; \ | ||
| 432 | vpor x4 ## 2, x3 ## 2, x3 ## 2; \ | ||
| 433 | vpslld $7, x1 ## 2, x4 ## 2; \ | ||
| 434 | vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ | ||
| 435 | vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ | ||
| 436 | vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ | ||
| 437 | vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ | ||
| 438 | get_key(i, 2, RK2); \ | ||
| 439 | vpxor RK1, x1 ## 1, x1 ## 1; \ | ||
| 440 | vpxor RK3, x3 ## 1, x3 ## 1; \ | ||
| 441 | vpslld $5, x0 ## 1, x4 ## 1; \ | ||
| 442 | vpsrld $(32 - 5), x0 ## 1, x0 ## 1; \ | ||
| 443 | vpor x4 ## 1, x0 ## 1, x0 ## 1; \ | ||
| 444 | vpslld $22, x2 ## 1, x4 ## 1; \ | ||
| 445 | vpsrld $(32 - 22), x2 ## 1, x2 ## 1; \ | ||
| 446 | vpor x4 ## 1, x2 ## 1, x2 ## 1; \ | ||
| 447 | vpxor RK0, x0 ## 1, x0 ## 1; \ | ||
| 448 | vpxor RK2, x2 ## 1, x2 ## 1; \ | ||
| 449 | vpxor RK1, x1 ## 2, x1 ## 2; \ | ||
| 450 | vpxor RK3, x3 ## 2, x3 ## 2; \ | ||
| 451 | vpslld $5, x0 ## 2, x4 ## 2; \ | ||
| 452 | vpsrld $(32 - 5), x0 ## 2, x0 ## 2; \ | ||
| 453 | vpor x4 ## 2, x0 ## 2, x0 ## 2; \ | ||
| 454 | vpslld $22, x2 ## 2, x4 ## 2; \ | ||
| 455 | vpsrld $(32 - 22), x2 ## 2, x2 ## 2; \ | ||
| 456 | vpor x4 ## 2, x2 ## 2, x2 ## 2; \ | ||
| 457 | vpxor RK0, x0 ## 2, x0 ## 2; \ | ||
| 458 | vpxor RK2, x2 ## 2, x2 ## 2; | ||
| 459 | |||
| 460 | #define KL2(x0, x1, x2, x3, x4, i) \ | ||
| 461 | vpxor RK0, x0 ## 1, x0 ## 1; \ | ||
| 462 | vpxor RK2, x2 ## 1, x2 ## 1; \ | ||
| 463 | vpsrld $5, x0 ## 1, x4 ## 1; \ | ||
| 464 | vpslld $(32 - 5), x0 ## 1, x0 ## 1; \ | ||
| 465 | vpor x4 ## 1, x0 ## 1, x0 ## 1; \ | ||
| 466 | vpxor RK3, x3 ## 1, x3 ## 1; \ | ||
| 467 | vpxor RK1, x1 ## 1, x1 ## 1; \ | ||
| 468 | vpsrld $22, x2 ## 1, x4 ## 1; \ | ||
| 469 | vpslld $(32 - 22), x2 ## 1, x2 ## 1; \ | ||
| 470 | vpor x4 ## 1, x2 ## 1, x2 ## 1; \ | ||
| 471 | vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ | ||
| 472 | vpxor RK0, x0 ## 2, x0 ## 2; \ | ||
| 473 | vpxor RK2, x2 ## 2, x2 ## 2; \ | ||
| 474 | vpsrld $5, x0 ## 2, x4 ## 2; \ | ||
| 475 | vpslld $(32 - 5), x0 ## 2, x0 ## 2; \ | ||
| 476 | vpor x4 ## 2, x0 ## 2, x0 ## 2; \ | ||
| 477 | vpxor RK3, x3 ## 2, x3 ## 2; \ | ||
| 478 | vpxor RK1, x1 ## 2, x1 ## 2; \ | ||
| 479 | vpsrld $22, x2 ## 2, x4 ## 2; \ | ||
| 480 | vpslld $(32 - 22), x2 ## 2, x2 ## 2; \ | ||
| 481 | vpor x4 ## 2, x2 ## 2, x2 ## 2; \ | ||
| 482 | vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ | ||
| 483 | vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ | ||
| 484 | vpslld $7, x1 ## 1, x4 ## 1; \ | ||
| 485 | vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ | ||
| 486 | vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ | ||
| 487 | vpsrld $1, x1 ## 1, x4 ## 1; \ | ||
| 488 | vpslld $(32 - 1), x1 ## 1, x1 ## 1; \ | ||
| 489 | vpor x4 ## 1, x1 ## 1, x1 ## 1; \ | ||
| 490 | vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ | ||
| 491 | vpslld $7, x1 ## 2, x4 ## 2; \ | ||
| 492 | vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ | ||
| 493 | vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ | ||
| 494 | vpsrld $1, x1 ## 2, x4 ## 2; \ | ||
| 495 | vpslld $(32 - 1), x1 ## 2, x1 ## 2; \ | ||
| 496 | vpor x4 ## 2, x1 ## 2, x1 ## 2; \ | ||
| 497 | vpsrld $7, x3 ## 1, x4 ## 1; \ | ||
| 498 | vpslld $(32 - 7), x3 ## 1, x3 ## 1; \ | ||
| 499 | vpor x4 ## 1, x3 ## 1, x3 ## 1; \ | ||
| 500 | vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ | ||
| 501 | vpslld $3, x0 ## 1, x4 ## 1; \ | ||
| 502 | vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ | ||
| 503 | vpsrld $7, x3 ## 2, x4 ## 2; \ | ||
| 504 | vpslld $(32 - 7), x3 ## 2, x3 ## 2; \ | ||
| 505 | vpor x4 ## 2, x3 ## 2, x3 ## 2; \ | ||
| 506 | vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ | ||
| 507 | vpslld $3, x0 ## 2, x4 ## 2; \ | ||
| 508 | vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ | ||
| 509 | vpsrld $13, x0 ## 1, x4 ## 1; \ | ||
| 510 | vpslld $(32 - 13), x0 ## 1, x0 ## 1; \ | ||
| 511 | vpor x4 ## 1, x0 ## 1, x0 ## 1; \ | ||
| 512 | vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ | ||
| 513 | vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ | ||
| 514 | vpsrld $3, x2 ## 1, x4 ## 1; \ | ||
| 515 | vpslld $(32 - 3), x2 ## 1, x2 ## 1; \ | ||
| 516 | vpor x4 ## 1, x2 ## 1, x2 ## 1; \ | ||
| 517 | vpsrld $13, x0 ## 2, x4 ## 2; \ | ||
| 518 | vpslld $(32 - 13), x0 ## 2, x0 ## 2; \ | ||
| 519 | vpor x4 ## 2, x0 ## 2, x0 ## 2; \ | ||
| 520 | vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ | ||
| 521 | vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ | ||
| 522 | vpsrld $3, x2 ## 2, x4 ## 2; \ | ||
| 523 | vpslld $(32 - 3), x2 ## 2, x2 ## 2; \ | ||
| 524 | vpor x4 ## 2, x2 ## 2, x2 ## 2; | ||
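
LK2 is Serpent's linear transformation fused with the next round's key XOR, interleaved across the two four-block register sets so the rotate/XOR chains of one set hide the latency of the other; KL2 is its exact inverse (key XOR first, then the reversed rotations), used on the decrypt path. AVX has no vector-rotate instruction, hence every rotation is a vpslld/vpsrld/vpor triple. Stripped of the interleaving, the forward transform over one block is the standard:

	#include <stdint.h>

	static inline uint32_t rol32(uint32_t v, unsigned int s)
	{
		return (v << s) | (v >> (32 - s));
	}

	/* Serpent LT; constants match those hard-coded in LK2 above. */
	static void serpent_lt(uint32_t *x0, uint32_t *x1,
			       uint32_t *x2, uint32_t *x3)
	{
		*x0 = rol32(*x0, 13);
		*x2 = rol32(*x2, 3);
		*x1 ^= *x0 ^ *x2;
		*x3 ^= *x2 ^ (*x0 << 3);
		*x1 = rol32(*x1, 1);
		*x3 = rol32(*x3, 7);
		*x0 ^= *x1 ^ *x3;
		*x2 ^= *x3 ^ (*x1 << 7);
		*x0 = rol32(*x0, 5);
		*x2 = rol32(*x2, 22);
	}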
| 525 | |||
| 526 | #define S(SBOX, x0, x1, x2, x3, x4) \ | ||
| 527 | SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ | ||
| 528 | SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ | ||
| 529 | SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ | ||
| 530 | SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); | ||
| 531 | |||
| 532 | #define SP(SBOX, x0, x1, x2, x3, x4, i) \ | ||
| 533 | get_key(i, 0, RK0); \ | ||
| 534 | SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ | ||
| 535 | get_key(i, 2, RK2); \ | ||
| 536 | SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ | ||
| 537 | get_key(i, 3, RK3); \ | ||
| 538 | SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ | ||
| 539 | get_key(i, 1, RK1); \ | ||
| 540 | SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ | ||
| 541 | |||
| 542 | #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ | ||
| 543 | vpunpckldq x1, x0, t0; \ | ||
| 544 | vpunpckhdq x1, x0, t2; \ | ||
| 545 | vpunpckldq x3, x2, t1; \ | ||
| 546 | vpunpckhdq x3, x2, x3; \ | ||
| 547 | \ | ||
| 548 | vpunpcklqdq t1, t0, x0; \ | ||
| 549 | vpunpckhqdq t1, t0, x1; \ | ||
| 550 | vpunpcklqdq x3, t2, x2; \ | ||
| 551 | vpunpckhqdq x3, t2, x3; | ||
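
The 8-way code is word-sliced: read_blocks loads four consecutive 128-bit blocks and transpose_4x4 rearranges them so that register k holds word k of all four blocks, which is the layout the S-box macros operate on (two such register sets make eight blocks). The unpack sequence is nothing more than a 4x4 matrix transpose over 32-bit elements; in scalar form:

	#include <stdint.h>

	/* m[r][c] = word c of block r; afterwards row k holds word k of
	 * every block, matching the vpunpckl/hdq + vpunpckl/hqdq shuffle
	 * above. */
	static void transpose_4x4(uint32_t m[4][4])
	{
		unsigned int r, c;
		uint32_t t;

		for (r = 0; r < 4; r++)
			for (c = r + 1; c < 4; c++) {
				t = m[r][c];
				m[r][c] = m[c][r];
				m[c][r] = t;
			}
	}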
| 552 | |||
| 553 | #define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \ | ||
| 554 | vmovdqu (0*4*4)(in), x0; \ | ||
| 555 | vmovdqu (1*4*4)(in), x1; \ | ||
| 556 | vmovdqu (2*4*4)(in), x2; \ | ||
| 557 | vmovdqu (3*4*4)(in), x3; \ | ||
| 558 | \ | ||
| 559 | transpose_4x4(x0, x1, x2, x3, t0, t1, t2) | ||
| 560 | |||
| 561 | #define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ | ||
| 562 | transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ | ||
| 563 | \ | ||
| 564 | vmovdqu x0, (0*4*4)(out); \ | ||
| 565 | vmovdqu x1, (1*4*4)(out); \ | ||
| 566 | vmovdqu x2, (2*4*4)(out); \ | ||
| 567 | vmovdqu x3, (3*4*4)(out); | ||
| 568 | |||
| 569 | #define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ | ||
| 570 | transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ | ||
| 571 | \ | ||
| 572 | vpxor (0*4*4)(out), x0, x0; \ | ||
| 573 | vmovdqu x0, (0*4*4)(out); \ | ||
| 574 | vpxor (1*4*4)(out), x1, x1; \ | ||
| 575 | vmovdqu x1, (1*4*4)(out); \ | ||
| 576 | vpxor (2*4*4)(out), x2, x2; \ | ||
| 577 | vmovdqu x2, (2*4*4)(out); \ | ||
| 578 | vpxor (3*4*4)(out), x3, x3; \ | ||
| 579 | vmovdqu x3, (3*4*4)(out); | ||
| 580 | |||
| 581 | .align 8 | ||
| 582 | .global __serpent_enc_blk_8way_avx | ||
| 583 | .type __serpent_enc_blk_8way_avx,@function; | ||
| 584 | |||
| 585 | __serpent_enc_blk_8way_avx: | ||
| 586 | /* input: | ||
| 587 | * %rdi: ctx, CTX | ||
| 588 | * %rsi: dst | ||
| 589 | * %rdx: src | ||
| 590 | * %rcx: bool, if true: xor output | ||
| 591 | */ | ||
| 592 | |||
| 593 | vpcmpeqd RNOT, RNOT, RNOT; | ||
| 594 | |||
| 595 | leaq (4*4*4)(%rdx), %rax; | ||
| 596 | read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2); | ||
| 597 | read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); | ||
| 598 | |||
| 599 | K2(RA, RB, RC, RD, RE, 0); | ||
| 600 | S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1); | ||
| 601 | S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2); | ||
| 602 | S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3); | ||
| 603 | S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4); | ||
| 604 | S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5); | ||
| 605 | S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6); | ||
| 606 | S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7); | ||
| 607 | S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8); | ||
| 608 | S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9); | ||
| 609 | S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10); | ||
| 610 | S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11); | ||
| 611 | S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, RD, RA, RB, 12); | ||
| 612 | S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13); | ||
| 613 | S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14); | ||
| 614 | S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15); | ||
| 615 | S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16); | ||
| 616 | S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17); | ||
| 617 | S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18); | ||
| 618 | S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19); | ||
| 619 | S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20); | ||
| 620 | S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21); | ||
| 621 | S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22); | ||
| 622 | S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23); | ||
| 623 | S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24); | ||
| 624 | S(S0, RB, RE, RA, RD, RC); LK2(RA, RE, RD, RB, RC, 25); | ||
| 625 | S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26); | ||
| 626 | S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27); | ||
| 627 | S(S3, RE, RD, RC, RA, RB); LK2(RA, RB, RD, RE, RC, 28); | ||
| 628 | S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29); | ||
| 629 | S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30); | ||
| 630 | S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31); | ||
| 631 | S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32); | ||
| 632 | |||
| 633 | leaq (4*4*4)(%rsi), %rax; | ||
| 634 | |||
| 635 | testb %cl, %cl; | ||
| 636 | jnz __enc_xor8; | ||
| 637 | |||
| 638 | write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); | ||
| 639 | write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); | ||
| 640 | |||
| 641 | ret; | ||
| 642 | |||
| 643 | __enc_xor8: | ||
| 644 | xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); | ||
| 645 | xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); | ||
| 646 | |||
| 647 | ret; | ||
| 648 | |||
| 649 | .align 8 | ||
| 650 | .global serpent_dec_blk_8way_avx | ||
| 651 | .type serpent_dec_blk_8way_avx,@function; | ||
| 652 | |||
| 653 | serpent_dec_blk_8way_avx: | ||
| 654 | /* input: | ||
| 655 | * %rdi: ctx, CTX | ||
| 656 | * %rsi: dst | ||
| 657 | * %rdx: src | ||
| 658 | */ | ||
| 659 | |||
| 660 | vpcmpeqd RNOT, RNOT, RNOT; | ||
| 661 | |||
| 662 | leaq (4*4*4)(%rdx), %rax; | ||
| 663 | read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2); | ||
| 664 | read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); | ||
| 665 | |||
| 666 | K2(RA, RB, RC, RD, RE, 32); | ||
| 667 | SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31); | ||
| 668 | SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30); | ||
| 669 | SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29); | ||
| 670 | SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28); | ||
| 671 | SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27); | ||
| 672 | SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26); | ||
| 673 | SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25); | ||
| 674 | SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24); | ||
| 675 | SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23); | ||
| 676 | SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22); | ||
| 677 | SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21); | ||
| 678 | SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20); | ||
| 679 | SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19); | ||
| 680 | SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18); | ||
| 681 | SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17); | ||
| 682 | SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16); | ||
| 683 | SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15); | ||
| 684 | SP(SI6, RA, RC, RD, RB, RE, 14); KL2(RD, RE, RB, RA, RC, 14); | ||
| 685 | SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13); | ||
| 686 | SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, RA, RB, RC, 12); | ||
| 687 | SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11); | ||
| 688 | SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10); | ||
| 689 | SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9); | ||
| 690 | SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8); | ||
| 691 | SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7); | ||
| 692 | SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6); | ||
| 693 | SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5); | ||
| 694 | SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4); | ||
| 695 | SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3); | ||
| 696 | SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2); | ||
| 697 | SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1); | ||
| 698 | S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0); | ||
| 699 | |||
| 700 | leaq (4*4*4)(%rsi), %rax; | ||
| 701 | write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); | ||
| 702 | write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); | ||
| 703 | |||
| 704 | ret; | ||
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c new file mode 100644 index 000000000000..b36bdac237eb --- /dev/null +++ b/arch/x86/crypto/serpent_avx_glue.c | |||
| @@ -0,0 +1,636 @@ | |||
| 1 | /* | ||
| 2 | * Glue Code for AVX assembler versions of Serpent Cipher | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Johannes Goetzfried | ||
| 5 | * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> | ||
| 6 | * | ||
| 7 | * Glue code based on serpent_sse2_glue.c by: | ||
| 8 | * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or | ||
| 13 | * (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software | ||
| 22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
| 23 | * USA | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/hardirq.h> | ||
| 29 | #include <linux/types.h> | ||
| 30 | #include <linux/crypto.h> | ||
| 31 | #include <linux/err.h> | ||
| 32 | #include <crypto/algapi.h> | ||
| 33 | #include <crypto/serpent.h> | ||
| 34 | #include <crypto/cryptd.h> | ||
| 35 | #include <crypto/b128ops.h> | ||
| 36 | #include <crypto/ctr.h> | ||
| 37 | #include <crypto/lrw.h> | ||
| 38 | #include <crypto/xts.h> | ||
| 39 | #include <asm/xcr.h> | ||
| 40 | #include <asm/xsave.h> | ||
| 41 | #include <asm/crypto/serpent-avx.h> | ||
| 42 | #include <asm/crypto/ablk_helper.h> | ||
| 43 | #include <asm/crypto/glue_helper.h> | ||
| 44 | |||
| 45 | static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) | ||
| 46 | { | ||
| 47 | u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; | ||
| 48 | unsigned int j; | ||
| 49 | |||
| 50 | for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) | ||
| 51 | ivs[j] = src[j]; | ||
| 52 | |||
| 53 | serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src); | ||
| 54 | |||
| 55 | for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) | ||
| 56 | u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); | ||
| 57 | } | ||
| 58 | |||
| 59 | static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv) | ||
| 60 | { | ||
| 61 | be128 ctrblk; | ||
| 62 | |||
| 63 | u128_to_be128(&ctrblk, iv); | ||
| 64 | u128_inc(iv); | ||
| 65 | |||
| 66 | __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); | ||
| 67 | u128_xor(dst, src, (u128 *)&ctrblk); | ||
| 68 | } | ||
| 69 | |||
| 70 | static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src, | ||
| 71 | u128 *iv) | ||
| 72 | { | ||
| 73 | be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; | ||
| 74 | unsigned int i; | ||
| 75 | |||
| 76 | for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { | ||
| 77 | if (dst != src) | ||
| 78 | dst[i] = src[i]; | ||
| 79 | |||
| 80 | u128_to_be128(&ctrblks[i], iv); | ||
| 81 | u128_inc(iv); | ||
| 82 | } | ||
| 83 | |||
| 84 | serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks); | ||
| 85 | } | ||
| 86 | |||
| 87 | static const struct common_glue_ctx serpent_enc = { | ||
| 88 | .num_funcs = 2, | ||
| 89 | .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, | ||
| 90 | |||
| 91 | .funcs = { { | ||
| 92 | .num_blocks = SERPENT_PARALLEL_BLOCKS, | ||
| 93 | .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) } | ||
| 94 | }, { | ||
| 95 | .num_blocks = 1, | ||
| 96 | .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } | ||
| 97 | } } | ||
| 98 | }; | ||
| 99 | |||
| 100 | static const struct common_glue_ctx serpent_ctr = { | ||
| 101 | .num_funcs = 2, | ||
| 102 | .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, | ||
| 103 | |||
| 104 | .funcs = { { | ||
| 105 | .num_blocks = SERPENT_PARALLEL_BLOCKS, | ||
| 106 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) } | ||
| 107 | }, { | ||
| 108 | .num_blocks = 1, | ||
| 109 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) } | ||
| 110 | } } | ||
| 111 | }; | ||
| 112 | |||
| 113 | static const struct common_glue_ctx serpent_dec = { | ||
| 114 | .num_funcs = 2, | ||
| 115 | .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, | ||
| 116 | |||
| 117 | .funcs = { { | ||
| 118 | .num_blocks = SERPENT_PARALLEL_BLOCKS, | ||
| 119 | .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) } | ||
| 120 | }, { | ||
| 121 | .num_blocks = 1, | ||
| 122 | .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } | ||
| 123 | } } | ||
| 124 | }; | ||
| 125 | |||
| 126 | static const struct common_glue_ctx serpent_dec_cbc = { | ||
| 127 | .num_funcs = 2, | ||
| 128 | .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, | ||
| 129 | |||
| 130 | .funcs = { { | ||
| 131 | .num_blocks = SERPENT_PARALLEL_BLOCKS, | ||
| 132 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) } | ||
| 133 | }, { | ||
| 134 | .num_blocks = 1, | ||
| 135 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } | ||
| 136 | } } | ||
| 137 | }; | ||
| 138 | |||
| 139 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 140 | struct scatterlist *src, unsigned int nbytes) | ||
| 141 | { | ||
| 142 | return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes); | ||
| 143 | } | ||
| 144 | |||
| 145 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 146 | struct scatterlist *src, unsigned int nbytes) | ||
| 147 | { | ||
| 148 | return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes); | ||
| 149 | } | ||
| 150 | |||
| 151 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 152 | struct scatterlist *src, unsigned int nbytes) | ||
| 153 | { | ||
| 154 | return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc, | ||
| 155 | dst, src, nbytes); | ||
| 156 | } | ||
| 157 | |||
| 158 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 159 | struct scatterlist *src, unsigned int nbytes) | ||
| 160 | { | ||
| 161 | return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src, | ||
| 162 | nbytes); | ||
| 163 | } | ||
| 164 | |||
| 165 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 166 | struct scatterlist *src, unsigned int nbytes) | ||
| 167 | { | ||
| 168 | return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes); | ||
| 169 | } | ||
| 170 | |||
| 171 | static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes) | ||
| 172 | { | ||
| 173 | return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS, | ||
| 174 | NULL, fpu_enabled, nbytes); | ||
| 175 | } | ||
| 176 | |||
| 177 | static inline void serpent_fpu_end(bool fpu_enabled) | ||
| 178 | { | ||
| 179 | glue_fpu_end(fpu_enabled); | ||
| 180 | } | ||
| 181 | |||
| 182 | struct crypt_priv { | ||
| 183 | struct serpent_ctx *ctx; | ||
| 184 | bool fpu_enabled; | ||
| 185 | }; | ||
| 186 | |||
| 187 | static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) | ||
| 188 | { | ||
| 189 | const unsigned int bsize = SERPENT_BLOCK_SIZE; | ||
| 190 | struct crypt_priv *ctx = priv; | ||
| 191 | int i; | ||
| 192 | |||
| 193 | ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); | ||
| 194 | |||
| 195 | if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) { | ||
| 196 | serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst); | ||
| 197 | return; | ||
| 198 | } | ||
| 199 | |||
| 200 | for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) | ||
| 201 | __serpent_encrypt(ctx->ctx, srcdst, srcdst); | ||
| 202 | } | ||
| 203 | |||
| 204 | static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) | ||
| 205 | { | ||
| 206 | const unsigned int bsize = SERPENT_BLOCK_SIZE; | ||
| 207 | struct crypt_priv *ctx = priv; | ||
| 208 | int i; | ||
| 209 | |||
| 210 | ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); | ||
| 211 | |||
| 212 | if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) { | ||
| 213 | serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst); | ||
| 214 | return; | ||
| 215 | } | ||
| 216 | |||
| 217 | for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) | ||
| 218 | __serpent_decrypt(ctx->ctx, srcdst, srcdst); | ||
| 219 | } | ||
| 220 | |||
| 221 | struct serpent_lrw_ctx { | ||
| 222 | struct lrw_table_ctx lrw_table; | ||
| 223 | struct serpent_ctx serpent_ctx; | ||
| 224 | }; | ||
| 225 | |||
| 226 | static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 227 | unsigned int keylen) | ||
| 228 | { | ||
| 229 | struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 230 | int err; | ||
| 231 | |||
| 232 | err = __serpent_setkey(&ctx->serpent_ctx, key, keylen - | ||
| 233 | SERPENT_BLOCK_SIZE); | ||
| 234 | if (err) | ||
| 235 | return err; | ||
| 236 | |||
| 237 | return lrw_init_table(&ctx->lrw_table, key + keylen - | ||
| 238 | SERPENT_BLOCK_SIZE); | ||
| 239 | } | ||
| 240 | |||
| 241 | static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 242 | struct scatterlist *src, unsigned int nbytes) | ||
| 243 | { | ||
| 244 | struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 245 | be128 buf[SERPENT_PARALLEL_BLOCKS]; | ||
| 246 | struct crypt_priv crypt_ctx = { | ||
| 247 | .ctx = &ctx->serpent_ctx, | ||
| 248 | .fpu_enabled = false, | ||
| 249 | }; | ||
| 250 | struct lrw_crypt_req req = { | ||
| 251 | .tbuf = buf, | ||
| 252 | .tbuflen = sizeof(buf), | ||
| 253 | |||
| 254 | .table_ctx = &ctx->lrw_table, | ||
| 255 | .crypt_ctx = &crypt_ctx, | ||
| 256 | .crypt_fn = encrypt_callback, | ||
| 257 | }; | ||
| 258 | int ret; | ||
| 259 | |||
| 260 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 261 | ret = lrw_crypt(desc, dst, src, nbytes, &req); | ||
| 262 | serpent_fpu_end(crypt_ctx.fpu_enabled); | ||
| 263 | |||
| 264 | return ret; | ||
| 265 | } | ||
| 266 | |||
| 267 | static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 268 | struct scatterlist *src, unsigned int nbytes) | ||
| 269 | { | ||
| 270 | struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 271 | be128 buf[SERPENT_PARALLEL_BLOCKS]; | ||
| 272 | struct crypt_priv crypt_ctx = { | ||
| 273 | .ctx = &ctx->serpent_ctx, | ||
| 274 | .fpu_enabled = false, | ||
| 275 | }; | ||
| 276 | struct lrw_crypt_req req = { | ||
| 277 | .tbuf = buf, | ||
| 278 | .tbuflen = sizeof(buf), | ||
| 279 | |||
| 280 | .table_ctx = &ctx->lrw_table, | ||
| 281 | .crypt_ctx = &crypt_ctx, | ||
| 282 | .crypt_fn = decrypt_callback, | ||
| 283 | }; | ||
| 284 | int ret; | ||
| 285 | |||
| 286 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 287 | ret = lrw_crypt(desc, dst, src, nbytes, &req); | ||
| 288 | serpent_fpu_end(crypt_ctx.fpu_enabled); | ||
| 289 | |||
| 290 | return ret; | ||
| 291 | } | ||
| 292 | |||
| 293 | static void lrw_exit_tfm(struct crypto_tfm *tfm) | ||
| 294 | { | ||
| 295 | struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 296 | |||
| 297 | lrw_free_table(&ctx->lrw_table); | ||
| 298 | } | ||
| 299 | |||
| 300 | struct serpent_xts_ctx { | ||
| 301 | struct serpent_ctx tweak_ctx; | ||
| 302 | struct serpent_ctx crypt_ctx; | ||
| 303 | }; | ||
| 304 | |||
| 305 | static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 306 | unsigned int keylen) | ||
| 307 | { | ||
| 308 | struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 309 | u32 *flags = &tfm->crt_flags; | ||
| 310 | int err; | ||
| 311 | |||
| 312 | /* key consists of keys of equal size concatenated, therefore | ||
| 313 | * the length must be even | ||
| 314 | */ | ||
| 315 | if (keylen % 2) { | ||
| 316 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | ||
| 317 | return -EINVAL; | ||
| 318 | } | ||
| 319 | |||
| 320 | /* first half of xts-key is for crypt */ | ||
| 321 | err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2); | ||
| 322 | if (err) | ||
| 323 | return err; | ||
| 324 | |||
| 325 | /* second half of xts-key is for tweak */ | ||
| 326 | return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); | ||
| 327 | } | ||
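
xts_serpent_setkey follows the XTS convention that one logical key is two cipher keys concatenated: the first half keys the data cipher, the second half keys the tweak cipher, which is why odd key lengths are rejected. The split in isolation, with a hypothetical set_cipher_key() in place of __serpent_setkey():

	#include <stddef.h>
	#include <stdint.h>

	int set_cipher_key(void *ctx, const uint8_t *key, size_t keylen);

	/* XTS key = data key || tweak key, equal halves. */
	static int xts_split_setkey(void *crypt_ctx, void *tweak_ctx,
				    const uint8_t *key, size_t keylen)
	{
		int err;

		if (keylen % 2)		/* halves must be equal */
			return -1;

		err = set_cipher_key(crypt_ctx, key, keylen / 2);
		if (err)
			return err;
		return set_cipher_key(tweak_ctx, key + keylen / 2,
				      keylen / 2);
	}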
| 328 | |||
| 329 | static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 330 | struct scatterlist *src, unsigned int nbytes) | ||
| 331 | { | ||
| 332 | struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 333 | be128 buf[SERPENT_PARALLEL_BLOCKS]; | ||
| 334 | struct crypt_priv crypt_ctx = { | ||
| 335 | .ctx = &ctx->crypt_ctx, | ||
| 336 | .fpu_enabled = false, | ||
| 337 | }; | ||
| 338 | struct xts_crypt_req req = { | ||
| 339 | .tbuf = buf, | ||
| 340 | .tbuflen = sizeof(buf), | ||
| 341 | |||
| 342 | .tweak_ctx = &ctx->tweak_ctx, | ||
| 343 | .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt), | ||
| 344 | .crypt_ctx = &crypt_ctx, | ||
| 345 | .crypt_fn = encrypt_callback, | ||
| 346 | }; | ||
| 347 | int ret; | ||
| 348 | |||
| 349 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 350 | ret = xts_crypt(desc, dst, src, nbytes, &req); | ||
| 351 | serpent_fpu_end(crypt_ctx.fpu_enabled); | ||
| 352 | |||
| 353 | return ret; | ||
| 354 | } | ||
| 355 | |||
| 356 | static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 357 | struct scatterlist *src, unsigned int nbytes) | ||
| 358 | { | ||
| 359 | struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 360 | be128 buf[SERPENT_PARALLEL_BLOCKS]; | ||
| 361 | struct crypt_priv crypt_ctx = { | ||
| 362 | .ctx = &ctx->crypt_ctx, | ||
| 363 | .fpu_enabled = false, | ||
| 364 | }; | ||
| 365 | struct xts_crypt_req req = { | ||
| 366 | .tbuf = buf, | ||
| 367 | .tbuflen = sizeof(buf), | ||
| 368 | |||
| 369 | .tweak_ctx = &ctx->tweak_ctx, | ||
| 370 | .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt), | ||
| 371 | .crypt_ctx = &crypt_ctx, | ||
| 372 | .crypt_fn = decrypt_callback, | ||
| 373 | }; | ||
| 374 | int ret; | ||
| 375 | |||
| 376 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 377 | ret = xts_crypt(desc, dst, src, nbytes, &req); | ||
| 378 | serpent_fpu_end(crypt_ctx.fpu_enabled); | ||
| 379 | |||
| 380 | return ret; | ||
| 381 | } | ||
| 382 | |||
| 383 | static struct crypto_alg serpent_algs[10] = { { | ||
| 384 | .cra_name = "__ecb-serpent-avx", | ||
| 385 | .cra_driver_name = "__driver-ecb-serpent-avx", | ||
| 386 | .cra_priority = 0, | ||
| 387 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 388 | .cra_blocksize = SERPENT_BLOCK_SIZE, | ||
| 389 | .cra_ctxsize = sizeof(struct serpent_ctx), | ||
| 390 | .cra_alignmask = 0, | ||
| 391 | .cra_type = &crypto_blkcipher_type, | ||
| 392 | .cra_module = THIS_MODULE, | ||
| 393 | .cra_list = LIST_HEAD_INIT(serpent_algs[0].cra_list), | ||
| 394 | .cra_u = { | ||
| 395 | .blkcipher = { | ||
| 396 | .min_keysize = SERPENT_MIN_KEY_SIZE, | ||
| 397 | .max_keysize = SERPENT_MAX_KEY_SIZE, | ||
| 398 | .setkey = serpent_setkey, | ||
| 399 | .encrypt = ecb_encrypt, | ||
| 400 | .decrypt = ecb_decrypt, | ||
| 401 | }, | ||
| 402 | }, | ||
| 403 | }, { | ||
| 404 | .cra_name = "__cbc-serpent-avx", | ||
| 405 | .cra_driver_name = "__driver-cbc-serpent-avx", | ||
| 406 | .cra_priority = 0, | ||
| 407 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 408 | .cra_blocksize = SERPENT_BLOCK_SIZE, | ||
| 409 | .cra_ctxsize = sizeof(struct serpent_ctx), | ||
| 410 | .cra_alignmask = 0, | ||
| 411 | .cra_type = &crypto_blkcipher_type, | ||
| 412 | .cra_module = THIS_MODULE, | ||
| 413 | .cra_list = LIST_HEAD_INIT(serpent_algs[1].cra_list), | ||
| 414 | .cra_u = { | ||
| 415 | .blkcipher = { | ||
| 416 | .min_keysize = SERPENT_MIN_KEY_SIZE, | ||
| 417 | .max_keysize = SERPENT_MAX_KEY_SIZE, | ||
| 418 | .setkey = serpent_setkey, | ||
| 419 | .encrypt = cbc_encrypt, | ||
| 420 | .decrypt = cbc_decrypt, | ||
| 421 | }, | ||
| 422 | }, | ||
| 423 | }, { | ||
| 424 | .cra_name = "__ctr-serpent-avx", | ||
| 425 | .cra_driver_name = "__driver-ctr-serpent-avx", | ||
| 426 | .cra_priority = 0, | ||
| 427 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 428 | .cra_blocksize = 1, | ||
| 429 | .cra_ctxsize = sizeof(struct serpent_ctx), | ||
| 430 | .cra_alignmask = 0, | ||
| 431 | .cra_type = &crypto_blkcipher_type, | ||
| 432 | .cra_module = THIS_MODULE, | ||
| 433 | .cra_list = LIST_HEAD_INIT(serpent_algs[2].cra_list), | ||
| 434 | .cra_u = { | ||
| 435 | .blkcipher = { | ||
| 436 | .min_keysize = SERPENT_MIN_KEY_SIZE, | ||
| 437 | .max_keysize = SERPENT_MAX_KEY_SIZE, | ||
| 438 | .ivsize = SERPENT_BLOCK_SIZE, | ||
| 439 | .setkey = serpent_setkey, | ||
| 440 | .encrypt = ctr_crypt, | ||
| 441 | .decrypt = ctr_crypt, | ||
| 442 | }, | ||
| 443 | }, | ||
| 444 | }, { | ||
| 445 | .cra_name = "__lrw-serpent-avx", | ||
| 446 | .cra_driver_name = "__driver-lrw-serpent-avx", | ||
| 447 | .cra_priority = 0, | ||
| 448 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 449 | .cra_blocksize = SERPENT_BLOCK_SIZE, | ||
| 450 | .cra_ctxsize = sizeof(struct serpent_lrw_ctx), | ||
| 451 | .cra_alignmask = 0, | ||
| 452 | .cra_type = &crypto_blkcipher_type, | ||
| 453 | .cra_module = THIS_MODULE, | ||
| 454 | .cra_list = LIST_HEAD_INIT(serpent_algs[3].cra_list), | ||
| 455 | .cra_exit = lrw_exit_tfm, | ||
| 456 | .cra_u = { | ||
| 457 | .blkcipher = { | ||
| 458 | .min_keysize = SERPENT_MIN_KEY_SIZE + | ||
| 459 | SERPENT_BLOCK_SIZE, | ||
| 460 | .max_keysize = SERPENT_MAX_KEY_SIZE + | ||
| 461 | SERPENT_BLOCK_SIZE, | ||
| 462 | .ivsize = SERPENT_BLOCK_SIZE, | ||
| 463 | .setkey = lrw_serpent_setkey, | ||
| 464 | .encrypt = lrw_encrypt, | ||
| 465 | .decrypt = lrw_decrypt, | ||
| 466 | }, | ||
| 467 | }, | ||
| 468 | }, { | ||
| 469 | .cra_name = "__xts-serpent-avx", | ||
| 470 | .cra_driver_name = "__driver-xts-serpent-avx", | ||
| 471 | .cra_priority = 0, | ||
| 472 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 473 | .cra_blocksize = SERPENT_BLOCK_SIZE, | ||
| 474 | .cra_ctxsize = sizeof(struct serpent_xts_ctx), | ||
| 475 | .cra_alignmask = 0, | ||
| 476 | .cra_type = &crypto_blkcipher_type, | ||
| 477 | .cra_module = THIS_MODULE, | ||
| 478 | .cra_list = LIST_HEAD_INIT(serpent_algs[4].cra_list), | ||
| 479 | .cra_u = { | ||
| 480 | .blkcipher = { | ||
| 481 | .min_keysize = SERPENT_MIN_KEY_SIZE * 2, | ||
| 482 | .max_keysize = SERPENT_MAX_KEY_SIZE * 2, | ||
| 483 | .ivsize = SERPENT_BLOCK_SIZE, | ||
| 484 | .setkey = xts_serpent_setkey, | ||
| 485 | .encrypt = xts_encrypt, | ||
| 486 | .decrypt = xts_decrypt, | ||
| 487 | }, | ||
| 488 | }, | ||
| 489 | }, { | ||
| 490 | .cra_name = "ecb(serpent)", | ||
| 491 | .cra_driver_name = "ecb-serpent-avx", | ||
| 492 | .cra_priority = 500, | ||
| 493 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 494 | .cra_blocksize = SERPENT_BLOCK_SIZE, | ||
| 495 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 496 | .cra_alignmask = 0, | ||
| 497 | .cra_type = &crypto_ablkcipher_type, | ||
| 498 | .cra_module = THIS_MODULE, | ||
| 499 | .cra_list = LIST_HEAD_INIT(serpent_algs[5].cra_list), | ||
| 500 | .cra_init = ablk_init, | ||
| 501 | .cra_exit = ablk_exit, | ||
| 502 | .cra_u = { | ||
| 503 | .ablkcipher = { | ||
| 504 | .min_keysize = SERPENT_MIN_KEY_SIZE, | ||
| 505 | .max_keysize = SERPENT_MAX_KEY_SIZE, | ||
| 506 | .setkey = ablk_set_key, | ||
| 507 | .encrypt = ablk_encrypt, | ||
| 508 | .decrypt = ablk_decrypt, | ||
| 509 | }, | ||
| 510 | }, | ||
| 511 | }, { | ||
| 512 | .cra_name = "cbc(serpent)", | ||
| 513 | .cra_driver_name = "cbc-serpent-avx", | ||
| 514 | .cra_priority = 500, | ||
| 515 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 516 | .cra_blocksize = SERPENT_BLOCK_SIZE, | ||
| 517 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 518 | .cra_alignmask = 0, | ||
| 519 | .cra_type = &crypto_ablkcipher_type, | ||
| 520 | .cra_module = THIS_MODULE, | ||
| 521 | .cra_list = LIST_HEAD_INIT(serpent_algs[6].cra_list), | ||
| 522 | .cra_init = ablk_init, | ||
| 523 | .cra_exit = ablk_exit, | ||
| 524 | .cra_u = { | ||
| 525 | .ablkcipher = { | ||
| 526 | .min_keysize = SERPENT_MIN_KEY_SIZE, | ||
| 527 | .max_keysize = SERPENT_MAX_KEY_SIZE, | ||
| 528 | .ivsize = SERPENT_BLOCK_SIZE, | ||
| 529 | .setkey = ablk_set_key, | ||
| 530 | .encrypt = __ablk_encrypt, | ||
| 531 | .decrypt = ablk_decrypt, | ||
| 532 | }, | ||
| 533 | }, | ||
| 534 | }, { | ||
| 535 | .cra_name = "ctr(serpent)", | ||
| 536 | .cra_driver_name = "ctr-serpent-avx", | ||
| 537 | .cra_priority = 500, | ||
| 538 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 539 | .cra_blocksize = 1, | ||
| 540 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 541 | .cra_alignmask = 0, | ||
| 542 | .cra_type = &crypto_ablkcipher_type, | ||
| 543 | .cra_module = THIS_MODULE, | ||
| 544 | .cra_list = LIST_HEAD_INIT(serpent_algs[7].cra_list), | ||
| 545 | .cra_init = ablk_init, | ||
| 546 | .cra_exit = ablk_exit, | ||
| 547 | .cra_u = { | ||
| 548 | .ablkcipher = { | ||
| 549 | .min_keysize = SERPENT_MIN_KEY_SIZE, | ||
| 550 | .max_keysize = SERPENT_MAX_KEY_SIZE, | ||
| 551 | .ivsize = SERPENT_BLOCK_SIZE, | ||
| 552 | .setkey = ablk_set_key, | ||
| 553 | .encrypt = ablk_encrypt, | ||
| 554 | .decrypt = ablk_encrypt, | ||
| 555 | .geniv = "chainiv", | ||
| 556 | }, | ||
| 557 | }, | ||
| 558 | }, { | ||
| 559 | .cra_name = "lrw(serpent)", | ||
| 560 | .cra_driver_name = "lrw-serpent-avx", | ||
| 561 | .cra_priority = 500, | ||
| 562 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 563 | .cra_blocksize = SERPENT_BLOCK_SIZE, | ||
| 564 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 565 | .cra_alignmask = 0, | ||
| 566 | .cra_type = &crypto_ablkcipher_type, | ||
| 567 | .cra_module = THIS_MODULE, | ||
| 568 | .cra_list = LIST_HEAD_INIT(serpent_algs[8].cra_list), | ||
| 569 | .cra_init = ablk_init, | ||
| 570 | .cra_exit = ablk_exit, | ||
| 571 | .cra_u = { | ||
| 572 | .ablkcipher = { | ||
| 573 | .min_keysize = SERPENT_MIN_KEY_SIZE + | ||
| 574 | SERPENT_BLOCK_SIZE, | ||
| 575 | .max_keysize = SERPENT_MAX_KEY_SIZE + | ||
| 576 | SERPENT_BLOCK_SIZE, | ||
| 577 | .ivsize = SERPENT_BLOCK_SIZE, | ||
| 578 | .setkey = ablk_set_key, | ||
| 579 | .encrypt = ablk_encrypt, | ||
| 580 | .decrypt = ablk_decrypt, | ||
| 581 | }, | ||
| 582 | }, | ||
| 583 | }, { | ||
| 584 | .cra_name = "xts(serpent)", | ||
| 585 | .cra_driver_name = "xts-serpent-avx", | ||
| 586 | .cra_priority = 500, | ||
| 587 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 588 | .cra_blocksize = SERPENT_BLOCK_SIZE, | ||
| 589 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 590 | .cra_alignmask = 0, | ||
| 591 | .cra_type = &crypto_ablkcipher_type, | ||
| 592 | .cra_module = THIS_MODULE, | ||
| 593 | .cra_list = LIST_HEAD_INIT(serpent_algs[9].cra_list), | ||
| 594 | .cra_init = ablk_init, | ||
| 595 | .cra_exit = ablk_exit, | ||
| 596 | .cra_u = { | ||
| 597 | .ablkcipher = { | ||
| 598 | .min_keysize = SERPENT_MIN_KEY_SIZE * 2, | ||
| 599 | .max_keysize = SERPENT_MAX_KEY_SIZE * 2, | ||
| 600 | .ivsize = SERPENT_BLOCK_SIZE, | ||
| 601 | .setkey = ablk_set_key, | ||
| 602 | .encrypt = ablk_encrypt, | ||
| 603 | .decrypt = ablk_decrypt, | ||
| 604 | }, | ||
| 605 | }, | ||
| 606 | } }; | ||
| 607 | |||
| 608 | static int __init serpent_init(void) | ||
| 609 | { | ||
| 610 | u64 xcr0; | ||
| 611 | |||
| 612 | if (!cpu_has_avx || !cpu_has_osxsave) { | ||
| 613 | printk(KERN_INFO "AVX instructions are not detected.\n"); | ||
| 614 | return -ENODEV; | ||
| 615 | } | ||
| 616 | |||
| 617 | xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); | ||
| 618 | if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) { | ||
| 619 | printk(KERN_INFO "AVX detected but unusable.\n"); | ||
| 620 | return -ENODEV; | ||
| 621 | } | ||
| 622 | |||
| 623 | return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs)); | ||
| 624 | } | ||
| 625 | |||
| 626 | static void __exit serpent_exit(void) | ||
| 627 | { | ||
| 628 | crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs)); | ||
| 629 | } | ||
| 630 | |||
| 631 | module_init(serpent_init); | ||
| 632 | module_exit(serpent_exit); | ||
| 633 | |||
| 634 | MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized"); | ||
| 635 | MODULE_LICENSE("GPL"); | ||
| 636 | MODULE_ALIAS("serpent"); | ||
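Note: serpent_init() above gates registration on two separate conditions: the CPU must advertise AVX and OSXSAVE, and XCR0 (read via xgetbv(XCR_XFEATURE_ENABLED_MASK)) must show that the OS actually saves SSE and YMM state across context switches; AVX without those XCR0 bits is "detected but unusable". Below is a minimal userspace analogue of that check, as a sketch only: the helper names xgetbv0() and avx_usable() are illustrative, and the kernel reads cpu_has_avx/cpu_has_osxsave from precomputed cpufeature bits rather than issuing cpuid itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XSTATE_SSE (1 << 1)	/* XCR0 bit 1: SSE state saved */
#define XSTATE_YMM (1 << 2)	/* XCR0 bit 2: AVX (YMM) state saved */

static uint64_t xgetbv0(void)
{
	uint32_t eax, edx;

	/* xgetbv with %ecx = 0 reads XCR0 */
	__asm__ volatile ("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0));
	return ((uint64_t)edx << 32) | eax;
}

static bool avx_usable(void)
{
	uint32_t eax, ebx, ecx, edx;

	/* CPUID.1:ECX bit 27 = OSXSAVE, bit 28 = AVX */
	__asm__ volatile ("cpuid"
			  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			  : "a" (1), "c" (0));
	if (!(ecx & (1u << 27)) || !(ecx & (1u << 28)))
		return false;

	return (xgetbv0() & (XSTATE_SSE | XSTATE_YMM)) ==
	       (XSTATE_SSE | XSTATE_YMM);
}

int main(void)
{
	printf("AVX usable: %s\n", avx_usable() ? "yes" : "no");
	return 0;
}

The sha1 glue further down wraps the identical two-step test in its own avx_usable() helper.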
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 4b21be85e0a1..d679c8675f4a 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c | |||
| @@ -41,358 +41,145 @@ | |||
| 41 | #include <crypto/ctr.h> | 41 | #include <crypto/ctr.h> |
| 42 | #include <crypto/lrw.h> | 42 | #include <crypto/lrw.h> |
| 43 | #include <crypto/xts.h> | 43 | #include <crypto/xts.h> |
| 44 | #include <asm/i387.h> | 44 | #include <asm/crypto/serpent-sse2.h> |
| 45 | #include <asm/serpent.h> | 45 | #include <asm/crypto/ablk_helper.h> |
| 46 | #include <crypto/scatterwalk.h> | 46 | #include <asm/crypto/glue_helper.h> |
| 47 | #include <linux/workqueue.h> | ||
| 48 | #include <linux/spinlock.h> | ||
| 49 | |||
| 50 | struct async_serpent_ctx { | ||
| 51 | struct cryptd_ablkcipher *cryptd_tfm; | ||
| 52 | }; | ||
| 53 | 47 | ||
| 54 | static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes) | 48 | static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) |
| 55 | { | ||
| 56 | if (fpu_enabled) | ||
| 57 | return true; | ||
| 58 | |||
| 59 | /* SSE2 is only used when chunk to be processed is large enough, so | ||
| 60 | * do not enable FPU until it is necessary. | ||
| 61 | */ | ||
| 62 | if (nbytes < SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS) | ||
| 63 | return false; | ||
| 64 | |||
| 65 | kernel_fpu_begin(); | ||
| 66 | return true; | ||
| 67 | } | ||
| 68 | |||
| 69 | static inline void serpent_fpu_end(bool fpu_enabled) | ||
| 70 | { | 49 | { |
| 71 | if (fpu_enabled) | 50 | u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; |
| 72 | kernel_fpu_end(); | 51 | unsigned int j; |
| 73 | } | ||
| 74 | |||
| 75 | static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, | ||
| 76 | bool enc) | ||
| 77 | { | ||
| 78 | bool fpu_enabled = false; | ||
| 79 | struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 80 | const unsigned int bsize = SERPENT_BLOCK_SIZE; | ||
| 81 | unsigned int nbytes; | ||
| 82 | int err; | ||
| 83 | |||
| 84 | err = blkcipher_walk_virt(desc, walk); | ||
| 85 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 86 | |||
| 87 | while ((nbytes = walk->nbytes)) { | ||
| 88 | u8 *wsrc = walk->src.virt.addr; | ||
| 89 | u8 *wdst = walk->dst.virt.addr; | ||
| 90 | |||
| 91 | fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes); | ||
| 92 | |||
| 93 | /* Process multi-block batch */ | ||
| 94 | if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) { | ||
| 95 | do { | ||
| 96 | if (enc) | ||
| 97 | serpent_enc_blk_xway(ctx, wdst, wsrc); | ||
| 98 | else | ||
| 99 | serpent_dec_blk_xway(ctx, wdst, wsrc); | ||
| 100 | |||
| 101 | wsrc += bsize * SERPENT_PARALLEL_BLOCKS; | ||
| 102 | wdst += bsize * SERPENT_PARALLEL_BLOCKS; | ||
| 103 | nbytes -= bsize * SERPENT_PARALLEL_BLOCKS; | ||
| 104 | } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS); | ||
| 105 | |||
| 106 | if (nbytes < bsize) | ||
| 107 | goto done; | ||
| 108 | } | ||
| 109 | |||
| 110 | /* Handle leftovers */ | ||
| 111 | do { | ||
| 112 | if (enc) | ||
| 113 | __serpent_encrypt(ctx, wdst, wsrc); | ||
| 114 | else | ||
| 115 | __serpent_decrypt(ctx, wdst, wsrc); | ||
| 116 | |||
| 117 | wsrc += bsize; | ||
| 118 | wdst += bsize; | ||
| 119 | nbytes -= bsize; | ||
| 120 | } while (nbytes >= bsize); | ||
| 121 | |||
| 122 | done: | ||
| 123 | err = blkcipher_walk_done(desc, walk, nbytes); | ||
| 124 | } | ||
| 125 | 52 | ||
| 126 | serpent_fpu_end(fpu_enabled); | 53 | for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) |
| 127 | return err; | 54 | ivs[j] = src[j]; |
| 128 | } | ||
| 129 | 55 | ||
| 130 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 56 | serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src); |
| 131 | struct scatterlist *src, unsigned int nbytes) | ||
| 132 | { | ||
| 133 | struct blkcipher_walk walk; | ||
| 134 | 57 | ||
| 135 | blkcipher_walk_init(&walk, dst, src, nbytes); | 58 | for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) |
| 136 | return ecb_crypt(desc, &walk, true); | 59 | u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); |
| 137 | } | 60 | } |
| 138 | 61 | ||
| 139 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 62 | static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv) |
| 140 | struct scatterlist *src, unsigned int nbytes) | ||
| 141 | { | 63 | { |
| 142 | struct blkcipher_walk walk; | 64 | be128 ctrblk; |
| 143 | 65 | ||
| 144 | blkcipher_walk_init(&walk, dst, src, nbytes); | 66 | u128_to_be128(&ctrblk, iv); |
| 145 | return ecb_crypt(desc, &walk, false); | 67 | u128_inc(iv); |
| 146 | } | ||
| 147 | 68 | ||
| 148 | static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, | 69 | __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); |
| 149 | struct blkcipher_walk *walk) | 70 | u128_xor(dst, src, (u128 *)&ctrblk); |
| 150 | { | ||
| 151 | struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 152 | const unsigned int bsize = SERPENT_BLOCK_SIZE; | ||
| 153 | unsigned int nbytes = walk->nbytes; | ||
| 154 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 155 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 156 | u128 *iv = (u128 *)walk->iv; | ||
| 157 | |||
| 158 | do { | ||
| 159 | u128_xor(dst, src, iv); | ||
| 160 | __serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst); | ||
| 161 | iv = dst; | ||
| 162 | |||
| 163 | src += 1; | ||
| 164 | dst += 1; | ||
| 165 | nbytes -= bsize; | ||
| 166 | } while (nbytes >= bsize); | ||
| 167 | |||
| 168 | u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv); | ||
| 169 | return nbytes; | ||
| 170 | } | 71 | } |
| 171 | 72 | ||
| 172 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 73 | static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src, |
| 173 | struct scatterlist *src, unsigned int nbytes) | 74 | u128 *iv) |
| 174 | { | 75 | { |
| 175 | struct blkcipher_walk walk; | 76 | be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; |
| 176 | int err; | 77 | unsigned int i; |
| 177 | 78 | ||
| 178 | blkcipher_walk_init(&walk, dst, src, nbytes); | 79 | for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { |
| 179 | err = blkcipher_walk_virt(desc, &walk); | 80 | if (dst != src) |
| 81 | dst[i] = src[i]; | ||
| 180 | 82 | ||
| 181 | while ((nbytes = walk.nbytes)) { | 83 | u128_to_be128(&ctrblks[i], iv); |
| 182 | nbytes = __cbc_encrypt(desc, &walk); | 84 | u128_inc(iv); |
| 183 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 184 | } | 85 | } |
| 185 | 86 | ||
| 186 | return err; | 87 | serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks); |
| 187 | } | 88 | } |
| 188 | 89 | ||
| 189 | static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, | 90 | static const struct common_glue_ctx serpent_enc = { |
| 190 | struct blkcipher_walk *walk) | 91 | .num_funcs = 2, |
| 191 | { | 92 | .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, |
| 192 | struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 193 | const unsigned int bsize = SERPENT_BLOCK_SIZE; | ||
| 194 | unsigned int nbytes = walk->nbytes; | ||
| 195 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 196 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 197 | u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; | ||
| 198 | u128 last_iv; | ||
| 199 | int i; | ||
| 200 | |||
| 201 | /* Start of the last block. */ | ||
| 202 | src += nbytes / bsize - 1; | ||
| 203 | dst += nbytes / bsize - 1; | ||
| 204 | |||
| 205 | last_iv = *src; | ||
| 206 | |||
| 207 | /* Process multi-block batch */ | ||
| 208 | if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) { | ||
| 209 | do { | ||
| 210 | nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1); | ||
| 211 | src -= SERPENT_PARALLEL_BLOCKS - 1; | ||
| 212 | dst -= SERPENT_PARALLEL_BLOCKS - 1; | ||
| 213 | |||
| 214 | for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++) | ||
| 215 | ivs[i] = src[i]; | ||
| 216 | |||
| 217 | serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src); | ||
| 218 | |||
| 219 | for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++) | ||
| 220 | u128_xor(dst + (i + 1), dst + (i + 1), ivs + i); | ||
| 221 | |||
| 222 | nbytes -= bsize; | ||
| 223 | if (nbytes < bsize) | ||
| 224 | goto done; | ||
| 225 | 93 | ||
| 226 | u128_xor(dst, dst, src - 1); | 94 | .funcs = { { |
| 227 | src -= 1; | 95 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
| 228 | dst -= 1; | 96 | .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) } |
| 229 | } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS); | 97 | }, { |
| 230 | 98 | .num_blocks = 1, | |
| 231 | if (nbytes < bsize) | 99 | .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } |
| 232 | goto done; | 100 | } } |
| 233 | } | 101 | }; |
| 234 | |||
| 235 | /* Handle leftovers */ | ||
| 236 | for (;;) { | ||
| 237 | __serpent_decrypt(ctx, (u8 *)dst, (u8 *)src); | ||
| 238 | |||
| 239 | nbytes -= bsize; | ||
| 240 | if (nbytes < bsize) | ||
| 241 | break; | ||
| 242 | 102 | ||
| 243 | u128_xor(dst, dst, src - 1); | 103 | static const struct common_glue_ctx serpent_ctr = { |
| 244 | src -= 1; | 104 | .num_funcs = 2, |
| 245 | dst -= 1; | 105 | .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, |
| 246 | } | 106 | |
| 107 | .funcs = { { | ||
| 108 | .num_blocks = SERPENT_PARALLEL_BLOCKS, | ||
| 109 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) } | ||
| 110 | }, { | ||
| 111 | .num_blocks = 1, | ||
| 112 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) } | ||
| 113 | } } | ||
| 114 | }; | ||
| 247 | 115 | ||
| 248 | done: | 116 | static const struct common_glue_ctx serpent_dec = { |
| 249 | u128_xor(dst, dst, (u128 *)walk->iv); | 117 | .num_funcs = 2, |
| 250 | *(u128 *)walk->iv = last_iv; | 118 | .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, |
| 119 | |||
| 120 | .funcs = { { | ||
| 121 | .num_blocks = SERPENT_PARALLEL_BLOCKS, | ||
| 122 | .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) } | ||
| 123 | }, { | ||
| 124 | .num_blocks = 1, | ||
| 125 | .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } | ||
| 126 | } } | ||
| 127 | }; | ||
| 251 | 128 | ||
| 252 | return nbytes; | 129 | static const struct common_glue_ctx serpent_dec_cbc = { |
| 253 | } | 130 | .num_funcs = 2, |
| 131 | .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, | ||
| 132 | |||
| 133 | .funcs = { { | ||
| 134 | .num_blocks = SERPENT_PARALLEL_BLOCKS, | ||
| 135 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) } | ||
| 136 | }, { | ||
| 137 | .num_blocks = 1, | ||
| 138 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } | ||
| 139 | } } | ||
| 140 | }; | ||
| 254 | 141 | ||
| 255 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 142 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 256 | struct scatterlist *src, unsigned int nbytes) | 143 | struct scatterlist *src, unsigned int nbytes) |
| 257 | { | 144 | { |
| 258 | bool fpu_enabled = false; | 145 | return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes); |
| 259 | struct blkcipher_walk walk; | ||
| 260 | int err; | ||
| 261 | |||
| 262 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 263 | err = blkcipher_walk_virt(desc, &walk); | ||
| 264 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 265 | |||
| 266 | while ((nbytes = walk.nbytes)) { | ||
| 267 | fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes); | ||
| 268 | nbytes = __cbc_decrypt(desc, &walk); | ||
| 269 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 270 | } | ||
| 271 | |||
| 272 | serpent_fpu_end(fpu_enabled); | ||
| 273 | return err; | ||
| 274 | } | 146 | } |
| 275 | 147 | ||
| 276 | static inline void u128_to_be128(be128 *dst, const u128 *src) | 148 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 149 | struct scatterlist *src, unsigned int nbytes) | ||
| 277 | { | 150 | { |
| 278 | dst->a = cpu_to_be64(src->a); | 151 | return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes); |
| 279 | dst->b = cpu_to_be64(src->b); | ||
| 280 | } | 152 | } |
| 281 | 153 | ||
| 282 | static inline void be128_to_u128(u128 *dst, const be128 *src) | 154 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 155 | struct scatterlist *src, unsigned int nbytes) | ||
| 283 | { | 156 | { |
| 284 | dst->a = be64_to_cpu(src->a); | 157 | return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc, |
| 285 | dst->b = be64_to_cpu(src->b); | 158 | dst, src, nbytes); |
| 286 | } | 159 | } |
| 287 | 160 | ||
| 288 | static inline void u128_inc(u128 *i) | 161 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 162 | struct scatterlist *src, unsigned int nbytes) | ||
| 289 | { | 163 | { |
| 290 | i->b++; | 164 | return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src, |
| 291 | if (!i->b) | 165 | nbytes); |
| 292 | i->a++; | ||
| 293 | } | 166 | } |
| 294 | 167 | ||
| 295 | static void ctr_crypt_final(struct blkcipher_desc *desc, | 168 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 296 | struct blkcipher_walk *walk) | 169 | struct scatterlist *src, unsigned int nbytes) |
| 297 | { | 170 | { |
| 298 | struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 171 | return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes); |
| 299 | u8 *ctrblk = walk->iv; | ||
| 300 | u8 keystream[SERPENT_BLOCK_SIZE]; | ||
| 301 | u8 *src = walk->src.virt.addr; | ||
| 302 | u8 *dst = walk->dst.virt.addr; | ||
| 303 | unsigned int nbytes = walk->nbytes; | ||
| 304 | |||
| 305 | __serpent_encrypt(ctx, keystream, ctrblk); | ||
| 306 | crypto_xor(keystream, src, nbytes); | ||
| 307 | memcpy(dst, keystream, nbytes); | ||
| 308 | |||
| 309 | crypto_inc(ctrblk, SERPENT_BLOCK_SIZE); | ||
| 310 | } | 172 | } |
| 311 | 173 | ||
| 312 | static unsigned int __ctr_crypt(struct blkcipher_desc *desc, | 174 | static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes) |
| 313 | struct blkcipher_walk *walk) | ||
| 314 | { | 175 | { |
| 315 | struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 176 | return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS, |
| 316 | const unsigned int bsize = SERPENT_BLOCK_SIZE; | 177 | NULL, fpu_enabled, nbytes); |
| 317 | unsigned int nbytes = walk->nbytes; | ||
| 318 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 319 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 320 | u128 ctrblk; | ||
| 321 | be128 ctrblocks[SERPENT_PARALLEL_BLOCKS]; | ||
| 322 | int i; | ||
| 323 | |||
| 324 | be128_to_u128(&ctrblk, (be128 *)walk->iv); | ||
| 325 | |||
| 326 | /* Process multi-block batch */ | ||
| 327 | if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) { | ||
| 328 | do { | ||
| 329 | /* create ctrblks for parallel encrypt */ | ||
| 330 | for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { | ||
| 331 | if (dst != src) | ||
| 332 | dst[i] = src[i]; | ||
| 333 | |||
| 334 | u128_to_be128(&ctrblocks[i], &ctrblk); | ||
| 335 | u128_inc(&ctrblk); | ||
| 336 | } | ||
| 337 | |||
| 338 | serpent_enc_blk_xway_xor(ctx, (u8 *)dst, | ||
| 339 | (u8 *)ctrblocks); | ||
| 340 | |||
| 341 | src += SERPENT_PARALLEL_BLOCKS; | ||
| 342 | dst += SERPENT_PARALLEL_BLOCKS; | ||
| 343 | nbytes -= bsize * SERPENT_PARALLEL_BLOCKS; | ||
| 344 | } while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS); | ||
| 345 | |||
| 346 | if (nbytes < bsize) | ||
| 347 | goto done; | ||
| 348 | } | ||
| 349 | |||
| 350 | /* Handle leftovers */ | ||
| 351 | do { | ||
| 352 | if (dst != src) | ||
| 353 | *dst = *src; | ||
| 354 | |||
| 355 | u128_to_be128(&ctrblocks[0], &ctrblk); | ||
| 356 | u128_inc(&ctrblk); | ||
| 357 | |||
| 358 | __serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); | ||
| 359 | u128_xor(dst, dst, (u128 *)ctrblocks); | ||
| 360 | |||
| 361 | src += 1; | ||
| 362 | dst += 1; | ||
| 363 | nbytes -= bsize; | ||
| 364 | } while (nbytes >= bsize); | ||
| 365 | |||
| 366 | done: | ||
| 367 | u128_to_be128((be128 *)walk->iv, &ctrblk); | ||
| 368 | return nbytes; | ||
| 369 | } | 178 | } |
| 370 | 179 | ||
| 371 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 180 | static inline void serpent_fpu_end(bool fpu_enabled) |
| 372 | struct scatterlist *src, unsigned int nbytes) | ||
| 373 | { | 181 | { |
| 374 | bool fpu_enabled = false; | 182 | glue_fpu_end(fpu_enabled); |
| 375 | struct blkcipher_walk walk; | ||
| 376 | int err; | ||
| 377 | |||
| 378 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 379 | err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE); | ||
| 380 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 381 | |||
| 382 | while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) { | ||
| 383 | fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes); | ||
| 384 | nbytes = __ctr_crypt(desc, &walk); | ||
| 385 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 386 | } | ||
| 387 | |||
| 388 | serpent_fpu_end(fpu_enabled); | ||
| 389 | |||
| 390 | if (walk.nbytes) { | ||
| 391 | ctr_crypt_final(desc, &walk); | ||
| 392 | err = blkcipher_walk_done(desc, &walk, 0); | ||
| 393 | } | ||
| 394 | |||
| 395 | return err; | ||
| 396 | } | 183 | } |
| 397 | 184 | ||
| 398 | struct crypt_priv { | 185 | struct crypt_priv { |
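Note: the hunk above deletes the open-coded u128 helpers (u128_to_be128, be128_to_u128, u128_inc) along with the hand-rolled ECB/CBC/CTR walkers; the per-mode loops move into the shared glue_helper code and only the cipher-specific callbacks stay in this file. The one subtle piece worth spelling out is the counter increment: the 128-bit CTR block is kept as two host-endian 64-bit halves, and the increment must carry from the low half into the high half. A standalone sketch of that behaviour, with the type and function names mirroring the removed kernel helpers:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t a, b; } u128;	/* a = high half, b = low half */

static void u128_inc(u128 *i)
{
	i->b++;
	if (!i->b)	/* low half wrapped to zero: carry into high half */
		i->a++;
}

int main(void)
{
	u128 ctr = { .a = 0, .b = UINT64_MAX };

	u128_inc(&ctr);
	printf("after carry: a=%llu b=%llu\n",
	       (unsigned long long)ctr.a, (unsigned long long)ctr.b);
	/* prints: after carry: a=1 b=0 */
	return 0;
}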
| @@ -596,106 +383,6 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
| 596 | return ret; | 383 | return ret; |
| 597 | } | 384 | } |
| 598 | 385 | ||
| 599 | static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 600 | unsigned int key_len) | ||
| 601 | { | ||
| 602 | struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 603 | struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; | ||
| 604 | int err; | ||
| 605 | |||
| 606 | crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
| 607 | crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) | ||
| 608 | & CRYPTO_TFM_REQ_MASK); | ||
| 609 | err = crypto_ablkcipher_setkey(child, key, key_len); | ||
| 610 | crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) | ||
| 611 | & CRYPTO_TFM_RES_MASK); | ||
| 612 | return err; | ||
| 613 | } | ||
| 614 | |||
| 615 | static int __ablk_encrypt(struct ablkcipher_request *req) | ||
| 616 | { | ||
| 617 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
| 618 | struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 619 | struct blkcipher_desc desc; | ||
| 620 | |||
| 621 | desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); | ||
| 622 | desc.info = req->info; | ||
| 623 | desc.flags = 0; | ||
| 624 | |||
| 625 | return crypto_blkcipher_crt(desc.tfm)->encrypt( | ||
| 626 | &desc, req->dst, req->src, req->nbytes); | ||
| 627 | } | ||
| 628 | |||
| 629 | static int ablk_encrypt(struct ablkcipher_request *req) | ||
| 630 | { | ||
| 631 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
| 632 | struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 633 | |||
| 634 | if (!irq_fpu_usable()) { | ||
| 635 | struct ablkcipher_request *cryptd_req = | ||
| 636 | ablkcipher_request_ctx(req); | ||
| 637 | |||
| 638 | memcpy(cryptd_req, req, sizeof(*req)); | ||
| 639 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | ||
| 640 | |||
| 641 | return crypto_ablkcipher_encrypt(cryptd_req); | ||
| 642 | } else { | ||
| 643 | return __ablk_encrypt(req); | ||
| 644 | } | ||
| 645 | } | ||
| 646 | |||
| 647 | static int ablk_decrypt(struct ablkcipher_request *req) | ||
| 648 | { | ||
| 649 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
| 650 | struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 651 | |||
| 652 | if (!irq_fpu_usable()) { | ||
| 653 | struct ablkcipher_request *cryptd_req = | ||
| 654 | ablkcipher_request_ctx(req); | ||
| 655 | |||
| 656 | memcpy(cryptd_req, req, sizeof(*req)); | ||
| 657 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | ||
| 658 | |||
| 659 | return crypto_ablkcipher_decrypt(cryptd_req); | ||
| 660 | } else { | ||
| 661 | struct blkcipher_desc desc; | ||
| 662 | |||
| 663 | desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); | ||
| 664 | desc.info = req->info; | ||
| 665 | desc.flags = 0; | ||
| 666 | |||
| 667 | return crypto_blkcipher_crt(desc.tfm)->decrypt( | ||
| 668 | &desc, req->dst, req->src, req->nbytes); | ||
| 669 | } | ||
| 670 | } | ||
| 671 | |||
| 672 | static void ablk_exit(struct crypto_tfm *tfm) | ||
| 673 | { | ||
| 674 | struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 675 | |||
| 676 | cryptd_free_ablkcipher(ctx->cryptd_tfm); | ||
| 677 | } | ||
| 678 | |||
| 679 | static int ablk_init(struct crypto_tfm *tfm) | ||
| 680 | { | ||
| 681 | struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 682 | struct cryptd_ablkcipher *cryptd_tfm; | ||
| 683 | char drv_name[CRYPTO_MAX_ALG_NAME]; | ||
| 684 | |||
| 685 | snprintf(drv_name, sizeof(drv_name), "__driver-%s", | ||
| 686 | crypto_tfm_alg_driver_name(tfm)); | ||
| 687 | |||
| 688 | cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); | ||
| 689 | if (IS_ERR(cryptd_tfm)) | ||
| 690 | return PTR_ERR(cryptd_tfm); | ||
| 691 | |||
| 692 | ctx->cryptd_tfm = cryptd_tfm; | ||
| 693 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + | ||
| 694 | crypto_ablkcipher_reqsize(&cryptd_tfm->base); | ||
| 695 | |||
| 696 | return 0; | ||
| 697 | } | ||
| 698 | |||
| 699 | static struct crypto_alg serpent_algs[10] = { { | 386 | static struct crypto_alg serpent_algs[10] = { { |
| 700 | .cra_name = "__ecb-serpent-sse2", | 387 | .cra_name = "__ecb-serpent-sse2", |
| 701 | .cra_driver_name = "__driver-ecb-serpent-sse2", | 388 | .cra_driver_name = "__driver-ecb-serpent-sse2", |
| @@ -808,7 +495,7 @@ static struct crypto_alg serpent_algs[10] = { { | |||
| 808 | .cra_priority = 400, | 495 | .cra_priority = 400, |
| 809 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 496 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 810 | .cra_blocksize = SERPENT_BLOCK_SIZE, | 497 | .cra_blocksize = SERPENT_BLOCK_SIZE, |
| 811 | .cra_ctxsize = sizeof(struct async_serpent_ctx), | 498 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 812 | .cra_alignmask = 0, | 499 | .cra_alignmask = 0, |
| 813 | .cra_type = &crypto_ablkcipher_type, | 500 | .cra_type = &crypto_ablkcipher_type, |
| 814 | .cra_module = THIS_MODULE, | 501 | .cra_module = THIS_MODULE, |
| @@ -830,7 +517,7 @@ static struct crypto_alg serpent_algs[10] = { { | |||
| 830 | .cra_priority = 400, | 517 | .cra_priority = 400, |
| 831 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 518 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 832 | .cra_blocksize = SERPENT_BLOCK_SIZE, | 519 | .cra_blocksize = SERPENT_BLOCK_SIZE, |
| 833 | .cra_ctxsize = sizeof(struct async_serpent_ctx), | 520 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 834 | .cra_alignmask = 0, | 521 | .cra_alignmask = 0, |
| 835 | .cra_type = &crypto_ablkcipher_type, | 522 | .cra_type = &crypto_ablkcipher_type, |
| 836 | .cra_module = THIS_MODULE, | 523 | .cra_module = THIS_MODULE, |
| @@ -853,7 +540,7 @@ static struct crypto_alg serpent_algs[10] = { { | |||
| 853 | .cra_priority = 400, | 540 | .cra_priority = 400, |
| 854 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 541 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 855 | .cra_blocksize = 1, | 542 | .cra_blocksize = 1, |
| 856 | .cra_ctxsize = sizeof(struct async_serpent_ctx), | 543 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 857 | .cra_alignmask = 0, | 544 | .cra_alignmask = 0, |
| 858 | .cra_type = &crypto_ablkcipher_type, | 545 | .cra_type = &crypto_ablkcipher_type, |
| 859 | .cra_module = THIS_MODULE, | 546 | .cra_module = THIS_MODULE, |
| @@ -877,7 +564,7 @@ static struct crypto_alg serpent_algs[10] = { { | |||
| 877 | .cra_priority = 400, | 564 | .cra_priority = 400, |
| 878 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 565 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 879 | .cra_blocksize = SERPENT_BLOCK_SIZE, | 566 | .cra_blocksize = SERPENT_BLOCK_SIZE, |
| 880 | .cra_ctxsize = sizeof(struct async_serpent_ctx), | 567 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 881 | .cra_alignmask = 0, | 568 | .cra_alignmask = 0, |
| 882 | .cra_type = &crypto_ablkcipher_type, | 569 | .cra_type = &crypto_ablkcipher_type, |
| 883 | .cra_module = THIS_MODULE, | 570 | .cra_module = THIS_MODULE, |
| @@ -902,7 +589,7 @@ static struct crypto_alg serpent_algs[10] = { { | |||
| 902 | .cra_priority = 400, | 589 | .cra_priority = 400, |
| 903 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 590 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
| 904 | .cra_blocksize = SERPENT_BLOCK_SIZE, | 591 | .cra_blocksize = SERPENT_BLOCK_SIZE, |
| 905 | .cra_ctxsize = sizeof(struct async_serpent_ctx), | 592 | .cra_ctxsize = sizeof(struct async_helper_ctx), |
| 906 | .cra_alignmask = 0, | 593 | .cra_alignmask = 0, |
| 907 | .cra_type = &crypto_ablkcipher_type, | 594 | .cra_type = &crypto_ablkcipher_type, |
| 908 | .cra_module = THIS_MODULE, | 595 | .cra_module = THIS_MODULE, |
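Note: serpent_decrypt_cbc_xway(), near the top of this file's diff, is the template every parallel CBC conversion in this series follows: snapshot the ciphertext blocks that an in-place bulk decrypt would overwrite, run the wide decrypt once, then XOR plaintext block j+1 with saved ciphertext block j; chaining the first block with the previous IV is left to the glue_helper caller. Below is a toy-cipher model of the same dataflow, a sketch only: toy_enc()/toy_dec() and cbc_dec_xway() are stand-ins for the real 8-way Serpent routines, the block width is cut to 64 bits, and the first-block XOR is done inline so the example is self-contained.

#include <stdint.h>
#include <stdio.h>

#define PAR 4
static const uint64_t KEY = 0x0123456789abcdefULL;

static uint64_t toy_enc(uint64_t x) { return x ^ KEY; }	/* stand-in cipher */
static uint64_t toy_dec(uint64_t x) { return x ^ KEY; }

/* decrypt PAR CBC blocks in place, given the IV that chained block 0 */
static void cbc_dec_xway(uint64_t *blk, uint64_t iv)
{
	uint64_t prev[PAR - 1];
	unsigned int j;

	for (j = 0; j < PAR - 1; j++)	/* save ciphertexts before overwrite */
		prev[j] = blk[j];

	for (j = 0; j < PAR; j++)	/* "parallel" raw block decrypt */
		blk[j] = toy_dec(blk[j]);

	for (j = PAR - 1; j > 0; j--)	/* chain: P[j] ^= C[j-1] */
		blk[j] ^= prev[j - 1];
	blk[0] ^= iv;			/* done by the glue caller in-kernel */
}

int main(void)
{
	uint64_t iv = 42, p[PAR] = {1, 2, 3, 4}, c[PAR], prev = iv;
	unsigned int j;

	for (j = 0; j < PAR; j++) {	/* reference CBC encrypt */
		c[j] = toy_enc(p[j] ^ prev);
		prev = c[j];
	}

	cbc_dec_xway(c, iv);
	for (j = 0; j < PAR; j++)
		printf("%llu ", (unsigned long long)c[j]);
	printf("\n");	/* prints the original plaintext: 1 2 3 4 */
	return 0;
}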
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index b2c2f57d70e8..49d6987a73d9 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S | |||
| @@ -468,7 +468,7 @@ W_PRECALC_SSSE3 | |||
| 468 | */ | 468 | */ |
| 469 | SHA1_VECTOR_ASM sha1_transform_ssse3 | 469 | SHA1_VECTOR_ASM sha1_transform_ssse3 |
| 470 | 470 | ||
| 471 | #ifdef SHA1_ENABLE_AVX_SUPPORT | 471 | #ifdef CONFIG_AS_AVX |
| 472 | 472 | ||
| 473 | .macro W_PRECALC_AVX | 473 | .macro W_PRECALC_AVX |
| 474 | 474 | ||
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index f916499d0abe..4a11a9d72451 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | 35 | ||
| 36 | asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, | 36 | asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, |
| 37 | unsigned int rounds); | 37 | unsigned int rounds); |
| 38 | #ifdef SHA1_ENABLE_AVX_SUPPORT | 38 | #ifdef CONFIG_AS_AVX |
| 39 | asmlinkage void sha1_transform_avx(u32 *digest, const char *data, | 39 | asmlinkage void sha1_transform_avx(u32 *digest, const char *data, |
| 40 | unsigned int rounds); | 40 | unsigned int rounds); |
| 41 | #endif | 41 | #endif |
| @@ -184,7 +184,7 @@ static struct shash_alg alg = { | |||
| 184 | } | 184 | } |
| 185 | }; | 185 | }; |
| 186 | 186 | ||
| 187 | #ifdef SHA1_ENABLE_AVX_SUPPORT | 187 | #ifdef CONFIG_AS_AVX |
| 188 | static bool __init avx_usable(void) | 188 | static bool __init avx_usable(void) |
| 189 | { | 189 | { |
| 190 | u64 xcr0; | 190 | u64 xcr0; |
| @@ -209,7 +209,7 @@ static int __init sha1_ssse3_mod_init(void) | |||
| 209 | if (cpu_has_ssse3) | 209 | if (cpu_has_ssse3) |
| 210 | sha1_transform_asm = sha1_transform_ssse3; | 210 | sha1_transform_asm = sha1_transform_ssse3; |
| 211 | 211 | ||
| 212 | #ifdef SHA1_ENABLE_AVX_SUPPORT | 212 | #ifdef CONFIG_AS_AVX |
| 213 | /* allow AVX to override SSSE3, it's a little faster */ | 213 | /* allow AVX to override SSSE3, it's a little faster */ |
| 214 | if (avx_usable()) | 214 | if (avx_usable()) |
| 215 | sha1_transform_asm = sha1_transform_avx; | 215 | sha1_transform_asm = sha1_transform_avx; |
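Note: the sha1 change is purely mechanical (the hand-maintained SHA1_ENABLE_AVX_SUPPORT define gives way to the assembler-capability symbol CONFIG_AS_AVX), but the glue file shows the dispatch idiom all of these modules use: probe the CPU once at init, then install the fastest transform in a function pointer so the hot path costs a single indirect call. Reduced to a sketch, with transform_generic()/transform_avx() and have_avx() as placeholders for sha1_transform_ssse3/sha1_transform_avx and avx_usable():

#include <stdio.h>

static void transform_generic(const char *d) { printf("generic: %s\n", d); }
static void transform_avx(const char *d)     { printf("avx: %s\n", d); }

/* set once at init, called on every block thereafter */
static void (*transform)(const char *) = transform_generic;

static int have_avx(void) { return 1; /* stand-in for avx_usable() */ }

int main(void)
{
	if (have_avx())		/* allow AVX to override the baseline */
		transform = transform_avx;
	transform("hello");
	return 0;
}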
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S new file mode 100644 index 000000000000..35f45574390d --- /dev/null +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S | |||
| @@ -0,0 +1,300 @@ | |||
| 1 | /* | ||
| 2 | * Twofish Cipher 8-way parallel algorithm (AVX/x86_64) | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Johannes Goetzfried | ||
| 5 | * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
| 20 | * USA | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | .file "twofish-avx-x86_64-asm_64.S" | ||
| 25 | .text | ||
| 26 | |||
| 27 | /* structure of crypto context */ | ||
| 28 | #define s0 0 | ||
| 29 | #define s1 1024 | ||
| 30 | #define s2 2048 | ||
| 31 | #define s3 3072 | ||
| 32 | #define w 4096 | ||
| 33 | #define k 4128 | ||
| 34 | |||
| 35 | /********************************************************************** | ||
| 36 | 8-way AVX twofish | ||
| 37 | **********************************************************************/ | ||
| 38 | #define CTX %rdi | ||
| 39 | |||
| 40 | #define RA1 %xmm0 | ||
| 41 | #define RB1 %xmm1 | ||
| 42 | #define RC1 %xmm2 | ||
| 43 | #define RD1 %xmm3 | ||
| 44 | |||
| 45 | #define RA2 %xmm4 | ||
| 46 | #define RB2 %xmm5 | ||
| 47 | #define RC2 %xmm6 | ||
| 48 | #define RD2 %xmm7 | ||
| 49 | |||
| 50 | #define RX %xmm8 | ||
| 51 | #define RY %xmm9 | ||
| 52 | |||
| 53 | #define RK1 %xmm10 | ||
| 54 | #define RK2 %xmm11 | ||
| 55 | |||
| 56 | #define RID1 %rax | ||
| 57 | #define RID1b %al | ||
| 58 | #define RID2 %rbx | ||
| 59 | #define RID2b %bl | ||
| 60 | |||
| 61 | #define RGI1 %rdx | ||
| 62 | #define RGI1bl %dl | ||
| 63 | #define RGI1bh %dh | ||
| 64 | #define RGI2 %rcx | ||
| 65 | #define RGI2bl %cl | ||
| 66 | #define RGI2bh %ch | ||
| 67 | |||
| 68 | #define RGS1 %r8 | ||
| 69 | #define RGS1d %r8d | ||
| 70 | #define RGS2 %r9 | ||
| 71 | #define RGS2d %r9d | ||
| 72 | #define RGS3 %r10 | ||
| 73 | #define RGS3d %r10d | ||
| 74 | |||
| 75 | |||
| 76 | #define lookup_32bit(t0, t1, t2, t3, src, dst) \ | ||
| 77 | movb src ## bl, RID1b; \ | ||
| 78 | movb src ## bh, RID2b; \ | ||
| 79 | movl t0(CTX, RID1, 4), dst ## d; \ | ||
| 80 | xorl t1(CTX, RID2, 4), dst ## d; \ | ||
| 81 | shrq $16, src; \ | ||
| 82 | movb src ## bl, RID1b; \ | ||
| 83 | movb src ## bh, RID2b; \ | ||
| 84 | xorl t2(CTX, RID1, 4), dst ## d; \ | ||
| 85 | xorl t3(CTX, RID2, 4), dst ## d; | ||
| 86 | |||
| 87 | #define G(a, x, t0, t1, t2, t3) \ | ||
| 88 | vmovq a, RGI1; \ | ||
| 89 | vpsrldq $8, a, x; \ | ||
| 90 | vmovq x, RGI2; \ | ||
| 91 | \ | ||
| 92 | lookup_32bit(t0, t1, t2, t3, RGI1, RGS1); \ | ||
| 93 | shrq $16, RGI1; \ | ||
| 94 | lookup_32bit(t0, t1, t2, t3, RGI1, RGS2); \ | ||
| 95 | shlq $32, RGS2; \ | ||
| 96 | orq RGS1, RGS2; \ | ||
| 97 | \ | ||
| 98 | lookup_32bit(t0, t1, t2, t3, RGI2, RGS1); \ | ||
| 99 | shrq $16, RGI2; \ | ||
| 100 | lookup_32bit(t0, t1, t2, t3, RGI2, RGS3); \ | ||
| 101 | shlq $32, RGS3; \ | ||
| 102 | orq RGS1, RGS3; \ | ||
| 103 | \ | ||
| 104 | vmovq RGS2, x; \ | ||
| 105 | vpinsrq $1, RGS3, x, x; | ||
| 106 | |||
| 107 | #define encround(a, b, c, d, x, y) \ | ||
| 108 | G(a, x, s0, s1, s2, s3); \ | ||
| 109 | G(b, y, s1, s2, s3, s0); \ | ||
| 110 | vpaddd x, y, x; \ | ||
| 111 | vpaddd y, x, y; \ | ||
| 112 | vpaddd x, RK1, x; \ | ||
| 113 | vpaddd y, RK2, y; \ | ||
| 114 | vpxor x, c, c; \ | ||
| 115 | vpsrld $1, c, x; \ | ||
| 116 | vpslld $(32 - 1), c, c; \ | ||
| 117 | vpor c, x, c; \ | ||
| 118 | vpslld $1, d, x; \ | ||
| 119 | vpsrld $(32 - 1), d, d; \ | ||
| 120 | vpor d, x, d; \ | ||
| 121 | vpxor d, y, d; | ||
| 122 | |||
| 123 | #define decround(a, b, c, d, x, y) \ | ||
| 124 | G(a, x, s0, s1, s2, s3); \ | ||
| 125 | G(b, y, s1, s2, s3, s0); \ | ||
| 126 | vpaddd x, y, x; \ | ||
| 127 | vpaddd y, x, y; \ | ||
| 128 | vpaddd y, RK2, y; \ | ||
| 129 | vpxor d, y, d; \ | ||
| 130 | vpsrld $1, d, y; \ | ||
| 131 | vpslld $(32 - 1), d, d; \ | ||
| 132 | vpor d, y, d; \ | ||
| 133 | vpslld $1, c, y; \ | ||
| 134 | vpsrld $(32 - 1), c, c; \ | ||
| 135 | vpor c, y, c; \ | ||
| 136 | vpaddd x, RK1, x; \ | ||
| 137 | vpxor x, c, c; | ||
| 138 | |||
| 139 | #define encrypt_round(n, a, b, c, d) \ | ||
| 140 | vbroadcastss (k+4*(2*(n)))(CTX), RK1; \ | ||
| 141 | vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \ | ||
| 142 | encround(a ## 1, b ## 1, c ## 1, d ## 1, RX, RY); \ | ||
| 143 | encround(a ## 2, b ## 2, c ## 2, d ## 2, RX, RY); | ||
| 144 | |||
| 145 | #define decrypt_round(n, a, b, c, d) \ | ||
| 146 | vbroadcastss (k+4*(2*(n)))(CTX), RK1; \ | ||
| 147 | vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \ | ||
| 148 | decround(a ## 1, b ## 1, c ## 1, d ## 1, RX, RY); \ | ||
| 149 | decround(a ## 2, b ## 2, c ## 2, d ## 2, RX, RY); | ||
| 150 | |||
| 151 | #define encrypt_cycle(n) \ | ||
| 152 | encrypt_round((2*n), RA, RB, RC, RD); \ | ||
| 153 | encrypt_round(((2*n) + 1), RC, RD, RA, RB); | ||
| 154 | |||
| 155 | #define decrypt_cycle(n) \ | ||
| 156 | decrypt_round(((2*n) + 1), RC, RD, RA, RB); \ | ||
| 157 | decrypt_round((2*n), RA, RB, RC, RD); | ||
| 158 | |||
| 159 | |||
| 160 | #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ | ||
| 161 | vpunpckldq x1, x0, t0; \ | ||
| 162 | vpunpckhdq x1, x0, t2; \ | ||
| 163 | vpunpckldq x3, x2, t1; \ | ||
| 164 | vpunpckhdq x3, x2, x3; \ | ||
| 165 | \ | ||
| 166 | vpunpcklqdq t1, t0, x0; \ | ||
| 167 | vpunpckhqdq t1, t0, x1; \ | ||
| 168 | vpunpcklqdq x3, t2, x2; \ | ||
| 169 | vpunpckhqdq x3, t2, x3; | ||
| 170 | |||
| 171 | #define inpack_blocks(in, x0, x1, x2, x3, wkey, t0, t1, t2) \ | ||
| 172 | vpxor (0*4*4)(in), wkey, x0; \ | ||
| 173 | vpxor (1*4*4)(in), wkey, x1; \ | ||
| 174 | vpxor (2*4*4)(in), wkey, x2; \ | ||
| 175 | vpxor (3*4*4)(in), wkey, x3; \ | ||
| 176 | \ | ||
| 177 | transpose_4x4(x0, x1, x2, x3, t0, t1, t2) | ||
| 178 | |||
| 179 | #define outunpack_blocks(out, x0, x1, x2, x3, wkey, t0, t1, t2) \ | ||
| 180 | transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ | ||
| 181 | \ | ||
| 182 | vpxor x0, wkey, x0; \ | ||
| 183 | vmovdqu x0, (0*4*4)(out); \ | ||
| 184 | vpxor x1, wkey, x1; \ | ||
| 185 | vmovdqu x1, (1*4*4)(out); \ | ||
| 186 | vpxor x2, wkey, x2; \ | ||
| 187 | vmovdqu x2, (2*4*4)(out); \ | ||
| 188 | vpxor x3, wkey, x3; \ | ||
| 189 | vmovdqu x3, (3*4*4)(out); | ||
| 190 | |||
| 191 | #define outunpack_xor_blocks(out, x0, x1, x2, x3, wkey, t0, t1, t2) \ | ||
| 192 | transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ | ||
| 193 | \ | ||
| 194 | vpxor x0, wkey, x0; \ | ||
| 195 | vpxor (0*4*4)(out), x0, x0; \ | ||
| 196 | vmovdqu x0, (0*4*4)(out); \ | ||
| 197 | vpxor x1, wkey, x1; \ | ||
| 198 | vpxor (1*4*4)(out), x1, x1; \ | ||
| 199 | vmovdqu x1, (1*4*4)(out); \ | ||
| 200 | vpxor x2, wkey, x2; \ | ||
| 201 | vpxor (2*4*4)(out), x2, x2; \ | ||
| 202 | vmovdqu x2, (2*4*4)(out); \ | ||
| 203 | vpxor x3, wkey, x3; \ | ||
| 204 | vpxor (3*4*4)(out), x3, x3; \ | ||
| 205 | vmovdqu x3, (3*4*4)(out); | ||
| 206 | |||
| 207 | .align 8 | ||
| 208 | .global __twofish_enc_blk_8way | ||
| 209 | .type __twofish_enc_blk_8way,@function; | ||
| 210 | |||
| 211 | __twofish_enc_blk_8way: | ||
| 212 | /* input: | ||
| 213 | * %rdi: ctx, CTX | ||
| 214 | * %rsi: dst | ||
| 215 | * %rdx: src | ||
| 216 | * %rcx: bool, if true: xor output | ||
| 217 | */ | ||
| 218 | |||
| 219 | pushq %rbx; | ||
| 220 | pushq %rcx; | ||
| 221 | |||
| 222 | vmovdqu w(CTX), RK1; | ||
| 223 | |||
| 224 | leaq (4*4*4)(%rdx), %rax; | ||
| 225 | inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RK1, RX, RY, RK2); | ||
| 226 | inpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX, RY, RK2); | ||
| 227 | |||
| 228 | xorq RID1, RID1; | ||
| 229 | xorq RID2, RID2; | ||
| 230 | |||
| 231 | encrypt_cycle(0); | ||
| 232 | encrypt_cycle(1); | ||
| 233 | encrypt_cycle(2); | ||
| 234 | encrypt_cycle(3); | ||
| 235 | encrypt_cycle(4); | ||
| 236 | encrypt_cycle(5); | ||
| 237 | encrypt_cycle(6); | ||
| 238 | encrypt_cycle(7); | ||
| 239 | |||
| 240 | vmovdqu (w+4*4)(CTX), RK1; | ||
| 241 | |||
| 242 | popq %rcx; | ||
| 243 | popq %rbx; | ||
| 244 | |||
| 245 | leaq (4*4*4)(%rsi), %rax; | ||
| 246 | |||
| 247 | testb %cl, %cl; | ||
| 248 | jnz __enc_xor8; | ||
| 249 | |||
| 250 | outunpack_blocks(%rsi, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2); | ||
| 251 | outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2); | ||
| 252 | |||
| 253 | ret; | ||
| 254 | |||
| 255 | __enc_xor8: | ||
| 256 | outunpack_xor_blocks(%rsi, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2); | ||
| 257 | outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2); | ||
| 258 | |||
| 259 | ret; | ||
| 260 | |||
| 261 | .align 8 | ||
| 262 | .global twofish_dec_blk_8way | ||
| 263 | .type twofish_dec_blk_8way,@function; | ||
| 264 | |||
| 265 | twofish_dec_blk_8way: | ||
| 266 | /* input: | ||
| 267 | * %rdi: ctx, CTX | ||
| 268 | * %rsi: dst | ||
| 269 | * %rdx: src | ||
| 270 | */ | ||
| 271 | |||
| 272 | pushq %rbx; | ||
| 273 | |||
| 274 | vmovdqu (w+4*4)(CTX), RK1; | ||
| 275 | |||
| 276 | leaq (4*4*4)(%rdx), %rax; | ||
| 277 | inpack_blocks(%rdx, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2); | ||
| 278 | inpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2); | ||
| 279 | |||
| 280 | xorq RID1, RID1; | ||
| 281 | xorq RID2, RID2; | ||
| 282 | |||
| 283 | decrypt_cycle(7); | ||
| 284 | decrypt_cycle(6); | ||
| 285 | decrypt_cycle(5); | ||
| 286 | decrypt_cycle(4); | ||
| 287 | decrypt_cycle(3); | ||
| 288 | decrypt_cycle(2); | ||
| 289 | decrypt_cycle(1); | ||
| 290 | decrypt_cycle(0); | ||
| 291 | |||
| 292 | vmovdqu (w)(CTX), RK1; | ||
| 293 | |||
| 294 | popq %rbx; | ||
| 295 | |||
| 296 | leaq (4*4*4)(%rsi), %rax; | ||
| 297 | outunpack_blocks(%rsi, RA1, RB1, RC1, RD1, RK1, RX, RY, RK2); | ||
| 298 | outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX, RY, RK2); | ||
| 299 | |||
| 300 | ret; | ||
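Note: the new Twofish assembler keeps a group of four 128-bit blocks per register set, so transpose_4x4 has to convert between block-per-register and lane-per-register layouts. It does this with two unpack stages, first interleaving 32-bit lanes (vpunpckldq/vpunpckhdq) and then 64-bit lanes (vpunpcklqdq/vpunpckhqdq). A plain-C model of that exact shuffle sequence follows, with the AT&T operand order already folded in; the v128 type and the unpack helpers are illustrative, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t l[4]; } v128;

static v128 unpacklo32(v128 a, v128 b)	/* vpunpckldq b, a, dst */
{ return (v128){{ a.l[0], b.l[0], a.l[1], b.l[1] }}; }
static v128 unpackhi32(v128 a, v128 b)	/* vpunpckhdq b, a, dst */
{ return (v128){{ a.l[2], b.l[2], a.l[3], b.l[3] }}; }
static v128 unpacklo64(v128 a, v128 b)	/* vpunpcklqdq b, a, dst */
{ return (v128){{ a.l[0], a.l[1], b.l[0], b.l[1] }}; }
static v128 unpackhi64(v128 a, v128 b)	/* vpunpckhqdq b, a, dst */
{ return (v128){{ a.l[2], a.l[3], b.l[2], b.l[3] }}; }

static void transpose_4x4(v128 *x0, v128 *x1, v128 *x2, v128 *x3)
{
	v128 t0 = unpacklo32(*x0, *x1);
	v128 t2 = unpackhi32(*x0, *x1);
	v128 t1 = unpacklo32(*x2, *x3);
	v128 t3 = unpackhi32(*x2, *x3);

	*x0 = unpacklo64(t0, t1);
	*x1 = unpackhi64(t0, t1);
	*x2 = unpacklo64(t2, t3);
	*x3 = unpackhi64(t2, t3);
}

int main(void)
{
	v128 r[4];
	int i, j;

	for (i = 0; i < 4; i++)
		for (j = 0; j < 4; j++)
			r[i].l[j] = (uint32_t)(i * 4 + j);

	transpose_4x4(&r[0], &r[1], &r[2], &r[3]);

	for (i = 0; i < 4; i++)
		printf("%2u %2u %2u %2u\n",
		       r[i].l[0], r[i].l[1], r[i].l[2], r[i].l[3]);
	/* row i now holds lane i of every input block:
	 * 0 4 8 12 / 1 5 9 13 / 2 6 10 14 / 3 7 11 15 */
	return 0;
}

After the transpose, the round macros can operate on the same 32-bit word of four blocks at once; the 8-way variant simply runs two such register sets through each round.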
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c new file mode 100644 index 000000000000..782b67ddaf6a --- /dev/null +++ b/arch/x86/crypto/twofish_avx_glue.c | |||
| @@ -0,0 +1,624 @@ | |||
| 1 | /* | ||
| 2 | * Glue Code for AVX assembler version of Twofish Cipher | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Johannes Goetzfried | ||
| 5 | * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
| 20 | * USA | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/hardirq.h> | ||
| 26 | #include <linux/types.h> | ||
| 27 | #include <linux/crypto.h> | ||
| 28 | #include <linux/err.h> | ||
| 29 | #include <crypto/algapi.h> | ||
| 30 | #include <crypto/twofish.h> | ||
| 31 | #include <crypto/cryptd.h> | ||
| 32 | #include <crypto/b128ops.h> | ||
| 33 | #include <crypto/ctr.h> | ||
| 34 | #include <crypto/lrw.h> | ||
| 35 | #include <crypto/xts.h> | ||
| 36 | #include <asm/i387.h> | ||
| 37 | #include <asm/xcr.h> | ||
| 38 | #include <asm/xsave.h> | ||
| 39 | #include <asm/crypto/twofish.h> | ||
| 40 | #include <asm/crypto/ablk_helper.h> | ||
| 41 | #include <asm/crypto/glue_helper.h> | ||
| 42 | #include <crypto/scatterwalk.h> | ||
| 43 | #include <linux/workqueue.h> | ||
| 44 | #include <linux/spinlock.h> | ||
| 45 | |||
| 46 | #define TWOFISH_PARALLEL_BLOCKS 8 | ||
| 47 | |||
| 48 | static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, | ||
| 49 | const u8 *src) | ||
| 50 | { | ||
| 51 | __twofish_enc_blk_3way(ctx, dst, src, false); | ||
| 52 | } | ||
| 53 | |||
| 54 | /* 8-way parallel cipher functions */ | ||
| 55 | asmlinkage void __twofish_enc_blk_8way(struct twofish_ctx *ctx, u8 *dst, | ||
| 56 | const u8 *src, bool xor); | ||
| 57 | asmlinkage void twofish_dec_blk_8way(struct twofish_ctx *ctx, u8 *dst, | ||
| 58 | const u8 *src); | ||
| 59 | |||
| 60 | static inline void twofish_enc_blk_xway(struct twofish_ctx *ctx, u8 *dst, | ||
| 61 | const u8 *src) | ||
| 62 | { | ||
| 63 | __twofish_enc_blk_8way(ctx, dst, src, false); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline void twofish_enc_blk_xway_xor(struct twofish_ctx *ctx, u8 *dst, | ||
| 67 | const u8 *src) | ||
| 68 | { | ||
| 69 | __twofish_enc_blk_8way(ctx, dst, src, true); | ||
| 70 | } | ||
| 71 | |||
| 72 | static inline void twofish_dec_blk_xway(struct twofish_ctx *ctx, u8 *dst, | ||
| 73 | const u8 *src) | ||
| 74 | { | ||
| 75 | twofish_dec_blk_8way(ctx, dst, src); | ||
| 76 | } | ||
| 77 | |||
| 78 | static void twofish_dec_blk_cbc_xway(void *ctx, u128 *dst, const u128 *src) | ||
| 79 | { | ||
| 80 | u128 ivs[TWOFISH_PARALLEL_BLOCKS - 1]; | ||
| 81 | unsigned int j; | ||
| 82 | |||
| 83 | for (j = 0; j < TWOFISH_PARALLEL_BLOCKS - 1; j++) | ||
| 84 | ivs[j] = src[j]; | ||
| 85 | |||
| 86 | twofish_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src); | ||
| 87 | |||
| 88 | for (j = 0; j < TWOFISH_PARALLEL_BLOCKS - 1; j++) | ||
| 89 | u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); | ||
| 90 | } | ||
| 91 | |||
| 92 | static void twofish_enc_blk_ctr_xway(void *ctx, u128 *dst, const u128 *src, | ||
| 93 | u128 *iv) | ||
| 94 | { | ||
| 95 | be128 ctrblks[TWOFISH_PARALLEL_BLOCKS]; | ||
| 96 | unsigned int i; | ||
| 97 | |||
| 98 | for (i = 0; i < TWOFISH_PARALLEL_BLOCKS; i++) { | ||
| 99 | if (dst != src) | ||
| 100 | dst[i] = src[i]; | ||
| 101 | |||
| 102 | u128_to_be128(&ctrblks[i], iv); | ||
| 103 | u128_inc(iv); | ||
| 104 | } | ||
| 105 | |||
| 106 | twofish_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks); | ||
| 107 | } | ||
| 108 | |||
| 109 | static const struct common_glue_ctx twofish_enc = { | ||
| 110 | .num_funcs = 3, | ||
| 111 | .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, | ||
| 112 | |||
| 113 | .funcs = { { | ||
| 114 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, | ||
| 115 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_xway) } | ||
| 116 | }, { | ||
| 117 | .num_blocks = 3, | ||
| 118 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } | ||
| 119 | }, { | ||
| 120 | .num_blocks = 1, | ||
| 121 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } | ||
| 122 | } } | ||
| 123 | }; | ||
| 124 | |||
| 125 | static const struct common_glue_ctx twofish_ctr = { | ||
| 126 | .num_funcs = 3, | ||
| 127 | .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, | ||
| 128 | |||
| 129 | .funcs = { { | ||
| 130 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, | ||
| 131 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_xway) } | ||
| 132 | }, { | ||
| 133 | .num_blocks = 3, | ||
| 134 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) } | ||
| 135 | }, { | ||
| 136 | .num_blocks = 1, | ||
| 137 | .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) } | ||
| 138 | } } | ||
| 139 | }; | ||
| 140 | |||
| 141 | static const struct common_glue_ctx twofish_dec = { | ||
| 142 | .num_funcs = 3, | ||
| 143 | .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, | ||
| 144 | |||
| 145 | .funcs = { { | ||
| 146 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, | ||
| 147 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_xway) } | ||
| 148 | }, { | ||
| 149 | .num_blocks = 3, | ||
| 150 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } | ||
| 151 | }, { | ||
| 152 | .num_blocks = 1, | ||
| 153 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } | ||
| 154 | } } | ||
| 155 | }; | ||
| 156 | |||
| 157 | static const struct common_glue_ctx twofish_dec_cbc = { | ||
| 158 | .num_funcs = 3, | ||
| 159 | .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS, | ||
| 160 | |||
| 161 | .funcs = { { | ||
| 162 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, | ||
| 163 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_xway) } | ||
| 164 | }, { | ||
| 165 | .num_blocks = 3, | ||
| 166 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } | ||
| 167 | }, { | ||
| 168 | .num_blocks = 1, | ||
| 169 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } | ||
| 170 | } } | ||
| 171 | }; | ||
| 172 | |||
| 173 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 174 | struct scatterlist *src, unsigned int nbytes) | ||
| 175 | { | ||
| 176 | return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes); | ||
| 177 | } | ||
| 178 | |||
| 179 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 180 | struct scatterlist *src, unsigned int nbytes) | ||
| 181 | { | ||
| 182 | return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes); | ||
| 183 | } | ||
| 184 | |||
| 185 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 186 | struct scatterlist *src, unsigned int nbytes) | ||
| 187 | { | ||
| 188 | return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc, | ||
| 189 | dst, src, nbytes); | ||
| 190 | } | ||
| 191 | |||
| 192 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 193 | struct scatterlist *src, unsigned int nbytes) | ||
| 194 | { | ||
| 195 | return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src, | ||
| 196 | nbytes); | ||
| 197 | } | ||
| 198 | |||
| 199 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 200 | struct scatterlist *src, unsigned int nbytes) | ||
| 201 | { | ||
| 202 | return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes); | ||
| 203 | } | ||
| 204 | |||
| 205 | static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes) | ||
| 206 | { | ||
| 207 | return glue_fpu_begin(TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS, NULL, | ||
| 208 | fpu_enabled, nbytes); | ||
| 209 | } | ||
| 210 | |||
| 211 | static inline void twofish_fpu_end(bool fpu_enabled) | ||
| 212 | { | ||
| 213 | glue_fpu_end(fpu_enabled); | ||
| 214 | } | ||
| 215 | |||
| 216 | struct crypt_priv { | ||
| 217 | struct twofish_ctx *ctx; | ||
| 218 | bool fpu_enabled; | ||
| 219 | }; | ||
| 220 | |||
| 221 | static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) | ||
| 222 | { | ||
| 223 | const unsigned int bsize = TF_BLOCK_SIZE; | ||
| 224 | struct crypt_priv *ctx = priv; | ||
| 225 | int i; | ||
| 226 | |||
| 227 | ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes); | ||
| 228 | |||
| 229 | if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) { | ||
| 230 | twofish_enc_blk_xway(ctx->ctx, srcdst, srcdst); | ||
| 231 | return; | ||
| 232 | } | ||
| 233 | |||
| 234 | for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) | ||
| 235 | twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst); | ||
| 236 | |||
| 237 | nbytes %= bsize * 3; | ||
| 238 | |||
| 239 | for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) | ||
| 240 | twofish_enc_blk(ctx->ctx, srcdst, srcdst); | ||
| 241 | } | ||
| 242 | |||
| 243 | static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) | ||
| 244 | { | ||
| 245 | const unsigned int bsize = TF_BLOCK_SIZE; | ||
| 246 | struct crypt_priv *ctx = priv; | ||
| 247 | int i; | ||
| 248 | |||
| 249 | ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes); | ||
| 250 | |||
| 251 | if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) { | ||
| 252 | twofish_dec_blk_xway(ctx->ctx, srcdst, srcdst); | ||
| 253 | return; | ||
| 254 | } | ||
| 255 | |||
| 256 | for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) | ||
| 257 | twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst); | ||
| 258 | |||
| 259 | nbytes %= bsize * 3; | ||
| 260 | |||
| 261 | for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) | ||
| 262 | twofish_dec_blk(ctx->ctx, srcdst, srcdst); | ||
| 263 | } | ||
| 264 | |||
| 265 | static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 266 | struct scatterlist *src, unsigned int nbytes) | ||
| 267 | { | ||
| 268 | struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 269 | be128 buf[TWOFISH_PARALLEL_BLOCKS]; | ||
| 270 | struct crypt_priv crypt_ctx = { | ||
| 271 | .ctx = &ctx->twofish_ctx, | ||
| 272 | .fpu_enabled = false, | ||
| 273 | }; | ||
| 274 | struct lrw_crypt_req req = { | ||
| 275 | .tbuf = buf, | ||
| 276 | .tbuflen = sizeof(buf), | ||
| 277 | |||
| 278 | .table_ctx = &ctx->lrw_table, | ||
| 279 | .crypt_ctx = &crypt_ctx, | ||
| 280 | .crypt_fn = encrypt_callback, | ||
| 281 | }; | ||
| 282 | int ret; | ||
| 283 | |||
| 284 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 285 | ret = lrw_crypt(desc, dst, src, nbytes, &req); | ||
| 286 | twofish_fpu_end(crypt_ctx.fpu_enabled); | ||
| 287 | |||
| 288 | return ret; | ||
| 289 | } | ||
| 290 | |||
| 291 | static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 292 | struct scatterlist *src, unsigned int nbytes) | ||
| 293 | { | ||
| 294 | struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 295 | be128 buf[TWOFISH_PARALLEL_BLOCKS]; | ||
| 296 | struct crypt_priv crypt_ctx = { | ||
| 297 | .ctx = &ctx->twofish_ctx, | ||
| 298 | .fpu_enabled = false, | ||
| 299 | }; | ||
| 300 | struct lrw_crypt_req req = { | ||
| 301 | .tbuf = buf, | ||
| 302 | .tbuflen = sizeof(buf), | ||
| 303 | |||
| 304 | .table_ctx = &ctx->lrw_table, | ||
| 305 | .crypt_ctx = &crypt_ctx, | ||
| 306 | .crypt_fn = decrypt_callback, | ||
| 307 | }; | ||
| 308 | int ret; | ||
| 309 | |||
| 310 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 311 | ret = lrw_crypt(desc, dst, src, nbytes, &req); | ||
| 312 | twofish_fpu_end(crypt_ctx.fpu_enabled); | ||
| 313 | |||
| 314 | return ret; | ||
| 315 | } | ||
| 316 | |||
| 317 | static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 318 | struct scatterlist *src, unsigned int nbytes) | ||
| 319 | { | ||
| 320 | struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 321 | be128 buf[TWOFISH_PARALLEL_BLOCKS]; | ||
| 322 | struct crypt_priv crypt_ctx = { | ||
| 323 | .ctx = &ctx->crypt_ctx, | ||
| 324 | .fpu_enabled = false, | ||
| 325 | }; | ||
| 326 | struct xts_crypt_req req = { | ||
| 327 | .tbuf = buf, | ||
| 328 | .tbuflen = sizeof(buf), | ||
| 329 | |||
| 330 | .tweak_ctx = &ctx->tweak_ctx, | ||
| 331 | .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk), | ||
| 332 | .crypt_ctx = &crypt_ctx, | ||
| 333 | .crypt_fn = encrypt_callback, | ||
| 334 | }; | ||
| 335 | int ret; | ||
| 336 | |||
| 337 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 338 | ret = xts_crypt(desc, dst, src, nbytes, &req); | ||
| 339 | twofish_fpu_end(crypt_ctx.fpu_enabled); | ||
| 340 | |||
| 341 | return ret; | ||
| 342 | } | ||
| 343 | |||
| 344 | static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 345 | struct scatterlist *src, unsigned int nbytes) | ||
| 346 | { | ||
| 347 | struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 348 | be128 buf[TWOFISH_PARALLEL_BLOCKS]; | ||
| 349 | struct crypt_priv crypt_ctx = { | ||
| 350 | .ctx = &ctx->crypt_ctx, | ||
| 351 | .fpu_enabled = false, | ||
| 352 | }; | ||
| 353 | struct xts_crypt_req req = { | ||
| 354 | .tbuf = buf, | ||
| 355 | .tbuflen = sizeof(buf), | ||
| 356 | |||
| 357 | .tweak_ctx = &ctx->tweak_ctx, | ||
| 358 | .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk), | ||
| 359 | .crypt_ctx = &crypt_ctx, | ||
| 360 | .crypt_fn = decrypt_callback, | ||
| 361 | }; | ||
| 362 | int ret; | ||
| 363 | |||
| 364 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 365 | ret = xts_crypt(desc, dst, src, nbytes, &req); | ||
| 366 | twofish_fpu_end(crypt_ctx.fpu_enabled); | ||
| 367 | |||
| 368 | return ret; | ||
| 369 | } | ||
| 370 | |||
| 371 | static struct crypto_alg twofish_algs[10] = { { | ||
| 372 | .cra_name = "__ecb-twofish-avx", | ||
| 373 | .cra_driver_name = "__driver-ecb-twofish-avx", | ||
| 374 | .cra_priority = 0, | ||
| 375 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 376 | .cra_blocksize = TF_BLOCK_SIZE, | ||
| 377 | .cra_ctxsize = sizeof(struct twofish_ctx), | ||
| 378 | .cra_alignmask = 0, | ||
| 379 | .cra_type = &crypto_blkcipher_type, | ||
| 380 | .cra_module = THIS_MODULE, | ||
| 381 | .cra_list = LIST_HEAD_INIT(twofish_algs[0].cra_list), | ||
| 382 | .cra_u = { | ||
| 383 | .blkcipher = { | ||
| 384 | .min_keysize = TF_MIN_KEY_SIZE, | ||
| 385 | .max_keysize = TF_MAX_KEY_SIZE, | ||
| 386 | .setkey = twofish_setkey, | ||
| 387 | .encrypt = ecb_encrypt, | ||
| 388 | .decrypt = ecb_decrypt, | ||
| 389 | }, | ||
| 390 | }, | ||
| 391 | }, { | ||
| 392 | .cra_name = "__cbc-twofish-avx", | ||
| 393 | .cra_driver_name = "__driver-cbc-twofish-avx", | ||
| 394 | .cra_priority = 0, | ||
| 395 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 396 | .cra_blocksize = TF_BLOCK_SIZE, | ||
| 397 | .cra_ctxsize = sizeof(struct twofish_ctx), | ||
| 398 | .cra_alignmask = 0, | ||
| 399 | .cra_type = &crypto_blkcipher_type, | ||
| 400 | .cra_module = THIS_MODULE, | ||
| 401 | .cra_list = LIST_HEAD_INIT(twofish_algs[1].cra_list), | ||
| 402 | .cra_u = { | ||
| 403 | .blkcipher = { | ||
| 404 | .min_keysize = TF_MIN_KEY_SIZE, | ||
| 405 | .max_keysize = TF_MAX_KEY_SIZE, | ||
| 406 | .setkey = twofish_setkey, | ||
| 407 | .encrypt = cbc_encrypt, | ||
| 408 | .decrypt = cbc_decrypt, | ||
| 409 | }, | ||
| 410 | }, | ||
| 411 | }, { | ||
| 412 | .cra_name = "__ctr-twofish-avx", | ||
| 413 | .cra_driver_name = "__driver-ctr-twofish-avx", | ||
| 414 | .cra_priority = 0, | ||
| 415 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 416 | .cra_blocksize = 1, | ||
| 417 | .cra_ctxsize = sizeof(struct twofish_ctx), | ||
| 418 | .cra_alignmask = 0, | ||
| 419 | .cra_type = &crypto_blkcipher_type, | ||
| 420 | .cra_module = THIS_MODULE, | ||
| 421 | .cra_list = LIST_HEAD_INIT(twofish_algs[2].cra_list), | ||
| 422 | .cra_u = { | ||
| 423 | .blkcipher = { | ||
| 424 | .min_keysize = TF_MIN_KEY_SIZE, | ||
| 425 | .max_keysize = TF_MAX_KEY_SIZE, | ||
| 426 | .ivsize = TF_BLOCK_SIZE, | ||
| 427 | .setkey = twofish_setkey, | ||
| 428 | .encrypt = ctr_crypt, | ||
| 429 | .decrypt = ctr_crypt, | ||
| 430 | }, | ||
| 431 | }, | ||
| 432 | }, { | ||
| 433 | .cra_name = "__lrw-twofish-avx", | ||
| 434 | .cra_driver_name = "__driver-lrw-twofish-avx", | ||
| 435 | .cra_priority = 0, | ||
| 436 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 437 | .cra_blocksize = TF_BLOCK_SIZE, | ||
| 438 | .cra_ctxsize = sizeof(struct twofish_lrw_ctx), | ||
| 439 | .cra_alignmask = 0, | ||
| 440 | .cra_type = &crypto_blkcipher_type, | ||
| 441 | .cra_module = THIS_MODULE, | ||
| 442 | .cra_list = LIST_HEAD_INIT(twofish_algs[3].cra_list), | ||
| 443 | .cra_exit = lrw_twofish_exit_tfm, | ||
| 444 | .cra_u = { | ||
| 445 | .blkcipher = { | ||
| 446 | .min_keysize = TF_MIN_KEY_SIZE + | ||
| 447 | TF_BLOCK_SIZE, | ||
| 448 | .max_keysize = TF_MAX_KEY_SIZE + | ||
| 449 | TF_BLOCK_SIZE, | ||
| 450 | .ivsize = TF_BLOCK_SIZE, | ||
| 451 | .setkey = lrw_twofish_setkey, | ||
| 452 | .encrypt = lrw_encrypt, | ||
| 453 | .decrypt = lrw_decrypt, | ||
| 454 | }, | ||
| 455 | }, | ||
| 456 | }, { | ||
| 457 | .cra_name = "__xts-twofish-avx", | ||
| 458 | .cra_driver_name = "__driver-xts-twofish-avx", | ||
| 459 | .cra_priority = 0, | ||
| 460 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 461 | .cra_blocksize = TF_BLOCK_SIZE, | ||
| 462 | .cra_ctxsize = sizeof(struct twofish_xts_ctx), | ||
| 463 | .cra_alignmask = 0, | ||
| 464 | .cra_type = &crypto_blkcipher_type, | ||
| 465 | .cra_module = THIS_MODULE, | ||
| 466 | .cra_list = LIST_HEAD_INIT(twofish_algs[4].cra_list), | ||
| 467 | .cra_u = { | ||
| 468 | .blkcipher = { | ||
| 469 | .min_keysize = TF_MIN_KEY_SIZE * 2, | ||
| 470 | .max_keysize = TF_MAX_KEY_SIZE * 2, | ||
| 471 | .ivsize = TF_BLOCK_SIZE, | ||
| 472 | .setkey = xts_twofish_setkey, | ||
| 473 | .encrypt = xts_encrypt, | ||
| 474 | .decrypt = xts_decrypt, | ||
| 475 | }, | ||
| 476 | }, | ||
| 477 | }, { | ||
| 478 | .cra_name = "ecb(twofish)", | ||
| 479 | .cra_driver_name = "ecb-twofish-avx", | ||
| 480 | .cra_priority = 400, | ||
| 481 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 482 | .cra_blocksize = TF_BLOCK_SIZE, | ||
| 483 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 484 | .cra_alignmask = 0, | ||
| 485 | .cra_type = &crypto_ablkcipher_type, | ||
| 486 | .cra_module = THIS_MODULE, | ||
| 487 | .cra_list = LIST_HEAD_INIT(twofish_algs[5].cra_list), | ||
| 488 | .cra_init = ablk_init, | ||
| 489 | .cra_exit = ablk_exit, | ||
| 490 | .cra_u = { | ||
| 491 | .ablkcipher = { | ||
| 492 | .min_keysize = TF_MIN_KEY_SIZE, | ||
| 493 | .max_keysize = TF_MAX_KEY_SIZE, | ||
| 494 | .setkey = ablk_set_key, | ||
| 495 | .encrypt = ablk_encrypt, | ||
| 496 | .decrypt = ablk_decrypt, | ||
| 497 | }, | ||
| 498 | }, | ||
| 499 | }, { | ||
| 500 | .cra_name = "cbc(twofish)", | ||
| 501 | .cra_driver_name = "cbc-twofish-avx", | ||
| 502 | .cra_priority = 400, | ||
| 503 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 504 | .cra_blocksize = TF_BLOCK_SIZE, | ||
| 505 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 506 | .cra_alignmask = 0, | ||
| 507 | .cra_type = &crypto_ablkcipher_type, | ||
| 508 | .cra_module = THIS_MODULE, | ||
| 509 | .cra_list = LIST_HEAD_INIT(twofish_algs[6].cra_list), | ||
| 510 | .cra_init = ablk_init, | ||
| 511 | .cra_exit = ablk_exit, | ||
| 512 | .cra_u = { | ||
| 513 | .ablkcipher = { | ||
| 514 | .min_keysize = TF_MIN_KEY_SIZE, | ||
| 515 | .max_keysize = TF_MAX_KEY_SIZE, | ||
| 516 | .ivsize = TF_BLOCK_SIZE, | ||
| 517 | .setkey = ablk_set_key, | ||
| 518 | .encrypt = __ablk_encrypt, | ||
| 519 | .decrypt = ablk_decrypt, | ||
| 520 | }, | ||
| 521 | }, | ||
| 522 | }, { | ||
| 523 | .cra_name = "ctr(twofish)", | ||
| 524 | .cra_driver_name = "ctr-twofish-avx", | ||
| 525 | .cra_priority = 400, | ||
| 526 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 527 | .cra_blocksize = 1, | ||
| 528 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 529 | .cra_alignmask = 0, | ||
| 530 | .cra_type = &crypto_ablkcipher_type, | ||
| 531 | .cra_module = THIS_MODULE, | ||
| 532 | .cra_list = LIST_HEAD_INIT(twofish_algs[7].cra_list), | ||
| 533 | .cra_init = ablk_init, | ||
| 534 | .cra_exit = ablk_exit, | ||
| 535 | .cra_u = { | ||
| 536 | .ablkcipher = { | ||
| 537 | .min_keysize = TF_MIN_KEY_SIZE, | ||
| 538 | .max_keysize = TF_MAX_KEY_SIZE, | ||
| 539 | .ivsize = TF_BLOCK_SIZE, | ||
| 540 | .setkey = ablk_set_key, | ||
| 541 | .encrypt = ablk_encrypt, | ||
| 542 | .decrypt = ablk_encrypt, | ||
| 543 | .geniv = "chainiv", | ||
| 544 | }, | ||
| 545 | }, | ||
| 546 | }, { | ||
| 547 | .cra_name = "lrw(twofish)", | ||
| 548 | .cra_driver_name = "lrw-twofish-avx", | ||
| 549 | .cra_priority = 400, | ||
| 550 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 551 | .cra_blocksize = TF_BLOCK_SIZE, | ||
| 552 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 553 | .cra_alignmask = 0, | ||
| 554 | .cra_type = &crypto_ablkcipher_type, | ||
| 555 | .cra_module = THIS_MODULE, | ||
| 556 | .cra_list = LIST_HEAD_INIT(twofish_algs[8].cra_list), | ||
| 557 | .cra_init = ablk_init, | ||
| 558 | .cra_exit = ablk_exit, | ||
| 559 | .cra_u = { | ||
| 560 | .ablkcipher = { | ||
| 561 | .min_keysize = TF_MIN_KEY_SIZE + | ||
| 562 | TF_BLOCK_SIZE, | ||
| 563 | .max_keysize = TF_MAX_KEY_SIZE + | ||
| 564 | TF_BLOCK_SIZE, | ||
| 565 | .ivsize = TF_BLOCK_SIZE, | ||
| 566 | .setkey = ablk_set_key, | ||
| 567 | .encrypt = ablk_encrypt, | ||
| 568 | .decrypt = ablk_decrypt, | ||
| 569 | }, | ||
| 570 | }, | ||
| 571 | }, { | ||
| 572 | .cra_name = "xts(twofish)", | ||
| 573 | .cra_driver_name = "xts-twofish-avx", | ||
| 574 | .cra_priority = 400, | ||
| 575 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 576 | .cra_blocksize = TF_BLOCK_SIZE, | ||
| 577 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 578 | .cra_alignmask = 0, | ||
| 579 | .cra_type = &crypto_ablkcipher_type, | ||
| 580 | .cra_module = THIS_MODULE, | ||
| 581 | .cra_list = LIST_HEAD_INIT(twofish_algs[9].cra_list), | ||
| 582 | .cra_init = ablk_init, | ||
| 583 | .cra_exit = ablk_exit, | ||
| 584 | .cra_u = { | ||
| 585 | .ablkcipher = { | ||
| 586 | .min_keysize = TF_MIN_KEY_SIZE * 2, | ||
| 587 | .max_keysize = TF_MAX_KEY_SIZE * 2, | ||
| 588 | .ivsize = TF_BLOCK_SIZE, | ||
| 589 | .setkey = ablk_set_key, | ||
| 590 | .encrypt = ablk_encrypt, | ||
| 591 | .decrypt = ablk_decrypt, | ||
| 592 | }, | ||
| 593 | }, | ||
| 594 | } }; | ||
| 595 | |||
| 596 | static int __init twofish_init(void) | ||
| 597 | { | ||
| 598 | u64 xcr0; | ||
| 599 | |||
| 600 | if (!cpu_has_avx || !cpu_has_osxsave) { | ||
| 601 | printk(KERN_INFO "AVX instructions are not detected.\n"); | ||
| 602 | return -ENODEV; | ||
| 603 | } | ||
| 604 | |||
| 605 | xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); | ||
| 606 | if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) { | ||
| 607 | printk(KERN_INFO "AVX detected but unusable.\n"); | ||
| 608 | return -ENODEV; | ||
| 609 | } | ||
| 610 | |||
| 611 | return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs)); | ||
| 612 | } | ||
| 613 | |||
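The init path above gates registration on the AVX and OSXSAVE CPUID bits, then reads XCR0 to confirm that the OS actually context-switches SSE and YMM state. As a rough user-space sketch of the same check (the name avx_usable() is made up here, and an assembler that accepts the xgetbv mnemonic is assumed):

#include <stdbool.h>
#include <cpuid.h>

static bool avx_usable(void)
{
	unsigned int eax, ebx, ecx, edx, lo, hi;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;
	/* CPU advertises AVX and the OS has set CR4.OSXSAVE */
	if (!(ecx & bit_AVX) || !(ecx & bit_OSXSAVE))
		return false;
	/* XCR0 bits 1 (SSE) and 2 (YMM) must both be OS-enabled */
	__asm__("xgetbv" : "=a" (lo), "=d" (hi) : "c" (0));
	return (lo & 6) == 6;
}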
| 614 | static void __exit twofish_exit(void) | ||
| 615 | { | ||
| 616 | crypto_unregister_algs(twofish_algs, ARRAY_SIZE(twofish_algs)); | ||
| 617 | } | ||
| 618 | |||
| 619 | module_init(twofish_init); | ||
| 620 | module_exit(twofish_exit); | ||
| 621 | |||
| 622 | MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized"); | ||
| 623 | MODULE_LICENSE("GPL"); | ||
| 624 | MODULE_ALIAS("twofish"); | ||
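A note on the table above: only the underscore-prefixed entries touch AVX directly; the public names wrap them through cryptd so callers can run in any context, and for ctr(twofish) the .decrypt hook deliberately points at ablk_encrypt because CTR is its own inverse. Below is a minimal, hedged sketch of how a caller reaches the new driver through the standard async API of this kernel generation; nothing in it is specific to the AVX code, the priority of 400 simply makes this driver win the "cbc(twofish)" lookup when the module is loaded:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <crypto/twofish.h>

struct demo_result {
	struct completion done;
	int err;
};

static void demo_complete(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)	/* backlog notification only */
		return;
	res->err = err;
	complete(&res->done);
}

static int twofish_cbc_demo(u8 *buf, const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct demo_result res;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(twofish)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, 32);	/* 256-bit key */
	if (err)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.done);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_complete, &res);
	sg_init_one(&sg, buf, TF_BLOCK_SIZE);
	ablkcipher_request_set_crypt(req, &sg, &sg, TF_BLOCK_SIZE, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.done);
		err = res.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}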
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 922ab24cce31..15f9347316c8 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c | |||
| @@ -3,11 +3,6 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | 4 | * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> |
| 5 | * | 5 | * |
| 6 | * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: | ||
| 7 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 8 | * CTR part based on code (crypto/ctr.c) by: | ||
| 9 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| 13 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
| @@ -33,20 +28,13 @@ | |||
| 33 | #include <crypto/algapi.h> | 28 | #include <crypto/algapi.h> |
| 34 | #include <crypto/twofish.h> | 29 | #include <crypto/twofish.h> |
| 35 | #include <crypto/b128ops.h> | 30 | #include <crypto/b128ops.h> |
| 31 | #include <asm/crypto/twofish.h> | ||
| 32 | #include <asm/crypto/glue_helper.h> | ||
| 36 | #include <crypto/lrw.h> | 33 | #include <crypto/lrw.h> |
| 37 | #include <crypto/xts.h> | 34 | #include <crypto/xts.h> |
| 38 | 35 | ||
| 39 | /* regular block cipher functions from twofish_x86_64 module */ | 36 | EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way); |
| 40 | asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, | 37 | EXPORT_SYMBOL_GPL(twofish_dec_blk_3way); |
| 41 | const u8 *src); | ||
| 42 | asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, | ||
| 43 | const u8 *src); | ||
| 44 | |||
| 45 | /* 3-way parallel cipher functions */ | ||
| 46 | asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, | ||
| 47 | const u8 *src, bool xor); | ||
| 48 | asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, | ||
| 49 | const u8 *src); | ||
| 50 | 38 | ||
| 51 | static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, | 39 | static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, |
| 52 | const u8 *src) | 40 | const u8 *src) |
| @@ -60,311 +48,139 @@ static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst, | |||
| 60 | __twofish_enc_blk_3way(ctx, dst, src, true); | 48 | __twofish_enc_blk_3way(ctx, dst, src, true); |
| 61 | } | 49 | } |
| 62 | 50 | ||
| 63 | static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, | 51 | void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) |
| 64 | void (*fn)(struct twofish_ctx *, u8 *, const u8 *), | ||
| 65 | void (*fn_3way)(struct twofish_ctx *, u8 *, const u8 *)) | ||
| 66 | { | ||
| 67 | struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 68 | unsigned int bsize = TF_BLOCK_SIZE; | ||
| 69 | unsigned int nbytes; | ||
| 70 | int err; | ||
| 71 | |||
| 72 | err = blkcipher_walk_virt(desc, walk); | ||
| 73 | |||
| 74 | while ((nbytes = walk->nbytes)) { | ||
| 75 | u8 *wsrc = walk->src.virt.addr; | ||
| 76 | u8 *wdst = walk->dst.virt.addr; | ||
| 77 | |||
| 78 | /* Process three block batch */ | ||
| 79 | if (nbytes >= bsize * 3) { | ||
| 80 | do { | ||
| 81 | fn_3way(ctx, wdst, wsrc); | ||
| 82 | |||
| 83 | wsrc += bsize * 3; | ||
| 84 | wdst += bsize * 3; | ||
| 85 | nbytes -= bsize * 3; | ||
| 86 | } while (nbytes >= bsize * 3); | ||
| 87 | |||
| 88 | if (nbytes < bsize) | ||
| 89 | goto done; | ||
| 90 | } | ||
| 91 | |||
| 92 | /* Handle leftovers */ | ||
| 93 | do { | ||
| 94 | fn(ctx, wdst, wsrc); | ||
| 95 | |||
| 96 | wsrc += bsize; | ||
| 97 | wdst += bsize; | ||
| 98 | nbytes -= bsize; | ||
| 99 | } while (nbytes >= bsize); | ||
| 100 | |||
| 101 | done: | ||
| 102 | err = blkcipher_walk_done(desc, walk, nbytes); | ||
| 103 | } | ||
| 104 | |||
| 105 | return err; | ||
| 106 | } | ||
| 107 | |||
| 108 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 109 | struct scatterlist *src, unsigned int nbytes) | ||
| 110 | { | 52 | { |
| 111 | struct blkcipher_walk walk; | 53 | u128 ivs[2]; |
| 112 | 54 | ||
| 113 | blkcipher_walk_init(&walk, dst, src, nbytes); | 55 | ivs[0] = src[0]; |
| 114 | return ecb_crypt(desc, &walk, twofish_enc_blk, twofish_enc_blk_3way); | 56 | ivs[1] = src[1]; |
| 115 | } | ||
| 116 | 57 | ||
| 117 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 58 | twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); |
| 118 | struct scatterlist *src, unsigned int nbytes) | ||
| 119 | { | ||
| 120 | struct blkcipher_walk walk; | ||
| 121 | 59 | ||
| 122 | blkcipher_walk_init(&walk, dst, src, nbytes); | 60 | u128_xor(&dst[1], &dst[1], &ivs[0]); |
| 123 | return ecb_crypt(desc, &walk, twofish_dec_blk, twofish_dec_blk_3way); | 61 | u128_xor(&dst[2], &dst[2], &ivs[1]); |
| 124 | } | 62 | } |
| 63 | EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); | ||
| 125 | 64 | ||
| 126 | static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, | 65 | void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv) |
| 127 | struct blkcipher_walk *walk) | ||
| 128 | { | ||
| 129 | struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 130 | unsigned int bsize = TF_BLOCK_SIZE; | ||
| 131 | unsigned int nbytes = walk->nbytes; | ||
| 132 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 133 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 134 | u128 *iv = (u128 *)walk->iv; | ||
| 135 | |||
| 136 | do { | ||
| 137 | u128_xor(dst, src, iv); | ||
| 138 | twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst); | ||
| 139 | iv = dst; | ||
| 140 | |||
| 141 | src += 1; | ||
| 142 | dst += 1; | ||
| 143 | nbytes -= bsize; | ||
| 144 | } while (nbytes >= bsize); | ||
| 145 | |||
| 146 | u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv); | ||
| 147 | return nbytes; | ||
| 148 | } | ||
| 149 | |||
| 150 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 151 | struct scatterlist *src, unsigned int nbytes) | ||
| 152 | { | 66 | { |
| 153 | struct blkcipher_walk walk; | 67 | be128 ctrblk; |
| 154 | int err; | ||
| 155 | 68 | ||
| 156 | blkcipher_walk_init(&walk, dst, src, nbytes); | 69 | if (dst != src) |
| 157 | err = blkcipher_walk_virt(desc, &walk); | 70 | *dst = *src; |
| 158 | 71 | ||
| 159 | while ((nbytes = walk.nbytes)) { | 72 | u128_to_be128(&ctrblk, iv); |
| 160 | nbytes = __cbc_encrypt(desc, &walk); | 73 | u128_inc(iv); |
| 161 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 162 | } | ||
| 163 | 74 | ||
| 164 | return err; | 75 | twofish_enc_blk(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); |
| 76 | u128_xor(dst, dst, (u128 *)&ctrblk); | ||
| 165 | } | 77 | } |
| 78 | EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr); | ||
| 166 | 79 | ||
| 167 | static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, | 80 | void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, |
| 168 | struct blkcipher_walk *walk) | 81 | u128 *iv) |
| 169 | { | 82 | { |
| 170 | struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 83 | be128 ctrblks[3]; |
| 171 | unsigned int bsize = TF_BLOCK_SIZE; | ||
| 172 | unsigned int nbytes = walk->nbytes; | ||
| 173 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 174 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 175 | u128 ivs[3 - 1]; | ||
| 176 | u128 last_iv; | ||
| 177 | |||
| 178 | /* Start of the last block. */ | ||
| 179 | src += nbytes / bsize - 1; | ||
| 180 | dst += nbytes / bsize - 1; | ||
| 181 | |||
| 182 | last_iv = *src; | ||
| 183 | |||
| 184 | /* Process three block batch */ | ||
| 185 | if (nbytes >= bsize * 3) { | ||
| 186 | do { | ||
| 187 | nbytes -= bsize * (3 - 1); | ||
| 188 | src -= 3 - 1; | ||
| 189 | dst -= 3 - 1; | ||
| 190 | |||
| 191 | ivs[0] = src[0]; | ||
| 192 | ivs[1] = src[1]; | ||
| 193 | |||
| 194 | twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); | ||
| 195 | |||
| 196 | u128_xor(dst + 1, dst + 1, ivs + 0); | ||
| 197 | u128_xor(dst + 2, dst + 2, ivs + 1); | ||
| 198 | |||
| 199 | nbytes -= bsize; | ||
| 200 | if (nbytes < bsize) | ||
| 201 | goto done; | ||
| 202 | |||
| 203 | u128_xor(dst, dst, src - 1); | ||
| 204 | src -= 1; | ||
| 205 | dst -= 1; | ||
| 206 | } while (nbytes >= bsize * 3); | ||
| 207 | |||
| 208 | if (nbytes < bsize) | ||
| 209 | goto done; | ||
| 210 | } | ||
| 211 | |||
| 212 | /* Handle leftovers */ | ||
| 213 | for (;;) { | ||
| 214 | twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src); | ||
| 215 | |||
| 216 | nbytes -= bsize; | ||
| 217 | if (nbytes < bsize) | ||
| 218 | break; | ||
| 219 | 84 | ||
| 220 | u128_xor(dst, dst, src - 1); | 85 | if (dst != src) { |
| 221 | src -= 1; | 86 | dst[0] = src[0]; |
| 222 | dst -= 1; | 87 | dst[1] = src[1]; |
| 88 | dst[2] = src[2]; | ||
| 223 | } | 89 | } |
| 224 | 90 | ||
| 225 | done: | 91 | u128_to_be128(&ctrblks[0], iv); |
| 226 | u128_xor(dst, dst, (u128 *)walk->iv); | 92 | u128_inc(iv); |
| 227 | *(u128 *)walk->iv = last_iv; | 93 | u128_to_be128(&ctrblks[1], iv); |
| 94 | u128_inc(iv); | ||
| 95 | u128_to_be128(&ctrblks[2], iv); | ||
| 96 | u128_inc(iv); | ||
| 228 | 97 | ||
| 229 | return nbytes; | 98 | twofish_enc_blk_xor_3way(ctx, (u8 *)dst, (u8 *)ctrblks); |
| 230 | } | 99 | } |
| 100 | EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr_3way); | ||
| 101 | |||
| 102 | static const struct common_glue_ctx twofish_enc = { | ||
| 103 | .num_funcs = 2, | ||
| 104 | .fpu_blocks_limit = -1, | ||
| 105 | |||
| 106 | .funcs = { { | ||
| 107 | .num_blocks = 3, | ||
| 108 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } | ||
| 109 | }, { | ||
| 110 | .num_blocks = 1, | ||
| 111 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } | ||
| 112 | } } | ||
| 113 | }; | ||
| 231 | 114 | ||
| 232 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 115 | static const struct common_glue_ctx twofish_ctr = { |
| 233 | struct scatterlist *src, unsigned int nbytes) | 116 | .num_funcs = 2, |
| 234 | { | 117 | .fpu_blocks_limit = -1, |
| 235 | struct blkcipher_walk walk; | 118 | |
| 236 | int err; | 119 | .funcs = { { |
| 237 | 120 | .num_blocks = 3, | |
| 238 | blkcipher_walk_init(&walk, dst, src, nbytes); | 121 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) } |
| 239 | err = blkcipher_walk_virt(desc, &walk); | 122 | }, { |
| 123 | .num_blocks = 1, | ||
| 124 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) } | ||
| 125 | } } | ||
| 126 | }; | ||
| 240 | 127 | ||
| 241 | while ((nbytes = walk.nbytes)) { | 128 | static const struct common_glue_ctx twofish_dec = { |
| 242 | nbytes = __cbc_decrypt(desc, &walk); | 129 | .num_funcs = 2, |
| 243 | err = blkcipher_walk_done(desc, &walk, nbytes); | 130 | .fpu_blocks_limit = -1, |
| 244 | } | 131 | |
| 132 | .funcs = { { | ||
| 133 | .num_blocks = 3, | ||
| 134 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } | ||
| 135 | }, { | ||
| 136 | .num_blocks = 1, | ||
| 137 | .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } | ||
| 138 | } } | ||
| 139 | }; | ||
| 245 | 140 | ||
| 246 | return err; | 141 | static const struct common_glue_ctx twofish_dec_cbc = { |
| 247 | } | 142 | .num_funcs = 2, |
| 143 | .fpu_blocks_limit = -1, | ||
| 144 | |||
| 145 | .funcs = { { | ||
| 146 | .num_blocks = 3, | ||
| 147 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } | ||
| 148 | }, { | ||
| 149 | .num_blocks = 1, | ||
| 150 | .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } | ||
| 151 | } } | ||
| 152 | }; | ||
| 248 | 153 | ||
| 249 | static inline void u128_to_be128(be128 *dst, const u128 *src) | 154 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 155 | struct scatterlist *src, unsigned int nbytes) | ||
| 250 | { | 156 | { |
| 251 | dst->a = cpu_to_be64(src->a); | 157 | return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes); |
| 252 | dst->b = cpu_to_be64(src->b); | ||
| 253 | } | 158 | } |
| 254 | 159 | ||
| 255 | static inline void be128_to_u128(u128 *dst, const be128 *src) | 160 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 161 | struct scatterlist *src, unsigned int nbytes) | ||
| 256 | { | 162 | { |
| 257 | dst->a = be64_to_cpu(src->a); | 163 | return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes); |
| 258 | dst->b = be64_to_cpu(src->b); | ||
| 259 | } | 164 | } |
| 260 | 165 | ||
| 261 | static inline void u128_inc(u128 *i) | 166 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 167 | struct scatterlist *src, unsigned int nbytes) | ||
| 262 | { | 168 | { |
| 263 | i->b++; | 169 | return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc, |
| 264 | if (!i->b) | 170 | dst, src, nbytes); |
| 265 | i->a++; | ||
| 266 | } | 171 | } |
| 267 | 172 | ||
| 268 | static void ctr_crypt_final(struct blkcipher_desc *desc, | 173 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 269 | struct blkcipher_walk *walk) | 174 | struct scatterlist *src, unsigned int nbytes) |
| 270 | { | 175 | { |
| 271 | struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 176 | return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src, |
| 272 | u8 *ctrblk = walk->iv; | 177 | nbytes); |
| 273 | u8 keystream[TF_BLOCK_SIZE]; | ||
| 274 | u8 *src = walk->src.virt.addr; | ||
| 275 | u8 *dst = walk->dst.virt.addr; | ||
| 276 | unsigned int nbytes = walk->nbytes; | ||
| 277 | |||
| 278 | twofish_enc_blk(ctx, keystream, ctrblk); | ||
| 279 | crypto_xor(keystream, src, nbytes); | ||
| 280 | memcpy(dst, keystream, nbytes); | ||
| 281 | |||
| 282 | crypto_inc(ctrblk, TF_BLOCK_SIZE); | ||
| 283 | } | ||
| 284 | |||
| 285 | static unsigned int __ctr_crypt(struct blkcipher_desc *desc, | ||
| 286 | struct blkcipher_walk *walk) | ||
| 287 | { | ||
| 288 | struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 289 | unsigned int bsize = TF_BLOCK_SIZE; | ||
| 290 | unsigned int nbytes = walk->nbytes; | ||
| 291 | u128 *src = (u128 *)walk->src.virt.addr; | ||
| 292 | u128 *dst = (u128 *)walk->dst.virt.addr; | ||
| 293 | u128 ctrblk; | ||
| 294 | be128 ctrblocks[3]; | ||
| 295 | |||
| 296 | be128_to_u128(&ctrblk, (be128 *)walk->iv); | ||
| 297 | |||
| 298 | /* Process three block batch */ | ||
| 299 | if (nbytes >= bsize * 3) { | ||
| 300 | do { | ||
| 301 | if (dst != src) { | ||
| 302 | dst[0] = src[0]; | ||
| 303 | dst[1] = src[1]; | ||
| 304 | dst[2] = src[2]; | ||
| 305 | } | ||
| 306 | |||
| 307 | /* create ctrblks for parallel encrypt */ | ||
| 308 | u128_to_be128(&ctrblocks[0], &ctrblk); | ||
| 309 | u128_inc(&ctrblk); | ||
| 310 | u128_to_be128(&ctrblocks[1], &ctrblk); | ||
| 311 | u128_inc(&ctrblk); | ||
| 312 | u128_to_be128(&ctrblocks[2], &ctrblk); | ||
| 313 | u128_inc(&ctrblk); | ||
| 314 | |||
| 315 | twofish_enc_blk_xor_3way(ctx, (u8 *)dst, | ||
| 316 | (u8 *)ctrblocks); | ||
| 317 | |||
| 318 | src += 3; | ||
| 319 | dst += 3; | ||
| 320 | nbytes -= bsize * 3; | ||
| 321 | } while (nbytes >= bsize * 3); | ||
| 322 | |||
| 323 | if (nbytes < bsize) | ||
| 324 | goto done; | ||
| 325 | } | ||
| 326 | |||
| 327 | /* Handle leftovers */ | ||
| 328 | do { | ||
| 329 | if (dst != src) | ||
| 330 | *dst = *src; | ||
| 331 | |||
| 332 | u128_to_be128(&ctrblocks[0], &ctrblk); | ||
| 333 | u128_inc(&ctrblk); | ||
| 334 | |||
| 335 | twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); | ||
| 336 | u128_xor(dst, dst, (u128 *)ctrblocks); | ||
| 337 | |||
| 338 | src += 1; | ||
| 339 | dst += 1; | ||
| 340 | nbytes -= bsize; | ||
| 341 | } while (nbytes >= bsize); | ||
| 342 | |||
| 343 | done: | ||
| 344 | u128_to_be128((be128 *)walk->iv, &ctrblk); | ||
| 345 | return nbytes; | ||
| 346 | } | 178 | } |
| 347 | 179 | ||
| 348 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 180 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 349 | struct scatterlist *src, unsigned int nbytes) | 181 | struct scatterlist *src, unsigned int nbytes) |
| 350 | { | 182 | { |
| 351 | struct blkcipher_walk walk; | 183 | return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes); |
| 352 | int err; | ||
| 353 | |||
| 354 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 355 | err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE); | ||
| 356 | |||
| 357 | while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) { | ||
| 358 | nbytes = __ctr_crypt(desc, &walk); | ||
| 359 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 360 | } | ||
| 361 | |||
| 362 | if (walk.nbytes) { | ||
| 363 | ctr_crypt_final(desc, &walk); | ||
| 364 | err = blkcipher_walk_done(desc, &walk, 0); | ||
| 365 | } | ||
| 366 | |||
| 367 | return err; | ||
| 368 | } | 184 | } |
| 369 | 185 | ||
| 370 | static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) | 186 | static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) |
| @@ -397,13 +213,8 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) | |||
| 397 | twofish_dec_blk(ctx, srcdst, srcdst); | 213 | twofish_dec_blk(ctx, srcdst, srcdst); |
| 398 | } | 214 | } |
| 399 | 215 | ||
| 400 | struct twofish_lrw_ctx { | 216 | int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, |
| 401 | struct lrw_table_ctx lrw_table; | 217 | unsigned int keylen) |
| 402 | struct twofish_ctx twofish_ctx; | ||
| 403 | }; | ||
| 404 | |||
| 405 | static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 406 | unsigned int keylen) | ||
| 407 | { | 218 | { |
| 408 | struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm); | 219 | struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm); |
| 409 | int err; | 220 | int err; |
| @@ -415,6 +226,7 @@ static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
| 415 | 226 | ||
| 416 | return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE); | 227 | return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE); |
| 417 | } | 228 | } |
| 229 | EXPORT_SYMBOL_GPL(lrw_twofish_setkey); | ||
| 418 | 230 | ||
| 419 | static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 231 | static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 420 | struct scatterlist *src, unsigned int nbytes) | 232 | struct scatterlist *src, unsigned int nbytes) |
| @@ -450,20 +262,16 @@ static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
| 450 | return lrw_crypt(desc, dst, src, nbytes, &req); | 262 | return lrw_crypt(desc, dst, src, nbytes, &req); |
| 451 | } | 263 | } |
| 452 | 264 | ||
| 453 | static void lrw_exit_tfm(struct crypto_tfm *tfm) | 265 | void lrw_twofish_exit_tfm(struct crypto_tfm *tfm) |
| 454 | { | 266 | { |
| 455 | struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm); | 267 | struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm); |
| 456 | 268 | ||
| 457 | lrw_free_table(&ctx->lrw_table); | 269 | lrw_free_table(&ctx->lrw_table); |
| 458 | } | 270 | } |
| 271 | EXPORT_SYMBOL_GPL(lrw_twofish_exit_tfm); | ||
| 459 | 272 | ||
| 460 | struct twofish_xts_ctx { | 273 | int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, |
| 461 | struct twofish_ctx tweak_ctx; | 274 | unsigned int keylen) |
| 462 | struct twofish_ctx crypt_ctx; | ||
| 463 | }; | ||
| 464 | |||
| 465 | static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 466 | unsigned int keylen) | ||
| 467 | { | 275 | { |
| 468 | struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm); | 276 | struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm); |
| 469 | u32 *flags = &tfm->crt_flags; | 277 | u32 *flags = &tfm->crt_flags; |
| @@ -486,6 +294,7 @@ static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
| 486 | return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2, | 294 | return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2, |
| 487 | flags); | 295 | flags); |
| 488 | } | 296 | } |
| 297 | EXPORT_SYMBOL_GPL(xts_twofish_setkey); | ||
| 489 | 298 | ||
| 490 | static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 299 | static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 491 | struct scatterlist *src, unsigned int nbytes) | 300 | struct scatterlist *src, unsigned int nbytes) |
| @@ -596,7 +405,7 @@ static struct crypto_alg tf_algs[5] = { { | |||
| 596 | .cra_type = &crypto_blkcipher_type, | 405 | .cra_type = &crypto_blkcipher_type, |
| 597 | .cra_module = THIS_MODULE, | 406 | .cra_module = THIS_MODULE, |
| 598 | .cra_list = LIST_HEAD_INIT(tf_algs[3].cra_list), | 407 | .cra_list = LIST_HEAD_INIT(tf_algs[3].cra_list), |
| 599 | .cra_exit = lrw_exit_tfm, | 408 | .cra_exit = lrw_twofish_exit_tfm, |
| 600 | .cra_u = { | 409 | .cra_u = { |
| 601 | .blkcipher = { | 410 | .blkcipher = { |
| 602 | .min_keysize = TF_MIN_KEY_SIZE + TF_BLOCK_SIZE, | 411 | .min_keysize = TF_MIN_KEY_SIZE + TF_BLOCK_SIZE, |
diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/arch/x86/include/asm/crypto/ablk_helper.h new file mode 100644 index 000000000000..4f93df50c23e --- /dev/null +++ b/arch/x86/include/asm/crypto/ablk_helper.h | |||
| @@ -0,0 +1,31 @@ | |||
| 1 | /* | ||
| 2 | * Shared async block cipher helpers | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef _CRYPTO_ABLK_HELPER_H | ||
| 6 | #define _CRYPTO_ABLK_HELPER_H | ||
| 7 | |||
| 8 | #include <linux/crypto.h> | ||
| 9 | #include <linux/kernel.h> | ||
| 10 | #include <crypto/cryptd.h> | ||
| 11 | |||
| 12 | struct async_helper_ctx { | ||
| 13 | struct cryptd_ablkcipher *cryptd_tfm; | ||
| 14 | }; | ||
| 15 | |||
| 16 | extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 17 | unsigned int key_len); | ||
| 18 | |||
| 19 | extern int __ablk_encrypt(struct ablkcipher_request *req); | ||
| 20 | |||
| 21 | extern int ablk_encrypt(struct ablkcipher_request *req); | ||
| 22 | |||
| 23 | extern int ablk_decrypt(struct ablkcipher_request *req); | ||
| 24 | |||
| 25 | extern void ablk_exit(struct crypto_tfm *tfm); | ||
| 26 | |||
| 27 | extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name); | ||
| 28 | |||
| 29 | extern int ablk_init(struct crypto_tfm *tfm); | ||
| 30 | |||
| 31 | #endif /* _CRYPTO_ABLK_HELPER_H */ | ||
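The implementation behind these declarations is added elsewhere in this series; the expected contract, not guaranteed by this hunk alone, is that ablk_init() builds the inner algorithm name by prefixing "__driver-" to the outer algorithm's cra_driver_name and allocating a cryptd instance of it, which is why the internal twofish entries are registered as "__driver-ecb-twofish-avx" and friends. A driver whose inner name does not follow that pattern would call ablk_init_common() directly, as in this hypothetical sketch (the foo names are invented):

#include <asm/crypto/ablk_helper.h>

static int ablk_foo_init(struct crypto_tfm *tfm)
{
	/* Bind the outer async alg to an explicitly named internal one */
	return ablk_init_common(tfm, "__driver-ecb-foo-avx");
}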
diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/crypto/aes.h index 80545a1cbe39..80545a1cbe39 100644 --- a/arch/x86/include/asm/aes.h +++ b/arch/x86/include/asm/crypto/aes.h | |||
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h new file mode 100644 index 000000000000..3e408bddc96f --- /dev/null +++ b/arch/x86/include/asm/crypto/glue_helper.h | |||
| @@ -0,0 +1,115 @@ | |||
| 1 | /* | ||
| 2 | * Shared glue code for 128bit block ciphers | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef _CRYPTO_GLUE_HELPER_H | ||
| 6 | #define _CRYPTO_GLUE_HELPER_H | ||
| 7 | |||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/crypto.h> | ||
| 10 | #include <asm/i387.h> | ||
| 11 | #include <crypto/b128ops.h> | ||
| 12 | |||
| 13 | typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src); | ||
| 14 | typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src); | ||
| 15 | typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src, | ||
| 16 | u128 *iv); | ||
| 17 | |||
| 18 | #define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn)) | ||
| 19 | #define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn)) | ||
| 20 | #define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn)) | ||
| 21 | |||
| 22 | struct common_glue_func_entry { | ||
| 23 | unsigned int num_blocks; /* number of blocks that @fn will process */ | ||
| 24 | union { | ||
| 25 | common_glue_func_t ecb; | ||
| 26 | common_glue_cbc_func_t cbc; | ||
| 27 | common_glue_ctr_func_t ctr; | ||
| 28 | } fn_u; | ||
| 29 | }; | ||
| 30 | |||
| 31 | struct common_glue_ctx { | ||
| 32 | unsigned int num_funcs; | ||
| 33 | int fpu_blocks_limit; /* -1 means fpu not needed at all */ | ||
| 34 | |||
| 35 | /* | ||
| 36 | * The first funcs entry must have the largest num_blocks and the last | ||
| 37 | * funcs entry must have num_blocks == 1! | ||
| 38 | */ | ||
| 39 | struct common_glue_func_entry funcs[]; | ||
| 40 | }; | ||
| 41 | |||
| 42 | static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit, | ||
| 43 | struct blkcipher_desc *desc, | ||
| 44 | bool fpu_enabled, unsigned int nbytes) | ||
| 45 | { | ||
| 46 | if (likely(fpu_blocks_limit < 0)) | ||
| 47 | return false; | ||
| 48 | |||
| 49 | if (fpu_enabled) | ||
| 50 | return true; | ||
| 51 | |||
| 52 | /* | ||
| 53 | * Vector registers are only used when the chunk to be processed is | ||
| 54 | * large enough, so do not enable the FPU until it is necessary. | ||
| 55 | */ | ||
| 56 | if (nbytes < bsize * (unsigned int)fpu_blocks_limit) | ||
| 57 | return false; | ||
| 58 | |||
| 59 | if (desc) { | ||
| 60 | /* prevent sleeping if FPU is in use */ | ||
| 61 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 62 | } | ||
| 63 | |||
| 64 | kernel_fpu_begin(); | ||
| 65 | return true; | ||
| 66 | } | ||
| 67 | |||
| 68 | static inline void glue_fpu_end(bool fpu_enabled) | ||
| 69 | { | ||
| 70 | if (fpu_enabled) | ||
| 71 | kernel_fpu_end(); | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline void u128_to_be128(be128 *dst, const u128 *src) | ||
| 75 | { | ||
| 76 | dst->a = cpu_to_be64(src->a); | ||
| 77 | dst->b = cpu_to_be64(src->b); | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline void be128_to_u128(u128 *dst, const be128 *src) | ||
| 81 | { | ||
| 82 | dst->a = be64_to_cpu(src->a); | ||
| 83 | dst->b = be64_to_cpu(src->b); | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline void u128_inc(u128 *i) | ||
| 87 | { | ||
| 88 | i->b++; | ||
| 89 | if (!i->b) | ||
| 90 | i->a++; | ||
| 91 | } | ||
| 92 | |||
| 93 | extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, | ||
| 94 | struct blkcipher_desc *desc, | ||
| 95 | struct scatterlist *dst, | ||
| 96 | struct scatterlist *src, unsigned int nbytes); | ||
| 97 | |||
| 98 | extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn, | ||
| 99 | struct blkcipher_desc *desc, | ||
| 100 | struct scatterlist *dst, | ||
| 101 | struct scatterlist *src, | ||
| 102 | unsigned int nbytes); | ||
| 103 | |||
| 104 | extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, | ||
| 105 | struct blkcipher_desc *desc, | ||
| 106 | struct scatterlist *dst, | ||
| 107 | struct scatterlist *src, | ||
| 108 | unsigned int nbytes); | ||
| 109 | |||
| 110 | extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, | ||
| 111 | struct blkcipher_desc *desc, | ||
| 112 | struct scatterlist *dst, | ||
| 113 | struct scatterlist *src, unsigned int nbytes); | ||
| 114 | |||
| 115 | #endif /* _CRYPTO_GLUE_HELPER_H */ | ||
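The whole dispatch contract lives in the funcs[] table: the glue_*_crypt_128bit() routines try entries in order, so the widest path must come first and a num_blocks == 1 fallback must come last, while fpu_blocks_limit decides when glue_fpu_begin() is willing to pay for kernel_fpu_begin(). A hedged sketch for a hypothetical cipher with an 8-way AVX path (all foo_* names are invented):

#include <linux/linkage.h>
#include <asm/crypto/glue_helper.h>

struct foo_ctx;

/* Hypothetical primitives: one-block scalar path and 8-way AVX path */
asmlinkage void foo_enc_blk(struct foo_ctx *ctx, u8 *dst, const u8 *src);
asmlinkage void foo_enc_blk_8way(struct foo_ctx *ctx, u8 *dst, const u8 *src);

static const struct common_glue_ctx foo_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = 8,	/* touch the FPU only for >= 8 blocks */

	.funcs = { {
		.num_blocks = 8,	/* widest path first */
		.fn_u = { .ecb = GLUE_FUNC_CAST(foo_enc_blk_8way) }
	}, {
		.num_blocks = 1,	/* scalar fallback, must be last */
		.fn_u = { .ecb = GLUE_FUNC_CAST(foo_enc_blk) }
	} }
};

/* ECB entry point: the helper picks the widest function that still fits */
static int foo_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
			   struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&foo_enc, desc, dst, src, nbytes);
}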
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h new file mode 100644 index 000000000000..432deedd2945 --- /dev/null +++ b/arch/x86/include/asm/crypto/serpent-avx.h | |||
| @@ -0,0 +1,32 @@ | |||
| 1 | #ifndef ASM_X86_SERPENT_AVX_H | ||
| 2 | #define ASM_X86_SERPENT_AVX_H | ||
| 3 | |||
| 4 | #include <linux/crypto.h> | ||
| 5 | #include <crypto/serpent.h> | ||
| 6 | |||
| 7 | #define SERPENT_PARALLEL_BLOCKS 8 | ||
| 8 | |||
| 9 | asmlinkage void __serpent_enc_blk_8way_avx(struct serpent_ctx *ctx, u8 *dst, | ||
| 10 | const u8 *src, bool xor); | ||
| 11 | asmlinkage void serpent_dec_blk_8way_avx(struct serpent_ctx *ctx, u8 *dst, | ||
| 12 | const u8 *src); | ||
| 13 | |||
| 14 | static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, | ||
| 15 | const u8 *src) | ||
| 16 | { | ||
| 17 | __serpent_enc_blk_8way_avx(ctx, dst, src, false); | ||
| 18 | } | ||
| 19 | |||
| 20 | static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, | ||
| 21 | const u8 *src) | ||
| 22 | { | ||
| 23 | __serpent_enc_blk_8way_avx(ctx, dst, src, true); | ||
| 24 | } | ||
| 25 | |||
| 26 | static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, | ||
| 27 | const u8 *src) | ||
| 28 | { | ||
| 29 | serpent_dec_blk_8way_avx(ctx, dst, src); | ||
| 30 | } | ||
| 31 | |||
| 32 | #endif | ||
diff --git a/arch/x86/include/asm/serpent.h b/arch/x86/include/asm/crypto/serpent-sse2.h index d3ef63fe0c81..e6e77dffbdab 100644 --- a/arch/x86/include/asm/serpent.h +++ b/arch/x86/include/asm/crypto/serpent-sse2.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | #ifndef ASM_X86_SERPENT_H | 1 | #ifndef ASM_X86_SERPENT_SSE2_H |
| 2 | #define ASM_X86_SERPENT_H | 2 | #define ASM_X86_SERPENT_SSE2_H |
| 3 | 3 | ||
| 4 | #include <linux/crypto.h> | 4 | #include <linux/crypto.h> |
| 5 | #include <crypto/serpent.h> | 5 | #include <crypto/serpent.h> |
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h new file mode 100644 index 000000000000..9d2c514bd5f9 --- /dev/null +++ b/arch/x86/include/asm/crypto/twofish.h | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | #ifndef ASM_X86_TWOFISH_H | ||
| 2 | #define ASM_X86_TWOFISH_H | ||
| 3 | |||
| 4 | #include <linux/crypto.h> | ||
| 5 | #include <crypto/twofish.h> | ||
| 6 | #include <crypto/lrw.h> | ||
| 7 | #include <crypto/b128ops.h> | ||
| 8 | |||
| 9 | struct twofish_lrw_ctx { | ||
| 10 | struct lrw_table_ctx lrw_table; | ||
| 11 | struct twofish_ctx twofish_ctx; | ||
| 12 | }; | ||
| 13 | |||
| 14 | struct twofish_xts_ctx { | ||
| 15 | struct twofish_ctx tweak_ctx; | ||
| 16 | struct twofish_ctx crypt_ctx; | ||
| 17 | }; | ||
| 18 | |||
| 19 | /* regular block cipher functions from twofish_x86_64 module */ | ||
| 20 | asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, | ||
| 21 | const u8 *src); | ||
| 22 | asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, | ||
| 23 | const u8 *src); | ||
| 24 | |||
| 25 | /* 3-way parallel cipher functions */ | ||
| 26 | asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, | ||
| 27 | const u8 *src, bool xor); | ||
| 28 | asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, | ||
| 29 | const u8 *src); | ||
| 30 | |||
| 31 | /* helpers from twofish_x86_64-3way module */ | ||
| 32 | extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src); | ||
| 33 | extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, | ||
| 34 | u128 *iv); | ||
| 35 | extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, | ||
| 36 | u128 *iv); | ||
| 37 | |||
| 38 | extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 39 | unsigned int keylen); | ||
| 40 | |||
| 41 | extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm); | ||
| 42 | |||
| 43 | extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 44 | unsigned int keylen); | ||
| 45 | |||
| 46 | #endif /* ASM_X86_TWOFISH_H */ | ||
diff --git a/crypto/Kconfig b/crypto/Kconfig index 8e84225c096b..a3238051b03e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -174,6 +174,16 @@ config CRYPTO_TEST | |||
| 174 | help | 174 | help |
| 175 | Quick & dirty crypto test module. | 175 | Quick & dirty crypto test module. |
| 176 | 176 | ||
| 177 | config CRYPTO_ABLK_HELPER_X86 | ||
| 178 | tristate | ||
| 179 | depends on X86 | ||
| 180 | select CRYPTO_CRYPTD | ||
| 181 | |||
| 182 | config CRYPTO_GLUE_HELPER_X86 | ||
| 183 | tristate | ||
| 184 | depends on X86 | ||
| 185 | select CRYPTO_ALGAPI | ||
| 186 | |||
| 177 | comment "Authenticated Encryption with Associated Data" | 187 | comment "Authenticated Encryption with Associated Data" |
| 178 | 188 | ||
| 179 | config CRYPTO_CCM | 189 | config CRYPTO_CCM |
| @@ -552,6 +562,7 @@ config CRYPTO_AES_NI_INTEL | |||
| 552 | select CRYPTO_AES_X86_64 if 64BIT | 562 | select CRYPTO_AES_X86_64 if 64BIT |
| 553 | select CRYPTO_AES_586 if !64BIT | 563 | select CRYPTO_AES_586 if !64BIT |
| 554 | select CRYPTO_CRYPTD | 564 | select CRYPTO_CRYPTD |
| 565 | select CRYPTO_ABLK_HELPER_X86 | ||
| 555 | select CRYPTO_ALGAPI | 566 | select CRYPTO_ALGAPI |
| 556 | help | 567 | help |
| 557 | Use Intel AES-NI instructions for AES algorithm. | 568 | Use Intel AES-NI instructions for AES algorithm. |
| @@ -593,7 +604,7 @@ config CRYPTO_ANUBIS | |||
| 593 | 604 | ||
| 594 | config CRYPTO_ARC4 | 605 | config CRYPTO_ARC4 |
| 595 | tristate "ARC4 cipher algorithm" | 606 | tristate "ARC4 cipher algorithm" |
| 596 | select CRYPTO_ALGAPI | 607 | select CRYPTO_BLKCIPHER |
| 597 | help | 608 | help |
| 598 | ARC4 cipher algorithm. | 609 | ARC4 cipher algorithm. |
| 599 | 610 | ||
| @@ -660,6 +671,7 @@ config CRYPTO_CAMELLIA_X86_64 | |||
| 660 | depends on X86 && 64BIT | 671 | depends on X86 && 64BIT |
| 661 | depends on CRYPTO | 672 | depends on CRYPTO |
| 662 | select CRYPTO_ALGAPI | 673 | select CRYPTO_ALGAPI |
| 674 | select CRYPTO_GLUE_HELPER_X86 | ||
| 663 | select CRYPTO_LRW | 675 | select CRYPTO_LRW |
| 664 | select CRYPTO_XTS | 676 | select CRYPTO_XTS |
| 665 | help | 677 | help |
| @@ -786,6 +798,8 @@ config CRYPTO_SERPENT_SSE2_X86_64 | |||
| 786 | depends on X86 && 64BIT | 798 | depends on X86 && 64BIT |
| 787 | select CRYPTO_ALGAPI | 799 | select CRYPTO_ALGAPI |
| 788 | select CRYPTO_CRYPTD | 800 | select CRYPTO_CRYPTD |
| 801 | select CRYPTO_ABLK_HELPER_X86 | ||
| 802 | select CRYPTO_GLUE_HELPER_X86 | ||
| 789 | select CRYPTO_SERPENT | 803 | select CRYPTO_SERPENT |
| 790 | select CRYPTO_LRW | 804 | select CRYPTO_LRW |
| 791 | select CRYPTO_XTS | 805 | select CRYPTO_XTS |
| @@ -806,6 +820,8 @@ config CRYPTO_SERPENT_SSE2_586 | |||
| 806 | depends on X86 && !64BIT | 820 | depends on X86 && !64BIT |
| 807 | select CRYPTO_ALGAPI | 821 | select CRYPTO_ALGAPI |
| 808 | select CRYPTO_CRYPTD | 822 | select CRYPTO_CRYPTD |
| 823 | select CRYPTO_ABLK_HELPER_X86 | ||
| 824 | select CRYPTO_GLUE_HELPER_X86 | ||
| 809 | select CRYPTO_SERPENT | 825 | select CRYPTO_SERPENT |
| 810 | select CRYPTO_LRW | 826 | select CRYPTO_LRW |
| 811 | select CRYPTO_XTS | 827 | select CRYPTO_XTS |
| @@ -821,6 +837,28 @@ config CRYPTO_SERPENT_SSE2_586 | |||
| 821 | See also: | 837 | See also: |
| 822 | <http://www.cl.cam.ac.uk/~rja14/serpent.html> | 838 | <http://www.cl.cam.ac.uk/~rja14/serpent.html> |
| 823 | 839 | ||
| 840 | config CRYPTO_SERPENT_AVX_X86_64 | ||
| 841 | tristate "Serpent cipher algorithm (x86_64/AVX)" | ||
| 842 | depends on X86 && 64BIT | ||
| 843 | select CRYPTO_ALGAPI | ||
| 844 | select CRYPTO_CRYPTD | ||
| 845 | select CRYPTO_ABLK_HELPER_X86 | ||
| 846 | select CRYPTO_GLUE_HELPER_X86 | ||
| 847 | select CRYPTO_SERPENT | ||
| 848 | select CRYPTO_LRW | ||
| 849 | select CRYPTO_XTS | ||
| 850 | help | ||
| 851 | Serpent cipher algorithm, by Anderson, Biham & Knudsen. | ||
| 852 | |||
| 853 | Keys are allowed to be from 0 to 256 bits in length, in steps | ||
| 854 | of 8 bits. | ||
| 855 | |||
| 856 | This module provides the Serpent cipher algorithm that processes | ||
| 857 | eight blocks in parallel using the AVX instruction set. | ||
| 858 | |||
| 859 | See also: | ||
| 860 | <http://www.cl.cam.ac.uk/~rja14/serpent.html> | ||
| 861 | |||
| 824 | config CRYPTO_TEA | 862 | config CRYPTO_TEA |
| 825 | tristate "TEA, XTEA and XETA cipher algorithms" | 863 | tristate "TEA, XTEA and XETA cipher algorithms" |
| 826 | select CRYPTO_ALGAPI | 864 | select CRYPTO_ALGAPI |
| @@ -897,6 +935,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY | |||
| 897 | select CRYPTO_ALGAPI | 935 | select CRYPTO_ALGAPI |
| 898 | select CRYPTO_TWOFISH_COMMON | 936 | select CRYPTO_TWOFISH_COMMON |
| 899 | select CRYPTO_TWOFISH_X86_64 | 937 | select CRYPTO_TWOFISH_X86_64 |
| 938 | select CRYPTO_GLUE_HELPER_X86 | ||
| 900 | select CRYPTO_LRW | 939 | select CRYPTO_LRW |
| 901 | select CRYPTO_XTS | 940 | select CRYPTO_XTS |
| 902 | help | 941 | help |
| @@ -913,6 +952,32 @@ config CRYPTO_TWOFISH_X86_64_3WAY | |||
| 913 | See also: | 952 | See also: |
| 914 | <http://www.schneier.com/twofish.html> | 953 | <http://www.schneier.com/twofish.html> |
| 915 | 954 | ||
| 955 | config CRYPTO_TWOFISH_AVX_X86_64 | ||
| 956 | tristate "Twofish cipher algorithm (x86_64/AVX)" | ||
| 957 | depends on X86 && 64BIT | ||
| 958 | select CRYPTO_ALGAPI | ||
| 959 | select CRYPTO_CRYPTD | ||
| 960 | select CRYPTO_ABLK_HELPER_X86 | ||
| 961 | select CRYPTO_GLUE_HELPER_X86 | ||
| 962 | select CRYPTO_TWOFISH_COMMON | ||
| 963 | select CRYPTO_TWOFISH_X86_64 | ||
| 964 | select CRYPTO_TWOFISH_X86_64_3WAY | ||
| 965 | select CRYPTO_LRW | ||
| 966 | select CRYPTO_XTS | ||
| 967 | help | ||
| 968 | Twofish cipher algorithm (x86_64/AVX). | ||
| 969 | |||
| 970 | Twofish was submitted as an AES (Advanced Encryption Standard) | ||
| 971 | candidate cipher by researchers at CounterPane Systems. It is a | ||
| 972 | 16-round block cipher supporting key sizes of 128, 192, and 256 | ||
| 973 | bits. | ||
| 974 | |||
| 975 | This module provides the Twofish cipher algorithm that processes | ||
| 976 | eight blocks in parallel using the AVX instruction set. | ||
| 977 | |||
| 978 | See also: | ||
| 979 | <http://www.schneier.com/twofish.html> | ||
| 980 | |||
| 916 | comment "Compression" | 981 | comment "Compression" |
| 917 | 982 | ||
| 918 | config CRYPTO_DEFLATE | 983 | config CRYPTO_DEFLATE |
diff --git a/crypto/algapi.c b/crypto/algapi.c index 056571b85445..c3b9bfeeb7ff 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
| @@ -24,22 +24,6 @@ | |||
| 24 | 24 | ||
| 25 | static LIST_HEAD(crypto_template_list); | 25 | static LIST_HEAD(crypto_template_list); |
| 26 | 26 | ||
| 27 | void crypto_larval_error(const char *name, u32 type, u32 mask) | ||
| 28 | { | ||
| 29 | struct crypto_alg *alg; | ||
| 30 | |||
| 31 | alg = crypto_alg_lookup(name, type, mask); | ||
| 32 | |||
| 33 | if (alg) { | ||
| 34 | if (crypto_is_larval(alg)) { | ||
| 35 | struct crypto_larval *larval = (void *)alg; | ||
| 36 | complete_all(&larval->completion); | ||
| 37 | } | ||
| 38 | crypto_mod_put(alg); | ||
| 39 | } | ||
| 40 | } | ||
| 41 | EXPORT_SYMBOL_GPL(crypto_larval_error); | ||
| 42 | |||
| 43 | static inline int crypto_set_driver_name(struct crypto_alg *alg) | 27 | static inline int crypto_set_driver_name(struct crypto_alg *alg) |
| 44 | { | 28 | { |
| 45 | static const char suffix[] = "-generic"; | 29 | static const char suffix[] = "-generic"; |
| @@ -295,7 +279,6 @@ found: | |||
| 295 | continue; | 279 | continue; |
| 296 | 280 | ||
| 297 | larval->adult = alg; | 281 | larval->adult = alg; |
| 298 | complete_all(&larval->completion); | ||
| 299 | continue; | 282 | continue; |
| 300 | } | 283 | } |
| 301 | 284 | ||
diff --git a/crypto/algboss.c b/crypto/algboss.c index 791d194958fa..769219b29309 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <crypto/internal/aead.h> | 13 | #include <crypto/internal/aead.h> |
| 14 | #include <linux/completion.h> | ||
| 14 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
| 15 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| @@ -47,6 +48,8 @@ struct cryptomgr_param { | |||
| 47 | char larval[CRYPTO_MAX_ALG_NAME]; | 48 | char larval[CRYPTO_MAX_ALG_NAME]; |
| 48 | char template[CRYPTO_MAX_ALG_NAME]; | 49 | char template[CRYPTO_MAX_ALG_NAME]; |
| 49 | 50 | ||
| 51 | struct completion *completion; | ||
| 52 | |||
| 50 | u32 otype; | 53 | u32 otype; |
| 51 | u32 omask; | 54 | u32 omask; |
| 52 | }; | 55 | }; |
| @@ -66,7 +69,7 @@ static int cryptomgr_probe(void *data) | |||
| 66 | 69 | ||
| 67 | tmpl = crypto_lookup_template(param->template); | 70 | tmpl = crypto_lookup_template(param->template); |
| 68 | if (!tmpl) | 71 | if (!tmpl) |
| 69 | goto err; | 72 | goto out; |
| 70 | 73 | ||
| 71 | do { | 74 | do { |
| 72 | if (tmpl->create) { | 75 | if (tmpl->create) { |
| @@ -83,16 +86,10 @@ static int cryptomgr_probe(void *data) | |||
| 83 | 86 | ||
| 84 | crypto_tmpl_put(tmpl); | 87 | crypto_tmpl_put(tmpl); |
| 85 | 88 | ||
| 86 | if (err) | ||
| 87 | goto err; | ||
| 88 | |||
| 89 | out: | 89 | out: |
| 90 | complete_all(param->completion); | ||
| 90 | kfree(param); | 91 | kfree(param); |
| 91 | module_put_and_exit(0); | 92 | module_put_and_exit(0); |
| 92 | |||
| 93 | err: | ||
| 94 | crypto_larval_error(param->larval, param->otype, param->omask); | ||
| 95 | goto out; | ||
| 96 | } | 93 | } |
| 97 | 94 | ||
| 98 | static int cryptomgr_schedule_probe(struct crypto_larval *larval) | 95 | static int cryptomgr_schedule_probe(struct crypto_larval *larval) |
| @@ -192,10 +189,14 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) | |||
| 192 | 189 | ||
| 193 | memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); | 190 | memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); |
| 194 | 191 | ||
| 192 | param->completion = &larval->completion; | ||
| 193 | |||
| 195 | thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe"); | 194 | thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe"); |
| 196 | if (IS_ERR(thread)) | 195 | if (IS_ERR(thread)) |
| 197 | goto err_free_param; | 196 | goto err_free_param; |
| 198 | 197 | ||
| 198 | wait_for_completion_interruptible(&larval->completion); | ||
| 199 | |||
| 199 | return NOTIFY_STOP; | 200 | return NOTIFY_STOP; |
| 200 | 201 | ||
| 201 | err_free_param: | 202 | err_free_param: |
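Taken together, the algapi and algboss hunks replace the old crypto_larval_error() signalling with a plain synchronous handshake: the probe thread now always completes the larval's completion, success or failure, and the scheduler blocks on it before returning, so the waiter can re-inspect the larval itself. The pattern in isolation looks roughly like this (do_probe_work() is a hypothetical stand-in for the template lookup and instantiation):

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct probe_param {
	struct completion *completion;	/* owned by the requester */
	/* ... template name, type and mask would live here ... */
};

static void do_probe_work(struct probe_param *param)
{
	/* hypothetical stand-in for template lookup and instantiation */
}

static int probe_thread(void *data)
{
	struct probe_param *param = data;

	do_probe_work(param);		 /* may succeed or fail */
	complete_all(param->completion); /* always wake the requester */
	kfree(param);
	return 0;
}

static int schedule_probe(struct completion *completion)
{
	struct probe_param *param;
	struct task_struct *thread;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return -ENOMEM;
	param->completion = completion;

	thread = kthread_run(probe_thread, param, "probe");
	if (IS_ERR(thread)) {
		kfree(param);
		return PTR_ERR(thread);
	}

	/* block until the thread has signalled, whatever the outcome */
	wait_for_completion_interruptible(completion);
	return 0;
}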
diff --git a/crypto/arc4.c b/crypto/arc4.c index 0d12a96da1d8..5a772c3657d5 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c | |||
| @@ -11,17 +11,19 @@ | |||
| 11 | * (at your option) any later version. | 11 | * (at your option) any later version. |
| 12 | * | 12 | * |
| 13 | */ | 13 | */ |
| 14 | |||
| 14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 16 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
| 18 | #include <crypto/algapi.h> | ||
| 17 | 19 | ||
| 18 | #define ARC4_MIN_KEY_SIZE 1 | 20 | #define ARC4_MIN_KEY_SIZE 1 |
| 19 | #define ARC4_MAX_KEY_SIZE 256 | 21 | #define ARC4_MAX_KEY_SIZE 256 |
| 20 | #define ARC4_BLOCK_SIZE 1 | 22 | #define ARC4_BLOCK_SIZE 1 |
| 21 | 23 | ||
| 22 | struct arc4_ctx { | 24 | struct arc4_ctx { |
| 23 | u8 S[256]; | 25 | u32 S[256]; |
| 24 | u8 x, y; | 26 | u32 x, y; |
| 25 | }; | 27 | }; |
| 26 | 28 | ||
| 27 | static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, | 29 | static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, |
| @@ -37,7 +39,7 @@ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
| 37 | ctx->S[i] = i; | 39 | ctx->S[i] = i; |
| 38 | 40 | ||
| 39 | for (i = 0; i < 256; i++) { | 41 | for (i = 0; i < 256; i++) { |
| 40 | u8 a = ctx->S[i]; | 42 | u32 a = ctx->S[i]; |
| 41 | j = (j + in_key[k] + a) & 0xff; | 43 | j = (j + in_key[k] + a) & 0xff; |
| 42 | ctx->S[i] = ctx->S[j]; | 44 | ctx->S[i] = ctx->S[j]; |
| 43 | ctx->S[j] = a; | 45 | ctx->S[j] = a; |
| @@ -48,51 +50,114 @@ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
| 48 | return 0; | 50 | return 0; |
| 49 | } | 51 | } |
| 50 | 52 | ||
| 51 | static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 53 | static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, |
| 54 | unsigned int len) | ||
| 52 | { | 55 | { |
| 53 | struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); | 56 | u32 *const S = ctx->S; |
| 57 | u32 x, y, a, b; | ||
| 58 | u32 ty, ta, tb; | ||
| 59 | |||
| 60 | if (len == 0) | ||
| 61 | return; | ||
| 54 | 62 | ||
| 55 | u8 *const S = ctx->S; | 63 | x = ctx->x; |
| 56 | u8 x = ctx->x; | 64 | y = ctx->y; |
| 57 | u8 y = ctx->y; | ||
| 58 | u8 a, b; | ||
| 59 | 65 | ||
| 60 | a = S[x]; | 66 | a = S[x]; |
| 61 | y = (y + a) & 0xff; | 67 | y = (y + a) & 0xff; |
| 62 | b = S[y]; | 68 | b = S[y]; |
| 63 | S[x] = b; | 69 | |
| 64 | S[y] = a; | 70 | do { |
| 65 | x = (x + 1) & 0xff; | 71 | S[y] = a; |
| 66 | *out++ = *in ^ S[(a + b) & 0xff]; | 72 | a = (a + b) & 0xff; |
| 73 | S[x] = b; | ||
| 74 | x = (x + 1) & 0xff; | ||
| 75 | ta = S[x]; | ||
| 76 | ty = (y + ta) & 0xff; | ||
| 77 | tb = S[ty]; | ||
| 78 | *out++ = *in++ ^ S[a]; | ||
| 79 | if (--len == 0) | ||
| 80 | break; | ||
| 81 | y = ty; | ||
| 82 | a = ta; | ||
| 83 | b = tb; | ||
| 84 | } while (true); | ||
| 67 | 85 | ||
| 68 | ctx->x = x; | 86 | ctx->x = x; |
| 69 | ctx->y = y; | 87 | ctx->y = y; |
| 70 | } | 88 | } |
| 71 | 89 | ||
| 72 | static struct crypto_alg arc4_alg = { | 90 | static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
| 91 | { | ||
| 92 | arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1); | ||
| 93 | } | ||
| 94 | |||
| 95 | static int ecb_arc4_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 96 | struct scatterlist *src, unsigned int nbytes) | ||
| 97 | { | ||
| 98 | struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 99 | struct blkcipher_walk walk; | ||
| 100 | int err; | ||
| 101 | |||
| 102 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 103 | |||
| 104 | err = blkcipher_walk_virt(desc, &walk); | ||
| 105 | |||
| 106 | while (walk.nbytes > 0) { | ||
| 107 | u8 *wsrc = walk.src.virt.addr; | ||
| 108 | u8 *wdst = walk.dst.virt.addr; | ||
| 109 | |||
| 110 | arc4_crypt(ctx, wdst, wsrc, walk.nbytes); | ||
| 111 | |||
| 112 | err = blkcipher_walk_done(desc, &walk, 0); | ||
| 113 | } | ||
| 114 | |||
| 115 | return err; | ||
| 116 | } | ||
| 117 | |||
| 118 | static struct crypto_alg arc4_algs[2] = { { | ||
| 73 | .cra_name = "arc4", | 119 | .cra_name = "arc4", |
| 74 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 120 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
| 75 | .cra_blocksize = ARC4_BLOCK_SIZE, | 121 | .cra_blocksize = ARC4_BLOCK_SIZE, |
| 76 | .cra_ctxsize = sizeof(struct arc4_ctx), | 122 | .cra_ctxsize = sizeof(struct arc4_ctx), |
| 77 | .cra_module = THIS_MODULE, | 123 | .cra_module = THIS_MODULE, |
| 78 | .cra_list = LIST_HEAD_INIT(arc4_alg.cra_list), | 124 | .cra_u = { |
| 79 | .cra_u = { .cipher = { | 125 | .cipher = { |
| 80 | .cia_min_keysize = ARC4_MIN_KEY_SIZE, | 126 | .cia_min_keysize = ARC4_MIN_KEY_SIZE, |
| 81 | .cia_max_keysize = ARC4_MAX_KEY_SIZE, | 127 | .cia_max_keysize = ARC4_MAX_KEY_SIZE, |
| 82 | .cia_setkey = arc4_set_key, | 128 | .cia_setkey = arc4_set_key, |
| 83 | .cia_encrypt = arc4_crypt, | 129 | .cia_encrypt = arc4_crypt_one, |
| 84 | .cia_decrypt = arc4_crypt } } | 130 | .cia_decrypt = arc4_crypt_one, |
| 85 | }; | 131 | }, |
| 132 | }, | ||
| 133 | }, { | ||
| 134 | .cra_name = "ecb(arc4)", | ||
| 135 | .cra_priority = 100, | ||
| 136 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 137 | .cra_blocksize = ARC4_BLOCK_SIZE, | ||
| 138 | .cra_ctxsize = sizeof(struct arc4_ctx), | ||
| 139 | .cra_alignmask = 0, | ||
| 140 | .cra_type = &crypto_blkcipher_type, | ||
| 141 | .cra_module = THIS_MODULE, | ||
| 142 | .cra_u = { | ||
| 143 | .blkcipher = { | ||
| 144 | .min_keysize = ARC4_MIN_KEY_SIZE, | ||
| 145 | .max_keysize = ARC4_MAX_KEY_SIZE, | ||
| 146 | .setkey = arc4_set_key, | ||
| 147 | .encrypt = ecb_arc4_crypt, | ||
| 148 | .decrypt = ecb_arc4_crypt, | ||
| 149 | }, | ||
| 150 | }, | ||
| 151 | } }; | ||
| 86 | 152 | ||
| 87 | static int __init arc4_init(void) | 153 | static int __init arc4_init(void) |
| 88 | { | 154 | { |
| 89 | return crypto_register_alg(&arc4_alg); | 155 | return crypto_register_algs(arc4_algs, ARRAY_SIZE(arc4_algs)); |
| 90 | } | 156 | } |
| 91 | 157 | ||
| 92 | |||
| 93 | static void __exit arc4_exit(void) | 158 | static void __exit arc4_exit(void) |
| 94 | { | 159 | { |
| 95 | crypto_unregister_alg(&arc4_alg); | 160 | crypto_unregister_algs(arc4_algs, ARRAY_SIZE(arc4_algs)); |
| 96 | } | 161 | } |
| 97 | 162 | ||
| 98 | module_init(arc4_init); | 163 | module_init(arc4_init); |
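The rewritten arc4_crypt() is the textbook RC4 PRGA with the swap split in two and the next round's table loads (ta, ty, tb) hoisted ahead of the output store; widening S, x and y from u8 to u32 avoids partial-register stalls on x86. For reference, here is a user-space transcription of the plain one-byte-per-iteration PRGA; it assumes, as arc4_set_key is expected to arrange (the relevant lines fall outside this hunk), that x starts pre-incremented at 1, in which case both loops emit the same keystream:

#include <stddef.h>
#include <stdint.h>

struct rc4_state {
	uint32_t S[256];	/* permutation, set up by the KSA (elided) */
	uint32_t x, y;		/* x pre-incremented: starts at 1, y at 0 */
};

static void rc4_prga(struct rc4_state *c, uint8_t *out,
		     const uint8_t *in, size_t len)
{
	uint32_t x = c->x, y = c->y, a;

	while (len--) {
		y = (y + c->S[x]) & 0xff;
		a = c->S[x];		/* swap S[x] and S[y] */
		c->S[x] = c->S[y];
		c->S[y] = a;
		*out++ = *in++ ^ c->S[(c->S[x] + c->S[y]) & 0xff];
		x = (x + 1) & 0xff;
	}
	c->x = x;
	c->y = y;
}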
diff --git a/crypto/internal.h b/crypto/internal.h index b865ca1a8613..9ebedae3fb54 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
| @@ -83,7 +83,6 @@ void crypto_exit_compress_ops(struct crypto_tfm *tfm); | |||
| 83 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); | 83 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); |
| 84 | void crypto_larval_kill(struct crypto_alg *alg); | 84 | void crypto_larval_kill(struct crypto_alg *alg); |
| 85 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); | 85 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); |
| 86 | void crypto_larval_error(const char *name, u32 type, u32 mask); | ||
| 87 | void crypto_alg_tested(const char *name, int err); | 86 | void crypto_alg_tested(const char *name, int err); |
| 88 | 87 | ||
| 89 | void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, | 88 | void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, |
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 8f147bff0980..5cf2ccb1540c 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
| @@ -809,7 +809,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec, | |||
| 809 | struct cipher_speed_template *template, | 809 | struct cipher_speed_template *template, |
| 810 | unsigned int tcount, u8 *keysize) | 810 | unsigned int tcount, u8 *keysize) |
| 811 | { | 811 | { |
| 812 | unsigned int ret, i, j, iv_len; | 812 | unsigned int ret, i, j, k, iv_len; |
| 813 | struct tcrypt_result tresult; | 813 | struct tcrypt_result tresult; |
| 814 | const char *key; | 814 | const char *key; |
| 815 | char iv[128]; | 815 | char iv[128]; |
| @@ -883,11 +883,23 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec, | |||
| 883 | } | 883 | } |
| 884 | 884 | ||
| 885 | sg_init_table(sg, TVMEMSIZE); | 885 | sg_init_table(sg, TVMEMSIZE); |
| 886 | sg_set_buf(sg, tvmem[0] + *keysize, | 886 | |
| 887 | k = *keysize + *b_size; | ||
| 888 | if (k > PAGE_SIZE) { | ||
| 889 | sg_set_buf(sg, tvmem[0] + *keysize, | ||
| 887 | PAGE_SIZE - *keysize); | 890 | PAGE_SIZE - *keysize); |
| 888 | for (j = 1; j < TVMEMSIZE; j++) { | 891 | k -= PAGE_SIZE; |
| 889 | sg_set_buf(sg + j, tvmem[j], PAGE_SIZE); | 892 | j = 1; |
| 890 | memset(tvmem[j], 0xff, PAGE_SIZE); | 893 | while (k > PAGE_SIZE) { |
| 894 | sg_set_buf(sg + j, tvmem[j], PAGE_SIZE); | ||
| 895 | memset(tvmem[j], 0xff, PAGE_SIZE); | ||
| 896 | j++; | ||
| 897 | k -= PAGE_SIZE; | ||
| 898 | } | ||
| 899 | sg_set_buf(sg + j, tvmem[j], k); | ||
| 900 | memset(tvmem[j], 0xff, k); | ||
| 901 | } else { | ||
| 902 | sg_set_buf(sg, tvmem[0] + *keysize, *b_size); | ||
| 891 | } | 903 | } |
| 892 | 904 | ||
| 893 | iv_len = crypto_ablkcipher_ivsize(tfm); | 905 | iv_len = crypto_ablkcipher_ivsize(tfm); |
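The test_acipher_speed() hunk above removes the assumption that key plus payload span the full test buffer: the old code always mapped PAGE_SIZE - *keysize bytes in sg entry 0 followed by all TVMEMSIZE whole pages, regardless of the actual request size. The new code walks exactly k = *keysize + *b_size bytes, trimming the key from the first fragment, filling whole pages, and giving the tail its precise remainder. A standalone sketch of that arithmetic (plain userspace C, illustrative only):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Print the scatterlist fragment sizes the tcrypt loop would set up
 * for a given key size and payload size. */
static void split(unsigned int keysize, unsigned int b_size)
{
	unsigned int k = keysize + b_size;
	unsigned int frag = 0;

	if (k <= PAGE_SIZE) {
		printf("frag 0: %u bytes (fits after the key in page 0)\n",
		       b_size);
		return;
	}

	printf("frag %u: %u bytes\n", frag++, PAGE_SIZE - keysize);
	k -= PAGE_SIZE;
	while (k > PAGE_SIZE) {
		printf("frag %u: %u bytes (whole page)\n", frag++, PAGE_SIZE);
		k -= PAGE_SIZE;
	}
	printf("frag %u: %u bytes (tail)\n", frag, k);
}

int main(void)
{
	split(32, 8192);	/* e.g. an 8 KiB request with a 32-byte key */
	return 0;
}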
| @@ -1192,6 +1204,9 @@ static int do_test(int m) | |||
| 1192 | case 109: | 1204 | case 109: |
| 1193 | ret += tcrypt_test("vmac(aes)"); | 1205 | ret += tcrypt_test("vmac(aes)"); |
| 1194 | break; | 1206 | break; |
| 1207 | case 110: | ||
| 1208 | ret += tcrypt_test("hmac(crc32)"); | ||
| 1209 | break; | ||
| 1195 | 1210 | ||
| 1196 | case 150: | 1211 | case 150: |
| 1197 | ret += tcrypt_test("ansi_cprng"); | 1212 | ret += tcrypt_test("ansi_cprng"); |
| @@ -1339,6 +1354,11 @@ static int do_test(int m) | |||
| 1339 | speed_template_32_64); | 1354 | speed_template_32_64); |
| 1340 | break; | 1355 | break; |
| 1341 | 1356 | ||
| 1357 | case 208: | ||
| 1358 | test_cipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0, | ||
| 1359 | speed_template_8); | ||
| 1360 | break; | ||
| 1361 | |||
| 1342 | case 300: | 1362 | case 300: |
| 1343 | /* fall through */ | 1363 | /* fall through */ |
| 1344 | 1364 | ||
| @@ -1512,6 +1532,14 @@ static int do_test(int m) | |||
| 1512 | speed_template_16_24_32); | 1532 | speed_template_16_24_32); |
| 1513 | test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, | 1533 | test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, |
| 1514 | speed_template_16_24_32); | 1534 | speed_template_16_24_32); |
| 1535 | test_acipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, | ||
| 1536 | speed_template_16_24_32); | ||
| 1537 | test_acipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, | ||
| 1538 | speed_template_16_24_32); | ||
| 1539 | test_acipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0, | ||
| 1540 | speed_template_16_24_32); | ||
| 1541 | test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0, | ||
| 1542 | speed_template_16_24_32); | ||
| 1515 | break; | 1543 | break; |
| 1516 | 1544 | ||
| 1517 | case 501: | 1545 | case 501: |
| @@ -1527,6 +1555,18 @@ static int do_test(int m) | |||
| 1527 | test_acipher_speed("cbc(des3_ede)", DECRYPT, sec, | 1555 | test_acipher_speed("cbc(des3_ede)", DECRYPT, sec, |
| 1528 | des3_speed_template, DES3_SPEED_VECTORS, | 1556 | des3_speed_template, DES3_SPEED_VECTORS, |
| 1529 | speed_template_24); | 1557 | speed_template_24); |
| 1558 | test_acipher_speed("cfb(des3_ede)", ENCRYPT, sec, | ||
| 1559 | des3_speed_template, DES3_SPEED_VECTORS, | ||
| 1560 | speed_template_24); | ||
| 1561 | test_acipher_speed("cfb(des3_ede)", DECRYPT, sec, | ||
| 1562 | des3_speed_template, DES3_SPEED_VECTORS, | ||
| 1563 | speed_template_24); | ||
| 1564 | test_acipher_speed("ofb(des3_ede)", ENCRYPT, sec, | ||
| 1565 | des3_speed_template, DES3_SPEED_VECTORS, | ||
| 1566 | speed_template_24); | ||
| 1567 | test_acipher_speed("ofb(des3_ede)", DECRYPT, sec, | ||
| 1568 | des3_speed_template, DES3_SPEED_VECTORS, | ||
| 1569 | speed_template_24); | ||
| 1530 | break; | 1570 | break; |
| 1531 | 1571 | ||
| 1532 | case 502: | 1572 | case 502: |
| @@ -1538,6 +1578,14 @@ static int do_test(int m) | |||
| 1538 | speed_template_8); | 1578 | speed_template_8); |
| 1539 | test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, | 1579 | test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, |
| 1540 | speed_template_8); | 1580 | speed_template_8); |
| 1581 | test_acipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0, | ||
| 1582 | speed_template_8); | ||
| 1583 | test_acipher_speed("cfb(des)", DECRYPT, sec, NULL, 0, | ||
| 1584 | speed_template_8); | ||
| 1585 | test_acipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0, | ||
| 1586 | speed_template_8); | ||
| 1587 | test_acipher_speed("ofb(des)", DECRYPT, sec, NULL, 0, | ||
| 1588 | speed_template_8); | ||
| 1541 | break; | 1589 | break; |
| 1542 | 1590 | ||
| 1543 | case 503: | 1591 | case 503: |
| @@ -1563,6 +1611,34 @@ static int do_test(int m) | |||
| 1563 | speed_template_32_64); | 1611 | speed_template_32_64); |
| 1564 | break; | 1612 | break; |
| 1565 | 1613 | ||
| 1614 | case 504: | ||
| 1615 | test_acipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0, | ||
| 1616 | speed_template_16_24_32); | ||
| 1617 | test_acipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0, | ||
| 1618 | speed_template_16_24_32); | ||
| 1619 | test_acipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0, | ||
| 1620 | speed_template_16_24_32); | ||
| 1621 | test_acipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0, | ||
| 1622 | speed_template_16_24_32); | ||
| 1623 | test_acipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0, | ||
| 1624 | speed_template_16_24_32); | ||
| 1625 | test_acipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0, | ||
| 1626 | speed_template_16_24_32); | ||
| 1627 | test_acipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0, | ||
| 1628 | speed_template_32_40_48); | ||
| 1629 | test_acipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0, | ||
| 1630 | speed_template_32_40_48); | ||
| 1631 | test_acipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0, | ||
| 1632 | speed_template_32_48_64); | ||
| 1633 | test_acipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0, | ||
| 1634 | speed_template_32_48_64); | ||
| 1635 | break; | ||
| 1636 | |||
| 1637 | case 505: | ||
| 1638 | test_acipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0, | ||
| 1639 | speed_template_8); | ||
| 1640 | break; | ||
| 1641 | |||
| 1566 | case 1000: | 1642 | case 1000: |
| 1567 | test_available(); | 1643 | test_available(); |
| 1568 | break; | 1644 | break; |
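All of these do_test() additions are selected through tcrypt's "mode" module parameter: for example, modprobe tcrypt mode=110 runs the new hmac(crc32) self-test, mode=208 and mode=505 time the synchronous and asynchronous "ecb(arc4)" paths, and mode=504 runs the new twofish acipher speed battery (add sec=1 to time in seconds rather than cycle counts). As usual for tcrypt, module init intentionally returns an error after the tests run, so it can be loaded repeatedly.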
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5674878ff6c1..a2ca7431760a 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
| @@ -1534,6 +1534,21 @@ static int alg_test_null(const struct alg_test_desc *desc, | |||
| 1534 | /* Please keep this list sorted by algorithm name. */ | 1534 | /* Please keep this list sorted by algorithm name. */ |
| 1535 | static const struct alg_test_desc alg_test_descs[] = { | 1535 | static const struct alg_test_desc alg_test_descs[] = { |
| 1536 | { | 1536 | { |
| 1537 | .alg = "__cbc-serpent-avx", | ||
| 1538 | .test = alg_test_null, | ||
| 1539 | .suite = { | ||
| 1540 | .cipher = { | ||
| 1541 | .enc = { | ||
| 1542 | .vecs = NULL, | ||
| 1543 | .count = 0 | ||
| 1544 | }, | ||
| 1545 | .dec = { | ||
| 1546 | .vecs = NULL, | ||
| 1547 | .count = 0 | ||
| 1548 | } | ||
| 1549 | } | ||
| 1550 | } | ||
| 1551 | }, { | ||
| 1537 | .alg = "__cbc-serpent-sse2", | 1552 | .alg = "__cbc-serpent-sse2", |
| 1538 | .test = alg_test_null, | 1553 | .test = alg_test_null, |
| 1539 | .suite = { | 1554 | .suite = { |
| @@ -1549,8 +1564,39 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 1549 | } | 1564 | } |
| 1550 | } | 1565 | } |
| 1551 | }, { | 1566 | }, { |
| 1567 | .alg = "__cbc-twofish-avx", | ||
| 1568 | .test = alg_test_null, | ||
| 1569 | .suite = { | ||
| 1570 | .cipher = { | ||
| 1571 | .enc = { | ||
| 1572 | .vecs = NULL, | ||
| 1573 | .count = 0 | ||
| 1574 | }, | ||
| 1575 | .dec = { | ||
| 1576 | .vecs = NULL, | ||
| 1577 | .count = 0 | ||
| 1578 | } | ||
| 1579 | } | ||
| 1580 | } | ||
| 1581 | }, { | ||
| 1552 | .alg = "__driver-cbc-aes-aesni", | 1582 | .alg = "__driver-cbc-aes-aesni", |
| 1553 | .test = alg_test_null, | 1583 | .test = alg_test_null, |
| 1584 | .fips_allowed = 1, | ||
| 1585 | .suite = { | ||
| 1586 | .cipher = { | ||
| 1587 | .enc = { | ||
| 1588 | .vecs = NULL, | ||
| 1589 | .count = 0 | ||
| 1590 | }, | ||
| 1591 | .dec = { | ||
| 1592 | .vecs = NULL, | ||
| 1593 | .count = 0 | ||
| 1594 | } | ||
| 1595 | } | ||
| 1596 | } | ||
| 1597 | }, { | ||
| 1598 | .alg = "__driver-cbc-serpent-avx", | ||
| 1599 | .test = alg_test_null, | ||
| 1554 | .suite = { | 1600 | .suite = { |
| 1555 | .cipher = { | 1601 | .cipher = { |
| 1556 | .enc = { | 1602 | .enc = { |
| @@ -1579,8 +1625,39 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 1579 | } | 1625 | } |
| 1580 | } | 1626 | } |
| 1581 | }, { | 1627 | }, { |
| 1628 | .alg = "__driver-cbc-twofish-avx", | ||
| 1629 | .test = alg_test_null, | ||
| 1630 | .suite = { | ||
| 1631 | .cipher = { | ||
| 1632 | .enc = { | ||
| 1633 | .vecs = NULL, | ||
| 1634 | .count = 0 | ||
| 1635 | }, | ||
| 1636 | .dec = { | ||
| 1637 | .vecs = NULL, | ||
| 1638 | .count = 0 | ||
| 1639 | } | ||
| 1640 | } | ||
| 1641 | } | ||
| 1642 | }, { | ||
| 1582 | .alg = "__driver-ecb-aes-aesni", | 1643 | .alg = "__driver-ecb-aes-aesni", |
| 1583 | .test = alg_test_null, | 1644 | .test = alg_test_null, |
| 1645 | .fips_allowed = 1, | ||
| 1646 | .suite = { | ||
| 1647 | .cipher = { | ||
| 1648 | .enc = { | ||
| 1649 | .vecs = NULL, | ||
| 1650 | .count = 0 | ||
| 1651 | }, | ||
| 1652 | .dec = { | ||
| 1653 | .vecs = NULL, | ||
| 1654 | .count = 0 | ||
| 1655 | } | ||
| 1656 | } | ||
| 1657 | } | ||
| 1658 | }, { | ||
| 1659 | .alg = "__driver-ecb-serpent-avx", | ||
| 1660 | .test = alg_test_null, | ||
| 1584 | .suite = { | 1661 | .suite = { |
| 1585 | .cipher = { | 1662 | .cipher = { |
| 1586 | .enc = { | 1663 | .enc = { |
| @@ -1609,8 +1686,24 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 1609 | } | 1686 | } |
| 1610 | } | 1687 | } |
| 1611 | }, { | 1688 | }, { |
| 1689 | .alg = "__driver-ecb-twofish-avx", | ||
| 1690 | .test = alg_test_null, | ||
| 1691 | .suite = { | ||
| 1692 | .cipher = { | ||
| 1693 | .enc = { | ||
| 1694 | .vecs = NULL, | ||
| 1695 | .count = 0 | ||
| 1696 | }, | ||
| 1697 | .dec = { | ||
| 1698 | .vecs = NULL, | ||
| 1699 | .count = 0 | ||
| 1700 | } | ||
| 1701 | } | ||
| 1702 | } | ||
| 1703 | }, { | ||
| 1612 | .alg = "__ghash-pclmulqdqni", | 1704 | .alg = "__ghash-pclmulqdqni", |
| 1613 | .test = alg_test_null, | 1705 | .test = alg_test_null, |
| 1706 | .fips_allowed = 1, | ||
| 1614 | .suite = { | 1707 | .suite = { |
| 1615 | .hash = { | 1708 | .hash = { |
| 1616 | .vecs = NULL, | 1709 | .vecs = NULL, |
| @@ -1628,6 +1721,42 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 1628 | } | 1721 | } |
| 1629 | } | 1722 | } |
| 1630 | }, { | 1723 | }, { |
| 1724 | .alg = "authenc(hmac(sha1),cbc(aes))", | ||
| 1725 | .test = alg_test_aead, | ||
| 1726 | .fips_allowed = 1, | ||
| 1727 | .suite = { | ||
| 1728 | .aead = { | ||
| 1729 | .enc = { | ||
| 1730 | .vecs = hmac_sha1_aes_cbc_enc_tv_template, | ||
| 1731 | .count = HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS | ||
| 1732 | } | ||
| 1733 | } | ||
| 1734 | } | ||
| 1735 | }, { | ||
| 1736 | .alg = "authenc(hmac(sha256),cbc(aes))", | ||
| 1737 | .test = alg_test_aead, | ||
| 1738 | .fips_allowed = 1, | ||
| 1739 | .suite = { | ||
| 1740 | .aead = { | ||
| 1741 | .enc = { | ||
| 1742 | .vecs = hmac_sha256_aes_cbc_enc_tv_template, | ||
| 1743 | .count = HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS | ||
| 1744 | } | ||
| 1745 | } | ||
| 1746 | } | ||
| 1747 | }, { | ||
| 1748 | .alg = "authenc(hmac(sha512),cbc(aes))", | ||
| 1749 | .test = alg_test_aead, | ||
| 1750 | .fips_allowed = 1, | ||
| 1751 | .suite = { | ||
| 1752 | .aead = { | ||
| 1753 | .enc = { | ||
| 1754 | .vecs = hmac_sha512_aes_cbc_enc_tv_template, | ||
| 1755 | .count = HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS | ||
| 1756 | } | ||
| 1757 | } | ||
| 1758 | } | ||
| 1759 | }, { | ||
| 1631 | .alg = "cbc(aes)", | 1760 | .alg = "cbc(aes)", |
| 1632 | .test = alg_test_skcipher, | 1761 | .test = alg_test_skcipher, |
| 1633 | .fips_allowed = 1, | 1762 | .fips_allowed = 1, |
| @@ -1776,8 +1905,40 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 1776 | } | 1905 | } |
| 1777 | } | 1906 | } |
| 1778 | }, { | 1907 | }, { |
| 1908 | .alg = "cryptd(__driver-cbc-aes-aesni)", | ||
| 1909 | .test = alg_test_null, | ||
| 1910 | .fips_allowed = 1, | ||
| 1911 | .suite = { | ||
| 1912 | .cipher = { | ||
| 1913 | .enc = { | ||
| 1914 | .vecs = NULL, | ||
| 1915 | .count = 0 | ||
| 1916 | }, | ||
| 1917 | .dec = { | ||
| 1918 | .vecs = NULL, | ||
| 1919 | .count = 0 | ||
| 1920 | } | ||
| 1921 | } | ||
| 1922 | } | ||
| 1923 | }, { | ||
| 1779 | .alg = "cryptd(__driver-ecb-aes-aesni)", | 1924 | .alg = "cryptd(__driver-ecb-aes-aesni)", |
| 1780 | .test = alg_test_null, | 1925 | .test = alg_test_null, |
| 1926 | .fips_allowed = 1, | ||
| 1927 | .suite = { | ||
| 1928 | .cipher = { | ||
| 1929 | .enc = { | ||
| 1930 | .vecs = NULL, | ||
| 1931 | .count = 0 | ||
| 1932 | }, | ||
| 1933 | .dec = { | ||
| 1934 | .vecs = NULL, | ||
| 1935 | .count = 0 | ||
| 1936 | } | ||
| 1937 | } | ||
| 1938 | } | ||
| 1939 | }, { | ||
| 1940 | .alg = "cryptd(__driver-ecb-serpent-avx)", | ||
| 1941 | .test = alg_test_null, | ||
| 1781 | .suite = { | 1942 | .suite = { |
| 1782 | .cipher = { | 1943 | .cipher = { |
| 1783 | .enc = { | 1944 | .enc = { |
| @@ -1806,8 +1967,40 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 1806 | } | 1967 | } |
| 1807 | } | 1968 | } |
| 1808 | }, { | 1969 | }, { |
| 1970 | .alg = "cryptd(__driver-ecb-twofish-avx)", | ||
| 1971 | .test = alg_test_null, | ||
| 1972 | .suite = { | ||
| 1973 | .cipher = { | ||
| 1974 | .enc = { | ||
| 1975 | .vecs = NULL, | ||
| 1976 | .count = 0 | ||
| 1977 | }, | ||
| 1978 | .dec = { | ||
| 1979 | .vecs = NULL, | ||
| 1980 | .count = 0 | ||
| 1981 | } | ||
| 1982 | } | ||
| 1983 | } | ||
| 1984 | }, { | ||
| 1985 | .alg = "cryptd(__driver-gcm-aes-aesni)", | ||
| 1986 | .test = alg_test_null, | ||
| 1987 | .fips_allowed = 1, | ||
| 1988 | .suite = { | ||
| 1989 | .cipher = { | ||
| 1990 | .enc = { | ||
| 1991 | .vecs = NULL, | ||
| 1992 | .count = 0 | ||
| 1993 | }, | ||
| 1994 | .dec = { | ||
| 1995 | .vecs = NULL, | ||
| 1996 | .count = 0 | ||
| 1997 | } | ||
| 1998 | } | ||
| 1999 | } | ||
| 2000 | }, { | ||
| 1809 | .alg = "cryptd(__ghash-pclmulqdqni)", | 2001 | .alg = "cryptd(__ghash-pclmulqdqni)", |
| 1810 | .test = alg_test_null, | 2002 | .test = alg_test_null, |
| 2003 | .fips_allowed = 1, | ||
| 1811 | .suite = { | 2004 | .suite = { |
| 1812 | .hash = { | 2005 | .hash = { |
| 1813 | .vecs = NULL, | 2006 | .vecs = NULL, |
| @@ -1923,6 +2116,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 1923 | }, { | 2116 | }, { |
| 1924 | .alg = "ecb(__aes-aesni)", | 2117 | .alg = "ecb(__aes-aesni)", |
| 1925 | .test = alg_test_null, | 2118 | .test = alg_test_null, |
| 2119 | .fips_allowed = 1, | ||
| 1926 | .suite = { | 2120 | .suite = { |
| 1927 | .cipher = { | 2121 | .cipher = { |
| 1928 | .enc = { | 2122 | .enc = { |
| @@ -2220,6 +2414,15 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 2220 | } | 2414 | } |
| 2221 | } | 2415 | } |
| 2222 | }, { | 2416 | }, { |
| 2417 | .alg = "hmac(crc32)", | ||
| 2418 | .test = alg_test_hash, | ||
| 2419 | .suite = { | ||
| 2420 | .hash = { | ||
| 2421 | .vecs = bfin_crc_tv_template, | ||
| 2422 | .count = BFIN_CRC_TEST_VECTORS | ||
| 2423 | } | ||
| 2424 | } | ||
| 2425 | }, { | ||
| 2223 | .alg = "hmac(md5)", | 2426 | .alg = "hmac(md5)", |
| 2224 | .test = alg_test_hash, | 2427 | .test = alg_test_hash, |
| 2225 | .suite = { | 2428 | .suite = { |
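The long run of new testmgr.c entries with .test = alg_test_null covers internal helper implementations: the "__"-prefixed serpent/twofish AVX ciphers and their cryptd(...) wrappers are only reachable through higher-level algorithms that carry real test vectors, so they are listed with a no-op test to keep the self-test core from flagging them as untested, and the sprinkled .fips_allowed = 1 flags let the aesni/ghash helpers keep working in FIPS mode. The genuinely new coverage is the three authenc(hmac(shaN),cbc(aes)) AEAD vector sets and the "hmac(crc32)" entry backed by the new bfin_crc_tv_template vectors (added for the Blackfin CRC hardware driver). For reference, the no-op hook simply reports success; its signature is visible in the hunk header above and the body is trivial:

static int alg_test_null(const struct alg_test_desc *desc,
			 const char *driver, u32 type, u32 mask)
{
	return 0;
}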
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 36e5a8ee0e1e..f8179e0344ed 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
| @@ -2765,8 +2765,62 @@ static struct cipher_testvec tf_enc_tv_template[] = { | |||
| 2765 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" | 2765 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" |
| 2766 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" | 2766 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" |
| 2767 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" | 2767 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" |
| 2768 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", | 2768 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" |
| 2769 | .ilen = 64, | 2769 | "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" |
| 2770 | "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" | ||
| 2771 | "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" | ||
| 2772 | "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" | ||
| 2773 | "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" | ||
| 2774 | "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" | ||
| 2775 | "\x29\xC0\x57\xEE\x62\xF9\x90\x04" | ||
| 2776 | "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" | ||
| 2777 | "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" | ||
| 2778 | "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" | ||
| 2779 | "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" | ||
| 2780 | "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" | ||
| 2781 | "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" | ||
| 2782 | "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" | ||
| 2783 | "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" | ||
| 2784 | "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" | ||
| 2785 | "\x57\xEE\x85\x1C\x90\x27\xBE\x32" | ||
| 2786 | "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" | ||
| 2787 | "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" | ||
| 2788 | "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" | ||
| 2789 | "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" | ||
| 2790 | "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" | ||
| 2791 | "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" | ||
| 2792 | "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" | ||
| 2793 | "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" | ||
| 2794 | "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" | ||
| 2795 | "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" | ||
| 2796 | "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" | ||
| 2797 | "\x69\x00\x74\x0B\xA2\x16\xAD\x44" | ||
| 2798 | "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" | ||
| 2799 | "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" | ||
| 2800 | "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" | ||
| 2801 | "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" | ||
| 2802 | "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" | ||
| 2803 | "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" | ||
| 2804 | "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" | ||
| 2805 | "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" | ||
| 2806 | "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" | ||
| 2807 | "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" | ||
| 2808 | "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" | ||
| 2809 | "\x58\xEF\x86\x1D\x91\x28\xBF\x33" | ||
| 2810 | "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" | ||
| 2811 | "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" | ||
| 2812 | "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" | ||
| 2813 | "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" | ||
| 2814 | "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" | ||
| 2815 | "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" | ||
| 2816 | "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" | ||
| 2817 | "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" | ||
| 2818 | "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" | ||
| 2819 | "\x86\x1D\xB4\x28\xBF\x56\xED\x61" | ||
| 2820 | "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" | ||
| 2821 | "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" | ||
| 2822 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | ||
| 2823 | .ilen = 496, | ||
| 2770 | .result = "\x88\xCB\x1E\xC2\xAF\x8A\x97\xFF" | 2824 | .result = "\x88\xCB\x1E\xC2\xAF\x8A\x97\xFF" |
| 2771 | "\xF6\x90\x46\x9C\x4A\x0F\x08\xDC" | 2825 | "\xF6\x90\x46\x9C\x4A\x0F\x08\xDC" |
| 2772 | "\xDE\xAB\xAD\xFA\xFC\xA8\xC2\x3D" | 2826 | "\xDE\xAB\xAD\xFA\xFC\xA8\xC2\x3D" |
| @@ -2774,8 +2828,62 @@ static struct cipher_testvec tf_enc_tv_template[] = { | |||
| 2774 | "\x34\x9E\xB6\x08\xB2\xDD\xA8\xF5" | 2828 | "\x34\x9E\xB6\x08\xB2\xDD\xA8\xF5" |
| 2775 | "\xDF\xFA\xC7\xE8\x09\x50\x76\x08" | 2829 | "\xDF\xFA\xC7\xE8\x09\x50\x76\x08" |
| 2776 | "\xA2\xB6\x6A\x59\xC0\x2B\x6D\x05" | 2830 | "\xA2\xB6\x6A\x59\xC0\x2B\x6D\x05" |
| 2777 | "\x89\xF6\x82\xF0\xD3\xDB\x06\x02", | 2831 | "\x89\xF6\x82\xF0\xD3\xDB\x06\x02" |
| 2778 | .rlen = 64, | 2832 | "\xB5\x11\x5C\x5E\x79\x1A\xAC\x43" |
| 2833 | "\x5C\xC0\x30\x4B\x6B\x16\xA1\x40" | ||
| 2834 | "\x80\x27\x88\xBA\x2C\x74\x42\xE0" | ||
| 2835 | "\x1B\xA5\x85\x08\xB9\xE6\x22\x7A" | ||
| 2836 | "\x36\x3B\x0D\x9F\xA0\x22\x6C\x2A" | ||
| 2837 | "\x91\x75\x47\xBC\x67\x21\x4E\xF9" | ||
| 2838 | "\xEA\xFF\xD9\xD5\xC0\xFC\x9E\x2C" | ||
| 2839 | "\x3E\xAD\xC6\x61\x0E\x93\x7A\x22" | ||
| 2840 | "\x09\xC8\x8D\xC1\x8E\xB4\x8B\x5C" | ||
| 2841 | "\xC6\x24\x42\xB8\x23\x66\x80\xA9" | ||
| 2842 | "\x32\x0B\x7A\x29\xBF\xB3\x0B\x63" | ||
| 2843 | "\x43\x27\x13\xA9\xBE\xEB\xBD\xF3" | ||
| 2844 | "\x33\x62\x70\xE2\x1B\x86\x7A\xA1" | ||
| 2845 | "\x51\x4A\x16\xFE\x29\x63\x7E\xD0" | ||
| 2846 | "\x7A\xA4\x6E\x2C\xF8\xC1\xDB\xE8" | ||
| 2847 | "\xCB\x4D\xD2\x8C\x04\x14\xB4\x66" | ||
| 2848 | "\x41\xB7\x3A\x96\x16\x7C\x1D\x5B" | ||
| 2849 | "\xB6\x41\x42\x64\x43\xEE\x6E\x7C" | ||
| 2850 | "\x8B\xAF\x01\x9C\xA4\x6E\x75\x8F" | ||
| 2851 | "\xDE\x10\x9F\xA6\xE7\xD6\x44\x97" | ||
| 2852 | "\x66\xA3\x96\x0F\x1C\x25\x60\xF5" | ||
| 2853 | "\x3C\x2E\x32\x69\x0E\x82\xFF\x27" | ||
| 2854 | "\x0F\xB5\x06\xDA\xD8\x31\x15\x6C" | ||
| 2855 | "\xDF\x18\x6C\x87\xF5\x3B\x11\x9A" | ||
| 2856 | "\x1B\x42\x1F\x5B\x29\x19\x96\x13" | ||
| 2857 | "\x68\x2E\x5E\x08\x1C\x8F\x32\x4B" | ||
| 2858 | "\x81\x77\x6D\xF4\xA0\x01\x42\xEC" | ||
| 2859 | "\xDD\x5B\xFD\x3A\x8E\x6A\x14\xFB" | ||
| 2860 | "\x83\x54\xDF\x0F\x86\xB7\xEA\x40" | ||
| 2861 | "\x46\x39\xF7\x2A\x89\x8D\x4E\x96" | ||
| 2862 | "\x5F\x5F\x6D\x76\xC6\x13\x9D\x3D" | ||
| 2863 | "\x1D\x5F\x0C\x7D\xE2\xBC\xC2\x16" | ||
| 2864 | "\x16\xBE\x89\x3E\xB0\x61\xA2\x5D" | ||
| 2865 | "\xAF\xD1\x40\x5F\x1A\xB8\x26\x41" | ||
| 2866 | "\xC6\xBD\x36\xEF\xED\x29\x50\x6D" | ||
| 2867 | "\x10\xEF\x26\xE8\xA8\x93\x11\x3F" | ||
| 2868 | "\x2D\x1F\x88\x20\x77\x45\xF5\x66" | ||
| 2869 | "\x08\xB9\xF1\xEF\xB1\x93\xA8\x81" | ||
| 2870 | "\x65\xC5\xCD\x3E\x8C\x06\x60\x2C" | ||
| 2871 | "\xB2\x10\x7A\xCA\x05\x25\x59\xDB" | ||
| 2872 | "\xC7\x28\xF5\x20\x35\x52\x9E\x62" | ||
| 2873 | "\xF8\x88\x24\x1C\x4D\x84\x12\x39" | ||
| 2874 | "\x39\xE4\x2E\xF4\xD4\x9D\x2B\xBC" | ||
| 2875 | "\x87\x66\xE6\xC0\x6B\x31\x9A\x66" | ||
| 2876 | "\x03\xDC\x95\xD8\x6B\xD0\x30\x8F" | ||
| 2877 | "\xDF\x8F\x8D\xFA\xEC\x1F\x08\xBD" | ||
| 2878 | "\xA3\x63\xE2\x71\x4F\x03\x94\x87" | ||
| 2879 | "\x50\xDF\x15\x1F\xED\x3A\xA3\x7F" | ||
| 2880 | "\x1F\x2A\xB5\xA1\x69\xAC\x4B\x0D" | ||
| 2881 | "\x84\x9B\x2A\xE9\x55\xDD\x46\x91" | ||
| 2882 | "\x15\x33\xF3\x2B\x9B\x46\x97\x00" | ||
| 2883 | "\xF0\x29\xD8\x59\x5D\x33\x37\xF9" | ||
| 2884 | "\x58\x33\x9B\x78\xC7\x58\x48\x6B" | ||
| 2885 | "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5", | ||
| 2886 | .rlen = 496, | ||
| 2779 | }, | 2887 | }, |
| 2780 | }; | 2888 | }; |
| 2781 | 2889 | ||
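From here to the end of this section, testmgr.h extends the twofish ECB/CBC/CTR vectors in the same way: each 64-byte plaintext/ciphertext pair grows to 496 bytes, and the second CTR case to 499. 496 bytes is 31 cipher blocks, which is presumably the point of the resizing: one vector is now long enough to drive the new twofish-avx implementation through its 8-block parallel path, the 3-way path, and a single-block tail, while the 499-byte CTR case additionally exercises the partial final block that CTR permits.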
| @@ -2822,8 +2930,62 @@ static struct cipher_testvec tf_dec_tv_template[] = { | |||
| 2822 | "\x34\x9E\xB6\x08\xB2\xDD\xA8\xF5" | 2930 | "\x34\x9E\xB6\x08\xB2\xDD\xA8\xF5" |
| 2823 | "\xDF\xFA\xC7\xE8\x09\x50\x76\x08" | 2931 | "\xDF\xFA\xC7\xE8\x09\x50\x76\x08" |
| 2824 | "\xA2\xB6\x6A\x59\xC0\x2B\x6D\x05" | 2932 | "\xA2\xB6\x6A\x59\xC0\x2B\x6D\x05" |
| 2825 | "\x89\xF6\x82\xF0\xD3\xDB\x06\x02", | 2933 | "\x89\xF6\x82\xF0\xD3\xDB\x06\x02" |
| 2826 | .ilen = 64, | 2934 | "\xB5\x11\x5C\x5E\x79\x1A\xAC\x43" |
| 2935 | "\x5C\xC0\x30\x4B\x6B\x16\xA1\x40" | ||
| 2936 | "\x80\x27\x88\xBA\x2C\x74\x42\xE0" | ||
| 2937 | "\x1B\xA5\x85\x08\xB9\xE6\x22\x7A" | ||
| 2938 | "\x36\x3B\x0D\x9F\xA0\x22\x6C\x2A" | ||
| 2939 | "\x91\x75\x47\xBC\x67\x21\x4E\xF9" | ||
| 2940 | "\xEA\xFF\xD9\xD5\xC0\xFC\x9E\x2C" | ||
| 2941 | "\x3E\xAD\xC6\x61\x0E\x93\x7A\x22" | ||
| 2942 | "\x09\xC8\x8D\xC1\x8E\xB4\x8B\x5C" | ||
| 2943 | "\xC6\x24\x42\xB8\x23\x66\x80\xA9" | ||
| 2944 | "\x32\x0B\x7A\x29\xBF\xB3\x0B\x63" | ||
| 2945 | "\x43\x27\x13\xA9\xBE\xEB\xBD\xF3" | ||
| 2946 | "\x33\x62\x70\xE2\x1B\x86\x7A\xA1" | ||
| 2947 | "\x51\x4A\x16\xFE\x29\x63\x7E\xD0" | ||
| 2948 | "\x7A\xA4\x6E\x2C\xF8\xC1\xDB\xE8" | ||
| 2949 | "\xCB\x4D\xD2\x8C\x04\x14\xB4\x66" | ||
| 2950 | "\x41\xB7\x3A\x96\x16\x7C\x1D\x5B" | ||
| 2951 | "\xB6\x41\x42\x64\x43\xEE\x6E\x7C" | ||
| 2952 | "\x8B\xAF\x01\x9C\xA4\x6E\x75\x8F" | ||
| 2953 | "\xDE\x10\x9F\xA6\xE7\xD6\x44\x97" | ||
| 2954 | "\x66\xA3\x96\x0F\x1C\x25\x60\xF5" | ||
| 2955 | "\x3C\x2E\x32\x69\x0E\x82\xFF\x27" | ||
| 2956 | "\x0F\xB5\x06\xDA\xD8\x31\x15\x6C" | ||
| 2957 | "\xDF\x18\x6C\x87\xF5\x3B\x11\x9A" | ||
| 2958 | "\x1B\x42\x1F\x5B\x29\x19\x96\x13" | ||
| 2959 | "\x68\x2E\x5E\x08\x1C\x8F\x32\x4B" | ||
| 2960 | "\x81\x77\x6D\xF4\xA0\x01\x42\xEC" | ||
| 2961 | "\xDD\x5B\xFD\x3A\x8E\x6A\x14\xFB" | ||
| 2962 | "\x83\x54\xDF\x0F\x86\xB7\xEA\x40" | ||
| 2963 | "\x46\x39\xF7\x2A\x89\x8D\x4E\x96" | ||
| 2964 | "\x5F\x5F\x6D\x76\xC6\x13\x9D\x3D" | ||
| 2965 | "\x1D\x5F\x0C\x7D\xE2\xBC\xC2\x16" | ||
| 2966 | "\x16\xBE\x89\x3E\xB0\x61\xA2\x5D" | ||
| 2967 | "\xAF\xD1\x40\x5F\x1A\xB8\x26\x41" | ||
| 2968 | "\xC6\xBD\x36\xEF\xED\x29\x50\x6D" | ||
| 2969 | "\x10\xEF\x26\xE8\xA8\x93\x11\x3F" | ||
| 2970 | "\x2D\x1F\x88\x20\x77\x45\xF5\x66" | ||
| 2971 | "\x08\xB9\xF1\xEF\xB1\x93\xA8\x81" | ||
| 2972 | "\x65\xC5\xCD\x3E\x8C\x06\x60\x2C" | ||
| 2973 | "\xB2\x10\x7A\xCA\x05\x25\x59\xDB" | ||
| 2974 | "\xC7\x28\xF5\x20\x35\x52\x9E\x62" | ||
| 2975 | "\xF8\x88\x24\x1C\x4D\x84\x12\x39" | ||
| 2976 | "\x39\xE4\x2E\xF4\xD4\x9D\x2B\xBC" | ||
| 2977 | "\x87\x66\xE6\xC0\x6B\x31\x9A\x66" | ||
| 2978 | "\x03\xDC\x95\xD8\x6B\xD0\x30\x8F" | ||
| 2979 | "\xDF\x8F\x8D\xFA\xEC\x1F\x08\xBD" | ||
| 2980 | "\xA3\x63\xE2\x71\x4F\x03\x94\x87" | ||
| 2981 | "\x50\xDF\x15\x1F\xED\x3A\xA3\x7F" | ||
| 2982 | "\x1F\x2A\xB5\xA1\x69\xAC\x4B\x0D" | ||
| 2983 | "\x84\x9B\x2A\xE9\x55\xDD\x46\x91" | ||
| 2984 | "\x15\x33\xF3\x2B\x9B\x46\x97\x00" | ||
| 2985 | "\xF0\x29\xD8\x59\x5D\x33\x37\xF9" | ||
| 2986 | "\x58\x33\x9B\x78\xC7\x58\x48\x6B" | ||
| 2987 | "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5", | ||
| 2988 | .ilen = 496, | ||
| 2827 | .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" | 2989 | .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" |
| 2828 | "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" | 2990 | "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" |
| 2829 | "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" | 2991 | "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" |
| @@ -2831,8 +2993,62 @@ static struct cipher_testvec tf_dec_tv_template[] = { | |||
| 2831 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" | 2993 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" |
| 2832 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" | 2994 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" |
| 2833 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" | 2995 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" |
| 2834 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", | 2996 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" |
| 2835 | .rlen = 64, | 2997 | "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" |
| 2998 | "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" | ||
| 2999 | "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" | ||
| 3000 | "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" | ||
| 3001 | "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" | ||
| 3002 | "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" | ||
| 3003 | "\x29\xC0\x57\xEE\x62\xF9\x90\x04" | ||
| 3004 | "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" | ||
| 3005 | "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" | ||
| 3006 | "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" | ||
| 3007 | "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" | ||
| 3008 | "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" | ||
| 3009 | "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" | ||
| 3010 | "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" | ||
| 3011 | "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" | ||
| 3012 | "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" | ||
| 3013 | "\x57\xEE\x85\x1C\x90\x27\xBE\x32" | ||
| 3014 | "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" | ||
| 3015 | "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" | ||
| 3016 | "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" | ||
| 3017 | "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" | ||
| 3018 | "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" | ||
| 3019 | "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" | ||
| 3020 | "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" | ||
| 3021 | "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" | ||
| 3022 | "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" | ||
| 3023 | "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" | ||
| 3024 | "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" | ||
| 3025 | "\x69\x00\x74\x0B\xA2\x16\xAD\x44" | ||
| 3026 | "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" | ||
| 3027 | "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" | ||
| 3028 | "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" | ||
| 3029 | "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" | ||
| 3030 | "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" | ||
| 3031 | "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" | ||
| 3032 | "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" | ||
| 3033 | "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" | ||
| 3034 | "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" | ||
| 3035 | "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" | ||
| 3036 | "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" | ||
| 3037 | "\x58\xEF\x86\x1D\x91\x28\xBF\x33" | ||
| 3038 | "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" | ||
| 3039 | "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" | ||
| 3040 | "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" | ||
| 3041 | "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" | ||
| 3042 | "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" | ||
| 3043 | "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" | ||
| 3044 | "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" | ||
| 3045 | "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" | ||
| 3046 | "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" | ||
| 3047 | "\x86\x1D\xB4\x28\xBF\x56\xED\x61" | ||
| 3048 | "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" | ||
| 3049 | "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" | ||
| 3050 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | ||
| 3051 | .rlen = 496, | ||
| 2836 | }, | 3052 | }, |
| 2837 | }; | 3053 | }; |
| 2838 | 3054 | ||
| @@ -2894,8 +3110,62 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = { | |||
| 2894 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" | 3110 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" |
| 2895 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" | 3111 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" |
| 2896 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" | 3112 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" |
| 2897 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", | 3113 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" |
| 2898 | .ilen = 64, | 3114 | "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" |
| 3115 | "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" | ||
| 3116 | "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" | ||
| 3117 | "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" | ||
| 3118 | "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" | ||
| 3119 | "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" | ||
| 3120 | "\x29\xC0\x57\xEE\x62\xF9\x90\x04" | ||
| 3121 | "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" | ||
| 3122 | "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" | ||
| 3123 | "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" | ||
| 3124 | "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" | ||
| 3125 | "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" | ||
| 3126 | "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" | ||
| 3127 | "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" | ||
| 3128 | "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" | ||
| 3129 | "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" | ||
| 3130 | "\x57\xEE\x85\x1C\x90\x27\xBE\x32" | ||
| 3131 | "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" | ||
| 3132 | "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" | ||
| 3133 | "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" | ||
| 3134 | "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" | ||
| 3135 | "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" | ||
| 3136 | "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" | ||
| 3137 | "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" | ||
| 3138 | "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" | ||
| 3139 | "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" | ||
| 3140 | "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" | ||
| 3141 | "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" | ||
| 3142 | "\x69\x00\x74\x0B\xA2\x16\xAD\x44" | ||
| 3143 | "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" | ||
| 3144 | "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" | ||
| 3145 | "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" | ||
| 3146 | "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" | ||
| 3147 | "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" | ||
| 3148 | "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" | ||
| 3149 | "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" | ||
| 3150 | "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" | ||
| 3151 | "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" | ||
| 3152 | "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" | ||
| 3153 | "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" | ||
| 3154 | "\x58\xEF\x86\x1D\x91\x28\xBF\x33" | ||
| 3155 | "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" | ||
| 3156 | "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" | ||
| 3157 | "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" | ||
| 3158 | "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" | ||
| 3159 | "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" | ||
| 3160 | "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" | ||
| 3161 | "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" | ||
| 3162 | "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" | ||
| 3163 | "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" | ||
| 3164 | "\x86\x1D\xB4\x28\xBF\x56\xED\x61" | ||
| 3165 | "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" | ||
| 3166 | "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" | ||
| 3167 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | ||
| 3168 | .ilen = 496, | ||
| 2899 | .result = "\xC8\xFF\xF2\x53\xA6\x27\x09\xD1" | 3169 | .result = "\xC8\xFF\xF2\x53\xA6\x27\x09\xD1" |
| 2900 | "\x33\x38\xC2\xC0\x0C\x14\x7E\xB5" | 3170 | "\x33\x38\xC2\xC0\x0C\x14\x7E\xB5" |
| 2901 | "\x26\x1B\x05\x0C\x05\x12\x3F\xC0" | 3171 | "\x26\x1B\x05\x0C\x05\x12\x3F\xC0" |
| @@ -2903,8 +3173,62 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = { | |||
| 2903 | "\x3D\x32\xDF\xDA\x56\x00\x6E\xEE" | 3173 | "\x3D\x32\xDF\xDA\x56\x00\x6E\xEE" |
| 2904 | "\x5B\x2A\x72\x9D\xC2\x4D\x19\xBC" | 3174 | "\x5B\x2A\x72\x9D\xC2\x4D\x19\xBC" |
| 2905 | "\x8C\x53\xFA\x87\x6F\xDD\x81\xA3" | 3175 | "\x8C\x53\xFA\x87\x6F\xDD\x81\xA3" |
| 2906 | "\xB1\xD3\x44\x65\xDF\xE7\x63\x38", | 3176 | "\xB1\xD3\x44\x65\xDF\xE7\x63\x38" |
| 2907 | .rlen = 64, | 3177 | "\x4A\xFC\xDC\xEC\x3F\x26\x8E\xB8" |
| 3178 | "\x43\xFC\xFE\x18\xB5\x11\x6D\x31" | ||
| 3179 | "\x81\x8B\x0D\x75\xF6\x80\xEC\x84" | ||
| 3180 | "\x04\xB9\xE6\x09\x63\xED\x39\xDB" | ||
| 3181 | "\xC3\xF6\x14\xD6\x6E\x5E\x8B\xBD" | ||
| 3182 | "\x3E\xFA\xD7\x98\x50\x6F\xD9\x63" | ||
| 3183 | "\x02\xCD\x0D\x39\x4B\x0D\xEC\x80" | ||
| 3184 | "\xE3\x6A\x17\xF4\xCC\xAD\xFF\x68" | ||
| 3185 | "\x45\xDD\xC8\x83\x1D\x41\x96\x0D" | ||
| 3186 | "\x91\x2E\x05\xD3\x59\x82\xE0\x43" | ||
| 3187 | "\x90\x4F\xB9\xF7\xAD\x6B\x2E\xAF" | ||
| 3188 | "\xA7\x84\x00\x53\xCD\x6F\xD1\x0C" | ||
| 3189 | "\x4E\xF9\x5A\x23\xFB\xCA\xC7\xD3" | ||
| 3190 | "\xA9\xAA\x9D\xB2\x3F\x66\xF1\xAC" | ||
| 3191 | "\x25\x21\x8F\xF7\xEF\xF2\x6A\xDF" | ||
| 3192 | "\xE8\xDA\x75\x1A\x8A\xF1\xDD\x38" | ||
| 3193 | "\x1F\xF9\x3D\x68\x4A\xBB\x9E\x34" | ||
| 3194 | "\x1F\x66\x1F\x9C\x2B\x54\xFF\x60" | ||
| 3195 | "\x7F\x29\x4B\x55\x80\x8F\x4E\xA7" | ||
| 3196 | "\xA6\x9A\x0A\xD9\x0D\x19\x00\xF8" | ||
| 3197 | "\x1F\xBC\x0C\x40\x6B\xEC\x99\x25" | ||
| 3198 | "\x94\x70\x74\x0E\x1D\xC5\xBC\x12" | ||
| 3199 | "\xF3\x42\xBE\x95\xBF\xFB\x4E\x55" | ||
| 3200 | "\x9A\xB9\xCE\x14\x16\x5B\xDC\xD3" | ||
| 3201 | "\x75\x42\x62\x04\x31\x1F\x95\x7C" | ||
| 3202 | "\x66\x1A\x97\xDC\x2F\x40\x5C\x39" | ||
| 3203 | "\x78\xE6\x02\xDB\x49\xE1\xC6\x47" | ||
| 3204 | "\xC2\x78\x9A\xBB\xF3\xBE\xCB\x93" | ||
| 3205 | "\xD8\xB8\xE8\xBB\x8C\xB3\x9B\xA7" | ||
| 3206 | "\xC2\x89\xF3\x91\x88\x83\x3D\xF0" | ||
| 3207 | "\x29\xA2\xCD\xB5\x79\x16\xC2\x40" | ||
| 3208 | "\x11\x03\x8E\x9C\xFD\xC9\x43\xC4" | ||
| 3209 | "\xC2\x19\xF0\x4A\x32\xEF\x0C\x2B" | ||
| 3210 | "\xD3\x2B\xE9\xD4\x4C\xDE\x95\xCF" | ||
| 3211 | "\x04\x03\xD3\x2C\x7F\x82\xC8\xFA" | ||
| 3212 | "\x0F\xD8\x7A\x39\x7B\x01\x41\x9C" | ||
| 3213 | "\x78\xB6\xC9\xBF\xF9\x78\x57\x88" | ||
| 3214 | "\xB1\xA5\xE1\xE0\xD9\x16\xD4\xC8" | ||
| 3215 | "\xEE\xC4\xBE\x7B\x55\x59\x00\x48" | ||
| 3216 | "\x1B\xBC\x14\xFA\x2A\x9D\xC9\x1C" | ||
| 3217 | "\xFB\x28\x3F\x95\xDD\xB7\xD6\xCE" | ||
| 3218 | "\x3A\x7F\x09\x0C\x0E\x69\x30\x7D" | ||
| 3219 | "\xBC\x68\x9C\x91\x2A\x59\x57\x04" | ||
| 3220 | "\xED\x1A\x1E\x00\xB1\x85\x92\x04" | ||
| 3221 | "\x28\x8C\x0C\x3C\xC1\xD5\x12\xF7" | ||
| 3222 | "\x4C\x3E\xB0\xE7\x86\x62\x68\x91" | ||
| 3223 | "\xFC\xC4\xE2\xCE\xA6\xDC\x5E\x93" | ||
| 3224 | "\x5D\x8D\x8C\x68\xB3\xB2\xB9\x64" | ||
| 3225 | "\x16\xB8\xC8\x6F\xD8\xEE\x21\xBD" | ||
| 3226 | "\xAC\x18\x0C\x7D\x0D\x05\xAB\xF1" | ||
| 3227 | "\xFA\xDD\xE2\x48\xDF\x4C\x02\x39" | ||
| 3228 | "\x69\xA1\x62\xBD\x49\x3A\x9D\x91" | ||
| 3229 | "\x30\x70\x56\xA4\x37\xDD\x7C\xC0" | ||
| 3230 | "\x0A\xA3\x30\x10\x26\x25\x41\x2C", | ||
| 3231 | .rlen = 496, | ||
| 2908 | }, | 3232 | }, |
| 2909 | }; | 3233 | }; |
| 2910 | 3234 | ||
| @@ -2966,8 +3290,62 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = { | |||
| 2966 | "\x3D\x32\xDF\xDA\x56\x00\x6E\xEE" | 3290 | "\x3D\x32\xDF\xDA\x56\x00\x6E\xEE" |
| 2967 | "\x5B\x2A\x72\x9D\xC2\x4D\x19\xBC" | 3291 | "\x5B\x2A\x72\x9D\xC2\x4D\x19\xBC" |
| 2968 | "\x8C\x53\xFA\x87\x6F\xDD\x81\xA3" | 3292 | "\x8C\x53\xFA\x87\x6F\xDD\x81\xA3" |
| 2969 | "\xB1\xD3\x44\x65\xDF\xE7\x63\x38", | 3293 | "\xB1\xD3\x44\x65\xDF\xE7\x63\x38" |
| 2970 | .ilen = 64, | 3294 | "\x4A\xFC\xDC\xEC\x3F\x26\x8E\xB8" |
| 3295 | "\x43\xFC\xFE\x18\xB5\x11\x6D\x31" | ||
| 3296 | "\x81\x8B\x0D\x75\xF6\x80\xEC\x84" | ||
| 3297 | "\x04\xB9\xE6\x09\x63\xED\x39\xDB" | ||
| 3298 | "\xC3\xF6\x14\xD6\x6E\x5E\x8B\xBD" | ||
| 3299 | "\x3E\xFA\xD7\x98\x50\x6F\xD9\x63" | ||
| 3300 | "\x02\xCD\x0D\x39\x4B\x0D\xEC\x80" | ||
| 3301 | "\xE3\x6A\x17\xF4\xCC\xAD\xFF\x68" | ||
| 3302 | "\x45\xDD\xC8\x83\x1D\x41\x96\x0D" | ||
| 3303 | "\x91\x2E\x05\xD3\x59\x82\xE0\x43" | ||
| 3304 | "\x90\x4F\xB9\xF7\xAD\x6B\x2E\xAF" | ||
| 3305 | "\xA7\x84\x00\x53\xCD\x6F\xD1\x0C" | ||
| 3306 | "\x4E\xF9\x5A\x23\xFB\xCA\xC7\xD3" | ||
| 3307 | "\xA9\xAA\x9D\xB2\x3F\x66\xF1\xAC" | ||
| 3308 | "\x25\x21\x8F\xF7\xEF\xF2\x6A\xDF" | ||
| 3309 | "\xE8\xDA\x75\x1A\x8A\xF1\xDD\x38" | ||
| 3310 | "\x1F\xF9\x3D\x68\x4A\xBB\x9E\x34" | ||
| 3311 | "\x1F\x66\x1F\x9C\x2B\x54\xFF\x60" | ||
| 3312 | "\x7F\x29\x4B\x55\x80\x8F\x4E\xA7" | ||
| 3313 | "\xA6\x9A\x0A\xD9\x0D\x19\x00\xF8" | ||
| 3314 | "\x1F\xBC\x0C\x40\x6B\xEC\x99\x25" | ||
| 3315 | "\x94\x70\x74\x0E\x1D\xC5\xBC\x12" | ||
| 3316 | "\xF3\x42\xBE\x95\xBF\xFB\x4E\x55" | ||
| 3317 | "\x9A\xB9\xCE\x14\x16\x5B\xDC\xD3" | ||
| 3318 | "\x75\x42\x62\x04\x31\x1F\x95\x7C" | ||
| 3319 | "\x66\x1A\x97\xDC\x2F\x40\x5C\x39" | ||
| 3320 | "\x78\xE6\x02\xDB\x49\xE1\xC6\x47" | ||
| 3321 | "\xC2\x78\x9A\xBB\xF3\xBE\xCB\x93" | ||
| 3322 | "\xD8\xB8\xE8\xBB\x8C\xB3\x9B\xA7" | ||
| 3323 | "\xC2\x89\xF3\x91\x88\x83\x3D\xF0" | ||
| 3324 | "\x29\xA2\xCD\xB5\x79\x16\xC2\x40" | ||
| 3325 | "\x11\x03\x8E\x9C\xFD\xC9\x43\xC4" | ||
| 3326 | "\xC2\x19\xF0\x4A\x32\xEF\x0C\x2B" | ||
| 3327 | "\xD3\x2B\xE9\xD4\x4C\xDE\x95\xCF" | ||
| 3328 | "\x04\x03\xD3\x2C\x7F\x82\xC8\xFA" | ||
| 3329 | "\x0F\xD8\x7A\x39\x7B\x01\x41\x9C" | ||
| 3330 | "\x78\xB6\xC9\xBF\xF9\x78\x57\x88" | ||
| 3331 | "\xB1\xA5\xE1\xE0\xD9\x16\xD4\xC8" | ||
| 3332 | "\xEE\xC4\xBE\x7B\x55\x59\x00\x48" | ||
| 3333 | "\x1B\xBC\x14\xFA\x2A\x9D\xC9\x1C" | ||
| 3334 | "\xFB\x28\x3F\x95\xDD\xB7\xD6\xCE" | ||
| 3335 | "\x3A\x7F\x09\x0C\x0E\x69\x30\x7D" | ||
| 3336 | "\xBC\x68\x9C\x91\x2A\x59\x57\x04" | ||
| 3337 | "\xED\x1A\x1E\x00\xB1\x85\x92\x04" | ||
| 3338 | "\x28\x8C\x0C\x3C\xC1\xD5\x12\xF7" | ||
| 3339 | "\x4C\x3E\xB0\xE7\x86\x62\x68\x91" | ||
| 3340 | "\xFC\xC4\xE2\xCE\xA6\xDC\x5E\x93" | ||
| 3341 | "\x5D\x8D\x8C\x68\xB3\xB2\xB9\x64" | ||
| 3342 | "\x16\xB8\xC8\x6F\xD8\xEE\x21\xBD" | ||
| 3343 | "\xAC\x18\x0C\x7D\x0D\x05\xAB\xF1" | ||
| 3344 | "\xFA\xDD\xE2\x48\xDF\x4C\x02\x39" | ||
| 3345 | "\x69\xA1\x62\xBD\x49\x3A\x9D\x91" | ||
| 3346 | "\x30\x70\x56\xA4\x37\xDD\x7C\xC0" | ||
| 3347 | "\x0A\xA3\x30\x10\x26\x25\x41\x2C", | ||
| 3348 | .ilen = 496, | ||
| 2971 | .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" | 3349 | .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" |
| 2972 | "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" | 3350 | "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" |
| 2973 | "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" | 3351 | "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" |
| @@ -2975,8 +3353,62 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = { | |||
| 2975 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" | 3353 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" |
| 2976 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" | 3354 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" |
| 2977 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" | 3355 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" |
| 2978 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", | 3356 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" |
| 2979 | .rlen = 64, | 3357 | "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" |
| 3358 | "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" | ||
| 3359 | "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" | ||
| 3360 | "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" | ||
| 3361 | "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" | ||
| 3362 | "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" | ||
| 3363 | "\x29\xC0\x57\xEE\x62\xF9\x90\x04" | ||
| 3364 | "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" | ||
| 3365 | "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" | ||
| 3366 | "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" | ||
| 3367 | "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" | ||
| 3368 | "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" | ||
| 3369 | "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" | ||
| 3370 | "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" | ||
| 3371 | "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" | ||
| 3372 | "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" | ||
| 3373 | "\x57\xEE\x85\x1C\x90\x27\xBE\x32" | ||
| 3374 | "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" | ||
| 3375 | "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" | ||
| 3376 | "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" | ||
| 3377 | "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" | ||
| 3378 | "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" | ||
| 3379 | "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" | ||
| 3380 | "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" | ||
| 3381 | "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" | ||
| 3382 | "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" | ||
| 3383 | "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" | ||
| 3384 | "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" | ||
| 3385 | "\x69\x00\x74\x0B\xA2\x16\xAD\x44" | ||
| 3386 | "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" | ||
| 3387 | "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" | ||
| 3388 | "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" | ||
| 3389 | "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" | ||
| 3390 | "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" | ||
| 3391 | "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" | ||
| 3392 | "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" | ||
| 3393 | "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" | ||
| 3394 | "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" | ||
| 3395 | "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" | ||
| 3396 | "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" | ||
| 3397 | "\x58\xEF\x86\x1D\x91\x28\xBF\x33" | ||
| 3398 | "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" | ||
| 3399 | "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" | ||
| 3400 | "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" | ||
| 3401 | "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" | ||
| 3402 | "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" | ||
| 3403 | "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" | ||
| 3404 | "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" | ||
| 3405 | "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" | ||
| 3406 | "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" | ||
| 3407 | "\x86\x1D\xB4\x28\xBF\x56\xED\x61" | ||
| 3408 | "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" | ||
| 3409 | "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" | ||
| 3410 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | ||
| 3411 | .rlen = 496, | ||
| 2980 | }, | 3412 | }, |
| 2981 | }; | 3413 | }; |
| 2982 | 3414 | ||
| @@ -2996,8 +3428,62 @@ static struct cipher_testvec tf_ctr_enc_tv_template[] = { | |||
| 2996 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" | 3428 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" |
| 2997 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" | 3429 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" |
| 2998 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" | 3430 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" |
| 2999 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", | 3431 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" |
| 3000 | .ilen = 64, | 3432 | "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" |
| 3433 | "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" | ||
| 3434 | "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" | ||
| 3435 | "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" | ||
| 3436 | "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" | ||
| 3437 | "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" | ||
| 3438 | "\x29\xC0\x57\xEE\x62\xF9\x90\x04" | ||
| 3439 | "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" | ||
| 3440 | "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" | ||
| 3441 | "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" | ||
| 3442 | "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" | ||
| 3443 | "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" | ||
| 3444 | "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" | ||
| 3445 | "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" | ||
| 3446 | "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" | ||
| 3447 | "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" | ||
| 3448 | "\x57\xEE\x85\x1C\x90\x27\xBE\x32" | ||
| 3449 | "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" | ||
| 3450 | "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" | ||
| 3451 | "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" | ||
| 3452 | "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" | ||
| 3453 | "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" | ||
| 3454 | "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" | ||
| 3455 | "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" | ||
| 3456 | "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" | ||
| 3457 | "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" | ||
| 3458 | "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" | ||
| 3459 | "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" | ||
| 3460 | "\x69\x00\x74\x0B\xA2\x16\xAD\x44" | ||
| 3461 | "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" | ||
| 3462 | "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" | ||
| 3463 | "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" | ||
| 3464 | "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" | ||
| 3465 | "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" | ||
| 3466 | "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" | ||
| 3467 | "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" | ||
| 3468 | "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" | ||
| 3469 | "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" | ||
| 3470 | "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" | ||
| 3471 | "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" | ||
| 3472 | "\x58\xEF\x86\x1D\x91\x28\xBF\x33" | ||
| 3473 | "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" | ||
| 3474 | "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" | ||
| 3475 | "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" | ||
| 3476 | "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" | ||
| 3477 | "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" | ||
| 3478 | "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" | ||
| 3479 | "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" | ||
| 3480 | "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" | ||
| 3481 | "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" | ||
| 3482 | "\x86\x1D\xB4\x28\xBF\x56\xED\x61" | ||
| 3483 | "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" | ||
| 3484 | "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" | ||
| 3485 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | ||
| 3486 | .ilen = 496, | ||
| 3001 | .result = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE" | 3487 | .result = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE" |
| 3002 | "\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30" | 3488 | "\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30" |
| 3003 | "\x26\x9B\x89\xA1\xEE\x43\xE0\x52" | 3489 | "\x26\x9B\x89\xA1\xEE\x43\xE0\x52" |
| @@ -3005,8 +3491,62 @@ static struct cipher_testvec tf_ctr_enc_tv_template[] = { | |||
| 3005 | "\x9F\x8D\x40\x9F\x24\xFD\x92\xA0" | 3491 | "\x9F\x8D\x40\x9F\x24\xFD\x92\xA0" |
| 3006 | "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" | 3492 | "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" |
| 3007 | "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" | 3493 | "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" |
| 3008 | "\x01\x41\x21\x12\x38\xAB\x52\x4F", | 3494 | "\x01\x41\x21\x12\x38\xAB\x52\x4F" |
| 3009 | .rlen = 64, | 3495 | "\xA8\x57\x20\xE0\x21\x6A\x17\x0D" |
| 3496 | "\x0E\xF9\x8E\x49\x42\x00\x3C\x94" | ||
| 3497 | "\x14\xC0\xD0\x8D\x8A\x98\xEB\x29" | ||
| 3498 | "\xEC\xAE\x96\x44\xC0\x3C\x48\xDC" | ||
| 3499 | "\x29\x35\x25\x2F\xE7\x11\x6C\x68" | ||
| 3500 | "\xC8\x67\x0A\x2F\xF4\x07\xBE\xF9" | ||
| 3501 | "\x2C\x31\x87\x40\xAB\xB2\xB6\xFA" | ||
| 3502 | "\xD2\xC9\x6D\x5C\x50\xE9\xE6\x7E" | ||
| 3503 | "\xE3\x0A\xD2\xD5\x6D\x8D\x64\x9E" | ||
| 3504 | "\x70\xCE\x03\x76\xDD\xE0\xF0\x8C" | ||
| 3505 | "\x84\x86\x8B\x6A\xFE\xC7\xF9\x69" | ||
| 3506 | "\x2E\xFE\xFC\xC2\xC4\x1A\x55\x58" | ||
| 3507 | "\xB3\xBE\xE2\x7E\xED\x39\x42\x6C" | ||
| 3508 | "\xB4\x42\x97\x9A\xEC\xE1\x0A\x06" | ||
| 3509 | "\x02\xC5\x03\x9D\xC4\x48\x15\x66" | ||
| 3510 | "\x35\x6A\xC2\xC9\xA2\x26\x30\xBB" | ||
| 3511 | "\xDB\x2D\xC8\x08\x2B\xA0\x29\x1A" | ||
| 3512 | "\x23\x61\x48\xEA\x80\x04\x27\xAA" | ||
| 3513 | "\x69\x49\xE8\xE8\x4A\x83\x6B\x5A" | ||
| 3514 | "\xCA\x7C\xD3\xB1\xB5\x0B\xCC\x23" | ||
| 3515 | "\x74\x1F\xA9\x87\xCD\xED\xC0\x2D" | ||
| 3516 | "\xBF\xEB\xCF\x16\x2D\x2A\x2E\x1D" | ||
| 3517 | "\x96\xBA\x36\x11\x45\x41\xDA\xCE" | ||
| 3518 | "\xA4\x48\x80\x8B\x06\xF4\x98\x89" | ||
| 3519 | "\x8B\x23\x08\x53\xF4\xD4\x5A\x24" | ||
| 3520 | "\x8B\xF8\x43\x73\xD1\xEE\xC4\xB0" | ||
| 3521 | "\xF8\xFE\x09\x0C\x75\x05\x38\x0B" | ||
| 3522 | "\x7C\x81\xDE\x9D\xE4\x61\x37\x63" | ||
| 3523 | "\x63\xAD\x12\xD2\x04\xB9\xCE\x45" | ||
| 3524 | "\x5A\x1A\x6E\xB3\x78\x2A\xA4\x74" | ||
| 3525 | "\x86\xD0\xE3\xFF\xDA\x38\x9C\xB5" | ||
| 3526 | "\xB8\xB1\xDB\x38\x2F\xC5\x6A\xB4" | ||
| 3527 | "\xEB\x6E\x96\xE8\x43\x80\xB5\x51" | ||
| 3528 | "\x61\x2D\x48\xAA\x07\x65\x11\x8C" | ||
| 3529 | "\x48\xE3\x90\x7E\x78\x3A\xEC\x97" | ||
| 3530 | "\x05\x3D\x84\xE7\x90\x2B\xAA\xBD" | ||
| 3531 | "\x83\x29\x0E\x1A\x81\x73\x7B\xE0" | ||
| 3532 | "\x7A\x01\x4A\x37\x3B\x77\x7F\x8D" | ||
| 3533 | "\x49\xA4\x2F\x6E\xBE\x68\x99\x08" | ||
| 3534 | "\x99\xAA\x4C\x12\x04\xAE\x1F\x77" | ||
| 3535 | "\x35\x88\xF1\x65\x06\x0A\x0B\x4D" | ||
| 3536 | "\x47\xF9\x50\x38\x5D\x71\xF9\x6E" | ||
| 3537 | "\xDE\xEC\x61\x35\x2C\x4C\x96\x50" | ||
| 3538 | "\xE8\x28\x93\x9C\x7E\x01\xC6\x04" | ||
| 3539 | "\xB2\xD6\xBC\x6C\x17\xEB\xC1\x7D" | ||
| 3540 | "\x11\xE9\x43\x83\x76\xAA\x53\x37" | ||
| 3541 | "\x0C\x1D\x39\x89\x53\x72\x09\x7E" | ||
| 3542 | "\xD9\x85\x16\x04\xA5\x2C\x05\x6F" | ||
| 3543 | "\x17\x0C\x6E\x66\xAA\x84\xA7\xD9" | ||
| 3544 | "\xE2\xD9\xC4\xEB\x43\x3E\xB1\x8D" | ||
| 3545 | "\x7C\x36\xC7\x71\x70\x9C\x10\xD8" | ||
| 3546 | "\xE8\x47\x2A\x4D\xFD\xA1\xBC\xE3" | ||
| 3547 | "\xB9\x32\xE2\xC1\x82\xAC\xFE\xCC" | ||
| 3548 | "\xC5\xC9\x7F\x9E\xCF\x33\x7A\xDF", | ||
| 3549 | .rlen = 496, | ||
| 3010 | }, { /* Generated with Crypto++ */ | 3550 | }, { /* Generated with Crypto++ */ |
| 3011 | .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" | 3551 | .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" |
| 3012 | "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" | 3552 | "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" |
| @@ -3023,8 +3563,62 @@ static struct cipher_testvec tf_ctr_enc_tv_template[] = { | |||
| 3023 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" | 3563 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" |
| 3024 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" | 3564 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" |
| 3025 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" | 3565 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" |
| 3026 | "\xC3\x37\xCE", | 3566 | "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" |
| 3027 | .ilen = 67, | 3567 | "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" |
| 3568 | "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" | ||
| 3569 | "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" | ||
| 3570 | "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" | ||
| 3571 | "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" | ||
| 3572 | "\x29\xC0\x57\xEE\x62\xF9\x90\x04" | ||
| 3573 | "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" | ||
| 3574 | "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" | ||
| 3575 | "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" | ||
| 3576 | "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" | ||
| 3577 | "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" | ||
| 3578 | "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" | ||
| 3579 | "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" | ||
| 3580 | "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" | ||
| 3581 | "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" | ||
| 3582 | "\x57\xEE\x85\x1C\x90\x27\xBE\x32" | ||
| 3583 | "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" | ||
| 3584 | "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" | ||
| 3585 | "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" | ||
| 3586 | "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" | ||
| 3587 | "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" | ||
| 3588 | "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" | ||
| 3589 | "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" | ||
| 3590 | "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" | ||
| 3591 | "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" | ||
| 3592 | "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" | ||
| 3593 | "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" | ||
| 3594 | "\x69\x00\x74\x0B\xA2\x16\xAD\x44" | ||
| 3595 | "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" | ||
| 3596 | "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" | ||
| 3597 | "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" | ||
| 3598 | "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" | ||
| 3599 | "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" | ||
| 3600 | "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" | ||
| 3601 | "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" | ||
| 3602 | "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" | ||
| 3603 | "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" | ||
| 3604 | "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" | ||
| 3605 | "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" | ||
| 3606 | "\x58\xEF\x86\x1D\x91\x28\xBF\x33" | ||
| 3607 | "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" | ||
| 3608 | "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" | ||
| 3609 | "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" | ||
| 3610 | "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" | ||
| 3611 | "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" | ||
| 3612 | "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" | ||
| 3613 | "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" | ||
| 3614 | "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" | ||
| 3615 | "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" | ||
| 3616 | "\x86\x1D\xB4\x28\xBF\x56\xED\x61" | ||
| 3617 | "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" | ||
| 3618 | "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" | ||
| 3619 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7" | ||
| 3620 | "\x2B\xC2\x59", | ||
| 3621 | .ilen = 499, | ||
| 3028 | .result = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE" | 3622 | .result = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE" |
| 3029 | "\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30" | 3623 | "\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30" |
| 3030 | "\x26\x9B\x89\xA1\xEE\x43\xE0\x52" | 3624 | "\x26\x9B\x89\xA1\xEE\x43\xE0\x52" |
| @@ -3033,8 +3627,62 @@ static struct cipher_testvec tf_ctr_enc_tv_template[] = { | |||
| 3033 | "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" | 3627 | "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" |
| 3034 | "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" | 3628 | "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" |
| 3035 | "\x01\x41\x21\x12\x38\xAB\x52\x4F" | 3629 | "\x01\x41\x21\x12\x38\xAB\x52\x4F" |
| 3036 | "\xA8\x57\x20", | 3630 | "\xA8\x57\x20\xE0\x21\x6A\x17\x0D" |
| 3037 | .rlen = 67, | 3631 | "\x0E\xF9\x8E\x49\x42\x00\x3C\x94" |
| 3632 | "\x14\xC0\xD0\x8D\x8A\x98\xEB\x29" | ||
| 3633 | "\xEC\xAE\x96\x44\xC0\x3C\x48\xDC" | ||
| 3634 | "\x29\x35\x25\x2F\xE7\x11\x6C\x68" | ||
| 3635 | "\xC8\x67\x0A\x2F\xF4\x07\xBE\xF9" | ||
| 3636 | "\x2C\x31\x87\x40\xAB\xB2\xB6\xFA" | ||
| 3637 | "\xD2\xC9\x6D\x5C\x50\xE9\xE6\x7E" | ||
| 3638 | "\xE3\x0A\xD2\xD5\x6D\x8D\x64\x9E" | ||
| 3639 | "\x70\xCE\x03\x76\xDD\xE0\xF0\x8C" | ||
| 3640 | "\x84\x86\x8B\x6A\xFE\xC7\xF9\x69" | ||
| 3641 | "\x2E\xFE\xFC\xC2\xC4\x1A\x55\x58" | ||
| 3642 | "\xB3\xBE\xE2\x7E\xED\x39\x42\x6C" | ||
| 3643 | "\xB4\x42\x97\x9A\xEC\xE1\x0A\x06" | ||
| 3644 | "\x02\xC5\x03\x9D\xC4\x48\x15\x66" | ||
| 3645 | "\x35\x6A\xC2\xC9\xA2\x26\x30\xBB" | ||
| 3646 | "\xDB\x2D\xC8\x08\x2B\xA0\x29\x1A" | ||
| 3647 | "\x23\x61\x48\xEA\x80\x04\x27\xAA" | ||
| 3648 | "\x69\x49\xE8\xE8\x4A\x83\x6B\x5A" | ||
| 3649 | "\xCA\x7C\xD3\xB1\xB5\x0B\xCC\x23" | ||
| 3650 | "\x74\x1F\xA9\x87\xCD\xED\xC0\x2D" | ||
| 3651 | "\xBF\xEB\xCF\x16\x2D\x2A\x2E\x1D" | ||
| 3652 | "\x96\xBA\x36\x11\x45\x41\xDA\xCE" | ||
| 3653 | "\xA4\x48\x80\x8B\x06\xF4\x98\x89" | ||
| 3654 | "\x8B\x23\x08\x53\xF4\xD4\x5A\x24" | ||
| 3655 | "\x8B\xF8\x43\x73\xD1\xEE\xC4\xB0" | ||
| 3656 | "\xF8\xFE\x09\x0C\x75\x05\x38\x0B" | ||
| 3657 | "\x7C\x81\xDE\x9D\xE4\x61\x37\x63" | ||
| 3658 | "\x63\xAD\x12\xD2\x04\xB9\xCE\x45" | ||
| 3659 | "\x5A\x1A\x6E\xB3\x78\x2A\xA4\x74" | ||
| 3660 | "\x86\xD0\xE3\xFF\xDA\x38\x9C\xB5" | ||
| 3661 | "\xB8\xB1\xDB\x38\x2F\xC5\x6A\xB4" | ||
| 3662 | "\xEB\x6E\x96\xE8\x43\x80\xB5\x51" | ||
| 3663 | "\x61\x2D\x48\xAA\x07\x65\x11\x8C" | ||
| 3664 | "\x48\xE3\x90\x7E\x78\x3A\xEC\x97" | ||
| 3665 | "\x05\x3D\x84\xE7\x90\x2B\xAA\xBD" | ||
| 3666 | "\x83\x29\x0E\x1A\x81\x73\x7B\xE0" | ||
| 3667 | "\x7A\x01\x4A\x37\x3B\x77\x7F\x8D" | ||
| 3668 | "\x49\xA4\x2F\x6E\xBE\x68\x99\x08" | ||
| 3669 | "\x99\xAA\x4C\x12\x04\xAE\x1F\x77" | ||
| 3670 | "\x35\x88\xF1\x65\x06\x0A\x0B\x4D" | ||
| 3671 | "\x47\xF9\x50\x38\x5D\x71\xF9\x6E" | ||
| 3672 | "\xDE\xEC\x61\x35\x2C\x4C\x96\x50" | ||
| 3673 | "\xE8\x28\x93\x9C\x7E\x01\xC6\x04" | ||
| 3674 | "\xB2\xD6\xBC\x6C\x17\xEB\xC1\x7D" | ||
| 3675 | "\x11\xE9\x43\x83\x76\xAA\x53\x37" | ||
| 3676 | "\x0C\x1D\x39\x89\x53\x72\x09\x7E" | ||
| 3677 | "\xD9\x85\x16\x04\xA5\x2C\x05\x6F" | ||
| 3678 | "\x17\x0C\x6E\x66\xAA\x84\xA7\xD9" | ||
| 3679 | "\xE2\xD9\xC4\xEB\x43\x3E\xB1\x8D" | ||
| 3680 | "\x7C\x36\xC7\x71\x70\x9C\x10\xD8" | ||
| 3681 | "\xE8\x47\x2A\x4D\xFD\xA1\xBC\xE3" | ||
| 3682 | "\xB9\x32\xE2\xC1\x82\xAC\xFE\xCC" | ||
| 3683 | "\xC5\xC9\x7F\x9E\xCF\x33\x7A\xDF" | ||
| 3684 | "\x6C\x82\x9D", | ||
| 3685 | .rlen = 499, | ||
| 3038 | }, | 3686 | }, |
| 3039 | }; | 3687 | }; |
| 3040 | 3688 | ||
| @@ -3054,8 +3702,62 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = { | |||
| 3054 | "\x9F\x8D\x40\x9F\x24\xFD\x92\xA0" | 3702 | "\x9F\x8D\x40\x9F\x24\xFD\x92\xA0" |
| 3055 | "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" | 3703 | "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" |
| 3056 | "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" | 3704 | "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" |
| 3057 | "\x01\x41\x21\x12\x38\xAB\x52\x4F", | 3705 | "\x01\x41\x21\x12\x38\xAB\x52\x4F" |
| 3058 | .ilen = 64, | 3706 | "\xA8\x57\x20\xE0\x21\x6A\x17\x0D" |
| 3707 | "\x0E\xF9\x8E\x49\x42\x00\x3C\x94" | ||
| 3708 | "\x14\xC0\xD0\x8D\x8A\x98\xEB\x29" | ||
| 3709 | "\xEC\xAE\x96\x44\xC0\x3C\x48\xDC" | ||
| 3710 | "\x29\x35\x25\x2F\xE7\x11\x6C\x68" | ||
| 3711 | "\xC8\x67\x0A\x2F\xF4\x07\xBE\xF9" | ||
| 3712 | "\x2C\x31\x87\x40\xAB\xB2\xB6\xFA" | ||
| 3713 | "\xD2\xC9\x6D\x5C\x50\xE9\xE6\x7E" | ||
| 3714 | "\xE3\x0A\xD2\xD5\x6D\x8D\x64\x9E" | ||
| 3715 | "\x70\xCE\x03\x76\xDD\xE0\xF0\x8C" | ||
| 3716 | "\x84\x86\x8B\x6A\xFE\xC7\xF9\x69" | ||
| 3717 | "\x2E\xFE\xFC\xC2\xC4\x1A\x55\x58" | ||
| 3718 | "\xB3\xBE\xE2\x7E\xED\x39\x42\x6C" | ||
| 3719 | "\xB4\x42\x97\x9A\xEC\xE1\x0A\x06" | ||
| 3720 | "\x02\xC5\x03\x9D\xC4\x48\x15\x66" | ||
| 3721 | "\x35\x6A\xC2\xC9\xA2\x26\x30\xBB" | ||
| 3722 | "\xDB\x2D\xC8\x08\x2B\xA0\x29\x1A" | ||
| 3723 | "\x23\x61\x48\xEA\x80\x04\x27\xAA" | ||
| 3724 | "\x69\x49\xE8\xE8\x4A\x83\x6B\x5A" | ||
| 3725 | "\xCA\x7C\xD3\xB1\xB5\x0B\xCC\x23" | ||
| 3726 | "\x74\x1F\xA9\x87\xCD\xED\xC0\x2D" | ||
| 3727 | "\xBF\xEB\xCF\x16\x2D\x2A\x2E\x1D" | ||
| 3728 | "\x96\xBA\x36\x11\x45\x41\xDA\xCE" | ||
| 3729 | "\xA4\x48\x80\x8B\x06\xF4\x98\x89" | ||
| 3730 | "\x8B\x23\x08\x53\xF4\xD4\x5A\x24" | ||
| 3731 | "\x8B\xF8\x43\x73\xD1\xEE\xC4\xB0" | ||
| 3732 | "\xF8\xFE\x09\x0C\x75\x05\x38\x0B" | ||
| 3733 | "\x7C\x81\xDE\x9D\xE4\x61\x37\x63" | ||
| 3734 | "\x63\xAD\x12\xD2\x04\xB9\xCE\x45" | ||
| 3735 | "\x5A\x1A\x6E\xB3\x78\x2A\xA4\x74" | ||
| 3736 | "\x86\xD0\xE3\xFF\xDA\x38\x9C\xB5" | ||
| 3737 | "\xB8\xB1\xDB\x38\x2F\xC5\x6A\xB4" | ||
| 3738 | "\xEB\x6E\x96\xE8\x43\x80\xB5\x51" | ||
| 3739 | "\x61\x2D\x48\xAA\x07\x65\x11\x8C" | ||
| 3740 | "\x48\xE3\x90\x7E\x78\x3A\xEC\x97" | ||
| 3741 | "\x05\x3D\x84\xE7\x90\x2B\xAA\xBD" | ||
| 3742 | "\x83\x29\x0E\x1A\x81\x73\x7B\xE0" | ||
| 3743 | "\x7A\x01\x4A\x37\x3B\x77\x7F\x8D" | ||
| 3744 | "\x49\xA4\x2F\x6E\xBE\x68\x99\x08" | ||
| 3745 | "\x99\xAA\x4C\x12\x04\xAE\x1F\x77" | ||
| 3746 | "\x35\x88\xF1\x65\x06\x0A\x0B\x4D" | ||
| 3747 | "\x47\xF9\x50\x38\x5D\x71\xF9\x6E" | ||
| 3748 | "\xDE\xEC\x61\x35\x2C\x4C\x96\x50" | ||
| 3749 | "\xE8\x28\x93\x9C\x7E\x01\xC6\x04" | ||
| 3750 | "\xB2\xD6\xBC\x6C\x17\xEB\xC1\x7D" | ||
| 3751 | "\x11\xE9\x43\x83\x76\xAA\x53\x37" | ||
| 3752 | "\x0C\x1D\x39\x89\x53\x72\x09\x7E" | ||
| 3753 | "\xD9\x85\x16\x04\xA5\x2C\x05\x6F" | ||
| 3754 | "\x17\x0C\x6E\x66\xAA\x84\xA7\xD9" | ||
| 3755 | "\xE2\xD9\xC4\xEB\x43\x3E\xB1\x8D" | ||
| 3756 | "\x7C\x36\xC7\x71\x70\x9C\x10\xD8" | ||
| 3757 | "\xE8\x47\x2A\x4D\xFD\xA1\xBC\xE3" | ||
| 3758 | "\xB9\x32\xE2\xC1\x82\xAC\xFE\xCC" | ||
| 3759 | "\xC5\xC9\x7F\x9E\xCF\x33\x7A\xDF", | ||
| 3760 | .ilen = 496, | ||
| 3059 | .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" | 3761 | .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" |
| 3060 | "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" | 3762 | "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" |
| 3061 | "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" | 3763 | "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" |
| @@ -3063,8 +3765,62 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = { | |||
| 3063 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" | 3765 | "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" |
| 3064 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" | 3766 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" |
| 3065 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" | 3767 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" |
| 3066 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C", | 3768 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" |
| 3067 | .rlen = 64, | 3769 | "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" |
| 3770 | "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" | ||
| 3771 | "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" | ||
| 3772 | "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" | ||
| 3773 | "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" | ||
| 3774 | "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" | ||
| 3775 | "\x29\xC0\x57\xEE\x62\xF9\x90\x04" | ||
| 3776 | "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" | ||
| 3777 | "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" | ||
| 3778 | "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" | ||
| 3779 | "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" | ||
| 3780 | "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" | ||
| 3781 | "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" | ||
| 3782 | "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" | ||
| 3783 | "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" | ||
| 3784 | "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" | ||
| 3785 | "\x57\xEE\x85\x1C\x90\x27\xBE\x32" | ||
| 3786 | "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" | ||
| 3787 | "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" | ||
| 3788 | "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" | ||
| 3789 | "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" | ||
| 3790 | "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" | ||
| 3791 | "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" | ||
| 3792 | "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" | ||
| 3793 | "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" | ||
| 3794 | "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" | ||
| 3795 | "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" | ||
| 3796 | "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" | ||
| 3797 | "\x69\x00\x74\x0B\xA2\x16\xAD\x44" | ||
| 3798 | "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" | ||
| 3799 | "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" | ||
| 3800 | "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" | ||
| 3801 | "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" | ||
| 3802 | "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" | ||
| 3803 | "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" | ||
| 3804 | "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" | ||
| 3805 | "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" | ||
| 3806 | "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" | ||
| 3807 | "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" | ||
| 3808 | "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" | ||
| 3809 | "\x58\xEF\x86\x1D\x91\x28\xBF\x33" | ||
| 3810 | "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" | ||
| 3811 | "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" | ||
| 3812 | "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" | ||
| 3813 | "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" | ||
| 3814 | "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" | ||
| 3815 | "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" | ||
| 3816 | "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" | ||
| 3817 | "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" | ||
| 3818 | "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" | ||
| 3819 | "\x86\x1D\xB4\x28\xBF\x56\xED\x61" | ||
| 3820 | "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" | ||
| 3821 | "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" | ||
| 3822 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", | ||
| 3823 | .rlen = 496, | ||
| 3068 | }, { /* Generated with Crypto++ */ | 3824 | }, { /* Generated with Crypto++ */ |
| 3069 | .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" | 3825 | .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" |
| 3070 | "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" | 3826 | "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" |
| @@ -3081,8 +3837,62 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = { | |||
| 3081 | "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" | 3837 | "\xBC\x8F\x35\xDD\x67\x38\xD8\xAA" |
| 3082 | "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" | 3838 | "\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60" |
| 3083 | "\x01\x41\x21\x12\x38\xAB\x52\x4F" | 3839 | "\x01\x41\x21\x12\x38\xAB\x52\x4F" |
| 3084 | "\xA8\x57\x20", | 3840 | "\xA8\x57\x20\xE0\x21\x6A\x17\x0D" |
| 3085 | .ilen = 67, | 3841 | "\x0E\xF9\x8E\x49\x42\x00\x3C\x94" |
| 3842 | "\x14\xC0\xD0\x8D\x8A\x98\xEB\x29" | ||
| 3843 | "\xEC\xAE\x96\x44\xC0\x3C\x48\xDC" | ||
| 3844 | "\x29\x35\x25\x2F\xE7\x11\x6C\x68" | ||
| 3845 | "\xC8\x67\x0A\x2F\xF4\x07\xBE\xF9" | ||
| 3846 | "\x2C\x31\x87\x40\xAB\xB2\xB6\xFA" | ||
| 3847 | "\xD2\xC9\x6D\x5C\x50\xE9\xE6\x7E" | ||
| 3848 | "\xE3\x0A\xD2\xD5\x6D\x8D\x64\x9E" | ||
| 3849 | "\x70\xCE\x03\x76\xDD\xE0\xF0\x8C" | ||
| 3850 | "\x84\x86\x8B\x6A\xFE\xC7\xF9\x69" | ||
| 3851 | "\x2E\xFE\xFC\xC2\xC4\x1A\x55\x58" | ||
| 3852 | "\xB3\xBE\xE2\x7E\xED\x39\x42\x6C" | ||
| 3853 | "\xB4\x42\x97\x9A\xEC\xE1\x0A\x06" | ||
| 3854 | "\x02\xC5\x03\x9D\xC4\x48\x15\x66" | ||
| 3855 | "\x35\x6A\xC2\xC9\xA2\x26\x30\xBB" | ||
| 3856 | "\xDB\x2D\xC8\x08\x2B\xA0\x29\x1A" | ||
| 3857 | "\x23\x61\x48\xEA\x80\x04\x27\xAA" | ||
| 3858 | "\x69\x49\xE8\xE8\x4A\x83\x6B\x5A" | ||
| 3859 | "\xCA\x7C\xD3\xB1\xB5\x0B\xCC\x23" | ||
| 3860 | "\x74\x1F\xA9\x87\xCD\xED\xC0\x2D" | ||
| 3861 | "\xBF\xEB\xCF\x16\x2D\x2A\x2E\x1D" | ||
| 3862 | "\x96\xBA\x36\x11\x45\x41\xDA\xCE" | ||
| 3863 | "\xA4\x48\x80\x8B\x06\xF4\x98\x89" | ||
| 3864 | "\x8B\x23\x08\x53\xF4\xD4\x5A\x24" | ||
| 3865 | "\x8B\xF8\x43\x73\xD1\xEE\xC4\xB0" | ||
| 3866 | "\xF8\xFE\x09\x0C\x75\x05\x38\x0B" | ||
| 3867 | "\x7C\x81\xDE\x9D\xE4\x61\x37\x63" | ||
| 3868 | "\x63\xAD\x12\xD2\x04\xB9\xCE\x45" | ||
| 3869 | "\x5A\x1A\x6E\xB3\x78\x2A\xA4\x74" | ||
| 3870 | "\x86\xD0\xE3\xFF\xDA\x38\x9C\xB5" | ||
| 3871 | "\xB8\xB1\xDB\x38\x2F\xC5\x6A\xB4" | ||
| 3872 | "\xEB\x6E\x96\xE8\x43\x80\xB5\x51" | ||
| 3873 | "\x61\x2D\x48\xAA\x07\x65\x11\x8C" | ||
| 3874 | "\x48\xE3\x90\x7E\x78\x3A\xEC\x97" | ||
| 3875 | "\x05\x3D\x84\xE7\x90\x2B\xAA\xBD" | ||
| 3876 | "\x83\x29\x0E\x1A\x81\x73\x7B\xE0" | ||
| 3877 | "\x7A\x01\x4A\x37\x3B\x77\x7F\x8D" | ||
| 3878 | "\x49\xA4\x2F\x6E\xBE\x68\x99\x08" | ||
| 3879 | "\x99\xAA\x4C\x12\x04\xAE\x1F\x77" | ||
| 3880 | "\x35\x88\xF1\x65\x06\x0A\x0B\x4D" | ||
| 3881 | "\x47\xF9\x50\x38\x5D\x71\xF9\x6E" | ||
| 3882 | "\xDE\xEC\x61\x35\x2C\x4C\x96\x50" | ||
| 3883 | "\xE8\x28\x93\x9C\x7E\x01\xC6\x04" | ||
| 3884 | "\xB2\xD6\xBC\x6C\x17\xEB\xC1\x7D" | ||
| 3885 | "\x11\xE9\x43\x83\x76\xAA\x53\x37" | ||
| 3886 | "\x0C\x1D\x39\x89\x53\x72\x09\x7E" | ||
| 3887 | "\xD9\x85\x16\x04\xA5\x2C\x05\x6F" | ||
| 3888 | "\x17\x0C\x6E\x66\xAA\x84\xA7\xD9" | ||
| 3889 | "\xE2\xD9\xC4\xEB\x43\x3E\xB1\x8D" | ||
| 3890 | "\x7C\x36\xC7\x71\x70\x9C\x10\xD8" | ||
| 3891 | "\xE8\x47\x2A\x4D\xFD\xA1\xBC\xE3" | ||
| 3892 | "\xB9\x32\xE2\xC1\x82\xAC\xFE\xCC" | ||
| 3893 | "\xC5\xC9\x7F\x9E\xCF\x33\x7A\xDF" | ||
| 3894 | "\x6C\x82\x9D", | ||
| 3895 | .ilen = 499, | ||
| 3086 | .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" | 3896 | .result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" |
| 3087 | "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" | 3897 | "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" |
| 3088 | "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" | 3898 | "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" |
| @@ -3091,8 +3901,62 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = { | |||
| 3091 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" | 3901 | "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" |
| 3092 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" | 3902 | "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" |
| 3093 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" | 3903 | "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" |
| 3094 | "\xC3\x37\xCE", | 3904 | "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" |
| 3095 | .rlen = 67, | 3905 | "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" |
| 3906 | "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" | ||
| 3907 | "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" | ||
| 3908 | "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" | ||
| 3909 | "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" | ||
| 3910 | "\x29\xC0\x57\xEE\x62\xF9\x90\x04" | ||
| 3911 | "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" | ||
| 3912 | "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" | ||
| 3913 | "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" | ||
| 3914 | "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" | ||
| 3915 | "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" | ||
| 3916 | "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" | ||
| 3917 | "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" | ||
| 3918 | "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" | ||
| 3919 | "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" | ||
| 3920 | "\x57\xEE\x85\x1C\x90\x27\xBE\x32" | ||
| 3921 | "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" | ||
| 3922 | "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" | ||
| 3923 | "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" | ||
| 3924 | "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" | ||
| 3925 | "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" | ||
| 3926 | "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" | ||
| 3927 | "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" | ||
| 3928 | "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" | ||
| 3929 | "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" | ||
| 3930 | "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" | ||
| 3931 | "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" | ||
| 3932 | "\x69\x00\x74\x0B\xA2\x16\xAD\x44" | ||
| 3933 | "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" | ||
| 3934 | "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" | ||
| 3935 | "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" | ||
| 3936 | "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" | ||
| 3937 | "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" | ||
| 3938 | "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" | ||
| 3939 | "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" | ||
| 3940 | "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" | ||
| 3941 | "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" | ||
| 3942 | "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" | ||
| 3943 | "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" | ||
| 3944 | "\x58\xEF\x86\x1D\x91\x28\xBF\x33" | ||
| 3945 | "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" | ||
| 3946 | "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" | ||
| 3947 | "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" | ||
| 3948 | "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" | ||
| 3949 | "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" | ||
| 3950 | "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" | ||
| 3951 | "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" | ||
| 3952 | "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" | ||
| 3953 | "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" | ||
| 3954 | "\x86\x1D\xB4\x28\xBF\x56\xED\x61" | ||
| 3955 | "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" | ||
| 3956 | "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" | ||
| 3957 | "\xDC\x50\xE7\x7E\x15\x89\x20\xB7" | ||
| 3958 | "\x2B\xC2\x59", | ||
| 3959 | .rlen = 499, | ||
| 3096 | }, | 3960 | }, |
| 3097 | }; | 3961 | }; |
| 3098 | 3962 | ||
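A note on the enlarged Twofish CTR vectors above: 496 bytes is exactly 31 Twofish blocks, while 499 bytes leaves a 3-byte tail, so the templates now exercise both the multi-block path and the partial-final-block path of a CTR implementation (longer inputs like these are what parallelized cipher code needs to be tested against). For reference only — this is a generic textbook sketch, not the kernel's implementation — CTR mode with a caller-supplied block function:

#include <stddef.h>

typedef void (*block_fn)(unsigned char out[16], const unsigned char in[16],
			 const void *key);

/* Generic CTR sketch: XOR with E_K(counter); handles a partial last block. */
static void ctr_crypt(block_fn encrypt, const void *key, unsigned char ctr[16],
		      unsigned char *dst, const unsigned char *src, size_t len)
{
	unsigned char ks[16];
	size_t off, n, i;

	for (off = 0; off < len; off += 16) {
		encrypt(ks, ctr, key);			/* keystream block */
		n = len - off < 16 ? len - off : 16;	/* 3 for the 499-byte case */
		for (i = 0; i < n; i++)
			dst[off + i] = src[off + i] ^ ks[i];
		for (i = 16; i-- > 0; )			/* big-endian increment */
			if (++ctr[i])
				break;
	}
}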
| @@ -6111,6 +6975,9 @@ static struct cipher_testvec cast6_dec_tv_template[] = { | |||
| 6111 | #define AES_DEC_TEST_VECTORS 3 | 6975 | #define AES_DEC_TEST_VECTORS 3 |
| 6112 | #define AES_CBC_ENC_TEST_VECTORS 4 | 6976 | #define AES_CBC_ENC_TEST_VECTORS 4 |
| 6113 | #define AES_CBC_DEC_TEST_VECTORS 4 | 6977 | #define AES_CBC_DEC_TEST_VECTORS 4 |
| 6978 | #define HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS 7 | ||
| 6979 | #define HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS 7 | ||
| 6980 | #define HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS 7 | ||
| 6114 | #define AES_LRW_ENC_TEST_VECTORS 8 | 6981 | #define AES_LRW_ENC_TEST_VECTORS 8 |
| 6115 | #define AES_LRW_DEC_TEST_VECTORS 8 | 6982 | #define AES_LRW_DEC_TEST_VECTORS 8 |
| 6116 | #define AES_XTS_ENC_TEST_VECTORS 5 | 6983 | #define AES_XTS_ENC_TEST_VECTORS 5 |
| @@ -6368,6 +7235,837 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = { | |||
| 6368 | }, | 7235 | }, |
| 6369 | }; | 7236 | }; |
| 6370 | 7237 | ||
| 7238 | static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = { | ||
| 7239 | { /* RFC 3602 Case 1 */ | ||
| 7240 | #ifdef __LITTLE_ENDIAN | ||
| 7241 | .key = "\x08\x00" /* rta length */ | ||
| 7242 | "\x01\x00" /* rta type */ | ||
| 7243 | #else | ||
| 7244 | .key = "\x00\x08" /* rta length */ | ||
| 7245 | "\x00\x01" /* rta type */ | ||
| 7246 | #endif | ||
| 7247 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7248 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7249 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7250 | "\x00\x00\x00\x00" | ||
| 7251 | "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" | ||
| 7252 | "\x51\x2e\x03\xd5\x34\x12\x00\x06", | ||
| 7253 | .klen = 8 + 20 + 16, | ||
| 7254 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | ||
| 7255 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | ||
| 7256 | .input = "Single block msg", | ||
| 7257 | .ilen = 16, | ||
| 7258 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" | ||
| 7259 | "\x27\x08\x94\x2d\xbe\x77\x18\x1a" | ||
| 7260 | "\x1b\x13\xcb\xaf\x89\x5e\xe1\x2c" | ||
| 7261 | "\x13\xc5\x2e\xa3\xcc\xed\xdc\xb5" | ||
| 7262 | "\x03\x71\xa2\x06", | ||
| 7263 | .rlen = 16 + 20, | ||
| 7264 | }, { /* RFC 3602 Case 2 */ | ||
| 7265 | #ifdef __LITTLE_ENDIAN | ||
| 7266 | .key = "\x08\x00" /* rta length */ | ||
| 7267 | "\x01\x00" /* rta type */ | ||
| 7268 | #else | ||
| 7269 | .key = "\x00\x08" /* rta length */ | ||
| 7270 | "\x00\x01" /* rta type */ | ||
| 7271 | #endif | ||
| 7272 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7273 | "\x20\x21\x22\x23\x24\x25\x26\x27" | ||
| 7274 | "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" | ||
| 7275 | "\x30\x31\x32\x33" | ||
| 7276 | "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" | ||
| 7277 | "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", | ||
| 7278 | .klen = 8 + 20 + 16, | ||
| 7279 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | ||
| 7280 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | ||
| 7281 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 7282 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
| 7283 | "\x10\x11\x12\x13\x14\x15\x16\x17" | ||
| 7284 | "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", | ||
| 7285 | .ilen = 32, | ||
| 7286 | .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" | ||
| 7287 | "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" | ||
| 7288 | "\x75\x86\x60\x2d\x25\x3c\xff\xf9" | ||
| 7289 | "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" | ||
| 7290 | "\xad\x9b\x4c\x5c\x85\xe1\xda\xae" | ||
| 7291 | "\xee\x81\x4e\xd7\xdb\x74\xcf\x58" | ||
| 7292 | "\x65\x39\xf8\xde", | ||
| 7293 | .rlen = 32 + 20, | ||
| 7294 | }, { /* RFC 3602 Case 3 */ | ||
| 7295 | #ifdef __LITTLE_ENDIAN | ||
| 7296 | .key = "\x08\x00" /* rta length */ | ||
| 7297 | "\x01\x00" /* rta type */ | ||
| 7298 | #else | ||
| 7299 | .key = "\x00\x08" /* rta length */ | ||
| 7300 | "\x00\x01" /* rta type */ | ||
| 7301 | #endif | ||
| 7302 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7303 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7304 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7305 | "\x22\x33\x44\x55" | ||
| 7306 | "\x6c\x3e\xa0\x47\x76\x30\xce\x21" | ||
| 7307 | "\xa2\xce\x33\x4a\xa7\x46\xc2\xcd", | ||
| 7308 | .klen = 8 + 20 + 16, | ||
| 7309 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | ||
| 7310 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | ||
| 7311 | .input = "This is a 48-byte message (exactly 3 AES blocks)", | ||
| 7312 | .ilen = 48, | ||
| 7313 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" | ||
| 7314 | "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" | ||
| 7315 | "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" | ||
| 7316 | "\x50\x69\x39\x27\x67\x72\xf8\xd5" | ||
| 7317 | "\x02\x1c\x19\x21\x6b\xad\x52\x5c" | ||
| 7318 | "\x85\x79\x69\x5d\x83\xba\x26\x84" | ||
| 7319 | "\xc2\xec\x0c\xf8\x7f\x05\xba\xca" | ||
| 7320 | "\xff\xee\x4c\xd0\x93\xe6\x36\x7f" | ||
| 7321 | "\x8d\x62\xf2\x1e", | ||
| 7322 | .rlen = 48 + 20, | ||
| 7323 | }, { /* RFC 3602 Case 4 */ | ||
| 7324 | #ifdef __LITTLE_ENDIAN | ||
| 7325 | .key = "\x08\x00" /* rta length */ | ||
| 7326 | "\x01\x00" /* rta type */ | ||
| 7327 | #else | ||
| 7328 | .key = "\x00\x08" /* rta length */ | ||
| 7329 | "\x00\x01" /* rta type */ | ||
| 7330 | #endif | ||
| 7331 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7332 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7333 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7334 | "\x22\x33\x44\x55" | ||
| 7335 | "\x56\xe4\x7a\x38\xc5\x59\x89\x74" | ||
| 7336 | "\xbc\x46\x90\x3d\xba\x29\x03\x49", | ||
| 7337 | .klen = 8 + 20 + 16, | ||
| 7338 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | ||
| 7339 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | ||
| 7340 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" | ||
| 7341 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" | ||
| 7342 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" | ||
| 7343 | "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" | ||
| 7344 | "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" | ||
| 7345 | "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" | ||
| 7346 | "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" | ||
| 7347 | "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", | ||
| 7348 | .ilen = 64, | ||
| 7349 | .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" | ||
| 7350 | "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" | ||
| 7351 | "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" | ||
| 7352 | "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" | ||
| 7353 | "\x35\x90\x7a\xa6\x32\xc3\xff\xdf" | ||
| 7354 | "\x86\x8b\xb7\xb2\x9d\x3d\x46\xad" | ||
| 7355 | "\x83\xce\x9f\x9a\x10\x2e\xe9\x9d" | ||
| 7356 | "\x49\xa5\x3e\x87\xf4\xc3\xda\x55" | ||
| 7357 | "\x1c\x45\x57\xa9\x56\xcb\xa9\x2d" | ||
| 7358 | "\x18\xac\xf1\xc7\x5d\xd1\xcd\x0d" | ||
| 7359 | "\x1d\xbe\xc6\xe9", | ||
| 7360 | .rlen = 64 + 20, | ||
| 7361 | }, { /* RFC 3602 Case 5 */ | ||
| 7362 | #ifdef __LITTLE_ENDIAN | ||
| 7363 | .key = "\x08\x00" /* rta length */ | ||
| 7364 | "\x01\x00" /* rta type */ | ||
| 7365 | #else | ||
| 7366 | .key = "\x00\x08" /* rta length */ | ||
| 7367 | "\x00\x01" /* rta type */ | ||
| 7368 | #endif | ||
| 7369 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7370 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7371 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7372 | "\x22\x33\x44\x55" | ||
| 7373 | "\x90\xd3\x82\xb4\x10\xee\xba\x7a" | ||
| 7374 | "\xd9\x38\xc4\x6c\xec\x1a\x82\xbf", | ||
| 7375 | .klen = 8 + 20 + 16, | ||
| 7376 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" | ||
| 7377 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | ||
| 7378 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | ||
| 7379 | .alen = 8, | ||
| 7380 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" | ||
| 7381 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" | ||
| 7382 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
| 7383 | "\x10\x11\x12\x13\x14\x15\x16\x17" | ||
| 7384 | "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" | ||
| 7385 | "\x20\x21\x22\x23\x24\x25\x26\x27" | ||
| 7386 | "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" | ||
| 7387 | "\x30\x31\x32\x33\x34\x35\x36\x37" | ||
| 7388 | "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
| 7389 | "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", | ||
| 7390 | .ilen = 80, | ||
| 7391 | .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" | ||
| 7392 | "\xa9\x45\x3e\x19\x4e\x12\x08\x49" | ||
| 7393 | "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" | ||
| 7394 | "\x33\x00\x13\xb4\x89\x8d\xc8\x56" | ||
| 7395 | "\xa4\x69\x9e\x52\x3a\x55\xdb\x08" | ||
| 7396 | "\x0b\x59\xec\x3a\x8e\x4b\x7e\x52" | ||
| 7397 | "\x77\x5b\x07\xd1\xdb\x34\xed\x9c" | ||
| 7398 | "\x53\x8a\xb5\x0c\x55\x1b\x87\x4a" | ||
| 7399 | "\xa2\x69\xad\xd0\x47\xad\x2d\x59" | ||
| 7400 | "\x13\xac\x19\xb7\xcf\xba\xd4\xa6" | ||
| 7401 | "\x58\xc6\x84\x75\xe4\xe9\x6b\x0c" | ||
| 7402 | "\xe1\xc5\x0b\x73\x4d\x82\x55\xa8" | ||
| 7403 | "\x85\xe1\x59\xf7", | ||
| 7404 | .rlen = 80 + 20, | ||
| 7405 | }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ | ||
| 7406 | #ifdef __LITTLE_ENDIAN | ||
| 7407 | .key = "\x08\x00" /* rta length */ | ||
| 7408 | "\x01\x00" /* rta type */ | ||
| 7409 | #else | ||
| 7410 | .key = "\x00\x08" /* rta length */ | ||
| 7411 | "\x00\x01" /* rta type */ | ||
| 7412 | #endif | ||
| 7413 | "\x00\x00\x00\x18" /* enc key length */ | ||
| 7414 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7415 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7416 | "\x22\x33\x44\x55" | ||
| 7417 | "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" | ||
| 7418 | "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" | ||
| 7419 | "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", | ||
| 7420 | .klen = 8 + 20 + 24, | ||
| 7421 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 7422 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 7423 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
| 7424 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
| 7425 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
| 7426 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
| 7427 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
| 7428 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
| 7429 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
| 7430 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
| 7431 | .ilen = 64, | ||
| 7432 | .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" | ||
| 7433 | "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" | ||
| 7434 | "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" | ||
| 7435 | "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" | ||
| 7436 | "\x57\x1b\x24\x20\x12\xfb\x7a\xe0" | ||
| 7437 | "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0" | ||
| 7438 | "\x08\xb0\xe2\x79\x88\x59\x88\x81" | ||
| 7439 | "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd" | ||
| 7440 | "\x73\xe3\x19\x3f\x8b\xc9\xc6\xf4" | ||
| 7441 | "\x5a\xf1\x5b\xa8\x98\x07\xc5\x36" | ||
| 7442 | "\x47\x4c\xfc\x36", | ||
| 7443 | .rlen = 64 + 20, | ||
| 7444 | }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ | ||
| 7445 | #ifdef __LITTLE_ENDIAN | ||
| 7446 | .key = "\x08\x00" /* rta length */ | ||
| 7447 | "\x01\x00" /* rta type */ | ||
| 7448 | #else | ||
| 7449 | .key = "\x00\x08" /* rta length */ | ||
| 7450 | "\x00\x01" /* rta type */ | ||
| 7451 | #endif | ||
| 7452 | "\x00\x00\x00\x20" /* enc key length */ | ||
| 7453 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7454 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7455 | "\x22\x33\x44\x55" | ||
| 7456 | "\x60\x3d\xeb\x10\x15\xca\x71\xbe" | ||
| 7457 | "\x2b\x73\xae\xf0\x85\x7d\x77\x81" | ||
| 7458 | "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" | ||
| 7459 | "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", | ||
| 7460 | .klen = 8 + 20 + 32, | ||
| 7461 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 7462 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 7463 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
| 7464 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
| 7465 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
| 7466 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
| 7467 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
| 7468 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
| 7469 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
| 7470 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
| 7471 | .ilen = 64, | ||
| 7472 | .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" | ||
| 7473 | "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" | ||
| 7474 | "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" | ||
| 7475 | "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" | ||
| 7476 | "\x39\xf2\x33\x69\xa9\xd9\xba\xcf" | ||
| 7477 | "\xa5\x30\xe2\x63\x04\x23\x14\x61" | ||
| 7478 | "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc" | ||
| 7479 | "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b" | ||
| 7480 | "\xa3\xe8\x9b\x17\xe3\xf4\x7f\xde" | ||
| 7481 | "\x1b\x9f\xc6\x81\x26\x43\x4a\x87" | ||
| 7482 | "\x51\xee\xd6\x4e", | ||
| 7483 | .rlen = 64 + 20, | ||
| 7484 | }, | ||
| 7485 | }; | ||
| 7486 | |||
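The .key blobs in this template (and in the SHA-256/SHA-512 variants below) are not raw keys: they follow the packed layout that an authenc() setkey parses — an 8-byte rtattr header whose 16-bit length/type fields are host-endian (hence the __LITTLE_ENDIAN conditionals), a 32-bit big-endian encryption-key length, then the HMAC key followed by the AES key. That is why .klen is spelled out as 8 + digest size + AES key size, and why .rlen is .ilen plus the digest size (the HMAC tag appended to the ciphertext). A minimal userspace sketch of the packing — the helper name is hypothetical and this is not the kernel's own code:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

/* Pack an authenc() key blob; buf must hold 8 + authkeylen + enckeylen. */
static size_t pack_authenc_key(uint8_t *buf,
			       const uint8_t *authkey, size_t authkeylen,
			       const uint8_t *enckey, size_t enckeylen)
{
	uint16_t rta_len = 8;	/* header plus the 32-bit length word */
	uint16_t rta_type = 1;	/* the "rta type" seen in the vectors */
	uint32_t belen = htonl((uint32_t)enckeylen);

	memcpy(buf, &rta_len, 2);	/* host-endian, as the #ifdef shows */
	memcpy(buf + 2, &rta_type, 2);
	memcpy(buf + 4, &belen, 4);	/* always big-endian in the blob */
	memcpy(buf + 8, authkey, authkeylen);
	memcpy(buf + 8 + authkeylen, enckey, enckeylen);

	return 8 + authkeylen + enckeylen;	/* matches .klen above */
}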
| 7487 | static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_template[] = { | ||
| 7488 | { /* RFC 3602 Case 1 */ | ||
| 7489 | #ifdef __LITTLE_ENDIAN | ||
| 7490 | .key = "\x08\x00" /* rta length */ | ||
| 7491 | "\x01\x00" /* rta type */ | ||
| 7492 | #else | ||
| 7493 | .key = "\x00\x08" /* rta length */ | ||
| 7494 | "\x00\x01" /* rta type */ | ||
| 7495 | #endif | ||
| 7496 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7497 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7498 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7499 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7500 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7501 | "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" | ||
| 7502 | "\x51\x2e\x03\xd5\x34\x12\x00\x06", | ||
| 7503 | .klen = 8 + 32 + 16, | ||
| 7504 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | ||
| 7505 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | ||
| 7506 | .input = "Single block msg", | ||
| 7507 | .ilen = 16, | ||
| 7508 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" | ||
| 7509 | "\x27\x08\x94\x2d\xbe\x77\x18\x1a" | ||
| 7510 | "\xcc\xde\x2d\x6a\xae\xf1\x0b\xcc" | ||
| 7511 | "\x38\x06\x38\x51\xb4\xb8\xf3\x5b" | ||
| 7512 | "\x5c\x34\xa6\xa3\x6e\x0b\x05\xe5" | ||
| 7513 | "\x6a\x6d\x44\xaa\x26\xa8\x44\xa5", | ||
| 7514 | .rlen = 16 + 32, | ||
| 7515 | }, { /* RFC 3602 Case 2 */ | ||
| 7516 | #ifdef __LITTLE_ENDIAN | ||
| 7517 | .key = "\x08\x00" /* rta length */ | ||
| 7518 | "\x01\x00" /* rta type */ | ||
| 7519 | #else | ||
| 7520 | .key = "\x00\x08" /* rta length */ | ||
| 7521 | "\x00\x01" /* rta type */ | ||
| 7522 | #endif | ||
| 7523 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7524 | "\x20\x21\x22\x23\x24\x25\x26\x27" | ||
| 7525 | "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" | ||
| 7526 | "\x30\x31\x32\x33\x34\x35\x36\x37" | ||
| 7527 | "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" | ||
| 7528 | "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" | ||
| 7529 | "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", | ||
| 7530 | .klen = 8 + 32 + 16, | ||
| 7531 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | ||
| 7532 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | ||
| 7533 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 7534 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
| 7535 | "\x10\x11\x12\x13\x14\x15\x16\x17" | ||
| 7536 | "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", | ||
| 7537 | .ilen = 32, | ||
| 7538 | .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" | ||
| 7539 | "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" | ||
| 7540 | "\x75\x86\x60\x2d\x25\x3c\xff\xf9" | ||
| 7541 | "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" | ||
| 7542 | "\xf5\x33\x53\xf3\x68\x85\x2a\x99" | ||
| 7543 | "\x0e\x06\x58\x8f\xba\xf6\x06\xda" | ||
| 7544 | "\x49\x69\x0d\x5b\xd4\x36\x06\x62" | ||
| 7545 | "\x35\x5e\x54\x58\x53\x4d\xdf\xbf", | ||
| 7546 | .rlen = 32 + 32, | ||
| 7547 | }, { /* RFC 3602 Case 3 */ | ||
| 7548 | #ifdef __LITTLE_ENDIAN | ||
| 7549 | .key = "\x08\x00" /* rta length */ | ||
| 7550 | "\x01\x00" /* rta type */ | ||
| 7551 | #else | ||
| 7552 | .key = "\x00\x08" /* rta length */ | ||
| 7553 | "\x00\x01" /* rta type */ | ||
| 7554 | #endif | ||
| 7555 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7556 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7557 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7558 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7559 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7560 | "\x6c\x3e\xa0\x47\x76\x30\xce\x21" | ||
| 7561 | "\xa2\xce\x33\x4a\xa7\x46\xc2\xcd", | ||
| 7562 | .klen = 8 + 32 + 16, | ||
| 7563 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | ||
| 7564 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | ||
| 7565 | .input = "This is a 48-byte message (exactly 3 AES blocks)", | ||
| 7566 | .ilen = 48, | ||
| 7567 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" | ||
| 7568 | "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" | ||
| 7569 | "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" | ||
| 7570 | "\x50\x69\x39\x27\x67\x72\xf8\xd5" | ||
| 7571 | "\x02\x1c\x19\x21\x6b\xad\x52\x5c" | ||
| 7572 | "\x85\x79\x69\x5d\x83\xba\x26\x84" | ||
| 7573 | "\x68\xb9\x3e\x90\x38\xa0\x88\x01" | ||
| 7574 | "\xe7\xc6\xce\x10\x31\x2f\x9b\x1d" | ||
| 7575 | "\x24\x78\xfb\xbe\x02\xe0\x4f\x40" | ||
| 7576 | "\x10\xbd\xaa\xc6\xa7\x79\xe0\x1a", | ||
| 7577 | .rlen = 48 + 32, | ||
| 7578 | }, { /* RFC 3602 Case 4 */ | ||
| 7579 | #ifdef __LITTLE_ENDIAN | ||
| 7580 | .key = "\x08\x00" /* rta length */ | ||
| 7581 | "\x01\x00" /* rta type */ | ||
| 7582 | #else | ||
| 7583 | .key = "\x00\x08" /* rta length */ | ||
| 7584 | "\x00\x01" /* rta type */ | ||
| 7585 | #endif | ||
| 7586 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7587 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7588 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7589 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7590 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7591 | "\x56\xe4\x7a\x38\xc5\x59\x89\x74" | ||
| 7592 | "\xbc\x46\x90\x3d\xba\x29\x03\x49", | ||
| 7593 | .klen = 8 + 32 + 16, | ||
| 7594 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | ||
| 7595 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | ||
| 7596 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" | ||
| 7597 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" | ||
| 7598 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" | ||
| 7599 | "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" | ||
| 7600 | "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" | ||
| 7601 | "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" | ||
| 7602 | "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" | ||
| 7603 | "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", | ||
| 7604 | .ilen = 64, | ||
| 7605 | .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" | ||
| 7606 | "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" | ||
| 7607 | "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" | ||
| 7608 | "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" | ||
| 7609 | "\x35\x90\x7a\xa6\x32\xc3\xff\xdf" | ||
| 7610 | "\x86\x8b\xb7\xb2\x9d\x3d\x46\xad" | ||
| 7611 | "\x83\xce\x9f\x9a\x10\x2e\xe9\x9d" | ||
| 7612 | "\x49\xa5\x3e\x87\xf4\xc3\xda\x55" | ||
| 7613 | "\x7a\x1b\xd4\x3c\xdb\x17\x95\xe2" | ||
| 7614 | "\xe0\x93\xec\xc9\x9f\xf7\xce\xd8" | ||
| 7615 | "\x3f\x54\xe2\x49\x39\xe3\x71\x25" | ||
| 7616 | "\x2b\x6c\xe9\x5d\xec\xec\x2b\x64", | ||
| 7617 | .rlen = 64 + 32, | ||
| 7618 | }, { /* RFC 3602 Case 5 */ | ||
| 7619 | #ifdef __LITTLE_ENDIAN | ||
| 7620 | .key = "\x08\x00" /* rta length */ | ||
| 7621 | "\x01\x00" /* rta type */ | ||
| 7622 | #else | ||
| 7623 | .key = "\x00\x08" /* rta length */ | ||
| 7624 | "\x00\x01" /* rta type */ | ||
| 7625 | #endif | ||
| 7626 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7627 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7628 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7629 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7630 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7631 | "\x90\xd3\x82\xb4\x10\xee\xba\x7a" | ||
| 7632 | "\xd9\x38\xc4\x6c\xec\x1a\x82\xbf", | ||
| 7633 | .klen = 8 + 32 + 16, | ||
| 7634 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" | ||
| 7635 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | ||
| 7636 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | ||
| 7637 | .alen = 8, | ||
| 7638 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" | ||
| 7639 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" | ||
| 7640 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
| 7641 | "\x10\x11\x12\x13\x14\x15\x16\x17" | ||
| 7642 | "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" | ||
| 7643 | "\x20\x21\x22\x23\x24\x25\x26\x27" | ||
| 7644 | "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" | ||
| 7645 | "\x30\x31\x32\x33\x34\x35\x36\x37" | ||
| 7646 | "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
| 7647 | "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", | ||
| 7648 | .ilen = 80, | ||
| 7649 | .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" | ||
| 7650 | "\xa9\x45\x3e\x19\x4e\x12\x08\x49" | ||
| 7651 | "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" | ||
| 7652 | "\x33\x00\x13\xb4\x89\x8d\xc8\x56" | ||
| 7653 | "\xa4\x69\x9e\x52\x3a\x55\xdb\x08" | ||
| 7654 | "\x0b\x59\xec\x3a\x8e\x4b\x7e\x52" | ||
| 7655 | "\x77\x5b\x07\xd1\xdb\x34\xed\x9c" | ||
| 7656 | "\x53\x8a\xb5\x0c\x55\x1b\x87\x4a" | ||
| 7657 | "\xa2\x69\xad\xd0\x47\xad\x2d\x59" | ||
| 7658 | "\x13\xac\x19\xb7\xcf\xba\xd4\xa6" | ||
| 7659 | "\xbb\xd4\x0f\xbe\xa3\x3b\x4c\xb8" | ||
| 7660 | "\x3a\xd2\xe1\x03\x86\xa5\x59\xb7" | ||
| 7661 | "\x73\xc3\x46\x20\x2c\xb1\xef\x68" | ||
| 7662 | "\xbb\x8a\x32\x7e\x12\x8c\x69\xcf", | ||
| 7663 | .rlen = 80 + 32, | ||
| 7664 | }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ | ||
| 7665 | #ifdef __LITTLE_ENDIAN | ||
| 7666 | .key = "\x08\x00" /* rta length */ | ||
| 7667 | "\x01\x00" /* rta type */ | ||
| 7668 | #else | ||
| 7669 | .key = "\x00\x08" /* rta length */ | ||
| 7670 | "\x00\x01" /* rta type */ | ||
| 7671 | #endif | ||
| 7672 | "\x00\x00\x00\x18" /* enc key length */ | ||
| 7673 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7674 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7675 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7676 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7677 | "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" | ||
| 7678 | "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" | ||
| 7679 | "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", | ||
| 7680 | .klen = 8 + 32 + 24, | ||
| 7681 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 7682 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 7683 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
| 7684 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
| 7685 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
| 7686 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
| 7687 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
| 7688 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
| 7689 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
| 7690 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
| 7691 | .ilen = 64, | ||
| 7692 | .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" | ||
| 7693 | "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" | ||
| 7694 | "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" | ||
| 7695 | "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" | ||
| 7696 | "\x57\x1b\x24\x20\x12\xfb\x7a\xe0" | ||
| 7697 | "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0" | ||
| 7698 | "\x08\xb0\xe2\x79\x88\x59\x88\x81" | ||
| 7699 | "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd" | ||
| 7700 | "\x2f\xee\x5f\xdb\x66\xfe\x79\x09" | ||
| 7701 | "\x61\x81\x31\xea\x5b\x3d\x8e\xfb" | ||
| 7702 | "\xca\x71\x85\x93\xf7\x85\x55\x8b" | ||
| 7703 | "\x7a\xe4\x94\xca\x8b\xba\x19\x33", | ||
| 7704 | .rlen = 64 + 32, | ||
| 7705 | }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ | ||
| 7706 | #ifdef __LITTLE_ENDIAN | ||
| 7707 | .key = "\x08\x00" /* rta length */ | ||
| 7708 | "\x01\x00" /* rta type */ | ||
| 7709 | #else | ||
| 7710 | .key = "\x00\x08" /* rta length */ | ||
| 7711 | "\x00\x01" /* rta type */ | ||
| 7712 | #endif | ||
| 7713 | "\x00\x00\x00\x20" /* enc key length */ | ||
| 7714 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7715 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7716 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7717 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7718 | "\x60\x3d\xeb\x10\x15\xca\x71\xbe" | ||
| 7719 | "\x2b\x73\xae\xf0\x85\x7d\x77\x81" | ||
| 7720 | "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" | ||
| 7721 | "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", | ||
| 7722 | .klen = 8 + 32 + 32, | ||
| 7723 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 7724 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 7725 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
| 7726 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
| 7727 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
| 7728 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
| 7729 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
| 7730 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
| 7731 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
| 7732 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
| 7733 | .ilen = 64, | ||
| 7734 | .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" | ||
| 7735 | "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" | ||
| 7736 | "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" | ||
| 7737 | "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" | ||
| 7738 | "\x39\xf2\x33\x69\xa9\xd9\xba\xcf" | ||
| 7739 | "\xa5\x30\xe2\x63\x04\x23\x14\x61" | ||
| 7740 | "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc" | ||
| 7741 | "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b" | ||
| 7742 | "\x24\x29\xed\xc2\x31\x49\xdb\xb1" | ||
| 7743 | "\x8f\x74\xbd\x17\x92\x03\xbe\x8f" | ||
| 7744 | "\xf3\x61\xde\x1c\xe9\xdb\xcd\xd0" | ||
| 7745 | "\xcc\xce\xe9\x85\x57\xcf\x6f\x5f", | ||
| 7746 | .rlen = 64 + 32, | ||
| 7747 | }, | ||
| 7748 | }; | ||
| 7749 | |||
| 7750 | static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_template[] = { | ||
| 7751 | { /* RFC 3602 Case 1 */ | ||
| 7752 | #ifdef __LITTLE_ENDIAN | ||
| 7753 | .key = "\x08\x00" /* rta length */ | ||
| 7754 | "\x01\x00" /* rta type */ | ||
| 7755 | #else | ||
| 7756 | .key = "\x00\x08" /* rta length */ | ||
| 7757 | "\x00\x01" /* rta type */ | ||
| 7758 | #endif | ||
| 7759 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7760 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7761 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7762 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7763 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7764 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7765 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7766 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7767 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 7768 | "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" | ||
| 7769 | "\x51\x2e\x03\xd5\x34\x12\x00\x06", | ||
| 7770 | .klen = 8 + 64 + 16, | ||
| 7771 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | ||
| 7772 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | ||
| 7773 | .input = "Single block msg", | ||
| 7774 | .ilen = 16, | ||
| 7775 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" | ||
| 7776 | "\x27\x08\x94\x2d\xbe\x77\x18\x1a" | ||
| 7777 | "\x3f\xdc\xad\x90\x03\x63\x5e\x68" | ||
| 7778 | "\xc3\x13\xdd\xa4\x5c\x4d\x54\xa7" | ||
| 7779 | "\x19\x6e\x03\x75\x2b\xa1\x62\xce" | ||
| 7780 | "\xe0\xc6\x96\x75\xb2\x14\xca\x96" | ||
| 7781 | "\xec\xbd\x50\x08\x07\x64\x1a\x49" | ||
| 7782 | "\xe8\x9a\x7c\x06\x3d\xcb\xff\xb2" | ||
| 7783 | "\xfa\x20\x89\xdd\x9c\xac\x9e\x16" | ||
| 7784 | "\x18\x8a\xa0\x6d\x01\x6c\xa3\x3a", | ||
| 7785 | .rlen = 16 + 64, | ||
| 7786 | }, { /* RFC 3602 Case 2 */ | ||
| 7787 | #ifdef __LITTLE_ENDIAN | ||
| 7788 | .key = "\x08\x00" /* rta length */ | ||
| 7789 | "\x01\x00" /* rta type */ | ||
| 7790 | #else | ||
| 7791 | .key = "\x00\x08" /* rta length */ | ||
| 7792 | "\x00\x01" /* rta type */ | ||
| 7793 | #endif | ||
| 7794 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7795 | "\x20\x21\x22\x23\x24\x25\x26\x27" | ||
| 7796 | "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" | ||
| 7797 | "\x30\x31\x32\x33\x34\x35\x36\x37" | ||
| 7798 | "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" | ||
| 7799 | "\x40\x41\x42\x43\x44\x45\x46\x47" | ||
| 7800 | "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" | ||
| 7801 | "\x50\x51\x52\x53\x54\x55\x56\x57" | ||
| 7802 | "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" | ||
| 7803 | "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" | ||
| 7804 | "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", | ||
| 7805 | .klen = 8 + 64 + 16, | ||
| 7806 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | ||
| 7807 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | ||
| 7808 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 7809 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
| 7810 | "\x10\x11\x12\x13\x14\x15\x16\x17" | ||
| 7811 | "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", | ||
| 7812 | .ilen = 32, | ||
| 7813 | .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" | ||
| 7814 | "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" | ||
| 7815 | "\x75\x86\x60\x2d\x25\x3c\xff\xf9" | ||
| 7816 | "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" | ||
| 7817 | "\xda\xb2\x0c\xb2\x26\xc4\xd5\xef" | ||
| 7818 | "\x60\x38\xa4\x5e\x9a\x8c\x1b\x41" | ||
| 7819 | "\x03\x9f\xc4\x64\x7f\x01\x42\x9b" | ||
| 7820 | "\x0e\x1b\xea\xef\xbc\x88\x19\x5e" | ||
| 7821 | "\x31\x7e\xc2\x95\xfc\x09\x32\x0a" | ||
| 7822 | "\x46\x32\x7c\x41\x9c\x59\x3e\xe9" | ||
| 7823 | "\x8f\x9f\xd4\x31\xd6\x22\xbd\xf8" | ||
| 7824 | "\xf7\x0a\x94\xe5\xa9\xc3\xf6\x9d", | ||
| 7825 | .rlen = 32 + 64, | ||
| 7826 | }, { /* RFC 3602 Case 3 */ | ||
| 7827 | #ifdef __LITTLE_ENDIAN | ||
| 7828 | .key = "\x08\x00" /* rta length */ | ||
| 7829 | "\x01\x00" /* rta type */ | ||
| 7830 | #else | ||
| 7831 | .key = "\x00\x08" /* rta length */ | ||
| 7832 | "\x00\x01" /* rta type */ | ||
| 7833 | #endif | ||
| 7834 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7835 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7836 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7837 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7838 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7839 | "\x33\x44\x55\x66\x77\x88\x99\xaa" | ||
| 7840 | "\xbb\xcc\xdd\xee\xff\x11\x22\x33" | ||
| 7841 | "\x44\x55\x66\x77\x88\x99\xaa\xbb" | ||
| 7842 | "\xcc\xdd\xee\xff\x11\x22\x33\x44" | ||
| 7843 | "\x6c\x3e\xa0\x47\x76\x30\xce\x21" | ||
| 7844 | "\xa2\xce\x33\x4a\xa7\x46\xc2\xcd", | ||
| 7845 | .klen = 8 + 64 + 16, | ||
| 7846 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | ||
| 7847 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | ||
| 7848 | .input = "This is a 48-byte message (exactly 3 AES blocks)", | ||
| 7849 | .ilen = 48, | ||
| 7850 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" | ||
| 7851 | "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" | ||
| 7852 | "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" | ||
| 7853 | "\x50\x69\x39\x27\x67\x72\xf8\xd5" | ||
| 7854 | "\x02\x1c\x19\x21\x6b\xad\x52\x5c" | ||
| 7855 | "\x85\x79\x69\x5d\x83\xba\x26\x84" | ||
| 7856 | "\x64\x19\x17\x5b\x57\xe0\x21\x0f" | ||
| 7857 | "\xca\xdb\xa1\x26\x38\x14\xa2\x69" | ||
| 7858 | "\xdb\x54\x67\x80\xc0\x54\xe0\xfd" | ||
| 7859 | "\x3e\x91\xe7\x91\x7f\x13\x38\x44" | ||
| 7860 | "\xb7\xb1\xd6\xc8\x7d\x48\x8d\x41" | ||
| 7861 | "\x08\xea\x29\x6c\x74\x67\x3f\xb0" | ||
| 7862 | "\xac\x7f\x5c\x1d\xf5\xee\x22\x66" | ||
| 7863 | "\x27\xa6\xb6\x13\xba\xba\xf0\xc2", | ||
| 7864 | .rlen = 48 + 64, | ||
| 7865 | }, { /* RFC 3602 Case 4 */ | ||
| 7866 | #ifdef __LITTLE_ENDIAN | ||
| 7867 | .key = "\x08\x00" /* rta length */ | ||
| 7868 | "\x01\x00" /* rta type */ | ||
| 7869 | #else | ||
| 7870 | .key = "\x00\x08" /* rta length */ | ||
| 7871 | "\x00\x01" /* rta type */ | ||
| 7872 | #endif | ||
| 7873 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7874 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7875 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7876 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7877 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7878 | "\x33\x44\x55\x66\x77\x88\x99\xaa" | ||
| 7879 | "\xbb\xcc\xdd\xee\xff\x11\x22\x33" | ||
| 7880 | "\x44\x55\x66\x77\x88\x99\xaa\xbb" | ||
| 7881 | "\xcc\xdd\xee\xff\x11\x22\x33\x44" | ||
| 7882 | "\x56\xe4\x7a\x38\xc5\x59\x89\x74" | ||
| 7883 | "\xbc\x46\x90\x3d\xba\x29\x03\x49", | ||
| 7884 | .klen = 8 + 64 + 16, | ||
| 7885 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | ||
| 7886 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | ||
| 7887 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" | ||
| 7888 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" | ||
| 7889 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" | ||
| 7890 | "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" | ||
| 7891 | "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" | ||
| 7892 | "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" | ||
| 7893 | "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" | ||
| 7894 | "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", | ||
| 7895 | .ilen = 64, | ||
| 7896 | .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" | ||
| 7897 | "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" | ||
| 7898 | "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" | ||
| 7899 | "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" | ||
| 7900 | "\x35\x90\x7a\xa6\x32\xc3\xff\xdf" | ||
| 7901 | "\x86\x8b\xb7\xb2\x9d\x3d\x46\xad" | ||
| 7902 | "\x83\xce\x9f\x9a\x10\x2e\xe9\x9d" | ||
| 7903 | "\x49\xa5\x3e\x87\xf4\xc3\xda\x55" | ||
| 7904 | "\x82\xcd\x42\x28\x21\x20\x15\xcc" | ||
| 7905 | "\xb7\xb2\x48\x40\xc7\x64\x41\x3a" | ||
| 7906 | "\x61\x32\x82\x85\xcf\x27\xed\xb4" | ||
| 7907 | "\xe4\x68\xa2\xf5\x79\x26\x27\xb2" | ||
| 7908 | "\x51\x67\x6a\xc4\xf0\x66\x55\x50" | ||
| 7909 | "\xbc\x6f\xed\xd5\x8d\xde\x23\x7c" | ||
| 7910 | "\x62\x98\x14\xd7\x2f\x37\x8d\xdf" | ||
| 7911 | "\xf4\x33\x80\xeb\x8e\xb4\xa4\xda", | ||
| 7912 | .rlen = 64 + 64, | ||
| 7913 | }, { /* RFC 3602 Case 5 */ | ||
| 7914 | #ifdef __LITTLE_ENDIAN | ||
| 7915 | .key = "\x08\x00" /* rta length */ | ||
| 7916 | "\x01\x00" /* rta type */ | ||
| 7917 | #else | ||
| 7918 | .key = "\x00\x08" /* rta length */ | ||
| 7919 | "\x00\x01" /* rta type */ | ||
| 7920 | #endif | ||
| 7921 | "\x00\x00\x00\x10" /* enc key length */ | ||
| 7922 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7923 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7924 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7925 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7926 | "\x33\x44\x55\x66\x77\x88\x99\xaa" | ||
| 7927 | "\xbb\xcc\xdd\xee\xff\x11\x22\x33" | ||
| 7928 | "\x44\x55\x66\x77\x88\x99\xaa\xbb" | ||
| 7929 | "\xcc\xdd\xee\xff\x11\x22\x33\x44" | ||
| 7930 | "\x90\xd3\x82\xb4\x10\xee\xba\x7a" | ||
| 7931 | "\xd9\x38\xc4\x6c\xec\x1a\x82\xbf", | ||
| 7932 | .klen = 8 + 64 + 16, | ||
| 7933 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" | ||
| 7934 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | ||
| 7935 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | ||
| 7936 | .alen = 8, | ||
| 7937 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" | ||
| 7938 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" | ||
| 7939 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
| 7940 | "\x10\x11\x12\x13\x14\x15\x16\x17" | ||
| 7941 | "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" | ||
| 7942 | "\x20\x21\x22\x23\x24\x25\x26\x27" | ||
| 7943 | "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" | ||
| 7944 | "\x30\x31\x32\x33\x34\x35\x36\x37" | ||
| 7945 | "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
| 7946 | "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", | ||
| 7947 | .ilen = 80, | ||
| 7948 | .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" | ||
| 7949 | "\xa9\x45\x3e\x19\x4e\x12\x08\x49" | ||
| 7950 | "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" | ||
| 7951 | "\x33\x00\x13\xb4\x89\x8d\xc8\x56" | ||
| 7952 | "\xa4\x69\x9e\x52\x3a\x55\xdb\x08" | ||
| 7953 | "\x0b\x59\xec\x3a\x8e\x4b\x7e\x52" | ||
| 7954 | "\x77\x5b\x07\xd1\xdb\x34\xed\x9c" | ||
| 7955 | "\x53\x8a\xb5\x0c\x55\x1b\x87\x4a" | ||
| 7956 | "\xa2\x69\xad\xd0\x47\xad\x2d\x59" | ||
| 7957 | "\x13\xac\x19\xb7\xcf\xba\xd4\xa6" | ||
| 7958 | "\x74\x84\x94\xe2\xd7\x7a\xf9\xbf" | ||
| 7959 | "\x00\x8a\xa2\xd5\xb7\xf3\x60\xcf" | ||
| 7960 | "\xa0\x47\xdf\x4e\x09\xf4\xb1\x7f" | ||
| 7961 | "\x14\xd9\x3d\x53\x8e\x12\xb3\x00" | ||
| 7962 | "\x4c\x0a\x4e\x32\x40\x43\x88\xce" | ||
| 7963 | "\x92\x26\xc1\x76\x20\x11\xeb\xba" | ||
| 7964 | "\x62\x4f\x9a\x62\x25\xc3\x75\x80" | ||
| 7965 | "\xb7\x0a\x17\xf5\xd7\x94\xb4\x14", | ||
| 7966 | .rlen = 80 + 64, | ||
| 7967 | }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ | ||
| 7968 | #ifdef __LITTLE_ENDIAN | ||
| 7969 | .key = "\x08\x00" /* rta length */ | ||
| 7970 | "\x01\x00" /* rta type */ | ||
| 7971 | #else | ||
| 7972 | .key = "\x00\x08" /* rta length */ | ||
| 7973 | "\x00\x01" /* rta type */ | ||
| 7974 | #endif | ||
| 7975 | "\x00\x00\x00\x18" /* enc key length */ | ||
| 7976 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 7977 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 7978 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 7979 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 7980 | "\x33\x44\x55\x66\x77\x88\x99\xaa" | ||
| 7981 | "\xbb\xcc\xdd\xee\xff\x11\x22\x33" | ||
| 7982 | "\x44\x55\x66\x77\x88\x99\xaa\xbb" | ||
| 7983 | "\xcc\xdd\xee\xff\x11\x22\x33\x44" | ||
| 7984 | "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" | ||
| 7985 | "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" | ||
| 7986 | "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", | ||
| 7987 | .klen = 8 + 64 + 24, | ||
| 7988 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 7989 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 7990 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
| 7991 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
| 7992 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
| 7993 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
| 7994 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
| 7995 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
| 7996 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
| 7997 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
| 7998 | .ilen = 64, | ||
| 7999 | .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" | ||
| 8000 | "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" | ||
| 8001 | "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" | ||
| 8002 | "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" | ||
| 8003 | "\x57\x1b\x24\x20\x12\xfb\x7a\xe0" | ||
| 8004 | "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0" | ||
| 8005 | "\x08\xb0\xe2\x79\x88\x59\x88\x81" | ||
| 8006 | "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd" | ||
| 8007 | "\x77\x4b\x69\x9d\x3a\x0d\xb4\x99" | ||
| 8008 | "\x8f\xc6\x8e\x0e\x72\x58\xe3\x56" | ||
| 8009 | "\xbb\x21\xd2\x7d\x93\x11\x17\x91" | ||
| 8010 | "\xc4\x83\xfd\x0a\xea\x71\xfe\x77" | ||
| 8011 | "\xae\x6f\x0a\xa5\xf0\xcf\xe1\x35" | ||
| 8012 | "\xba\x03\xd5\x32\xfa\x5f\x41\x58" | ||
| 8013 | "\x8d\x43\x98\xa7\x94\x16\x07\x02" | ||
| 8014 | "\x0f\xb6\x81\x50\x28\x95\x2e\x75", | ||
| 8015 | .rlen = 64 + 64, | ||
| 8016 | }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ | ||
| 8017 | #ifdef __LITTLE_ENDIAN | ||
| 8018 | .key = "\x08\x00" /* rta length */ | ||
| 8019 | "\x01\x00" /* rta type */ | ||
| 8020 | #else | ||
| 8021 | .key = "\x00\x08" /* rta length */ | ||
| 8022 | "\x00\x01" /* rta type */ | ||
| 8023 | #endif | ||
| 8024 | "\x00\x00\x00\x20" /* enc key length */ | ||
| 8025 | "\x11\x22\x33\x44\x55\x66\x77\x88" | ||
| 8026 | "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" | ||
| 8027 | "\x22\x33\x44\x55\x66\x77\x88\x99" | ||
| 8028 | "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" | ||
| 8029 | "\x33\x44\x55\x66\x77\x88\x99\xaa" | ||
| 8030 | "\xbb\xcc\xdd\xee\xff\x11\x22\x33" | ||
| 8031 | "\x44\x55\x66\x77\x88\x99\xaa\xbb" | ||
| 8032 | "\xcc\xdd\xee\xff\x11\x22\x33\x44" | ||
| 8033 | "\x60\x3d\xeb\x10\x15\xca\x71\xbe" | ||
| 8034 | "\x2b\x73\xae\xf0\x85\x7d\x77\x81" | ||
| 8035 | "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" | ||
| 8036 | "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", | ||
| 8037 | .klen = 8 + 64 + 32, | ||
| 8038 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 8039 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 8040 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | ||
| 8041 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | ||
| 8042 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | ||
| 8043 | "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" | ||
| 8044 | "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" | ||
| 8045 | "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" | ||
| 8046 | "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" | ||
| 8047 | "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", | ||
| 8048 | .ilen = 64, | ||
| 8049 | .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" | ||
| 8050 | "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" | ||
| 8051 | "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" | ||
| 8052 | "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" | ||
| 8053 | "\x39\xf2\x33\x69\xa9\xd9\xba\xcf" | ||
| 8054 | "\xa5\x30\xe2\x63\x04\x23\x14\x61" | ||
| 8055 | "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc" | ||
| 8056 | "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b" | ||
| 8057 | "\xb2\x27\x69\x7f\x45\x64\x79\x2b" | ||
| 8058 | "\xb7\xb8\x4c\xd4\x75\x94\x68\x40" | ||
| 8059 | "\x2a\xea\x91\xc7\x3f\x7c\xed\x7b" | ||
| 8060 | "\x95\x2c\x9b\xa8\xf5\xe5\x52\x8d" | ||
| 8061 | "\x6b\xe1\xae\xf1\x74\xfa\x0d\x0c" | ||
| 8062 | "\xe3\x8d\x64\xc3\x8d\xff\x7c\x8c" | ||
| 8063 | "\xdb\xbf\xa0\xb4\x01\xa2\xa8\xa2" | ||
| 8064 | "\x2c\xb1\x62\x2c\x10\xca\xf1\x21", | ||
| 8065 | .rlen = 64 + 64, | ||
| 8066 | }, | ||
| 8067 | }; | ||
| 8068 | |||
| 6371 | static struct cipher_testvec aes_lrw_enc_tv_template[] = { | 8069 | static struct cipher_testvec aes_lrw_enc_tv_template[] = { |
| 6372 | /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */ | 8070 | /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */ |
| 6373 | { /* LRW-32-AES 1 */ | 8071 | { /* LRW-32-AES 1 */ |
| @@ -14858,4 +16556,94 @@ static struct hash_testvec crc32c_tv_template[] = { | |||
| 14858 | }, | 16556 | }, |
| 14859 | }; | 16557 | }; |
| 14860 | 16558 | ||
| 16559 | /* | ||
| 16560 | * Blakcifn CRC test vectors | ||
| 16561 | */ | ||
| 16562 | #define BFIN_CRC_TEST_VECTORS 6 | ||
| 16563 | |||
| 16564 | static struct hash_testvec bfin_crc_tv_template[] = { | ||
| 16565 | { | ||
| 16566 | .psize = 0, | ||
| 16567 | .digest = "\x00\x00\x00\x00", | ||
| 16568 | }, | ||
| 16569 | { | ||
| 16570 | .key = "\x87\xa9\xcb\xed", | ||
| 16571 | .ksize = 4, | ||
| 16572 | .psize = 0, | ||
| 16573 | .digest = "\x87\xa9\xcb\xed", | ||
| 16574 | }, | ||
| 16575 | { | ||
| 16576 | .key = "\xff\xff\xff\xff", | ||
| 16577 | .ksize = 4, | ||
| 16578 | .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
| 16579 | "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" | ||
| 16580 | "\x11\x12\x13\x14\x15\x16\x17\x18" | ||
| 16581 | "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" | ||
| 16582 | "\x21\x22\x23\x24\x25\x26\x27\x28", | ||
| 16583 | .psize = 40, | ||
| 16584 | .digest = "\x84\x0c\x8d\xa2", | ||
| 16585 | }, | ||
| 16586 | { | ||
| 16587 | .key = "\xff\xff\xff\xff", | ||
| 16588 | .ksize = 4, | ||
| 16589 | .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
| 16590 | "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" | ||
| 16591 | "\x11\x12\x13\x14\x15\x16\x17\x18" | ||
| 16592 | "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" | ||
| 16593 | "\x21\x22\x23\x24\x25\x26", | ||
| 16594 | .psize = 38, | ||
| 16595 | .digest = "\x8c\x58\xec\xb7", | ||
| 16596 | }, | ||
| 16597 | { | ||
| 16598 | .key = "\xff\xff\xff\xff", | ||
| 16599 | .ksize = 4, | ||
| 16600 | .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
| 16601 | "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" | ||
| 16602 | "\x11\x12\x13\x14\x15\x16\x17\x18" | ||
| 16603 | "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" | ||
| 16604 | "\x21\x22\x23\x24\x25\x26\x27", | ||
| 16605 | .psize = 39, | ||
| 16606 | .digest = "\xdc\x50\x28\x7b", | ||
| 16607 | }, | ||
| 16608 | { | ||
| 16609 | .key = "\xff\xff\xff\xff", | ||
| 16610 | .ksize = 4, | ||
| 16611 | .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
| 16612 | "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" | ||
| 16613 | "\x11\x12\x13\x14\x15\x16\x17\x18" | ||
| 16614 | "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" | ||
| 16615 | "\x21\x22\x23\x24\x25\x26\x27\x28" | ||
| 16616 | "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" | ||
| 16617 | "\x31\x32\x33\x34\x35\x36\x37\x38" | ||
| 16618 | "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" | ||
| 16619 | "\x41\x42\x43\x44\x45\x46\x47\x48" | ||
| 16620 | "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50" | ||
| 16621 | "\x51\x52\x53\x54\x55\x56\x57\x58" | ||
| 16622 | "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" | ||
| 16623 | "\x61\x62\x63\x64\x65\x66\x67\x68" | ||
| 16624 | "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" | ||
| 16625 | "\x71\x72\x73\x74\x75\x76\x77\x78" | ||
| 16626 | "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" | ||
| 16627 | "\x81\x82\x83\x84\x85\x86\x87\x88" | ||
| 16628 | "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" | ||
| 16629 | "\x91\x92\x93\x94\x95\x96\x97\x98" | ||
| 16630 | "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0" | ||
| 16631 | "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" | ||
| 16632 | "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" | ||
| 16633 | "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" | ||
| 16634 | "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" | ||
| 16635 | "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8" | ||
| 16636 | "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" | ||
| 16637 | "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" | ||
| 16638 | "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" | ||
| 16639 | "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" | ||
| 16640 | "\xe9\xea\xeb\xec\xed\xee\xef\xf0", | ||
| 16641 | .psize = 240, | ||
| 16642 | .digest = "\x10\x19\x4a\x5c", | ||
| 16643 | .np = 2, | ||
| 16644 | .tap = { 31, 209 } | ||
| 16645 | }, | ||
| 16646 | |||
| 16647 | }; | ||
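The last vector above also exercises testmgr's chunked-update path: .np is the number of pieces the buffer is split into and .tap[] gives their sizes, so the 240-byte plaintext is hashed as a 31-byte update followed by a 209-byte one, and the digest must match the single-shot result. A simplified sketch of that splitting, assuming a streaming hash with a partial-update hook (this is not the testmgr code itself):

#include <stddef.h>

typedef void (*update_fn)(void *state, const unsigned char *p, size_t n);

/* Feed buf to a streaming hash in the chunk sizes listed in tap[]. */
static void feed_in_taps(void *state, update_fn update,
			 const unsigned char *buf,
			 const size_t *tap, unsigned int np)
{
	size_t off = 0;
	unsigned int i;

	for (i = 0; i < np; i++) {	/* here: np = 2, tap = { 31, 209 } */
		update(state, buf + off, tap[i]);
		off += tap[i];		/* tap entries sum to .psize */
	}
}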
| 16648 | |||
| 14861 | #endif /* _CRYPTO_TESTMGR_H */ | 16649 | #endif /* _CRYPTO_TESTMGR_H */ |
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index f45dad39a18b..b01d67328243 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
| @@ -263,3 +263,15 @@ config HW_RANDOM_PSERIES | |||
| 263 | module will be called pseries-rng. | 263 | module will be called pseries-rng. |
| 264 | 264 | ||
| 265 | If unsure, say Y. | 265 | If unsure, say Y. |
| 266 | |||
| 267 | config HW_RANDOM_EXYNOS | ||
| 268 | tristate "EXYNOS HW random number generator support" | ||
| 269 | depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK | ||
| 270 | ---help--- | ||
| 271 | This driver provides kernel-side support for the Random Number | ||
| 272 | Generator hardware found on EXYNOS SoCs. | ||
| 273 | |||
| 274 | To compile this driver as a module, choose M here: the | ||
| 275 | module will be called exynos-rng. | ||
| 276 | |||
| 277 | If unsure, say Y. | ||
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index d901dfa30321..8d6d173b65e6 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile | |||
| @@ -23,3 +23,4 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o | |||
| 23 | obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o | 23 | obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o |
| 24 | obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o | 24 | obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o |
| 25 | obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o | 25 | obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o |
| 26 | obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o | ||
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c new file mode 100644 index 000000000000..232ba9ce579c --- /dev/null +++ b/drivers/char/hw_random/exynos-rng.c | |||
| @@ -0,0 +1,182 @@ | |||
| 1 | /* | ||
| 2 | * exynos-rng.c - Random Number Generator driver for the Exynos | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Samsung Electronics | ||
| 5 | * Jonghwa Lee <jonghwa3.lee@samsung.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 19 | * | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/hw_random.h> | ||
| 23 | #include <linux/kernel.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/io.h> | ||
| 27 | #include <linux/platform_device.h> | ||
| 28 | #include <linux/clk.h> | ||
| 29 | #include <linux/pm_runtime.h> | ||
| 30 | #include <linux/err.h> | ||
| 31 | |||
| 32 | #define EXYNOS_PRNG_STATUS_OFFSET 0x10 | ||
| 33 | #define EXYNOS_PRNG_SEED_OFFSET 0x140 | ||
| 34 | #define EXYNOS_PRNG_OUT1_OFFSET 0x160 | ||
| 35 | #define SEED_SETTING_DONE BIT(1) | ||
| 36 | #define PRNG_START 0x18 | ||
| 37 | #define PRNG_DONE BIT(5) | ||
| 38 | #define EXYNOS_AUTOSUSPEND_DELAY 100 | ||
| 39 | |||
| 40 | struct exynos_rng { | ||
| 41 | struct device *dev; | ||
| 42 | struct hwrng rng; | ||
| 43 | void __iomem *mem; | ||
| 44 | struct clk *clk; | ||
| 45 | }; | ||
| 46 | |||
| 47 | static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset) | ||
| 48 | { | ||
| 49 | return __raw_readl(rng->mem + offset); | ||
| 50 | } | ||
| 51 | |||
| 52 | static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset) | ||
| 53 | { | ||
| 54 | __raw_writel(val, rng->mem + offset); | ||
| 55 | } | ||
| 56 | |||
| 57 | static int exynos_init(struct hwrng *rng) | ||
| 58 | { | ||
| 59 | struct exynos_rng *exynos_rng = container_of(rng, | ||
| 60 | struct exynos_rng, rng); | ||
| 61 | int i; | ||
| 62 | int ret = 0; | ||
| 63 | |||
| 64 | pm_runtime_get_sync(exynos_rng->dev); | ||
| 65 | |||
| 66 | for (i = 0 ; i < 5 ; i++) | ||
| 67 | exynos_rng_writel(exynos_rng, jiffies, | ||
| 68 | EXYNOS_PRNG_SEED_OFFSET + 4*i); | ||
| 69 | |||
| 70 | if (!(exynos_rng_readl(exynos_rng, EXYNOS_PRNG_STATUS_OFFSET) | ||
| 71 | & SEED_SETTING_DONE)) | ||
| 72 | ret = -EIO; | ||
| 73 | |||
| 74 | pm_runtime_put_noidle(exynos_rng->dev); | ||
| 75 | |||
| 76 | return ret; | ||
| 77 | } | ||
| 78 | |||
| 79 | static int exynos_read(struct hwrng *rng, void *buf, | ||
| 80 | size_t max, bool wait) | ||
| 81 | { | ||
| 82 | struct exynos_rng *exynos_rng = container_of(rng, | ||
| 83 | struct exynos_rng, rng); | ||
| 84 | u32 *data = buf; | ||
| 85 | |||
| 86 | pm_runtime_get_sync(exynos_rng->dev); | ||
| 87 | |||
| 88 | exynos_rng_writel(exynos_rng, PRNG_START, 0); | ||
| 89 | |||
| 90 | while (!(exynos_rng_readl(exynos_rng, | ||
| 91 | EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE)) | ||
| 92 | cpu_relax(); | ||
| 93 | |||
| 94 | exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET); | ||
| 95 | |||
| 96 | *data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET); | ||
| 97 | |||
| 98 | pm_runtime_mark_last_busy(exynos_rng->dev); | ||
| 99 | pm_runtime_autosuspend(exynos_rng->dev); | ||
| 100 | |||
| 101 | return 4; | ||
| 102 | } | ||
| 103 | |||
| 104 | static int __devinit exynos_rng_probe(struct platform_device *pdev) | ||
| 105 | { | ||
| 106 | struct exynos_rng *exynos_rng; | ||
| 107 | |||
| 108 | exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng), | ||
| 109 | GFP_KERNEL); | ||
| 110 | if (!exynos_rng) | ||
| 111 | return -ENOMEM; | ||
| 112 | |||
| 113 | exynos_rng->dev = &pdev->dev; | ||
| 114 | exynos_rng->rng.name = "exynos"; | ||
| 115 | exynos_rng->rng.init = exynos_init; | ||
| 116 | exynos_rng->rng.read = exynos_read; | ||
| 117 | exynos_rng->clk = devm_clk_get(&pdev->dev, "secss"); | ||
| 118 | if (IS_ERR(exynos_rng->clk)) { | ||
| 119 | dev_err(&pdev->dev, "Couldn't get clock.\n"); | ||
| 120 | return -ENOENT; | ||
| 121 | } | ||
| 122 | |||
| 123 | exynos_rng->mem = devm_request_and_ioremap(&pdev->dev, | ||
| 124 | platform_get_resource(pdev, IORESOURCE_MEM, 0)); | ||
| 125 | if (!exynos_rng->mem) | ||
| 126 | return -EBUSY; | ||
| 127 | |||
| 128 | platform_set_drvdata(pdev, exynos_rng); | ||
| 129 | |||
| 130 | pm_runtime_set_autosuspend_delay(&pdev->dev, EXYNOS_AUTOSUSPEND_DELAY); | ||
| 131 | pm_runtime_use_autosuspend(&pdev->dev); | ||
| 132 | pm_runtime_enable(&pdev->dev); | ||
| 133 | |||
| 134 | return hwrng_register(&exynos_rng->rng); | ||
| 135 | } | ||
| 136 | |||
| 137 | static int __devexit exynos_rng_remove(struct platform_device *pdev) | ||
| 138 | { | ||
| 139 | struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); | ||
| 140 | |||
| 141 | hwrng_unregister(&exynos_rng->rng); | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | static int exynos_rng_runtime_suspend(struct device *dev) | ||
| 147 | { | ||
| 148 | struct platform_device *pdev = to_platform_device(dev); | ||
| 149 | struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); | ||
| 150 | |||
| 151 | clk_disable_unprepare(exynos_rng->clk); | ||
| 152 | |||
| 153 | return 0; | ||
| 154 | } | ||
| 155 | |||
| 156 | static int exynos_rng_runtime_resume(struct device *dev) | ||
| 157 | { | ||
| 158 | struct platform_device *pdev = to_platform_device(dev); | ||
| 159 | struct exynos_rng *exynos_rng = platform_get_drvdata(pdev); | ||
| 160 | |||
| 161 | return clk_prepare_enable(exynos_rng->clk); | ||
| 162 | } | ||
| 163 | |||
| 164 | |||
| 165 | UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend, | ||
| 166 | exynos_rng_runtime_resume, NULL); | ||
| 167 | |||
| 168 | static struct platform_driver exynos_rng_driver = { | ||
| 169 | .driver = { | ||
| 170 | .name = "exynos-rng", | ||
| 171 | .owner = THIS_MODULE, | ||
| 172 | .pm = &exynos_rng_pm_ops, | ||
| 173 | }, | ||
| 174 | .probe = exynos_rng_probe, | ||
| 175 | .remove = __devexit_p(exynos_rng_remove), | ||
| 176 | }; | ||
| 177 | |||
| 178 | module_platform_driver(exynos_rng_driver); | ||
| 179 | |||
| 180 | MODULE_DESCRIPTION("EXYNOS 4 H/W Random Number Generator driver"); | ||
| 181 | MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>"); | ||
| 182 | MODULE_LICENSE("GPL"); | ||
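Note that exynos_read() returns exactly one 32-bit word per call; this works because the hw_random core keeps calling ->read() until a request is satisfied. Schematically (a simplified assumption about the core's consumption loop, not code from this patch):

    /* Assumed shape of the core's drain loop for a 4-bytes-per-call
     * backend such as exynos_read(). */
    while (want > 0) {
            int n = rng->read(rng, p, want, true);  /* 4 bytes here */
            if (n <= 0)
                    break;                          /* error or no data */
            p += n;
            want -= n;
    }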
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 187c6be80f43..85074de5042e 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/ioport.h> | 24 | #include <linux/ioport.h> |
| 25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/hw_random.h> | 26 | #include <linux/hw_random.h> |
| 27 | #include <linux/delay.h> | ||
| 27 | #include <linux/io.h> | 28 | #include <linux/io.h> |
| 28 | 29 | ||
| 29 | /* RNGA Registers */ | 30 | /* RNGA Registers */ |
| @@ -60,16 +61,20 @@ | |||
| 60 | 61 | ||
| 61 | static struct platform_device *rng_dev; | 62 | static struct platform_device *rng_dev; |
| 62 | 63 | ||
| 63 | static int mxc_rnga_data_present(struct hwrng *rng) | 64 | static int mxc_rnga_data_present(struct hwrng *rng, int wait) |
| 64 | { | 65 | { |
| 65 | int level; | ||
| 66 | void __iomem *rng_base = (void __iomem *)rng->priv; | 66 | void __iomem *rng_base = (void __iomem *)rng->priv; |
| 67 | 67 | int i; | |
| 68 | /* how many random numbers is in FIFO? [0-16] */ | 68 | |
| 69 | level = ((__raw_readl(rng_base + RNGA_STATUS) & | 69 | for (i = 0; i < 20; i++) { |
| 70 | RNGA_STATUS_LEVEL_MASK) >> 8); | 70 | /* how many random numbers are in FIFO? [0-16] */ |
| 71 | 71 | int level = (__raw_readl(rng_base + RNGA_STATUS) & | |
| 72 | return level > 0 ? 1 : 0; | 72 | RNGA_STATUS_LEVEL_MASK) >> 8; |
| 73 | if (level || !wait) | ||
| 74 | return !!level; | ||
| 75 | udelay(10); | ||
| 76 | } | ||
| 77 | return 0; | ||
| 73 | } | 78 | } |
| 74 | 79 | ||
| 75 | static int mxc_rnga_data_read(struct hwrng *rng, u32 * data) | 80 | static int mxc_rnga_data_read(struct hwrng *rng, u32 * data) |
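The rewritten mxc_rnga_data_present() above now honors the core's wait argument: rather than sampling the FIFO level once, it re-polls up to 20 times with udelay(10) between reads, so a waiting caller blocks for at most about 20 × 10 µs = 200 µs before the function gives up and reports no data.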
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 1092a770482e..7d74d092aa8f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
| @@ -298,7 +298,7 @@ config CRYPTO_DEV_TEGRA_AES | |||
| 298 | will be called tegra-aes. | 298 | will be called tegra-aes. |
| 299 | 299 | ||
| 300 | config CRYPTO_DEV_NX | 300 | config CRYPTO_DEV_NX |
| 301 | tristate "Support for Power7+ in-Nest cryptographic accleration" | 301 | tristate "Support for Power7+ in-Nest cryptographic acceleration" |
| 302 | depends on PPC64 && IBMVIO | 302 | depends on PPC64 && IBMVIO |
| 303 | select CRYPTO_AES | 303 | select CRYPTO_AES |
| 304 | select CRYPTO_CBC | 304 | select CRYPTO_CBC |
| @@ -325,4 +325,58 @@ if CRYPTO_DEV_UX500 | |||
| 325 | source "drivers/crypto/ux500/Kconfig" | 325 | source "drivers/crypto/ux500/Kconfig" |
| 326 | endif # if CRYPTO_DEV_UX500 | 326 | endif # if CRYPTO_DEV_UX500 |
| 327 | 327 | ||
| 328 | config CRYPTO_DEV_BFIN_CRC | ||
| 329 | tristate "Support for Blackfin CRC hardware" | ||
| 330 | depends on BF60x | ||
| 331 | help | ||
| 332 | Newer Blackfin processors have CRC hardware. Select this if you | ||
| 333 | want to use the Blackfin CRC module. | ||
| 334 | |||
| 335 | config CRYPTO_DEV_ATMEL_AES | ||
| 336 | tristate "Support for Atmel AES hw accelerator" | ||
| 337 | depends on ARCH_AT91 | ||
| 338 | select CRYPTO_CBC | ||
| 339 | select CRYPTO_ECB | ||
| 340 | select CRYPTO_AES | ||
| 341 | select CRYPTO_ALGAPI | ||
| 342 | select CRYPTO_BLKCIPHER | ||
| 343 | select AT_HDMAC | ||
| 344 | help | ||
| 345 | Some Atmel processors have an AES hw accelerator. | ||
| 346 | Select this if you want to use the Atmel module for | ||
| 347 | AES algorithms. | ||
| 348 | |||
| 349 | To compile this driver as a module, choose M here: the module | ||
| 350 | will be called atmel-aes. | ||
| 351 | |||
| 352 | config CRYPTO_DEV_ATMEL_TDES | ||
| 353 | tristate "Support for Atmel DES/TDES hw accelerator" | ||
| 354 | depends on ARCH_AT91 | ||
| 355 | select CRYPTO_DES | ||
| 356 | select CRYPTO_CBC | ||
| 357 | select CRYPTO_ECB | ||
| 358 | select CRYPTO_ALGAPI | ||
| 359 | select CRYPTO_BLKCIPHER | ||
| 360 | help | ||
| 361 | Some Atmel processors have a DES/TDES hw accelerator. | ||
| 362 | Select this if you want to use the Atmel module for | ||
| 363 | DES/TDES algorithms. | ||
| 364 | |||
| 365 | To compile this driver as a module, choose M here: the module | ||
| 366 | will be called atmel-tdes. | ||
| 367 | |||
| 368 | config CRYPTO_DEV_ATMEL_SHA | ||
| 369 | tristate "Support for Atmel SHA1/SHA256 hw accelerator" | ||
| 370 | depends on ARCH_AT91 | ||
| 371 | select CRYPTO_SHA1 | ||
| 372 | select CRYPTO_SHA256 | ||
| 373 | select CRYPTO_ALGAPI | ||
| 374 | help | ||
| 375 | Some Atmel processors have a SHA1/SHA256 hw accelerator. | ||
| 376 | Select this if you want to use the Atmel module for | ||
| 377 | SHA1/SHA256 algorithms. | ||
| 378 | |||
| 379 | To compile this driver as a module, choose M here: the module | ||
| 380 | will be called atmel-sha. | ||
| 381 | |||
| 328 | endif # CRYPTO_HW | 382 | endif # CRYPTO_HW |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 01390325d72d..880a47b0b023 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
| @@ -14,4 +14,9 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o | |||
| 14 | obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o | 14 | obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o |
| 15 | obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o | 15 | obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o |
| 16 | obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o | 16 | obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o |
| 17 | obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ \ No newline at end of file | 17 | obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ |
| 18 | obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o | ||
| 19 | obj-$(CONFIG_CRYPTO_DEV_NX) += nx/ | ||
| 20 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o | ||
| 21 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o | ||
| 22 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o | ||
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h new file mode 100644 index 000000000000..2786bb1a5aa0 --- /dev/null +++ b/drivers/crypto/atmel-aes-regs.h | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | #ifndef __ATMEL_AES_REGS_H__ | ||
| 2 | #define __ATMEL_AES_REGS_H__ | ||
| 3 | |||
| 4 | #define AES_CR 0x00 | ||
| 5 | #define AES_CR_START (1 << 0) | ||
| 6 | #define AES_CR_SWRST (1 << 8) | ||
| 7 | #define AES_CR_LOADSEED (1 << 16) | ||
| 8 | |||
| 9 | #define AES_MR 0x04 | ||
| 10 | #define AES_MR_CYPHER_DEC (0 << 0) | ||
| 11 | #define AES_MR_CYPHER_ENC (1 << 0) | ||
| 12 | #define AES_MR_DUALBUFF (1 << 3) | ||
| 13 | #define AES_MR_PROCDLY_MASK (0xF << 4) | ||
| 14 | #define AES_MR_PROCDLY_OFFSET 4 | ||
| 15 | #define AES_MR_SMOD_MASK (0x3 << 8) | ||
| 16 | #define AES_MR_SMOD_MANUAL (0x0 << 8) | ||
| 17 | #define AES_MR_SMOD_AUTO (0x1 << 8) | ||
| 18 | #define AES_MR_SMOD_IDATAR0 (0x2 << 8) | ||
| 19 | #define AES_MR_KEYSIZE_MASK (0x3 << 10) | ||
| 20 | #define AES_MR_KEYSIZE_128 (0x0 << 10) | ||
| 21 | #define AES_MR_KEYSIZE_192 (0x1 << 10) | ||
| 22 | #define AES_MR_KEYSIZE_256 (0x2 << 10) | ||
| 23 | #define AES_MR_OPMOD_MASK (0x7 << 12) | ||
| 24 | #define AES_MR_OPMOD_ECB (0x0 << 12) | ||
| 25 | #define AES_MR_OPMOD_CBC (0x1 << 12) | ||
| 26 | #define AES_MR_OPMOD_OFB (0x2 << 12) | ||
| 27 | #define AES_MR_OPMOD_CFB (0x3 << 12) | ||
| 28 | #define AES_MR_OPMOD_CTR (0x4 << 12) | ||
| 29 | #define AES_MR_LOD (0x1 << 15) | ||
| 30 | #define AES_MR_CFBS_MASK (0x7 << 16) | ||
| 31 | #define AES_MR_CFBS_128b (0x0 << 16) | ||
| 32 | #define AES_MR_CFBS_64b (0x1 << 16) | ||
| 33 | #define AES_MR_CFBS_32b (0x2 << 16) | ||
| 34 | #define AES_MR_CFBS_16b (0x3 << 16) | ||
| 35 | #define AES_MR_CFBS_8b (0x4 << 16) | ||
| 36 | #define AES_MR_CKEY_MASK (0xF << 20) | ||
| 37 | #define AES_MR_CKEY_OFFSET 20 | ||
| 38 | #define AES_MR_CMTYP_MASK (0x1F << 24) | ||
| 39 | #define AES_MR_CMTYP_OFFSET 24 | ||
| 40 | |||
| 41 | #define AES_IER 0x10 | ||
| 42 | #define AES_IDR 0x14 | ||
| 43 | #define AES_IMR 0x18 | ||
| 44 | #define AES_ISR 0x1C | ||
| 45 | #define AES_INT_DATARDY (1 << 0) | ||
| 46 | #define AES_INT_URAD (1 << 8) | ||
| 47 | #define AES_ISR_URAT_MASK (0xF << 12) | ||
| 48 | #define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12) | ||
| 49 | #define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12) | ||
| 50 | #define AES_ISR_URAT_MR_WR_PROC (0x2 << 12) | ||
| 51 | #define AES_ISR_URAT_ODR_RD_SUBK (0x3 << 12) | ||
| 52 | #define AES_ISR_URAT_MR_WR_SUBK (0x4 << 12) | ||
| 53 | #define AES_ISR_URAT_WOR_RD (0x5 << 12) | ||
| 54 | |||
| 55 | #define AES_KEYWR(x) (0x20 + ((x) * 0x04)) | ||
| 56 | #define AES_IDATAR(x) (0x40 + ((x) * 0x04)) | ||
| 57 | #define AES_ODATAR(x) (0x50 + ((x) * 0x04)) | ||
| 58 | #define AES_IVR(x) (0x60 + ((x) * 0x04)) | ||
| 59 | |||
| 60 | #define AES_HW_VERSION 0xFC | ||
| 61 | |||
| 62 | #endif /* __ATMEL_AES_REGS_H__ */ | ||
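To see how these AES_MR fields compose (this mirrors what atmel_aes_write_ctrl() does in the driver below), a CBC encryption with a 128-bit key and automatic start mode would program the mode register as:

    u32 valmr = AES_MR_KEYSIZE_128 | AES_MR_OPMOD_CBC |
                AES_MR_CYPHER_ENC | AES_MR_SMOD_AUTO;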
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c new file mode 100644 index 000000000000..6bb20fffbf49 --- /dev/null +++ b/drivers/crypto/atmel-aes.c | |||
| @@ -0,0 +1,1206 @@ | |||
| 1 | /* | ||
| 2 | * Cryptographic API. | ||
| 3 | * | ||
| 4 | * Support for ATMEL AES HW acceleration. | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012 Eukréa Electromatique - ATMEL | ||
| 7 | * Author: Nicolas Royer <nicolas@eukrea.com> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as published | ||
| 11 | * by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * Some ideas are taken from the omap-aes.c driver. | ||
| 14 | */ | ||
| 15 | |||
| 16 | |||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/slab.h> | ||
| 20 | #include <linux/err.h> | ||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/io.h> | ||
| 23 | #include <linux/hw_random.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | |||
| 26 | #include <linux/device.h> | ||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/errno.h> | ||
| 30 | #include <linux/interrupt.h> | ||
| 31 | #include <linux/kernel.h> | ||
| 32 | #include <linux/clk.h> | ||
| 33 | #include <linux/irq.h> | ||
| 34 | #include <linux/io.h> | ||
| 35 | #include <linux/platform_device.h> | ||
| 36 | #include <linux/scatterlist.h> | ||
| 37 | #include <linux/dma-mapping.h> | ||
| 38 | #include <linux/delay.h> | ||
| 39 | #include <linux/crypto.h> | ||
| 40 | #include <linux/cryptohash.h> | ||
| 41 | #include <crypto/scatterwalk.h> | ||
| 42 | #include <crypto/algapi.h> | ||
| 43 | #include <crypto/aes.h> | ||
| 44 | #include <crypto/hash.h> | ||
| 45 | #include <crypto/internal/hash.h> | ||
| 46 | #include <linux/platform_data/atmel-aes.h> | ||
| 47 | #include "atmel-aes-regs.h" | ||
| 48 | |||
| 49 | #define CFB8_BLOCK_SIZE 1 | ||
| 50 | #define CFB16_BLOCK_SIZE 2 | ||
| 51 | #define CFB32_BLOCK_SIZE 4 | ||
| 52 | #define CFB64_BLOCK_SIZE 8 | ||
| 53 | |||
| 54 | /* AES flags */ | ||
| 55 | #define AES_FLAGS_MODE_MASK 0x01ff | ||
| 56 | #define AES_FLAGS_ENCRYPT BIT(0) | ||
| 57 | #define AES_FLAGS_CBC BIT(1) | ||
| 58 | #define AES_FLAGS_CFB BIT(2) | ||
| 59 | #define AES_FLAGS_CFB8 BIT(3) | ||
| 60 | #define AES_FLAGS_CFB16 BIT(4) | ||
| 61 | #define AES_FLAGS_CFB32 BIT(5) | ||
| 62 | #define AES_FLAGS_CFB64 BIT(6) | ||
| 63 | #define AES_FLAGS_OFB BIT(7) | ||
| 64 | #define AES_FLAGS_CTR BIT(8) | ||
| 65 | |||
| 66 | #define AES_FLAGS_INIT BIT(16) | ||
| 67 | #define AES_FLAGS_DMA BIT(17) | ||
| 68 | #define AES_FLAGS_BUSY BIT(18) | ||
| 69 | |||
| 70 | #define AES_FLAGS_DUALBUFF BIT(24) | ||
| 71 | |||
| 72 | #define ATMEL_AES_QUEUE_LENGTH 1 | ||
| 73 | #define ATMEL_AES_CACHE_SIZE 0 | ||
| 74 | |||
| 75 | #define ATMEL_AES_DMA_THRESHOLD 16 | ||
| 76 | |||
| 77 | |||
| 78 | struct atmel_aes_dev; | ||
| 79 | |||
| 80 | struct atmel_aes_ctx { | ||
| 81 | struct atmel_aes_dev *dd; | ||
| 82 | |||
| 83 | int keylen; | ||
| 84 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | ||
| 85 | }; | ||
| 86 | |||
| 87 | struct atmel_aes_reqctx { | ||
| 88 | unsigned long mode; | ||
| 89 | }; | ||
| 90 | |||
| 91 | struct atmel_aes_dma { | ||
| 92 | struct dma_chan *chan; | ||
| 93 | struct dma_slave_config dma_conf; | ||
| 94 | }; | ||
| 95 | |||
| 96 | struct atmel_aes_dev { | ||
| 97 | struct list_head list; | ||
| 98 | unsigned long phys_base; | ||
| 99 | void __iomem *io_base; | ||
| 100 | |||
| 101 | struct atmel_aes_ctx *ctx; | ||
| 102 | struct device *dev; | ||
| 103 | struct clk *iclk; | ||
| 104 | int irq; | ||
| 105 | |||
| 106 | unsigned long flags; | ||
| 107 | int err; | ||
| 108 | |||
| 109 | spinlock_t lock; | ||
| 110 | struct crypto_queue queue; | ||
| 111 | |||
| 112 | struct tasklet_struct done_task; | ||
| 113 | struct tasklet_struct queue_task; | ||
| 114 | |||
| 115 | struct ablkcipher_request *req; | ||
| 116 | size_t total; | ||
| 117 | |||
| 118 | struct scatterlist *in_sg; | ||
| 119 | unsigned int nb_in_sg; | ||
| 120 | |||
| 121 | struct scatterlist *out_sg; | ||
| 122 | unsigned int nb_out_sg; | ||
| 123 | |||
| 124 | size_t bufcnt; | ||
| 125 | |||
| 126 | u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32)); | ||
| 127 | int dma_in; | ||
| 128 | struct atmel_aes_dma dma_lch_in; | ||
| 129 | |||
| 130 | u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32)); | ||
| 131 | int dma_out; | ||
| 132 | struct atmel_aes_dma dma_lch_out; | ||
| 133 | |||
| 134 | u32 hw_version; | ||
| 135 | }; | ||
| 136 | |||
| 137 | struct atmel_aes_drv { | ||
| 138 | struct list_head dev_list; | ||
| 139 | spinlock_t lock; | ||
| 140 | }; | ||
| 141 | |||
| 142 | static struct atmel_aes_drv atmel_aes = { | ||
| 143 | .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list), | ||
| 144 | .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), | ||
| 145 | }; | ||
| 146 | |||
| 147 | static int atmel_aes_sg_length(struct ablkcipher_request *req, | ||
| 148 | struct scatterlist *sg) | ||
| 149 | { | ||
| 150 | unsigned int total = req->nbytes; | ||
| 151 | int sg_nb; | ||
| 152 | unsigned int len; | ||
| 153 | struct scatterlist *sg_list; | ||
| 154 | |||
| 155 | sg_nb = 0; | ||
| 156 | sg_list = sg; | ||
| 157 | total = req->nbytes; | ||
| 158 | |||
| 159 | while (total) { | ||
| 160 | len = min(sg_list->length, total); | ||
| 161 | |||
| 162 | sg_nb++; | ||
| 163 | total -= len; | ||
| 164 | |||
| 165 | sg_list = sg_next(sg_list); | ||
| 166 | if (!sg_list) | ||
| 167 | total = 0; | ||
| 168 | } | ||
| 169 | |||
| 170 | return sg_nb; | ||
| 171 | } | ||
| 172 | |||
| 173 | static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) | ||
| 174 | { | ||
| 175 | return readl_relaxed(dd->io_base + offset); | ||
| 176 | } | ||
| 177 | |||
| 178 | static inline void atmel_aes_write(struct atmel_aes_dev *dd, | ||
| 179 | u32 offset, u32 value) | ||
| 180 | { | ||
| 181 | writel_relaxed(value, dd->io_base + offset); | ||
| 182 | } | ||
| 183 | |||
| 184 | static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset, | ||
| 185 | u32 *value, int count) | ||
| 186 | { | ||
| 187 | for (; count--; value++, offset += 4) | ||
| 188 | *value = atmel_aes_read(dd, offset); | ||
| 189 | } | ||
| 190 | |||
| 191 | static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, | ||
| 192 | u32 *value, int count) | ||
| 193 | { | ||
| 194 | for (; count--; value++, offset += 4) | ||
| 195 | atmel_aes_write(dd, offset, *value); | ||
| 196 | } | ||
| 197 | |||
| 198 | static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd) | ||
| 199 | { | ||
| 200 | atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF); | ||
| 201 | |||
| 202 | if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF) | ||
| 203 | dd->flags |= AES_FLAGS_DUALBUFF; | ||
| 204 | } | ||
| 205 | |||
| 206 | static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx) | ||
| 207 | { | ||
| 208 | struct atmel_aes_dev *aes_dd = NULL; | ||
| 209 | struct atmel_aes_dev *tmp; | ||
| 210 | |||
| 211 | spin_lock_bh(&atmel_aes.lock); | ||
| 212 | if (!ctx->dd) { | ||
| 213 | list_for_each_entry(tmp, &atmel_aes.dev_list, list) { | ||
| 214 | aes_dd = tmp; | ||
| 215 | break; | ||
| 216 | } | ||
| 217 | ctx->dd = aes_dd; | ||
| 218 | } else { | ||
| 219 | aes_dd = ctx->dd; | ||
| 220 | } | ||
| 221 | |||
| 222 | spin_unlock_bh(&atmel_aes.lock); | ||
| 223 | |||
| 224 | return aes_dd; | ||
| 225 | } | ||
| 226 | |||
| 227 | static int atmel_aes_hw_init(struct atmel_aes_dev *dd) | ||
| 228 | { | ||
| 229 | clk_prepare_enable(dd->iclk); | ||
| 230 | |||
| 231 | if (!(dd->flags & AES_FLAGS_INIT)) { | ||
| 232 | atmel_aes_write(dd, AES_CR, AES_CR_SWRST); | ||
| 233 | atmel_aes_dualbuff_test(dd); | ||
| 234 | dd->flags |= AES_FLAGS_INIT; | ||
| 235 | dd->err = 0; | ||
| 236 | } | ||
| 237 | |||
| 238 | return 0; | ||
| 239 | } | ||
| 240 | |||
| 241 | static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd) | ||
| 242 | { | ||
| 243 | atmel_aes_hw_init(dd); | ||
| 244 | |||
| 245 | dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION); | ||
| 246 | |||
| 247 | clk_disable_unprepare(dd->iclk); | ||
| 248 | } | ||
| 249 | |||
| 250 | static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) | ||
| 251 | { | ||
| 252 | struct ablkcipher_request *req = dd->req; | ||
| 253 | |||
| 254 | clk_disable_unprepare(dd->iclk); | ||
| 255 | dd->flags &= ~AES_FLAGS_BUSY; | ||
| 256 | |||
| 257 | req->base.complete(&req->base, err); | ||
| 258 | } | ||
| 259 | |||
| 260 | static void atmel_aes_dma_callback(void *data) | ||
| 261 | { | ||
| 262 | struct atmel_aes_dev *dd = data; | ||
| 263 | |||
| 264 | /* dma_lch_out - completed */ | ||
| 265 | tasklet_schedule(&dd->done_task); | ||
| 266 | } | ||
| 267 | |||
| 268 | static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd) | ||
| 269 | { | ||
| 270 | struct dma_async_tx_descriptor *in_desc, *out_desc; | ||
| 271 | int nb_dma_sg_in, nb_dma_sg_out; | ||
| 272 | |||
| 273 | dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg); | ||
| 274 | if (!dd->nb_in_sg) | ||
| 275 | goto exit_err; | ||
| 276 | |||
| 277 | nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg, | ||
| 278 | DMA_TO_DEVICE); | ||
| 279 | if (!nb_dma_sg_in) | ||
| 280 | goto exit_err; | ||
| 281 | |||
| 282 | in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg, | ||
| 283 | nb_dma_sg_in, DMA_MEM_TO_DEV, | ||
| 284 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 285 | |||
| 286 | if (!in_desc) | ||
| 287 | goto unmap_in; | ||
| 288 | |||
| 289 | /* callback not needed */ | ||
| 290 | |||
| 291 | dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); | ||
| 292 | if (!dd->nb_out_sg) | ||
| 293 | goto unmap_in; | ||
| 294 | |||
| 295 | nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg, | ||
| 296 | DMA_FROM_DEVICE); | ||
| 297 | if (!nb_dma_sg_out) | ||
| 298 | goto unmap_out; | ||
| 299 | |||
| 300 | out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg, | ||
| 301 | nb_dma_sg_out, DMA_DEV_TO_MEM, | ||
| 302 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 303 | |||
| 304 | if (!out_desc) | ||
| 305 | goto unmap_out; | ||
| 306 | |||
| 307 | out_desc->callback = atmel_aes_dma_callback; | ||
| 308 | out_desc->callback_param = dd; | ||
| 309 | |||
| 310 | dd->total -= dd->req->nbytes; | ||
| 311 | |||
| 312 | dmaengine_submit(out_desc); | ||
| 313 | dma_async_issue_pending(dd->dma_lch_out.chan); | ||
| 314 | |||
| 315 | dmaengine_submit(in_desc); | ||
| 316 | dma_async_issue_pending(dd->dma_lch_in.chan); | ||
| 317 | |||
| 318 | return 0; | ||
| 319 | |||
| 320 | unmap_out: | ||
| 321 | dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg, | ||
| 322 | DMA_FROM_DEVICE); | ||
| 323 | unmap_in: | ||
| 324 | dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg, | ||
| 325 | DMA_TO_DEVICE); | ||
| 326 | exit_err: | ||
| 327 | return -EINVAL; | ||
| 328 | } | ||
| 329 | |||
| 330 | static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) | ||
| 331 | { | ||
| 332 | dd->flags &= ~AES_FLAGS_DMA; | ||
| 333 | |||
| 334 | /* use cache buffers */ | ||
| 335 | dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg); | ||
| 336 | if (!dd->nb_in_sg) | ||
| 337 | return -EINVAL; | ||
| 338 | |||
| 339 | dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); | ||
| 340 | if (!dd->nb_out_sg) | ||
| 341 | return -EINVAL; | ||
| 342 | |||
| 343 | dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg, | ||
| 344 | dd->buf_in, dd->total); | ||
| 345 | |||
| 346 | if (!dd->bufcnt) | ||
| 347 | return -EINVAL; | ||
| 348 | |||
| 349 | dd->total -= dd->bufcnt; | ||
| 350 | |||
| 351 | atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); | ||
| 352 | atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in, | ||
| 353 | dd->bufcnt >> 2); | ||
| 354 | |||
| 355 | return 0; | ||
| 356 | } | ||
| 357 | |||
| 358 | static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) | ||
| 359 | { | ||
| 360 | int err; | ||
| 361 | |||
| 362 | if (dd->flags & AES_FLAGS_CFB8) { | ||
| 363 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
| 364 | DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
| 365 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
| 366 | DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
| 367 | } else if (dd->flags & AES_FLAGS_CFB16) { | ||
| 368 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
| 369 | DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
| 370 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
| 371 | DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
| 372 | } else { | ||
| 373 | dd->dma_lch_in.dma_conf.dst_addr_width = | ||
| 374 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 375 | dd->dma_lch_out.dma_conf.src_addr_width = | ||
| 376 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 377 | } | ||
| 378 | |||
| 379 | dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); | ||
| 380 | dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); | ||
| 381 | |||
| 382 | dd->flags |= AES_FLAGS_DMA; | ||
| 383 | err = atmel_aes_crypt_dma(dd); | ||
| 384 | |||
| 385 | return err; | ||
| 386 | } | ||
| 387 | |||
| 388 | static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd) | ||
| 389 | { | ||
| 390 | int err; | ||
| 391 | u32 valcr = 0, valmr = 0; | ||
| 392 | |||
| 393 | err = atmel_aes_hw_init(dd); | ||
| 394 | |||
| 395 | if (err) | ||
| 396 | return err; | ||
| 397 | |||
| 398 | /* MR register must be set before IV registers */ | ||
| 399 | if (dd->ctx->keylen == AES_KEYSIZE_128) | ||
| 400 | valmr |= AES_MR_KEYSIZE_128; | ||
| 401 | else if (dd->ctx->keylen == AES_KEYSIZE_192) | ||
| 402 | valmr |= AES_MR_KEYSIZE_192; | ||
| 403 | else | ||
| 404 | valmr |= AES_MR_KEYSIZE_256; | ||
| 405 | |||
| 406 | if (dd->flags & AES_FLAGS_CBC) { | ||
| 407 | valmr |= AES_MR_OPMOD_CBC; | ||
| 408 | } else if (dd->flags & AES_FLAGS_CFB) { | ||
| 409 | valmr |= AES_MR_OPMOD_CFB; | ||
| 410 | if (dd->flags & AES_FLAGS_CFB8) | ||
| 411 | valmr |= AES_MR_CFBS_8b; | ||
| 412 | else if (dd->flags & AES_FLAGS_CFB16) | ||
| 413 | valmr |= AES_MR_CFBS_16b; | ||
| 414 | else if (dd->flags & AES_FLAGS_CFB32) | ||
| 415 | valmr |= AES_MR_CFBS_32b; | ||
| 416 | else if (dd->flags & AES_FLAGS_CFB64) | ||
| 417 | valmr |= AES_MR_CFBS_64b; | ||
| 418 | } else if (dd->flags & AES_FLAGS_OFB) { | ||
| 419 | valmr |= AES_MR_OPMOD_OFB; | ||
| 420 | } else if (dd->flags & AES_FLAGS_CTR) { | ||
| 421 | valmr |= AES_MR_OPMOD_CTR; | ||
| 422 | } else { | ||
| 423 | valmr |= AES_MR_OPMOD_ECB; | ||
| 424 | } | ||
| 425 | |||
| 426 | if (dd->flags & AES_FLAGS_ENCRYPT) | ||
| 427 | valmr |= AES_MR_CYPHER_ENC; | ||
| 428 | |||
| 429 | if (dd->total > ATMEL_AES_DMA_THRESHOLD) { | ||
| 430 | valmr |= AES_MR_SMOD_IDATAR0; | ||
| 431 | if (dd->flags & AES_FLAGS_DUALBUFF) | ||
| 432 | valmr |= AES_MR_DUALBUFF; | ||
| 433 | } else { | ||
| 434 | valmr |= AES_MR_SMOD_AUTO; | ||
| 435 | } | ||
| 436 | |||
| 437 | atmel_aes_write(dd, AES_CR, valcr); | ||
| 438 | atmel_aes_write(dd, AES_MR, valmr); | ||
| 439 | |||
| 440 | atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, | ||
| 441 | dd->ctx->keylen >> 2); | ||
| 442 | |||
| 443 | if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) || | ||
| 444 | (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) && | ||
| 445 | dd->req->info) { | ||
| 446 | atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4); | ||
| 447 | } | ||
| 448 | |||
| 449 | return 0; | ||
| 450 | } | ||
| 451 | |||
| 452 | static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, | ||
| 453 | struct ablkcipher_request *req) | ||
| 454 | { | ||
| 455 | struct crypto_async_request *async_req, *backlog; | ||
| 456 | struct atmel_aes_ctx *ctx; | ||
| 457 | struct atmel_aes_reqctx *rctx; | ||
| 458 | unsigned long flags; | ||
| 459 | int err, ret = 0; | ||
| 460 | |||
| 461 | spin_lock_irqsave(&dd->lock, flags); | ||
| 462 | if (req) | ||
| 463 | ret = ablkcipher_enqueue_request(&dd->queue, req); | ||
| 464 | if (dd->flags & AES_FLAGS_BUSY) { | ||
| 465 | spin_unlock_irqrestore(&dd->lock, flags); | ||
| 466 | return ret; | ||
| 467 | } | ||
| 468 | backlog = crypto_get_backlog(&dd->queue); | ||
| 469 | async_req = crypto_dequeue_request(&dd->queue); | ||
| 470 | if (async_req) | ||
| 471 | dd->flags |= AES_FLAGS_BUSY; | ||
| 472 | spin_unlock_irqrestore(&dd->lock, flags); | ||
| 473 | |||
| 474 | if (!async_req) | ||
| 475 | return ret; | ||
| 476 | |||
| 477 | if (backlog) | ||
| 478 | backlog->complete(backlog, -EINPROGRESS); | ||
| 479 | |||
| 480 | req = ablkcipher_request_cast(async_req); | ||
| 481 | |||
| 482 | /* assign new request to device */ | ||
| 483 | dd->req = req; | ||
| 484 | dd->total = req->nbytes; | ||
| 485 | dd->in_sg = req->src; | ||
| 486 | dd->out_sg = req->dst; | ||
| 487 | |||
| 488 | rctx = ablkcipher_request_ctx(req); | ||
| 489 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
| 490 | rctx->mode &= AES_FLAGS_MODE_MASK; | ||
| 491 | dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode; | ||
| 492 | dd->ctx = ctx; | ||
| 493 | ctx->dd = dd; | ||
| 494 | |||
| 495 | err = atmel_aes_write_ctrl(dd); | ||
| 496 | if (!err) { | ||
| 497 | if (dd->total > ATMEL_AES_DMA_THRESHOLD) | ||
| 498 | err = atmel_aes_crypt_dma_start(dd); | ||
| 499 | else | ||
| 500 | err = atmel_aes_crypt_cpu_start(dd); | ||
| 501 | } | ||
| 502 | if (err) { | ||
| 503 | /* aes_task will not finish it, so do it here */ | ||
| 504 | atmel_aes_finish_req(dd, err); | ||
| 505 | tasklet_schedule(&dd->queue_task); | ||
| 506 | } | ||
| 507 | |||
| 508 | return ret; | ||
| 509 | } | ||
| 510 | |||
| 511 | static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) | ||
| 512 | { | ||
| 513 | int err = -EINVAL; | ||
| 514 | |||
| 515 | if (dd->flags & AES_FLAGS_DMA) { | ||
| 516 | dma_unmap_sg(dd->dev, dd->out_sg, | ||
| 517 | dd->nb_out_sg, DMA_FROM_DEVICE); | ||
| 518 | dma_unmap_sg(dd->dev, dd->in_sg, | ||
| 519 | dd->nb_in_sg, DMA_TO_DEVICE); | ||
| 520 | err = 0; | ||
| 521 | } | ||
| 522 | |||
| 523 | return err; | ||
| 524 | } | ||
| 525 | |||
| 526 | static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
| 527 | { | ||
| 528 | struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx( | ||
| 529 | crypto_ablkcipher_reqtfm(req)); | ||
| 530 | struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
| 531 | struct atmel_aes_dev *dd; | ||
| 532 | |||
| 533 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { | ||
| 534 | pr_err("request size is not exact amount of AES blocks\n"); | ||
| 535 | return -EINVAL; | ||
| 536 | } | ||
| 537 | |||
| 538 | dd = atmel_aes_find_dev(ctx); | ||
| 539 | if (!dd) | ||
| 540 | return -ENODEV; | ||
| 541 | |||
| 542 | rctx->mode = mode; | ||
| 543 | |||
| 544 | return atmel_aes_handle_queue(dd, req); | ||
| 545 | } | ||
| 546 | |||
| 547 | static bool atmel_aes_filter(struct dma_chan *chan, void *slave) | ||
| 548 | { | ||
| 549 | struct at_dma_slave *sl = slave; | ||
| 550 | |||
| 551 | if (sl && sl->dma_dev == chan->device->dev) { | ||
| 552 | chan->private = sl; | ||
| 553 | return true; | ||
| 554 | } else { | ||
| 555 | return false; | ||
| 556 | } | ||
| 557 | } | ||
| 558 | |||
| 559 | static int atmel_aes_dma_init(struct atmel_aes_dev *dd) | ||
| 560 | { | ||
| 561 | int err = -ENOMEM; | ||
| 562 | struct aes_platform_data *pdata; | ||
| 563 | dma_cap_mask_t mask_in, mask_out; | ||
| 564 | |||
| 565 | pdata = dd->dev->platform_data; | ||
| 566 | |||
| 567 | if (pdata && pdata->dma_slave->txdata.dma_dev && | ||
| 568 | pdata->dma_slave->rxdata.dma_dev) { | ||
| 569 | |||
| 570 | /* Try to grab 2 DMA channels */ | ||
| 571 | dma_cap_zero(mask_in); | ||
| 572 | dma_cap_set(DMA_SLAVE, mask_in); | ||
| 573 | |||
| 574 | dd->dma_lch_in.chan = dma_request_channel(mask_in, | ||
| 575 | atmel_aes_filter, &pdata->dma_slave->rxdata); | ||
| 576 | if (!dd->dma_lch_in.chan) | ||
| 577 | goto err_dma_in; | ||
| 578 | |||
| 579 | dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; | ||
| 580 | dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + | ||
| 581 | AES_IDATAR(0); | ||
| 582 | dd->dma_lch_in.dma_conf.src_maxburst = 1; | ||
| 583 | dd->dma_lch_in.dma_conf.dst_maxburst = 1; | ||
| 584 | dd->dma_lch_in.dma_conf.device_fc = false; | ||
| 585 | |||
| 586 | dma_cap_zero(mask_out); | ||
| 587 | dma_cap_set(DMA_SLAVE, mask_out); | ||
| 588 | dd->dma_lch_out.chan = dma_request_channel(mask_out, | ||
| 589 | atmel_aes_filter, &pdata->dma_slave->txdata); | ||
| 590 | if (!dd->dma_lch_out.chan) | ||
| 591 | goto err_dma_out; | ||
| 592 | |||
| 593 | dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; | ||
| 594 | dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + | ||
| 595 | AES_ODATAR(0); | ||
| 596 | dd->dma_lch_out.dma_conf.src_maxburst = 1; | ||
| 597 | dd->dma_lch_out.dma_conf.dst_maxburst = 1; | ||
| 598 | dd->dma_lch_out.dma_conf.device_fc = false; | ||
| 599 | |||
| 600 | return 0; | ||
| 601 | } else { | ||
| 602 | return -ENODEV; | ||
| 603 | } | ||
| 604 | |||
| 605 | err_dma_out: | ||
| 606 | dma_release_channel(dd->dma_lch_in.chan); | ||
| 607 | err_dma_in: | ||
| 608 | return err; | ||
| 609 | } | ||
| 610 | |||
| 611 | static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) | ||
| 612 | { | ||
| 613 | dma_release_channel(dd->dma_lch_in.chan); | ||
| 614 | dma_release_channel(dd->dma_lch_out.chan); | ||
| 615 | } | ||
| 616 | |||
| 617 | static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 618 | unsigned int keylen) | ||
| 619 | { | ||
| 620 | struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 621 | |||
| 622 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | ||
| 623 | keylen != AES_KEYSIZE_256) { | ||
| 624 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 625 | return -EINVAL; | ||
| 626 | } | ||
| 627 | |||
| 628 | memcpy(ctx->key, key, keylen); | ||
| 629 | ctx->keylen = keylen; | ||
| 630 | |||
| 631 | return 0; | ||
| 632 | } | ||
| 633 | |||
| 634 | static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
| 635 | { | ||
| 636 | return atmel_aes_crypt(req, | ||
| 637 | AES_FLAGS_ENCRYPT); | ||
| 638 | } | ||
| 639 | |||
| 640 | static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
| 641 | { | ||
| 642 | return atmel_aes_crypt(req, | ||
| 643 | 0); | ||
| 644 | } | ||
| 645 | |||
| 646 | static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
| 647 | { | ||
| 648 | return atmel_aes_crypt(req, | ||
| 649 | AES_FLAGS_ENCRYPT | AES_FLAGS_CBC); | ||
| 650 | } | ||
| 651 | |||
| 652 | static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
| 653 | { | ||
| 654 | return atmel_aes_crypt(req, | ||
| 655 | AES_FLAGS_CBC); | ||
| 656 | } | ||
| 657 | |||
| 658 | static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req) | ||
| 659 | { | ||
| 660 | return atmel_aes_crypt(req, | ||
| 661 | AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); | ||
| 662 | } | ||
| 663 | |||
| 664 | static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req) | ||
| 665 | { | ||
| 666 | return atmel_aes_crypt(req, | ||
| 667 | AES_FLAGS_OFB); | ||
| 668 | } | ||
| 669 | |||
| 670 | static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req) | ||
| 671 | { | ||
| 672 | return atmel_aes_crypt(req, | ||
| 673 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB); | ||
| 674 | } | ||
| 675 | |||
| 676 | static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req) | ||
| 677 | { | ||
| 678 | return atmel_aes_crypt(req, | ||
| 679 | AES_FLAGS_CFB); | ||
| 680 | } | ||
| 681 | |||
| 682 | static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req) | ||
| 683 | { | ||
| 684 | return atmel_aes_crypt(req, | ||
| 685 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64); | ||
| 686 | } | ||
| 687 | |||
| 688 | static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req) | ||
| 689 | { | ||
| 690 | return atmel_aes_crypt(req, | ||
| 691 | AES_FLAGS_CFB | AES_FLAGS_CFB64); | ||
| 692 | } | ||
| 693 | |||
| 694 | static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req) | ||
| 695 | { | ||
| 696 | return atmel_aes_crypt(req, | ||
| 697 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32); | ||
| 698 | } | ||
| 699 | |||
| 700 | static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req) | ||
| 701 | { | ||
| 702 | return atmel_aes_crypt(req, | ||
| 703 | AES_FLAGS_CFB | AES_FLAGS_CFB32); | ||
| 704 | } | ||
| 705 | |||
| 706 | static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req) | ||
| 707 | { | ||
| 708 | return atmel_aes_crypt(req, | ||
| 709 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16); | ||
| 710 | } | ||
| 711 | |||
| 712 | static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req) | ||
| 713 | { | ||
| 714 | return atmel_aes_crypt(req, | ||
| 715 | AES_FLAGS_CFB | AES_FLAGS_CFB16); | ||
| 716 | } | ||
| 717 | |||
| 718 | static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req) | ||
| 719 | { | ||
| 720 | return atmel_aes_crypt(req, | ||
| 721 | AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8); | ||
| 722 | } | ||
| 723 | |||
| 724 | static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req) | ||
| 725 | { | ||
| 726 | return atmel_aes_crypt(req, | ||
| 727 | AES_FLAGS_CFB | AES_FLAGS_CFB8); | ||
| 728 | } | ||
| 729 | |||
| 730 | static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req) | ||
| 731 | { | ||
| 732 | return atmel_aes_crypt(req, | ||
| 733 | AES_FLAGS_ENCRYPT | AES_FLAGS_CTR); | ||
| 734 | } | ||
| 735 | |||
| 736 | static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req) | ||
| 737 | { | ||
| 738 | return atmel_aes_crypt(req, | ||
| 739 | AES_FLAGS_CTR); | ||
| 740 | } | ||
| 741 | |||
| 742 | static int atmel_aes_cra_init(struct crypto_tfm *tfm) | ||
| 743 | { | ||
| 744 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); | ||
| 745 | |||
| 746 | return 0; | ||
| 747 | } | ||
| 748 | |||
| 749 | static void atmel_aes_cra_exit(struct crypto_tfm *tfm) | ||
| 750 | { | ||
| 751 | } | ||
| 752 | |||
| 753 | static struct crypto_alg aes_algs[] = { | ||
| 754 | { | ||
| 755 | .cra_name = "ecb(aes)", | ||
| 756 | .cra_driver_name = "atmel-ecb-aes", | ||
| 757 | .cra_priority = 100, | ||
| 758 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 759 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 760 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 761 | .cra_alignmask = 0x0, | ||
| 762 | .cra_type = &crypto_ablkcipher_type, | ||
| 763 | .cra_module = THIS_MODULE, | ||
| 764 | .cra_init = atmel_aes_cra_init, | ||
| 765 | .cra_exit = atmel_aes_cra_exit, | ||
| 766 | .cra_u.ablkcipher = { | ||
| 767 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 768 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 769 | .setkey = atmel_aes_setkey, | ||
| 770 | .encrypt = atmel_aes_ecb_encrypt, | ||
| 771 | .decrypt = atmel_aes_ecb_decrypt, | ||
| 772 | } | ||
| 773 | }, | ||
| 774 | { | ||
| 775 | .cra_name = "cbc(aes)", | ||
| 776 | .cra_driver_name = "atmel-cbc-aes", | ||
| 777 | .cra_priority = 100, | ||
| 778 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 779 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 780 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 781 | .cra_alignmask = 0x0, | ||
| 782 | .cra_type = &crypto_ablkcipher_type, | ||
| 783 | .cra_module = THIS_MODULE, | ||
| 784 | .cra_init = atmel_aes_cra_init, | ||
| 785 | .cra_exit = atmel_aes_cra_exit, | ||
| 786 | .cra_u.ablkcipher = { | ||
| 787 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 788 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 789 | .ivsize = AES_BLOCK_SIZE, | ||
| 790 | .setkey = atmel_aes_setkey, | ||
| 791 | .encrypt = atmel_aes_cbc_encrypt, | ||
| 792 | .decrypt = atmel_aes_cbc_decrypt, | ||
| 793 | } | ||
| 794 | }, | ||
| 795 | { | ||
| 796 | .cra_name = "ofb(aes)", | ||
| 797 | .cra_driver_name = "atmel-ofb-aes", | ||
| 798 | .cra_priority = 100, | ||
| 799 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 800 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 801 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 802 | .cra_alignmask = 0x0, | ||
| 803 | .cra_type = &crypto_ablkcipher_type, | ||
| 804 | .cra_module = THIS_MODULE, | ||
| 805 | .cra_init = atmel_aes_cra_init, | ||
| 806 | .cra_exit = atmel_aes_cra_exit, | ||
| 807 | .cra_u.ablkcipher = { | ||
| 808 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 809 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 810 | .ivsize = AES_BLOCK_SIZE, | ||
| 811 | .setkey = atmel_aes_setkey, | ||
| 812 | .encrypt = atmel_aes_ofb_encrypt, | ||
| 813 | .decrypt = atmel_aes_ofb_decrypt, | ||
| 814 | } | ||
| 815 | }, | ||
| 816 | { | ||
| 817 | .cra_name = "cfb(aes)", | ||
| 818 | .cra_driver_name = "atmel-cfb-aes", | ||
| 819 | .cra_priority = 100, | ||
| 820 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 821 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 822 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 823 | .cra_alignmask = 0x0, | ||
| 824 | .cra_type = &crypto_ablkcipher_type, | ||
| 825 | .cra_module = THIS_MODULE, | ||
| 826 | .cra_init = atmel_aes_cra_init, | ||
| 827 | .cra_exit = atmel_aes_cra_exit, | ||
| 828 | .cra_u.ablkcipher = { | ||
| 829 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 830 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 831 | .ivsize = AES_BLOCK_SIZE, | ||
| 832 | .setkey = atmel_aes_setkey, | ||
| 833 | .encrypt = atmel_aes_cfb_encrypt, | ||
| 834 | .decrypt = atmel_aes_cfb_decrypt, | ||
| 835 | } | ||
| 836 | }, | ||
| 837 | { | ||
| 838 | .cra_name = "cfb32(aes)", | ||
| 839 | .cra_driver_name = "atmel-cfb32-aes", | ||
| 840 | .cra_priority = 100, | ||
| 841 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 842 | .cra_blocksize = CFB32_BLOCK_SIZE, | ||
| 843 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 844 | .cra_alignmask = 0x0, | ||
| 845 | .cra_type = &crypto_ablkcipher_type, | ||
| 846 | .cra_module = THIS_MODULE, | ||
| 847 | .cra_init = atmel_aes_cra_init, | ||
| 848 | .cra_exit = atmel_aes_cra_exit, | ||
| 849 | .cra_u.ablkcipher = { | ||
| 850 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 851 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 852 | .ivsize = AES_BLOCK_SIZE, | ||
| 853 | .setkey = atmel_aes_setkey, | ||
| 854 | .encrypt = atmel_aes_cfb32_encrypt, | ||
| 855 | .decrypt = atmel_aes_cfb32_decrypt, | ||
| 856 | } | ||
| 857 | }, | ||
| 858 | { | ||
| 859 | .cra_name = "cfb16(aes)", | ||
| 860 | .cra_driver_name = "atmel-cfb16-aes", | ||
| 861 | .cra_priority = 100, | ||
| 862 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 863 | .cra_blocksize = CFB16_BLOCK_SIZE, | ||
| 864 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 865 | .cra_alignmask = 0x0, | ||
| 866 | .cra_type = &crypto_ablkcipher_type, | ||
| 867 | .cra_module = THIS_MODULE, | ||
| 868 | .cra_init = atmel_aes_cra_init, | ||
| 869 | .cra_exit = atmel_aes_cra_exit, | ||
| 870 | .cra_u.ablkcipher = { | ||
| 871 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 872 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 873 | .ivsize = AES_BLOCK_SIZE, | ||
| 874 | .setkey = atmel_aes_setkey, | ||
| 875 | .encrypt = atmel_aes_cfb16_encrypt, | ||
| 876 | .decrypt = atmel_aes_cfb16_decrypt, | ||
| 877 | } | ||
| 878 | }, | ||
| 879 | { | ||
| 880 | .cra_name = "cfb8(aes)", | ||
| 881 | .cra_driver_name = "atmel-cfb8-aes", | ||
| 882 | .cra_priority = 100, | ||
| 883 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 884 | .cra_blocksize = CFB8_BLOCK_SIZE, | ||
| 885 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 886 | .cra_alignmask = 0x0, | ||
| 887 | .cra_type = &crypto_ablkcipher_type, | ||
| 888 | .cra_module = THIS_MODULE, | ||
| 889 | .cra_init = atmel_aes_cra_init, | ||
| 890 | .cra_exit = atmel_aes_cra_exit, | ||
| 891 | .cra_u.ablkcipher = { | ||
| 892 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 893 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 894 | .ivsize = AES_BLOCK_SIZE, | ||
| 895 | .setkey = atmel_aes_setkey, | ||
| 896 | .encrypt = atmel_aes_cfb8_encrypt, | ||
| 897 | .decrypt = atmel_aes_cfb8_decrypt, | ||
| 898 | } | ||
| 899 | }, | ||
| 900 | { | ||
| 901 | .cra_name = "ctr(aes)", | ||
| 902 | .cra_driver_name = "atmel-ctr-aes", | ||
| 903 | .cra_priority = 100, | ||
| 904 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 905 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 906 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 907 | .cra_alignmask = 0x0, | ||
| 908 | .cra_type = &crypto_ablkcipher_type, | ||
| 909 | .cra_module = THIS_MODULE, | ||
| 910 | .cra_init = atmel_aes_cra_init, | ||
| 911 | .cra_exit = atmel_aes_cra_exit, | ||
| 912 | .cra_u.ablkcipher = { | ||
| 913 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 914 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 915 | .ivsize = AES_BLOCK_SIZE, | ||
| 916 | .setkey = atmel_aes_setkey, | ||
| 917 | .encrypt = atmel_aes_ctr_encrypt, | ||
| 918 | .decrypt = atmel_aes_ctr_decrypt, | ||
| 919 | } | ||
| 920 | }, | ||
| 921 | }; | ||
| 922 | |||
| 923 | static struct crypto_alg aes_cfb64_alg[] = { | ||
| 924 | { | ||
| 925 | .cra_name = "cfb64(aes)", | ||
| 926 | .cra_driver_name = "atmel-cfb64-aes", | ||
| 927 | .cra_priority = 100, | ||
| 928 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 929 | .cra_blocksize = CFB64_BLOCK_SIZE, | ||
| 930 | .cra_ctxsize = sizeof(struct atmel_aes_ctx), | ||
| 931 | .cra_alignmask = 0x0, | ||
| 932 | .cra_type = &crypto_ablkcipher_type, | ||
| 933 | .cra_module = THIS_MODULE, | ||
| 934 | .cra_init = atmel_aes_cra_init, | ||
| 935 | .cra_exit = atmel_aes_cra_exit, | ||
| 936 | .cra_u.ablkcipher = { | ||
| 937 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 938 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 939 | .ivsize = AES_BLOCK_SIZE, | ||
| 940 | .setkey = atmel_aes_setkey, | ||
| 941 | .encrypt = atmel_aes_cfb64_encrypt, | ||
| 942 | .decrypt = atmel_aes_cfb64_decrypt, | ||
| 943 | } | ||
| 944 | }, | ||
| 945 | }; | ||
| 946 | |||
| 947 | static void atmel_aes_queue_task(unsigned long data) | ||
| 948 | { | ||
| 949 | struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; | ||
| 950 | |||
| 951 | atmel_aes_handle_queue(dd, NULL); | ||
| 952 | } | ||
| 953 | |||
| 954 | static void atmel_aes_done_task(unsigned long data) | ||
| 955 | { | ||
| 956 | struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data; | ||
| 957 | int err; | ||
| 958 | |||
| 959 | if (!(dd->flags & AES_FLAGS_DMA)) { | ||
| 960 | atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out, | ||
| 961 | dd->bufcnt >> 2); | ||
| 962 | |||
| 963 | if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg, | ||
| 964 | dd->buf_out, dd->bufcnt)) | ||
| 965 | err = 0; | ||
| 966 | else | ||
| 967 | err = -EINVAL; | ||
| 968 | |||
| 969 | goto cpu_end; | ||
| 970 | } | ||
| 971 | |||
| 972 | err = atmel_aes_crypt_dma_stop(dd); | ||
| 973 | |||
| 974 | err = dd->err ? : err; | ||
| 975 | |||
| 976 | if (dd->total && !err) { | ||
| 977 | err = atmel_aes_crypt_dma_start(dd); | ||
| 978 | if (!err) | ||
| 979 | return; /* DMA started. Not finishing. */ | ||
| 980 | } | ||
| 981 | |||
| 982 | cpu_end: | ||
| 983 | atmel_aes_finish_req(dd, err); | ||
| 984 | atmel_aes_handle_queue(dd, NULL); | ||
| 985 | } | ||
| 986 | |||
| 987 | static irqreturn_t atmel_aes_irq(int irq, void *dev_id) | ||
| 988 | { | ||
| 989 | struct atmel_aes_dev *aes_dd = dev_id; | ||
| 990 | u32 reg; | ||
| 991 | |||
| 992 | reg = atmel_aes_read(aes_dd, AES_ISR); | ||
| 993 | if (reg & atmel_aes_read(aes_dd, AES_IMR)) { | ||
| 994 | atmel_aes_write(aes_dd, AES_IDR, reg); | ||
| 995 | if (AES_FLAGS_BUSY & aes_dd->flags) | ||
| 996 | tasklet_schedule(&aes_dd->done_task); | ||
| 997 | else | ||
| 998 | dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n"); | ||
| 999 | return IRQ_HANDLED; | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | return IRQ_NONE; | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) | ||
| 1006 | { | ||
| 1007 | int i; | ||
| 1008 | |||
| 1009 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) | ||
| 1010 | crypto_unregister_alg(&aes_algs[i]); | ||
| 1011 | if (dd->hw_version >= 0x130) | ||
| 1012 | crypto_unregister_alg(&aes_cfb64_alg[0]); | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | static int atmel_aes_register_algs(struct atmel_aes_dev *dd) | ||
| 1016 | { | ||
| 1017 | int err, i, j; | ||
| 1018 | |||
| 1019 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { | ||
| 1020 | INIT_LIST_HEAD(&aes_algs[i].cra_list); | ||
| 1021 | err = crypto_register_alg(&aes_algs[i]); | ||
| 1022 | if (err) | ||
| 1023 | goto err_aes_algs; | ||
| 1024 | } | ||
| 1025 | |||
| 1026 | atmel_aes_hw_version_init(dd); | ||
| 1027 | |||
| 1028 | if (dd->hw_version >= 0x130) { | ||
| 1029 | INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list); | ||
| 1030 | err = crypto_register_alg(&aes_cfb64_alg[0]); | ||
| 1031 | if (err) | ||
| 1032 | goto err_aes_cfb64_alg; | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | return 0; | ||
| 1036 | |||
| 1037 | err_aes_cfb64_alg: | ||
| 1038 | i = ARRAY_SIZE(aes_algs); | ||
| 1039 | err_aes_algs: | ||
| 1040 | for (j = 0; j < i; j++) | ||
| 1041 | crypto_unregister_alg(&aes_algs[j]); | ||
| 1042 | |||
| 1043 | return err; | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | static int __devinit atmel_aes_probe(struct platform_device *pdev) | ||
| 1047 | { | ||
| 1048 | struct atmel_aes_dev *aes_dd; | ||
| 1049 | struct aes_platform_data *pdata; | ||
| 1050 | struct device *dev = &pdev->dev; | ||
| 1051 | struct resource *aes_res; | ||
| 1052 | unsigned long aes_phys_size; | ||
| 1053 | int err; | ||
| 1054 | |||
| 1055 | pdata = pdev->dev.platform_data; | ||
| 1056 | if (!pdata) { | ||
| 1057 | err = -ENXIO; | ||
| 1058 | goto aes_dd_err; | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL); | ||
| 1062 | if (aes_dd == NULL) { | ||
| 1063 | dev_err(dev, "unable to alloc data struct.\n"); | ||
| 1064 | err = -ENOMEM; | ||
| 1065 | goto aes_dd_err; | ||
| 1066 | } | ||
| 1067 | |||
| 1068 | aes_dd->dev = dev; | ||
| 1069 | |||
| 1070 | platform_set_drvdata(pdev, aes_dd); | ||
| 1071 | |||
| 1072 | INIT_LIST_HEAD(&aes_dd->list); | ||
| 1073 | |||
| 1074 | tasklet_init(&aes_dd->done_task, atmel_aes_done_task, | ||
| 1075 | (unsigned long)aes_dd); | ||
| 1076 | tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task, | ||
| 1077 | (unsigned long)aes_dd); | ||
| 1078 | |||
| 1079 | crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); | ||
| 1080 | |||
| 1081 | aes_dd->irq = -1; | ||
| 1082 | |||
| 1083 | /* Get the base address */ | ||
| 1084 | aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1085 | if (!aes_res) { | ||
| 1086 | dev_err(dev, "no MEM resource info\n"); | ||
| 1087 | err = -ENODEV; | ||
| 1088 | goto res_err; | ||
| 1089 | } | ||
| 1090 | aes_dd->phys_base = aes_res->start; | ||
| 1091 | aes_phys_size = resource_size(aes_res); | ||
| 1092 | |||
| 1093 | /* Get the IRQ */ | ||
| 1094 | aes_dd->irq = platform_get_irq(pdev, 0); | ||
| 1095 | if (aes_dd->irq < 0) { | ||
| 1096 | dev_err(dev, "no IRQ resource info\n"); | ||
| 1097 | err = aes_dd->irq; | ||
| 1098 | goto aes_irq_err; | ||
| 1099 | } | ||
| 1100 | |||
| 1101 | err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes", | ||
| 1102 | aes_dd); | ||
| 1103 | if (err) { | ||
| 1104 | dev_err(dev, "unable to request aes irq.\n"); | ||
| 1105 | goto aes_irq_err; | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | /* Initializing the clock */ | ||
| 1109 | aes_dd->iclk = clk_get(&pdev->dev, NULL); | ||
| 1110 | if (IS_ERR(aes_dd->iclk)) { | ||
| 1111 | dev_err(dev, "clock intialization failed.\n"); | ||
| 1112 | err = PTR_ERR(aes_dd->iclk); | ||
| 1113 | goto clk_err; | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size); | ||
| 1117 | if (!aes_dd->io_base) { | ||
| 1118 | dev_err(dev, "can't ioremap\n"); | ||
| 1119 | err = -ENOMEM; | ||
| 1120 | goto aes_io_err; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | err = atmel_aes_dma_init(aes_dd); | ||
| 1124 | if (err) | ||
| 1125 | goto err_aes_dma; | ||
| 1126 | |||
| 1127 | spin_lock(&atmel_aes.lock); | ||
| 1128 | list_add_tail(&aes_dd->list, &atmel_aes.dev_list); | ||
| 1129 | spin_unlock(&atmel_aes.lock); | ||
| 1130 | |||
| 1131 | err = atmel_aes_register_algs(aes_dd); | ||
| 1132 | if (err) | ||
| 1133 | goto err_algs; | ||
| 1134 | |||
| 1135 | dev_info(dev, "Atmel AES\n"); | ||
| 1136 | |||
| 1137 | return 0; | ||
| 1138 | |||
| 1139 | err_algs: | ||
| 1140 | spin_lock(&atmel_aes.lock); | ||
| 1141 | list_del(&aes_dd->list); | ||
| 1142 | spin_unlock(&atmel_aes.lock); | ||
| 1143 | atmel_aes_dma_cleanup(aes_dd); | ||
| 1144 | err_aes_dma: | ||
| 1145 | iounmap(aes_dd->io_base); | ||
| 1146 | aes_io_err: | ||
| 1147 | clk_put(aes_dd->iclk); | ||
| 1148 | clk_err: | ||
| 1149 | free_irq(aes_dd->irq, aes_dd); | ||
| 1150 | aes_irq_err: | ||
| 1151 | res_err: | ||
| 1152 | tasklet_kill(&aes_dd->done_task); | ||
| 1153 | tasklet_kill(&aes_dd->queue_task); | ||
| 1154 | kfree(aes_dd); | ||
| 1155 | aes_dd = NULL; | ||
| 1156 | aes_dd_err: | ||
| 1157 | dev_err(dev, "initialization failed.\n"); | ||
| 1158 | |||
| 1159 | return err; | ||
| 1160 | } | ||
| 1161 | |||
| 1162 | static int __devexit atmel_aes_remove(struct platform_device *pdev) | ||
| 1163 | { | ||
| 1164 | struct atmel_aes_dev *aes_dd; | ||
| 1165 | |||
| 1166 | aes_dd = platform_get_drvdata(pdev); | ||
| 1167 | if (!aes_dd) | ||
| 1168 | return -ENODEV; | ||
| 1169 | spin_lock(&atmel_aes.lock); | ||
| 1170 | list_del(&aes_dd->list); | ||
| 1171 | spin_unlock(&atmel_aes.lock); | ||
| 1172 | |||
| 1173 | atmel_aes_unregister_algs(aes_dd); | ||
| 1174 | |||
| 1175 | tasklet_kill(&aes_dd->done_task); | ||
| 1176 | tasklet_kill(&aes_dd->queue_task); | ||
| 1177 | |||
| 1178 | atmel_aes_dma_cleanup(aes_dd); | ||
| 1179 | |||
| 1180 | iounmap(aes_dd->io_base); | ||
| 1181 | |||
| 1182 | clk_put(aes_dd->iclk); | ||
| 1183 | |||
| 1184 | if (aes_dd->irq > 0) | ||
| 1185 | free_irq(aes_dd->irq, aes_dd); | ||
| 1186 | |||
| 1187 | kfree(aes_dd); | ||
| 1188 | aes_dd = NULL; | ||
| 1189 | |||
| 1190 | return 0; | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | static struct platform_driver atmel_aes_driver = { | ||
| 1194 | .probe = atmel_aes_probe, | ||
| 1195 | .remove = __devexit_p(atmel_aes_remove), | ||
| 1196 | .driver = { | ||
| 1197 | .name = "atmel_aes", | ||
| 1198 | .owner = THIS_MODULE, | ||
| 1199 | }, | ||
| 1200 | }; | ||
| 1201 | |||
| 1202 | module_platform_driver(atmel_aes_driver); | ||
| 1203 | |||
| 1204 | MODULE_DESCRIPTION("Atmel AES hw acceleration support."); | ||
| 1205 | MODULE_LICENSE("GPL v2"); | ||
| 1206 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); | ||
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h new file mode 100644 index 000000000000..dc53a20d7da1 --- /dev/null +++ b/drivers/crypto/atmel-sha-regs.h | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | #ifndef __ATMEL_SHA_REGS_H__ | ||
| 2 | #define __ATMEL_SHA_REGS_H__ | ||
| 3 | |||
| 4 | #define SHA_REG_DIGEST(x) (0x80 + ((x) * 0x04)) | ||
| 5 | #define SHA_REG_DIN(x) (0x40 + ((x) * 0x04)) | ||
| 6 | |||
| 7 | #define SHA_CR 0x00 | ||
| 8 | #define SHA_CR_START (1 << 0) | ||
| 9 | #define SHA_CR_FIRST (1 << 4) | ||
| 10 | #define SHA_CR_SWRST (1 << 8) | ||
| 11 | |||
| 12 | #define SHA_MR 0x04 | ||
| 13 | #define SHA_MR_MODE_MASK (0x3 << 0) | ||
| 14 | #define SHA_MR_MODE_MANUAL 0x0 | ||
| 15 | #define SHA_MR_MODE_AUTO 0x1 | ||
| 16 | #define SHA_MR_MODE_PDC 0x2 | ||
| 17 | #define SHA_MR_DUALBUFF (1 << 3) | ||
| 18 | #define SHA_MR_PROCDLY (1 << 4) | ||
| 19 | #define SHA_MR_ALGO_SHA1 (0 << 8) | ||
| 20 | #define SHA_MR_ALGO_SHA256 (1 << 8) | ||
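| | /* e.g. a PDC-driven SHA-256 setup writes (SHA_MR_MODE_PDC | SHA_MR_ALGO_SHA256) to SHA_MR */ | ||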
| 21 | |||
| 22 | #define SHA_IER 0x10 | ||
| 23 | #define SHA_IDR 0x14 | ||
| 24 | #define SHA_IMR 0x18 | ||
| 25 | #define SHA_ISR 0x1C | ||
| 26 | #define SHA_INT_DATARDY (1 << 0) | ||
| 27 | #define SHA_INT_ENDTX (1 << 1) | ||
| 28 | #define SHA_INT_TXBUFE (1 << 2) | ||
| 29 | #define SHA_INT_URAD (1 << 8) | ||
| 30 | #define SHA_ISR_URAT_MASK (0x7 << 12) | ||
| 31 | #define SHA_ISR_URAT_IDR (0x0 << 12) | ||
| 32 | #define SHA_ISR_URAT_ODR (0x1 << 12) | ||
| 33 | #define SHA_ISR_URAT_MR (0x2 << 12) | ||
| 34 | #define SHA_ISR_URAT_WO (0x5 << 12) | ||
| 35 | |||
| 36 | #define SHA_TPR 0x108 | ||
| 37 | #define SHA_TCR 0x10C | ||
| 38 | #define SHA_TNPR 0x118 | ||
| 39 | #define SHA_TNCR 0x11C | ||
| 40 | #define SHA_PTCR 0x120 | ||
| 41 | #define SHA_PTCR_TXTEN (1 << 8) | ||
| 42 | #define SHA_PTCR_TXTDIS (1 << 9) | ||
| 43 | #define SHA_PTSR 0x124 | ||
| 44 | #define SHA_PTSR_TXTEN (1 << 8) | ||
| 45 | |||
| 46 | #endif /* __ATMEL_SHA_REGS_H__ */ | ||
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c new file mode 100644 index 000000000000..f938b9d79b66 --- /dev/null +++ b/drivers/crypto/atmel-sha.c | |||
| @@ -0,0 +1,1112 @@ | |||
| 1 | /* | ||
| 2 | * Cryptographic API. | ||
| 3 | * | ||
| 4 | * Support for ATMEL SHA1/SHA256 HW acceleration. | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012 Eukréa Electromatique - ATMEL | ||
| 7 | * Author: Nicolas Royer <nicolas@eukrea.com> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as published | ||
| 11 | * by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * Some ideas are from omap-sham.c drivers. | ||
| 14 | */ | ||
| 15 | |||
| 16 | |||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/slab.h> | ||
| 20 | #include <linux/err.h> | ||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/io.h> | ||
| 23 | #include <linux/hw_random.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | |||
| 26 | #include <linux/device.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/errno.h> | ||
| 30 | #include <linux/interrupt.h> | ||
| 33 | #include <linux/irq.h> | ||
| 36 | #include <linux/scatterlist.h> | ||
| 37 | #include <linux/dma-mapping.h> | ||
| 38 | #include <linux/delay.h> | ||
| 39 | #include <linux/crypto.h> | ||
| 40 | #include <linux/cryptohash.h> | ||
| 41 | #include <crypto/scatterwalk.h> | ||
| 42 | #include <crypto/algapi.h> | ||
| 43 | #include <crypto/sha.h> | ||
| 44 | #include <crypto/hash.h> | ||
| 45 | #include <crypto/internal/hash.h> | ||
| 46 | #include "atmel-sha-regs.h" | ||
| 47 | |||
| 48 | /* SHA flags */ | ||
| 49 | #define SHA_FLAGS_BUSY BIT(0) | ||
| 50 | #define SHA_FLAGS_FINAL BIT(1) | ||
| 51 | #define SHA_FLAGS_DMA_ACTIVE BIT(2) | ||
| 52 | #define SHA_FLAGS_OUTPUT_READY BIT(3) | ||
| 53 | #define SHA_FLAGS_INIT BIT(4) | ||
| 54 | #define SHA_FLAGS_CPU BIT(5) | ||
| 55 | #define SHA_FLAGS_DMA_READY BIT(6) | ||
| 56 | |||
| 57 | #define SHA_FLAGS_FINUP BIT(16) | ||
| 58 | #define SHA_FLAGS_SG BIT(17) | ||
| 59 | #define SHA_FLAGS_SHA1 BIT(18) | ||
| 60 | #define SHA_FLAGS_SHA256 BIT(19) | ||
| 61 | #define SHA_FLAGS_ERROR BIT(20) | ||
| 62 | #define SHA_FLAGS_PAD BIT(21) | ||
| 63 | |||
| 64 | #define SHA_FLAGS_DUALBUFF BIT(24) | ||
| 65 | |||
| 66 | #define SHA_OP_UPDATE 1 | ||
| 67 | #define SHA_OP_FINAL 2 | ||
| 68 | |||
| 69 | #define SHA_BUFFER_LEN PAGE_SIZE | ||
| 70 | |||
| 71 | #define ATMEL_SHA_DMA_THRESHOLD 56 | ||
| 72 | |||
| 73 | |||
| 74 | struct atmel_sha_dev; | ||
| 75 | |||
| 76 | struct atmel_sha_reqctx { | ||
| 77 | struct atmel_sha_dev *dd; | ||
| 78 | unsigned long flags; | ||
| 79 | unsigned long op; | ||
| 80 | |||
| 81 | u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); | ||
| 82 | size_t digcnt; | ||
| 83 | size_t bufcnt; | ||
| 84 | size_t buflen; | ||
| 85 | dma_addr_t dma_addr; | ||
| 86 | |||
| 87 | /* walk state */ | ||
| 88 | struct scatterlist *sg; | ||
| 89 | unsigned int offset; /* offset in current sg */ | ||
| 90 | unsigned int total; /* total request */ | ||
| 91 | |||
| 92 | u8 buffer[0] __aligned(sizeof(u32)); | ||
| 93 | }; | ||
| 94 | |||
| 95 | struct atmel_sha_ctx { | ||
| 96 | struct atmel_sha_dev *dd; | ||
| 97 | |||
| 98 | unsigned long flags; | ||
| 99 | |||
| 100 | /* fallback stuff */ | ||
| 101 | struct crypto_shash *fallback; | ||
| 102 | |||
| 103 | }; | ||
| 104 | |||
| 105 | #define ATMEL_SHA_QUEUE_LENGTH 1 | ||
| 106 | |||
| 107 | struct atmel_sha_dev { | ||
| 108 | struct list_head list; | ||
| 109 | unsigned long phys_base; | ||
| 110 | struct device *dev; | ||
| 111 | struct clk *iclk; | ||
| 112 | int irq; | ||
| 113 | void __iomem *io_base; | ||
| 114 | |||
| 115 | spinlock_t lock; | ||
| 116 | int err; | ||
| 117 | struct tasklet_struct done_task; | ||
| 118 | |||
| 119 | unsigned long flags; | ||
| 120 | struct crypto_queue queue; | ||
| 121 | struct ahash_request *req; | ||
| 122 | }; | ||
| 123 | |||
| 124 | struct atmel_sha_drv { | ||
| 125 | struct list_head dev_list; | ||
| 126 | spinlock_t lock; | ||
| 127 | }; | ||
| 128 | |||
| 129 | static struct atmel_sha_drv atmel_sha = { | ||
| 130 | .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list), | ||
| 131 | .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock), | ||
| 132 | }; | ||
| 133 | |||
| 134 | static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset) | ||
| 135 | { | ||
| 136 | return readl_relaxed(dd->io_base + offset); | ||
| 137 | } | ||
| 138 | |||
| 139 | static inline void atmel_sha_write(struct atmel_sha_dev *dd, | ||
| 140 | u32 offset, u32 value) | ||
| 141 | { | ||
| 142 | writel_relaxed(value, dd->io_base + offset); | ||
| 143 | } | ||
| 144 | |||
| 145 | static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd) | ||
| 146 | { | ||
| 147 | atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF); | ||
| 148 | |||
| 149 | if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF) | ||
| 150 | dd->flags |= SHA_FLAGS_DUALBUFF; | ||
| 151 | } | ||
| 152 | |||
| 153 | static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx) | ||
| 154 | { | ||
| 155 | size_t count; | ||
| 156 | |||
| 157 | while ((ctx->bufcnt < ctx->buflen) && ctx->total) { | ||
| 158 | count = min(ctx->sg->length - ctx->offset, ctx->total); | ||
| 159 | count = min(count, ctx->buflen - ctx->bufcnt); | ||
| 160 | |||
| 161 | if (!count) | ||
| 162 | break; | ||
| 163 | |||
| 164 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, | ||
| 165 | ctx->offset, count, 0); | ||
| 166 | |||
| 167 | ctx->bufcnt += count; | ||
| 168 | ctx->offset += count; | ||
| 169 | ctx->total -= count; | ||
| 170 | |||
| 171 | if (ctx->offset == ctx->sg->length) { | ||
| 172 | ctx->sg = sg_next(ctx->sg); | ||
| 173 | if (ctx->sg) | ||
| 174 | ctx->offset = 0; | ||
| 175 | else | ||
| 176 | ctx->total = 0; | ||
| 177 | } | ||
| 178 | } | ||
| 179 | |||
| 180 | return 0; | ||
| 181 | } | ||
| 182 | |||
| 183 | /* | ||
| 184 | * The purpose of this padding is to ensure that the padded message is a | ||
| 185 | * multiple of 512 bits. The bit "1" is appended at the end of the | ||
| 186 | * message, followed by "padlen-1" zero bits. Then a 64-bit block equal | ||
| 187 | * to the message length in bits is appended. | ||
| 188 | * | ||
| 189 | * padlen is calculated as follows: | ||
| 190 | * - if message length < 56 bytes then padlen = 56 - message length | ||
| 191 | * - else padlen = 64 + 56 - message length | ||
| 192 | */ | ||
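| | /* | ||
| | * Illustrative example: for a 3-byte message, index = 3 and | ||
| | * padlen = 56 - 3 = 53, so the padded block is 3 + 53 + 8 = 64 bytes: | ||
| | * the message, then 0x80, then 52 zero bytes, then be64(24). | ||
| | */ | ||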
| 193 | static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length) | ||
| 194 | { | ||
| 195 | unsigned int index, padlen; | ||
| 196 | u64 bits; | ||
| 197 | __be64 size; | ||
| 198 | |||
| 199 | bits = (ctx->bufcnt + ctx->digcnt + length) << 3; | ||
| 200 | size = cpu_to_be64(bits); | ||
| 201 | |||
| 202 | index = ctx->bufcnt & 0x3f; | ||
| 203 | padlen = (index < 56) ? (56 - index) : ((64+56) - index); | ||
| 204 | *(ctx->buffer + ctx->bufcnt) = 0x80; | ||
| 205 | memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); | ||
| 206 | memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8); | ||
| 207 | ctx->bufcnt += padlen + 8; | ||
| 208 | ctx->flags |= SHA_FLAGS_PAD; | ||
| 209 | } | ||
| 210 | |||
| 211 | static int atmel_sha_init(struct ahash_request *req) | ||
| 212 | { | ||
| 213 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
| 214 | struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); | ||
| 215 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 216 | struct atmel_sha_dev *dd = NULL; | ||
| 217 | struct atmel_sha_dev *tmp; | ||
| 218 | |||
| 219 | spin_lock_bh(&atmel_sha.lock); | ||
| 220 | if (!tctx->dd) { | ||
| 221 | list_for_each_entry(tmp, &atmel_sha.dev_list, list) { | ||
| 222 | dd = tmp; | ||
| 223 | break; | ||
| 224 | } | ||
| 225 | tctx->dd = dd; | ||
| 226 | } else { | ||
| 227 | dd = tctx->dd; | ||
| 228 | } | ||
| 229 | |||
| 230 | spin_unlock_bh(&atmel_sha.lock); | ||
| 231 | |||
| 232 | ctx->dd = dd; | ||
| 233 | |||
| 234 | ctx->flags = 0; | ||
| 235 | |||
| 236 | dev_dbg(dd->dev, "init: digest size: %d\n", | ||
| 237 | crypto_ahash_digestsize(tfm)); | ||
| 238 | |||
| 239 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | ||
| 240 | ctx->flags |= SHA_FLAGS_SHA1; | ||
| 241 | else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE) | ||
| 242 | ctx->flags |= SHA_FLAGS_SHA256; | ||
| 243 | |||
| 244 | ctx->bufcnt = 0; | ||
| 245 | ctx->digcnt = 0; | ||
| 246 | ctx->buflen = SHA_BUFFER_LEN; | ||
| 247 | |||
| 248 | return 0; | ||
| 249 | } | ||
| 250 | |||
| 251 | static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) | ||
| 252 | { | ||
| 253 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
| 254 | u32 valcr = 0, valmr = SHA_MR_MODE_AUTO; | ||
| 255 | |||
| 256 | if (likely(dma)) { | ||
| 257 | atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE); | ||
| 258 | valmr = SHA_MR_MODE_PDC; | ||
| 259 | if (dd->flags & SHA_FLAGS_DUALBUFF) | ||
| 260 | valmr |= SHA_MR_DUALBUFF; | ||
| 261 | } else { | ||
| 262 | atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); | ||
| 263 | } | ||
| 264 | |||
| 265 | if (ctx->flags & SHA_FLAGS_SHA256) | ||
| 266 | valmr |= SHA_MR_ALGO_SHA256; | ||
| 267 | |||
| 268 | /* Setting CR_FIRST only for the first iteration */ | ||
| 269 | if (!ctx->digcnt) | ||
| 270 | valcr = SHA_CR_FIRST; | ||
| 271 | |||
| 272 | atmel_sha_write(dd, SHA_CR, valcr); | ||
| 273 | atmel_sha_write(dd, SHA_MR, valmr); | ||
| 274 | } | ||
| 275 | |||
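| | /* | ||
| | * PIO transfer: the CPU writes the input words into the SHA_REG_DIN | ||
| | * window; completion is signalled by the DATARDY interrupt enabled in | ||
| | * atmel_sha_write_ctrl(). | ||
| | */ | ||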
| 276 | static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf, | ||
| 277 | size_t length, int final) | ||
| 278 | { | ||
| 279 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
| 280 | int count, len32; | ||
| 281 | const u32 *buffer = (const u32 *)buf; | ||
| 282 | |||
| 283 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", | ||
| 284 | ctx->digcnt, length, final); | ||
| 285 | |||
| 286 | atmel_sha_write_ctrl(dd, 0); | ||
| 287 | |||
| 288 | /* update digcnt first: it must be non-zero when the request completes and clocks are disabled */ | ||
| 289 | ctx->digcnt += length; | ||
| 290 | |||
| 291 | if (final) | ||
| 292 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | ||
| 293 | |||
| 294 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
| 295 | |||
| 296 | dd->flags |= SHA_FLAGS_CPU; | ||
| 297 | |||
| 298 | for (count = 0; count < len32; count++) | ||
| 299 | atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]); | ||
| 300 | |||
| 301 | return -EINPROGRESS; | ||
| 302 | } | ||
| 303 | |||
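| | /* | ||
| | * PDC transfer: program a primary buffer (TPR/TCR) and an optional | ||
| | * next buffer (TNPR/TNCR), then enable the transmit channel. The | ||
| | * counters are in 32-bit words, hence the DIV_ROUND_UP() below. | ||
| | */ | ||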
| 304 | static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | ||
| 305 | size_t length1, dma_addr_t dma_addr2, size_t length2, int final) | ||
| 306 | { | ||
| 307 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
| 308 | int len32; | ||
| 309 | |||
| 310 | dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n", | ||
| 311 | ctx->digcnt, length1, final); | ||
| 312 | |||
| 313 | len32 = DIV_ROUND_UP(length1, sizeof(u32)); | ||
| 314 | atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS); | ||
| 315 | atmel_sha_write(dd, SHA_TPR, dma_addr1); | ||
| 316 | atmel_sha_write(dd, SHA_TCR, len32); | ||
| 317 | |||
| 318 | len32 = DIV_ROUND_UP(length2, sizeof(u32)); | ||
| 319 | atmel_sha_write(dd, SHA_TNPR, dma_addr2); | ||
| 320 | atmel_sha_write(dd, SHA_TNCR, len32); | ||
| 321 | |||
| 322 | atmel_sha_write_ctrl(dd, 1); | ||
| 323 | |||
| 324 | /* update digcnt first: it must be non-zero when the request completes and clocks are disabled */ | ||
| 325 | ctx->digcnt += length1; | ||
| 326 | |||
| 327 | if (final) | ||
| 328 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | ||
| 329 | |||
| 330 | dd->flags |= SHA_FLAGS_DMA_ACTIVE; | ||
| 331 | |||
| 332 | /* Start DMA transfer */ | ||
| 333 | atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN); | ||
| 334 | |||
| 335 | return -EINPROGRESS; | ||
| 336 | } | ||
| 337 | |||
| 338 | static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) | ||
| 339 | { | ||
| 340 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
| 341 | int bufcnt; | ||
| 342 | |||
| 343 | atmel_sha_append_sg(ctx); | ||
| 344 | atmel_sha_fill_padding(ctx, 0); | ||
| 345 | |||
| 346 | bufcnt = ctx->bufcnt; | ||
| 347 | ctx->bufcnt = 0; | ||
| 348 | |||
| 349 | return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); | ||
| 350 | } | ||
| 351 | |||
| 352 | static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd, | ||
| 353 | struct atmel_sha_reqctx *ctx, | ||
| 354 | size_t length, int final) | ||
| 355 | { | ||
| 356 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, | ||
| 357 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | ||
| 358 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | ||
| 359 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen + | ||
| 360 | SHA1_BLOCK_SIZE); | ||
| 361 | return -EINVAL; | ||
| 362 | } | ||
| 363 | |||
| 364 | ctx->flags &= ~SHA_FLAGS_SG; | ||
| 365 | |||
| 366 | /* the next call cannot fail, so no unmap is needed on error */ | ||
| 367 | return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final); | ||
| 368 | } | ||
| 369 | |||
| 370 | static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) | ||
| 371 | { | ||
| 372 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
| 373 | unsigned int final; | ||
| 374 | size_t count; | ||
| 375 | |||
| 376 | atmel_sha_append_sg(ctx); | ||
| 377 | |||
| 378 | final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; | ||
| 379 | |||
| 380 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", | ||
| 381 | ctx->bufcnt, ctx->digcnt, final); | ||
| 382 | |||
| 383 | if (final) | ||
| 384 | atmel_sha_fill_padding(ctx, 0); | ||
| 385 | |||
| 386 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { | ||
| 387 | count = ctx->bufcnt; | ||
| 388 | ctx->bufcnt = 0; | ||
| 389 | return atmel_sha_xmit_dma_map(dd, ctx, count, final); | ||
| 390 | } | ||
| 391 | |||
| 392 | return 0; | ||
| 393 | } | ||
| 394 | |||
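| | /* | ||
| | * DMA fast path: hand scatterlist entries directly to the PDC when | ||
| | * they are word-aligned and block-sized; otherwise fall back to | ||
| | * copying through the linear buffer (atmel_sha_update_dma_slow). | ||
| | */ | ||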
| 395 | static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | ||
| 396 | { | ||
| 397 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
| 398 | unsigned int length, final, tail; | ||
| 399 | struct scatterlist *sg; | ||
| 400 | unsigned int count; | ||
| 401 | |||
| 402 | if (!ctx->total) | ||
| 403 | return 0; | ||
| 404 | |||
| 405 | if (ctx->bufcnt || ctx->offset) | ||
| 406 | return atmel_sha_update_dma_slow(dd); | ||
| 407 | |||
| 408 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", | ||
| 409 | ctx->digcnt, ctx->bufcnt, ctx->total); | ||
| 410 | |||
| 411 | sg = ctx->sg; | ||
| 412 | |||
| 413 | if (!IS_ALIGNED(sg->offset, sizeof(u32))) | ||
| 414 | return atmel_sha_update_dma_slow(dd); | ||
| 415 | |||
| 416 | if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE)) | ||
| 417 | /* size is not SHA1_BLOCK_SIZE aligned */ | ||
| 418 | return atmel_sha_update_dma_slow(dd); | ||
| 419 | |||
| 420 | length = min(ctx->total, sg->length); | ||
| 421 | |||
| 422 | if (sg_is_last(sg)) { | ||
| 423 | if (!(ctx->flags & SHA_FLAGS_FINUP)) { | ||
| 424 | /* not last sg must be SHA1_BLOCK_SIZE aligned */ | ||
| 425 | tail = length & (SHA1_BLOCK_SIZE - 1); | ||
| 426 | length -= tail; | ||
| 427 | if (length == 0) { | ||
| 428 | /* offset where to start slow */ | ||
| 429 | ctx->offset = length; | ||
| 430 | return atmel_sha_update_dma_slow(dd); | ||
| 431 | } | ||
| 432 | } | ||
| 433 | } | ||
| 434 | |||
| 435 | ctx->total -= length; | ||
| 436 | ctx->offset = length; /* offset where to start slow */ | ||
| 437 | |||
| 438 | final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; | ||
| 439 | |||
| 440 | /* Add padding */ | ||
| 441 | if (final) { | ||
| 442 | tail = length & (SHA1_BLOCK_SIZE - 1); | ||
| 443 | length -= tail; | ||
| 444 | ctx->total += tail; | ||
| 445 | ctx->offset = length; /* offset where to start slow */ | ||
| 446 | |||
| 447 | sg = ctx->sg; | ||
| 448 | atmel_sha_append_sg(ctx); | ||
| 449 | |||
| 450 | atmel_sha_fill_padding(ctx, length); | ||
| 451 | |||
| 452 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, | ||
| 453 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | ||
| 454 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | ||
| 455 | dev_err(dd->dev, "dma %u bytes error\n", | ||
| 456 | ctx->buflen + SHA1_BLOCK_SIZE); | ||
| 457 | return -EINVAL; | ||
| 458 | } | ||
| 459 | |||
| 460 | if (length == 0) { | ||
| 461 | ctx->flags &= ~SHA_FLAGS_SG; | ||
| 462 | count = ctx->bufcnt; | ||
| 463 | ctx->bufcnt = 0; | ||
| 464 | return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0, | ||
| 465 | 0, final); | ||
| 466 | } else { | ||
| 467 | ctx->sg = sg; | ||
| 468 | if (!dma_map_sg(dd->dev, ctx->sg, 1, | ||
| 469 | DMA_TO_DEVICE)) { | ||
| 470 | dev_err(dd->dev, "dma_map_sg error\n"); | ||
| 471 | return -EINVAL; | ||
| 472 | } | ||
| 473 | |||
| 474 | ctx->flags |= SHA_FLAGS_SG; | ||
| 475 | |||
| 476 | count = ctx->bufcnt; | ||
| 477 | ctx->bufcnt = 0; | ||
| 478 | return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), | ||
| 479 | length, ctx->dma_addr, count, final); | ||
| 480 | } | ||
| 481 | } | ||
| 482 | |||
| 483 | if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { | ||
| 484 | dev_err(dd->dev, "dma_map_sg error\n"); | ||
| 485 | return -EINVAL; | ||
| 486 | } | ||
| 487 | |||
| 488 | ctx->flags |= SHA_FLAGS_SG; | ||
| 489 | |||
| 490 | /* the next call cannot fail, so no unmap is needed on error */ | ||
| 491 | return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0, | ||
| 492 | 0, final); | ||
| 493 | } | ||
| 494 | |||
| 495 | static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) | ||
| 496 | { | ||
| 497 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | ||
| 498 | |||
| 499 | if (ctx->flags & SHA_FLAGS_SG) { | ||
| 500 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | ||
| 501 | if (ctx->sg->length == ctx->offset) { | ||
| 502 | ctx->sg = sg_next(ctx->sg); | ||
| 503 | if (ctx->sg) | ||
| 504 | ctx->offset = 0; | ||
| 505 | } | ||
| 506 | if (ctx->flags & SHA_FLAGS_PAD) | ||
| 507 | dma_unmap_single(dd->dev, ctx->dma_addr, | ||
| 508 | ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | ||
| 509 | } else { | ||
| 510 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + | ||
| 511 | SHA1_BLOCK_SIZE, DMA_TO_DEVICE); | ||
| 512 | } | ||
| 513 | |||
| 514 | return 0; | ||
| 515 | } | ||
| 516 | |||
| 517 | static int atmel_sha_update_req(struct atmel_sha_dev *dd) | ||
| 518 | { | ||
| 519 | struct ahash_request *req = dd->req; | ||
| 520 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 521 | int err; | ||
| 522 | |||
| 523 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", | ||
| 524 | ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0); | ||
| 525 | |||
| 526 | if (ctx->flags & SHA_FLAGS_CPU) | ||
| 527 | err = atmel_sha_update_cpu(dd); | ||
| 528 | else | ||
| 529 | err = atmel_sha_update_dma_start(dd); | ||
| 530 | |||
| 531 | /* wait for DMA completion before taking more data */ | ||
| 532 | dev_dbg(dd->dev, "update: err: %d, digcnt: %zu\n", | ||
| 533 | err, ctx->digcnt); | ||
| 534 | |||
| 535 | return err; | ||
| 536 | } | ||
| 537 | |||
| 538 | static int atmel_sha_final_req(struct atmel_sha_dev *dd) | ||
| 539 | { | ||
| 540 | struct ahash_request *req = dd->req; | ||
| 541 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 542 | int err = 0; | ||
| 543 | int count; | ||
| 544 | |||
| 545 | if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { | ||
| 546 | atmel_sha_fill_padding(ctx, 0); | ||
| 547 | count = ctx->bufcnt; | ||
| 548 | ctx->bufcnt = 0; | ||
| 549 | err = atmel_sha_xmit_dma_map(dd, ctx, count, 1); | ||
| 550 | } else { | ||
| 551 | /* faster to handle the last block with the CPU */ | ||
| 553 | atmel_sha_fill_padding(ctx, 0); | ||
| 554 | count = ctx->bufcnt; | ||
| 555 | ctx->bufcnt = 0; | ||
| 556 | err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); | ||
| 557 | } | ||
| 558 | |||
| 559 | dev_dbg(dd->dev, "final_req: err: %d\n", err); | ||
| 560 | |||
| 561 | return err; | ||
| 562 | } | ||
| 563 | |||
| 564 | static void atmel_sha_copy_hash(struct ahash_request *req) | ||
| 565 | { | ||
| 566 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 567 | u32 *hash = (u32 *)ctx->digest; | ||
| 568 | int i; | ||
| 569 | |||
| 570 | if (likely(ctx->flags & SHA_FLAGS_SHA1)) | ||
| 571 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | ||
| 572 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | ||
| 573 | else | ||
| 574 | for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) | ||
| 575 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | ||
| 576 | } | ||
| 577 | |||
| 578 | static void atmel_sha_copy_ready_hash(struct ahash_request *req) | ||
| 579 | { | ||
| 580 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 581 | |||
| 582 | if (!req->result) | ||
| 583 | return; | ||
| 584 | |||
| 585 | if (likely(ctx->flags & SHA_FLAGS_SHA1)) | ||
| 586 | memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); | ||
| 587 | else | ||
| 588 | memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); | ||
| 589 | } | ||
| 590 | |||
| 591 | static int atmel_sha_finish(struct ahash_request *req) | ||
| 592 | { | ||
| 593 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 594 | struct atmel_sha_dev *dd = ctx->dd; | ||
| 595 | int err = 0; | ||
| 596 | |||
| 597 | if (ctx->digcnt) | ||
| 598 | atmel_sha_copy_ready_hash(req); | ||
| 599 | |||
| 600 | dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, | ||
| 601 | ctx->bufcnt); | ||
| 602 | |||
| 603 | return err; | ||
| 604 | } | ||
| 605 | |||
| 606 | static void atmel_sha_finish_req(struct ahash_request *req, int err) | ||
| 607 | { | ||
| 608 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 609 | struct atmel_sha_dev *dd = ctx->dd; | ||
| 610 | |||
| 611 | if (!err) { | ||
| 612 | atmel_sha_copy_hash(req); | ||
| 613 | if (SHA_FLAGS_FINAL & dd->flags) | ||
| 614 | err = atmel_sha_finish(req); | ||
| 615 | } else { | ||
| 616 | ctx->flags |= SHA_FLAGS_ERROR; | ||
| 617 | } | ||
| 618 | |||
| 619 | /* atomic operation is not needed here */ | ||
| 620 | dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | | ||
| 621 | SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); | ||
| 622 | |||
| 623 | clk_disable_unprepare(dd->iclk); | ||
| 624 | |||
| 625 | if (req->base.complete) | ||
| 626 | req->base.complete(&req->base, err); | ||
| 627 | |||
| 628 | /* handle new request */ | ||
| 629 | tasklet_schedule(&dd->done_task); | ||
| 630 | } | ||
| 631 | |||
| 632 | static int atmel_sha_hw_init(struct atmel_sha_dev *dd) | ||
| 633 | { | ||
| 634 | clk_prepare_enable(dd->iclk); | ||
| 635 | |||
| 636 | if (!(SHA_FLAGS_INIT & dd->flags)) { | ||
| 637 | atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); | ||
| 638 | atmel_sha_dualbuff_test(dd); | ||
| 639 | dd->flags |= SHA_FLAGS_INIT; | ||
| 640 | dd->err = 0; | ||
| 641 | } | ||
| 642 | |||
| 643 | return 0; | ||
| 644 | } | ||
| 645 | |||
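| | /* | ||
| | * Enqueue the new request (if any); then, unless the hardware is | ||
| | * already busy, dequeue the next request and start its update/final | ||
| | * processing. | ||
| | */ | ||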
| 646 | static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, | ||
| 647 | struct ahash_request *req) | ||
| 648 | { | ||
| 649 | struct crypto_async_request *async_req, *backlog; | ||
| 650 | struct atmel_sha_reqctx *ctx; | ||
| 651 | unsigned long flags; | ||
| 652 | int err = 0, ret = 0; | ||
| 653 | |||
| 654 | spin_lock_irqsave(&dd->lock, flags); | ||
| 655 | if (req) | ||
| 656 | ret = ahash_enqueue_request(&dd->queue, req); | ||
| 657 | |||
| 658 | if (SHA_FLAGS_BUSY & dd->flags) { | ||
| 659 | spin_unlock_irqrestore(&dd->lock, flags); | ||
| 660 | return ret; | ||
| 661 | } | ||
| 662 | |||
| 663 | backlog = crypto_get_backlog(&dd->queue); | ||
| 664 | async_req = crypto_dequeue_request(&dd->queue); | ||
| 665 | if (async_req) | ||
| 666 | dd->flags |= SHA_FLAGS_BUSY; | ||
| 667 | |||
| 668 | spin_unlock_irqrestore(&dd->lock, flags); | ||
| 669 | |||
| 670 | if (!async_req) | ||
| 671 | return ret; | ||
| 672 | |||
| 673 | if (backlog) | ||
| 674 | backlog->complete(backlog, -EINPROGRESS); | ||
| 675 | |||
| 676 | req = ahash_request_cast(async_req); | ||
| 677 | dd->req = req; | ||
| 678 | ctx = ahash_request_ctx(req); | ||
| 679 | |||
| 680 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | ||
| 681 | ctx->op, req->nbytes); | ||
| 682 | |||
| 683 | err = atmel_sha_hw_init(dd); | ||
| 684 | |||
| 685 | if (err) | ||
| 686 | goto err1; | ||
| 687 | |||
| 688 | if (ctx->op == SHA_OP_UPDATE) { | ||
| 689 | err = atmel_sha_update_req(dd); | ||
| 690 | if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) { | ||
| 691 | /* no final() after finup() */ | ||
| 692 | err = atmel_sha_final_req(dd); | ||
| 693 | } | ||
| 694 | } else if (ctx->op == SHA_OP_FINAL) { | ||
| 695 | err = atmel_sha_final_req(dd); | ||
| 696 | } | ||
| 697 | |||
| 698 | err1: | ||
| 699 | if (err != -EINPROGRESS) | ||
| 700 | /* done_task will not finish it, so do it here */ | ||
| 701 | atmel_sha_finish_req(req, err); | ||
| 702 | |||
| 703 | dev_dbg(dd->dev, "exit, err: %d\n", err); | ||
| 704 | |||
| 705 | return ret; | ||
| 706 | } | ||
| 707 | |||
| 708 | static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op) | ||
| 709 | { | ||
| 710 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 711 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
| 712 | struct atmel_sha_dev *dd = tctx->dd; | ||
| 713 | |||
| 714 | ctx->op = op; | ||
| 715 | |||
| 716 | return atmel_sha_handle_queue(dd, req); | ||
| 717 | } | ||
| 718 | |||
| 719 | static int atmel_sha_update(struct ahash_request *req) | ||
| 720 | { | ||
| 721 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 722 | |||
| 723 | if (!req->nbytes) | ||
| 724 | return 0; | ||
| 725 | |||
| 726 | ctx->total = req->nbytes; | ||
| 727 | ctx->sg = req->src; | ||
| 728 | ctx->offset = 0; | ||
| 729 | |||
| 730 | if (ctx->flags & SHA_FLAGS_FINUP) { | ||
| 731 | if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) | ||
| 732 | /* faster to use CPU for short transfers */ | ||
| 733 | ctx->flags |= SHA_FLAGS_CPU; | ||
| 734 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { | ||
| 735 | atmel_sha_append_sg(ctx); | ||
| 736 | return 0; | ||
| 737 | } | ||
| 738 | return atmel_sha_enqueue(req, SHA_OP_UPDATE); | ||
| 739 | } | ||
| 740 | |||
| 741 | static int atmel_sha_final(struct ahash_request *req) | ||
| 742 | { | ||
| 743 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 744 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
| 745 | struct atmel_sha_dev *dd = tctx->dd; | ||
| 746 | |||
| 747 | int err = 0; | ||
| 748 | |||
| 749 | ctx->flags |= SHA_FLAGS_FINUP; | ||
| 750 | |||
| 751 | if (ctx->flags & SHA_FLAGS_ERROR) | ||
| 752 | return 0; /* uncompleted hash is not needed */ | ||
| 753 | |||
| 754 | if (ctx->bufcnt) { | ||
| 755 | return atmel_sha_enqueue(req, SHA_OP_FINAL); | ||
| 756 | } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */ | ||
| 757 | err = atmel_sha_hw_init(dd); | ||
| 758 | if (err) | ||
| 759 | goto err1; | ||
| 760 | |||
| 761 | dd->flags |= SHA_FLAGS_BUSY; | ||
| 762 | err = atmel_sha_final_req(dd); | ||
| 763 | } else { | ||
| 764 | /* copy ready hash (+ finalize hmac) */ | ||
| 765 | return atmel_sha_finish(req); | ||
| 766 | } | ||
| 767 | |||
| 768 | err1: | ||
| 769 | if (err != -EINPROGRESS) | ||
| 770 | /* done_task will not finish it, so do it here */ | ||
| 771 | atmel_sha_finish_req(req, err); | ||
| 772 | |||
| 773 | return err; | ||
| 774 | } | ||
| 775 | |||
| 776 | static int atmel_sha_finup(struct ahash_request *req) | ||
| 777 | { | ||
| 778 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | ||
| 779 | int err1, err2; | ||
| 780 | |||
| 781 | ctx->flags |= SHA_FLAGS_FINUP; | ||
| 782 | |||
| 783 | err1 = atmel_sha_update(req); | ||
| 784 | if (err1 == -EINPROGRESS || err1 == -EBUSY) | ||
| 785 | return err1; | ||
| 786 | |||
| 787 | /* | ||
| 788 | * final() must always be called to clean up resources, | ||
| 789 | * even if update() failed, except on EINPROGRESS | ||
| 790 | */ | ||
| 791 | err2 = atmel_sha_final(req); | ||
| 792 | |||
| 793 | return err1 ?: err2; | ||
| 794 | } | ||
| 795 | |||
| 796 | static int atmel_sha_digest(struct ahash_request *req) | ||
| 797 | { | ||
| 798 | return atmel_sha_init(req) ?: atmel_sha_finup(req); | ||
| 799 | } | ||
| 800 | |||
| 801 | static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | ||
| 802 | { | ||
| 803 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm); | ||
| 804 | const char *alg_name = crypto_tfm_alg_name(tfm); | ||
| 805 | |||
| 806 | /* Allocate a fallback and abort if it failed. */ | ||
| 807 | tctx->fallback = crypto_alloc_shash(alg_name, 0, | ||
| 808 | CRYPTO_ALG_NEED_FALLBACK); | ||
| 809 | if (IS_ERR(tctx->fallback)) { | ||
| 810 | pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n", | ||
| 811 | alg_name); | ||
| 812 | return PTR_ERR(tctx->fallback); | ||
| 813 | } | ||
| 814 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
| 815 | sizeof(struct atmel_sha_reqctx) + | ||
| 816 | SHA_BUFFER_LEN + SHA256_BLOCK_SIZE); | ||
| 817 | |||
| 818 | return 0; | ||
| 819 | } | ||
| 820 | |||
| 821 | static int atmel_sha_cra_init(struct crypto_tfm *tfm) | ||
| 822 | { | ||
| 823 | return atmel_sha_cra_init_alg(tfm, NULL); | ||
| 824 | } | ||
| 825 | |||
| 826 | static void atmel_sha_cra_exit(struct crypto_tfm *tfm) | ||
| 827 | { | ||
| 828 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm); | ||
| 829 | |||
| 830 | crypto_free_shash(tctx->fallback); | ||
| 831 | tctx->fallback = NULL; | ||
| 832 | } | ||
| 833 | |||
| 834 | static struct ahash_alg sha_algs[] = { | ||
| 835 | { | ||
| 836 | .init = atmel_sha_init, | ||
| 837 | .update = atmel_sha_update, | ||
| 838 | .final = atmel_sha_final, | ||
| 839 | .finup = atmel_sha_finup, | ||
| 840 | .digest = atmel_sha_digest, | ||
| 841 | .halg = { | ||
| 842 | .digestsize = SHA1_DIGEST_SIZE, | ||
| 843 | .base = { | ||
| 844 | .cra_name = "sha1", | ||
| 845 | .cra_driver_name = "atmel-sha1", | ||
| 846 | .cra_priority = 100, | ||
| 847 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
| 848 | CRYPTO_ALG_NEED_FALLBACK, | ||
| 849 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
| 850 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | ||
| 851 | .cra_alignmask = 0, | ||
| 852 | .cra_module = THIS_MODULE, | ||
| 853 | .cra_init = atmel_sha_cra_init, | ||
| 854 | .cra_exit = atmel_sha_cra_exit, | ||
| 855 | } | ||
| 856 | } | ||
| 857 | }, | ||
| 858 | { | ||
| 859 | .init = atmel_sha_init, | ||
| 860 | .update = atmel_sha_update, | ||
| 861 | .final = atmel_sha_final, | ||
| 862 | .finup = atmel_sha_finup, | ||
| 863 | .digest = atmel_sha_digest, | ||
| 864 | .halg = { | ||
| 865 | .digestsize = SHA256_DIGEST_SIZE, | ||
| 866 | .base = { | ||
| 867 | .cra_name = "sha256", | ||
| 868 | .cra_driver_name = "atmel-sha256", | ||
| 869 | .cra_priority = 100, | ||
| 870 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
| 871 | CRYPTO_ALG_NEED_FALLBACK, | ||
| 872 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
| 873 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | ||
| 874 | .cra_alignmask = 0, | ||
| 875 | .cra_module = THIS_MODULE, | ||
| 876 | .cra_init = atmel_sha_cra_init, | ||
| 877 | .cra_exit = atmel_sha_cra_exit, | ||
| 878 | } | ||
| 879 | } | ||
| 880 | }, | ||
| 881 | }; | ||
| 882 | |||
| 883 | static void atmel_sha_done_task(unsigned long data) | ||
| 884 | { | ||
| 885 | struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; | ||
| 886 | int err = 0; | ||
| 887 | |||
| 888 | if (!(SHA_FLAGS_BUSY & dd->flags)) { | ||
| 889 | atmel_sha_handle_queue(dd, NULL); | ||
| 890 | return; | ||
| 891 | } | ||
| 892 | |||
| 893 | if (SHA_FLAGS_CPU & dd->flags) { | ||
| 894 | if (SHA_FLAGS_OUTPUT_READY & dd->flags) { | ||
| 895 | dd->flags &= ~SHA_FLAGS_OUTPUT_READY; | ||
| 896 | goto finish; | ||
| 897 | } | ||
| 898 | } else if (SHA_FLAGS_DMA_READY & dd->flags) { | ||
| 899 | if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { | ||
| 900 | dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; | ||
| 901 | atmel_sha_update_dma_stop(dd); | ||
| 902 | if (dd->err) { | ||
| 903 | err = dd->err; | ||
| 904 | goto finish; | ||
| 905 | } | ||
| 906 | } | ||
| 907 | if (SHA_FLAGS_OUTPUT_READY & dd->flags) { | ||
| 908 | /* hash or semi-hash ready */ | ||
| 909 | dd->flags &= ~(SHA_FLAGS_DMA_READY | | ||
| 910 | SHA_FLAGS_OUTPUT_READY); | ||
| 911 | err = atmel_sha_update_dma_start(dd); | ||
| 912 | if (err != -EINPROGRESS) | ||
| 913 | goto finish; | ||
| 914 | } | ||
| 915 | } | ||
| 916 | return; | ||
| 917 | |||
| 918 | finish: | ||
| 919 | /* finish current request */ | ||
| 920 | atmel_sha_finish_req(dd->req, err); | ||
| 921 | } | ||
| 922 | |||
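| | /* | ||
| | * Interrupt handler: mask the sources that fired (via SHA_IDR), flag | ||
| | * what became ready and defer the actual work to the done tasklet. | ||
| | */ | ||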
| 923 | static irqreturn_t atmel_sha_irq(int irq, void *dev_id) | ||
| 924 | { | ||
| 925 | struct atmel_sha_dev *sha_dd = dev_id; | ||
| 926 | u32 reg; | ||
| 927 | |||
| 928 | reg = atmel_sha_read(sha_dd, SHA_ISR); | ||
| 929 | if (reg & atmel_sha_read(sha_dd, SHA_IMR)) { | ||
| 930 | atmel_sha_write(sha_dd, SHA_IDR, reg); | ||
| 931 | if (SHA_FLAGS_BUSY & sha_dd->flags) { | ||
| 932 | sha_dd->flags |= SHA_FLAGS_OUTPUT_READY; | ||
| 933 | if (!(SHA_FLAGS_CPU & sha_dd->flags)) | ||
| 934 | sha_dd->flags |= SHA_FLAGS_DMA_READY; | ||
| 935 | tasklet_schedule(&sha_dd->done_task); | ||
| 936 | } else { | ||
| 937 | dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n"); | ||
| 938 | } | ||
| 939 | return IRQ_HANDLED; | ||
| 940 | } | ||
| 941 | |||
| 942 | return IRQ_NONE; | ||
| 943 | } | ||
| 944 | |||
| 945 | static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) | ||
| 946 | { | ||
| 947 | int i; | ||
| 948 | |||
| 949 | for (i = 0; i < ARRAY_SIZE(sha_algs); i++) | ||
| 950 | crypto_unregister_ahash(&sha_algs[i]); | ||
| 951 | } | ||
| 952 | |||
| 953 | static int atmel_sha_register_algs(struct atmel_sha_dev *dd) | ||
| 954 | { | ||
| 955 | int err, i, j; | ||
| 956 | |||
| 957 | for (i = 0; i < ARRAY_SIZE(sha_algs); i++) { | ||
| 958 | err = crypto_register_ahash(&sha_algs[i]); | ||
| 959 | if (err) | ||
| 960 | goto err_sha_algs; | ||
| 961 | } | ||
| 962 | |||
| 963 | return 0; | ||
| 964 | |||
| 965 | err_sha_algs: | ||
| 966 | for (j = 0; j < i; j++) | ||
| 967 | crypto_unregister_ahash(&sha_algs[j]); | ||
| 968 | |||
| 969 | return err; | ||
| 970 | } | ||
| 971 | |||
| 972 | static int __devinit atmel_sha_probe(struct platform_device *pdev) | ||
| 973 | { | ||
| 974 | struct atmel_sha_dev *sha_dd; | ||
| 975 | struct device *dev = &pdev->dev; | ||
| 976 | struct resource *sha_res; | ||
| 977 | unsigned long sha_phys_size; | ||
| 978 | int err; | ||
| 979 | |||
| 980 | sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL); | ||
| 981 | if (sha_dd == NULL) { | ||
| 982 | dev_err(dev, "unable to alloc data struct.\n"); | ||
| 983 | err = -ENOMEM; | ||
| 984 | goto sha_dd_err; | ||
| 985 | } | ||
| 986 | |||
| 987 | sha_dd->dev = dev; | ||
| 988 | |||
| 989 | platform_set_drvdata(pdev, sha_dd); | ||
| 990 | |||
| 991 | INIT_LIST_HEAD(&sha_dd->list); | ||
| 992 | |||
| 993 | tasklet_init(&sha_dd->done_task, atmel_sha_done_task, | ||
| 994 | (unsigned long)sha_dd); | ||
| 995 | |||
| 996 | crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); | ||
| 997 | |||
| 998 | sha_dd->irq = -1; | ||
| 999 | |||
| 1000 | /* Get the base address */ | ||
| 1001 | sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1002 | if (!sha_res) { | ||
| 1003 | dev_err(dev, "no MEM resource info\n"); | ||
| 1004 | err = -ENODEV; | ||
| 1005 | goto res_err; | ||
| 1006 | } | ||
| 1007 | sha_dd->phys_base = sha_res->start; | ||
| 1008 | sha_phys_size = resource_size(sha_res); | ||
| 1009 | |||
| 1010 | /* Get the IRQ */ | ||
| 1011 | sha_dd->irq = platform_get_irq(pdev, 0); | ||
| 1012 | if (sha_dd->irq < 0) { | ||
| 1013 | dev_err(dev, "no IRQ resource info\n"); | ||
| 1014 | err = sha_dd->irq; | ||
| 1015 | goto res_err; | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha", | ||
| 1019 | sha_dd); | ||
| 1020 | if (err) { | ||
| 1021 | dev_err(dev, "unable to request sha irq.\n"); | ||
| 1022 | goto res_err; | ||
| 1023 | } | ||
| 1024 | |||
| 1025 | /* Initializing the clock */ | ||
| 1026 | sha_dd->iclk = clk_get(&pdev->dev, NULL); | ||
| 1027 | if (IS_ERR(sha_dd->iclk)) { | ||
| 1028 | dev_err(dev, "clock intialization failed.\n"); | ||
| 1029 | err = PTR_ERR(sha_dd->iclk); | ||
| 1030 | goto clk_err; | ||
| 1031 | } | ||
| 1032 | |||
| 1033 | sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size); | ||
| 1034 | if (!sha_dd->io_base) { | ||
| 1035 | dev_err(dev, "can't ioremap\n"); | ||
| 1036 | err = -ENOMEM; | ||
| 1037 | goto sha_io_err; | ||
| 1038 | } | ||
| 1039 | |||
| 1040 | spin_lock(&atmel_sha.lock); | ||
| 1041 | list_add_tail(&sha_dd->list, &atmel_sha.dev_list); | ||
| 1042 | spin_unlock(&atmel_sha.lock); | ||
| 1043 | |||
| 1044 | err = atmel_sha_register_algs(sha_dd); | ||
| 1045 | if (err) | ||
| 1046 | goto err_algs; | ||
| 1047 | |||
| 1048 | dev_info(dev, "Atmel SHA1/SHA256\n"); | ||
| 1049 | |||
| 1050 | return 0; | ||
| 1051 | |||
| 1052 | err_algs: | ||
| 1053 | spin_lock(&atmel_sha.lock); | ||
| 1054 | list_del(&sha_dd->list); | ||
| 1055 | spin_unlock(&atmel_sha.lock); | ||
| 1056 | iounmap(sha_dd->io_base); | ||
| 1057 | sha_io_err: | ||
| 1058 | clk_put(sha_dd->iclk); | ||
| 1059 | clk_err: | ||
| 1060 | free_irq(sha_dd->irq, sha_dd); | ||
| 1061 | res_err: | ||
| 1062 | tasklet_kill(&sha_dd->done_task); | ||
| 1063 | kfree(sha_dd); | ||
| 1064 | sha_dd = NULL; | ||
| 1065 | sha_dd_err: | ||
| 1066 | dev_err(dev, "initialization failed.\n"); | ||
| 1067 | |||
| 1068 | return err; | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | static int __devexit atmel_sha_remove(struct platform_device *pdev) | ||
| 1072 | { | ||
| 1073 | struct atmel_sha_dev *sha_dd; | ||
| 1074 | |||
| 1075 | sha_dd = platform_get_drvdata(pdev); | ||
| 1076 | if (!sha_dd) | ||
| 1077 | return -ENODEV; | ||
| 1078 | spin_lock(&atmel_sha.lock); | ||
| 1079 | list_del(&sha_dd->list); | ||
| 1080 | spin_unlock(&atmel_sha.lock); | ||
| 1081 | |||
| 1082 | atmel_sha_unregister_algs(sha_dd); | ||
| 1083 | |||
| 1084 | tasklet_kill(&sha_dd->done_task); | ||
| 1085 | |||
| 1086 | iounmap(sha_dd->io_base); | ||
| 1087 | |||
| 1088 | clk_put(sha_dd->iclk); | ||
| 1089 | |||
| 1090 | if (sha_dd->irq >= 0) | ||
| 1091 | free_irq(sha_dd->irq, sha_dd); | ||
| 1092 | |||
| 1093 | kfree(sha_dd); | ||
| 1094 | sha_dd = NULL; | ||
| 1095 | |||
| 1096 | return 0; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | static struct platform_driver atmel_sha_driver = { | ||
| 1100 | .probe = atmel_sha_probe, | ||
| 1101 | .remove = __devexit_p(atmel_sha_remove), | ||
| 1102 | .driver = { | ||
| 1103 | .name = "atmel_sha", | ||
| 1104 | .owner = THIS_MODULE, | ||
| 1105 | }, | ||
| 1106 | }; | ||
| 1107 | |||
| 1108 | module_platform_driver(atmel_sha_driver); | ||
| 1109 | |||
| 1110 | MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support."); | ||
| 1111 | MODULE_LICENSE("GPL v2"); | ||
| 1112 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); | ||
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h new file mode 100644 index 000000000000..5ac2a900d80c --- /dev/null +++ b/drivers/crypto/atmel-tdes-regs.h | |||
| @@ -0,0 +1,89 @@ | |||
| 1 | #ifndef __ATMEL_TDES_REGS_H__ | ||
| 2 | #define __ATMEL_TDES_REGS_H__ | ||
| 3 | |||
| 4 | #define TDES_CR 0x00 | ||
| 5 | #define TDES_CR_START (1 << 0) | ||
| 6 | #define TDES_CR_SWRST (1 << 8) | ||
| 7 | #define TDES_CR_LOADSEED (1 << 16) | ||
| 8 | |||
| 9 | #define TDES_MR 0x04 | ||
| 10 | #define TDES_MR_CYPHER_DEC (0 << 0) | ||
| 11 | #define TDES_MR_CYPHER_ENC (1 << 0) | ||
| 12 | #define TDES_MR_TDESMOD_MASK (0x3 << 1) | ||
| 13 | #define TDES_MR_TDESMOD_DES (0x0 << 1) | ||
| 14 | #define TDES_MR_TDESMOD_TDES (0x1 << 1) | ||
| 15 | #define TDES_MR_TDESMOD_XTEA (0x2 << 1) | ||
| 16 | #define TDES_MR_KEYMOD_3KEY (0 << 4) | ||
| 17 | #define TDES_MR_KEYMOD_2KEY (1 << 4) | ||
| 18 | #define TDES_MR_SMOD_MASK (0x3 << 8) | ||
| 19 | #define TDES_MR_SMOD_MANUAL (0x0 << 8) | ||
| 20 | #define TDES_MR_SMOD_AUTO (0x1 << 8) | ||
| 21 | #define TDES_MR_SMOD_PDC (0x2 << 8) | ||
| 22 | #define TDES_MR_OPMOD_MASK (0x3 << 12) | ||
| 23 | #define TDES_MR_OPMOD_ECB (0x0 << 12) | ||
| 24 | #define TDES_MR_OPMOD_CBC (0x1 << 12) | ||
| 25 | #define TDES_MR_OPMOD_OFB (0x2 << 12) | ||
| 26 | #define TDES_MR_OPMOD_CFB (0x3 << 12) | ||
| 27 | #define TDES_MR_LOD (0x1 << 15) | ||
| 28 | #define TDES_MR_CFBS_MASK (0x3 << 16) | ||
| 29 | #define TDES_MR_CFBS_64b (0x0 << 16) | ||
| 30 | #define TDES_MR_CFBS_32b (0x1 << 16) | ||
| 31 | #define TDES_MR_CFBS_16b (0x2 << 16) | ||
| 32 | #define TDES_MR_CFBS_8b (0x3 << 16) | ||
| 33 | #define TDES_MR_CKEY_MASK (0xF << 20) | ||
| 34 | #define TDES_MR_CKEY_OFFSET 20 | ||
| 35 | #define TDES_MR_CTYPE_MASK (0x3F << 24) | ||
| 36 | #define TDES_MR_CTYPE_OFFSET 24 | ||
| 37 | |||
| 38 | #define TDES_IER 0x10 | ||
| 39 | #define TDES_IDR 0x14 | ||
| 40 | #define TDES_IMR 0x18 | ||
| 41 | #define TDES_ISR 0x1C | ||
| 42 | #define TDES_INT_DATARDY (1 << 0) | ||
| 43 | #define TDES_INT_ENDRX (1 << 1) | ||
| 44 | #define TDES_INT_ENDTX (1 << 2) | ||
| 45 | #define TDES_INT_RXBUFF (1 << 3) | ||
| 46 | #define TDES_INT_TXBUFE (1 << 4) | ||
| 47 | #define TDES_INT_URAD (1 << 8) | ||
| 48 | #define TDES_ISR_URAT_MASK (0x3 << 12) | ||
| 49 | #define TDES_ISR_URAT_IDR (0x0 << 12) | ||
| 50 | #define TDES_ISR_URAT_ODR (0x1 << 12) | ||
| 51 | #define TDES_ISR_URAT_MR (0x2 << 12) | ||
| 52 | #define TDES_ISR_URAT_WO (0x3 << 12) | ||
| 53 | |||
| 54 | |||
| 55 | #define TDES_KEY1W1R 0x20 | ||
| 56 | #define TDES_KEY1W2R 0x24 | ||
| 57 | #define TDES_KEY2W1R 0x28 | ||
| 58 | #define TDES_KEY2W2R 0x2C | ||
| 59 | #define TDES_KEY3W1R 0x30 | ||
| 60 | #define TDES_KEY3W2R 0x34 | ||
| 61 | #define TDES_IDATA1R 0x40 | ||
| 62 | #define TDES_IDATA2R 0x44 | ||
| 63 | #define TDES_ODATA1R 0x50 | ||
| 64 | #define TDES_ODATA2R 0x54 | ||
| 65 | #define TDES_IV1R 0x60 | ||
| 66 | #define TDES_IV2R 0x64 | ||
| 67 | |||
| 68 | #define TDES_XTEARNDR 0x70 | ||
| 69 | #define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0) | ||
| 70 | #define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0 | ||
| 71 | |||
| 72 | #define TDES_RPR 0x100 | ||
| 73 | #define TDES_RCR 0x104 | ||
| 74 | #define TDES_TPR 0x108 | ||
| 75 | #define TDES_TCR 0x10C | ||
| 76 | #define TDES_RNPR 0x110 | ||
| 77 | #define TDES_RNCR 0x114 | ||
| 78 | #define TDES_TNPR 0x118 | ||
| 79 | #define TDES_TNCR 0x11C | ||
| 80 | #define TDES_PTCR 0x120 | ||
| 81 | #define TDES_PTCR_RXTEN (1 << 0) | ||
| 82 | #define TDES_PTCR_RXTDIS (1 << 1) | ||
| 83 | #define TDES_PTCR_TXTEN (1 << 8) | ||
| 84 | #define TDES_PTCR_TXTDIS (1 << 9) | ||
| 85 | #define TDES_PTSR 0x124 | ||
| 86 | #define TDES_PTSR_RXTEN (1 << 0) | ||
| 87 | #define TDES_PTSR_TXTEN (1 << 8) | ||
| 88 | |||
| 89 | #endif /* __ATMEL_TDES_REGS_H__ */ | ||
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c new file mode 100644 index 000000000000..eb2b61e57e2d --- /dev/null +++ b/drivers/crypto/atmel-tdes.c | |||
| @@ -0,0 +1,1215 @@ | |||
| 1 | /* | ||
| 2 | * Cryptographic API. | ||
| 3 | * | ||
| 4 | * Support for ATMEL DES/TDES HW acceleration. | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012 Eukréa Electromatique - ATMEL | ||
| 7 | * Author: Nicolas Royer <nicolas@eukrea.com> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as published | ||
| 11 | * by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * Some ideas are from omap-aes.c drivers. | ||
| 14 | */ | ||
| 15 | |||
| 16 | |||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/slab.h> | ||
| 20 | #include <linux/err.h> | ||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/io.h> | ||
| 23 | #include <linux/hw_random.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | |||
| 26 | #include <linux/device.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/errno.h> | ||
| 30 | #include <linux/interrupt.h> | ||
| 33 | #include <linux/irq.h> | ||
| 36 | #include <linux/scatterlist.h> | ||
| 37 | #include <linux/dma-mapping.h> | ||
| 38 | #include <linux/delay.h> | ||
| 39 | #include <linux/crypto.h> | ||
| 40 | #include <linux/cryptohash.h> | ||
| 41 | #include <crypto/scatterwalk.h> | ||
| 42 | #include <crypto/algapi.h> | ||
| 43 | #include <crypto/des.h> | ||
| 44 | #include <crypto/hash.h> | ||
| 45 | #include <crypto/internal/hash.h> | ||
| 46 | #include "atmel-tdes-regs.h" | ||
| 47 | |||
| 48 | /* TDES flags */ | ||
| 49 | #define TDES_FLAGS_MODE_MASK 0x007f | ||
| 50 | #define TDES_FLAGS_ENCRYPT BIT(0) | ||
| 51 | #define TDES_FLAGS_CBC BIT(1) | ||
| 52 | #define TDES_FLAGS_CFB BIT(2) | ||
| 53 | #define TDES_FLAGS_CFB8 BIT(3) | ||
| 54 | #define TDES_FLAGS_CFB16 BIT(4) | ||
| 55 | #define TDES_FLAGS_CFB32 BIT(5) | ||
| 56 | #define TDES_FLAGS_OFB BIT(6) | ||
| 57 | |||
| 58 | #define TDES_FLAGS_INIT BIT(16) | ||
| 59 | #define TDES_FLAGS_FAST BIT(17) | ||
| 60 | #define TDES_FLAGS_BUSY BIT(18) | ||
| 61 | |||
| 62 | #define ATMEL_TDES_QUEUE_LENGTH 1 | ||
| 63 | |||
| 64 | #define CFB8_BLOCK_SIZE 1 | ||
| 65 | #define CFB16_BLOCK_SIZE 2 | ||
| 66 | #define CFB32_BLOCK_SIZE 4 | ||
| 67 | #define CFB64_BLOCK_SIZE 8 | ||
| 68 | |||
| 69 | |||
| 70 | struct atmel_tdes_dev; | ||
| 71 | |||
| 72 | struct atmel_tdes_ctx { | ||
| 73 | struct atmel_tdes_dev *dd; | ||
| 74 | |||
| 75 | int keylen; | ||
| 76 | u32 key[3*DES_KEY_SIZE / sizeof(u32)]; | ||
| 77 | unsigned long flags; | ||
| 78 | }; | ||
| 79 | |||
| 80 | struct atmel_tdes_reqctx { | ||
| 81 | unsigned long mode; | ||
| 82 | }; | ||
| 83 | |||
| 84 | struct atmel_tdes_dev { | ||
| 85 | struct list_head list; | ||
| 86 | unsigned long phys_base; | ||
| 87 | void __iomem *io_base; | ||
| 88 | |||
| 89 | struct atmel_tdes_ctx *ctx; | ||
| 90 | struct device *dev; | ||
| 91 | struct clk *iclk; | ||
| 92 | int irq; | ||
| 93 | |||
| 94 | unsigned long flags; | ||
| 95 | int err; | ||
| 96 | |||
| 97 | spinlock_t lock; | ||
| 98 | struct crypto_queue queue; | ||
| 99 | |||
| 100 | struct tasklet_struct done_task; | ||
| 101 | struct tasklet_struct queue_task; | ||
| 102 | |||
| 103 | struct ablkcipher_request *req; | ||
| 104 | size_t total; | ||
| 105 | |||
| 106 | struct scatterlist *in_sg; | ||
| 107 | size_t in_offset; | ||
| 108 | struct scatterlist *out_sg; | ||
| 109 | size_t out_offset; | ||
| 110 | |||
| 111 | size_t buflen; | ||
| 112 | size_t dma_size; | ||
| 113 | |||
| 114 | void *buf_in; | ||
| 115 | int dma_in; | ||
| 116 | dma_addr_t dma_addr_in; | ||
| 117 | |||
| 118 | void *buf_out; | ||
| 119 | int dma_out; | ||
| 120 | dma_addr_t dma_addr_out; | ||
| 121 | }; | ||
| 122 | |||
| 123 | struct atmel_tdes_drv { | ||
| 124 | struct list_head dev_list; | ||
| 125 | spinlock_t lock; | ||
| 126 | }; | ||
| 127 | |||
| 128 | static struct atmel_tdes_drv atmel_tdes = { | ||
| 129 | .dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list), | ||
| 130 | .lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock), | ||
| 131 | }; | ||
| 132 | |||
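| | /* | ||
| | * Copy up to buflen bytes between a linear buffer and the scatterlist | ||
| | * walk (direction selected by 'out'), advancing *sg/*offset across | ||
| | * entries; returns the number of bytes copied. | ||
| | */ | ||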
| 133 | static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset, | ||
| 134 | void *buf, size_t buflen, size_t total, int out) | ||
| 135 | { | ||
| 136 | unsigned int count, off = 0; | ||
| 137 | |||
| 138 | while (buflen && total) { | ||
| 139 | count = min((*sg)->length - *offset, total); | ||
| 140 | count = min(count, buflen); | ||
| 141 | |||
| 142 | if (!count) | ||
| 143 | return off; | ||
| 144 | |||
| 145 | scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out); | ||
| 146 | |||
| 147 | off += count; | ||
| 148 | buflen -= count; | ||
| 149 | *offset += count; | ||
| 150 | total -= count; | ||
| 151 | |||
| 152 | if (*offset == (*sg)->length) { | ||
| 153 | *sg = sg_next(*sg); | ||
| 154 | if (*sg) | ||
| 155 | *offset = 0; | ||
| 156 | else | ||
| 157 | total = 0; | ||
| 158 | } | ||
| 159 | } | ||
| 160 | |||
| 161 | return off; | ||
| 162 | } | ||
| 163 | |||
| 164 | static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset) | ||
| 165 | { | ||
| 166 | return readl_relaxed(dd->io_base + offset); | ||
| 167 | } | ||
| 168 | |||
| 169 | static inline void atmel_tdes_write(struct atmel_tdes_dev *dd, | ||
| 170 | u32 offset, u32 value) | ||
| 171 | { | ||
| 172 | writel_relaxed(value, dd->io_base + offset); | ||
| 173 | } | ||
| 174 | |||
| 175 | static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset, | ||
| 176 | u32 *value, int count) | ||
| 177 | { | ||
| 178 | for (; count--; value++, offset += 4) | ||
| 179 | atmel_tdes_write(dd, offset, *value); | ||
| 180 | } | ||
| 181 | |||
| 182 | static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx) | ||
| 183 | { | ||
| 184 | struct atmel_tdes_dev *tdes_dd = NULL; | ||
| 185 | struct atmel_tdes_dev *tmp; | ||
| 186 | |||
| 187 | spin_lock_bh(&atmel_tdes.lock); | ||
| 188 | if (!ctx->dd) { | ||
| 189 | list_for_each_entry(tmp, &atmel_tdes.dev_list, list) { | ||
| 190 | tdes_dd = tmp; | ||
| 191 | break; | ||
| 192 | } | ||
| 193 | ctx->dd = tdes_dd; | ||
| 194 | } else { | ||
| 195 | tdes_dd = ctx->dd; | ||
| 196 | } | ||
| 197 | spin_unlock_bh(&atmel_tdes.lock); | ||
| 198 | |||
| 199 | return tdes_dd; | ||
| 200 | } | ||
| 201 | |||
| 202 | static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd) | ||
| 203 | { | ||
| 204 | clk_prepare_enable(dd->iclk); | ||
| 205 | |||
| 206 | if (!(dd->flags & TDES_FLAGS_INIT)) { | ||
| 207 | atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST); | ||
| 208 | dd->flags |= TDES_FLAGS_INIT; | ||
| 209 | dd->err = 0; | ||
| 210 | } | ||
| 211 | |||
| 212 | return 0; | ||
| 213 | } | ||
| 214 | |||
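| | /* | ||
| | * Program the mode register from the request: DES vs 2/3-key TDES is | ||
| | * inferred from the key length, then the block mode (ECB/CBC/CFB/OFB) | ||
| | * and direction are set before loading the key and, if present, the IV. | ||
| | */ | ||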
| 215 | static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) | ||
| 216 | { | ||
| 217 | int err; | ||
| 218 | u32 valcr = 0, valmr = TDES_MR_SMOD_PDC; | ||
| 219 | |||
| 220 | err = atmel_tdes_hw_init(dd); | ||
| 221 | |||
| 222 | if (err) | ||
| 223 | return err; | ||
| 224 | |||
| 225 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); | ||
| 226 | |||
| 227 | /* MR register must be set before IV registers */ | ||
| 228 | if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) { | ||
| 229 | valmr |= TDES_MR_KEYMOD_3KEY; | ||
| 230 | valmr |= TDES_MR_TDESMOD_TDES; | ||
| 231 | } else if (dd->ctx->keylen > DES_KEY_SIZE) { | ||
| 232 | valmr |= TDES_MR_KEYMOD_2KEY; | ||
| 233 | valmr |= TDES_MR_TDESMOD_TDES; | ||
| 234 | } else { | ||
| 235 | valmr |= TDES_MR_TDESMOD_DES; | ||
| 236 | } | ||
| 237 | |||
| 238 | if (dd->flags & TDES_FLAGS_CBC) { | ||
| 239 | valmr |= TDES_MR_OPMOD_CBC; | ||
| 240 | } else if (dd->flags & TDES_FLAGS_CFB) { | ||
| 241 | valmr |= TDES_MR_OPMOD_CFB; | ||
| 242 | |||
| 243 | if (dd->flags & TDES_FLAGS_CFB8) | ||
| 244 | valmr |= TDES_MR_CFBS_8b; | ||
| 245 | else if (dd->flags & TDES_FLAGS_CFB16) | ||
| 246 | valmr |= TDES_MR_CFBS_16b; | ||
| 247 | else if (dd->flags & TDES_FLAGS_CFB32) | ||
| 248 | valmr |= TDES_MR_CFBS_32b; | ||
| 249 | } else if (dd->flags & TDES_FLAGS_OFB) { | ||
| 250 | valmr |= TDES_MR_OPMOD_OFB; | ||
| 251 | } | ||
| 252 | |||
| 253 | if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB)) | ||
| 254 | valmr |= TDES_MR_CYPHER_ENC; | ||
| 255 | |||
| 256 | atmel_tdes_write(dd, TDES_CR, valcr); | ||
| 257 | atmel_tdes_write(dd, TDES_MR, valmr); | ||
| 258 | |||
| 259 | atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key, | ||
| 260 | dd->ctx->keylen >> 2); | ||
| 261 | |||
| 262 | if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) || | ||
| 263 | (dd->flags & TDES_FLAGS_OFB)) && dd->req->info) { | ||
| 264 | atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2); | ||
| 265 | } | ||
| 266 | |||
| 267 | return 0; | ||
| 268 | } | ||
| 269 | |||
| 270 | static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd) | ||
| 271 | { | ||
| 272 | int err = 0; | ||
| 273 | size_t count; | ||
| 274 | |||
| 275 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); | ||
| 276 | |||
| 277 | if (dd->flags & TDES_FLAGS_FAST) { | ||
| 278 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
| 279 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
| 280 | } else { | ||
| 281 | dma_sync_single_for_device(dd->dev, dd->dma_addr_out, | ||
| 282 | dd->dma_size, DMA_FROM_DEVICE); | ||
| 283 | |||
| 284 | /* copy data */ | ||
| 285 | count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset, | ||
| 286 | dd->buf_out, dd->buflen, dd->dma_size, 1); | ||
| 287 | if (count != dd->dma_size) { | ||
| 288 | err = -EINVAL; | ||
| 289 | pr_err("not all data converted: %u\n", count); | ||
| 290 | } | ||
| 291 | } | ||
| 292 | |||
| 293 | return err; | ||
| 294 | } | ||
| 295 | |||
| 296 | static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd) | ||
| 297 | { | ||
| 298 | int err = -ENOMEM; | ||
| 299 | |||
| 300 | dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0); | ||
| 301 | dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0); | ||
| 302 | dd->buflen = PAGE_SIZE; | ||
| 303 | dd->buflen &= ~(DES_BLOCK_SIZE - 1); | ||
| 304 | |||
| 305 | if (!dd->buf_in || !dd->buf_out) { | ||
| 306 | dev_err(dd->dev, "unable to alloc pages.\n"); | ||
| 307 | goto err_alloc; | ||
| 308 | } | ||
| 309 | |||
| 310 | /* MAP here */ | ||
| 311 | dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, | ||
| 312 | dd->buflen, DMA_TO_DEVICE); | ||
| 313 | if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { | ||
| 314 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
| 315 | err = -EINVAL; | ||
| 316 | goto err_map_in; | ||
| 317 | } | ||
| 318 | |||
| 319 | dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, | ||
| 320 | dd->buflen, DMA_FROM_DEVICE); | ||
| 321 | if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { | ||
| 322 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
| 323 | err = -EINVAL; | ||
| 324 | goto err_map_out; | ||
| 325 | } | ||
| 326 | |||
| 327 | return 0; | ||
| 328 | |||
| 329 | err_map_out: | ||
| 330 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, | ||
| 331 | DMA_TO_DEVICE); | ||
| 332 | err_map_in: | ||
| 333 | free_page((unsigned long)dd->buf_out); | ||
| 334 | free_page((unsigned long)dd->buf_in); | ||
| 335 | err_alloc: | ||
| 336 | if (err) | ||
| 337 | pr_err("error: %d\n", err); | ||
| 338 | return err; | ||
| 339 | } | ||
| 340 | |||
| 341 | static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) | ||
| 342 | { | ||
| 343 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | ||
| 344 | DMA_FROM_DEVICE); | ||
| 345 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, | ||
| 346 | DMA_TO_DEVICE); | ||
| 347 | free_page((unsigned long)dd->buf_out); | ||
| 348 | free_page((unsigned long)dd->buf_in); | ||
| 349 | } | ||
| 350 | |||
| 351 | static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, | ||
| 352 | dma_addr_t dma_addr_out, int length) | ||
| 353 | { | ||
| 354 | struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 355 | struct atmel_tdes_dev *dd = ctx->dd; | ||
| 356 | int len32; | ||
| 357 | |||
| 358 | dd->dma_size = length; | ||
| 359 | |||
| 360 | if (!(dd->flags & TDES_FLAGS_FAST)) { | ||
| 361 | dma_sync_single_for_device(dd->dev, dma_addr_in, length, | ||
| 362 | DMA_TO_DEVICE); | ||
| 363 | } | ||
| 364 | |||
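| | /* The PDC counts transfer units, not bytes: bytes for CFB8, half-words for CFB16, 32-bit words otherwise. */ | ||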
| 365 | if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8)) | ||
| 366 | len32 = DIV_ROUND_UP(length, sizeof(u8)); | ||
| 367 | else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16)) | ||
| 368 | len32 = DIV_ROUND_UP(length, sizeof(u16)); | ||
| 369 | else | ||
| 370 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
| 371 | |||
| 372 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); | ||
| 373 | atmel_tdes_write(dd, TDES_TPR, dma_addr_in); | ||
| 374 | atmel_tdes_write(dd, TDES_TCR, len32); | ||
| 375 | atmel_tdes_write(dd, TDES_RPR, dma_addr_out); | ||
| 376 | atmel_tdes_write(dd, TDES_RCR, len32); | ||
| 377 | |||
| 378 | /* Enable Interrupt */ | ||
| 379 | atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX); | ||
| 380 | |||
| 381 | /* Start DMA transfer */ | ||
| 382 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN); | ||
| 383 | |||
| 384 | return 0; | ||
| 385 | } | ||
| 386 | |||
| 387 | static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd) | ||
| 388 | { | ||
| 389 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( | ||
| 390 | crypto_ablkcipher_reqtfm(dd->req)); | ||
| 391 | int err, fast = 0, in, out; | ||
| 392 | size_t count; | ||
| 393 | dma_addr_t addr_in, addr_out; | ||
| 394 | |||
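| | /* Zero-copy ("fast") DMA needs single, word-aligned src and dst scatterlist entries. */ | ||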
| 395 | if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { | ||
| 396 | /* check for alignment */ | ||
| 397 | in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); | ||
| 398 | out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)); | ||
| 399 | |||
| 400 | fast = in && out; | ||
| 401 | } | ||
| 402 | |||
| 403 | if (fast) { | ||
| 404 | count = min(dd->total, sg_dma_len(dd->in_sg)); | ||
| 405 | count = min(count, sg_dma_len(dd->out_sg)); | ||
| 406 | |||
| 407 | if (count != dd->total) { | ||
| 408 | pr_err("request length != buffer length\n"); | ||
| 409 | return -EINVAL; | ||
| 410 | } | ||
| 411 | |||
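| | /* dma_map_sg() returns the number of mapped entries; zero means failure. */ | ||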
| 412 | err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
| 413 | if (!err) { | ||
| 414 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
| 415 | return -EINVAL; | ||
| 416 | } | ||
| 417 | |||
| 418 | err = dma_map_sg(dd->dev, dd->out_sg, 1, | ||
| 419 | DMA_FROM_DEVICE); | ||
| 420 | if (!err) { | ||
| 421 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
| 422 | dma_unmap_sg(dd->dev, dd->in_sg, 1, | ||
| 423 | DMA_TO_DEVICE); | ||
| 424 | return -EINVAL; | ||
| 425 | } | ||
| 426 | |||
| 427 | addr_in = sg_dma_address(dd->in_sg); | ||
| 428 | addr_out = sg_dma_address(dd->out_sg); | ||
| 429 | |||
| 430 | dd->flags |= TDES_FLAGS_FAST; | ||
| 431 | |||
| 432 | } else { | ||
| 433 | /* use cache buffers */ | ||
| 434 | count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset, | ||
| 435 | dd->buf_in, dd->buflen, dd->total, 0); | ||
| 436 | |||
| 437 | addr_in = dd->dma_addr_in; | ||
| 438 | addr_out = dd->dma_addr_out; | ||
| 439 | |||
| 440 | dd->flags &= ~TDES_FLAGS_FAST; | ||
| 441 | |||
| 442 | } | ||
| 443 | |||
| 444 | dd->total -= count; | ||
| 445 | |||
| 446 | err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count); | ||
| 447 | if (err) { | ||
| 448 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
| 449 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
| 450 | } | ||
| 451 | |||
| 452 | return err; | ||
| 453 | } | ||
| 454 | |||
| 455 | |||
| 456 | static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err) | ||
| 457 | { | ||
| 458 | struct ablkcipher_request *req = dd->req; | ||
| 459 | |||
| 460 | clk_disable_unprepare(dd->iclk); | ||
| 461 | |||
| 462 | dd->flags &= ~TDES_FLAGS_BUSY; | ||
| 463 | |||
| 464 | req->base.complete(&req->base, err); | ||
| 465 | } | ||
| 466 | |||
| 467 | static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd, | ||
| 468 | struct ablkcipher_request *req) | ||
| 469 | { | ||
| 470 | struct crypto_async_request *async_req, *backlog; | ||
| 471 | struct atmel_tdes_ctx *ctx; | ||
| 472 | struct atmel_tdes_reqctx *rctx; | ||
| 473 | unsigned long flags; | ||
| 474 | int err, ret = 0; | ||
| 475 | |||
| 476 | spin_lock_irqsave(&dd->lock, flags); | ||
| 477 | if (req) | ||
| 478 | ret = ablkcipher_enqueue_request(&dd->queue, req); | ||
| 479 | if (dd->flags & TDES_FLAGS_BUSY) { | ||
| 480 | spin_unlock_irqrestore(&dd->lock, flags); | ||
| 481 | return ret; | ||
| 482 | } | ||
| 483 | backlog = crypto_get_backlog(&dd->queue); | ||
| 484 | async_req = crypto_dequeue_request(&dd->queue); | ||
| 485 | if (async_req) | ||
| 486 | dd->flags |= TDES_FLAGS_BUSY; | ||
| 487 | spin_unlock_irqrestore(&dd->lock, flags); | ||
| 488 | |||
| 489 | if (!async_req) | ||
| 490 | return ret; | ||
| 491 | |||
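| | /* A request moved off the backlog; tell its owner it is now in progress. */ | ||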
| 492 | if (backlog) | ||
| 493 | backlog->complete(backlog, -EINPROGRESS); | ||
| 494 | |||
| 495 | req = ablkcipher_request_cast(async_req); | ||
| 496 | |||
| 497 | /* assign new request to device */ | ||
| 498 | dd->req = req; | ||
| 499 | dd->total = req->nbytes; | ||
| 500 | dd->in_offset = 0; | ||
| 501 | dd->in_sg = req->src; | ||
| 502 | dd->out_offset = 0; | ||
| 503 | dd->out_sg = req->dst; | ||
| 504 | |||
| 505 | rctx = ablkcipher_request_ctx(req); | ||
| 506 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
| 507 | rctx->mode &= TDES_FLAGS_MODE_MASK; | ||
| 508 | dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode; | ||
| 509 | dd->ctx = ctx; | ||
| 510 | ctx->dd = dd; | ||
| 511 | |||
| 512 | err = atmel_tdes_write_ctrl(dd); | ||
| 513 | if (!err) | ||
| 514 | err = atmel_tdes_crypt_dma_start(dd); | ||
| 515 | if (err) { | ||
| 516 | /* the done tasklet will not finish it, so do it here */ | ||
| 517 | atmel_tdes_finish_req(dd, err); | ||
| 518 | tasklet_schedule(&dd->queue_task); | ||
| 519 | } | ||
| 520 | |||
| 521 | return ret; | ||
| 522 | } | ||
| 523 | |||
| 524 | |||
| 525 | static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
| 526 | { | ||
| 527 | struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx( | ||
| 528 | crypto_ablkcipher_reqtfm(req)); | ||
| 529 | struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
| 530 | struct atmel_tdes_dev *dd; | ||
| 531 | |||
| 532 | if (mode & TDES_FLAGS_CFB8) { | ||
| 533 | if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { | ||
| 534 | pr_err("request size is not exact amount of CFB8 blocks\n"); | ||
| 535 | return -EINVAL; | ||
| 536 | } | ||
| 537 | } else if (mode & TDES_FLAGS_CFB16) { | ||
| 538 | if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { | ||
| 539 | pr_err("request size is not exact amount of CFB16 blocks\n"); | ||
| 540 | return -EINVAL; | ||
| 541 | } | ||
| 542 | } else if (mode & TDES_FLAGS_CFB32) { | ||
| 543 | if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { | ||
| 544 | pr_err("request size is not exact amount of CFB32 blocks\n"); | ||
| 545 | return -EINVAL; | ||
| 546 | } | ||
| 547 | } else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) { | ||
| 548 | pr_err("request size is not exact amount of DES blocks\n"); | ||
| 549 | return -EINVAL; | ||
| 550 | } | ||
| 551 | |||
| 552 | dd = atmel_tdes_find_dev(ctx); | ||
| 553 | if (!dd) | ||
| 554 | return -ENODEV; | ||
| 555 | |||
| 556 | rctx->mode = mode; | ||
| 557 | |||
| 558 | return atmel_tdes_handle_queue(dd, req); | ||
| 559 | } | ||
| 560 | |||
| 561 | static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 562 | unsigned int keylen) | ||
| 563 | { | ||
| 564 | u32 tmp[DES_EXPKEY_WORDS]; | ||
| 565 | int err; | ||
| 566 | struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm); | ||
| 567 | |||
| 568 | struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 569 | |||
| 570 | if (keylen != DES_KEY_SIZE) { | ||
| 571 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 572 | return -EINVAL; | ||
| 573 | } | ||
| 574 | |||
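| | /* des_ekey() returns 0 for weak keys; reject them when the tfm requests weak key checking. */ | ||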
| 575 | err = des_ekey(tmp, key); | ||
| 576 | if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | ||
| 577 | ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
| 578 | return -EINVAL; | ||
| 579 | } | ||
| 580 | |||
| 581 | memcpy(ctx->key, key, keylen); | ||
| 582 | ctx->keylen = keylen; | ||
| 583 | |||
| 584 | return 0; | ||
| 585 | } | ||
| 586 | |||
| 587 | static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 588 | unsigned int keylen) | ||
| 589 | { | ||
| 590 | struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 591 | const char *alg_name; | ||
| 592 | |||
| 593 | alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)); | ||
| 594 | |||
| 595 | /* | ||
| 596 | * HW bug in CFB 3-key mode: restrict CFB to 2-key (16 byte) keys. | ||
| 597 | */ | ||
| 598 | if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) { | ||
| 599 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 600 | return -EINVAL; | ||
| 601 | } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) { | ||
| 602 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 603 | return -EINVAL; | ||
| 604 | } | ||
| 605 | |||
| 606 | memcpy(ctx->key, key, keylen); | ||
| 607 | ctx->keylen = keylen; | ||
| 608 | |||
| 609 | return 0; | ||
| 610 | } | ||
| 611 | |||
| 612 | static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req) | ||
| 613 | { | ||
| 614 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT); | ||
| 615 | } | ||
| 616 | |||
| 617 | static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req) | ||
| 618 | { | ||
| 619 | return atmel_tdes_crypt(req, 0); | ||
| 620 | } | ||
| 621 | |||
| 622 | static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req) | ||
| 623 | { | ||
| 624 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC); | ||
| 625 | } | ||
| 626 | |||
| 627 | static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req) | ||
| 628 | { | ||
| 629 | return atmel_tdes_crypt(req, TDES_FLAGS_CBC); | ||
| 630 | } | ||
| | |||
| 631 | static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req) | ||
| 632 | { | ||
| 633 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB); | ||
| 634 | } | ||
| 635 | |||
| 636 | static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req) | ||
| 637 | { | ||
| 638 | return atmel_tdes_crypt(req, TDES_FLAGS_CFB); | ||
| 639 | } | ||
| 640 | |||
| 641 | static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req) | ||
| 642 | { | ||
| 643 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | | ||
| 644 | TDES_FLAGS_CFB8); | ||
| 645 | } | ||
| 646 | |||
| 647 | static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req) | ||
| 648 | { | ||
| 649 | return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8); | ||
| 650 | } | ||
| 651 | |||
| 652 | static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req) | ||
| 653 | { | ||
| 654 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | | ||
| 655 | TDES_FLAGS_CFB16); | ||
| 656 | } | ||
| 657 | |||
| 658 | static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req) | ||
| 659 | { | ||
| 660 | return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16); | ||
| 661 | } | ||
| 662 | |||
| 663 | static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req) | ||
| 664 | { | ||
| 665 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | | ||
| 666 | TDES_FLAGS_CFB32); | ||
| 667 | } | ||
| 668 | |||
| 669 | static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req) | ||
| 670 | { | ||
| 671 | return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32); | ||
| 672 | } | ||
| 673 | |||
| 674 | static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req) | ||
| 675 | { | ||
| 676 | return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB); | ||
| 677 | } | ||
| 678 | |||
| 679 | static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req) | ||
| 680 | { | ||
| 681 | return atmel_tdes_crypt(req, TDES_FLAGS_OFB); | ||
| 682 | } | ||
| 683 | |||
| 684 | static int atmel_tdes_cra_init(struct crypto_tfm *tfm) | ||
| 685 | { | ||
| 686 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx); | ||
| 687 | |||
| 688 | return 0; | ||
| 689 | } | ||
| 690 | |||
| 691 | static void atmel_tdes_cra_exit(struct crypto_tfm *tfm) | ||
| 692 | { | ||
| 693 | } | ||
| 694 | |||
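| | /* DES and Triple DES ablkcipher definitions backed by the hardware engine. */ | ||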
| 695 | static struct crypto_alg tdes_algs[] = { | ||
| 696 | { | ||
| 697 | .cra_name = "ecb(des)", | ||
| 698 | .cra_driver_name = "atmel-ecb-des", | ||
| 699 | .cra_priority = 100, | ||
| 700 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 701 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 702 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 703 | .cra_alignmask = 0, | ||
| 704 | .cra_type = &crypto_ablkcipher_type, | ||
| 705 | .cra_module = THIS_MODULE, | ||
| 706 | .cra_init = atmel_tdes_cra_init, | ||
| 707 | .cra_exit = atmel_tdes_cra_exit, | ||
| 708 | .cra_u.ablkcipher = { | ||
| 709 | .min_keysize = DES_KEY_SIZE, | ||
| 710 | .max_keysize = DES_KEY_SIZE, | ||
| 711 | .setkey = atmel_des_setkey, | ||
| 712 | .encrypt = atmel_tdes_ecb_encrypt, | ||
| 713 | .decrypt = atmel_tdes_ecb_decrypt, | ||
| 714 | } | ||
| 715 | }, | ||
| 716 | { | ||
| 717 | .cra_name = "cbc(des)", | ||
| 718 | .cra_driver_name = "atmel-cbc-des", | ||
| 719 | .cra_priority = 100, | ||
| 720 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 721 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 722 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 723 | .cra_alignmask = 0, | ||
| 724 | .cra_type = &crypto_ablkcipher_type, | ||
| 725 | .cra_module = THIS_MODULE, | ||
| 726 | .cra_init = atmel_tdes_cra_init, | ||
| 727 | .cra_exit = atmel_tdes_cra_exit, | ||
| 728 | .cra_u.ablkcipher = { | ||
| 729 | .min_keysize = DES_KEY_SIZE, | ||
| 730 | .max_keysize = DES_KEY_SIZE, | ||
| 731 | .ivsize = DES_BLOCK_SIZE, | ||
| 732 | .setkey = atmel_des_setkey, | ||
| 733 | .encrypt = atmel_tdes_cbc_encrypt, | ||
| 734 | .decrypt = atmel_tdes_cbc_decrypt, | ||
| 735 | } | ||
| 736 | }, | ||
| 737 | { | ||
| 738 | .cra_name = "cfb(des)", | ||
| 739 | .cra_driver_name = "atmel-cfb-des", | ||
| 740 | .cra_priority = 100, | ||
| 741 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 742 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 743 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 744 | .cra_alignmask = 0, | ||
| 745 | .cra_type = &crypto_ablkcipher_type, | ||
| 746 | .cra_module = THIS_MODULE, | ||
| 747 | .cra_init = atmel_tdes_cra_init, | ||
| 748 | .cra_exit = atmel_tdes_cra_exit, | ||
| 749 | .cra_u.ablkcipher = { | ||
| 750 | .min_keysize = DES_KEY_SIZE, | ||
| 751 | .max_keysize = DES_KEY_SIZE, | ||
| 752 | .ivsize = DES_BLOCK_SIZE, | ||
| 753 | .setkey = atmel_des_setkey, | ||
| 754 | .encrypt = atmel_tdes_cfb_encrypt, | ||
| 755 | .decrypt = atmel_tdes_cfb_decrypt, | ||
| 756 | } | ||
| 757 | }, | ||
| 758 | { | ||
| 759 | .cra_name = "cfb8(des)", | ||
| 760 | .cra_driver_name = "atmel-cfb8-des", | ||
| 761 | .cra_priority = 100, | ||
| 762 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 763 | .cra_blocksize = CFB8_BLOCK_SIZE, | ||
| 764 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 765 | .cra_alignmask = 0, | ||
| 766 | .cra_type = &crypto_ablkcipher_type, | ||
| 767 | .cra_module = THIS_MODULE, | ||
| 768 | .cra_init = atmel_tdes_cra_init, | ||
| 769 | .cra_exit = atmel_tdes_cra_exit, | ||
| 770 | .cra_u.ablkcipher = { | ||
| 771 | .min_keysize = DES_KEY_SIZE, | ||
| 772 | .max_keysize = DES_KEY_SIZE, | ||
| 773 | .ivsize = DES_BLOCK_SIZE, | ||
| 774 | .setkey = atmel_des_setkey, | ||
| 775 | .encrypt = atmel_tdes_cfb8_encrypt, | ||
| 776 | .decrypt = atmel_tdes_cfb8_decrypt, | ||
| 777 | } | ||
| 778 | }, | ||
| 779 | { | ||
| 780 | .cra_name = "cfb16(des)", | ||
| 781 | .cra_driver_name = "atmel-cfb16-des", | ||
| 782 | .cra_priority = 100, | ||
| 783 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 784 | .cra_blocksize = CFB16_BLOCK_SIZE, | ||
| 785 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 786 | .cra_alignmask = 0, | ||
| 787 | .cra_type = &crypto_ablkcipher_type, | ||
| 788 | .cra_module = THIS_MODULE, | ||
| 789 | .cra_init = atmel_tdes_cra_init, | ||
| 790 | .cra_exit = atmel_tdes_cra_exit, | ||
| 791 | .cra_u.ablkcipher = { | ||
| 792 | .min_keysize = DES_KEY_SIZE, | ||
| 793 | .max_keysize = DES_KEY_SIZE, | ||
| 794 | .ivsize = DES_BLOCK_SIZE, | ||
| 795 | .setkey = atmel_des_setkey, | ||
| 796 | .encrypt = atmel_tdes_cfb16_encrypt, | ||
| 797 | .decrypt = atmel_tdes_cfb16_decrypt, | ||
| 798 | } | ||
| 799 | }, | ||
| 800 | { | ||
| 801 | .cra_name = "cfb32(des)", | ||
| 802 | .cra_driver_name = "atmel-cfb32-des", | ||
| 803 | .cra_priority = 100, | ||
| 804 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 805 | .cra_blocksize = CFB32_BLOCK_SIZE, | ||
| 806 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 807 | .cra_alignmask = 0, | ||
| 808 | .cra_type = &crypto_ablkcipher_type, | ||
| 809 | .cra_module = THIS_MODULE, | ||
| 810 | .cra_init = atmel_tdes_cra_init, | ||
| 811 | .cra_exit = atmel_tdes_cra_exit, | ||
| 812 | .cra_u.ablkcipher = { | ||
| 813 | .min_keysize = DES_KEY_SIZE, | ||
| 814 | .max_keysize = DES_KEY_SIZE, | ||
| 815 | .ivsize = DES_BLOCK_SIZE, | ||
| 816 | .setkey = atmel_des_setkey, | ||
| 817 | .encrypt = atmel_tdes_cfb32_encrypt, | ||
| 818 | .decrypt = atmel_tdes_cfb32_decrypt, | ||
| 819 | } | ||
| 820 | }, | ||
| 821 | { | ||
| 822 | .cra_name = "ofb(des)", | ||
| 823 | .cra_driver_name = "atmel-ofb-des", | ||
| 824 | .cra_priority = 100, | ||
| 825 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 826 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 827 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 828 | .cra_alignmask = 0, | ||
| 829 | .cra_type = &crypto_ablkcipher_type, | ||
| 830 | .cra_module = THIS_MODULE, | ||
| 831 | .cra_init = atmel_tdes_cra_init, | ||
| 832 | .cra_exit = atmel_tdes_cra_exit, | ||
| 833 | .cra_u.ablkcipher = { | ||
| 834 | .min_keysize = DES_KEY_SIZE, | ||
| 835 | .max_keysize = DES_KEY_SIZE, | ||
| 836 | .ivsize = DES_BLOCK_SIZE, | ||
| 837 | .setkey = atmel_des_setkey, | ||
| 838 | .encrypt = atmel_tdes_ofb_encrypt, | ||
| 839 | .decrypt = atmel_tdes_ofb_decrypt, | ||
| 840 | } | ||
| 841 | }, | ||
| 842 | { | ||
| 843 | .cra_name = "ecb(des3_ede)", | ||
| 844 | .cra_driver_name = "atmel-ecb-tdes", | ||
| 845 | .cra_priority = 100, | ||
| 846 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 847 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 848 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 849 | .cra_alignmask = 0, | ||
| 850 | .cra_type = &crypto_ablkcipher_type, | ||
| 851 | .cra_module = THIS_MODULE, | ||
| 852 | .cra_init = atmel_tdes_cra_init, | ||
| 853 | .cra_exit = atmel_tdes_cra_exit, | ||
| 854 | .cra_u.ablkcipher = { | ||
| 855 | .min_keysize = 2 * DES_KEY_SIZE, | ||
| 856 | .max_keysize = 3 * DES_KEY_SIZE, | ||
| 857 | .setkey = atmel_tdes_setkey, | ||
| 858 | .encrypt = atmel_tdes_ecb_encrypt, | ||
| 859 | .decrypt = atmel_tdes_ecb_decrypt, | ||
| 860 | } | ||
| 861 | }, | ||
| 862 | { | ||
| 863 | .cra_name = "cbc(des3_ede)", | ||
| 864 | .cra_driver_name = "atmel-cbc-tdes", | ||
| 865 | .cra_priority = 100, | ||
| 866 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 867 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 868 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 869 | .cra_alignmask = 0, | ||
| 870 | .cra_type = &crypto_ablkcipher_type, | ||
| 871 | .cra_module = THIS_MODULE, | ||
| 872 | .cra_init = atmel_tdes_cra_init, | ||
| 873 | .cra_exit = atmel_tdes_cra_exit, | ||
| 874 | .cra_u.ablkcipher = { | ||
| 875 | .min_keysize = 2*DES_KEY_SIZE, | ||
| 876 | .max_keysize = 3*DES_KEY_SIZE, | ||
| 877 | .ivsize = DES_BLOCK_SIZE, | ||
| 878 | .setkey = atmel_tdes_setkey, | ||
| 879 | .encrypt = atmel_tdes_cbc_encrypt, | ||
| 880 | .decrypt = atmel_tdes_cbc_decrypt, | ||
| 881 | } | ||
| 882 | }, | ||
| 883 | { | ||
| 884 | .cra_name = "cfb(des3_ede)", | ||
| 885 | .cra_driver_name = "atmel-cfb-tdes", | ||
| 886 | .cra_priority = 100, | ||
| 887 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 888 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 889 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 890 | .cra_alignmask = 0, | ||
| 891 | .cra_type = &crypto_ablkcipher_type, | ||
| 892 | .cra_module = THIS_MODULE, | ||
| 893 | .cra_init = atmel_tdes_cra_init, | ||
| 894 | .cra_exit = atmel_tdes_cra_exit, | ||
| 895 | .cra_u.ablkcipher = { | ||
| 896 | .min_keysize = 2*DES_KEY_SIZE, | ||
| 897 | .max_keysize = 2*DES_KEY_SIZE, | ||
| 898 | .ivsize = DES_BLOCK_SIZE, | ||
| 899 | .setkey = atmel_tdes_setkey, | ||
| 900 | .encrypt = atmel_tdes_cfb_encrypt, | ||
| 901 | .decrypt = atmel_tdes_cfb_decrypt, | ||
| 902 | } | ||
| 903 | }, | ||
| 904 | { | ||
| 905 | .cra_name = "cfb8(des3_ede)", | ||
| 906 | .cra_driver_name = "atmel-cfb8-tdes", | ||
| 907 | .cra_priority = 100, | ||
| 908 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 909 | .cra_blocksize = CFB8_BLOCK_SIZE, | ||
| 910 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 911 | .cra_alignmask = 0, | ||
| 912 | .cra_type = &crypto_ablkcipher_type, | ||
| 913 | .cra_module = THIS_MODULE, | ||
| 914 | .cra_init = atmel_tdes_cra_init, | ||
| 915 | .cra_exit = atmel_tdes_cra_exit, | ||
| 916 | .cra_u.ablkcipher = { | ||
| 917 | .min_keysize = 2*DES_KEY_SIZE, | ||
| 918 | .max_keysize = 2*DES_KEY_SIZE, | ||
| 919 | .ivsize = DES_BLOCK_SIZE, | ||
| 920 | .setkey = atmel_tdes_setkey, | ||
| 921 | .encrypt = atmel_tdes_cfb8_encrypt, | ||
| 922 | .decrypt = atmel_tdes_cfb8_decrypt, | ||
| 923 | } | ||
| 924 | }, | ||
| 925 | { | ||
| 926 | .cra_name = "cfb16(des3_ede)", | ||
| 927 | .cra_driver_name = "atmel-cfb16-tdes", | ||
| 928 | .cra_priority = 100, | ||
| 929 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 930 | .cra_blocksize = CFB16_BLOCK_SIZE, | ||
| 931 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 932 | .cra_alignmask = 0, | ||
| 933 | .cra_type = &crypto_ablkcipher_type, | ||
| 934 | .cra_module = THIS_MODULE, | ||
| 935 | .cra_init = atmel_tdes_cra_init, | ||
| 936 | .cra_exit = atmel_tdes_cra_exit, | ||
| 937 | .cra_u.ablkcipher = { | ||
| 938 | .min_keysize = 2*DES_KEY_SIZE, | ||
| 939 | .max_keysize = 2*DES_KEY_SIZE, | ||
| 940 | .ivsize = DES_BLOCK_SIZE, | ||
| 941 | .setkey = atmel_tdes_setkey, | ||
| 942 | .encrypt = atmel_tdes_cfb16_encrypt, | ||
| 943 | .decrypt = atmel_tdes_cfb16_decrypt, | ||
| 944 | } | ||
| 945 | }, | ||
| 946 | { | ||
| 947 | .cra_name = "cfb32(des3_ede)", | ||
| 948 | .cra_driver_name = "atmel-cfb32-tdes", | ||
| 949 | .cra_priority = 100, | ||
| 950 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 951 | .cra_blocksize = CFB32_BLOCK_SIZE, | ||
| 952 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 953 | .cra_alignmask = 0, | ||
| 954 | .cra_type = &crypto_ablkcipher_type, | ||
| 955 | .cra_module = THIS_MODULE, | ||
| 956 | .cra_init = atmel_tdes_cra_init, | ||
| 957 | .cra_exit = atmel_tdes_cra_exit, | ||
| 958 | .cra_u.ablkcipher = { | ||
| 959 | .min_keysize = 2*DES_KEY_SIZE, | ||
| 960 | .max_keysize = 2*DES_KEY_SIZE, | ||
| 961 | .ivsize = DES_BLOCK_SIZE, | ||
| 962 | .setkey = atmel_tdes_setkey, | ||
| 963 | .encrypt = atmel_tdes_cfb32_encrypt, | ||
| 964 | .decrypt = atmel_tdes_cfb32_decrypt, | ||
| 965 | } | ||
| 966 | }, | ||
| 967 | { | ||
| 968 | .cra_name = "ofb(des3_ede)", | ||
| 969 | .cra_driver_name = "atmel-ofb-tdes", | ||
| 970 | .cra_priority = 100, | ||
| 971 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 972 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 973 | .cra_ctxsize = sizeof(struct atmel_tdes_ctx), | ||
| 974 | .cra_alignmask = 0, | ||
| 975 | .cra_type = &crypto_ablkcipher_type, | ||
| 976 | .cra_module = THIS_MODULE, | ||
| 977 | .cra_init = atmel_tdes_cra_init, | ||
| 978 | .cra_exit = atmel_tdes_cra_exit, | ||
| 979 | .cra_u.ablkcipher = { | ||
| 980 | .min_keysize = 2*DES_KEY_SIZE, | ||
| 981 | .max_keysize = 3*DES_KEY_SIZE, | ||
| 982 | .ivsize = DES_BLOCK_SIZE, | ||
| 983 | .setkey = atmel_tdes_setkey, | ||
| 984 | .encrypt = atmel_tdes_ofb_encrypt, | ||
| 985 | .decrypt = atmel_tdes_ofb_decrypt, | ||
| 986 | } | ||
| 987 | }, | ||
| 988 | }; | ||
| 989 | |||
| 990 | static void atmel_tdes_queue_task(unsigned long data) | ||
| 991 | { | ||
| 992 | struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data; | ||
| 993 | |||
| 994 | atmel_tdes_handle_queue(dd, NULL); | ||
| 995 | } | ||
| 996 | |||
| 997 | static void atmel_tdes_done_task(unsigned long data) | ||
| 998 | { | ||
| 999 | struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data; | ||
| 1000 | int err; | ||
| 1001 | |||
| 1002 | err = atmel_tdes_crypt_dma_stop(dd); | ||
| 1003 | |||
| 1004 | err = dd->err ? : err; | ||
| 1005 | |||
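| | /* More data pending: start the next DMA chunk instead of completing the request. */ | ||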
| 1006 | if (dd->total && !err) { | ||
| 1007 | err = atmel_tdes_crypt_dma_start(dd); | ||
| 1008 | if (!err) | ||
| 1009 | return; | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | atmel_tdes_finish_req(dd, err); | ||
| 1013 | atmel_tdes_handle_queue(dd, NULL); | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | static irqreturn_t atmel_tdes_irq(int irq, void *dev_id) | ||
| 1017 | { | ||
| 1018 | struct atmel_tdes_dev *tdes_dd = dev_id; | ||
| 1019 | u32 reg; | ||
| 1020 | |||
| 1021 | reg = atmel_tdes_read(tdes_dd, TDES_ISR); | ||
| 1022 | if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) { | ||
| 1023 | atmel_tdes_write(tdes_dd, TDES_IDR, reg); | ||
| 1024 | if (TDES_FLAGS_BUSY & tdes_dd->flags) | ||
| 1025 | tasklet_schedule(&tdes_dd->done_task); | ||
| 1026 | else | ||
| 1027 | dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n"); | ||
| 1028 | return IRQ_HANDLED; | ||
| 1029 | } | ||
| 1030 | |||
| 1031 | return IRQ_NONE; | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd) | ||
| 1035 | { | ||
| 1036 | int i; | ||
| 1037 | |||
| 1038 | for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) | ||
| 1039 | crypto_unregister_alg(&tdes_algs[i]); | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd) | ||
| 1043 | { | ||
| 1044 | int err, i, j; | ||
| 1045 | |||
| 1046 | for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) { | ||
| 1047 | INIT_LIST_HEAD(&tdes_algs[i].cra_list); | ||
| 1048 | err = crypto_register_alg(&tdes_algs[i]); | ||
| 1049 | if (err) | ||
| 1050 | goto err_tdes_algs; | ||
| 1051 | } | ||
| 1052 | |||
| 1053 | return 0; | ||
| 1054 | |||
| 1055 | err_tdes_algs: | ||
| 1056 | for (j = 0; j < i; j++) | ||
| 1057 | crypto_unregister_alg(&tdes_algs[j]); | ||
| 1058 | |||
| 1059 | return err; | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | static int __devinit atmel_tdes_probe(struct platform_device *pdev) | ||
| 1063 | { | ||
| 1064 | struct atmel_tdes_dev *tdes_dd; | ||
| 1065 | struct device *dev = &pdev->dev; | ||
| 1066 | struct resource *tdes_res; | ||
| 1067 | unsigned long tdes_phys_size; | ||
| 1068 | int err; | ||
| 1069 | |||
| 1070 | tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL); | ||
| 1071 | if (tdes_dd == NULL) { | ||
| 1072 | dev_err(dev, "unable to alloc data struct.\n"); | ||
| 1073 | err = -ENOMEM; | ||
| 1074 | goto tdes_dd_err; | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | tdes_dd->dev = dev; | ||
| 1078 | |||
| 1079 | platform_set_drvdata(pdev, tdes_dd); | ||
| 1080 | |||
| 1081 | INIT_LIST_HEAD(&tdes_dd->list); | ||
| 1082 | |||
| 1083 | tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task, | ||
| 1084 | (unsigned long)tdes_dd); | ||
| 1085 | tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task, | ||
| 1086 | (unsigned long)tdes_dd); | ||
| 1087 | |||
| 1088 | crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH); | ||
| 1089 | |||
| 1090 | tdes_dd->irq = -1; | ||
| 1091 | |||
| 1092 | /* Get the base address */ | ||
| 1093 | tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1094 | if (!tdes_res) { | ||
| 1095 | dev_err(dev, "no MEM resource info\n"); | ||
| 1096 | err = -ENODEV; | ||
| 1097 | goto res_err; | ||
| 1098 | } | ||
| 1099 | tdes_dd->phys_base = tdes_res->start; | ||
| 1100 | tdes_phys_size = resource_size(tdes_res); | ||
| 1101 | |||
| 1102 | /* Get the IRQ */ | ||
| 1103 | tdes_dd->irq = platform_get_irq(pdev, 0); | ||
| 1104 | if (tdes_dd->irq < 0) { | ||
| 1105 | dev_err(dev, "no IRQ resource info\n"); | ||
| 1106 | err = tdes_dd->irq; | ||
| 1107 | goto res_err; | ||
| 1108 | } | ||
| 1109 | |||
| 1110 | err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED, | ||
| 1111 | "atmel-tdes", tdes_dd); | ||
| 1112 | if (err) { | ||
| 1113 | dev_err(dev, "unable to request tdes irq.\n"); | ||
| 1114 | goto tdes_irq_err; | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | /* Initializing the clock */ | ||
| 1118 | tdes_dd->iclk = clk_get(&pdev->dev, NULL); | ||
| 1119 | if (IS_ERR(tdes_dd->iclk)) { | ||
| 1120 | dev_err(dev, "clock intialization failed.\n"); | ||
| 1121 | err = PTR_ERR(tdes_dd->iclk); | ||
| 1122 | goto clk_err; | ||
| 1123 | } | ||
| 1124 | |||
| 1125 | tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size); | ||
| 1126 | if (!tdes_dd->io_base) { | ||
| 1127 | dev_err(dev, "can't ioremap\n"); | ||
| 1128 | err = -ENOMEM; | ||
| 1129 | goto tdes_io_err; | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | err = atmel_tdes_dma_init(tdes_dd); | ||
| 1133 | if (err) | ||
| 1134 | goto err_tdes_dma; | ||
| 1135 | |||
| 1136 | spin_lock(&atmel_tdes.lock); | ||
| 1137 | list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list); | ||
| 1138 | spin_unlock(&atmel_tdes.lock); | ||
| 1139 | |||
| 1140 | err = atmel_tdes_register_algs(tdes_dd); | ||
| 1141 | if (err) | ||
| 1142 | goto err_algs; | ||
| 1143 | |||
| 1144 | dev_info(dev, "Atmel DES/TDES\n"); | ||
| 1145 | |||
| 1146 | return 0; | ||
| 1147 | |||
| 1148 | err_algs: | ||
| 1149 | spin_lock(&atmel_tdes.lock); | ||
| 1150 | list_del(&tdes_dd->list); | ||
| 1151 | spin_unlock(&atmel_tdes.lock); | ||
| 1152 | atmel_tdes_dma_cleanup(tdes_dd); | ||
| 1153 | err_tdes_dma: | ||
| 1154 | iounmap(tdes_dd->io_base); | ||
| 1155 | tdes_io_err: | ||
| 1156 | clk_put(tdes_dd->iclk); | ||
| 1157 | clk_err: | ||
| 1158 | free_irq(tdes_dd->irq, tdes_dd); | ||
| 1159 | tdes_irq_err: | ||
| 1160 | res_err: | ||
| 1161 | tasklet_kill(&tdes_dd->done_task); | ||
| 1162 | tasklet_kill(&tdes_dd->queue_task); | ||
| 1163 | kfree(tdes_dd); | ||
| 1164 | tdes_dd = NULL; | ||
| 1165 | tdes_dd_err: | ||
| 1166 | dev_err(dev, "initialization failed.\n"); | ||
| 1167 | |||
| 1168 | return err; | ||
| 1169 | } | ||
| 1170 | |||
| 1171 | static int __devexit atmel_tdes_remove(struct platform_device *pdev) | ||
| 1172 | { | ||
| 1173 | struct atmel_tdes_dev *tdes_dd; | ||
| 1174 | |||
| 1175 | tdes_dd = platform_get_drvdata(pdev); | ||
| 1176 | if (!tdes_dd) | ||
| 1177 | return -ENODEV; | ||
| 1178 | spin_lock(&atmel_tdes.lock); | ||
| 1179 | list_del(&tdes_dd->list); | ||
| 1180 | spin_unlock(&atmel_tdes.lock); | ||
| 1181 | |||
| 1182 | atmel_tdes_unregister_algs(tdes_dd); | ||
| 1183 | |||
| 1184 | tasklet_kill(&tdes_dd->done_task); | ||
| 1185 | tasklet_kill(&tdes_dd->queue_task); | ||
| 1186 | |||
| 1187 | atmel_tdes_dma_cleanup(tdes_dd); | ||
| 1188 | |||
| 1189 | iounmap(tdes_dd->io_base); | ||
| 1190 | |||
| 1191 | clk_put(tdes_dd->iclk); | ||
| 1192 | |||
| 1193 | if (tdes_dd->irq >= 0) | ||
| 1194 | free_irq(tdes_dd->irq, tdes_dd); | ||
| 1195 | |||
| 1196 | kfree(tdes_dd); | ||
| 1197 | tdes_dd = NULL; | ||
| 1198 | |||
| 1199 | return 0; | ||
| 1200 | } | ||
| 1201 | |||
| 1202 | static struct platform_driver atmel_tdes_driver = { | ||
| 1203 | .probe = atmel_tdes_probe, | ||
| 1204 | .remove = __devexit_p(atmel_tdes_remove), | ||
| 1205 | .driver = { | ||
| 1206 | .name = "atmel_tdes", | ||
| 1207 | .owner = THIS_MODULE, | ||
| 1208 | }, | ||
| 1209 | }; | ||
| 1210 | |||
| 1211 | module_platform_driver(atmel_tdes_driver); | ||
| 1212 | |||
| 1213 | MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support."); | ||
| 1214 | MODULE_LICENSE("GPL v2"); | ||
| 1215 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); | ||
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c new file mode 100644 index 000000000000..5398580b4313 --- /dev/null +++ b/drivers/crypto/bfin_crc.c | |||
| @@ -0,0 +1,780 @@ | |||
| 1 | /* | ||
| 2 | * Cryptographic API. | ||
| 3 | * | ||
| 4 | * Support Blackfin CRC HW acceleration. | ||
| 5 | * | ||
| 6 | * Copyright 2012 Analog Devices Inc. | ||
| 7 | * | ||
| 8 | * Licensed under the GPL-2. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/err.h> | ||
| 12 | #include <linux/device.h> | ||
| 13 | #include <linux/module.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/errno.h> | ||
| 16 | #include <linux/interrupt.h> | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/irq.h> | ||
| 19 | #include <linux/io.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | #include <linux/scatterlist.h> | ||
| 22 | #include <linux/dma-mapping.h> | ||
| 23 | #include <linux/delay.h> | ||
| 24 | #include <linux/unaligned/access_ok.h> | ||
| 25 | #include <linux/crypto.h> | ||
| 26 | #include <linux/cryptohash.h> | ||
| 27 | #include <crypto/scatterwalk.h> | ||
| 28 | #include <crypto/algapi.h> | ||
| 29 | #include <crypto/hash.h> | ||
| 30 | #include <crypto/internal/hash.h> | ||
| 31 | |||
| 32 | #include <asm/blackfin.h> | ||
| 33 | #include <asm/bfin_crc.h> | ||
| 34 | #include <asm/dma.h> | ||
| 35 | #include <asm/portmux.h> | ||
| 36 | |||
| 37 | #define CRC_CCRYPTO_QUEUE_LENGTH 5 | ||
| 38 | |||
| 39 | #define DRIVER_NAME "bfin-hmac-crc" | ||
| 40 | #define CHKSUM_DIGEST_SIZE 4 | ||
| 41 | #define CHKSUM_BLOCK_SIZE 1 | ||
| 42 | |||
| 43 | #define CRC_MAX_DMA_DESC 100 | ||
| 44 | |||
| 45 | #define CRC_CRYPTO_STATE_UPDATE 1 | ||
| 46 | #define CRC_CRYPTO_STATE_FINALUPDATE 2 | ||
| 47 | #define CRC_CRYPTO_STATE_FINISH 3 | ||
| 48 | |||
| 49 | struct bfin_crypto_crc { | ||
| 50 | struct list_head list; | ||
| 51 | struct device *dev; | ||
| 52 | spinlock_t lock; | ||
| 53 | |||
| 54 | int irq; | ||
| 55 | int dma_ch; | ||
| 56 | u32 poly; | ||
| 57 | volatile struct crc_register *regs; | ||
| 58 | |||
| 59 | struct ahash_request *req; /* current request in operation */ | ||
| 60 | struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */ | ||
| 61 | dma_addr_t sg_dma; /* phy addr of sg dma descriptors */ | ||
| 62 | u8 *sg_mid_buf; | ||
| 63 | |||
| 64 | struct tasklet_struct done_task; | ||
| 65 | struct crypto_queue queue; /* waiting requests */ | ||
| 66 | |||
| 67 | u8 busy:1; /* crc device in operation flag */ | ||
| 68 | }; | ||
| 69 | |||
| 70 | static struct bfin_crypto_crc_list { | ||
| 71 | struct list_head dev_list; | ||
| 72 | spinlock_t lock; | ||
| 73 | } crc_list; | ||
| 74 | |||
| 75 | struct bfin_crypto_crc_reqctx { | ||
| 76 | struct bfin_crypto_crc *crc; | ||
| 77 | |||
| 78 | unsigned int total; /* total request bytes */ | ||
| 79 | size_t sg_buflen; /* bytes for this update */ | ||
| 80 | unsigned int sg_nents; | ||
| 81 | struct scatterlist *sg; /* sg list head for this update*/ | ||
| 82 | struct scatterlist bufsl[2]; /* chained sg list */ | ||
| 83 | |||
| 84 | size_t bufnext_len; | ||
| 85 | size_t buflast_len; | ||
| 86 | u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */ | ||
| 87 | u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */ | ||
| 88 | |||
| 89 | u8 flag; | ||
| 90 | }; | ||
| 91 | |||
| 92 | struct bfin_crypto_crc_ctx { | ||
| 93 | struct bfin_crypto_crc *crc; | ||
| 94 | u32 key; | ||
| 95 | }; | ||
| 96 | |||
| 97 | |||
| 98 | /* | ||
| 99 | * derive number of elements in scatterlist | ||
| 100 | */ | ||
| 101 | static int sg_count(struct scatterlist *sg_list) | ||
| 102 | { | ||
| 103 | struct scatterlist *sg = sg_list; | ||
| 104 | int sg_nents = 1; | ||
| 105 | |||
| 106 | if (sg_list == NULL) | ||
| 107 | return 0; | ||
| 108 | |||
| 109 | while (!sg_is_last(sg)) { | ||
| 110 | sg_nents++; | ||
| 111 | sg = scatterwalk_sg_next(sg); | ||
| 112 | } | ||
| 113 | |||
| 114 | return sg_nents; | ||
| 115 | } | ||
| 116 | |||
| 117 | /* | ||
| 118 | * get element in scatter list by given index | ||
| 119 | */ | ||
| 120 | static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents, | ||
| 121 | unsigned int index) | ||
| 122 | { | ||
| 123 | struct scatterlist *sg = NULL; | ||
| 124 | int i; | ||
| 125 | |||
| 126 | for_each_sg(sg_list, sg, nents, i) | ||
| 127 | if (i == index) | ||
| 128 | break; | ||
| 129 | |||
| 130 | return sg; | ||
| 131 | } | ||
| 132 | |||
| 133 | static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key) | ||
| 134 | { | ||
| 135 | crc->regs->datacntrld = 0; | ||
| 136 | crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET; | ||
| 137 | crc->regs->curresult = key; | ||
| 138 | |||
| 139 | /* setup CRC interrupts */ | ||
| 140 | crc->regs->status = CMPERRI | DCNTEXPI; | ||
| 141 | crc->regs->intrenset = CMPERRI | DCNTEXPI; | ||
| 142 | SSYNC(); | ||
| 143 | |||
| 144 | return 0; | ||
| 145 | } | ||
| 146 | |||
| 147 | static int bfin_crypto_crc_init(struct ahash_request *req) | ||
| 148 | { | ||
| 149 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
| 150 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); | ||
| 151 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | ||
| 152 | struct bfin_crypto_crc *crc; | ||
| 153 | |||
| 154 | dev_dbg(crc->dev, "crc_init\n"); | ||
| 155 | spin_lock_bh(&crc_list.lock); | ||
| 156 | list_for_each_entry(crc, &crc_list.dev_list, list) { | ||
| 157 | crc_ctx->crc = crc; | ||
| 158 | break; | ||
| 159 | } | ||
| 160 | spin_unlock_bh(&crc_list.lock); | ||
| 161 | |||
| 162 | if (sg_count(req->src) > CRC_MAX_DMA_DESC) { | ||
| 163 | dev_dbg(crc->dev, "init: requested sg list is too big > %d\n", | ||
| 164 | CRC_MAX_DMA_DESC); | ||
| 165 | return -EINVAL; | ||
| 166 | } | ||
| 167 | |||
| 168 | ctx->crc = crc; | ||
| 169 | ctx->bufnext_len = 0; | ||
| 170 | ctx->buflast_len = 0; | ||
| 171 | ctx->sg_buflen = 0; | ||
| 172 | ctx->total = 0; | ||
| 173 | ctx->flag = 0; | ||
| 174 | |||
| 175 | /* init crc results */ | ||
| 176 | put_unaligned_le32(crc_ctx->key, req->result); | ||
| 177 | |||
| 178 | dev_dbg(crc->dev, "init: digest size: %d\n", | ||
| 179 | crypto_ahash_digestsize(tfm)); | ||
| 180 | |||
| 181 | return bfin_crypto_crc_init_hw(crc, crc_ctx->key); | ||
| 182 | } | ||
| 183 | |||
| 184 | static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc) | ||
| 185 | { | ||
| 186 | struct scatterlist *sg; | ||
| 187 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req); | ||
| 188 | int i = 0, j = 0; | ||
| 189 | unsigned long dma_config; | ||
| 190 | unsigned int dma_count; | ||
| 191 | unsigned int dma_addr; | ||
| 192 | unsigned int mid_dma_count = 0; | ||
| 193 | int dma_mod; | ||
| 194 | |||
| 195 | dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE); | ||
| 196 | |||
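| | /* Build one DMA descriptor per sg entry; sub-word tails are merged with the next sg through the mid buffer. */ | ||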
| 197 | for_each_sg(ctx->sg, sg, ctx->sg_nents, j) { | ||
| 198 | dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32; | ||
| 199 | dma_addr = sg_dma_address(sg); | ||
| 200 | /* exclude the tail bytes saved for the next update from the last sg */ | ||
| 201 | if (sg_is_last(sg)) | ||
| 202 | dma_count = sg_dma_len(sg) - ctx->bufnext_len; | ||
| 203 | else | ||
| 204 | dma_count = sg_dma_len(sg); | ||
| 205 | |||
| 206 | if (mid_dma_count) { | ||
| 207 | /* Pad the last middle dma buffer to 4 bytes with the first | ||
| 208 | bytes of the current sg buffer, then advance the address | ||
| 209 | and shorten the current sg accordingly. | ||
| 210 | */ | ||
| 211 | memcpy(crc->sg_mid_buf +((i-1) << 2) + mid_dma_count, | ||
| 212 | (void *)dma_addr, | ||
| 213 | CHKSUM_DIGEST_SIZE - mid_dma_count); | ||
| 214 | dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count; | ||
| 215 | dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count; | ||
| 216 | } | ||
| 217 | /* chop current sg dma len to multiple of 32 bits */ | ||
| 218 | mid_dma_count = dma_count % 4; | ||
| 219 | dma_count &= ~0x3; | ||
| 220 | |||
| 221 | if (dma_addr % 4 == 0) { | ||
| 222 | dma_config |= WDSIZE_32; | ||
| 223 | dma_count >>= 2; | ||
| 224 | dma_mod = 4; | ||
| 225 | } else if (dma_addr % 2 == 0) { | ||
| 226 | dma_config |= WDSIZE_16; | ||
| 227 | dma_count >>= 1; | ||
| 228 | dma_mod = 2; | ||
| 229 | } else { | ||
| 230 | dma_config |= WDSIZE_8; | ||
| 231 | dma_mod = 1; | ||
| 232 | } | ||
| 233 | |||
| 234 | crc->sg_cpu[i].start_addr = dma_addr; | ||
| 235 | crc->sg_cpu[i].cfg = dma_config; | ||
| 236 | crc->sg_cpu[i].x_count = dma_count; | ||
| 237 | crc->sg_cpu[i].x_modify = dma_mod; | ||
| 238 | dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " | ||
| 239 | "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", | ||
| 240 | i, crc->sg_cpu[i].start_addr, | ||
| 241 | crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, | ||
| 242 | crc->sg_cpu[i].x_modify); | ||
| 243 | i++; | ||
| 244 | |||
| 245 | if (mid_dma_count) { | ||
| 246 | /* copy extra bytes to next middle dma buffer */ | ||
| 247 | dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | | ||
| 248 | DMAEN | PSIZE_32 | WDSIZE_32; | ||
| 249 | memcpy(crc->sg_mid_buf + (i << 2), | ||
| 250 | (void *)(dma_addr + (dma_count << 2)), | ||
| 251 | mid_dma_count); | ||
| 252 | /* setup new dma descriptor for next middle dma */ | ||
| 253 | crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, | ||
| 254 | crc->sg_mid_buf + (i << 2), | ||
| 255 | CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE); | ||
| 256 | crc->sg_cpu[i].cfg = dma_config; | ||
| 257 | crc->sg_cpu[i].x_count = 1; | ||
| 258 | crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE; | ||
| 259 | dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " | ||
| 260 | "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", | ||
| 261 | i, crc->sg_cpu[i].start_addr, | ||
| 262 | crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, | ||
| 263 | crc->sg_cpu[i].x_modify); | ||
| 264 | i++; | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32; | ||
| 269 | /* For final update req, append the buffer for next update as well*/ | ||
| 270 | if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || | ||
| 271 | ctx->flag == CRC_CRYPTO_STATE_FINISH)) { | ||
| 272 | crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext, | ||
| 273 | CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE); | ||
| 274 | crc->sg_cpu[i].cfg = dma_config; | ||
| 275 | crc->sg_cpu[i].x_count = 1; | ||
| 276 | crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE; | ||
| 277 | dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, " | ||
| 278 | "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", | ||
| 279 | i, crc->sg_cpu[i].start_addr, | ||
| 280 | crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count, | ||
| 281 | crc->sg_cpu[i].x_modify); | ||
| 282 | i++; | ||
| 283 | } | ||
| 284 | |||
| 285 | if (i == 0) | ||
| 286 | return; | ||
| 287 | |||
| 288 | flush_dcache_range((unsigned int)crc->sg_cpu, | ||
| 289 | (unsigned int)crc->sg_cpu + | ||
| 290 | i * sizeof(struct dma_desc_array)); | ||
| 291 | |||
| 292 | /* Set the last descriptor to stop mode */ | ||
| 293 | crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE); | ||
| 294 | crc->sg_cpu[i - 1].cfg |= DI_EN; | ||
| 295 | set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma); | ||
| 296 | set_dma_x_count(crc->dma_ch, 0); | ||
| 297 | set_dma_x_modify(crc->dma_ch, 0); | ||
| 298 | SSYNC(); | ||
| 299 | set_dma_config(crc->dma_ch, dma_config); | ||
| 300 | } | ||
| 301 | |||
| 302 | static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc, | ||
| 303 | struct ahash_request *req) | ||
| 304 | { | ||
| 305 | struct crypto_async_request *async_req, *backlog; | ||
| 306 | struct bfin_crypto_crc_reqctx *ctx; | ||
| 307 | struct scatterlist *sg; | ||
| 308 | int ret = 0; | ||
| 309 | int nsg, i, j; | ||
| 310 | unsigned int nextlen; | ||
| 311 | unsigned long flags; | ||
| 312 | |||
| 313 | spin_lock_irqsave(&crc->lock, flags); | ||
| 314 | if (req) | ||
| 315 | ret = ahash_enqueue_request(&crc->queue, req); | ||
| 316 | if (crc->busy) { | ||
| 317 | spin_unlock_irqrestore(&crc->lock, flags); | ||
| 318 | return ret; | ||
| 319 | } | ||
| 320 | backlog = crypto_get_backlog(&crc->queue); | ||
| 321 | async_req = crypto_dequeue_request(&crc->queue); | ||
| 322 | if (async_req) | ||
| 323 | crc->busy = 1; | ||
| 324 | spin_unlock_irqrestore(&crc->lock, flags); | ||
| 325 | |||
| 326 | if (!async_req) | ||
| 327 | return ret; | ||
| 328 | |||
| 329 | if (backlog) | ||
| 330 | backlog->complete(backlog, -EINPROGRESS); | ||
| 331 | |||
| 332 | req = ahash_request_cast(async_req); | ||
| 333 | crc->req = req; | ||
| 334 | ctx = ahash_request_ctx(req); | ||
| 335 | ctx->sg = NULL; | ||
| 336 | ctx->sg_buflen = 0; | ||
| 337 | ctx->sg_nents = 0; | ||
| 338 | |||
| 339 | dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n", | ||
| 340 | ctx->flag, req->nbytes); | ||
| 341 | |||
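| | /* The engine consumes 32-bit words; sub-word leftovers are kept in bufnext and either carried to the next update or zero-padded for the final one. */ | ||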
| 342 | if (ctx->flag == CRC_CRYPTO_STATE_FINISH) { | ||
| 343 | if (ctx->bufnext_len == 0) { | ||
| 344 | crc->busy = 0; | ||
| 345 | return 0; | ||
| 346 | } | ||
| 347 | |||
| 348 | /* Pack last crc update buffer to 32bit */ | ||
| 349 | memset(ctx->bufnext + ctx->bufnext_len, 0, | ||
| 350 | CHKSUM_DIGEST_SIZE - ctx->bufnext_len); | ||
| 351 | } else { | ||
| 352 | /* Pack small data which is less than 32bit to buffer for next update. */ | ||
| 353 | if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) { | ||
| 354 | memcpy(ctx->bufnext + ctx->bufnext_len, | ||
| 355 | sg_virt(req->src), req->nbytes); | ||
| 356 | ctx->bufnext_len += req->nbytes; | ||
| 357 | if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE && | ||
| 358 | ctx->bufnext_len) { | ||
| 359 | goto finish_update; | ||
| 360 | } else { | ||
| 361 | crc->busy = 0; | ||
| 362 | return 0; | ||
| 363 | } | ||
| 364 | } | ||
| 365 | |||
| 366 | if (ctx->bufnext_len) { | ||
| 367 | /* Chain in extra bytes of last update */ | ||
| 368 | ctx->buflast_len = ctx->bufnext_len; | ||
| 369 | memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len); | ||
| 370 | |||
| 371 | nsg = ctx->sg_buflen ? 2 : 1; | ||
| 372 | sg_init_table(ctx->bufsl, nsg); | ||
| 373 | sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len); | ||
| 374 | if (nsg > 1) | ||
| 375 | scatterwalk_sg_chain(ctx->bufsl, nsg, | ||
| 376 | req->src); | ||
| 377 | ctx->sg = ctx->bufsl; | ||
| 378 | } else | ||
| 379 | ctx->sg = req->src; | ||
| 380 | |||
| 381 | /* Chop crc buffer size to multiple of 32 bit */ | ||
| 382 | nsg = ctx->sg_nents = sg_count(ctx->sg); | ||
| 383 | ctx->sg_buflen = ctx->buflast_len + req->nbytes; | ||
| 384 | ctx->bufnext_len = ctx->sg_buflen % 4; | ||
| 385 | ctx->sg_buflen &= ~0x3; | ||
| 386 | |||
| 387 | if (ctx->bufnext_len) { | ||
| 388 | /* copy extra bytes to buffer for next update */ | ||
| 389 | memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE); | ||
| 390 | nextlen = ctx->bufnext_len; | ||
| 391 | for (i = nsg - 1; i >= 0; i--) { | ||
| 392 | sg = sg_get(ctx->sg, nsg, i); | ||
| 393 | j = min(nextlen, sg_dma_len(sg)); | ||
| 394 | memcpy(ctx->bufnext + nextlen - j, | ||
| 395 | sg_virt(sg) + sg_dma_len(sg) - j, j); | ||
| 396 | if (j == sg_dma_len(sg)) | ||
| 397 | ctx->sg_nents--; | ||
| 398 | nextlen -= j; | ||
| 399 | if (nextlen == 0) | ||
| 400 | break; | ||
| 401 | } | ||
| 402 | } | ||
| 403 | } | ||
| 404 | |||
| 405 | finish_update: | ||
| 406 | if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || | ||
| 407 | ctx->flag == CRC_CRYPTO_STATE_FINISH)) | ||
| 408 | ctx->sg_buflen += CHKSUM_DIGEST_SIZE; | ||
| 409 | |||
| 410 | /* set CRC data count before start DMA */ | ||
| 411 | crc->regs->datacnt = ctx->sg_buflen >> 2; | ||
| 412 | |||
| 413 | /* setup and enable CRC DMA */ | ||
| 414 | bfin_crypto_crc_config_dma(crc); | ||
| 415 | |||
| 416 | /* finally kick off CRC operation */ | ||
| 417 | crc->regs->control |= BLKEN; | ||
| 418 | SSYNC(); | ||
| 419 | |||
| 420 | return -EINPROGRESS; | ||
| 421 | } | ||
| 422 | |||
| 423 | static int bfin_crypto_crc_update(struct ahash_request *req) | ||
| 424 | { | ||
| 425 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | ||
| 426 | |||
| 427 | if (!req->nbytes) | ||
| 428 | return 0; | ||
| 429 | |||
| 430 | dev_dbg(ctx->crc->dev, "crc_update\n"); | ||
| 431 | ctx->total += req->nbytes; | ||
| 432 | ctx->flag = CRC_CRYPTO_STATE_UPDATE; | ||
| 433 | |||
| 434 | return bfin_crypto_crc_handle_queue(ctx->crc, req); | ||
| 435 | } | ||
| 436 | |||
| 437 | static int bfin_crypto_crc_final(struct ahash_request *req) | ||
| 438 | { | ||
| 439 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
| 440 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); | ||
| 441 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | ||
| 442 | |||
| 443 | dev_dbg(ctx->crc->dev, "crc_final\n"); | ||
| 444 | ctx->flag = CRC_CRYPTO_STATE_FINISH; | ||
| 445 | crc_ctx->key = 0; | ||
| 446 | |||
| 447 | return bfin_crypto_crc_handle_queue(ctx->crc, req); | ||
| 448 | } | ||
| 449 | |||
| 450 | static int bfin_crypto_crc_finup(struct ahash_request *req) | ||
| 451 | { | ||
| 452 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
| 453 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); | ||
| 454 | struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); | ||
| 455 | |||
| 456 | dev_dbg(ctx->crc->dev, "crc_finishupdate\n"); | ||
| 457 | ctx->total += req->nbytes; | ||
| 458 | ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE; | ||
| 459 | crc_ctx->key = 0; | ||
| 460 | |||
| 461 | return bfin_crypto_crc_handle_queue(ctx->crc, req); | ||
| 462 | } | ||
| 463 | |||
| 464 | static int bfin_crypto_crc_digest(struct ahash_request *req) | ||
| 465 | { | ||
| 466 | int ret; | ||
| 467 | |||
| 468 | ret = bfin_crypto_crc_init(req); | ||
| 469 | if (ret) | ||
| 470 | return ret; | ||
| 471 | |||
| 472 | return bfin_crypto_crc_finup(req); | ||
| 473 | } | ||
| 474 | |||
| 475 | static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
| 476 | unsigned int keylen) | ||
| 477 | { | ||
| 478 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm); | ||
| 479 | |||
| 480 | dev_dbg(crc_ctx->crc->dev, "crc_setkey\n"); | ||
| 481 | if (keylen != CHKSUM_DIGEST_SIZE) { | ||
| 482 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 483 | return -EINVAL; | ||
| 484 | } | ||
| 485 | |||
| 486 | crc_ctx->key = get_unaligned_le32(key); | ||
| 487 | |||
| 488 | return 0; | ||
| 489 | } | ||
| 490 | |||
| 491 | static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm) | ||
| 492 | { | ||
| 493 | struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm); | ||
| 494 | |||
| 495 | crc_ctx->key = 0; | ||
| 496 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
| 497 | sizeof(struct bfin_crypto_crc_reqctx)); | ||
| 498 | |||
| 499 | return 0; | ||
| 500 | } | ||
| 501 | |||
| 502 | static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm) | ||
| 503 | { | ||
| 504 | } | ||
| 505 | |||
| 506 | static struct ahash_alg algs = { | ||
| 507 | .init = bfin_crypto_crc_init, | ||
| 508 | .update = bfin_crypto_crc_update, | ||
| 509 | .final = bfin_crypto_crc_final, | ||
| 510 | .finup = bfin_crypto_crc_finup, | ||
| 511 | .digest = bfin_crypto_crc_digest, | ||
| 512 | .setkey = bfin_crypto_crc_setkey, | ||
| 513 | .halg.digestsize = CHKSUM_DIGEST_SIZE, | ||
| 514 | .halg.base = { | ||
| 515 | .cra_name = "hmac(crc32)", | ||
| 516 | .cra_driver_name = DRIVER_NAME, | ||
| 517 | .cra_priority = 100, | ||
| 518 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
| 519 | CRYPTO_ALG_ASYNC, | ||
| 520 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | ||
| 521 | .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), | ||
| 522 | .cra_alignmask = 3, | ||
| 523 | .cra_module = THIS_MODULE, | ||
| 524 | .cra_init = bfin_crypto_crc_cra_init, | ||
| 525 | .cra_exit = bfin_crypto_crc_cra_exit, | ||
| 526 | } | ||
| 527 | }; | ||
| 528 | |||
| 529 | static void bfin_crypto_crc_done_task(unsigned long data) | ||
| 530 | { | ||
| 531 | struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data; | ||
| 532 | |||
| 533 | bfin_crypto_crc_handle_queue(crc, NULL); | ||
| 534 | } | ||
| 535 | |||
| 536 | static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id) | ||
| 537 | { | ||
| 538 | struct bfin_crypto_crc *crc = dev_id; | ||
| 539 | |||
| 540 | if (crc->regs->status & DCNTEXP) { | ||
| 541 | crc->regs->status = DCNTEXP; | ||
| 542 | SSYNC(); | ||
| 543 | |||
| 544 | /* prepare results */ | ||
| 545 | put_unaligned_le32(crc->regs->result, crc->req->result); | ||
| 546 | |||
| 547 | crc->regs->control &= ~BLKEN; | ||
| 548 | crc->busy = 0; | ||
| 549 | |||
| 550 | if (crc->req->base.complete) | ||
| 551 | crc->req->base.complete(&crc->req->base, 0); | ||
| 552 | |||
| 553 | tasklet_schedule(&crc->done_task); | ||
| 554 | |||
| 555 | return IRQ_HANDLED; | ||
| 556 | } else | ||
| 557 | return IRQ_NONE; | ||
| 558 | } | ||
| 559 | |||
| 560 | #ifdef CONFIG_PM | ||
| 561 | /** | ||
| 562 | * bfin_crypto_crc_suspend - suspend crc device | ||
| 563 | * @pdev: device being suspended | ||
| 564 | * @state: requested suspend state | ||
| 565 | */ | ||
| 566 | static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state) | ||
| 567 | { | ||
| 568 | struct bfin_crypto_crc *crc = platform_get_drvdata(pdev); | ||
| 569 | int i = 100000; | ||
| 570 | |||
| 571 | while ((crc->regs->control & BLKEN) && --i) | ||
| 572 | cpu_relax(); | ||
| 573 | |||
| 574 | if (i == 0) | ||
| 575 | return -EBUSY; | ||
| 576 | |||
| 577 | return 0; | ||
| 578 | } | ||
| 579 | #else | ||
| 580 | # define bfin_crypto_crc_suspend NULL | ||
| 581 | #endif | ||
| 582 | |||
| 583 | #define bfin_crypto_crc_resume NULL | ||
| 584 | |||
| 585 | /** | ||
| 586 | * bfin_crypto_crc_probe - Initialize module | ||
| 587 | * | ||
| 588 | */ | ||
| 589 | static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev) | ||
| 590 | { | ||
| 591 | struct device *dev = &pdev->dev; | ||
| 592 | struct resource *res; | ||
| 593 | struct bfin_crypto_crc *crc; | ||
| 594 | unsigned int timeout = 100000; | ||
| 595 | int ret; | ||
| 596 | |||
| 597 | crc = kzalloc(sizeof(*crc), GFP_KERNEL); | ||
| 598 | if (!crc) { | ||
| 599 | dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n"); | ||
| 600 | return -ENOMEM; | ||
| 601 | } | ||
| 602 | |||
| 603 | crc->dev = dev; | ||
| 604 | |||
| 605 | INIT_LIST_HEAD(&crc->list); | ||
| 606 | spin_lock_init(&crc->lock); | ||
| 607 | tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc); | ||
| 608 | crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH); | ||
| 609 | |||
| 610 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 611 | if (res == NULL) { | ||
| 612 | dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); | ||
| 613 | ret = -ENOENT; | ||
| 614 | goto out_error_free_mem; | ||
| 615 | } | ||
| 616 | |||
| 617 | crc->regs = ioremap(res->start, resource_size(res)); | ||
| 618 | if (!crc->regs) { | ||
| 619 | dev_err(&pdev->dev, "Cannot map CRC IO\n"); | ||
| 620 | ret = -ENXIO; | ||
| 621 | goto out_error_free_mem; | ||
| 622 | } | ||
| 623 | |||
| 624 | crc->irq = platform_get_irq(pdev, 0); | ||
| 625 | if (crc->irq < 0) { | ||
| 626 | dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n"); | ||
| 627 | ret = -ENOENT; | ||
| 628 | goto out_error_unmap; | ||
| 629 | } | ||
| 630 | |||
| 631 | ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc); | ||
| 632 | if (ret) { | ||
| 633 | dev_err(&pdev->dev, "Unable to request Blackfin CRC IRQ\n"); | ||
| 634 | goto out_error_unmap; | ||
| 635 | } | ||
| 636 | |||
| 637 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
| 638 | if (res == NULL) { | ||
| 639 | dev_err(&pdev->dev, "No CRC DMA channel specified\n"); | ||
| 640 | ret = -ENOENT; | ||
| 641 | goto out_error_irq; | ||
| 642 | } | ||
| 643 | crc->dma_ch = res->start; | ||
| 644 | |||
| 645 | ret = request_dma(crc->dma_ch, dev_name(dev)); | ||
| 646 | if (ret) { | ||
| 647 | dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n"); | ||
| 648 | goto out_error_irq; | ||
| 649 | } | ||
| 650 | |||
| 651 | crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL); | ||
| 652 | if (crc->sg_cpu == NULL) { | ||
| 653 | ret = -ENOMEM; | ||
| 654 | goto out_error_dma; | ||
| 655 | } | ||
| 656 | /* | ||
| 657 | * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle + | ||
| 658 | * 1 last + 1 next dma descriptors | ||
| 659 | */ | ||
| 660 | crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1)); | ||
| 661 | |||
| 662 | crc->regs->control = 0; | ||
| 663 | SSYNC(); | ||
| 664 | crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data; | ||
| 665 | SSYNC(); | ||
| 666 | |||
| 667 | while (!(crc->regs->status & LUTDONE) && (--timeout) > 0) | ||
| 668 | cpu_relax(); | ||
| 669 | |||
| 670 | if (timeout == 0) | ||
| 671 | dev_info(&pdev->dev, "timed out initializing CRC polynomial\n"); | ||
| 672 | |||
| 673 | spin_lock(&crc_list.lock); | ||
| 674 | list_add(&crc->list, &crc_list.dev_list); | ||
| 675 | spin_unlock(&crc_list.lock); | ||
| 676 | |||
| 677 | platform_set_drvdata(pdev, crc); | ||
| 678 | |||
| 679 | ret = crypto_register_ahash(&algs); | ||
| 680 | if (ret) { | ||
| 681 | spin_lock(&crc_list.lock); | ||
| 682 | list_del(&crc->list); | ||
| 683 | spin_unlock(&crc_list.lock); | ||
| 684 | dev_err(&pdev->dev, "Cannot register crypto ahash device\n"); | ||
| 685 | goto out_error_dma; | ||
| 686 | } | ||
| 687 | |||
| 688 | dev_info(&pdev->dev, "initialized\n"); | ||
| 689 | |||
| 690 | return 0; | ||
| 691 | |||
| 692 | out_error_dma: | ||
| 693 | if (crc->sg_cpu) | ||
| 694 | dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma); | ||
| 695 | free_dma(crc->dma_ch); | ||
| 696 | out_error_irq: | ||
| 697 | free_irq(crc->irq, crc->dev); | ||
| 698 | out_error_unmap: | ||
| 699 | iounmap((void *)crc->regs); | ||
| 700 | out_error_free_mem: | ||
| 701 | kfree(crc); | ||
| 702 | |||
| 703 | return ret; | ||
| 704 | } | ||
| 705 | |||
| 706 | /** | ||
| 707 | * bfin_crypto_crc_remove - remove crc device | ||
| 708 | * | ||
| 709 | */ | ||
| 710 | static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev) | ||
| 711 | { | ||
| 712 | struct bfin_crypto_crc *crc = platform_get_drvdata(pdev); | ||
| 713 | |||
| 714 | if (!crc) | ||
| 715 | return -ENODEV; | ||
| 716 | |||
| 717 | spin_lock(&crc_list.lock); | ||
| 718 | list_del(&crc->list); | ||
| 719 | spin_unlock(&crc_list.lock); | ||
| 720 | |||
| 721 | crypto_unregister_ahash(&algs); | ||
| 722 | tasklet_kill(&crc->done_task); | ||
| 723 | iounmap((void *)crc->regs); | ||
| 724 | free_dma(crc->dma_ch); | ||
| 725 | if (crc->irq > 0) | ||
| 726 | free_irq(crc->irq, crc->dev); | ||
| 727 | kfree(crc); | ||
| 728 | |||
| 729 | return 0; | ||
| 730 | } | ||
| 731 | |||
| 732 | static struct platform_driver bfin_crypto_crc_driver = { | ||
| 733 | .probe = bfin_crypto_crc_probe, | ||
| 734 | .remove = __devexit_p(bfin_crypto_crc_remove), | ||
| 735 | .suspend = bfin_crypto_crc_suspend, | ||
| 736 | .resume = bfin_crypto_crc_resume, | ||
| 737 | .driver = { | ||
| 738 | .name = DRIVER_NAME, | ||
| 739 | .owner = THIS_MODULE, | ||
| 740 | }, | ||
| 741 | }; | ||
| 742 | |||
| 743 | /** | ||
| 744 | * bfin_crypto_crc_mod_init - Initialize module | ||
| 745 | * | ||
| 746 | * Checks the module params and registers the platform driver. | ||
| 747 | * Real work is in the platform probe function. | ||
| 748 | */ | ||
| 749 | static int __init bfin_crypto_crc_mod_init(void) | ||
| 750 | { | ||
| 751 | int ret; | ||
| 752 | |||
| 753 | pr_info("Blackfin hardware CRC crypto driver\n"); | ||
| 754 | |||
| 755 | INIT_LIST_HEAD(&crc_list.dev_list); | ||
| 756 | spin_lock_init(&crc_list.lock); | ||
| 757 | |||
| 758 | ret = platform_driver_register(&bfin_crypto_crc_driver); | ||
| 759 | if (ret) { | ||
| 760 | pr_err("unable to register driver\n"); | ||
| 761 | return ret; | ||
| 762 | } | ||
| 763 | |||
| 764 | return 0; | ||
| 765 | } | ||
| 766 | |||
| 767 | /** | ||
| 768 | * bfin_crypto_crc_mod_exit - Deinitialize module | ||
| 769 | */ | ||
| 770 | static void __exit bfin_crypto_crc_mod_exit(void) | ||
| 771 | { | ||
| 772 | platform_driver_unregister(&bfin_crypto_crc_driver); | ||
| 773 | } | ||
| 774 | |||
| 775 | module_init(bfin_crypto_crc_mod_init); | ||
| 776 | module_exit(bfin_crypto_crc_mod_exit); | ||
| 777 | |||
| 778 | MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); | ||
| 779 | MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver"); | ||
| 780 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 2d876bb98ff4..65c7668614ab 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
| @@ -32,10 +32,13 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE | |||
| 32 | config CRYPTO_DEV_FSL_CAAM_INTC | 32 | config CRYPTO_DEV_FSL_CAAM_INTC |
| 33 | bool "Job Ring interrupt coalescing" | 33 | bool "Job Ring interrupt coalescing" |
| 34 | depends on CRYPTO_DEV_FSL_CAAM | 34 | depends on CRYPTO_DEV_FSL_CAAM |
| 35 | default y | 35 | default n |
| 36 | help | 36 | help |
| 37 | Enable the Job Ring's interrupt coalescing feature. | 37 | Enable the Job Ring's interrupt coalescing feature. |
| 38 | 38 | ||
| 39 | Note: the driver already provides adequate | ||
| 40 | interrupt coalescing in software. | ||
| 41 | |||
| 39 | config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD | 42 | config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD |
| 40 | int "Job Ring interrupt coalescing count threshold" | 43 | int "Job Ring interrupt coalescing count threshold" |
| 41 | depends on CRYPTO_DEV_FSL_CAAM_INTC | 44 | depends on CRYPTO_DEV_FSL_CAAM_INTC |
| @@ -70,3 +73,28 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | |||
| 70 | 73 | ||
| 71 | To compile this as a module, choose M here: the module | 74 | To compile this as a module, choose M here: the module |
| 72 | will be called caamalg. | 75 | will be called caamalg. |
| 76 | |||
| 77 | config CRYPTO_DEV_FSL_CAAM_AHASH_API | ||
| 78 | tristate "Register hash algorithm implementations with Crypto API" | ||
| 79 | depends on CRYPTO_DEV_FSL_CAAM | ||
| 80 | default y | ||
| 81 | select CRYPTO_AHASH | ||
| 82 | help | ||
| 83 | Selecting this will offload ahash operations for users of | ||
| 84 | the scatterlist crypto API to the SEC4 via job rings. | ||
| 85 | |||
| 86 | To compile this as a module, choose M here: the module | ||
| 87 | will be called caamhash. | ||
| 88 | |||
| 89 | config CRYPTO_DEV_FSL_CAAM_RNG_API | ||
| 90 | tristate "Register caam device for hwrng API" | ||
| 91 | depends on CRYPTO_DEV_FSL_CAAM | ||
| 92 | default y | ||
| 93 | select CRYPTO_RNG | ||
| 94 | select HW_RANDOM | ||
| 95 | help | ||
| 96 | Selecting this will register the SEC4 hardware rng with | ||
| 97 | the hw_random API for supplying the kernel entropy pool. | ||
| 98 | |||
| 99 | To compile this as a module, choose M here: the module | ||
| 100 | will be called caamrng. | ||
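A quick way to sanity-check the caamrng path once the module is loaded is to pull bytes from the standard hw_random character device. A minimal userspace sketch, assuming the SEC4 RNG is the current hw_random backend:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);	/* standard hw_random node */

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	if (read(fd, buf, sizeof(buf)) != sizeof(buf)) {
		perror("read");
		close(fd);
		return 1;
	}
	for (int i = 0; i < 16; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}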
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index ef39011b4505..b1eb44838db5 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile | |||
| @@ -4,5 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o | 5 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
| 6 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o | 6 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
| 7 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | ||
| 8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | ||
| 7 | 9 | ||
| 8 | caam-objs := ctrl.o jr.o error.o | 10 | caam-objs := ctrl.o jr.o error.o key_gen.o |
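The new key_gen.o hosts the split-key generation that the caamalg.c hunk below removes. Judging from the gen_split_aead_key() call site that remains in caamalg.c, the shared helper's prototype is presumably along these lines (a sketch inferred from the caller, not the actual key_gen.h):

/* Presumed shape of the shared split-key helper now living in
 * key_gen.{c,h}; inferred from the gen_split_aead_key() call site
 * below, so the real prototype may differ in detail. */
u32 gen_split_key(struct device *jrdev, u8 *key_out,
		  int split_key_len, int split_key_pad_len,
		  const u8 *key_in, u32 authkeylen, u32 alg_op);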
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 4eec389184d3..0c1ea8492eff 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -37,9 +37,10 @@ | |||
| 37 | * | ShareDesc Pointer | | 37 | * | ShareDesc Pointer | |
| 38 | * | SEQ_OUT_PTR | | 38 | * | SEQ_OUT_PTR | |
| 39 | * | (output buffer) | | 39 | * | (output buffer) | |
| 40 | * | (output length) | | ||
| 40 | * | SEQ_IN_PTR | | 41 | * | SEQ_IN_PTR | |
| 41 | * | (input buffer) | | 42 | * | (input buffer) | |
| 42 | * | LOAD (to DECO) | | 43 | * | (input length) | |
| 43 | * --------------------- | 44 | * --------------------- |
| 44 | */ | 45 | */ |
| 45 | 46 | ||
| @@ -50,6 +51,8 @@ | |||
| 50 | #include "desc_constr.h" | 51 | #include "desc_constr.h" |
| 51 | #include "jr.h" | 52 | #include "jr.h" |
| 52 | #include "error.h" | 53 | #include "error.h" |
| 54 | #include "sg_sw_sec4.h" | ||
| 55 | #include "key_gen.h" | ||
| 53 | 56 | ||
| 54 | /* | 57 | /* |
| 55 | * crypto alg | 58 | * crypto alg |
| @@ -62,7 +65,7 @@ | |||
| 62 | #define CAAM_MAX_IV_LENGTH 16 | 65 | #define CAAM_MAX_IV_LENGTH 16 |
| 63 | 66 | ||
| 64 | /* length of descriptors text */ | 67 | /* length of descriptors text */ |
| 65 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3) | 68 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) |
| 66 | 69 | ||
| 67 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) | 70 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) |
| 68 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) | 71 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) |
| @@ -143,11 +146,11 @@ static inline void aead_append_ld_iv(u32 *desc, int ivsize) | |||
| 143 | */ | 146 | */ |
| 144 | static inline void ablkcipher_append_src_dst(u32 *desc) | 147 | static inline void ablkcipher_append_src_dst(u32 *desc) |
| 145 | { | 148 | { |
| 146 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ | 149 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
| 147 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ | 150 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
| 148 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | \ | 151 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | |
| 149 | KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); \ | 152 | KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); |
| 150 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); \ | 153 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); |
| 151 | } | 154 | } |
| 152 | 155 | ||
| 153 | /* | 156 | /* |
| @@ -452,121 +455,12 @@ static int aead_setauthsize(struct crypto_aead *authenc, | |||
| 452 | return 0; | 455 | return 0; |
| 453 | } | 456 | } |
| 454 | 457 | ||
| 455 | struct split_key_result { | 458 | static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, |
| 456 | struct completion completion; | 459 | u32 authkeylen) |
| 457 | int err; | ||
| 458 | }; | ||
| 459 | |||
| 460 | static void split_key_done(struct device *dev, u32 *desc, u32 err, | ||
| 461 | void *context) | ||
| 462 | { | 460 | { |
| 463 | struct split_key_result *res = context; | 461 | return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, |
| 464 | 462 | ctx->split_key_pad_len, key_in, authkeylen, | |
| 465 | #ifdef DEBUG | 463 | ctx->alg_op); |
| 466 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 467 | #endif | ||
| 468 | |||
| 469 | if (err) { | ||
| 470 | char tmp[CAAM_ERROR_STR_MAX]; | ||
| 471 | |||
| 472 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
| 473 | } | ||
| 474 | |||
| 475 | res->err = err; | ||
| 476 | |||
| 477 | complete(&res->completion); | ||
| 478 | } | ||
| 479 | |||
| 480 | /* | ||
| 481 | get a split ipad/opad key | ||
| 482 | |||
| 483 | Split key generation----------------------------------------------- | ||
| 484 | |||
| 485 | [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 | ||
| 486 | [01] 0x04000014 key: class2->keyreg len=20 | ||
| 487 | @0xffe01000 | ||
| 488 | [03] 0x84410014 operation: cls2-op sha1 hmac init dec | ||
| 489 | [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm | ||
| 490 | [05] 0xa4000001 jump: class2 local all ->1 [06] | ||
| 491 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | ||
| 492 | @0xffe04000 | ||
| 493 | */ | ||
| 494 | static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) | ||
| 495 | { | ||
| 496 | struct device *jrdev = ctx->jrdev; | ||
| 497 | u32 *desc; | ||
| 498 | struct split_key_result result; | ||
| 499 | dma_addr_t dma_addr_in, dma_addr_out; | ||
| 500 | int ret = 0; | ||
| 501 | |||
| 502 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | ||
| 503 | |||
| 504 | init_job_desc(desc, 0); | ||
| 505 | |||
| 506 | dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen, | ||
| 507 | DMA_TO_DEVICE); | ||
| 508 | if (dma_mapping_error(jrdev, dma_addr_in)) { | ||
| 509 | dev_err(jrdev, "unable to map key input memory\n"); | ||
| 510 | kfree(desc); | ||
| 511 | return -ENOMEM; | ||
| 512 | } | ||
| 513 | append_key(desc, dma_addr_in, authkeylen, CLASS_2 | | ||
| 514 | KEY_DEST_CLASS_REG); | ||
| 515 | |||
| 516 | /* Sets MDHA up into an HMAC-INIT */ | ||
| 517 | append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT | | ||
| 518 | OP_ALG_AS_INIT); | ||
| 519 | |||
| 520 | /* | ||
| 521 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion | ||
| 522 | into both pads inside MDHA | ||
| 523 | */ | ||
| 524 | append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | | ||
| 525 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); | ||
| 526 | |||
| 527 | /* | ||
| 528 | * FIFO_STORE with the explicit split-key content store | ||
| 529 | * (0x26 output type) | ||
| 530 | */ | ||
| 531 | dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | ||
| 532 | DMA_FROM_DEVICE); | ||
| 533 | if (dma_mapping_error(jrdev, dma_addr_out)) { | ||
| 534 | dev_err(jrdev, "unable to map key output memory\n"); | ||
| 535 | kfree(desc); | ||
| 536 | return -ENOMEM; | ||
| 537 | } | ||
| 538 | append_fifo_store(desc, dma_addr_out, ctx->split_key_len, | ||
| 539 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | ||
| 540 | |||
| 541 | #ifdef DEBUG | ||
| 542 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
| 543 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1); | ||
| 544 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 545 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 546 | #endif | ||
| 547 | |||
| 548 | result.err = 0; | ||
| 549 | init_completion(&result.completion); | ||
| 550 | |||
| 551 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | ||
| 552 | if (!ret) { | ||
| 553 | /* in progress */ | ||
| 554 | wait_for_completion_interruptible(&result.completion); | ||
| 555 | ret = result.err; | ||
| 556 | #ifdef DEBUG | ||
| 557 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
| 558 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | ||
| 559 | ctx->split_key_pad_len, 1); | ||
| 560 | #endif | ||
| 561 | } | ||
| 562 | |||
| 563 | dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len, | ||
| 564 | DMA_FROM_DEVICE); | ||
| 565 | dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE); | ||
| 566 | |||
| 567 | kfree(desc); | ||
| 568 | |||
| 569 | return ret; | ||
| 570 | } | 464 | } |
| 571 | 465 | ||
| 572 | static int aead_setkey(struct crypto_aead *aead, | 466 | static int aead_setkey(struct crypto_aead *aead, |
| @@ -610,7 +504,7 @@ static int aead_setkey(struct crypto_aead *aead, | |||
| 610 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 504 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 611 | #endif | 505 | #endif |
| 612 | 506 | ||
| 613 | ret = gen_split_key(ctx, key, authkeylen); | 507 | ret = gen_split_aead_key(ctx, key, authkeylen); |
| 614 | if (ret) { | 508 | if (ret) { |
| 615 | goto badkey; | 509 | goto badkey; |
| 616 | } | 510 | } |
| @@ -757,72 +651,78 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 757 | return ret; | 651 | return ret; |
| 758 | } | 652 | } |
| 759 | 653 | ||
| 760 | struct link_tbl_entry { | ||
| 761 | u64 ptr; | ||
| 762 | u32 len; | ||
| 763 | u8 reserved; | ||
| 764 | u8 buf_pool_id; | ||
| 765 | u16 offset; | ||
| 766 | }; | ||
| 767 | |||
| 768 | /* | 654 | /* |
| 769 | * aead_edesc - s/w-extended aead descriptor | 655 | * aead_edesc - s/w-extended aead descriptor |
| 770 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist | 656 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist |
| 657 | * @assoc_chained: if source is chained | ||
| 771 | * @src_nents: number of segments in input scatterlist | 658 | * @src_nents: number of segments in input scatterlist |
| 659 | * @src_chained: if source is chained | ||
| 772 | * @dst_nents: number of segments in output scatterlist | 660 | * @dst_nents: number of segments in output scatterlist |
| 661 | * @dst_chained: if destination is chained | ||
| 773 | * @iv_dma: dma address of iv for checking continuity and link table | 662 | * @iv_dma: dma address of iv for checking continuity and link table |
| 774 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | 663 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) |
| 775 | * @link_tbl_bytes: length of dma mapped link_tbl space | 664 | * @sec4_sg_bytes: length of dma mapped sec4_sg space |
| 776 | * @link_tbl_dma: bus physical mapped address of h/w link table | 665 | * @sec4_sg_dma: bus physical mapped address of h/w link table |
| 777 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | 666 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
| 778 | */ | 667 | */ |
| 779 | struct aead_edesc { | 668 | struct aead_edesc { |
| 780 | int assoc_nents; | 669 | int assoc_nents; |
| 670 | bool assoc_chained; | ||
| 781 | int src_nents; | 671 | int src_nents; |
| 672 | bool src_chained; | ||
| 782 | int dst_nents; | 673 | int dst_nents; |
| 674 | bool dst_chained; | ||
| 783 | dma_addr_t iv_dma; | 675 | dma_addr_t iv_dma; |
| 784 | int link_tbl_bytes; | 676 | int sec4_sg_bytes; |
| 785 | dma_addr_t link_tbl_dma; | 677 | dma_addr_t sec4_sg_dma; |
| 786 | struct link_tbl_entry *link_tbl; | 678 | struct sec4_sg_entry *sec4_sg; |
| 787 | u32 hw_desc[0]; | 679 | u32 hw_desc[0]; |
| 788 | }; | 680 | }; |
| 789 | 681 | ||
| 790 | /* | 682 | /* |
| 791 | * ablkcipher_edesc - s/w-extended ablkcipher descriptor | 683 | * ablkcipher_edesc - s/w-extended ablkcipher descriptor |
| 792 | * @src_nents: number of segments in input scatterlist | 684 | * @src_nents: number of segments in input scatterlist |
| 685 | * @src_chained: if source is chained | ||
| 793 | * @dst_nents: number of segments in output scatterlist | 686 | * @dst_nents: number of segments in output scatterlist |
| 687 | * @dst_chained: if destination is chained | ||
| 794 | * @iv_dma: dma address of iv for checking continuity and link table | 688 | * @iv_dma: dma address of iv for checking continuity and link table |
| 795 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | 689 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) |
| 796 | * @link_tbl_bytes: length of dma mapped link_tbl space | 690 | * @sec4_sg_bytes: length of dma mapped sec4_sg space |
| 797 | * @link_tbl_dma: bus physical mapped address of h/w link table | 691 | * @sec4_sg_dma: bus physical mapped address of h/w link table |
| 798 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | 692 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
| 799 | */ | 693 | */ |
| 800 | struct ablkcipher_edesc { | 694 | struct ablkcipher_edesc { |
| 801 | int src_nents; | 695 | int src_nents; |
| 696 | bool src_chained; | ||
| 802 | int dst_nents; | 697 | int dst_nents; |
| 698 | bool dst_chained; | ||
| 803 | dma_addr_t iv_dma; | 699 | dma_addr_t iv_dma; |
| 804 | int link_tbl_bytes; | 700 | int sec4_sg_bytes; |
| 805 | dma_addr_t link_tbl_dma; | 701 | dma_addr_t sec4_sg_dma; |
| 806 | struct link_tbl_entry *link_tbl; | 702 | struct sec4_sg_entry *sec4_sg; |
| 807 | u32 hw_desc[0]; | 703 | u32 hw_desc[0]; |
| 808 | }; | 704 | }; |
| 809 | 705 | ||
| 810 | static void caam_unmap(struct device *dev, struct scatterlist *src, | 706 | static void caam_unmap(struct device *dev, struct scatterlist *src, |
| 811 | struct scatterlist *dst, int src_nents, int dst_nents, | 707 | struct scatterlist *dst, int src_nents, |
| 812 | dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma, | 708 | bool src_chained, int dst_nents, bool dst_chained, |
| 813 | int link_tbl_bytes) | 709 | dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, |
| 710 | int sec4_sg_bytes) | ||
| 814 | { | 711 | { |
| 815 | if (unlikely(dst != src)) { | 712 | if (dst != src) { |
| 816 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); | 713 | dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE, |
| 817 | dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); | 714 | src_chained); |
| 715 | dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE, | ||
| 716 | dst_chained); | ||
| 818 | } else { | 717 | } else { |
| 819 | dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); | 718 | dma_unmap_sg_chained(dev, src, src_nents ? : 1, |
| 719 | DMA_BIDIRECTIONAL, src_chained); | ||
| 820 | } | 720 | } |
| 821 | 721 | ||
| 822 | if (iv_dma) | 722 | if (iv_dma) |
| 823 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 723 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); |
| 824 | if (link_tbl_bytes) | 724 | if (sec4_sg_bytes) |
| 825 | dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes, | 725 | dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, |
| 826 | DMA_TO_DEVICE); | 726 | DMA_TO_DEVICE); |
| 827 | } | 727 | } |
| 828 | 728 | ||
| @@ -833,12 +733,13 @@ static void aead_unmap(struct device *dev, | |||
| 833 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 733 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 834 | int ivsize = crypto_aead_ivsize(aead); | 734 | int ivsize = crypto_aead_ivsize(aead); |
| 835 | 735 | ||
| 836 | dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE); | 736 | dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents, |
| 737 | DMA_TO_DEVICE, edesc->assoc_chained); | ||
| 837 | 738 | ||
| 838 | caam_unmap(dev, req->src, req->dst, | 739 | caam_unmap(dev, req->src, req->dst, |
| 839 | edesc->src_nents, edesc->dst_nents, | 740 | edesc->src_nents, edesc->src_chained, edesc->dst_nents, |
| 840 | edesc->iv_dma, ivsize, edesc->link_tbl_dma, | 741 | edesc->dst_chained, edesc->iv_dma, ivsize, |
| 841 | edesc->link_tbl_bytes); | 742 | edesc->sec4_sg_dma, edesc->sec4_sg_bytes); |
| 842 | } | 743 | } |
| 843 | 744 | ||
| 844 | static void ablkcipher_unmap(struct device *dev, | 745 | static void ablkcipher_unmap(struct device *dev, |
| @@ -849,9 +750,9 @@ static void ablkcipher_unmap(struct device *dev, | |||
| 849 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | 750 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); |
| 850 | 751 | ||
| 851 | caam_unmap(dev, req->src, req->dst, | 752 | caam_unmap(dev, req->src, req->dst, |
| 852 | edesc->src_nents, edesc->dst_nents, | 753 | edesc->src_nents, edesc->src_chained, edesc->dst_nents, |
| 853 | edesc->iv_dma, ivsize, edesc->link_tbl_dma, | 754 | edesc->dst_chained, edesc->iv_dma, ivsize, |
| 854 | edesc->link_tbl_bytes); | 755 | edesc->sec4_sg_dma, edesc->sec4_sg_bytes); |
| 855 | } | 756 | } |
| 856 | 757 | ||
| 857 | static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | 758 | static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, |
| @@ -942,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 942 | sizeof(struct iphdr) + req->assoclen + | 843 | sizeof(struct iphdr) + req->assoclen + |
| 943 | ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + | 844 | ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + |
| 944 | ctx->authsize + 36, 1); | 845 | ctx->authsize + 36, 1); |
| 945 | if (!err && edesc->link_tbl_bytes) { | 846 | if (!err && edesc->sec4_sg_bytes) { |
| 946 | struct scatterlist *sg = sg_last(req->src, edesc->src_nents); | 847 | struct scatterlist *sg = sg_last(req->src, edesc->src_nents); |
| 947 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", | 848 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", |
| 948 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), | 849 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), |
| @@ -1026,50 +927,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 1026 | ablkcipher_request_complete(req, err); | 927 | ablkcipher_request_complete(req, err); |
| 1027 | } | 928 | } |
| 1028 | 929 | ||
| 1029 | static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr, | ||
| 1030 | dma_addr_t dma, u32 len, u32 offset) | ||
| 1031 | { | ||
| 1032 | link_tbl_ptr->ptr = dma; | ||
| 1033 | link_tbl_ptr->len = len; | ||
| 1034 | link_tbl_ptr->reserved = 0; | ||
| 1035 | link_tbl_ptr->buf_pool_id = 0; | ||
| 1036 | link_tbl_ptr->offset = offset; | ||
| 1037 | #ifdef DEBUG | ||
| 1038 | print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ", | ||
| 1039 | DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr, | ||
| 1040 | sizeof(struct link_tbl_entry), 1); | ||
| 1041 | #endif | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | /* | ||
| 1045 | * convert scatterlist to h/w link table format | ||
| 1046 | * but does not have final bit; instead, returns last entry | ||
| 1047 | */ | ||
| 1048 | static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg, | ||
| 1049 | int sg_count, struct link_tbl_entry | ||
| 1050 | *link_tbl_ptr, u32 offset) | ||
| 1051 | { | ||
| 1052 | while (sg_count) { | ||
| 1053 | sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg), | ||
| 1054 | sg_dma_len(sg), offset); | ||
| 1055 | link_tbl_ptr++; | ||
| 1056 | sg = sg_next(sg); | ||
| 1057 | sg_count--; | ||
| 1058 | } | ||
| 1059 | return link_tbl_ptr - 1; | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | /* | ||
| 1063 | * convert scatterlist to h/w link table format | ||
| 1064 | * scatterlist must have been previously dma mapped | ||
| 1065 | */ | ||
| 1066 | static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count, | ||
| 1067 | struct link_tbl_entry *link_tbl_ptr, u32 offset) | ||
| 1068 | { | ||
| 1069 | link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset); | ||
| 1070 | link_tbl_ptr->len |= 0x40000000; | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | /* | 930 | /* |
| 1074 | * Fill in aead job descriptor | 931 | * Fill in aead job descriptor |
| 1075 | */ | 932 | */ |
| @@ -1085,7 +942,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1085 | u32 *desc = edesc->hw_desc; | 942 | u32 *desc = edesc->hw_desc; |
| 1086 | u32 out_options = 0, in_options; | 943 | u32 out_options = 0, in_options; |
| 1087 | dma_addr_t dst_dma, src_dma; | 944 | dma_addr_t dst_dma, src_dma; |
| 1088 | int len, link_tbl_index = 0; | 945 | int len, sec4_sg_index = 0; |
| 1089 | 946 | ||
| 1090 | #ifdef DEBUG | 947 | #ifdef DEBUG |
| 1091 | debug("assoclen %d cryptlen %d authsize %d\n", | 948 | debug("assoclen %d cryptlen %d authsize %d\n", |
| @@ -1111,9 +968,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1111 | src_dma = sg_dma_address(req->assoc); | 968 | src_dma = sg_dma_address(req->assoc); |
| 1112 | in_options = 0; | 969 | in_options = 0; |
| 1113 | } else { | 970 | } else { |
| 1114 | src_dma = edesc->link_tbl_dma; | 971 | src_dma = edesc->sec4_sg_dma; |
| 1115 | link_tbl_index += (edesc->assoc_nents ? : 1) + 1 + | 972 | sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + |
| 1116 | (edesc->src_nents ? : 1); | 973 | (edesc->src_nents ? : 1); |
| 1117 | in_options = LDST_SGF; | 974 | in_options = LDST_SGF; |
| 1118 | } | 975 | } |
| 1119 | if (encrypt) | 976 | if (encrypt) |
| @@ -1127,7 +984,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1127 | if (all_contig) { | 984 | if (all_contig) { |
| 1128 | dst_dma = sg_dma_address(req->src); | 985 | dst_dma = sg_dma_address(req->src); |
| 1129 | } else { | 986 | } else { |
| 1130 | dst_dma = src_dma + sizeof(struct link_tbl_entry) * | 987 | dst_dma = src_dma + sizeof(struct sec4_sg_entry) * |
| 1131 | ((edesc->assoc_nents ? : 1) + 1); | 988 | ((edesc->assoc_nents ? : 1) + 1); |
| 1132 | out_options = LDST_SGF; | 989 | out_options = LDST_SGF; |
| 1133 | } | 990 | } |
| @@ -1135,9 +992,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1135 | if (!edesc->dst_nents) { | 992 | if (!edesc->dst_nents) { |
| 1136 | dst_dma = sg_dma_address(req->dst); | 993 | dst_dma = sg_dma_address(req->dst); |
| 1137 | } else { | 994 | } else { |
| 1138 | dst_dma = edesc->link_tbl_dma + | 995 | dst_dma = edesc->sec4_sg_dma + |
| 1139 | link_tbl_index * | 996 | sec4_sg_index * |
| 1140 | sizeof(struct link_tbl_entry); | 997 | sizeof(struct sec4_sg_entry); |
| 1141 | out_options = LDST_SGF; | 998 | out_options = LDST_SGF; |
| 1142 | } | 999 | } |
| 1143 | } | 1000 | } |
| @@ -1163,7 +1020,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1163 | u32 *desc = edesc->hw_desc; | 1020 | u32 *desc = edesc->hw_desc; |
| 1164 | u32 out_options = 0, in_options; | 1021 | u32 out_options = 0, in_options; |
| 1165 | dma_addr_t dst_dma, src_dma; | 1022 | dma_addr_t dst_dma, src_dma; |
| 1166 | int len, link_tbl_index = 0; | 1023 | int len, sec4_sg_index = 0; |
| 1167 | 1024 | ||
| 1168 | #ifdef DEBUG | 1025 | #ifdef DEBUG |
| 1169 | debug("assoclen %d cryptlen %d authsize %d\n", | 1026 | debug("assoclen %d cryptlen %d authsize %d\n", |
| @@ -1188,8 +1045,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1188 | src_dma = sg_dma_address(req->assoc); | 1045 | src_dma = sg_dma_address(req->assoc); |
| 1189 | in_options = 0; | 1046 | in_options = 0; |
| 1190 | } else { | 1047 | } else { |
| 1191 | src_dma = edesc->link_tbl_dma; | 1048 | src_dma = edesc->sec4_sg_dma; |
| 1192 | link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents; | 1049 | sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; |
| 1193 | in_options = LDST_SGF; | 1050 | in_options = LDST_SGF; |
| 1194 | } | 1051 | } |
| 1195 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + | 1052 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + |
| @@ -1199,13 +1056,13 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1199 | dst_dma = edesc->iv_dma; | 1056 | dst_dma = edesc->iv_dma; |
| 1200 | } else { | 1057 | } else { |
| 1201 | if (likely(req->src == req->dst)) { | 1058 | if (likely(req->src == req->dst)) { |
| 1202 | dst_dma = src_dma + sizeof(struct link_tbl_entry) * | 1059 | dst_dma = src_dma + sizeof(struct sec4_sg_entry) * |
| 1203 | edesc->assoc_nents; | 1060 | edesc->assoc_nents; |
| 1204 | out_options = LDST_SGF; | 1061 | out_options = LDST_SGF; |
| 1205 | } else { | 1062 | } else { |
| 1206 | dst_dma = edesc->link_tbl_dma + | 1063 | dst_dma = edesc->sec4_sg_dma + |
| 1207 | link_tbl_index * | 1064 | sec4_sg_index * |
| 1208 | sizeof(struct link_tbl_entry); | 1065 | sizeof(struct sec4_sg_entry); |
| 1209 | out_options = LDST_SGF; | 1066 | out_options = LDST_SGF; |
| 1210 | } | 1067 | } |
| 1211 | } | 1068 | } |
| @@ -1226,7 +1083,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1226 | u32 *desc = edesc->hw_desc; | 1083 | u32 *desc = edesc->hw_desc; |
| 1227 | u32 out_options = 0, in_options; | 1084 | u32 out_options = 0, in_options; |
| 1228 | dma_addr_t dst_dma, src_dma; | 1085 | dma_addr_t dst_dma, src_dma; |
| 1229 | int len, link_tbl_index = 0; | 1086 | int len, sec4_sg_index = 0; |
| 1230 | 1087 | ||
| 1231 | #ifdef DEBUG | 1088 | #ifdef DEBUG |
| 1232 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | 1089 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", |
| @@ -1244,8 +1101,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1244 | src_dma = edesc->iv_dma; | 1101 | src_dma = edesc->iv_dma; |
| 1245 | in_options = 0; | 1102 | in_options = 0; |
| 1246 | } else { | 1103 | } else { |
| 1247 | src_dma = edesc->link_tbl_dma; | 1104 | src_dma = edesc->sec4_sg_dma; |
| 1248 | link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents; | 1105 | sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents; |
| 1249 | in_options = LDST_SGF; | 1106 | in_options = LDST_SGF; |
| 1250 | } | 1107 | } |
| 1251 | append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); | 1108 | append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); |
| @@ -1254,16 +1111,16 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1254 | if (!edesc->src_nents && iv_contig) { | 1111 | if (!edesc->src_nents && iv_contig) { |
| 1255 | dst_dma = sg_dma_address(req->src); | 1112 | dst_dma = sg_dma_address(req->src); |
| 1256 | } else { | 1113 | } else { |
| 1257 | dst_dma = edesc->link_tbl_dma + | 1114 | dst_dma = edesc->sec4_sg_dma + |
| 1258 | sizeof(struct link_tbl_entry); | 1115 | sizeof(struct sec4_sg_entry); |
| 1259 | out_options = LDST_SGF; | 1116 | out_options = LDST_SGF; |
| 1260 | } | 1117 | } |
| 1261 | } else { | 1118 | } else { |
| 1262 | if (!edesc->dst_nents) { | 1119 | if (!edesc->dst_nents) { |
| 1263 | dst_dma = sg_dma_address(req->dst); | 1120 | dst_dma = sg_dma_address(req->dst); |
| 1264 | } else { | 1121 | } else { |
| 1265 | dst_dma = edesc->link_tbl_dma + | 1122 | dst_dma = edesc->sec4_sg_dma + |
| 1266 | link_tbl_index * sizeof(struct link_tbl_entry); | 1123 | sec4_sg_index * sizeof(struct sec4_sg_entry); |
| 1267 | out_options = LDST_SGF; | 1124 | out_options = LDST_SGF; |
| 1268 | } | 1125 | } |
| 1269 | } | 1126 | } |
| @@ -1271,28 +1128,6 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 1271 | } | 1128 | } |
| 1272 | 1129 | ||
| 1273 | /* | 1130 | /* |
| 1274 | * derive number of elements in scatterlist | ||
| 1275 | */ | ||
| 1276 | static int sg_count(struct scatterlist *sg_list, int nbytes) | ||
| 1277 | { | ||
| 1278 | struct scatterlist *sg = sg_list; | ||
| 1279 | int sg_nents = 0; | ||
| 1280 | |||
| 1281 | while (nbytes > 0) { | ||
| 1282 | sg_nents++; | ||
| 1283 | nbytes -= sg->length; | ||
| 1284 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | ||
| 1285 | BUG(); /* Not support chaining */ | ||
| 1286 | sg = scatterwalk_sg_next(sg); | ||
| 1287 | } | ||
| 1288 | |||
| 1289 | if (likely(sg_nents == 1)) | ||
| 1290 | return 0; | ||
| 1291 | |||
| 1292 | return sg_nents; | ||
| 1293 | } | ||
| 1294 | |||
| 1295 | /* | ||
| 1296 | * allocate and map the aead extended descriptor | 1131 | * allocate and map the aead extended descriptor |
| 1297 | */ | 1132 | */ |
| 1298 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | 1133 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, |
| @@ -1308,25 +1143,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1308 | dma_addr_t iv_dma = 0; | 1143 | dma_addr_t iv_dma = 0; |
| 1309 | int sgc; | 1144 | int sgc; |
| 1310 | bool all_contig = true; | 1145 | bool all_contig = true; |
| 1146 | bool assoc_chained = false, src_chained = false, dst_chained = false; | ||
| 1311 | int ivsize = crypto_aead_ivsize(aead); | 1147 | int ivsize = crypto_aead_ivsize(aead); |
| 1312 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; | 1148 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; |
| 1313 | 1149 | ||
| 1314 | assoc_nents = sg_count(req->assoc, req->assoclen); | 1150 | assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); |
| 1315 | src_nents = sg_count(req->src, req->cryptlen); | 1151 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); |
| 1316 | 1152 | ||
| 1317 | if (unlikely(req->dst != req->src)) | 1153 | if (unlikely(req->dst != req->src)) |
| 1318 | dst_nents = sg_count(req->dst, req->cryptlen); | 1154 | dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); |
| 1319 | 1155 | ||
| 1320 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, | 1156 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, |
| 1321 | DMA_BIDIRECTIONAL); | 1157 | DMA_BIDIRECTIONAL, assoc_chained); |
| 1322 | if (likely(req->src == req->dst)) { | 1158 | if (likely(req->src == req->dst)) { |
| 1323 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1159 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
| 1324 | DMA_BIDIRECTIONAL); | 1160 | DMA_BIDIRECTIONAL, src_chained); |
| 1325 | } else { | 1161 | } else { |
| 1326 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1162 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
| 1327 | DMA_TO_DEVICE); | 1163 | DMA_TO_DEVICE, src_chained); |
| 1328 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | 1164 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, |
| 1329 | DMA_FROM_DEVICE); | 1165 | DMA_FROM_DEVICE, dst_chained); |
| 1330 | } | 1166 | } |
| 1331 | 1167 | ||
| 1332 | /* Check if data are contiguous */ | 1168 | /* Check if data are contiguous */ |
| @@ -1337,50 +1173,53 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1337 | all_contig = false; | 1173 | all_contig = false; |
| 1338 | assoc_nents = assoc_nents ? : 1; | 1174 | assoc_nents = assoc_nents ? : 1; |
| 1339 | src_nents = src_nents ? : 1; | 1175 | src_nents = src_nents ? : 1; |
| 1340 | link_tbl_len = assoc_nents + 1 + src_nents; | 1176 | sec4_sg_len = assoc_nents + 1 + src_nents; |
| 1341 | } | 1177 | } |
| 1342 | link_tbl_len += dst_nents; | 1178 | sec4_sg_len += dst_nents; |
| 1343 | 1179 | ||
| 1344 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | 1180 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
| 1345 | 1181 | ||
| 1346 | /* allocate space for base edesc and hw desc commands, link tables */ | 1182 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1347 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | 1183 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + |
| 1348 | link_tbl_bytes, GFP_DMA | flags); | 1184 | sec4_sg_bytes, GFP_DMA | flags); |
| 1349 | if (!edesc) { | 1185 | if (!edesc) { |
| 1350 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1186 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1351 | return ERR_PTR(-ENOMEM); | 1187 | return ERR_PTR(-ENOMEM); |
| 1352 | } | 1188 | } |
| 1353 | 1189 | ||
| 1354 | edesc->assoc_nents = assoc_nents; | 1190 | edesc->assoc_nents = assoc_nents; |
| 1191 | edesc->assoc_chained = assoc_chained; | ||
| 1355 | edesc->src_nents = src_nents; | 1192 | edesc->src_nents = src_nents; |
| 1193 | edesc->src_chained = src_chained; | ||
| 1356 | edesc->dst_nents = dst_nents; | 1194 | edesc->dst_nents = dst_nents; |
| 1195 | edesc->dst_chained = dst_chained; | ||
| 1357 | edesc->iv_dma = iv_dma; | 1196 | edesc->iv_dma = iv_dma; |
| 1358 | edesc->link_tbl_bytes = link_tbl_bytes; | 1197 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
| 1359 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | 1198 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + |
| 1360 | desc_bytes; | 1199 | desc_bytes; |
| 1361 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | 1200 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
| 1362 | link_tbl_bytes, DMA_TO_DEVICE); | 1201 | sec4_sg_bytes, DMA_TO_DEVICE); |
| 1363 | *all_contig_ptr = all_contig; | 1202 | *all_contig_ptr = all_contig; |
| 1364 | 1203 | ||
| 1365 | link_tbl_index = 0; | 1204 | sec4_sg_index = 0; |
| 1366 | if (!all_contig) { | 1205 | if (!all_contig) { |
| 1367 | sg_to_link_tbl(req->assoc, | 1206 | sg_to_sec4_sg(req->assoc, |
| 1368 | (assoc_nents ? : 1), | 1207 | (assoc_nents ? : 1), |
| 1369 | edesc->link_tbl + | 1208 | edesc->sec4_sg + |
| 1370 | link_tbl_index, 0); | 1209 | sec4_sg_index, 0); |
| 1371 | link_tbl_index += assoc_nents ? : 1; | 1210 | sec4_sg_index += assoc_nents ? : 1; |
| 1372 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | 1211 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, |
| 1373 | iv_dma, ivsize, 0); | 1212 | iv_dma, ivsize, 0); |
| 1374 | link_tbl_index += 1; | 1213 | sec4_sg_index += 1; |
| 1375 | sg_to_link_tbl_last(req->src, | 1214 | sg_to_sec4_sg_last(req->src, |
| 1376 | (src_nents ? : 1), | 1215 | (src_nents ? : 1), |
| 1377 | edesc->link_tbl + | 1216 | edesc->sec4_sg + |
| 1378 | link_tbl_index, 0); | 1217 | sec4_sg_index, 0); |
| 1379 | link_tbl_index += src_nents ? : 1; | 1218 | sec4_sg_index += src_nents ? : 1; |
| 1380 | } | 1219 | } |
| 1381 | if (dst_nents) { | 1220 | if (dst_nents) { |
| 1382 | sg_to_link_tbl_last(req->dst, dst_nents, | 1221 | sg_to_sec4_sg_last(req->dst, dst_nents, |
| 1383 | edesc->link_tbl + link_tbl_index, 0); | 1222 | edesc->sec4_sg + sec4_sg_index, 0); |
| 1384 | } | 1223 | } |
| 1385 | 1224 | ||
| 1386 | return edesc; | 1225 | return edesc; |
| @@ -1487,24 +1326,25 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
| 1487 | int sgc; | 1326 | int sgc; |
| 1488 | u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; | 1327 | u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; |
| 1489 | int ivsize = crypto_aead_ivsize(aead); | 1328 | int ivsize = crypto_aead_ivsize(aead); |
| 1490 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; | 1329 | bool assoc_chained = false, src_chained = false, dst_chained = false; |
| 1330 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | ||
| 1491 | 1331 | ||
| 1492 | assoc_nents = sg_count(req->assoc, req->assoclen); | 1332 | assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); |
| 1493 | src_nents = sg_count(req->src, req->cryptlen); | 1333 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); |
| 1494 | 1334 | ||
| 1495 | if (unlikely(req->dst != req->src)) | 1335 | if (unlikely(req->dst != req->src)) |
| 1496 | dst_nents = sg_count(req->dst, req->cryptlen); | 1336 | dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); |
| 1497 | 1337 | ||
| 1498 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, | 1338 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, |
| 1499 | DMA_BIDIRECTIONAL); | 1339 | DMA_BIDIRECTIONAL, assoc_chained); |
| 1500 | if (likely(req->src == req->dst)) { | 1340 | if (likely(req->src == req->dst)) { |
| 1501 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1341 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
| 1502 | DMA_BIDIRECTIONAL); | 1342 | DMA_BIDIRECTIONAL, src_chained); |
| 1503 | } else { | 1343 | } else { |
| 1504 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1344 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
| 1505 | DMA_TO_DEVICE); | 1345 | DMA_TO_DEVICE, src_chained); |
| 1506 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | 1346 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, |
| 1507 | DMA_FROM_DEVICE); | 1347 | DMA_FROM_DEVICE, dst_chained); |
| 1508 | } | 1348 | } |
| 1509 | 1349 | ||
| 1510 | /* Check if data are contiguous */ | 1350 | /* Check if data are contiguous */ |
| @@ -1516,58 +1356,61 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
| 1516 | contig &= ~GIV_DST_CONTIG; | 1356 | contig &= ~GIV_DST_CONTIG; |
| 1517 | if (unlikely(req->src != req->dst)) { | 1357 | if (unlikely(req->src != req->dst)) { |
| 1518 | dst_nents = dst_nents ? : 1; | 1358 | dst_nents = dst_nents ? : 1; |
| 1519 | link_tbl_len += 1; | 1359 | sec4_sg_len += 1; |
| 1520 | } | 1360 | } |
| 1521 | if (!(contig & GIV_SRC_CONTIG)) { | 1361 | if (!(contig & GIV_SRC_CONTIG)) { |
| 1522 | assoc_nents = assoc_nents ? : 1; | 1362 | assoc_nents = assoc_nents ? : 1; |
| 1523 | src_nents = src_nents ? : 1; | 1363 | src_nents = src_nents ? : 1; |
| 1524 | link_tbl_len += assoc_nents + 1 + src_nents; | 1364 | sec4_sg_len += assoc_nents + 1 + src_nents; |
| 1525 | if (likely(req->src == req->dst)) | 1365 | if (likely(req->src == req->dst)) |
| 1526 | contig &= ~GIV_DST_CONTIG; | 1366 | contig &= ~GIV_DST_CONTIG; |
| 1527 | } | 1367 | } |
| 1528 | link_tbl_len += dst_nents; | 1368 | sec4_sg_len += dst_nents; |
| 1529 | 1369 | ||
| 1530 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | 1370 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
| 1531 | 1371 | ||
| 1532 | /* allocate space for base edesc and hw desc commands, link tables */ | 1372 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1533 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | 1373 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + |
| 1534 | link_tbl_bytes, GFP_DMA | flags); | 1374 | sec4_sg_bytes, GFP_DMA | flags); |
| 1535 | if (!edesc) { | 1375 | if (!edesc) { |
| 1536 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1376 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1537 | return ERR_PTR(-ENOMEM); | 1377 | return ERR_PTR(-ENOMEM); |
| 1538 | } | 1378 | } |
| 1539 | 1379 | ||
| 1540 | edesc->assoc_nents = assoc_nents; | 1380 | edesc->assoc_nents = assoc_nents; |
| 1381 | edesc->assoc_chained = assoc_chained; | ||
| 1541 | edesc->src_nents = src_nents; | 1382 | edesc->src_nents = src_nents; |
| 1383 | edesc->src_chained = src_chained; | ||
| 1542 | edesc->dst_nents = dst_nents; | 1384 | edesc->dst_nents = dst_nents; |
| 1385 | edesc->dst_chained = dst_chained; | ||
| 1543 | edesc->iv_dma = iv_dma; | 1386 | edesc->iv_dma = iv_dma; |
| 1544 | edesc->link_tbl_bytes = link_tbl_bytes; | 1387 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
| 1545 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | 1388 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + |
| 1546 | desc_bytes; | 1389 | desc_bytes; |
| 1547 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | 1390 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
| 1548 | link_tbl_bytes, DMA_TO_DEVICE); | 1391 | sec4_sg_bytes, DMA_TO_DEVICE); |
| 1549 | *contig_ptr = contig; | 1392 | *contig_ptr = contig; |
| 1550 | 1393 | ||
| 1551 | link_tbl_index = 0; | 1394 | sec4_sg_index = 0; |
| 1552 | if (!(contig & GIV_SRC_CONTIG)) { | 1395 | if (!(contig & GIV_SRC_CONTIG)) { |
| 1553 | sg_to_link_tbl(req->assoc, assoc_nents, | 1396 | sg_to_sec4_sg(req->assoc, assoc_nents, |
| 1554 | edesc->link_tbl + | 1397 | edesc->sec4_sg + |
| 1555 | link_tbl_index, 0); | 1398 | sec4_sg_index, 0); |
| 1556 | link_tbl_index += assoc_nents; | 1399 | sec4_sg_index += assoc_nents; |
| 1557 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | 1400 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, |
| 1558 | iv_dma, ivsize, 0); | 1401 | iv_dma, ivsize, 0); |
| 1559 | link_tbl_index += 1; | 1402 | sec4_sg_index += 1; |
| 1560 | sg_to_link_tbl_last(req->src, src_nents, | 1403 | sg_to_sec4_sg_last(req->src, src_nents, |
| 1561 | edesc->link_tbl + | 1404 | edesc->sec4_sg + |
| 1562 | link_tbl_index, 0); | 1405 | sec4_sg_index, 0); |
| 1563 | link_tbl_index += src_nents; | 1406 | sec4_sg_index += src_nents; |
| 1564 | } | 1407 | } |
| 1565 | if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { | 1408 | if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { |
| 1566 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | 1409 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, |
| 1567 | iv_dma, ivsize, 0); | 1410 | iv_dma, ivsize, 0); |
| 1568 | link_tbl_index += 1; | 1411 | sec4_sg_index += 1; |
| 1569 | sg_to_link_tbl_last(req->dst, dst_nents, | 1412 | sg_to_sec4_sg_last(req->dst, dst_nents, |
| 1570 | edesc->link_tbl + link_tbl_index, 0); | 1413 | edesc->sec4_sg + sec4_sg_index, 0); |
| 1571 | } | 1414 | } |
| 1572 | 1415 | ||
| 1573 | return edesc; | 1416 | return edesc; |
| @@ -1633,27 +1476,28 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
| 1633 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1476 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
| 1634 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? | 1477 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? |
| 1635 | GFP_KERNEL : GFP_ATOMIC; | 1478 | GFP_KERNEL : GFP_ATOMIC; |
| 1636 | int src_nents, dst_nents = 0, link_tbl_bytes; | 1479 | int src_nents, dst_nents = 0, sec4_sg_bytes; |
| 1637 | struct ablkcipher_edesc *edesc; | 1480 | struct ablkcipher_edesc *edesc; |
| 1638 | dma_addr_t iv_dma = 0; | 1481 | dma_addr_t iv_dma = 0; |
| 1639 | bool iv_contig = false; | 1482 | bool iv_contig = false; |
| 1640 | int sgc; | 1483 | int sgc; |
| 1641 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | 1484 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); |
| 1642 | int link_tbl_index; | 1485 | bool src_chained = false, dst_chained = false; |
| 1486 | int sec4_sg_index; | ||
| 1643 | 1487 | ||
| 1644 | src_nents = sg_count(req->src, req->nbytes); | 1488 | src_nents = sg_count(req->src, req->nbytes, &src_chained); |
| 1645 | 1489 | ||
| 1646 | if (unlikely(req->dst != req->src)) | 1490 | if (req->dst != req->src) |
| 1647 | dst_nents = sg_count(req->dst, req->nbytes); | 1491 | dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); |
| 1648 | 1492 | ||
| 1649 | if (likely(req->src == req->dst)) { | 1493 | if (likely(req->src == req->dst)) { |
| 1650 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1494 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
| 1651 | DMA_BIDIRECTIONAL); | 1495 | DMA_BIDIRECTIONAL, src_chained); |
| 1652 | } else { | 1496 | } else { |
| 1653 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1497 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
| 1654 | DMA_TO_DEVICE); | 1498 | DMA_TO_DEVICE, src_chained); |
| 1655 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | 1499 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, |
| 1656 | DMA_FROM_DEVICE); | 1500 | DMA_FROM_DEVICE, dst_chained); |
| 1657 | } | 1501 | } |
| 1658 | 1502 | ||
| 1659 | /* | 1503 | /* |
| @@ -1665,44 +1509,46 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
| 1665 | iv_contig = true; | 1509 | iv_contig = true; |
| 1666 | else | 1510 | else |
| 1667 | src_nents = src_nents ? : 1; | 1511 | src_nents = src_nents ? : 1; |
| 1668 | link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * | 1512 | sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * |
| 1669 | sizeof(struct link_tbl_entry); | 1513 | sizeof(struct sec4_sg_entry); |
| 1670 | 1514 | ||
| 1671 | /* allocate space for base edesc and hw desc commands, link tables */ | 1515 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1672 | edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + | 1516 | edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + |
| 1673 | link_tbl_bytes, GFP_DMA | flags); | 1517 | sec4_sg_bytes, GFP_DMA | flags); |
| 1674 | if (!edesc) { | 1518 | if (!edesc) { |
| 1675 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1519 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1676 | return ERR_PTR(-ENOMEM); | 1520 | return ERR_PTR(-ENOMEM); |
| 1677 | } | 1521 | } |
| 1678 | 1522 | ||
| 1679 | edesc->src_nents = src_nents; | 1523 | edesc->src_nents = src_nents; |
| 1524 | edesc->src_chained = src_chained; | ||
| 1680 | edesc->dst_nents = dst_nents; | 1525 | edesc->dst_nents = dst_nents; |
| 1681 | edesc->link_tbl_bytes = link_tbl_bytes; | 1526 | edesc->dst_chained = dst_chained; |
| 1682 | edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + | 1527 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
| 1683 | desc_bytes; | 1528 | edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + |
| 1529 | desc_bytes; | ||
| 1684 | 1530 | ||
| 1685 | link_tbl_index = 0; | 1531 | sec4_sg_index = 0; |
| 1686 | if (!iv_contig) { | 1532 | if (!iv_contig) { |
| 1687 | sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); | 1533 | dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); |
| 1688 | sg_to_link_tbl_last(req->src, src_nents, | 1534 | sg_to_sec4_sg_last(req->src, src_nents, |
| 1689 | edesc->link_tbl + 1, 0); | 1535 | edesc->sec4_sg + 1, 0); |
| 1690 | link_tbl_index += 1 + src_nents; | 1536 | sec4_sg_index += 1 + src_nents; |
| 1691 | } | 1537 | } |
| 1692 | 1538 | ||
| 1693 | if (unlikely(dst_nents)) { | 1539 | if (dst_nents) { |
| 1694 | sg_to_link_tbl_last(req->dst, dst_nents, | 1540 | sg_to_sec4_sg_last(req->dst, dst_nents, |
| 1695 | edesc->link_tbl + link_tbl_index, 0); | 1541 | edesc->sec4_sg + sec4_sg_index, 0); |
| 1696 | } | 1542 | } |
| 1697 | 1543 | ||
| 1698 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | 1544 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
| 1699 | link_tbl_bytes, DMA_TO_DEVICE); | 1545 | sec4_sg_bytes, DMA_TO_DEVICE); |
| 1700 | edesc->iv_dma = iv_dma; | 1546 | edesc->iv_dma = iv_dma; |
| 1701 | 1547 | ||
| 1702 | #ifdef DEBUG | 1548 | #ifdef DEBUG |
| 1703 | print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", | 1549 | print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ", |
| 1704 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, | 1550 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, |
| 1705 | link_tbl_bytes, 1); | 1551 | sec4_sg_bytes, 1); |
| 1706 | #endif | 1552 | #endif |
| 1707 | 1553 | ||
| 1708 | *iv_contig_out = iv_contig; | 1554 | *iv_contig_out = iv_contig; |
| @@ -2227,7 +2073,7 @@ static int caam_cra_init(struct crypto_tfm *tfm) | |||
| 2227 | * distribute tfms across job rings to ensure in-order | 2073 | * distribute tfms across job rings to ensure in-order |
| 2228 | * crypto request processing per tfm | 2074 | * crypto request processing per tfm |
| 2229 | */ | 2075 | */ |
| 2230 | ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi]; | 2076 | ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; |
| 2231 | 2077 | ||
| 2232 | /* copy descriptor header template value */ | 2078 | /* copy descriptor header template value */ |
| 2233 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; | 2079 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; |
| @@ -2264,7 +2110,6 @@ static void __exit caam_algapi_exit(void) | |||
| 2264 | struct device *ctrldev; | 2110 | struct device *ctrldev; |
| 2265 | struct caam_drv_private *priv; | 2111 | struct caam_drv_private *priv; |
| 2266 | struct caam_crypto_alg *t_alg, *n; | 2112 | struct caam_crypto_alg *t_alg, *n; |
| 2267 | int i, err; | ||
| 2268 | 2113 | ||
| 2269 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 2114 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
| 2270 | if (!dev_node) { | 2115 | if (!dev_node) { |
| @@ -2289,13 +2134,6 @@ static void __exit caam_algapi_exit(void) | |||
| 2289 | list_del(&t_alg->entry); | 2134 | list_del(&t_alg->entry); |
| 2290 | kfree(t_alg); | 2135 | kfree(t_alg); |
| 2291 | } | 2136 | } |
| 2292 | |||
| 2293 | for (i = 0; i < priv->total_jobrs; i++) { | ||
| 2294 | err = caam_jr_deregister(priv->algapi_jr[i]); | ||
| 2295 | if (err < 0) | ||
| 2296 | break; | ||
| 2297 | } | ||
| 2298 | kfree(priv->algapi_jr); | ||
| 2299 | } | 2137 | } |
| 2300 | 2138 | ||
| 2301 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | 2139 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, |
| @@ -2348,7 +2186,7 @@ static int __init caam_algapi_init(void) | |||
| 2348 | { | 2186 | { |
| 2349 | struct device_node *dev_node; | 2187 | struct device_node *dev_node; |
| 2350 | struct platform_device *pdev; | 2188 | struct platform_device *pdev; |
| 2351 | struct device *ctrldev, **jrdev; | 2189 | struct device *ctrldev; |
| 2352 | struct caam_drv_private *priv; | 2190 | struct caam_drv_private *priv; |
| 2353 | int i = 0, err = 0; | 2191 | int i = 0, err = 0; |
| 2354 | 2192 | ||
| @@ -2369,24 +2207,6 @@ static int __init caam_algapi_init(void) | |||
| 2369 | 2207 | ||
| 2370 | INIT_LIST_HEAD(&priv->alg_list); | 2208 | INIT_LIST_HEAD(&priv->alg_list); |
| 2371 | 2209 | ||
| 2372 | jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL); | ||
| 2373 | if (!jrdev) | ||
| 2374 | return -ENOMEM; | ||
| 2375 | |||
| 2376 | for (i = 0; i < priv->total_jobrs; i++) { | ||
| 2377 | err = caam_jr_register(ctrldev, &jrdev[i]); | ||
| 2378 | if (err < 0) | ||
| 2379 | break; | ||
| 2380 | } | ||
| 2381 | if (err < 0 && i == 0) { | ||
| 2382 | dev_err(ctrldev, "algapi error in job ring registration: %d\n", | ||
| 2383 | err); | ||
| 2384 | kfree(jrdev); | ||
| 2385 | return err; | ||
| 2386 | } | ||
| 2387 | |||
| 2388 | priv->num_jrs_for_algapi = i; | ||
| 2389 | priv->algapi_jr = jrdev; | ||
| 2390 | atomic_set(&priv->tfm_count, -1); | 2210 | atomic_set(&priv->tfm_count, -1); |
| 2391 | 2211 | ||
| 2392 | /* register crypto algorithms the device supports */ | 2212 | /* register crypto algorithms the device supports */ |
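The job-ring assignment in caam_cra_init() above boils down to modular arithmetic on a per-tfm counter (initialized to -1 via the atomic_set(&priv->tfm_count, -1) kept in this hunk). A minimal user-space sketch of that round robin; the counter handling is an assumption, only the (tgt_jr / 2) % total_jobrs step is taken verbatim from the code above:

    #include <stdio.h>

    #define TOTAL_JOBRS 2            /* stand-in for priv->total_jobrs */

    static int tfm_count = -1;       /* mirrors atomic_set(&priv->tfm_count, -1) */

    /* Round-robin ring selection; pairs of tfms land on the same ring
     * because of the divide-by-two before the modulo. */
    static int pick_jobring(void)
    {
        int tgt_jr = ++tfm_count;    /* assumed per-tfm counter */
        return (tgt_jr / 2) % TOTAL_JOBRS;
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            printf("tfm %d -> job ring %d\n", i, pick_jobring());
        return 0;
    }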
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c new file mode 100644 index 000000000000..895aaf2bca92 --- /dev/null +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -0,0 +1,1878 @@ | |||
| 1 | /* | ||
| 2 | * caam - Freescale FSL CAAM support for ahash functions of crypto API | ||
| 3 | * | ||
| 4 | * Copyright 2011 Freescale Semiconductor, Inc. | ||
| 5 | * | ||
| 6 | * Based on caamalg.c crypto API driver. | ||
| 7 | * | ||
| 8 | * relationship of digest job descriptor or first job descriptor after init to | ||
| 9 | * shared descriptors: | ||
| 10 | * | ||
| 11 | * --------------- --------------- | ||
| 12 | * | JobDesc #1 |-------------------->| ShareDesc | | ||
| 13 | * | *(packet 1) | | (hashKey) | | ||
| 14 | * --------------- | (operation) | | ||
| 15 | * --------------- | ||
| 16 | * | ||
| 17 | * relationship of subsequent job descriptors to shared descriptors: | ||
| 18 | * | ||
| 19 | * --------------- --------------- | ||
| 20 | * | JobDesc #2 |-------------------->| ShareDesc | | ||
| 21 | * | *(packet 2) | |------------->| (hashKey) | | ||
| 22 | * --------------- | |-------->| (operation) | | ||
| 23 | * . | | | (load ctx2) | | ||
| 24 | * . | | --------------- | ||
| 25 | * --------------- | | | ||
| 26 | * | JobDesc #3 |------| | | ||
| 27 | * | *(packet 3) | | | ||
| 28 | * --------------- | | ||
| 29 | * . | | ||
| 30 | * . | | ||
| 31 | * --------------- | | ||
| 32 | * | JobDesc #4 |------------ | ||
| 33 | * | *(packet 4) | | ||
| 34 | * --------------- | ||
| 35 | * | ||
| 36 | * The SharedDesc never changes for a connection unless rekeyed, but | ||
| 37 | * each packet will likely be in a different place. So all we need | ||
| 38 | * to know to process the packet is where the input is, where the | ||
| 39 | * output goes, and what context we want to process with. Context is | ||
| 40 | * in the SharedDesc, packet references in the JobDesc. | ||
| 41 | * | ||
| 42 | * So, a job desc looks like: | ||
| 43 | * | ||
| 44 | * --------------------- | ||
| 45 | * | Header | | ||
| 46 | * | ShareDesc Pointer | | ||
| 47 | * | SEQ_OUT_PTR | | ||
| 48 | * | (output buffer) | | ||
| 49 | * | (output length) | | ||
| 50 | * | SEQ_IN_PTR | | ||
| 51 | * | (input buffer) | | ||
| 52 | * | (input length) | | ||
| 53 | * --------------------- | ||
| 54 | */ | ||
| 55 | |||
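The descriptor layout in the comment above comes to eight words: five command/length words plus three pointers, which is exactly what DESC_JOB_IO_LEN below budgets (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3). A sketch of that picture with 32-bit pointers for illustration; the field names are mine, and the real words are emitted one at a time by the desc_constr.h helpers used throughout this file:

    #include <stdint.h>

    /* Word-for-word picture of the job descriptor described above
     * (illustrative only; pointer width is SoC-dependent). */
    struct caam_job_desc_picture {
        uint32_t header;       /* job descriptor header            */
        uint32_t sh_desc_ptr;  /* pointer to the shared descriptor */
        uint32_t seq_out_ptr;  /* SEQ_OUT_PTR command word         */
        uint32_t out_buf;      /* output buffer address            */
        uint32_t out_len;      /* output length                    */
        uint32_t seq_in_ptr;   /* SEQ_IN_PTR command word          */
        uint32_t in_buf;       /* input buffer address             */
        uint32_t in_len;       /* input length                     */
    };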
| 56 | #include "compat.h" | ||
| 57 | |||
| 58 | #include "regs.h" | ||
| 59 | #include "intern.h" | ||
| 60 | #include "desc_constr.h" | ||
| 61 | #include "jr.h" | ||
| 62 | #include "error.h" | ||
| 63 | #include "sg_sw_sec4.h" | ||
| 64 | #include "key_gen.h" | ||
| 65 | |||
| 66 | #define CAAM_CRA_PRIORITY 3000 | ||
| 67 | |||
| 68 | /* max hash key is max split key size */ | ||
| 69 | #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) | ||
| 70 | |||
| 71 | #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE | ||
| 72 | #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE | ||
| 73 | |||
| 74 | /* length of descriptors text */ | ||
| 75 | #define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) | ||
| 76 | |||
| 77 | #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) | ||
| 78 | #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) | ||
| 79 | #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) | ||
| 80 | #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) | ||
| 81 | #define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) | ||
| 82 | #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) | ||
| 83 | |||
| 84 | #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ | ||
| 85 | CAAM_MAX_HASH_KEY_SIZE) | ||
| 86 | #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) | ||
| 87 | |||
| 88 | /* caam context sizes for hashes: running digest + 8 */ | ||
| 89 | #define HASH_MSG_LEN 8 | ||
| 90 | #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) | ||
| 91 | |||
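The sizing macros above are plain arithmetic. A quick user-space check for the worst case the driver provisions (SHA-512, 64-byte digest), assuming CAAM_CMD_SZ is 4 bytes, i.e. one 32-bit descriptor word:

    #include <assert.h>
    #include <stdio.h>

    #define CAAM_CMD_SZ              4    /* assumed: one 32-bit command word */
    #define SHA512_DIGEST_SIZE       64
    #define CAAM_MAX_HASH_KEY_SIZE   (SHA512_DIGEST_SIZE * 2)            /* 128 */

    #define DESC_AHASH_BASE          (4 * CAAM_CMD_SZ)                   /* 16 */
    #define DESC_AHASH_FINAL_LEN     (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) /* 36 */
    #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + CAAM_MAX_HASH_KEY_SIZE)
    #define DESC_HASH_MAX_USED_LEN   (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

    int main(void)
    {
        /* 36 + 128 = 164 bytes -> 41 32-bit words per sh_desc_* array */
        printf("max %d bytes = %d words\n",
               DESC_HASH_MAX_USED_BYTES, DESC_HASH_MAX_USED_LEN);
        assert(DESC_HASH_MAX_USED_LEN == 41);
        return 0;
    }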
| 92 | #ifdef DEBUG | ||
| 93 | /* for print_hex_dumps with line references */ | ||
| 94 | #define xstr(s) str(s) | ||
| 95 | #define str(s) #s | ||
| 96 | #define debug(format, arg...) printk(format, arg) | ||
| 97 | #else | ||
| 98 | #define debug(format, arg...) | ||
| 99 | #endif | ||
| 100 | |||
| 101 | /* ahash per-session context */ | ||
| 102 | struct caam_hash_ctx { | ||
| 103 | struct device *jrdev; | ||
| 104 | u32 sh_desc_update[DESC_HASH_MAX_USED_LEN]; | ||
| 105 | u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN]; | ||
| 106 | u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN]; | ||
| 107 | u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN]; | ||
| 108 | u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN]; | ||
| 109 | dma_addr_t sh_desc_update_dma; | ||
| 110 | dma_addr_t sh_desc_update_first_dma; | ||
| 111 | dma_addr_t sh_desc_fin_dma; | ||
| 112 | dma_addr_t sh_desc_digest_dma; | ||
| 113 | dma_addr_t sh_desc_finup_dma; | ||
| 114 | u32 alg_type; | ||
| 115 | u32 alg_op; | ||
| 116 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; | ||
| 117 | dma_addr_t key_dma; | ||
| 118 | int ctx_len; | ||
| 119 | unsigned int split_key_len; | ||
| 120 | unsigned int split_key_pad_len; | ||
| 121 | }; | ||
| 122 | |||
| 123 | /* ahash state */ | ||
| 124 | struct caam_hash_state { | ||
| 125 | dma_addr_t buf_dma; | ||
| 126 | dma_addr_t ctx_dma; | ||
| 127 | u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; | ||
| 128 | int buflen_0; | ||
| 129 | u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; | ||
| 130 | int buflen_1; | ||
| 131 | u8 caam_ctx[MAX_CTX_LEN]; | ||
| 132 | int (*update)(struct ahash_request *req); | ||
| 133 | int (*final)(struct ahash_request *req); | ||
| 134 | int (*finup)(struct ahash_request *req); | ||
| 135 | int current_buf; | ||
| 136 | }; | ||
| 137 | |||
| 138 | /* Common job descriptor seq in/out ptr routines */ | ||
| 139 | |||
| 140 | /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ | ||
| 141 | static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, | ||
| 142 | struct caam_hash_state *state, | ||
| 143 | int ctx_len) | ||
| 144 | { | ||
| 145 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, | ||
| 146 | ctx_len, DMA_FROM_DEVICE); | ||
| 147 | append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); | ||
| 148 | } | ||
| 149 | |||
| 150 | /* Map req->result, and append seq_out_ptr command that points to it */ | ||
| 151 | static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, | ||
| 152 | u8 *result, int digestsize) | ||
| 153 | { | ||
| 154 | dma_addr_t dst_dma; | ||
| 155 | |||
| 156 | dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); | ||
| 157 | append_seq_out_ptr(desc, dst_dma, digestsize, 0); | ||
| 158 | |||
| 159 | return dst_dma; | ||
| 160 | } | ||
| 161 | |||
| 162 | /* Map current buffer in state and put it in link table */ | ||
| 163 | static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, | ||
| 164 | struct sec4_sg_entry *sec4_sg, | ||
| 165 | u8 *buf, int buflen) | ||
| 166 | { | ||
| 167 | dma_addr_t buf_dma; | ||
| 168 | |||
| 169 | buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | ||
| 170 | dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0); | ||
| 171 | |||
| 172 | return buf_dma; | ||
| 173 | } | ||
| 174 | |||
| 175 | /* Map req->src and put it in link table */ | ||
| 176 | static inline void src_map_to_sec4_sg(struct device *jrdev, | ||
| 177 | struct scatterlist *src, int src_nents, | ||
| 178 | struct sec4_sg_entry *sec4_sg, | ||
| 179 | bool chained) | ||
| 180 | { | ||
| 181 | dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained); | ||
| 182 | sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0); | ||
| 183 | } | ||
| 184 | |||
| 185 | /* | ||
| 186 | * Only put the buffer in the link table if it contains data; either | ||
| 187 | * way, a previously used mapping must be unmapped first. | ||
| 188 | */ | ||
| 189 | static inline dma_addr_t | ||
| 190 | try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, | ||
| 191 | u8 *buf, dma_addr_t buf_dma, int buflen, | ||
| 192 | int last_buflen) | ||
| 193 | { | ||
| 194 | if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) | ||
| 195 | dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); | ||
| 196 | if (buflen) | ||
| 197 | buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); | ||
| 198 | else | ||
| 199 | buf_dma = 0; | ||
| 200 | |||
| 201 | return buf_dma; | ||
| 202 | } | ||
| 203 | |||
| 204 | /* Map state->caam_ctx, and add it to link table */ | ||
| 205 | static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, | ||
| 206 | struct caam_hash_state *state, | ||
| 207 | int ctx_len, | ||
| 208 | struct sec4_sg_entry *sec4_sg, | ||
| 209 | u32 flag) | ||
| 210 | { | ||
| 211 | state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); | ||
| 212 | dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); | ||
| 213 | } | ||
| 214 | |||
| 215 | /* Common shared descriptor commands */ | ||
| 216 | static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) | ||
| 217 | { | ||
| 218 | append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, | ||
| 219 | ctx->split_key_len, CLASS_2 | | ||
| 220 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 221 | } | ||
| 222 | |||
| 223 | /* Append key if it has been set */ | ||
| 224 | static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) | ||
| 225 | { | ||
| 226 | u32 *key_jump_cmd; | ||
| 227 | |||
| 228 | init_sh_desc(desc, HDR_SHARE_WAIT); | ||
| 229 | |||
| 230 | if (ctx->split_key_len) { | ||
| 231 | /* Skip if already shared */ | ||
| 232 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 233 | JUMP_COND_SHRD); | ||
| 234 | |||
| 235 | append_key_ahash(desc, ctx); | ||
| 236 | |||
| 237 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 238 | } | ||
| 239 | |||
| 240 | /* Propagate errors from shared to job descriptor */ | ||
| 241 | append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); | ||
| 242 | } | ||
| 243 | |||
| 244 | /* | ||
| 245 | * For ahash, read data from seqin following state->caam_ctx, | ||
| 246 | * and write the resulting class 2 context to seqout, which may be | ||
| 247 | * state->caam_ctx or req->result | ||
| 248 | */ | ||
| 249 | static inline void ahash_append_load_str(u32 *desc, int digestsize) | ||
| 250 | { | ||
| 251 | /* Calculate remaining bytes to read */ | ||
| 252 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 253 | |||
| 254 | /* Read remaining bytes */ | ||
| 255 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | | ||
| 256 | FIFOLD_TYPE_MSG | KEY_VLF); | ||
| 257 | |||
| 258 | /* Store class2 context bytes */ | ||
| 259 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | | ||
| 260 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 261 | } | ||
| 262 | |||
| 263 | /* | ||
| 264 | * For ahash update, final and finup, import context, read and write to seqout | ||
| 265 | */ | ||
| 266 | static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, | ||
| 267 | int digestsize, | ||
| 268 | struct caam_hash_ctx *ctx) | ||
| 269 | { | ||
| 270 | init_sh_desc_key_ahash(desc, ctx); | ||
| 271 | |||
| 272 | /* Import context from software */ | ||
| 273 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
| 274 | LDST_CLASS_2_CCB | ctx->ctx_len); | ||
| 275 | |||
| 276 | /* Class 2 operation */ | ||
| 277 | append_operation(desc, op | state | OP_ALG_ENCRYPT); | ||
| 278 | |||
| 279 | /* | ||
| 280 | * Load from buf and/or src and write to req->result or state->context | ||
| 281 | */ | ||
| 282 | ahash_append_load_str(desc, digestsize); | ||
| 283 | } | ||
| 284 | |||
| 285 | /* For ahash first update (init) and digest, read and write to seqout */ | ||
| 286 | static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, | ||
| 287 | int digestsize, struct caam_hash_ctx *ctx) | ||
| 288 | { | ||
| 289 | init_sh_desc_key_ahash(desc, ctx); | ||
| 290 | |||
| 291 | /* Class 2 operation */ | ||
| 292 | append_operation(desc, op | state | OP_ALG_ENCRYPT); | ||
| 293 | |||
| 294 | /* | ||
| 295 | * Load from buf and/or src and write to req->result or state->context | ||
| 296 | */ | ||
| 297 | ahash_append_load_str(desc, digestsize); | ||
| 298 | } | ||
| 299 | |||
| 300 | static int ahash_set_sh_desc(struct crypto_ahash *ahash) | ||
| 301 | { | ||
| 302 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 303 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 304 | struct device *jrdev = ctx->jrdev; | ||
| 305 | u32 have_key = 0; | ||
| 306 | u32 *desc; | ||
| 307 | |||
| 308 | if (ctx->split_key_len) | ||
| 309 | have_key = OP_ALG_AAI_HMAC_PRECOMP; | ||
| 310 | |||
| 311 | /* ahash_update shared descriptor */ | ||
| 312 | desc = ctx->sh_desc_update; | ||
| 313 | |||
| 314 | init_sh_desc(desc, HDR_SHARE_WAIT); | ||
| 315 | |||
| 316 | /* Import context from software */ | ||
| 317 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
| 318 | LDST_CLASS_2_CCB | ctx->ctx_len); | ||
| 319 | |||
| 320 | /* Class 2 operation */ | ||
| 321 | append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | | ||
| 322 | OP_ALG_ENCRYPT); | ||
| 323 | |||
| 324 | /* Load data and write to result or context */ | ||
| 325 | ahash_append_load_str(desc, ctx->ctx_len); | ||
| 326 | |||
| 327 | ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
| 328 | DMA_TO_DEVICE); | ||
| 329 | if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { | ||
| 330 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
| 331 | return -ENOMEM; | ||
| 332 | } | ||
| 333 | #ifdef DEBUG | ||
| 334 | print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ", | ||
| 335 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 336 | #endif | ||
| 337 | |||
| 338 | /* ahash_update_first shared descriptor */ | ||
| 339 | desc = ctx->sh_desc_update_first; | ||
| 340 | |||
| 341 | ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, | ||
| 342 | ctx->ctx_len, ctx); | ||
| 343 | |||
| 344 | ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, | ||
| 345 | desc_bytes(desc), | ||
| 346 | DMA_TO_DEVICE); | ||
| 347 | if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { | ||
| 348 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
| 349 | return -ENOMEM; | ||
| 350 | } | ||
| 351 | #ifdef DEBUG | ||
| 352 | print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ", | ||
| 353 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 354 | #endif | ||
| 355 | |||
| 356 | /* ahash_final shared descriptor */ | ||
| 357 | desc = ctx->sh_desc_fin; | ||
| 358 | |||
| 359 | ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, | ||
| 360 | OP_ALG_AS_FINALIZE, digestsize, ctx); | ||
| 361 | |||
| 362 | ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
| 363 | DMA_TO_DEVICE); | ||
| 364 | if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { | ||
| 365 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
| 366 | return -ENOMEM; | ||
| 367 | } | ||
| 368 | #ifdef DEBUG | ||
| 369 | print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ", | ||
| 370 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 371 | desc_bytes(desc), 1); | ||
| 372 | #endif | ||
| 373 | |||
| 374 | /* ahash_finup shared descriptor */ | ||
| 375 | desc = ctx->sh_desc_finup; | ||
| 376 | |||
| 377 | ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, | ||
| 378 | OP_ALG_AS_FINALIZE, digestsize, ctx); | ||
| 379 | |||
| 380 | ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
| 381 | DMA_TO_DEVICE); | ||
| 382 | if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { | ||
| 383 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
| 384 | return -ENOMEM; | ||
| 385 | } | ||
| 386 | #ifdef DEBUG | ||
| 387 | print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ", | ||
| 388 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 389 | desc_bytes(desc), 1); | ||
| 390 | #endif | ||
| 391 | |||
| 392 | /* ahash_digest shared descriptor */ | ||
| 393 | desc = ctx->sh_desc_digest; | ||
| 394 | |||
| 395 | ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, | ||
| 396 | digestsize, ctx); | ||
| 397 | |||
| 398 | ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, | ||
| 399 | desc_bytes(desc), | ||
| 400 | DMA_TO_DEVICE); | ||
| 401 | if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { | ||
| 402 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
| 403 | return -ENOMEM; | ||
| 404 | } | ||
| 405 | #ifdef DEBUG | ||
| 406 | print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ", | ||
| 407 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 408 | desc_bytes(desc), 1); | ||
| 409 | #endif | ||
| 410 | |||
| 411 | return 0; | ||
| 412 | } | ||
| 413 | |||
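For orientation, the five shared descriptors built by this function map onto the ahash entry points as follows (summary mine, read off the OP_ALG_AS_* states above and the sh_desc_* pointers used by the request functions below):

    /*
     *   sh_desc_update        .update with a live context   OP_ALG_AS_UPDATE
     *   sh_desc_update_first  first .update after .init     OP_ALG_AS_INIT
     *   sh_desc_fin           .final                        OP_ALG_AS_FINALIZE
     *   sh_desc_finup         .finup                        OP_ALG_AS_FINALIZE
     *   sh_desc_digest        .digest, one-shot             OP_ALG_AS_INITFINAL
     */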
| 414 | static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, | ||
| 415 | u32 keylen) | ||
| 416 | { | ||
| 417 | return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, | ||
| 418 | ctx->split_key_pad_len, key_in, keylen, | ||
| 419 | ctx->alg_op); | ||
| 420 | } | ||
| 421 | |||
| 422 | /* Hash the key down to digestsize when it is longer than the block size */ | ||
| 423 | static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | ||
| 424 | u32 *keylen, u8 *key_out, u32 digestsize) | ||
| 425 | { | ||
| 426 | struct device *jrdev = ctx->jrdev; | ||
| 427 | u32 *desc; | ||
| 428 | struct split_key_result result; | ||
| 429 | dma_addr_t src_dma, dst_dma; | ||
| 430 | int ret = 0; | ||
| 431 | |||
| 432 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | ||
| | if (!desc) | ||
| | return -ENOMEM; | ||
| 433 |||
| 434 | init_job_desc(desc, 0); | ||
| 435 | |||
| 436 | src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, | ||
| 437 | DMA_TO_DEVICE); | ||
| 438 | if (dma_mapping_error(jrdev, src_dma)) { | ||
| 439 | dev_err(jrdev, "unable to map key input memory\n"); | ||
| 440 | kfree(desc); | ||
| 441 | return -ENOMEM; | ||
| 442 | } | ||
| 443 | dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, | ||
| 444 | DMA_FROM_DEVICE); | ||
| 445 | if (dma_mapping_error(jrdev, dst_dma)) { | ||
| 446 | dev_err(jrdev, "unable to map key output memory\n"); | ||
| 447 | dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); | ||
| 448 | kfree(desc); | ||
| 449 | return -ENOMEM; | ||
| 450 | } | ||
| 451 | |||
| 452 | /* Job descriptor to perform unkeyed hash on key_in */ | ||
| 453 | append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | | ||
| 454 | OP_ALG_AS_INITFINAL); | ||
| 455 | append_seq_in_ptr(desc, src_dma, *keylen, 0); | ||
| 456 | append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | | ||
| 457 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); | ||
| 458 | append_seq_out_ptr(desc, dst_dma, digestsize, 0); | ||
| 459 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | | ||
| 460 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 461 | |||
| 462 | #ifdef DEBUG | ||
| 463 | print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ", | ||
| 464 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); | ||
| 465 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 466 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 467 | #endif | ||
| 468 | |||
| 469 | result.err = 0; | ||
| 470 | init_completion(&result.completion); | ||
| 471 | |||
| 472 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | ||
| 473 | if (!ret) { | ||
| 474 | /* in progress */ | ||
| 475 | wait_for_completion_interruptible(&result.completion); | ||
| 476 | ret = result.err; | ||
| 477 | #ifdef DEBUG | ||
| 478 | print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ", | ||
| 479 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, | ||
| 480 | digestsize, 1); | ||
| 481 | #endif | ||
| 482 | } | ||
| 483 | dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); | ||
| 484 | dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); | ||
| 485 |||
| 486 | *keylen = digestsize; | ||
| 487 | |||
| 488 | kfree(desc); | ||
| 489 | |||
| 490 | return ret; | ||
| 491 | } | ||
| 492 | |||
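hash_digest_key() uses the driver's synchronous submit-and-wait pattern: enqueue a one-shot job, block on a completion, and read back the status the callback recorded. Condensed below (struct split_key_result and split_key_done are presumably declared in key_gen.h, per the includes at the top of this file):

    /*
     *   result.err = 0;
     *   init_completion(&result.completion);
     *   ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
     *   if (!ret) {
     *           wait_for_completion_interruptible(&result.completion);
     *           ret = result.err;    <- hardware status set by the callback
     *   }
     */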
| 493 | static int ahash_setkey(struct crypto_ahash *ahash, | ||
| 494 | const u8 *key, unsigned int keylen) | ||
| 495 | { | ||
| 496 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | ||
| 497 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | ||
| 498 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 499 | struct device *jrdev = ctx->jrdev; | ||
| 500 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); | ||
| 501 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 502 | int ret = 0; | ||
| 503 | u8 *hashed_key = NULL; | ||
| 504 | |||
| 505 | #ifdef DEBUG | ||
| 506 | printk(KERN_ERR "keylen %d\n", keylen); | ||
| 507 | #endif | ||
| 508 | |||
| 509 | if (keylen > blocksize) { | ||
| 510 | hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL | | ||
| 511 | GFP_DMA); | ||
| 512 | if (!hashed_key) | ||
| 513 | return -ENOMEM; | ||
| 514 | ret = hash_digest_key(ctx, key, &keylen, hashed_key, | ||
| 515 | digestsize); | ||
| 516 | if (ret) | ||
| 517 | goto badkey; | ||
| 518 | key = hashed_key; | ||
| 519 | } | ||
| 520 | |||
| 521 | /* Pick class 2 key length from algorithm submask */ | ||
| 522 | ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | ||
| 523 | OP_ALG_ALGSEL_SHIFT] * 2; | ||
| 524 | ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); | ||
| 525 | |||
| 526 | #ifdef DEBUG | ||
| 527 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | ||
| 528 | ctx->split_key_len, ctx->split_key_pad_len); | ||
| 529 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | ||
| 530 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 531 | #endif | ||
| 532 | |||
| 533 | ret = gen_split_hash_key(ctx, key, keylen); | ||
| 534 | if (ret) | ||
| 535 | goto badkey; | ||
| 536 | |||
| 537 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | ||
| 538 | DMA_TO_DEVICE); | ||
| 539 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | ||
| 540 | dev_err(jrdev, "unable to map key i/o memory\n"); | ||
| 541 | return -ENOMEM; | ||
| 542 | } | ||
| 543 | #ifdef DEBUG | ||
| 544 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
| 545 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | ||
| 546 | ctx->split_key_pad_len, 1); | ||
| 547 | #endif | ||
| 548 | |||
| 549 | ret = ahash_set_sh_desc(ahash); | ||
| 550 | if (ret) { | ||
| 551 | dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, | ||
| 552 | DMA_TO_DEVICE); | ||
| 553 | } | ||
| 554 | |||
| 555 | kfree(hashed_key); | ||
| 556 | return ret; | ||
| 557 | badkey: | ||
| 558 | kfree(hashed_key); | ||
| 559 | crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 560 | return -EINVAL; | ||
| 561 | } | ||
| 562 | |||
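The split-key sizing in ahash_setkey() is worth tabulating: the MDHA pad size for the algorithm is doubled (the split key carries both halves of the HMAC precomputation) and then rounded up to a 16-byte boundary. A user-space rendering, where ALIGN16 stands in for the kernel's ALIGN(x, 16):

    #include <stdio.h>

    #define ALIGN16(x) (((x) + 15) & ~15)   /* stand-in for ALIGN(x, 16) */

    int main(void)
    {
        /* MDHA pad sizes: MD5, SHA1, SHA224, SHA256, SHA384, SHA512 */
        static const int mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        static const char *names[] = { "md5", "sha1", "sha224", "sha256",
                                       "sha384", "sha512" };

        for (int i = 0; i < 6; i++) {
            int split_key_len     = mdpadlen[i] * 2;   /* both HMAC halves */
            int split_key_pad_len = ALIGN16(split_key_len);
            printf("%-6s split %3d pad %3d\n", names[i],
                   split_key_len, split_key_pad_len);
        }
        return 0;
    }

(So sha1 needs a 40-byte split key padded to 48, while sha256's 64 bytes are already aligned.)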
| 563 | /* | ||
| 564 | * ahash_edesc - s/w-extended ahash descriptor | ||
| 565 | * @dst_dma: physical mapped address of req->result | ||
| 566 | * @sec4_sg_dma: physical mapped address of h/w link table | ||
| 567 | * @chained: if source is chained | ||
| 568 | * @src_nents: number of segments in input scatterlist | ||
| 569 | * @sec4_sg_bytes: length of dma mapped sec4_sg space | ||
| 570 | * @sec4_sg: pointer to h/w link table | ||
| 571 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | ||
| 572 | */ | ||
| 573 | struct ahash_edesc { | ||
| 574 | dma_addr_t dst_dma; | ||
| 575 | dma_addr_t sec4_sg_dma; | ||
| 576 | bool chained; | ||
| 577 | int src_nents; | ||
| 578 | int sec4_sg_bytes; | ||
| 579 | struct sec4_sg_entry *sec4_sg; | ||
| 580 | u32 hw_desc[0]; | ||
| 581 | }; | ||
| 582 | |||
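Every request below carves its extended descriptor out of a single kmalloc(): the bookkeeping struct, then DESC_JOB_IO_LEN bytes of job descriptor, then the link table, which is why edesc->sec4_sg is set to (void *)edesc + sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN throughout. As a picture (layout comment mine, sizes the driver's):

    /*
     *   +---------------------+  <- edesc (kmalloc'd, GFP_DMA)
     *   | struct ahash_edesc  |     bookkeeping fields above
     *   +---------------------+  <- edesc->hw_desc
     *   | job descriptor      |     up to DESC_JOB_IO_LEN bytes
     *   +---------------------+  <- edesc->sec4_sg
     *   | sec4 link table     |     sec4_sg_bytes
     *   +---------------------+
     */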
| 583 | static inline void ahash_unmap(struct device *dev, | ||
| 584 | struct ahash_edesc *edesc, | ||
| 585 | struct ahash_request *req, int dst_len) | ||
| 586 | { | ||
| 587 | if (edesc->src_nents) | ||
| 588 | dma_unmap_sg_chained(dev, req->src, edesc->src_nents, | ||
| 589 | DMA_TO_DEVICE, edesc->chained); | ||
| 590 | if (edesc->dst_dma) | ||
| 591 | dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); | ||
| 592 | |||
| 593 | if (edesc->sec4_sg_bytes) | ||
| 594 | dma_unmap_single(dev, edesc->sec4_sg_dma, | ||
| 595 | edesc->sec4_sg_bytes, DMA_TO_DEVICE); | ||
| 596 | } | ||
| 597 | |||
| 598 | static inline void ahash_unmap_ctx(struct device *dev, | ||
| 599 | struct ahash_edesc *edesc, | ||
| 600 | struct ahash_request *req, int dst_len, u32 flag) | ||
| 601 | { | ||
| 602 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 603 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 604 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 605 | |||
| 606 | if (state->ctx_dma) | ||
| 607 | dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); | ||
| 608 | ahash_unmap(dev, edesc, req, dst_len); | ||
| 609 | } | ||
| 610 | |||
| 611 | static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | ||
| 612 | void *context) | ||
| 613 | { | ||
| 614 | struct ahash_request *req = context; | ||
| 615 | struct ahash_edesc *edesc; | ||
| 616 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 617 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 618 | #ifdef DEBUG | ||
| 619 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 620 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 621 | |||
| 622 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 623 | #endif | ||
| 624 | |||
| 625 | edesc = (struct ahash_edesc *)((char *)desc - | ||
| 626 | offsetof(struct ahash_edesc, hw_desc)); | ||
| 627 | if (err) { | ||
| 628 | char tmp[CAAM_ERROR_STR_MAX]; | ||
| 629 | |||
| 630 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
| 631 | } | ||
| 632 | |||
| 633 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
| 634 | kfree(edesc); | ||
| 635 | |||
| 636 | #ifdef DEBUG | ||
| 637 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | ||
| 638 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | ||
| 639 | ctx->ctx_len, 1); | ||
| 640 | if (req->result) | ||
| 641 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | ||
| 642 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | ||
| 643 | digestsize, 1); | ||
| 644 | #endif | ||
| 645 | |||
| 646 | req->base.complete(&req->base, err); | ||
| 647 | } | ||
| 648 | |||
| 649 | static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | ||
| 650 | void *context) | ||
| 651 | { | ||
| 652 | struct ahash_request *req = context; | ||
| 653 | struct ahash_edesc *edesc; | ||
| 654 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 655 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 656 | #ifdef DEBUG | ||
| 657 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 658 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 659 | |||
| 660 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 661 | #endif | ||
| 662 | |||
| 663 | edesc = (struct ahash_edesc *)((char *)desc - | ||
| 664 | offsetof(struct ahash_edesc, hw_desc)); | ||
| 665 | if (err) { | ||
| 666 | char tmp[CAAM_ERROR_STR_MAX]; | ||
| 667 | |||
| 668 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
| 669 | } | ||
| 670 | |||
| 671 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); | ||
| 672 | kfree(edesc); | ||
| 673 | |||
| 674 | #ifdef DEBUG | ||
| 675 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | ||
| 676 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | ||
| 677 | ctx->ctx_len, 1); | ||
| 678 | if (req->result) | ||
| 679 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | ||
| 680 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | ||
| 681 | digestsize, 1); | ||
| 682 | #endif | ||
| 683 | |||
| 684 | req->base.complete(&req->base, err); | ||
| 685 | } | ||
| 686 | |||
| 687 | static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | ||
| 688 | void *context) | ||
| 689 | { | ||
| 690 | struct ahash_request *req = context; | ||
| 691 | struct ahash_edesc *edesc; | ||
| 692 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 693 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 694 | #ifdef DEBUG | ||
| 695 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 696 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 697 | |||
| 698 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 699 | #endif | ||
| 700 | |||
| 701 | edesc = (struct ahash_edesc *)((char *)desc - | ||
| 702 | offsetof(struct ahash_edesc, hw_desc)); | ||
| 703 | if (err) { | ||
| 704 | char tmp[CAAM_ERROR_STR_MAX]; | ||
| 705 | |||
| 706 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
| 707 | } | ||
| 708 | |||
| 709 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
| 710 | kfree(edesc); | ||
| 711 | |||
| 712 | #ifdef DEBUG | ||
| 713 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | ||
| 714 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | ||
| 715 | ctx->ctx_len, 1); | ||
| 716 | if (req->result) | ||
| 717 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | ||
| 718 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | ||
| 719 | digestsize, 1); | ||
| 720 | #endif | ||
| 721 | |||
| 722 | req->base.complete(&req->base, err); | ||
| 723 | } | ||
| 724 | |||
| 725 | static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | ||
| 726 | void *context) | ||
| 727 | { | ||
| 728 | struct ahash_request *req = context; | ||
| 729 | struct ahash_edesc *edesc; | ||
| 730 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 731 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 732 | #ifdef DEBUG | ||
| 733 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 734 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 735 | |||
| 736 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 737 | #endif | ||
| 738 | |||
| 739 | edesc = (struct ahash_edesc *)((char *)desc - | ||
| 740 | offsetof(struct ahash_edesc, hw_desc)); | ||
| 741 | if (err) { | ||
| 742 | char tmp[CAAM_ERROR_STR_MAX]; | ||
| 743 | |||
| 744 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
| 745 | } | ||
| 746 | |||
| 747 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); | ||
| 748 | kfree(edesc); | ||
| 749 | |||
| 750 | #ifdef DEBUG | ||
| 751 | print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", | ||
| 752 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | ||
| 753 | ctx->ctx_len, 1); | ||
| 754 | if (req->result) | ||
| 755 | print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", | ||
| 756 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | ||
| 757 | digestsize, 1); | ||
| 758 | #endif | ||
| 759 | |||
| 760 | req->base.complete(&req->base, err); | ||
| 761 | } | ||
| 762 | |||
| 763 | /* submit update job descriptor */ | ||
| 764 | static int ahash_update_ctx(struct ahash_request *req) | ||
| 765 | { | ||
| 766 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 767 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 768 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 769 | struct device *jrdev = ctx->jrdev; | ||
| 770 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 771 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 772 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
| 773 | int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; | ||
| 774 | u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; | ||
| 775 | int *next_buflen = state->current_buf ? &state->buflen_0 : | ||
| 776 | &state->buflen_1; | ||
| | int last_buflen; | ||
| 777 | int in_len = *buflen + req->nbytes, to_hash; | ||
| 778 | u32 *sh_desc = ctx->sh_desc_update, *desc; | ||
| 779 | dma_addr_t ptr = ctx->sh_desc_update_dma; | ||
| 780 | int src_nents, sec4_sg_bytes, sec4_sg_src_index; | ||
| 781 | struct ahash_edesc *edesc; | ||
| 782 | bool chained = false; | ||
| 783 | int ret = 0; | ||
| 784 | int sh_len; | ||
| 785 | |||
| 786 | last_buflen = *next_buflen; | ||
| 787 | *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); | ||
| 788 | to_hash = in_len - *next_buflen; | ||
| 789 | |||
| 790 | if (to_hash) { | ||
| 791 | src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), | ||
| 792 | &chained); | ||
| 793 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); | ||
| 794 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | ||
| 795 | sizeof(struct sec4_sg_entry); | ||
| 796 | |||
| 797 | /* | ||
| 798 | * allocate space for base edesc and hw desc commands, | ||
| 799 | * link tables | ||
| 800 | */ | ||
| 801 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
| 802 | sec4_sg_bytes, GFP_DMA | flags); | ||
| 803 | if (!edesc) { | ||
| 804 | dev_err(jrdev, | ||
| 805 | "could not allocate extended descriptor\n"); | ||
| 806 | return -ENOMEM; | ||
| 807 | } | ||
| 808 | |||
| 809 | edesc->src_nents = src_nents; | ||
| 810 | edesc->chained = chained; | ||
| 811 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 812 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 813 | DESC_JOB_IO_LEN; | ||
| 814 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 815 | sec4_sg_bytes, | ||
| 816 | DMA_TO_DEVICE); | ||
| 817 | |||
| 818 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, | ||
| 819 | edesc->sec4_sg, DMA_BIDIRECTIONAL); | ||
| 820 | |||
| 821 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, | ||
| 822 | edesc->sec4_sg + 1, | ||
| 823 | buf, state->buf_dma, | ||
| 824 | *buflen, last_buflen); | ||
| 825 | |||
| 826 | if (src_nents) { | ||
| 827 | src_map_to_sec4_sg(jrdev, req->src, src_nents, | ||
| 828 | edesc->sec4_sg + sec4_sg_src_index, | ||
| 829 | chained); | ||
| 830 | if (*next_buflen) { | ||
| 831 | sg_copy_part(next_buf, req->src, to_hash - | ||
| 832 | *buflen, req->nbytes); | ||
| 833 | state->current_buf = !state->current_buf; | ||
| 834 | } | ||
| 835 | } else { | ||
| 836 | (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= | ||
| 837 | SEC4_SG_LEN_FIN; | ||
| 838 | } | ||
| 839 | |||
| 840 | sh_len = desc_len(sh_desc); | ||
| 841 | desc = edesc->hw_desc; | ||
| 842 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
| 843 | HDR_REVERSE); | ||
| 844 | |||
| 845 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + | ||
| 846 | to_hash, LDST_SGF); | ||
| 847 | |||
| 848 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); | ||
| 849 | |||
| 850 | #ifdef DEBUG | ||
| 851 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 852 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 853 | desc_bytes(desc), 1); | ||
| 854 | #endif | ||
| 855 | |||
| 856 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); | ||
| 857 | if (!ret) { | ||
| 858 | ret = -EINPROGRESS; | ||
| 859 | } else { | ||
| 860 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | ||
| 861 | DMA_BIDIRECTIONAL); | ||
| 862 | kfree(edesc); | ||
| 863 | } | ||
| 864 | } else if (*next_buflen) { | ||
| 865 | sg_copy(buf + *buflen, req->src, req->nbytes); | ||
| 866 | *buflen = *next_buflen; | ||
| 867 | *next_buflen = last_buflen; | ||
| 868 | } | ||
| 869 | #ifdef DEBUG | ||
| 870 | print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", | ||
| 871 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | ||
| 872 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | ||
| 873 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | ||
| 874 | *next_buflen, 1); | ||
| 875 | #endif | ||
| 876 | |||
| 877 | return ret; | ||
| 878 | } | ||
| 879 | |||
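The update path's split between "hash now" and "carry over" is the usual power-of-two remainder trick: in_len & (blocksize - 1) keeps the sub-block tail in next_buf and the block-aligned rest goes to the engine. A stand-alone check of the arithmetic (valid because all MD5/SHA block sizes are powers of two):

    #include <assert.h>
    #include <stdio.h>

    /* mirrors the buffering arithmetic in ahash_update_ctx() above */
    static void split(int buflen, int nbytes, int blocksize)
    {
        int in_len      = buflen + nbytes;
        int next_buflen = in_len & (blocksize - 1); /* tail kept in next_buf */
        int to_hash     = in_len - next_buflen;     /* whole blocks for CAAM */

        assert(to_hash % blocksize == 0);
        printf("buffered %d + new %d -> hash %d, carry %d\n",
               buflen, nbytes, to_hash, next_buflen);
    }

    int main(void)
    {
        split(10, 100, 64);   /* 110 bytes -> hash 64, carry 46 */
        split(0, 64, 64);     /* exact multiple -> hash 64, carry 0 */
        return 0;
    }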
| 880 | static int ahash_final_ctx(struct ahash_request *req) | ||
| 881 | { | ||
| 882 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 883 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 884 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 885 | struct device *jrdev = ctx->jrdev; | ||
| 886 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 887 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 888 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
| 889 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | ||
| 890 | int last_buflen = state->current_buf ? state->buflen_0 : | ||
| 891 | state->buflen_1; | ||
| 892 | u32 *sh_desc = ctx->sh_desc_fin, *desc; | ||
| 893 | dma_addr_t ptr = ctx->sh_desc_fin_dma; | ||
| 894 | int sec4_sg_bytes; | ||
| 895 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 896 | struct ahash_edesc *edesc; | ||
| 897 | int ret = 0; | ||
| 898 | int sh_len; | ||
| 899 | |||
| 900 | sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); | ||
| 901 | |||
| 902 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
| 903 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
| 904 | sec4_sg_bytes, GFP_DMA | flags); | ||
| 905 | if (!edesc) { | ||
| 906 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
| 907 | return -ENOMEM; | ||
| 908 | } | ||
| 909 | |||
| 910 | sh_len = desc_len(sh_desc); | ||
| 911 | desc = edesc->hw_desc; | ||
| 912 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 913 | |||
| 914 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 915 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 916 | DESC_JOB_IO_LEN; | ||
| 917 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 918 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
| 919 | edesc->src_nents = 0; | ||
| 920 | |||
| 921 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, | ||
| 922 | DMA_TO_DEVICE); | ||
| 923 | |||
| 924 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, | ||
| 925 | buf, state->buf_dma, buflen, | ||
| 926 | last_buflen); | ||
| 927 | (edesc->sec4_sg + sec4_sg_bytes / sizeof(struct sec4_sg_entry) - 1)->len |= SEC4_SG_LEN_FIN; | ||
| 928 | |||
| 929 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, | ||
| 930 | LDST_SGF); | ||
| 931 | |||
| 932 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
| 933 | digestsize); | ||
| 934 | |||
| 935 | #ifdef DEBUG | ||
| 936 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 937 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 938 | #endif | ||
| 939 | |||
| 940 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | ||
| 941 | if (!ret) { | ||
| 942 | ret = -EINPROGRESS; | ||
| 943 | } else { | ||
| 944 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
| 945 | kfree(edesc); | ||
| 946 | } | ||
| 947 | |||
| 948 | return ret; | ||
| 949 | } | ||
| 950 | |||
| 951 | static int ahash_finup_ctx(struct ahash_request *req) | ||
| 952 | { | ||
| 953 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 954 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 955 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 956 | struct device *jrdev = ctx->jrdev; | ||
| 957 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 958 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 959 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
| 960 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | ||
| 961 | int last_buflen = state->current_buf ? state->buflen_0 : | ||
| 962 | state->buflen_1; | ||
| 963 | u32 *sh_desc = ctx->sh_desc_finup, *desc; | ||
| 964 | dma_addr_t ptr = ctx->sh_desc_finup_dma; | ||
| 965 | int sec4_sg_bytes, sec4_sg_src_index; | ||
| 966 | int src_nents; | ||
| 967 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 968 | struct ahash_edesc *edesc; | ||
| 969 | bool chained = false; | ||
| 970 | int ret = 0; | ||
| 971 | int sh_len; | ||
| 972 | |||
| 973 | src_nents = __sg_count(req->src, req->nbytes, &chained); | ||
| 974 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); | ||
| 975 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | ||
| 976 | sizeof(struct sec4_sg_entry); | ||
| 977 | |||
| 978 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
| 979 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
| 980 | sec4_sg_bytes, GFP_DMA | flags); | ||
| 981 | if (!edesc) { | ||
| 982 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
| 983 | return -ENOMEM; | ||
| 984 | } | ||
| 985 | |||
| 986 | sh_len = desc_len(sh_desc); | ||
| 987 | desc = edesc->hw_desc; | ||
| 988 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 989 | |||
| 990 | edesc->src_nents = src_nents; | ||
| 991 | edesc->chained = chained; | ||
| 992 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 993 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 994 | DESC_JOB_IO_LEN; | ||
| 995 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 996 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
| 997 | |||
| 998 | ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, | ||
| 999 | DMA_TO_DEVICE); | ||
| 1000 | |||
| 1001 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, | ||
| 1002 | buf, state->buf_dma, buflen, | ||
| 1003 | last_buflen); | ||
| 1004 | |||
| 1005 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + | ||
| 1006 | sec4_sg_src_index, chained); | ||
| 1007 | |||
| 1008 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + | ||
| 1009 | buflen + req->nbytes, LDST_SGF); | ||
| 1010 | |||
| 1011 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
| 1012 | digestsize); | ||
| 1013 | |||
| 1014 | #ifdef DEBUG | ||
| 1015 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 1016 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1017 | #endif | ||
| 1018 | |||
| 1019 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | ||
| 1020 | if (!ret) { | ||
| 1021 | ret = -EINPROGRESS; | ||
| 1022 | } else { | ||
| 1023 | ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); | ||
| 1024 | kfree(edesc); | ||
| 1025 | } | ||
| 1026 | |||
| 1027 | return ret; | ||
| 1028 | } | ||
| 1029 | |||
| 1030 | static int ahash_digest(struct ahash_request *req) | ||
| 1031 | { | ||
| 1032 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 1033 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 1034 | struct device *jrdev = ctx->jrdev; | ||
| 1035 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 1036 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 1037 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | ||
| 1038 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | ||
| 1039 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 1040 | int src_nents, sec4_sg_bytes; | ||
| 1041 | dma_addr_t src_dma; | ||
| 1042 | struct ahash_edesc *edesc; | ||
| 1043 | bool chained = false; | ||
| 1044 | int ret = 0; | ||
| 1045 | u32 options; | ||
| 1046 | int sh_len; | ||
| 1047 | |||
| 1048 | src_nents = sg_count(req->src, req->nbytes, &chained); | ||
| 1049 | dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE, | ||
| 1050 | chained); | ||
| 1051 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | ||
| 1052 | |||
| 1053 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
| 1054 | edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes + | ||
| 1055 | DESC_JOB_IO_LEN, GFP_DMA | flags); | ||
| 1056 | if (!edesc) { | ||
| 1057 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
| 1058 | return -ENOMEM; | ||
| 1059 | } | ||
| 1060 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1061 | DESC_JOB_IO_LEN; | ||
| 1062 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 1063 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
| 1064 | edesc->src_nents = src_nents; | ||
| 1065 | edesc->chained = chained; | ||
| 1066 | |||
| 1067 | sh_len = desc_len(sh_desc); | ||
| 1068 | desc = edesc->hw_desc; | ||
| 1069 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 1070 | |||
| 1071 | if (src_nents) { | ||
| 1072 | sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); | ||
| 1073 | src_dma = edesc->sec4_sg_dma; | ||
| 1074 | options = LDST_SGF; | ||
| 1075 | } else { | ||
| 1076 | src_dma = sg_dma_address(req->src); | ||
| 1077 | options = 0; | ||
| 1078 | } | ||
| 1079 | append_seq_in_ptr(desc, src_dma, req->nbytes, options); | ||
| 1080 | |||
| 1081 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
| 1082 | digestsize); | ||
| 1083 | |||
| 1084 | #ifdef DEBUG | ||
| 1085 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 1086 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1087 | #endif | ||
| 1088 | |||
| 1089 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | ||
| 1090 | if (!ret) { | ||
| 1091 | ret = -EINPROGRESS; | ||
| 1092 | } else { | ||
| 1093 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
| 1094 | kfree(edesc); | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | return ret; | ||
| 1098 | } | ||
| 1099 | |||
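ahash_digest() (and ahash_update_first() below) choose between two ways of pointing SEQ IN at the data: sg_count() returning 0 means one contiguous segment, whose bus address goes into the descriptor directly, while a multi-segment list goes through the sec4 link table with the SGF flag set (the src_nents ? : 1 in the mapping call still maps that single segment). A self-contained sketch of the decision; the LDST_SGF value here is illustrative, the real one lives in desc.h:

    #include <stdint.h>
    #include <stdio.h>

    #define LDST_SGF 0x01000000u    /* illustrative; real flag is in desc.h */

    /* Decide how SEQ IN references the source, as ahash_digest() does. */
    static void pick_src(int src_nents, uint64_t seg_addr, uint64_t table_addr)
    {
        uint64_t src_dma;
        uint32_t options;

        if (src_nents) {            /* scatterlist: walk the link table */
            src_dma = table_addr;   /* edesc->sec4_sg_dma in the driver */
            options = LDST_SGF;
        } else {                    /* one contiguous segment */
            src_dma = seg_addr;     /* sg_dma_address(req->src) */
            options = 0;
        }
        printf("nents=%d -> dma=%#llx options=%#x\n", src_nents,
               (unsigned long long)src_dma, (unsigned)options);
    }

    int main(void)
    {
        pick_src(0, 0x1000, 0x2000);
        pick_src(3, 0x1000, 0x2000);
        return 0;
    }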
| 1100 | /* submit ahash final if it is the first job descriptor */ | ||
| 1101 | static int ahash_final_no_ctx(struct ahash_request *req) | ||
| 1102 | { | ||
| 1103 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 1104 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 1105 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1106 | struct device *jrdev = ctx->jrdev; | ||
| 1107 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 1108 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 1109 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
| 1110 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | ||
| 1111 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | ||
| 1112 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | ||
| 1113 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 1114 | struct ahash_edesc *edesc; | ||
| 1115 | int ret = 0; | ||
| 1116 | int sh_len; | ||
| 1117 | |||
| 1118 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
| 1119 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN, | ||
| 1120 | GFP_DMA | flags); | ||
| 1121 | if (!edesc) { | ||
| 1122 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
| 1123 | return -ENOMEM; | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | sh_len = desc_len(sh_desc); | ||
| 1127 | desc = edesc->hw_desc; | ||
| 1128 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 1129 | |||
| 1130 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | ||
| 1131 | |||
| 1132 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); | ||
| 1133 | |||
| 1134 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
| 1135 | digestsize); | ||
| 1136 | edesc->src_nents = 0; | ||
| 1137 | |||
| 1138 | #ifdef DEBUG | ||
| 1139 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 1140 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1141 | #endif | ||
| 1142 | |||
| 1143 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | ||
| 1144 | if (!ret) { | ||
| 1145 | ret = -EINPROGRESS; | ||
| 1146 | } else { | ||
| 1147 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
| 1148 | kfree(edesc); | ||
| 1149 | } | ||
| 1150 | |||
| 1151 | return ret; | ||
| 1152 | } | ||
| 1153 | |||
| 1154 | /* submit ahash update if it is the first job descriptor after update */ | ||
| 1155 | static int ahash_update_no_ctx(struct ahash_request *req) | ||
| 1156 | { | ||
| 1157 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 1158 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 1159 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1160 | struct device *jrdev = ctx->jrdev; | ||
| 1161 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 1162 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 1163 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
| 1164 | int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; | ||
| 1165 | u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; | ||
| 1166 | int *next_buflen = state->current_buf ? &state->buflen_0 : | ||
| 1167 | &state->buflen_1; | ||
| 1168 | int in_len = *buflen + req->nbytes, to_hash; | ||
| 1169 | int sec4_sg_bytes, src_nents; | ||
| 1170 | struct ahash_edesc *edesc; | ||
| 1171 | u32 *desc, *sh_desc = ctx->sh_desc_update_first; | ||
| 1172 | dma_addr_t ptr = ctx->sh_desc_update_first_dma; | ||
| 1173 | bool chained = false; | ||
| 1174 | int ret = 0; | ||
| 1175 | int sh_len; | ||
| 1176 | |||
| 1177 | *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); | ||
| 1178 | to_hash = in_len - *next_buflen; | ||
| 1179 | |||
| 1180 | if (to_hash) { | ||
| 1181 | src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), | ||
| 1182 | &chained); | ||
| 1183 | sec4_sg_bytes = (1 + src_nents) * | ||
| 1184 | sizeof(struct sec4_sg_entry); | ||
| 1185 | |||
| 1186 | /* | ||
| 1187 | * allocate space for base edesc and hw desc commands, | ||
| 1188 | * link tables | ||
| 1189 | */ | ||
| 1190 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
| 1191 | sec4_sg_bytes, GFP_DMA | flags); | ||
| 1192 | if (!edesc) { | ||
| 1193 | dev_err(jrdev, | ||
| 1194 | "could not allocate extended descriptor\n"); | ||
| 1195 | return -ENOMEM; | ||
| 1196 | } | ||
| 1197 | |||
| 1198 | edesc->src_nents = src_nents; | ||
| 1199 | edesc->chained = chained; | ||
| 1200 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 1201 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1202 | DESC_JOB_IO_LEN; | ||
| 1203 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 1204 | sec4_sg_bytes, | ||
| 1205 | DMA_TO_DEVICE); | ||
| 1206 | |||
| 1207 | state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, | ||
| 1208 | buf, *buflen); | ||
| 1209 | src_map_to_sec4_sg(jrdev, req->src, src_nents, | ||
| 1210 | edesc->sec4_sg + 1, chained); | ||
| 1211 | if (*next_buflen) { | ||
| 1212 | sg_copy_part(next_buf, req->src, to_hash - *buflen, | ||
| 1213 | req->nbytes); | ||
| 1214 | state->current_buf = !state->current_buf; | ||
| 1215 | } | ||
| 1216 | |||
| 1217 | sh_len = desc_len(sh_desc); | ||
| 1218 | desc = edesc->hw_desc; | ||
| 1219 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
| 1220 | HDR_REVERSE); | ||
| 1221 | |||
| 1222 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); | ||
| 1223 | |||
| 1224 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | ||
| 1225 | |||
| 1226 | #ifdef DEBUG | ||
| 1227 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 1228 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1229 | desc_bytes(desc), 1); | ||
| 1230 | #endif | ||
| 1231 | |||
| 1232 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); | ||
| 1233 | if (!ret) { | ||
| 1234 | ret = -EINPROGRESS; | ||
| 1235 | state->update = ahash_update_ctx; | ||
| 1236 | state->finup = ahash_finup_ctx; | ||
| 1237 | state->final = ahash_final_ctx; | ||
| 1238 | } else { | ||
| 1239 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | ||
| 1240 | DMA_TO_DEVICE); | ||
| 1241 | kfree(edesc); | ||
| 1242 | } | ||
| 1243 | } else if (*next_buflen) { | ||
| 1244 | sg_copy(buf + *buflen, req->src, req->nbytes); | ||
| 1245 | *buflen = *next_buflen; | ||
| 1246 | *next_buflen = 0; | ||
| 1247 | } | ||
| 1248 | #ifdef DEBUG | ||
| 1249 | print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", | ||
| 1250 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | ||
| 1251 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | ||
| 1252 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | ||
| 1253 | *next_buflen, 1); | ||
| 1254 | #endif | ||
| 1255 | |||
| 1256 | return ret; | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | /* submit ahash finup if it is the first job descriptor after update */ | ||
| 1260 | static int ahash_finup_no_ctx(struct ahash_request *req) | ||
| 1261 | { | ||
| 1262 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 1263 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 1264 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1265 | struct device *jrdev = ctx->jrdev; | ||
| 1266 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 1267 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 1268 | u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; | ||
| 1269 | int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; | ||
| 1270 | int last_buflen = state->current_buf ? state->buflen_0 : | ||
| 1271 | state->buflen_1; | ||
| 1272 | u32 *sh_desc = ctx->sh_desc_digest, *desc; | ||
| 1273 | dma_addr_t ptr = ctx->sh_desc_digest_dma; | ||
| 1274 | int sec4_sg_bytes, sec4_sg_src_index, src_nents; | ||
| 1275 | int digestsize = crypto_ahash_digestsize(ahash); | ||
| 1276 | struct ahash_edesc *edesc; | ||
| 1277 | bool chained = false; | ||
| 1278 | int sh_len; | ||
| 1279 | int ret = 0; | ||
| 1280 | |||
| 1281 | src_nents = __sg_count(req->src, req->nbytes, &chained); | ||
| 1282 | sec4_sg_src_index = 2; | ||
| 1283 | sec4_sg_bytes = (sec4_sg_src_index + src_nents) * | ||
| 1284 | sizeof(struct sec4_sg_entry); | ||
| 1285 | |||
| 1286 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
| 1287 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
| 1288 | sec4_sg_bytes, GFP_DMA | flags); | ||
| 1289 | if (!edesc) { | ||
| 1290 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
| 1291 | return -ENOMEM; | ||
| 1292 | } | ||
| 1293 | |||
| 1294 | sh_len = desc_len(sh_desc); | ||
| 1295 | desc = edesc->hw_desc; | ||
| 1296 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 1297 | |||
| 1298 | edesc->src_nents = src_nents; | ||
| 1299 | edesc->chained = chained; | ||
| 1300 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 1301 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1302 | DESC_JOB_IO_LEN; | ||
| 1303 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 1304 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
| 1305 | |||
| 1306 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, | ||
| 1307 | state->buf_dma, buflen, | ||
| 1308 | last_buflen); | ||
| 1309 | |||
| 1310 | src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, | ||
| 1311 | chained); | ||
| 1312 | |||
| 1313 | append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + | ||
| 1314 | req->nbytes, LDST_SGF); | ||
| 1315 | |||
| 1316 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | ||
| 1317 | digestsize); | ||
| 1318 | |||
| 1319 | #ifdef DEBUG | ||
| 1320 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 1321 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1322 | #endif | ||
| 1323 | |||
| 1324 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | ||
| 1325 | if (!ret) { | ||
| 1326 | ret = -EINPROGRESS; | ||
| 1327 | } else { | ||
| 1328 | ahash_unmap(jrdev, edesc, req, digestsize); | ||
| 1329 | kfree(edesc); | ||
| 1330 | } | ||
| 1331 | |||
| 1332 | return ret; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | /* submit first update job descriptor after init */ | ||
| 1336 | static int ahash_update_first(struct ahash_request *req) | ||
| 1337 | { | ||
| 1338 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 1339 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 1340 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1341 | struct device *jrdev = ctx->jrdev; | ||
| 1342 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 1343 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 1344 | u8 *next_buf = state->buf_0 + state->current_buf * | ||
| 1345 | CAAM_MAX_HASH_BLOCK_SIZE; | ||
| 1346 | int *next_buflen = &state->buflen_0 + state->current_buf; | ||
| 1347 | int to_hash; | ||
| 1348 | u32 *sh_desc = ctx->sh_desc_update_first, *desc; | ||
| 1349 | dma_addr_t ptr = ctx->sh_desc_update_first_dma; | ||
| 1350 | int sec4_sg_bytes, src_nents; | ||
| 1351 | dma_addr_t src_dma; | ||
| 1352 | u32 options; | ||
| 1353 | struct ahash_edesc *edesc; | ||
| 1354 | bool chained = false; | ||
| 1355 | int ret = 0; | ||
| 1356 | int sh_len; | ||
| 1357 | |||
| 1358 | *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - | ||
| 1359 | 1); | ||
| 1360 | to_hash = req->nbytes - *next_buflen; | ||
| 1361 | |||
| 1362 | if (to_hash) { | ||
| 1363 | src_nents = sg_count(req->src, req->nbytes - (*next_buflen), | ||
| 1364 | &chained); | ||
| 1365 | dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
| 1366 | DMA_TO_DEVICE, chained); | ||
| 1367 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | ||
| 1368 | |||
| 1369 | /* | ||
| 1370 | * allocate space for base edesc and hw desc commands, | ||
| 1371 | * link tables | ||
| 1372 | */ | ||
| 1373 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | ||
| 1374 | sec4_sg_bytes, GFP_DMA | flags); | ||
| 1375 | if (!edesc) { | ||
| 1376 | dev_err(jrdev, | ||
| 1377 | "could not allocate extended descriptor\n"); | ||
| 1378 | return -ENOMEM; | ||
| 1379 | } | ||
| 1380 | |||
| 1381 | edesc->src_nents = src_nents; | ||
| 1382 | edesc->chained = chained; | ||
| 1383 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 1384 | edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + | ||
| 1385 | DESC_JOB_IO_LEN; | ||
| 1386 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 1387 | sec4_sg_bytes, | ||
| 1388 | DMA_TO_DEVICE); | ||
| 1389 | |||
| 1390 | if (src_nents) { | ||
| 1391 | sg_to_sec4_sg_last(req->src, src_nents, | ||
| 1392 | edesc->sec4_sg, 0); | ||
| 1393 | src_dma = edesc->sec4_sg_dma; | ||
| 1394 | options = LDST_SGF; | ||
| 1395 | } else { | ||
| 1396 | src_dma = sg_dma_address(req->src); | ||
| 1397 | options = 0; | ||
| 1398 | } | ||
| 1399 | |||
| 1400 | if (*next_buflen) | ||
| 1401 | sg_copy_part(next_buf, req->src, to_hash, req->nbytes); | ||
| 1402 | |||
| 1403 | sh_len = desc_len(sh_desc); | ||
| 1404 | desc = edesc->hw_desc; | ||
| 1405 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | ||
| 1406 | HDR_REVERSE); | ||
| 1407 | |||
| 1408 | append_seq_in_ptr(desc, src_dma, to_hash, options); | ||
| 1409 | |||
| 1410 | map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); | ||
| 1411 | |||
| 1412 | #ifdef DEBUG | ||
| 1413 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 1414 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1415 | desc_bytes(desc), 1); | ||
| 1416 | #endif | ||
| 1417 | |||
| 1418 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, | ||
| 1419 | req); | ||
| 1420 | if (!ret) { | ||
| 1421 | ret = -EINPROGRESS; | ||
| 1422 | state->update = ahash_update_ctx; | ||
| 1423 | state->finup = ahash_finup_ctx; | ||
| 1424 | state->final = ahash_final_ctx; | ||
| 1425 | } else { | ||
| 1426 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | ||
| 1427 | DMA_TO_DEVICE); | ||
| 1428 | kfree(edesc); | ||
| 1429 | } | ||
| 1430 | } else if (*next_buflen) { | ||
| 1431 | state->update = ahash_update_no_ctx; | ||
| 1432 | state->finup = ahash_finup_no_ctx; | ||
| 1433 | state->final = ahash_final_no_ctx; | ||
| 1434 | sg_copy(next_buf, req->src, req->nbytes); | ||
| 1435 | } | ||
| 1436 | #ifdef DEBUG | ||
| 1437 | print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", | ||
| 1438 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | ||
| 1439 | *next_buflen, 1); | ||
| 1440 | #endif | ||
| 1441 | |||
| 1442 | return ret; | ||
| 1443 | } | ||
| 1444 | |||
| 1445 | static int ahash_finup_first(struct ahash_request *req) | ||
| 1446 | { | ||
| 1447 | return ahash_digest(req); | ||
| 1448 | } | ||
| 1449 | |||
| 1450 | static int ahash_init(struct ahash_request *req) | ||
| 1451 | { | ||
| 1452 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1453 | |||
| 1454 | state->update = ahash_update_first; | ||
| 1455 | state->finup = ahash_finup_first; | ||
| 1456 | state->final = ahash_final_no_ctx; | ||
| 1457 | |||
| 1458 | state->current_buf = 0; | ||
| 1459 | |||
| 1460 | return 0; | ||
| 1461 | } | ||
| 1462 | |||
| 1463 | static int ahash_update(struct ahash_request *req) | ||
| 1464 | { | ||
| 1465 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1466 | |||
| 1467 | return state->update(req); | ||
| 1468 | } | ||
| 1469 | |||
| 1470 | static int ahash_finup(struct ahash_request *req) | ||
| 1471 | { | ||
| 1472 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1473 | |||
| 1474 | return state->finup(req); | ||
| 1475 | } | ||
| 1476 | |||
| 1477 | static int ahash_final(struct ahash_request *req) | ||
| 1478 | { | ||
| 1479 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1480 | |||
| 1481 | return state->final(req); | ||
| 1482 | } | ||
| 1483 | |||
| 1484 | static int ahash_export(struct ahash_request *req, void *out) | ||
| 1485 | { | ||
| 1486 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 1487 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 1488 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1489 | |||
| 1490 | memcpy(out, ctx, sizeof(struct caam_hash_ctx)); | ||
| 1491 | memcpy(out + sizeof(struct caam_hash_ctx), state, | ||
| 1492 | sizeof(struct caam_hash_state)); | ||
| 1493 | return 0; | ||
| 1494 | } | ||
| 1495 | |||
| 1496 | static int ahash_import(struct ahash_request *req, const void *in) | ||
| 1497 | { | ||
| 1498 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
| 1499 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
| 1500 | struct caam_hash_state *state = ahash_request_ctx(req); | ||
| 1501 | |||
| 1502 | memcpy(ctx, in, sizeof(struct caam_hash_ctx)); | ||
| 1503 | memcpy(state, in + sizeof(struct caam_hash_ctx), | ||
| 1504 | sizeof(struct caam_hash_state)); | ||
| 1505 | return 0; | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | struct caam_hash_template { | ||
| 1509 | char name[CRYPTO_MAX_ALG_NAME]; | ||
| 1510 | char driver_name[CRYPTO_MAX_ALG_NAME]; | ||
| 1511 | char hmac_name[CRYPTO_MAX_ALG_NAME]; | ||
| 1512 | char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; | ||
| 1513 | unsigned int blocksize; | ||
| 1514 | struct ahash_alg template_ahash; | ||
| 1515 | u32 alg_type; | ||
| 1516 | u32 alg_op; | ||
| 1517 | }; | ||
| 1518 | |||
| 1519 | /* ahash algorithm templates */ | ||
| 1520 | static struct caam_hash_template driver_hash[] = { | ||
| 1521 | { | ||
| 1522 | .name = "sha1", | ||
| 1523 | .driver_name = "sha1-caam", | ||
| 1524 | .hmac_name = "hmac(sha1)", | ||
| 1525 | .hmac_driver_name = "hmac-sha1-caam", | ||
| 1526 | .blocksize = SHA1_BLOCK_SIZE, | ||
| 1527 | .template_ahash = { | ||
| 1528 | .init = ahash_init, | ||
| 1529 | .update = ahash_update, | ||
| 1530 | .final = ahash_final, | ||
| 1531 | .finup = ahash_finup, | ||
| 1532 | .digest = ahash_digest, | ||
| 1533 | .export = ahash_export, | ||
| 1534 | .import = ahash_import, | ||
| 1535 | .setkey = ahash_setkey, | ||
| 1536 | .halg = { | ||
| 1537 | .digestsize = SHA1_DIGEST_SIZE, | ||
| 1538 | }, | ||
| 1539 | }, | ||
| 1540 | .alg_type = OP_ALG_ALGSEL_SHA1, | ||
| 1541 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 1542 | }, { | ||
| 1543 | .name = "sha224", | ||
| 1544 | .driver_name = "sha224-caam", | ||
| 1545 | .hmac_name = "hmac(sha224)", | ||
| 1546 | .hmac_driver_name = "hmac-sha224-caam", | ||
| 1547 | .blocksize = SHA224_BLOCK_SIZE, | ||
| 1548 | .template_ahash = { | ||
| 1549 | .init = ahash_init, | ||
| 1550 | .update = ahash_update, | ||
| 1551 | .final = ahash_final, | ||
| 1552 | .finup = ahash_finup, | ||
| 1553 | .digest = ahash_digest, | ||
| 1554 | .export = ahash_export, | ||
| 1555 | .import = ahash_import, | ||
| 1556 | .setkey = ahash_setkey, | ||
| 1557 | .halg = { | ||
| 1558 | .digestsize = SHA224_DIGEST_SIZE, | ||
| 1559 | }, | ||
| 1560 | }, | ||
| 1561 | .alg_type = OP_ALG_ALGSEL_SHA224, | ||
| 1562 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 1563 | }, { | ||
| 1564 | .name = "sha256", | ||
| 1565 | .driver_name = "sha256-caam", | ||
| 1566 | .hmac_name = "hmac(sha256)", | ||
| 1567 | .hmac_driver_name = "hmac-sha256-caam", | ||
| 1568 | .blocksize = SHA256_BLOCK_SIZE, | ||
| 1569 | .template_ahash = { | ||
| 1570 | .init = ahash_init, | ||
| 1571 | .update = ahash_update, | ||
| 1572 | .final = ahash_final, | ||
| 1573 | .finup = ahash_finup, | ||
| 1574 | .digest = ahash_digest, | ||
| 1575 | .export = ahash_export, | ||
| 1576 | .import = ahash_import, | ||
| 1577 | .setkey = ahash_setkey, | ||
| 1578 | .halg = { | ||
| 1579 | .digestsize = SHA256_DIGEST_SIZE, | ||
| 1580 | }, | ||
| 1581 | }, | ||
| 1582 | .alg_type = OP_ALG_ALGSEL_SHA256, | ||
| 1583 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 1584 | }, { | ||
| 1585 | .name = "sha384", | ||
| 1586 | .driver_name = "sha384-caam", | ||
| 1587 | .hmac_name = "hmac(sha384)", | ||
| 1588 | .hmac_driver_name = "hmac-sha384-caam", | ||
| 1589 | .blocksize = SHA384_BLOCK_SIZE, | ||
| 1590 | .template_ahash = { | ||
| 1591 | .init = ahash_init, | ||
| 1592 | .update = ahash_update, | ||
| 1593 | .final = ahash_final, | ||
| 1594 | .finup = ahash_finup, | ||
| 1595 | .digest = ahash_digest, | ||
| 1596 | .export = ahash_export, | ||
| 1597 | .import = ahash_import, | ||
| 1598 | .setkey = ahash_setkey, | ||
| 1599 | .halg = { | ||
| 1600 | .digestsize = SHA384_DIGEST_SIZE, | ||
| 1601 | }, | ||
| 1602 | }, | ||
| 1603 | .alg_type = OP_ALG_ALGSEL_SHA384, | ||
| 1604 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 1605 | }, { | ||
| 1606 | .name = "sha512", | ||
| 1607 | .driver_name = "sha512-caam", | ||
| 1608 | .hmac_name = "hmac(sha512)", | ||
| 1609 | .hmac_driver_name = "hmac-sha512-caam", | ||
| 1610 | .blocksize = SHA512_BLOCK_SIZE, | ||
| 1611 | .template_ahash = { | ||
| 1612 | .init = ahash_init, | ||
| 1613 | .update = ahash_update, | ||
| 1614 | .final = ahash_final, | ||
| 1615 | .finup = ahash_finup, | ||
| 1616 | .digest = ahash_digest, | ||
| 1617 | .export = ahash_export, | ||
| 1618 | .import = ahash_import, | ||
| 1619 | .setkey = ahash_setkey, | ||
| 1620 | .halg = { | ||
| 1621 | .digestsize = SHA512_DIGEST_SIZE, | ||
| 1622 | }, | ||
| 1623 | }, | ||
| 1624 | .alg_type = OP_ALG_ALGSEL_SHA512, | ||
| 1625 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 1626 | }, { | ||
| 1627 | .name = "md5", | ||
| 1628 | .driver_name = "md5-caam", | ||
| 1629 | .hmac_name = "hmac(md5)", | ||
| 1630 | .hmac_driver_name = "hmac-md5-caam", | ||
| 1631 | .blocksize = MD5_BLOCK_WORDS * 4, | ||
| 1632 | .template_ahash = { | ||
| 1633 | .init = ahash_init, | ||
| 1634 | .update = ahash_update, | ||
| 1635 | .final = ahash_final, | ||
| 1636 | .finup = ahash_finup, | ||
| 1637 | .digest = ahash_digest, | ||
| 1638 | .export = ahash_export, | ||
| 1639 | .import = ahash_import, | ||
| 1640 | .setkey = ahash_setkey, | ||
| 1641 | .halg = { | ||
| 1642 | .digestsize = MD5_DIGEST_SIZE, | ||
| 1643 | }, | ||
| 1644 | }, | ||
| 1645 | .alg_type = OP_ALG_ALGSEL_MD5, | ||
| 1646 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 1647 | }, | ||
| 1648 | }; | ||
| 1649 | |||
| 1650 | struct caam_hash_alg { | ||
| 1651 | struct list_head entry; | ||
| 1652 | struct device *ctrldev; | ||
| 1653 | int alg_type; | ||
| 1654 | int alg_op; | ||
| 1655 | struct ahash_alg ahash_alg; | ||
| 1656 | }; | ||
| 1657 | |||
| 1658 | static int caam_hash_cra_init(struct crypto_tfm *tfm) | ||
| 1659 | { | ||
| 1660 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
| 1661 | struct crypto_alg *base = tfm->__crt_alg; | ||
| 1662 | struct hash_alg_common *halg = | ||
| 1663 | container_of(base, struct hash_alg_common, base); | ||
| 1664 | struct ahash_alg *alg = | ||
| 1665 | container_of(halg, struct ahash_alg, halg); | ||
| 1666 | struct caam_hash_alg *caam_hash = | ||
| 1667 | container_of(alg, struct caam_hash_alg, ahash_alg); | ||
| 1668 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 1669 | struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev); | ||
| 1670 | /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ | ||
| 1671 | static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, | ||
| 1672 | HASH_MSG_LEN + SHA1_DIGEST_SIZE, | ||
| 1673 | HASH_MSG_LEN + 32, | ||
| 1674 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, | ||
| 1675 | HASH_MSG_LEN + 64, | ||
| 1676 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; | ||
| 1677 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | ||
| 1678 | int ret = 0; | ||
| 1679 | |||
| 1680 | /* | ||
| 1681 | * distribute tfms across job rings to ensure in-order | ||
| 1682 | * crypto request processing per tfm | ||
| 1683 | */ | ||
| 1684 | ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs]; | ||
| 1685 | |||
| 1686 | /* copy descriptor header template value */ | ||
| 1687 | ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; | ||
| 1688 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; | ||
| 1689 | |||
| 1690 | ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | ||
| 1691 | OP_ALG_ALGSEL_SHIFT]; | ||
| 1692 | |||
| 1693 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
| 1694 | sizeof(struct caam_hash_state)); | ||
| 1695 | |||
| 1696 | ret = ahash_set_sh_desc(ahash); | ||
| 1697 | |||
| 1698 | return ret; | ||
| 1699 | } | ||
| 1700 | |||
| 1701 | static void caam_hash_cra_exit(struct crypto_tfm *tfm) | ||
| 1702 | { | ||
| 1703 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 1704 | |||
| 1705 | if (ctx->sh_desc_update_dma && | ||
| 1706 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) | ||
| 1707 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, | ||
| 1708 | desc_bytes(ctx->sh_desc_update), | ||
| 1709 | DMA_TO_DEVICE); | ||
| 1710 | if (ctx->sh_desc_update_first_dma && | ||
| 1711 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) | ||
| 1712 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, | ||
| 1713 | desc_bytes(ctx->sh_desc_update_first), | ||
| 1714 | DMA_TO_DEVICE); | ||
| 1715 | if (ctx->sh_desc_fin_dma && | ||
| 1716 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) | ||
| 1717 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, | ||
| 1718 | desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); | ||
| 1719 | if (ctx->sh_desc_digest_dma && | ||
| 1720 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) | ||
| 1721 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, | ||
| 1722 | desc_bytes(ctx->sh_desc_digest), | ||
| 1723 | DMA_TO_DEVICE); | ||
| 1724 | if (ctx->sh_desc_finup_dma && | ||
| 1725 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) | ||
| 1726 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, | ||
| 1727 | desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); | ||
| 1728 | } | ||
| 1729 | |||
| 1730 | static void __exit caam_algapi_hash_exit(void) | ||
| 1731 | { | ||
| 1732 | struct device_node *dev_node; | ||
| 1733 | struct platform_device *pdev; | ||
| 1734 | struct device *ctrldev; | ||
| 1735 | struct caam_drv_private *priv; | ||
| 1736 | struct caam_hash_alg *t_alg, *n; | ||
| 1737 | |||
| 1738 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 1739 | if (!dev_node) | ||
| 1740 | return; | ||
| 1741 | |||
| 1742 | pdev = of_find_device_by_node(dev_node); | ||
| 1743 | if (!pdev) | ||
| 1744 | return; | ||
| 1745 | |||
| 1746 | ctrldev = &pdev->dev; | ||
| 1747 | of_node_put(dev_node); | ||
| 1748 | priv = dev_get_drvdata(ctrldev); | ||
| 1749 | |||
| 1750 | if (!priv->hash_list.next) | ||
| 1751 | return; | ||
| 1752 | |||
| 1753 | list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) { | ||
| 1754 | crypto_unregister_ahash(&t_alg->ahash_alg); | ||
| 1755 | list_del(&t_alg->entry); | ||
| 1756 | kfree(t_alg); | ||
| 1757 | } | ||
| 1758 | } | ||
| 1759 | |||
| 1760 | static struct caam_hash_alg * | ||
| 1761 | caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, | ||
| 1762 | bool keyed) | ||
| 1763 | { | ||
| 1764 | struct caam_hash_alg *t_alg; | ||
| 1765 | struct ahash_alg *halg; | ||
| 1766 | struct crypto_alg *alg; | ||
| 1767 | |||
| 1768 | t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); | ||
| 1769 | if (!t_alg) { | ||
| 1770 | dev_err(ctrldev, "failed to allocate t_alg\n"); | ||
| 1771 | return ERR_PTR(-ENOMEM); | ||
| 1772 | } | ||
| 1773 | |||
| 1774 | t_alg->ahash_alg = template->template_ahash; | ||
| 1775 | halg = &t_alg->ahash_alg; | ||
| 1776 | alg = &halg->halg.base; | ||
| 1777 | |||
| 1778 | if (keyed) { | ||
| 1779 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
| 1780 | template->hmac_name); | ||
| 1781 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
| 1782 | template->hmac_driver_name); | ||
| 1783 | } else { | ||
| 1784 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
| 1785 | template->name); | ||
| 1786 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
| 1787 | template->driver_name); | ||
| 1788 | } | ||
| 1789 | alg->cra_module = THIS_MODULE; | ||
| 1790 | alg->cra_init = caam_hash_cra_init; | ||
| 1791 | alg->cra_exit = caam_hash_cra_exit; | ||
| 1792 | alg->cra_ctxsize = sizeof(struct caam_hash_ctx); | ||
| 1793 | alg->cra_priority = CAAM_CRA_PRIORITY; | ||
| 1794 | alg->cra_blocksize = template->blocksize; | ||
| 1795 | alg->cra_alignmask = 0; | ||
| 1796 | alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH; | ||
| 1797 | alg->cra_type = &crypto_ahash_type; | ||
| 1798 | |||
| 1799 | t_alg->alg_type = template->alg_type; | ||
| 1800 | t_alg->alg_op = template->alg_op; | ||
| 1801 | t_alg->ctrldev = ctrldev; | ||
| 1802 | |||
| 1803 | return t_alg; | ||
| 1804 | } | ||
| 1805 | |||
| 1806 | static int __init caam_algapi_hash_init(void) | ||
| 1807 | { | ||
| 1808 | struct device_node *dev_node; | ||
| 1809 | struct platform_device *pdev; | ||
| 1810 | struct device *ctrldev; | ||
| 1811 | struct caam_drv_private *priv; | ||
| 1812 | int i = 0, err = 0; | ||
| 1813 | |||
| 1814 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 1815 | if (!dev_node) | ||
| 1816 | return -ENODEV; | ||
| 1817 | |||
| 1818 | pdev = of_find_device_by_node(dev_node); | ||
| 1819 | if (!pdev) | ||
| 1820 | return -ENODEV; | ||
| 1821 | |||
| 1822 | ctrldev = &pdev->dev; | ||
| 1823 | priv = dev_get_drvdata(ctrldev); | ||
| 1824 | of_node_put(dev_node); | ||
| 1825 | |||
| 1826 | INIT_LIST_HEAD(&priv->hash_list); | ||
| 1827 | |||
| 1828 | atomic_set(&priv->tfm_count, -1); | ||
| 1829 | |||
| 1830 | /* register crypto algorithms the device supports */ | ||
| 1831 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { | ||
| 1832 | /* TODO: check if h/w supports alg */ | ||
| 1833 | struct caam_hash_alg *t_alg; | ||
| 1834 | |||
| 1835 | /* register hmac version */ | ||
| 1836 | t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true); | ||
| 1837 | if (IS_ERR(t_alg)) { | ||
| 1838 | err = PTR_ERR(t_alg); | ||
| 1839 | dev_warn(ctrldev, "%s alg allocation failed\n", | ||
| 1840 | driver_hash[i].driver_name); | ||
| 1841 | continue; | ||
| 1842 | } | ||
| 1843 | |||
| 1844 | err = crypto_register_ahash(&t_alg->ahash_alg); | ||
| 1845 | if (err) { | ||
| 1846 | dev_warn(ctrldev, "%s alg registration failed\n", | ||
| 1847 | t_alg->ahash_alg.halg.base.cra_driver_name); | ||
| 1848 | kfree(t_alg); | ||
| 1849 | } else | ||
| 1850 | list_add_tail(&t_alg->entry, &priv->hash_list); | ||
| 1851 | |||
| 1852 | /* register unkeyed version */ | ||
| 1853 | t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false); | ||
| 1854 | if (IS_ERR(t_alg)) { | ||
| 1855 | err = PTR_ERR(t_alg); | ||
| 1856 | dev_warn(ctrldev, "%s alg allocation failed\n", | ||
| 1857 | driver_hash[i].driver_name); | ||
| 1858 | continue; | ||
| 1859 | } | ||
| 1860 | |||
| 1861 | err = crypto_register_ahash(&t_alg->ahash_alg); | ||
| 1862 | if (err) { | ||
| 1863 | dev_warn(ctrldev, "%s alg registration failed\n", | ||
| 1864 | t_alg->ahash_alg.halg.base.cra_driver_name); | ||
| 1865 | kfree(t_alg); | ||
| 1866 | } else | ||
| 1867 | list_add_tail(&t_alg->entry, &priv->hash_list); | ||
| 1868 | } | ||
| 1869 | |||
| 1870 | return err; | ||
| 1871 | } | ||
| 1872 | |||
| 1873 | module_init(caam_algapi_hash_init); | ||
| 1874 | module_exit(caam_algapi_hash_exit); | ||
| 1875 | |||
| 1876 | MODULE_LICENSE("GPL"); | ||
| 1877 | MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); | ||
| 1878 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); | ||
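With the module loaded, kernel consumers reach these algorithms through the normal asynchronous hash API; the driver's CAAM_CRA_PRIORITY typically wins selection over the C sha256 implementation. A hedged sketch of a caller, using the classic completion pattern (names here are illustrative, error handling trimmed):

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct hash_wait {
	struct completion done;
	int err;
};

static void hash_done_cb(struct crypto_async_request *areq, int err)
{
	struct hash_wait *w = areq->data;

	if (err == -EINPROGRESS)
		return;			/* backlog notice; final result follows */
	w->err = err;
	complete(&w->done);
}

static int sha256_one_buf(const u8 *buf, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct hash_wait w;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&w.done);
	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hash_done_cb, &w);
	ahash_request_set_crypt(req, &sg, digest, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&w.done);
		ret = w.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}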
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c new file mode 100644 index 000000000000..e2bfe161dece --- /dev/null +++ b/drivers/crypto/caam/caamrng.c | |||
| @@ -0,0 +1,309 @@ | |||
| 1 | /* | ||
| 2 | * caam - Freescale FSL CAAM support for hw_random | ||
| 3 | * | ||
| 4 | * Copyright 2011 Freescale Semiconductor, Inc. | ||
| 5 | * | ||
| 6 | * Based on caamalg.c crypto API driver. | ||
| 7 | * | ||
| 8 | * relationship between job descriptors and shared descriptors: | ||
| 9 | * | ||
| 10 | * --------------- -------------- | ||
| 11 | * | JobDesc #0 |-------------------->| ShareDesc | | ||
| 12 | * | *(buffer 0) | |------------->| (generate) | | ||
| 13 | * --------------- | | (move) | | ||
| 14 | * | | (store) | | ||
| 15 | * --------------- | -------------- | ||
| 16 | * | JobDesc #1 |------| | ||
| 17 | * | *(buffer 1) | | ||
| 18 | * --------------- | ||
| 19 | * | ||
| 20 | * A job desc looks like this: | ||
| 21 | * | ||
| 22 | * --------------------- | ||
| 23 | * | Header | | ||
| 24 | * | ShareDesc Pointer | | ||
| 25 | * | SEQ_OUT_PTR | | ||
| 26 | * | (output buffer) | | ||
| 27 | * --------------------- | ||
| 28 | * | ||
| 29 | * The SharedDesc never changes, and each job descriptor points to one of two | ||
| 30 | * buffers for each device, from which the data will be copied into the | ||
| 31 | * requested destination. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/hw_random.h> | ||
| 35 | #include <linux/completion.h> | ||
| 36 | #include <linux/atomic.h> | ||
| 37 | |||
| 38 | #include "compat.h" | ||
| 39 | |||
| 40 | #include "regs.h" | ||
| 41 | #include "intern.h" | ||
| 42 | #include "desc_constr.h" | ||
| 43 | #include "jr.h" | ||
| 44 | #include "error.h" | ||
| 45 | |||
| 46 | /* | ||
| 47 | * Maximum buffer size: maximum number of random, cache-aligned bytes that | ||
| 48 | * will be generated and moved to seq out ptr (extlen not allowed) | ||
| 49 | */ | ||
| 50 | #define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \ | ||
| 51 | L1_CACHE_BYTES) | ||
| 52 | |||
| 53 | /* length of descriptors */ | ||
| 54 | #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) | ||
| 55 | #define DESC_RNG_LEN (10 * CAAM_CMD_SZ) | ||
| 56 | |||
| 57 | /* Buffer, its dma address, completion and empty-state flag */ | ||
| 58 | struct buf_data { | ||
| 59 | u8 buf[RN_BUF_SIZE]; | ||
| 60 | dma_addr_t addr; | ||
| 61 | struct completion filled; | ||
| 62 | u32 hw_desc[DESC_JOB_O_LEN]; | ||
| 63 | #define BUF_NOT_EMPTY 0 | ||
| 64 | #define BUF_EMPTY 1 | ||
| 65 | #define BUF_PENDING 2 /* Empty, but with a job pending; don't submit another */ | ||
| 66 | atomic_t empty; | ||
| 67 | }; | ||
| 68 | |||
| 69 | /* rng per-device context */ | ||
| 70 | struct caam_rng_ctx { | ||
| 71 | struct device *jrdev; | ||
| 72 | dma_addr_t sh_desc_dma; | ||
| 73 | u32 sh_desc[DESC_RNG_LEN]; | ||
| 74 | unsigned int cur_buf_idx; | ||
| 75 | int current_buf; | ||
| 76 | struct buf_data bufs[2]; | ||
| 77 | }; | ||
| 78 | |||
| 79 | static struct caam_rng_ctx rng_ctx; | ||
| 80 | |||
| 81 | static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd) | ||
| 82 | { | ||
| 83 | if (bd->addr) | ||
| 84 | dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE, | ||
| 85 | DMA_FROM_DEVICE); | ||
| 86 | } | ||
| 87 | |||
| 88 | static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx) | ||
| 89 | { | ||
| 90 | struct device *jrdev = ctx->jrdev; | ||
| 91 | |||
| 92 | if (ctx->sh_desc_dma) | ||
| 93 | dma_unmap_single(jrdev, ctx->sh_desc_dma, DESC_RNG_LEN, | ||
| 94 | DMA_TO_DEVICE); | ||
| 95 | rng_unmap_buf(jrdev, &ctx->bufs[0]); | ||
| 96 | rng_unmap_buf(jrdev, &ctx->bufs[1]); | ||
| 97 | } | ||
| 98 | |||
| 99 | static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) | ||
| 100 | { | ||
| 101 | struct buf_data *bd; | ||
| 102 | |||
| 103 | bd = (struct buf_data *)((char *)desc - | ||
| 104 | offsetof(struct buf_data, hw_desc)); | ||
| 105 | |||
| 106 | if (err) { | ||
| 107 | char tmp[CAAM_ERROR_STR_MAX]; | ||
| 108 | |||
| 109 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
| 110 | } | ||
| 111 | |||
| 112 | atomic_set(&bd->empty, BUF_NOT_EMPTY); | ||
| 113 | complete(&bd->filled); | ||
| 114 | #ifdef DEBUG | ||
| 115 | print_hex_dump(KERN_ERR, "rng refreshed buf@: ", | ||
| 116 | DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); | ||
| 117 | #endif | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) | ||
| 121 | { | ||
| 122 | struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)]; | ||
| 123 | struct device *jrdev = ctx->jrdev; | ||
| 124 | u32 *desc = bd->hw_desc; | ||
| 125 | int err; | ||
| 126 | |||
| 127 | dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf)); | ||
| 128 | init_completion(&bd->filled); | ||
| 129 | err = caam_jr_enqueue(jrdev, desc, rng_done, ctx); | ||
| 130 | if (err) | ||
| 131 | complete(&bd->filled); /* don't wait on failed job */ | ||
| 132 | else | ||
| 133 | atomic_inc(&bd->empty); /* note if pending */ | ||
| 134 | |||
| 135 | return err; | ||
| 136 | } | ||
| 137 | |||
| 138 | static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait) | ||
| 139 | { | ||
| 140 | struct caam_rng_ctx *ctx = &rng_ctx; | ||
| 141 | struct buf_data *bd = &ctx->bufs[ctx->current_buf]; | ||
| 142 | int next_buf_idx, copied_idx; | ||
| 143 | int err; | ||
| 144 | |||
| 145 | if (atomic_read(&bd->empty)) { | ||
| 146 | /* try to submit job if there wasn't one */ | ||
| 147 | if (atomic_read(&bd->empty) == BUF_EMPTY) { | ||
| 148 | err = submit_job(ctx, 1); | ||
| 149 | /* if we can't submit the job, we can't wait for it either */ | ||
| 150 | if (err) | ||
| 151 | return 0; | ||
| 152 | } | ||
| 153 | /* no immediate data, so exit if not waiting */ | ||
| 154 | if (!wait) | ||
| 155 | return 0; | ||
| 156 | |||
| 157 | /* waiting for pending job */ | ||
| 158 | if (atomic_read(&bd->empty)) | ||
| 159 | wait_for_completion(&bd->filled); | ||
| 160 | } | ||
| 161 | |||
| 162 | next_buf_idx = ctx->cur_buf_idx + max; | ||
| 163 | dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n", | ||
| 164 | __func__, ctx->current_buf, ctx->cur_buf_idx); | ||
| 165 | |||
| 166 | /* if enough data in current buffer */ | ||
| 167 | if (next_buf_idx < RN_BUF_SIZE) { | ||
| 168 | memcpy(data, bd->buf + ctx->cur_buf_idx, max); | ||
| 169 | ctx->cur_buf_idx = next_buf_idx; | ||
| 170 | return max; | ||
| 171 | } | ||
| 172 | |||
| 173 | /* else, copy what's left... */ | ||
| 174 | copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx; | ||
| 175 | memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx); | ||
| 176 | ctx->cur_buf_idx = 0; | ||
| 177 | atomic_set(&bd->empty, BUF_EMPTY); | ||
| 178 | |||
| 179 | /* ...refill... */ | ||
| 180 | submit_job(ctx, 1); | ||
| 181 | |||
| 182 | /* and use next buffer */ | ||
| 183 | ctx->current_buf = !ctx->current_buf; | ||
| 184 | dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf); | ||
| 185 | |||
| 186 | /* since some data has already been read, don't wait */ | ||
| 187 | return copied_idx + caam_read(rng, data + copied_idx, | ||
| 188 | max - copied_idx, false); | ||
| 189 | } | ||
| 190 | |||
| 191 | static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx) | ||
| 192 | { | ||
| 193 | struct device *jrdev = ctx->jrdev; | ||
| 194 | u32 *desc = ctx->sh_desc; | ||
| 195 | |||
| 196 | init_sh_desc(desc, HDR_SHARE_WAIT); | ||
| 197 | |||
| 198 | /* Propagate errors from shared to job descriptor */ | ||
| 199 | append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); | ||
| 200 | |||
| 201 | /* Generate random bytes */ | ||
| 202 | append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); | ||
| 203 | |||
| 204 | /* Store bytes */ | ||
| 205 | append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE); | ||
| 206 | |||
| 207 | ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
| 208 | DMA_TO_DEVICE); | ||
| 209 | #ifdef DEBUG | ||
| 210 | print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | ||
| 211 | desc, desc_bytes(desc), 1); | ||
| 212 | #endif | ||
| 213 | } | ||
| 214 | |||
| 215 | static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) | ||
| 216 | { | ||
| 217 | struct device *jrdev = ctx->jrdev; | ||
| 218 | struct buf_data *bd = &ctx->bufs[buf_id]; | ||
| 219 | u32 *desc = bd->hw_desc; | ||
| 220 | int sh_len = desc_len(ctx->sh_desc); | ||
| 221 | |||
| 222 | init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER | | ||
| 223 | HDR_REVERSE); | ||
| 224 | |||
| 225 | bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE); | ||
| 226 | |||
| 227 | append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); | ||
| 228 | #ifdef DEBUG | ||
| 229 | print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | ||
| 230 | desc, desc_bytes(desc), 1); | ||
| 231 | #endif | ||
| 232 | } | ||
| 233 | |||
| 234 | static void caam_cleanup(struct hwrng *rng) | ||
| 235 | { | ||
| 236 | int i; | ||
| 237 | struct buf_data *bd; | ||
| 238 | |||
| 239 | for (i = 0; i < 2; i++) { | ||
| 240 | bd = &rng_ctx.bufs[i]; | ||
| 241 | if (atomic_read(&bd->empty) == BUF_PENDING) | ||
| 242 | wait_for_completion(&bd->filled); | ||
| 243 | } | ||
| 244 | |||
| 245 | rng_unmap_ctx(&rng_ctx); | ||
| 246 | } | ||
| 247 | |||
| 248 | static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id) | ||
| 249 | { | ||
| 250 | struct buf_data *bd = &ctx->bufs[buf_id]; | ||
| 251 | |||
| 252 | rng_create_job_desc(ctx, buf_id); | ||
| 253 | atomic_set(&bd->empty, BUF_EMPTY); | ||
| 254 | submit_job(ctx, buf_id == ctx->current_buf); | ||
| 255 | wait_for_completion(&bd->filled); | ||
| 256 | } | ||
| 257 | |||
| 258 | static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) | ||
| 259 | { | ||
| 260 | ctx->jrdev = jrdev; | ||
| 261 | rng_create_sh_desc(ctx); | ||
| 262 | ctx->current_buf = 0; | ||
| 263 | ctx->cur_buf_idx = 0; | ||
| 264 | caam_init_buf(ctx, 0); | ||
| 265 | caam_init_buf(ctx, 1); | ||
| 266 | } | ||
| 267 | |||
| 268 | static struct hwrng caam_rng = { | ||
| 269 | .name = "rng-caam", | ||
| 270 | .cleanup = caam_cleanup, | ||
| 271 | .read = caam_read, | ||
| 272 | }; | ||
| 273 | |||
| 274 | static void __exit caam_rng_exit(void) | ||
| 275 | { | ||
| 276 | hwrng_unregister(&caam_rng); | ||
| 277 | } | ||
| 278 | |||
| 279 | static int __init caam_rng_init(void) | ||
| 280 | { | ||
| 281 | struct device_node *dev_node; | ||
| 282 | struct platform_device *pdev; | ||
| 283 | struct device *ctrldev; | ||
| 284 | struct caam_drv_private *priv; | ||
| 285 | |||
| 286 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 287 | if (!dev_node) | ||
| 288 | return -ENODEV; | ||
| 289 | |||
| 290 | pdev = of_find_device_by_node(dev_node); | ||
| 291 | if (!pdev) | ||
| 292 | return -ENODEV; | ||
| 293 | |||
| 294 | ctrldev = &pdev->dev; | ||
| 295 | priv = dev_get_drvdata(ctrldev); | ||
| 296 | of_node_put(dev_node); | ||
| 297 | |||
| 298 | caam_init_rng(&rng_ctx, priv->jrdev[0]); | ||
| 299 | |||
| 300 | dev_info(priv->jrdev[0], "registering rng-caam\n"); | ||
| 301 | return hwrng_register(&caam_rng); | ||
| 302 | } | ||
| 303 | |||
| 304 | module_init(caam_rng_init); | ||
| 305 | module_exit(caam_rng_exit); | ||
| 306 | |||
| 307 | MODULE_LICENSE("GPL"); | ||
| 308 | MODULE_DESCRIPTION("FSL CAAM support for hw_random API"); | ||
| 309 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); | ||
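Once hwrng_register() succeeds, the hw_random core exposes the device as /dev/hwrng, and a userspace daemon such as rngd can feed it into the kernel entropy pool. A minimal consumer sketch, assuming the caam source is the active one on the target:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];
	int i, fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("hwrng");
		return 1;
	}
	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02x", buf[i]);	/* hex-dump 32 random bytes */
	putchar('\n');
	close(fd);
	return 0;
}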
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index a63bc65fae86..762aeff626ac 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/device.h> | 11 | #include <linux/device.h> |
| 12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
| 13 | #include <linux/crypto.h> | 13 | #include <linux/crypto.h> |
| 14 | #include <linux/hash.h> | ||
| 14 | #include <linux/hw_random.h> | 15 | #include <linux/hw_random.h> |
| 15 | #include <linux/of_platform.h> | 16 | #include <linux/of_platform.h> |
| 16 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
| @@ -33,5 +34,6 @@ | |||
| 33 | #include <crypto/authenc.h> | 34 | #include <crypto/authenc.h> |
| 34 | #include <crypto/scatterwalk.h> | 35 | #include <crypto/scatterwalk.h> |
| 35 | #include <crypto/internal/skcipher.h> | 36 | #include <crypto/internal/skcipher.h> |
| 37 | #include <crypto/internal/hash.h> | ||
| 36 | 38 | ||
| 37 | #endif /* !defined(CAAM_COMPAT_H) */ | 39 | #endif /* !defined(CAAM_COMPAT_H) */ |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 77557ebcd337..414ba20c05a1 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
| @@ -2,13 +2,16 @@ | |||
| 2 | * CAAM control-plane driver backend | 2 | * CAAM control-plane driver backend |
| 3 | * Controller-level driver, kernel property detection, initialization | 3 | * Controller-level driver, kernel property detection, initialization |
| 4 | * | 4 | * |
| 5 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include "compat.h" | 8 | #include "compat.h" |
| 9 | #include "regs.h" | 9 | #include "regs.h" |
| 10 | #include "intern.h" | 10 | #include "intern.h" |
| 11 | #include "jr.h" | 11 | #include "jr.h" |
| 12 | #include "desc_constr.h" | ||
| 13 | #include "error.h" | ||
| 14 | #include "ctrl.h" | ||
| 12 | 15 | ||
| 13 | static int caam_remove(struct platform_device *pdev) | 16 | static int caam_remove(struct platform_device *pdev) |
| 14 | { | 17 | { |
| @@ -43,10 +46,154 @@ static int caam_remove(struct platform_device *pdev) | |||
| 43 | return ret; | 46 | return ret; |
| 44 | } | 47 | } |
| 45 | 48 | ||
| 49 | /* | ||
| 50 | * Descriptor to instantiate RNG State Handle 0 in normal mode and | ||
| 51 | * load the JDKEK, TDKEK and TDSK registers | ||
| 52 | */ | ||
| 53 | static void build_instantiation_desc(u32 *desc) | ||
| 54 | { | ||
| 55 | u32 *jump_cmd; | ||
| 56 | |||
| 57 | init_job_desc(desc, 0); | ||
| 58 | |||
| 59 | /* INIT RNG in non-test mode */ | ||
| 60 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | ||
| 61 | OP_ALG_AS_INIT); | ||
| 62 | |||
| 63 | /* wait for done */ | ||
| 64 | jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); | ||
| 65 | set_jump_tgt_here(desc, jump_cmd); | ||
| 66 | |||
| 67 | /* | ||
| 68 | * load 1 to clear written reg: | ||
| 69 | * resets the done interrupt and returns the RNG to idle. | ||
| 70 | */ | ||
| 71 | append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); | ||
| 72 | |||
| 73 | /* generate secure keys (non-test) */ | ||
| 74 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | ||
| 75 | OP_ALG_RNG4_SK); | ||
| 76 | } | ||
| 77 | |||
| 78 | struct instantiate_result { | ||
| 79 | struct completion completion; | ||
| 80 | int err; | ||
| 81 | }; | ||
| 82 | |||
| 83 | static void rng4_init_done(struct device *dev, u32 *desc, u32 err, | ||
| 84 | void *context) | ||
| 85 | { | ||
| 86 | struct instantiate_result *instantiation = context; | ||
| 87 | |||
| 88 | if (err) { | ||
| 89 | char tmp[CAAM_ERROR_STR_MAX]; | ||
| 90 | |||
| 91 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
| 92 | } | ||
| 93 | |||
| 94 | instantiation->err = err; | ||
| 95 | complete(&instantiation->completion); | ||
| 96 | } | ||
| 97 | |||
| 98 | static int instantiate_rng(struct device *jrdev) | ||
| 99 | { | ||
| 100 | struct instantiate_result instantiation; | ||
| 101 | |||
| 102 | dma_addr_t desc_dma; | ||
| 103 | u32 *desc; | ||
| 104 | int ret; | ||
| 105 | |||
| 106 | desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); | ||
| 107 | if (!desc) { | ||
| 108 | dev_err(jrdev, "cannot allocate RNG init descriptor memory\n"); | ||
| 109 | return -ENOMEM; | ||
| 110 | } | ||
| 111 | |||
| 112 | build_instantiation_desc(desc); | ||
| 113 | desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); | ||
| 114 | init_completion(&instantiation.completion); | ||
| 115 | ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation); | ||
| 116 | if (!ret) { | ||
| 117 | wait_for_completion_interruptible(&instantiation.completion); | ||
| 118 | ret = instantiation.err; | ||
| 119 | if (ret) | ||
| 120 | dev_err(jrdev, "unable to instantiate RNG\n"); | ||
| 121 | } | ||
| 122 | |||
| 123 | dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE); | ||
| 124 | |||
| 125 | kfree(desc); | ||
| 126 | |||
| 127 | return ret; | ||
| 128 | } | ||
| 129 | |||
| 130 | /* | ||
| 131 | * By default, the TRNG runs for 200 clocks per sample; | ||
| 132 | * 800 clocks per sample generates better entropy. | ||
| 133 | */ | ||
| 134 | static void kick_trng(struct platform_device *pdev) | ||
| 135 | { | ||
| 136 | struct device *ctrldev = &pdev->dev; | ||
| 137 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | ||
| 138 | struct caam_full __iomem *topregs; | ||
| 139 | struct rng4tst __iomem *r4tst; | ||
| 140 | u32 val; | ||
| 141 | |||
| 142 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
| 143 | r4tst = &topregs->ctrl.r4tst[0]; | ||
| 144 | |||
| 145 | /* put RNG4 into program mode */ | ||
| 146 | setbits32(&r4tst->rtmctl, RTMCTL_PRGM); | ||
| 147 | /* 800 clocks per sample */ | ||
| 148 | val = rd_reg32(&r4tst->rtsdctl); | ||
| 149 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | (800 << RTSDCTL_ENT_DLY_SHIFT); | ||
| 150 | wr_reg32(&r4tst->rtsdctl, val); | ||
| 151 | /* min. freq. count */ | ||
| 152 | wr_reg32(&r4tst->rtfrqmin, 400); | ||
| 153 | /* max. freq. count */ | ||
| 154 | wr_reg32(&r4tst->rtfrqmax, 6400); | ||
| 155 | /* put RNG4 into run mode */ | ||
| 156 | clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); | ||
| 157 | } | ||
| 158 | |||
| 159 | /** | ||
| 160 | * caam_get_era() - Return the ERA of the SEC on the SoC, based | ||
| 161 | * on the SEC_VID register. | ||
| 162 | * @caam_id: the value of the SEC_VID register | ||
| 163 | * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown. | ||
| 164 | */ | ||
| 165 | int caam_get_era(u64 caam_id) | ||
| 166 | { | ||
| 167 | struct sec_vid *sec_vid = (struct sec_vid *)&caam_id; | ||
| 168 | static const struct { | ||
| 169 | u16 ip_id; | ||
| 170 | u8 maj_rev; | ||
| 171 | u8 era; | ||
| 172 | } caam_eras[] = { | ||
| 173 | {0x0A10, 1, 1}, | ||
| 174 | {0x0A10, 2, 2}, | ||
| 175 | {0x0A12, 1, 3}, | ||
| 176 | {0x0A14, 1, 3}, | ||
| 177 | {0x0A14, 2, 4}, | ||
| 178 | {0x0A16, 1, 4}, | ||
| 179 | {0x0A11, 1, 4} | ||
| 180 | }; | ||
| 181 | int i; | ||
| 182 | |||
| 183 | for (i = 0; i < ARRAY_SIZE(caam_eras); i++) | ||
| 184 | if (caam_eras[i].ip_id == sec_vid->ip_id && | ||
| 185 | caam_eras[i].maj_rev == sec_vid->maj_rev) | ||
| 186 | return caam_eras[i].era; | ||
| 187 | |||
| 188 | return -ENOTSUPP; | ||
| 189 | } | ||
| 190 | EXPORT_SYMBOL(caam_get_era); | ||
| 191 | |||
| 46 | /* Probe routine for CAAM top (controller) level */ | 192 | /* Probe routine for CAAM top (controller) level */ |
| 47 | static int caam_probe(struct platform_device *pdev) | 193 | static int caam_probe(struct platform_device *pdev) |
| 48 | { | 194 | { |
| 49 | int ring, rspec; | 195 | int ret, ring, rspec; |
| 196 | u64 caam_id; | ||
| 50 | struct device *dev; | 197 | struct device *dev; |
| 51 | struct device_node *nprop, *np; | 198 | struct device_node *nprop, *np; |
| 52 | struct caam_ctrl __iomem *ctrl; | 199 | struct caam_ctrl __iomem *ctrl; |
| @@ -82,13 +229,18 @@ static int caam_probe(struct platform_device *pdev) | |||
| 82 | 229 | ||
| 83 | /* | 230 | /* |
| 84 | * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, | 231 | * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, |
| 85 | * 36-bit pointers in master configuration register | 232 | * long pointers in master configuration register |
| 86 | */ | 233 | */ |
| 87 | setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | | 234 | setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | |
| 88 | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); | 235 | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); |
| 89 | 236 | ||
| 90 | if (sizeof(dma_addr_t) == sizeof(u64)) | 237 | if (sizeof(dma_addr_t) == sizeof(u64)) |
| 91 | dma_set_mask(dev, DMA_BIT_MASK(36)); | 238 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) |
| 239 | dma_set_mask(dev, DMA_BIT_MASK(40)); | ||
| 240 | else | ||
| 241 | dma_set_mask(dev, DMA_BIT_MASK(36)); | ||
| 242 | else | ||
| 243 | dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
| 92 | 244 | ||
| 93 | /* | 245 | /* |
| 94 | * Detect and enable JobRs | 246 | * Detect and enable JobRs |
| @@ -141,14 +293,29 @@ static int caam_probe(struct platform_device *pdev) | |||
| 141 | return -ENOMEM; | 293 | return -ENOMEM; |
| 142 | } | 294 | } |
| 143 | 295 | ||
| 296 | /* | ||
| 297 | * RNG4 based SECs (v5+) need special initialization prior | ||
| 298 | * to executing any descriptors | ||
| 299 | */ | ||
| 300 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) { | ||
| 301 | kick_trng(pdev); | ||
| 302 | ret = instantiate_rng(ctrlpriv->jrdev[0]); | ||
| 303 | if (ret) { | ||
| 304 | caam_remove(pdev); | ||
| 305 | return ret; | ||
| 306 | } | ||
| 307 | } | ||
| 308 | |||
| 144 | /* NOTE: RTIC detection ought to go here, around Si time */ | 309 | /* NOTE: RTIC detection ought to go here, around Si time */ |
| 145 | 310 | ||
| 146 | /* Initialize queue allocator lock */ | 311 | /* Initialize queue allocator lock */ |
| 147 | spin_lock_init(&ctrlpriv->jr_alloc_lock); | 312 | spin_lock_init(&ctrlpriv->jr_alloc_lock); |
| 148 | 313 | ||
| 314 | caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id); | ||
| 315 | |||
| 149 | /* Report "alive" for developer to see */ | 316 | /* Report "alive" for developer to see */ |
| 150 | dev_info(dev, "device ID = 0x%016llx\n", | 317 | dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, |
| 151 | rd_reg64(&topregs->ctrl.perfmon.caam_id)); | 318 | caam_get_era(caam_id)); |
| 152 | dev_info(dev, "job rings = %d, qi = %d\n", | 319 | dev_info(dev, "job rings = %d, qi = %d\n", |
| 153 | ctrlpriv->total_jobrs, ctrlpriv->qi_present); | 320 | ctrlpriv->total_jobrs, ctrlpriv->qi_present); |
| 154 | 321 | ||
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h new file mode 100644 index 000000000000..980d44eaaf40 --- /dev/null +++ b/drivers/crypto/caam/ctrl.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | /* | ||
| 2 | * CAAM control-plane driver backend public-level include definitions | ||
| 3 | * | ||
| 4 | * Copyright 2012 Freescale Semiconductor, Inc. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef CTRL_H | ||
| 8 | #define CTRL_H | ||
| 9 | |||
| 10 | /* Prototypes for backend-level services exposed to APIs */ | ||
| 11 | int caam_get_era(u64 caam_id); | ||
| 12 | |||
| 13 | #endif /* CTRL_H */ | ||
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index a17c2958dab1..f7f833be8c67 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
| @@ -8,6 +8,16 @@ | |||
| 8 | #ifndef DESC_H | 8 | #ifndef DESC_H |
| 9 | #define DESC_H | 9 | #define DESC_H |
| 10 | 10 | ||
| 11 | struct sec4_sg_entry { | ||
| 12 | u64 ptr; | ||
| 13 | #define SEC4_SG_LEN_FIN 0x40000000 | ||
| 14 | #define SEC4_SG_LEN_EXT 0x80000000 | ||
| 15 | u32 len; | ||
| 16 | u8 reserved; | ||
| 17 | u8 buf_pool_id; | ||
| 18 | u16 offset; | ||
| 19 | }; | ||
| 20 | |||
| 11 | /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ | 21 | /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ |
| 12 | #define MAX_CAAM_DESCSIZE 64 | 22 | #define MAX_CAAM_DESCSIZE 64 |
| 13 | 23 | ||
| @@ -1162,6 +1172,11 @@ | |||
| 1162 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) | 1172 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) |
| 1163 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) | 1173 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) |
| 1164 | 1174 | ||
| 1175 | /* RNG4 set */ | ||
| 1176 | #define OP_ALG_RNG4_SHIFT 4 | ||
| 1177 | #define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT) | ||
| 1178 | |||
| 1179 | #define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT) | ||
| 1165 | 1180 | ||
| 1166 | #define OP_ALG_AS_SHIFT 2 | 1181 | #define OP_ALG_AS_SHIFT 2 |
| 1167 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) | 1182 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) |
| @@ -1585,20 +1600,4 @@ | |||
| 1585 | #define NFIFOENTRY_PLEN_SHIFT 0 | 1600 | #define NFIFOENTRY_PLEN_SHIFT 0 |
| 1586 | #define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT) | 1601 | #define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT) |
| 1587 | 1602 | ||
| 1588 | /* | ||
| 1589 | * PDB internal definitions | ||
| 1590 | */ | ||
| 1591 | |||
| 1592 | /* IPSec ESP CBC Encap/Decap Options */ | ||
| 1593 | #define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */ | ||
| 1594 | #define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */ | ||
| 1595 | #define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */ | ||
| 1596 | #define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */ | ||
| 1597 | #define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */ | ||
| 1598 | #define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */ | ||
| 1599 | #define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */ | ||
| 1600 | #define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */ | ||
| 1601 | #define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */ | ||
| 1602 | #define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */ | ||
| 1603 | |||
| 1604 | #endif /* DESC_H */ | 1603 | #endif /* DESC_H */ |
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 348b882275f0..c85c1f058401 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * caam descriptor construction helper functions | 2 | * caam descriptor construction helper functions |
| 3 | * | 3 | * |
| 4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 4 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include "desc.h" | 7 | #include "desc.h" |
| @@ -51,7 +51,7 @@ static inline void *sh_desc_pdb(u32 *desc) | |||
| 51 | 51 | ||
| 52 | static inline void init_desc(u32 *desc, u32 options) | 52 | static inline void init_desc(u32 *desc, u32 options) |
| 53 | { | 53 | { |
| 54 | *desc = options | HDR_ONE | 1; | 54 | *desc = (options | HDR_ONE) + 1; |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static inline void init_sh_desc(u32 *desc, u32 options) | 57 | static inline void init_sh_desc(u32 *desc, u32 options) |
| @@ -62,9 +62,9 @@ static inline void init_sh_desc(u32 *desc, u32 options) | |||
| 62 | 62 | ||
| 63 | static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) | 63 | static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) |
| 64 | { | 64 | { |
| 65 | u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1; | 65 | u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; |
| 66 | 66 | ||
| 67 | init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) | | 67 | init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) | |
| 68 | options); | 68 | options); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| @@ -117,6 +117,15 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, | |||
| 117 | append_ptr(desc, ptr); | 117 | append_ptr(desc, ptr); |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | /* Write length after pointer, rather than inside command */ | ||
| 121 | static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr, | ||
| 122 | unsigned int len, u32 command) | ||
| 123 | { | ||
| 124 | append_cmd(desc, command); | ||
| 125 | append_ptr(desc, ptr); | ||
| 126 | append_cmd(desc, len); | ||
| 127 | } | ||
| 128 | |||
| 120 | static inline void append_cmd_data(u32 *desc, void *data, int len, | 129 | static inline void append_cmd_data(u32 *desc, void *data, int len, |
| 121 | u32 command) | 130 | u32 command) |
| 122 | { | 131 | { |
| @@ -166,13 +175,22 @@ static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \ | |||
| 166 | append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ | 175 | append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ |
| 167 | } | 176 | } |
| 168 | APPEND_CMD_PTR(key, KEY) | 177 | APPEND_CMD_PTR(key, KEY) |
| 169 | APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR) | ||
| 170 | APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR) | ||
| 171 | APPEND_CMD_PTR(load, LOAD) | 178 | APPEND_CMD_PTR(load, LOAD) |
| 172 | APPEND_CMD_PTR(store, STORE) | 179 | APPEND_CMD_PTR(store, STORE) |
| 173 | APPEND_CMD_PTR(fifo_load, FIFO_LOAD) | 180 | APPEND_CMD_PTR(fifo_load, FIFO_LOAD) |
| 174 | APPEND_CMD_PTR(fifo_store, FIFO_STORE) | 181 | APPEND_CMD_PTR(fifo_store, FIFO_STORE) |
| 175 | 182 | ||
| 183 | #define APPEND_SEQ_PTR_INTLEN(cmd, op) \ | ||
| 184 | static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \ | ||
| 185 | unsigned int len, \ | ||
| 186 | u32 options) \ | ||
| 187 | { \ | ||
| 188 | PRINT_POS; \ | ||
| 189 | append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \ | ||
| 190 | } | ||
| 191 | APPEND_SEQ_PTR_INTLEN(in, IN) | ||
| 192 | APPEND_SEQ_PTR_INTLEN(out, OUT) | ||
| 193 | |||
| 176 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ | 194 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ |
| 177 | static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | 195 | static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ |
| 178 | unsigned int len, u32 options) \ | 196 | unsigned int len, u32 options) \ |
| @@ -183,6 +201,33 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | |||
| 183 | APPEND_CMD_PTR_TO_IMM(load, LOAD); | 201 | APPEND_CMD_PTR_TO_IMM(load, LOAD); |
| 184 | APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); | 202 | APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); |
| 185 | 203 | ||
| 204 | #define APPEND_CMD_PTR_EXTLEN(cmd, op) \ | ||
| 205 | static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \ | ||
| 206 | unsigned int len, u32 options) \ | ||
| 207 | { \ | ||
| 208 | PRINT_POS; \ | ||
| 209 | append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \ | ||
| 210 | } | ||
| 211 | APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR) | ||
| 212 | APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR) | ||
| 213 | |||
| 214 | /* | ||
| 215 | * Determine whether to store length internally or externally depending on | ||
| 216 | * the size of its type | ||
| 217 | */ | ||
| 218 | #define APPEND_CMD_PTR_LEN(cmd, op, type) \ | ||
| 219 | static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \ | ||
| 220 | type len, u32 options) \ | ||
| 221 | { \ | ||
| 222 | PRINT_POS; \ | ||
| 223 | if (sizeof(type) > sizeof(u16)) \ | ||
| 224 | append_##cmd##_extlen(desc, ptr, len, options); \ | ||
| 225 | else \ | ||
| 226 | append_##cmd##_intlen(desc, ptr, len, options); \ | ||
| 227 | } | ||
| 228 | APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32) | ||
| 229 | APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32) | ||
| 230 | |||
| 186 | /* | 231 | /* |
| 187 | * 2nd variant for commands whose specified immediate length differs | 232 | * 2nd variant for commands whose specified immediate length differs |
| 188 | * from length of immediate data provided, e.g., split keys | 233 | * from length of immediate data provided, e.g., split keys |
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 7e2d54bffad6..9955ed9643e6 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
| @@ -39,18 +39,20 @@ static void report_ccb_status(u32 status, char *outstr) | |||
| 39 | char *cha_id_list[] = { | 39 | char *cha_id_list[] = { |
| 40 | "", | 40 | "", |
| 41 | "AES", | 41 | "AES", |
| 42 | "DES, 3DES", | 42 | "DES", |
| 43 | "ARC4", | 43 | "ARC4", |
| 44 | "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512", | 44 | "MDHA", |
| 45 | "RNG", | 45 | "RNG", |
| 46 | "SNOW f8", | 46 | "SNOW f8", |
| 47 | "Kasumi f8, f9", | 47 | "Kasumi f8/9", |
| 48 | "All Public Key Algorithms", | 48 | "PKHA", |
| 49 | "CRC", | 49 | "CRCA", |
| 50 | "SNOW f9", | 50 | "SNOW f9", |
| 51 | "ZUCE", | ||
| 52 | "ZUCA", | ||
| 51 | }; | 53 | }; |
| 52 | char *err_id_list[] = { | 54 | char *err_id_list[] = { |
| 53 | "None. No error.", | 55 | "No error.", |
| 54 | "Mode error.", | 56 | "Mode error.", |
| 55 | "Data size error.", | 57 | "Data size error.", |
| 56 | "Key size error.", | 58 | "Key size error.", |
| @@ -67,6 +69,20 @@ static void report_ccb_status(u32 status, char *outstr) | |||
| 67 | "Invalid CHA combination was selected", | 69 | "Invalid CHA combination was selected", |
| 68 | "Invalid CHA selected.", | 70 | "Invalid CHA selected.", |
| 69 | }; | 71 | }; |
| 72 | char *rng_err_id_list[] = { | ||
| 73 | "", | ||
| 74 | "", | ||
| 75 | "", | ||
| 76 | "Instantiate", | ||
| 77 | "Not instantiated", | ||
| 78 | "Test instantiate", | ||
| 79 | "Prediction resistance", | ||
| 80 | "", | ||
| 81 | "Prediction resistance and test request", | ||
| 82 | "Uninstantiate", | ||
| 83 | "", | ||
| 84 | "Secure key generation", | ||
| 85 | }; | ||
| 70 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> | 86 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> |
| 71 | JRSTA_CCBERR_CHAID_SHIFT; | 87 | JRSTA_CCBERR_CHAID_SHIFT; |
| 72 | u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; | 88 | u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; |
| @@ -81,7 +97,13 @@ static void report_ccb_status(u32 status, char *outstr) | |||
| 81 | cha_id, sizeof("ff")); | 97 | cha_id, sizeof("ff")); |
| 82 | } | 98 | } |
| 83 | 99 | ||
| 84 | if (err_id < ARRAY_SIZE(err_id_list)) { | 100 | if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG && |
| 101 | err_id < ARRAY_SIZE(rng_err_id_list) && | ||
| 102 | strlen(rng_err_id_list[err_id])) { | ||
| 103 | /* RNG-only error */ | ||
| 104 | SPRINTFCAT(outstr, "%s", rng_err_id_list[err_id], | ||
| 105 | strlen(rng_err_id_list[err_id])); | ||
| 106 | } else if (err_id < ARRAY_SIZE(err_id_list)) { | ||
| 85 | SPRINTFCAT(outstr, "%s", err_id_list[err_id], | 107 | SPRINTFCAT(outstr, "%s", err_id_list[err_id], |
| 86 | strlen(err_id_list[err_id])); | 108 | strlen(err_id_list[err_id])); |
| 87 | } else { | 109 | } else { |
| @@ -101,10 +123,10 @@ static void report_deco_status(u32 status, char *outstr) | |||
| 101 | u8 value; | 123 | u8 value; |
| 102 | char *error_text; | 124 | char *error_text; |
| 103 | } desc_error_list[] = { | 125 | } desc_error_list[] = { |
| 104 | { 0x00, "None. No error." }, | 126 | { 0x00, "No error." }, |
| 105 | { 0x01, "SGT Length Error. The descriptor is trying to read " | 127 | { 0x01, "SGT Length Error. The descriptor is trying to read " |
| 106 | "more data than is contained in the SGT table." }, | 128 | "more data than is contained in the SGT table." }, |
| 107 | { 0x02, "Reserved." }, | 129 | { 0x02, "SGT Null Entry Error." }, |
| 108 | { 0x03, "Job Ring Control Error. There is a bad value in the " | 130 | { 0x03, "Job Ring Control Error. There is a bad value in the " |
| 109 | "Job Ring Control register." }, | 131 | "Job Ring Control register." }, |
| 110 | { 0x04, "Invalid Descriptor Command. The Descriptor Command " | 132 | { 0x04, "Invalid Descriptor Command. The Descriptor Command " |
| @@ -116,7 +138,7 @@ static void report_deco_status(u32 status, char *outstr) | |||
| 116 | { 0x09, "Invalid OPERATION Command" }, | 138 | { 0x09, "Invalid OPERATION Command" }, |
| 117 | { 0x0A, "Invalid FIFO LOAD Command" }, | 139 | { 0x0A, "Invalid FIFO LOAD Command" }, |
| 118 | { 0x0B, "Invalid FIFO STORE Command" }, | 140 | { 0x0B, "Invalid FIFO STORE Command" }, |
| 119 | { 0x0C, "Invalid MOVE Command" }, | 141 | { 0x0C, "Invalid MOVE/MOVE_LEN Command" }, |
| 120 | { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is " | 142 | { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is " |
| 121 | "invalid because the target is not a Job Header " | 143 | "invalid because the target is not a Job Header " |
| 122 | "Command, or the jump is from a Trusted Descriptor to " | 144 | "Command, or the jump is from a Trusted Descriptor to " |
| @@ -166,6 +188,8 @@ static void report_deco_status(u32 status, char *outstr) | |||
| 166 | "(input frame; block ciphers) and IPsec decap (output " | 188 | "(input frame; block ciphers) and IPsec decap (output " |
| 167 | "frame, when doing the next header byte update) and " | 189 | "frame, when doing the next header byte update) and " |
| 168 | "DCRC (output frame)." }, | 190 | "DCRC (output frame)." }, |
| 191 | { 0x23, "Read Input Frame error" }, | ||
| 192 | { 0x24, "JDKEK, TDKEK or TDSK not loaded error" }, | ||
| 169 | { 0x80, "DNR (do not run) error" }, | 193 | { 0x80, "DNR (do not run) error" }, |
| 170 | { 0x81, "undefined protocol command" }, | 194 | { 0x81, "undefined protocol command" }, |
| 171 | { 0x82, "invalid setting in PDB" }, | 195 | { 0x82, "invalid setting in PDB" }, |
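
Unlike the CCB tables, the DECO error list is sparse -- ids run 0x00-0x24 and then jump to 0x80 for protocol-level errors -- so it is declared as {value, text} pairs and searched linearly rather than indexed. A minimal sketch of the lookup, assuming desc_error_list as declared above:

/* Sketch: linear scan of the sparse DECO error table. */
static const char *deco_err_string(u8 err_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
                if (desc_error_list[i].value == err_id)
                        return desc_error_list[i].error_text;

        return "unidentified DECO error";
}
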
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index a34be01b0b29..5cd4c1b268a1 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
| @@ -43,7 +43,7 @@ struct caam_drv_private_jr { | |||
| 43 | struct device *parentdev; /* points back to controller dev */ | 43 | struct device *parentdev; /* points back to controller dev */ |
| 44 | int ridx; | 44 | int ridx; |
| 45 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | 45 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
| 46 | struct tasklet_struct irqtask[NR_CPUS]; | 46 | struct tasklet_struct irqtask; |
| 47 | int irq; /* One per queue */ | 47 | int irq; /* One per queue */ |
| 48 | int assign; /* busy/free */ | 48 | int assign; /* busy/free */ |
| 49 | 49 | ||
| @@ -86,10 +86,10 @@ struct caam_drv_private { | |||
| 86 | 86 | ||
| 87 | /* which jr allocated to scatterlist crypto */ | 87 | /* which jr allocated to scatterlist crypto */ |
| 88 | atomic_t tfm_count ____cacheline_aligned; | 88 | atomic_t tfm_count ____cacheline_aligned; |
| 89 | int num_jrs_for_algapi; | ||
| 90 | struct device **algapi_jr; | ||
| 91 | /* list of registered crypto algorithms (mk generic context handle?) */ | 89 | /* list of registered crypto algorithms (mk generic context handle?) */ |
| 92 | struct list_head alg_list; | 90 | struct list_head alg_list; |
| 91 | /* list of registered hash algorithms (mk generic context handle?) */ | ||
| 92 | struct list_head hash_list; | ||
| 93 | 93 | ||
| 94 | /* | 94 | /* |
| 95 | * debugfs entries for developer view into driver/device | 95 | * debugfs entries for developer view into driver/device |
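
Two structural changes in intern.h: each job ring keeps a single tasklet instead of an NR_CPUS-sized array (a given tasklet never runs concurrently with itself, and each ring has exactly one IRQ, so only one slot of the old array was ever exercised), and the controller-private struct gains a hash_list so the new hash support can track its registered algorithms separately from alg_list. The tasklet side reduces to a sketch like this:

/* One tasklet per ring: initialized once at probe time. Scheduling an
 * already-scheduled tasklet is a no-op, and the kernel guarantees a
 * tasklet never runs on two CPUs at once. */
tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

/* later, from caam_jr_interrupt(), on whichever CPU took the IRQ: */
tasklet_schedule(&jrp->irqtask);
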
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 340fa322c0f0..53c8c51d5881 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * CAAM/SEC 4.x transport/backend driver | 2 | * CAAM/SEC 4.x transport/backend driver |
| 3 | * JobR backend functionality | 3 | * JobR backend functionality |
| 4 | * | 4 | * |
| 5 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include "compat.h" | 8 | #include "compat.h" |
| @@ -43,7 +43,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | |||
| 43 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); | 43 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); |
| 44 | 44 | ||
| 45 | preempt_disable(); | 45 | preempt_disable(); |
| 46 | tasklet_schedule(&jrp->irqtask[smp_processor_id()]); | 46 | tasklet_schedule(&jrp->irqtask); |
| 47 | preempt_enable(); | 47 | preempt_enable(); |
| 48 | 48 | ||
| 49 | return IRQ_HANDLED; | 49 | return IRQ_HANDLED; |
| @@ -58,17 +58,16 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
| 58 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); | 58 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); |
| 59 | u32 *userdesc, userstatus; | 59 | u32 *userdesc, userstatus; |
| 60 | void *userarg; | 60 | void *userarg; |
| 61 | unsigned long flags; | ||
| 62 | 61 | ||
| 63 | spin_lock_irqsave(&jrp->outlock, flags); | 62 | while (rd_reg32(&jrp->rregs->outring_used)) { |
| 64 | 63 | ||
| 65 | head = ACCESS_ONCE(jrp->head); | 64 | head = ACCESS_ONCE(jrp->head); |
| 66 | sw_idx = tail = jrp->tail; | ||
| 67 | 65 | ||
| 68 | while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && | 66 | spin_lock_bh(&jrp->outlock); |
| 69 | rd_reg32(&jrp->rregs->outring_used)) { | ||
| 70 | 67 | ||
| 68 | sw_idx = tail = jrp->tail; | ||
| 71 | hw_idx = jrp->out_ring_read_index; | 69 | hw_idx = jrp->out_ring_read_index; |
| 70 | |||
| 72 | for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { | 71 | for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { |
| 73 | sw_idx = (tail + i) & (JOBR_DEPTH - 1); | 72 | sw_idx = (tail + i) & (JOBR_DEPTH - 1); |
| 74 | 73 | ||
| @@ -95,7 +94,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
| 95 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; | 94 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; |
| 96 | userstatus = jrp->outring[hw_idx].jrstatus; | 95 | userstatus = jrp->outring[hw_idx].jrstatus; |
| 97 | 96 | ||
| 98 | smp_mb(); | 97 | /* set done */ |
| 98 | wr_reg32(&jrp->rregs->outring_rmvd, 1); | ||
| 99 | 99 | ||
| 100 | jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & | 100 | jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & |
| 101 | (JOBR_DEPTH - 1); | 101 | (JOBR_DEPTH - 1); |
| @@ -115,22 +115,12 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
| 115 | jrp->tail = tail; | 115 | jrp->tail = tail; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | /* set done */ | 118 | spin_unlock_bh(&jrp->outlock); |
| 119 | wr_reg32(&jrp->rregs->outring_rmvd, 1); | ||
| 120 | |||
| 121 | spin_unlock_irqrestore(&jrp->outlock, flags); | ||
| 122 | 119 | ||
| 123 | /* Finally, execute user's callback */ | 120 | /* Finally, execute user's callback */ |
| 124 | usercall(dev, userdesc, userstatus, userarg); | 121 | usercall(dev, userdesc, userstatus, userarg); |
| 125 | |||
| 126 | spin_lock_irqsave(&jrp->outlock, flags); | ||
| 127 | |||
| 128 | head = ACCESS_ONCE(jrp->head); | ||
| 129 | sw_idx = tail = jrp->tail; | ||
| 130 | } | 122 | } |
| 131 | 123 | ||
| 132 | spin_unlock_irqrestore(&jrp->outlock, flags); | ||
| 133 | |||
| 134 | /* reenable / unmask IRQs */ | 124 | /* reenable / unmask IRQs */ |
| 135 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | 125 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); |
| 136 | } | 126 | } |
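
The rewritten dequeue loop keys off the hardware's outring_used count instead of software ring arithmetic, narrows the critical section to the index bookkeeping, acknowledges each completion to the hardware (outring_rmvd) while still inside the lock, and invokes the user callback with the lock dropped. spin_lock_bh() replaces the irqsave variant because the only contexts touching outlock run at softirq level -- the hard IRQ handler merely schedules the tasklet. The resulting shape, with the index-matching detail elided:

/* Shape of the reworked caam_jr_dequeue() (detail elided). */
while (rd_reg32(&jrp->rregs->outring_used)) {
        spin_lock_bh(&jrp->outlock);

        /* ... find sw_idx whose desc_addr_dma matches outring[hw_idx],
         *     unmap the descriptor, snapshot callback/desc/status ... */

        wr_reg32(&jrp->rregs->outring_rmvd, 1); /* HW: slot consumed */
        /* ... advance out_ring_read_index, retire tail entries ... */

        spin_unlock_bh(&jrp->outlock);

        usercall(dev, userdesc, userstatus, userarg); /* lock dropped */
}
clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); /* re-enable ring IRQ */
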
| @@ -148,23 +138,22 @@ int caam_jr_register(struct device *ctrldev, struct device **rdev) | |||
| 148 | { | 138 | { |
| 149 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | 139 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
| 150 | struct caam_drv_private_jr *jrpriv = NULL; | 140 | struct caam_drv_private_jr *jrpriv = NULL; |
| 151 | unsigned long flags; | ||
| 152 | int ring; | 141 | int ring; |
| 153 | 142 | ||
| 154 | /* Lock, if free ring - assign, unlock */ | 143 | /* Lock, if free ring - assign, unlock */ |
| 155 | spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags); | 144 | spin_lock(&ctrlpriv->jr_alloc_lock); |
| 156 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | 145 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { |
| 157 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); | 146 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); |
| 158 | if (jrpriv->assign == JOBR_UNASSIGNED) { | 147 | if (jrpriv->assign == JOBR_UNASSIGNED) { |
| 159 | jrpriv->assign = JOBR_ASSIGNED; | 148 | jrpriv->assign = JOBR_ASSIGNED; |
| 160 | *rdev = ctrlpriv->jrdev[ring]; | 149 | *rdev = ctrlpriv->jrdev[ring]; |
| 161 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | 150 | spin_unlock(&ctrlpriv->jr_alloc_lock); |
| 162 | return ring; | 151 | return ring; |
| 163 | } | 152 | } |
| 164 | } | 153 | } |
| 165 | 154 | ||
| 166 | /* If assigned, write dev where caller needs it */ | 155 | /* If assigned, write dev where caller needs it */ |
| 167 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | 156 | spin_unlock(&ctrlpriv->jr_alloc_lock); |
| 168 | *rdev = NULL; | 157 | *rdev = NULL; |
| 169 | 158 | ||
| 170 | return -ENODEV; | 159 | return -ENODEV; |
| @@ -182,7 +171,6 @@ int caam_jr_deregister(struct device *rdev) | |||
| 182 | { | 171 | { |
| 183 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); | 172 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); |
| 184 | struct caam_drv_private *ctrlpriv; | 173 | struct caam_drv_private *ctrlpriv; |
| 185 | unsigned long flags; | ||
| 186 | 174 | ||
| 187 | /* Get the owning controller's private space */ | 175 | /* Get the owning controller's private space */ |
| 188 | ctrlpriv = dev_get_drvdata(jrpriv->parentdev); | 176 | ctrlpriv = dev_get_drvdata(jrpriv->parentdev); |
| @@ -195,9 +183,9 @@ int caam_jr_deregister(struct device *rdev) | |||
| 195 | return -EBUSY; | 183 | return -EBUSY; |
| 196 | 184 | ||
| 197 | /* Release ring */ | 185 | /* Release ring */ |
| 198 | spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags); | 186 | spin_lock(&ctrlpriv->jr_alloc_lock); |
| 199 | jrpriv->assign = JOBR_UNASSIGNED; | 187 | jrpriv->assign = JOBR_UNASSIGNED; |
| 200 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | 188 | spin_unlock(&ctrlpriv->jr_alloc_lock); |
| 201 | 189 | ||
| 202 | return 0; | 190 | return 0; |
| 203 | } | 191 | } |
| @@ -238,7 +226,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
| 238 | { | 226 | { |
| 239 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | 227 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
| 240 | struct caam_jrentry_info *head_entry; | 228 | struct caam_jrentry_info *head_entry; |
| 241 | unsigned long flags; | ||
| 242 | int head, tail, desc_size; | 229 | int head, tail, desc_size; |
| 243 | dma_addr_t desc_dma; | 230 | dma_addr_t desc_dma; |
| 244 | 231 | ||
| @@ -249,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
| 249 | return -EIO; | 236 | return -EIO; |
| 250 | } | 237 | } |
| 251 | 238 | ||
| 252 | spin_lock_irqsave(&jrp->inplock, flags); | 239 | spin_lock(&jrp->inplock); |
| 253 | 240 | ||
| 254 | head = jrp->head; | 241 | head = jrp->head; |
| 255 | tail = ACCESS_ONCE(jrp->tail); | 242 | tail = ACCESS_ONCE(jrp->tail); |
| 256 | 243 | ||
| 257 | if (!rd_reg32(&jrp->rregs->inpring_avail) || | 244 | if (!rd_reg32(&jrp->rregs->inpring_avail) || |
| 258 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { | 245 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { |
| 259 | spin_unlock_irqrestore(&jrp->inplock, flags); | 246 | spin_unlock(&jrp->inplock); |
| 260 | dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); | 247 | dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); |
| 261 | return -EBUSY; | 248 | return -EBUSY; |
| 262 | } | 249 | } |
| @@ -276,11 +263,9 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
| 276 | (JOBR_DEPTH - 1); | 263 | (JOBR_DEPTH - 1); |
| 277 | jrp->head = (head + 1) & (JOBR_DEPTH - 1); | 264 | jrp->head = (head + 1) & (JOBR_DEPTH - 1); |
| 278 | 265 | ||
| 279 | wmb(); | ||
| 280 | |||
| 281 | wr_reg32(&jrp->rregs->inpring_jobadd, 1); | 266 | wr_reg32(&jrp->rregs->inpring_jobadd, 1); |
| 282 | 267 | ||
| 283 | spin_unlock_irqrestore(&jrp->inplock, flags); | 268 | spin_unlock(&jrp->inplock); |
| 284 | 269 | ||
| 285 | return 0; | 270 | return 0; |
| 286 | } | 271 | } |
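
Enqueue gets the matching treatment: plain spin_lock() suffices since no hard-IRQ path takes inplock, and the explicit wmb() before the inpring_jobadd doorbell disappears -- presumably safe because the rings move to coherent DMA memory later in this patch and wr_reg32()'s I/O accessor already orders prior stores ahead of the register write on the targeted platforms. The gatekeeping logic is worth restating: both hardware input-FIFO headroom and software ring space must be available before a job is accepted.

/* Sketch of the admission check in caam_jr_enqueue(). */
spin_lock(&jrp->inplock);

head = jrp->head;
tail = ACCESS_ONCE(jrp->tail);

if (!rd_reg32(&jrp->rregs->inpring_avail) ||    /* HW FIFO full? */
    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {  /* SW ring full? */
        spin_unlock(&jrp->inplock);
        dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
        return -EBUSY;
}

/* ... fill entinfo[head] and inpring[inp_ring_write_index] ... */
wr_reg32(&jrp->rregs->inpring_jobadd, 1);       /* doorbell: HW may DMA */

spin_unlock(&jrp->inplock);
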
| @@ -337,11 +322,9 @@ static int caam_jr_init(struct device *dev) | |||
| 337 | 322 | ||
| 338 | jrp = dev_get_drvdata(dev); | 323 | jrp = dev_get_drvdata(dev); |
| 339 | 324 | ||
| 340 | /* Connect job ring interrupt handler. */ | 325 | tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); |
| 341 | for_each_possible_cpu(i) | ||
| 342 | tasklet_init(&jrp->irqtask[i], caam_jr_dequeue, | ||
| 343 | (unsigned long)dev); | ||
| 344 | 326 | ||
| 327 | /* Connect job ring interrupt handler. */ | ||
| 345 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, | 328 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, |
| 346 | "caam-jobr", dev); | 329 | "caam-jobr", dev); |
| 347 | if (error) { | 330 | if (error) { |
| @@ -356,10 +339,11 @@ static int caam_jr_init(struct device *dev) | |||
| 356 | if (error) | 339 | if (error) |
| 357 | return error; | 340 | return error; |
| 358 | 341 | ||
| 359 | jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH, | 342 | jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, |
| 360 | GFP_KERNEL | GFP_DMA); | 343 | &inpbusaddr, GFP_KERNEL); |
| 361 | jrp->outring = kzalloc(sizeof(struct jr_outentry) * | 344 | |
| 362 | JOBR_DEPTH, GFP_KERNEL | GFP_DMA); | 345 | jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) * |
| 346 | JOBR_DEPTH, &outbusaddr, GFP_KERNEL); | ||
| 363 | 347 | ||
| 364 | jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, | 348 | jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, |
| 365 | GFP_KERNEL); | 349 | GFP_KERNEL); |
| @@ -375,31 +359,6 @@ static int caam_jr_init(struct device *dev) | |||
| 375 | jrp->entinfo[i].desc_addr_dma = !0; | 359 | jrp->entinfo[i].desc_addr_dma = !0; |
| 376 | 360 | ||
| 377 | /* Setup rings */ | 361 | /* Setup rings */ |
| 378 | inpbusaddr = dma_map_single(dev, jrp->inpring, | ||
| 379 | sizeof(u32 *) * JOBR_DEPTH, | ||
| 380 | DMA_BIDIRECTIONAL); | ||
| 381 | if (dma_mapping_error(dev, inpbusaddr)) { | ||
| 382 | dev_err(dev, "caam_jr_init(): can't map input ring\n"); | ||
| 383 | kfree(jrp->inpring); | ||
| 384 | kfree(jrp->outring); | ||
| 385 | kfree(jrp->entinfo); | ||
| 386 | return -EIO; | ||
| 387 | } | ||
| 388 | |||
| 389 | outbusaddr = dma_map_single(dev, jrp->outring, | ||
| 390 | sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
| 391 | DMA_BIDIRECTIONAL); | ||
| 392 | if (dma_mapping_error(dev, outbusaddr)) { | ||
| 393 | dev_err(dev, "caam_jr_init(): can't map output ring\n"); | ||
| 394 | dma_unmap_single(dev, inpbusaddr, | ||
| 395 | sizeof(u32 *) * JOBR_DEPTH, | ||
| 396 | DMA_BIDIRECTIONAL); | ||
| 397 | kfree(jrp->inpring); | ||
| 398 | kfree(jrp->outring); | ||
| 399 | kfree(jrp->entinfo); | ||
| 400 | return -EIO; | ||
| 401 | } | ||
| 402 | |||
| 403 | jrp->inp_ring_write_index = 0; | 362 | jrp->inp_ring_write_index = 0; |
| 404 | jrp->out_ring_read_index = 0; | 363 | jrp->out_ring_read_index = 0; |
| 405 | jrp->head = 0; | 364 | jrp->head = 0; |
| @@ -431,12 +390,11 @@ int caam_jr_shutdown(struct device *dev) | |||
| 431 | { | 390 | { |
| 432 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | 391 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
| 433 | dma_addr_t inpbusaddr, outbusaddr; | 392 | dma_addr_t inpbusaddr, outbusaddr; |
| 434 | int ret, i; | 393 | int ret; |
| 435 | 394 | ||
| 436 | ret = caam_reset_hw_jr(dev); | 395 | ret = caam_reset_hw_jr(dev); |
| 437 | 396 | ||
| 438 | for_each_possible_cpu(i) | 397 | tasklet_kill(&jrp->irqtask); |
| 439 | tasklet_kill(&jrp->irqtask[i]); | ||
| 440 | 398 | ||
| 441 | /* Release interrupt */ | 399 | /* Release interrupt */ |
| 442 | free_irq(jrp->irq, dev); | 400 | free_irq(jrp->irq, dev); |
| @@ -444,13 +402,10 @@ int caam_jr_shutdown(struct device *dev) | |||
| 444 | /* Free rings */ | 402 | /* Free rings */ |
| 445 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); | 403 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); |
| 446 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); | 404 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); |
| 447 | dma_unmap_single(dev, outbusaddr, | 405 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, |
| 448 | sizeof(struct jr_outentry) * JOBR_DEPTH, | 406 | jrp->inpring, inpbusaddr); |
| 449 | DMA_BIDIRECTIONAL); | 407 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, |
| 450 | dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH, | 408 | jrp->outring, outbusaddr); |
| 451 | DMA_BIDIRECTIONAL); | ||
| 452 | kfree(jrp->outring); | ||
| 453 | kfree(jrp->inpring); | ||
| 454 | kfree(jrp->entinfo); | 409 | kfree(jrp->entinfo); |
| 455 | 410 | ||
| 456 | return ret; | 411 | return ret; |
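
The ring buffers themselves switch from streaming DMA (kzalloc plus dma_map_single, with the mirror-image unmap/kfree on every exit path) to coherent DMA, which collapses allocation and mapping into one call and eliminates the mapping-error unwinding removed above. Note that, as shown, the dma_alloc_coherent() results are not checked for NULL. The general before/after pattern, sketched with hypothetical buf/bus names:

/* Streaming (before): separate allocation and mapping, both fallible,
 * with CPU/device views kept consistent only by map/unmap semantics. */
buf = kzalloc(size, GFP_KERNEL | GFP_DMA);
bus = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, bus))
        goto unwind;
/* ... use ... */
dma_unmap_single(dev, bus, size, DMA_BIDIRECTIONAL);
kfree(buf);

/* Coherent (after): one call yields both the CPU pointer and the bus
 * address; the platform keeps the two views consistent for the life
 * of the buffer, so no per-access sync or unmap step is needed. */
buf = dma_alloc_coherent(dev, size, &bus, GFP_KERNEL);
/* ... use ... */
dma_free_coherent(dev, size, buf, bus);
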
| @@ -503,6 +458,14 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | |||
| 503 | dev_set_drvdata(jrdev, jrpriv); | 458 | dev_set_drvdata(jrdev, jrpriv); |
| 504 | ctrlpriv->jrdev[ring] = jrdev; | 459 | ctrlpriv->jrdev[ring] = jrdev; |
| 505 | 460 | ||
| 461 | if (sizeof(dma_addr_t) == sizeof(u64)) | ||
| 462 | if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring")) | ||
| 463 | dma_set_mask(jrdev, DMA_BIT_MASK(40)); | ||
| 464 | else | ||
| 465 | dma_set_mask(jrdev, DMA_BIT_MASK(36)); | ||
| 466 | else | ||
| 467 | dma_set_mask(jrdev, DMA_BIT_MASK(32)); | ||
| 468 | |||
| 506 | /* Identify the interrupt */ | 469 | /* Identify the interrupt */ |
| 507 | jrpriv->irq = of_irq_to_resource(np, 0, NULL); | 470 | jrpriv->irq = of_irq_to_resource(np, 0, NULL); |
| 508 | 471 | ||
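
The new DMA-mask selection distinguishes SEC v5.0 rings (40-bit addressing) from earlier v4.x parts (36-bit), falling back to 32 bits when dma_addr_t itself is 32-bit. As written, the un-braced nested if/else parses correctly only because the inner if supplies its own else; a braced equivalent makes the pairing explicit and is what kernel style would normally ask for:

/* Equivalent logic with the if/else pairing made explicit. */
if (sizeof(dma_addr_t) == sizeof(u64)) {
        if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
                dma_set_mask(jrdev, DMA_BIT_MASK(40));
        else
                dma_set_mask(jrdev, DMA_BIT_MASK(36));
} else {
        dma_set_mask(jrdev, DMA_BIT_MASK(32));
}
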
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c new file mode 100644 index 000000000000..002888185f17 --- /dev/null +++ b/drivers/crypto/caam/key_gen.c | |||
| @@ -0,0 +1,122 @@ | |||
| 1 | /* | ||
| 2 | * CAAM/SEC 4.x functions for handling key-generation jobs | ||
| 3 | * | ||
| 4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | #include "compat.h" | ||
| 8 | #include "jr.h" | ||
| 9 | #include "error.h" | ||
| 10 | #include "desc_constr.h" | ||
| 11 | #include "key_gen.h" | ||
| 12 | |||
| 13 | void split_key_done(struct device *dev, u32 *desc, u32 err, | ||
| 14 | void *context) | ||
| 15 | { | ||
| 16 | struct split_key_result *res = context; | ||
| 17 | |||
| 18 | #ifdef DEBUG | ||
| 19 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 20 | #endif | ||
| 21 | |||
| 22 | if (err) { | ||
| 23 | char tmp[CAAM_ERROR_STR_MAX]; | ||
| 24 | |||
| 25 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
| 26 | } | ||
| 27 | |||
| 28 | res->err = err; | ||
| 29 | |||
| 30 | complete(&res->completion); | ||
| 31 | } | ||
| 32 | EXPORT_SYMBOL(split_key_done); | ||
| 33 | /* | ||
| 34 | get a split ipad/opad key | ||
| 35 | |||
| 36 | Split key generation----------------------------------------------- | ||
| 37 | |||
| 38 | [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 | ||
| 39 | [01] 0x04000014 key: class2->keyreg len=20 | ||
| 40 | @0xffe01000 | ||
| 41 | [03] 0x84410014 operation: cls2-op sha1 hmac init dec | ||
| 42 | [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm | ||
| 43 | [05] 0xa4000001 jump: class2 local all ->1 [06] | ||
| 44 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | ||
| 45 | @0xffe04000 | ||
| 46 | */ | ||
| 47 | u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | ||
| 48 | int split_key_pad_len, const u8 *key_in, u32 keylen, | ||
| 49 | u32 alg_op) | ||
| 50 | { | ||
| 51 | u32 *desc; | ||
| 52 | struct split_key_result result; | ||
| 53 | dma_addr_t dma_addr_in, dma_addr_out; | ||
| 54 | int ret = 0; | ||
| 55 | |||
| 56 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | ||
| 57 | |||
| 58 | init_job_desc(desc, 0); | ||
| 59 | |||
| 60 | dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, | ||
| 61 | DMA_TO_DEVICE); | ||
| 62 | if (dma_mapping_error(jrdev, dma_addr_in)) { | ||
| 63 | dev_err(jrdev, "unable to map key input memory\n"); | ||
| 64 | kfree(desc); | ||
| 65 | return -ENOMEM; | ||
| 66 | } | ||
| 67 | append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); | ||
| 68 | |||
| 69 | /* Sets MDHA up into an HMAC-INIT */ | ||
| 70 | append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT); | ||
| 71 | |||
| 72 | /* | ||
| 73 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion | ||
| 74 | * into both pads inside MDHA | ||
| 75 | */ | ||
| 76 | append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | | ||
| 77 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); | ||
| 78 | |||
| 79 | /* | ||
| 80 | * FIFO_STORE with the explicit split-key content store | ||
| 81 | * (0x26 output type) | ||
| 82 | */ | ||
| 83 | dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, | ||
| 84 | DMA_FROM_DEVICE); | ||
| 85 | if (dma_mapping_error(jrdev, dma_addr_out)) { | ||
| 86 | dev_err(jrdev, "unable to map key output memory\n"); | ||
| 87 | kfree(desc); | ||
| 88 | return -ENOMEM; | ||
| 89 | } | ||
| 90 | append_fifo_store(desc, dma_addr_out, split_key_len, | ||
| 91 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | ||
| 92 | |||
| 93 | #ifdef DEBUG | ||
| 94 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
| 95 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); | ||
| 96 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
| 97 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 98 | #endif | ||
| 99 | |||
| 100 | result.err = 0; | ||
| 101 | init_completion(&result.completion); | ||
| 102 | |||
| 103 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | ||
| 104 | if (!ret) { | ||
| 105 | /* in progress */ | ||
| 106 | wait_for_completion_interruptible(&result.completion); | ||
| 107 | ret = result.err; | ||
| 108 | #ifdef DEBUG | ||
| 109 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
| 110 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, | ||
| 111 | split_key_pad_len, 1); | ||
| 112 | #endif | ||
| 113 | } | ||
| 114 | |||
| 115 | dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, | ||
| 116 | DMA_FROM_DEVICE); | ||
| 117 | dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); | ||
| 118 | |||
| 119 | kfree(desc); | ||
| 120 | |||
| 121 | return ret; | ||
| 122 | } | ||
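
gen_split_key() builds and runs a one-shot job descriptor: load the raw HMAC key into the class-2 key register, run MDHA in HMAC-INIT/decrypt mode, trigger the ipad/opad expansion with a zero-length FIFO load, then FIFO-store the resulting split key encrypted under the JDKEK. Completion is synchronous from the caller's point of view -- split_key_done() just records the status and signals the completion. A hypothetical HMAC-SHA1 caller might look like the following; the OP_* flag names are assumed from the driver's desc.h, and the two length arguments would come from the driver's per-algorithm padding tables:

/* Hypothetical setkey-path caller (sizing values illustrative only). */
static int example_gen_sha1_split_key(struct device *jrdev,
                                      const u8 *key, u32 keylen,
                                      u8 *split_key, int split_key_len,
                                      int split_key_pad_len)
{
        u32 ret;

        ret = gen_split_key(jrdev, split_key, split_key_len,
                            split_key_pad_len, key, keylen,
                            OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA1 |
                            OP_ALG_AAI_HMAC);
        return ret ? -EIO : 0;
}
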
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h new file mode 100644 index 000000000000..d95d290c6e8b --- /dev/null +++ b/drivers/crypto/caam/key_gen.h | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | /* | ||
| 2 | * CAAM/SEC 4.x definitions for handling key-generation jobs | ||
| 3 | * | ||
| 4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | |||
| 8 | struct split_key_result { | ||
| 9 | struct completion completion; | ||
| 10 | int err; | ||
| 11 | }; | ||
| 12 | |||
| 13 | void split_key_done(struct device *dev, u32 *desc, u32 err, void *context); | ||
| 14 | |||
| 15 | u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | ||
| 16 | int split_key_pad_len, const u8 *key_in, u32 keylen, | ||
| 17 | u32 alg_op); | ||
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h new file mode 100644 index 000000000000..62950d22ac13 --- /dev/null +++ b/drivers/crypto/caam/pdb.h | |||
| @@ -0,0 +1,401 @@ | |||
| 1 | /* | ||
| 2 | * CAAM Protocol Data Block (PDB) definition header file | ||
| 3 | * | ||
| 4 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef CAAM_PDB_H | ||
| 9 | #define CAAM_PDB_H | ||
| 10 | |||
| 11 | /* | ||
| 12 | * PDB- IPSec ESP Header Modification Options | ||
| 13 | */ | ||
| 14 | #define PDBHMO_ESP_DECAP_SHIFT 12 | ||
| 15 | #define PDBHMO_ESP_ENCAP_SHIFT 4 | ||
| 16 | /* | ||
| 17 | * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the | ||
| 18 | * Options Byte IP version (IPvsn) field: | ||
| 19 | * if IPv4, decrement the inner IP header TTL field (byte 8); | ||
| 20 | * if IPv6 decrement the inner IP header Hop Limit field (byte 7). | ||
| 21 | */ | ||
| 22 | #define PDBHMO_ESP_DECAP_DEC_TTL (0x02 << PDBHMO_ESP_DECAP_SHIFT) | ||
| 23 | #define PDBHMO_ESP_ENCAP_DEC_TTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT) | ||
| 24 | /* | ||
| 25 | * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte | ||
| 26 | * from the outer IP header to the inner IP header. | ||
| 27 | */ | ||
| 28 | #define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT) | ||
| 29 | /* | ||
| 30 | * Encap- Copy DF bit -if an IPv4 tunnel mode outer IP header is coming from | ||
| 31 | * the PDB, copy the DF bit from the inner IP header to the outer IP header. | ||
| 32 | */ | ||
| 33 | #define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT) | ||
| 34 | |||
| 35 | /* | ||
| 36 | * PDB - IPSec ESP Encap/Decap Options | ||
| 37 | */ | ||
| 38 | #define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */ | ||
| 39 | #define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */ | ||
| 40 | #define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */ | ||
| 41 | #define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */ | ||
| 42 | #define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */ | ||
| 43 | #define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */ | ||
| 44 | #define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */ | ||
| 45 | #define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */ | ||
| 46 | #define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */ | ||
| 47 | #define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */ | ||
| 48 | #define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */ | ||
| 49 | #define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */ | ||
| 50 | #define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */ | ||
| 51 | #define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */ | ||
| 52 | |||
| 53 | /* | ||
| 54 | * General IPSec encap/decap PDB definitions | ||
| 55 | */ | ||
| 56 | struct ipsec_encap_cbc { | ||
| 57 | u32 iv[4]; | ||
| 58 | }; | ||
| 59 | |||
| 60 | struct ipsec_encap_ctr { | ||
| 61 | u32 ctr_nonce; | ||
| 62 | u32 ctr_initial; | ||
| 63 | u32 iv[2]; | ||
| 64 | }; | ||
| 65 | |||
| 66 | struct ipsec_encap_ccm { | ||
| 67 | u32 salt; /* lower 24 bits */ | ||
| 68 | u8 b0_flags; | ||
| 69 | u8 ctr_flags; | ||
| 70 | u16 ctr_initial; | ||
| 71 | u32 iv[2]; | ||
| 72 | }; | ||
| 73 | |||
| 74 | struct ipsec_encap_gcm { | ||
| 75 | u32 salt; /* lower 24 bits */ | ||
| 76 | u32 rsvd1; | ||
| 77 | u32 iv[2]; | ||
| 78 | }; | ||
| 79 | |||
| 80 | struct ipsec_encap_pdb { | ||
| 81 | u8 hmo_rsvd; | ||
| 82 | u8 ip_nh; | ||
| 83 | u8 ip_nh_offset; | ||
| 84 | u8 options; | ||
| 85 | u32 seq_num_ext_hi; | ||
| 86 | u32 seq_num; | ||
| 87 | union { | ||
| 88 | struct ipsec_encap_cbc cbc; | ||
| 89 | struct ipsec_encap_ctr ctr; | ||
| 90 | struct ipsec_encap_ccm ccm; | ||
| 91 | struct ipsec_encap_gcm gcm; | ||
| 92 | }; | ||
| 93 | u32 spi; | ||
| 94 | u16 rsvd1; | ||
| 95 | u16 ip_hdr_len; | ||
| 96 | u32 ip_hdr[0]; /* optional IP Header content */ | ||
| 97 | }; | ||
| 98 | |||
| 99 | struct ipsec_decap_cbc { | ||
| 100 | u32 rsvd[2]; | ||
| 101 | }; | ||
| 102 | |||
| 103 | struct ipsec_decap_ctr { | ||
| 104 | u32 salt; | ||
| 105 | u32 ctr_initial; | ||
| 106 | }; | ||
| 107 | |||
| 108 | struct ipsec_decap_ccm { | ||
| 109 | u32 salt; | ||
| 110 | u8 iv_flags; | ||
| 111 | u8 ctr_flags; | ||
| 112 | u16 ctr_initial; | ||
| 113 | }; | ||
| 114 | |||
| 115 | struct ipsec_decap_gcm { | ||
| 116 | u32 salt; | ||
| 117 | u32 resvd; | ||
| 118 | }; | ||
| 119 | |||
| 120 | struct ipsec_decap_pdb { | ||
| 121 | u16 hmo_ip_hdr_len; | ||
| 122 | u8 ip_nh_offset; | ||
| 123 | u8 options; | ||
| 124 | union { | ||
| 125 | struct ipsec_decap_cbc cbc; | ||
| 126 | struct ipsec_decap_ctr ctr; | ||
| 127 | struct ipsec_decap_ccm ccm; | ||
| 128 | struct ipsec_decap_gcm gcm; | ||
| 129 | }; | ||
| 130 | u32 seq_num_ext_hi; | ||
| 131 | u32 seq_num; | ||
| 132 | u32 anti_replay[2]; | ||
| 133 | u32 end_index[0]; | ||
| 134 | }; | ||
| 135 | |||
| 136 | /* | ||
| 137 | * IPSec ESP Datapath Protocol Override Register (DPOVRD) | ||
| 138 | */ | ||
| 139 | struct ipsec_deco_dpovrd { | ||
| 140 | #define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80 | ||
| 141 | u8 ovrd_ecn; | ||
| 142 | u8 ip_hdr_len; | ||
| 143 | u8 nh_offset; | ||
| 144 | u8 next_header; /* reserved if decap */ | ||
| 145 | }; | ||
| 146 | |||
| 147 | /* | ||
| 148 | * IEEE 802.11i WiFi Protocol Data Block | ||
| 149 | */ | ||
| 150 | #define WIFI_PDBOPTS_FCS 0x01 | ||
| 151 | #define WIFI_PDBOPTS_AR 0x40 | ||
| 152 | |||
| 153 | struct wifi_encap_pdb { | ||
| 154 | u16 mac_hdr_len; | ||
| 155 | u8 rsvd; | ||
| 156 | u8 options; | ||
| 157 | u8 iv_flags; | ||
| 158 | u8 pri; | ||
| 159 | u16 pn1; | ||
| 160 | u32 pn2; | ||
| 161 | u16 frm_ctrl_mask; | ||
| 162 | u16 seq_ctrl_mask; | ||
| 163 | u8 rsvd1[2]; | ||
| 164 | u8 cnst; | ||
| 165 | u8 key_id; | ||
| 166 | u8 ctr_flags; | ||
| 167 | u8 rsvd2; | ||
| 168 | u16 ctr_init; | ||
| 169 | }; | ||
| 170 | |||
| 171 | struct wifi_decap_pdb { | ||
| 172 | u16 mac_hdr_len; | ||
| 173 | u8 rsvd; | ||
| 174 | u8 options; | ||
| 175 | u8 iv_flags; | ||
| 176 | u8 pri; | ||
| 177 | u16 pn1; | ||
| 178 | u32 pn2; | ||
| 179 | u16 frm_ctrl_mask; | ||
| 180 | u16 seq_ctrl_mask; | ||
| 181 | u8 rsvd1[4]; | ||
| 182 | u8 ctr_flags; | ||
| 183 | u8 rsvd2; | ||
| 184 | u16 ctr_init; | ||
| 185 | }; | ||
| 186 | |||
| 187 | /* | ||
| 188 | * IEEE 802.16 WiMAX Protocol Data Block | ||
| 189 | */ | ||
| 190 | #define WIMAX_PDBOPTS_FCS 0x01 | ||
| 191 | #define WIMAX_PDBOPTS_AR 0x40 /* decap only */ | ||
| 192 | |||
| 193 | struct wimax_encap_pdb { | ||
| 194 | u8 rsvd[3]; | ||
| 195 | u8 options; | ||
| 196 | u32 nonce; | ||
| 197 | u8 b0_flags; | ||
| 198 | u8 ctr_flags; | ||
| 199 | u16 ctr_init; | ||
| 200 | /* begin DECO writeback region */ | ||
| 201 | u32 pn; | ||
| 202 | /* end DECO writeback region */ | ||
| 203 | }; | ||
| 204 | |||
| 205 | struct wimax_decap_pdb { | ||
| 206 | u8 rsvd[3]; | ||
| 207 | u8 options; | ||
| 208 | u32 nonce; | ||
| 209 | u8 iv_flags; | ||
| 210 | u8 ctr_flags; | ||
| 211 | u16 ctr_init; | ||
| 212 | /* begin DECO writeback region */ | ||
| 213 | u32 pn; | ||
| 214 | u8 rsvd1[2]; | ||
| 215 | u16 antireplay_len; | ||
| 216 | u64 antireplay_scorecard; | ||
| 217 | /* end DECO writeback region */ | ||
| 218 | }; | ||
| 219 | |||
| 220 | /* | ||
| 221 | * IEEE 801.AE MacSEC Protocol Data Block | ||
| 222 | */ | ||
| 223 | #define MACSEC_PDBOPTS_FCS 0x01 | ||
| 224 | #define MACSEC_PDBOPTS_AR 0x40 /* used in decap only */ | ||
| 225 | |||
| 226 | struct macsec_encap_pdb { | ||
| 227 | u16 aad_len; | ||
| 228 | u8 rsvd; | ||
| 229 | u8 options; | ||
| 230 | u64 sci; | ||
| 231 | u16 ethertype; | ||
| 232 | u8 tci_an; | ||
| 233 | u8 rsvd1; | ||
| 234 | /* begin DECO writeback region */ | ||
| 235 | u32 pn; | ||
| 236 | /* end DECO writeback region */ | ||
| 237 | }; | ||
| 238 | |||
| 239 | struct macsec_decap_pdb { | ||
| 240 | u16 aad_len; | ||
| 241 | u8 rsvd; | ||
| 242 | u8 options; | ||
| 243 | u64 sci; | ||
| 244 | u8 rsvd1[3]; | ||
| 245 | /* begin DECO writeback region */ | ||
| 246 | u8 antireplay_len; | ||
| 247 | u32 pn; | ||
| 248 | u64 antireplay_scorecard; | ||
| 249 | /* end DECO writeback region */ | ||
| 250 | }; | ||
| 251 | |||
| 252 | /* | ||
| 253 | * SSL/TLS/DTLS Protocol Data Blocks | ||
| 254 | */ | ||
| 255 | |||
| 256 | #define TLS_PDBOPTS_ARS32 0x40 | ||
| 257 | #define TLS_PDBOPTS_ARS64 0xc0 | ||
| 258 | #define TLS_PDBOPTS_OUTFMT 0x08 | ||
| 259 | #define TLS_PDBOPTS_IV_WRTBK 0x02 /* 1.1/1.2/DTLS only */ | ||
| 260 | #define TLS_PDBOPTS_EXP_RND_IV 0x01 /* 1.1/1.2/DTLS only */ | ||
| 261 | |||
| 262 | struct tls_block_encap_pdb { | ||
| 263 | u8 type; | ||
| 264 | u8 version[2]; | ||
| 265 | u8 options; | ||
| 266 | u64 seq_num; | ||
| 267 | u32 iv[4]; | ||
| 268 | }; | ||
| 269 | |||
| 270 | struct tls_stream_encap_pdb { | ||
| 271 | u8 type; | ||
| 272 | u8 version[2]; | ||
| 273 | u8 options; | ||
| 274 | u64 seq_num; | ||
| 275 | u8 i; | ||
| 276 | u8 j; | ||
| 277 | u8 rsvd1[2]; | ||
| 278 | }; | ||
| 279 | |||
| 280 | struct dtls_block_encap_pdb { | ||
| 281 | u8 type; | ||
| 282 | u8 version[2]; | ||
| 283 | u8 options; | ||
| 284 | u16 epoch; | ||
| 285 | u16 seq_num[3]; | ||
| 286 | u32 iv[4]; | ||
| 287 | }; | ||
| 288 | |||
| 289 | struct tls_block_decap_pdb { | ||
| 290 | u8 rsvd[3]; | ||
| 291 | u8 options; | ||
| 292 | u64 seq_num; | ||
| 293 | u32 iv[4]; | ||
| 294 | }; | ||
| 295 | |||
| 296 | struct tls_stream_decap_pdb { | ||
| 297 | u8 rsvd[3]; | ||
| 298 | u8 options; | ||
| 299 | u64 seq_num; | ||
| 300 | u8 i; | ||
| 301 | u8 j; | ||
| 302 | u8 rsvd1[2]; | ||
| 303 | }; | ||
| 304 | |||
| 305 | struct dtls_block_decap_pdb { | ||
| 306 | u8 rsvd[3]; | ||
| 307 | u8 options; | ||
| 308 | u16 epoch; | ||
| 309 | u16 seq_num[3]; | ||
| 310 | u32 iv[4]; | ||
| 311 | u64 antireplay_scorecard; | ||
| 312 | }; | ||
| 313 | |||
| 314 | /* | ||
| 315 | * SRTP Protocol Data Blocks | ||
| 316 | */ | ||
| 317 | #define SRTP_PDBOPTS_MKI 0x08 | ||
| 318 | #define SRTP_PDBOPTS_AR 0x40 | ||
| 319 | |||
| 320 | struct srtp_encap_pdb { | ||
| 321 | u8 x_len; | ||
| 322 | u8 mki_len; | ||
| 323 | u8 n_tag; | ||
| 324 | u8 options; | ||
| 325 | u32 cnst0; | ||
| 326 | u8 rsvd[2]; | ||
| 327 | u16 cnst1; | ||
| 328 | u16 salt[7]; | ||
| 329 | u16 cnst2; | ||
| 330 | u32 rsvd1; | ||
| 331 | u32 roc; | ||
| 332 | u32 opt_mki; | ||
| 333 | }; | ||
| 334 | |||
| 335 | struct srtp_decap_pdb { | ||
| 336 | u8 x_len; | ||
| 337 | u8 mki_len; | ||
| 338 | u8 n_tag; | ||
| 339 | u8 options; | ||
| 340 | u32 cnst0; | ||
| 341 | u8 rsvd[2]; | ||
| 342 | u16 cnst1; | ||
| 343 | u16 salt[7]; | ||
| 344 | u16 cnst2; | ||
| 345 | u16 rsvd1; | ||
| 346 | u16 seq_num; | ||
| 347 | u32 roc; | ||
| 348 | u64 antireplay_scorecard; | ||
| 349 | }; | ||
| 350 | |||
| 351 | /* | ||
| 352 | * DSA/ECDSA Protocol Data Blocks | ||
| 353 | * Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar | ||
| 354 | * except for the treatment of "w" for verify, "s" for sign, | ||
| 355 | * and the placement of "a,b". | ||
| 356 | */ | ||
| 357 | #define DSA_PDB_SGF_SHIFT 24 | ||
| 358 | #define DSA_PDB_SGF_MASK (0xff << DSA_PDB_SGF_SHIFT) | ||
| 359 | #define DSA_PDB_SGF_Q (0x80 << DSA_PDB_SGF_SHIFT) | ||
| 360 | #define DSA_PDB_SGF_R (0x40 << DSA_PDB_SGF_SHIFT) | ||
| 361 | #define DSA_PDB_SGF_G (0x20 << DSA_PDB_SGF_SHIFT) | ||
| 362 | #define DSA_PDB_SGF_W (0x10 << DSA_PDB_SGF_SHIFT) | ||
| 363 | #define DSA_PDB_SGF_S (0x10 << DSA_PDB_SGF_SHIFT) | ||
| 364 | #define DSA_PDB_SGF_F (0x08 << DSA_PDB_SGF_SHIFT) | ||
| 365 | #define DSA_PDB_SGF_C (0x04 << DSA_PDB_SGF_SHIFT) | ||
| 366 | #define DSA_PDB_SGF_D (0x02 << DSA_PDB_SGF_SHIFT) | ||
| 367 | #define DSA_PDB_SGF_AB_SIGN (0x02 << DSA_PDB_SGF_SHIFT) | ||
| 368 | #define DSA_PDB_SGF_AB_VERIFY (0x01 << DSA_PDB_SGF_SHIFT) | ||
| 369 | |||
| 370 | #define DSA_PDB_L_SHIFT 7 | ||
| 371 | #define DSA_PDB_L_MASK (0x3ff << DSA_PDB_L_SHIFT) | ||
| 372 | |||
| 373 | #define DSA_PDB_N_MASK 0x7f | ||
| 374 | |||
| 375 | struct dsa_sign_pdb { | ||
| 376 | u32 sgf_ln; /* Use DSA_PDB_ definitions per above */ | ||
| 377 | u8 *q; | ||
| 378 | u8 *r; | ||
| 379 | u8 *g; /* or Gx,y */ | ||
| 380 | u8 *s; | ||
| 381 | u8 *f; | ||
| 382 | u8 *c; | ||
| 383 | u8 *d; | ||
| 384 | u8 *ab; /* ECC only */ | ||
| 385 | u8 *u; | ||
| 386 | }; | ||
| 387 | |||
| 388 | struct dsa_verify_pdb { | ||
| 389 | u32 sgf_ln; | ||
| 390 | u8 *q; | ||
| 391 | u8 *r; | ||
| 392 | u8 *g; /* or Gx,y */ | ||
| 393 | u8 *w; /* or Wx,y */ | ||
| 394 | u8 *f; | ||
| 395 | u8 *c; | ||
| 396 | u8 *d; | ||
| 397 | u8 *tmp; /* temporary data block */ | ||
| 398 | u8 *ab; /* only used if ECC processing */ | ||
| 399 | }; | ||
| 400 | |||
| 401 | #endif | ||
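
These PDB layouts are consumed by the CAAM's protocol-aware descriptor commands: the PDB sits between the job/shared header and the PROTOCOL OPERATION command, and the DECO reads it directly (and updates the marked writeback regions). An illustrative sketch -- not taken from the driver -- of populating a tunnel-mode ESP encap PDB for AES-CBC:

/* Illustrative only: tunnel-mode ESP encap PDB, AES-CBC, with the
 * outer IP header supplied through the PDB's trailing ip_hdr[] area. */
static void example_esp_encap_pdb(struct ipsec_encap_pdb *pdb, u32 spi,
                                  const u32 iv[4], u16 ip_hdr_len)
{
        memset(pdb, 0, sizeof(*pdb));
        pdb->options = PDBOPTS_ESP_TUNNEL |     /* tunnel next-header */
                       PDBOPTS_ESP_IPHDRSRC |   /* outer hdr from PDB */
                       PDBOPTS_ESP_INCIPHDR |   /* prepend hdr to output */
                       PDBOPTS_ESP_UPDATE_CSUM; /* fix outer checksum */
        pdb->spi = spi;
        pdb->seq_num = 1;                       /* first packet */
        pdb->ip_hdr_len = ip_hdr_len;
        memcpy(pdb->cbc.iv, iv, sizeof(pdb->cbc.iv));
        /* caller then copies ip_hdr_len bytes into pdb->ip_hdr[] */
}
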
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index e9f7a70cdd5e..3223fc6d647c 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
| @@ -117,6 +117,12 @@ struct jr_outentry { | |||
| 117 | #define CHA_NUM_DECONUM_SHIFT 56 | 117 | #define CHA_NUM_DECONUM_SHIFT 56 |
| 118 | #define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) | 118 | #define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) |
| 119 | 119 | ||
| 120 | struct sec_vid { | ||
| 121 | u16 ip_id; | ||
| 122 | u8 maj_rev; | ||
| 123 | u8 min_rev; | ||
| 124 | }; | ||
| 125 | |||
| 120 | struct caam_perfmon { | 126 | struct caam_perfmon { |
| 121 | /* Performance Monitor Registers f00-f9f */ | 127 | /* Performance Monitor Registers f00-f9f */ |
| 122 | u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */ | 128 | u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */ |
| @@ -167,7 +173,7 @@ struct partid { | |||
| 167 | u32 pidr; /* partition ID, DECO */ | 173 | u32 pidr; /* partition ID, DECO */ |
| 168 | }; | 174 | }; |
| 169 | 175 | ||
| 170 | /* RNG test mode (replicated twice in some configurations) */ | 176 | /* RNGB test mode (replicated twice in some configurations) */ |
| 171 | /* Padded out to 0x100 */ | 177 | /* Padded out to 0x100 */ |
| 172 | struct rngtst { | 178 | struct rngtst { |
| 173 | u32 mode; /* RTSTMODEx - Test mode */ | 179 | u32 mode; /* RTSTMODEx - Test mode */ |
| @@ -200,6 +206,31 @@ struct rngtst { | |||
| 200 | u32 rsvd14[15]; | 206 | u32 rsvd14[15]; |
| 201 | }; | 207 | }; |
| 202 | 208 | ||
| 209 | /* RNG4 TRNG test registers */ | ||
| 210 | struct rng4tst { | ||
| 211 | #define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ | ||
| 212 | u32 rtmctl; /* misc. control register */ | ||
| 213 | u32 rtscmisc; /* statistical check misc. register */ | ||
| 214 | u32 rtpkrrng; /* poker range register */ | ||
| 215 | union { | ||
| 216 | u32 rtpkrmax; /* PRGM=1: poker max. limit register */ | ||
| 217 | u32 rtpkrsq; /* PRGM=0: poker square calc. result register */ | ||
| 218 | }; | ||
| 219 | #define RTSDCTL_ENT_DLY_SHIFT 16 | ||
| 220 | #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) | ||
| 221 | u32 rtsdctl; /* seed control register */ | ||
| 222 | union { | ||
| 223 | u32 rtsblim; /* PRGM=1: sparse bit limit register */ | ||
| 224 | u32 rttotsam; /* PRGM=0: total samples register */ | ||
| 225 | }; | ||
| 226 | u32 rtfrqmin; /* frequency count min. limit register */ | ||
| 227 | union { | ||
| 228 | u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */ | ||
| 229 | u32 rtfrqcnt; /* PRGM=0: freq. count register */ | ||
| 230 | }; | ||
| 231 | u32 rsvd1[56]; | ||
| 232 | }; | ||
| 233 | |||
| 203 | /* | 234 | /* |
| 204 | * caam_ctrl - basic core configuration | 235 | * caam_ctrl - basic core configuration |
| 205 | * starts base + 0x0000 padded out to 0x1000 | 236 | * starts base + 0x0000 padded out to 0x1000 |
| @@ -249,7 +280,10 @@ struct caam_ctrl { | |||
| 249 | 280 | ||
| 250 | /* RNG Test/Verification/Debug Access 600-7ff */ | 281 | /* RNG Test/Verification/Debug Access 600-7ff */ |
| 251 | /* (Useful in Test/Debug modes only...) */ | 282 | /* (Useful in Test/Debug modes only...) */ |
| 252 | struct rngtst rtst[2]; | 283 | union { |
| 284 | struct rngtst rtst[2]; | ||
| 285 | struct rng4tst r4tst[2]; | ||
| 286 | }; | ||
| 253 | 287 | ||
| 254 | u32 rsvd9[448]; | 288 | u32 rsvd9[448]; |
| 255 | 289 | ||
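
The rng4tst block models RNG4's dual-personality registers: with RTMCTL_PRGM set, the union members are programmable limits (poker max, sparse-bit limit, frequency max); with it clear, they read back as measurement results. A sketch of adjusting the TRNG entropy delay under that scheme -- register names follow the struct above, while the surrounding init sequence is SoC-dependent:

/* Sketch: program the RNG4 TRNG entropy delay. */
static void rng4_set_ent_delay(struct rng4tst __iomem *r4tst, u32 delay)
{
        u32 val;

        setbits32(&r4tst->rtmctl, RTMCTL_PRGM);  /* enter program mode */

        val = rd_reg32(&r4tst->rtsdctl);
        val = (val & ~RTSDCTL_ENT_DLY_MASK) |
              (delay << RTSDCTL_ENT_DLY_SHIFT);
        wr_reg32(&r4tst->rtsdctl, val);

        clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);  /* back to run mode */
}
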
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h new file mode 100644 index 000000000000..e0037c8ee243 --- /dev/null +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
| @@ -0,0 +1,156 @@ | |||
| 1 | /* | ||
| 2 | * CAAM/SEC 4.x functions for using scatterlists in caam driver | ||
| 3 | * | ||
| 4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
| 5 | * | ||
| 6 | */ | ||
| 7 | |||
| 8 | struct sec4_sg_entry; | ||
| 9 | |||
| 10 | /* | ||
| 11 | * convert single dma address to h/w link table format | ||
| 12 | */ | ||
| 13 | static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, | ||
| 14 | dma_addr_t dma, u32 len, u32 offset) | ||
| 15 | { | ||
| 16 | sec4_sg_ptr->ptr = dma; | ||
| 17 | sec4_sg_ptr->len = len; | ||
| 18 | sec4_sg_ptr->reserved = 0; | ||
| 19 | sec4_sg_ptr->buf_pool_id = 0; | ||
| 20 | sec4_sg_ptr->offset = offset; | ||
| 21 | #ifdef DEBUG | ||
| 22 | print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", | ||
| 23 | DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, | ||
| 24 | sizeof(struct sec4_sg_entry), 1); | ||
| 25 | #endif | ||
| 26 | } | ||
| 27 | |||
| 28 | /* | ||
| 29 | * convert scatterlist to h/w link table format | ||
| 30 | * but does not have final bit; instead, returns last entry | ||
| 31 | */ | ||
| 32 | static inline struct sec4_sg_entry * | ||
| 33 | sg_to_sec4_sg(struct scatterlist *sg, int sg_count, | ||
| 34 | struct sec4_sg_entry *sec4_sg_ptr, u32 offset) | ||
| 35 | { | ||
| 36 | while (sg_count) { | ||
| 37 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), | ||
| 38 | sg_dma_len(sg), offset); | ||
| 39 | sec4_sg_ptr++; | ||
| 40 | sg = scatterwalk_sg_next(sg); | ||
| 41 | sg_count--; | ||
| 42 | } | ||
| 43 | return sec4_sg_ptr - 1; | ||
| 44 | } | ||
| 45 | |||
| 46 | /* | ||
| 47 | * convert scatterlist to h/w link table format | ||
| 48 | * scatterlist must have been previously dma mapped | ||
| 49 | */ | ||
| 50 | static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, | ||
| 51 | struct sec4_sg_entry *sec4_sg_ptr, | ||
| 52 | u32 offset) | ||
| 53 | { | ||
| 54 | sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); | ||
| 55 | sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; | ||
| 56 | } | ||
| 57 | |||
| 58 | /* count number of elements in scatterlist */ | ||
| 59 | static inline int __sg_count(struct scatterlist *sg_list, int nbytes, | ||
| 60 | bool *chained) | ||
| 61 | { | ||
| 62 | struct scatterlist *sg = sg_list; | ||
| 63 | int sg_nents = 0; | ||
| 64 | |||
| 65 | while (nbytes > 0) { | ||
| 66 | sg_nents++; | ||
| 67 | nbytes -= sg->length; | ||
| 68 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | ||
| 69 | *chained = true; | ||
| 70 | sg = scatterwalk_sg_next(sg); | ||
| 71 | } | ||
| 72 | |||
| 73 | return sg_nents; | ||
| 74 | } | ||
| 75 | |||
| 76 | /* derive number of elements in scatterlist, but return 0 for 1 */ | ||
| 77 | static inline int sg_count(struct scatterlist *sg_list, int nbytes, | ||
| 78 | bool *chained) | ||
| 79 | { | ||
| 80 | int sg_nents = __sg_count(sg_list, nbytes, chained); | ||
| 81 | |||
| 82 | if (likely(sg_nents == 1)) | ||
| 83 | return 0; | ||
| 84 | |||
| 85 | return sg_nents; | ||
| 86 | } | ||
| 87 | |||
| 88 | static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg, | ||
| 89 | unsigned int nents, enum dma_data_direction dir, | ||
| 90 | bool chained) | ||
| 91 | { | ||
| 92 | if (unlikely(chained)) { | ||
| 93 | int i; | ||
| 94 | for (i = 0; i < nents; i++) { | ||
| 95 | dma_map_sg(dev, sg, 1, dir); | ||
| 96 | sg = scatterwalk_sg_next(sg); | ||
| 97 | } | ||
| 98 | } else { | ||
| 99 | dma_map_sg(dev, sg, nents, dir); | ||
| 100 | } | ||
| 101 | return nents; | ||
| 102 | } | ||
| 103 | |||
| 104 | static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, | ||
| 105 | unsigned int nents, enum dma_data_direction dir, | ||
| 106 | bool chained) | ||
| 107 | { | ||
| 108 | if (unlikely(chained)) { | ||
| 109 | int i; | ||
| 110 | for (i = 0; i < nents; i++) { | ||
| 111 | dma_unmap_sg(dev, sg, 1, dir); | ||
| 112 | sg = scatterwalk_sg_next(sg); | ||
| 113 | } | ||
| 114 | } else { | ||
| 115 | dma_unmap_sg(dev, sg, nents, dir); | ||
| 116 | } | ||
| 117 | return nents; | ||
| 118 | } | ||
| 119 | |||
| 120 | /* Copy from len bytes of sg to dest, starting from beginning */ | ||
| 121 | static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) | ||
| 122 | { | ||
| 123 | struct scatterlist *current_sg = sg; | ||
| 124 | int cpy_index = 0, next_cpy_index = current_sg->length; | ||
| 125 | |||
| 126 | while (next_cpy_index < len) { | ||
| 127 | memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), | ||
| 128 | current_sg->length); | ||
| 129 | current_sg = scatterwalk_sg_next(current_sg); | ||
| 130 | cpy_index = next_cpy_index; | ||
| 131 | next_cpy_index += current_sg->length; | ||
| 132 | } | ||
| 133 | if (cpy_index < len) | ||
| 134 | memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), | ||
| 135 | len - cpy_index); | ||
| 136 | } | ||
| 137 | |||
| 138 | /* Copy sg data, from to_skip to end, to dest */ | ||
| 139 | static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, | ||
| 140 | int to_skip, unsigned int end) | ||
| 141 | { | ||
| 142 | struct scatterlist *current_sg = sg; | ||
| 143 | int sg_index, cpy_index; | ||
| 144 | |||
| 145 | sg_index = current_sg->length; | ||
| 146 | while (sg_index <= to_skip) { | ||
| 147 | current_sg = scatterwalk_sg_next(current_sg); | ||
| 148 | sg_index += current_sg->length; | ||
| 149 | } | ||
| 150 | cpy_index = sg_index - to_skip; | ||
| 151 | memcpy(dest, (u8 *) sg_virt(current_sg) + | ||
| 152 | current_sg->length - cpy_index, cpy_index); | ||
| 153 | current_sg = scatterwalk_sg_next(current_sg); | ||
| 154 | if (end - sg_index) | ||
| 155 | sg_copy(dest + cpy_index, current_sg, end - sg_index); | ||
| 156 | } | ||
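
The intended call sequence for these helpers: sg_count() returns 0 for a single contiguous segment (signalling that a direct pointer beats a one-entry table) and flags chained lists, dma_map_sg_chained() walks chained lists one entry at a time since plain dma_map_sg() does not follow chains here, and sg_to_sec4_sg_last() emits the hardware link table with the FIN bit on the final entry. A sketch under those assumptions:

/* Sketch: map a source scatterlist and emit its SEC4 link table.
 * Returns the number of table entries used, or 0 if none was needed. */
static int example_map_src(struct device *dev, struct scatterlist *src,
                           int nbytes, struct sec4_sg_entry *sec4_sg)
{
        bool chained = false;
        int src_nents = sg_count(src, nbytes, &chained);

        if (!src_nents) {
                /* single segment: use sg_dma_address(src) directly */
                dma_map_sg_chained(dev, src, 1, DMA_TO_DEVICE, chained);
                return 0;
        }

        dma_map_sg_chained(dev, src, src_nents, DMA_TO_DEVICE, chained);
        sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
        return src_nents;
}
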
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 1cc6b3f3e262..0d4071754352 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | #define MV_CESA "MV-CESA:" | 25 | #define MV_CESA "MV-CESA:" |
| 26 | #define MAX_HW_HASH_SIZE 0xFFFF | 26 | #define MAX_HW_HASH_SIZE 0xFFFF |
| 27 | #define MV_CESA_EXPIRE 500 /* msec */ | ||
| 27 | 28 | ||
| 28 | /* | 29 | /* |
| 29 | * STM: | 30 | * STM: |
| @@ -87,6 +88,7 @@ struct crypto_priv { | |||
| 87 | spinlock_t lock; | 88 | spinlock_t lock; |
| 88 | struct crypto_queue queue; | 89 | struct crypto_queue queue; |
| 89 | enum engine_status eng_st; | 90 | enum engine_status eng_st; |
| 91 | struct timer_list completion_timer; | ||
| 90 | struct crypto_async_request *cur_req; | 92 | struct crypto_async_request *cur_req; |
| 91 | struct req_progress p; | 93 | struct req_progress p; |
| 92 | int max_req_size; | 94 | int max_req_size; |
| @@ -138,6 +140,29 @@ struct mv_req_hash_ctx { | |||
| 138 | int count_add; | 140 | int count_add; |
| 139 | }; | 141 | }; |
| 140 | 142 | ||
| 143 | static void mv_completion_timer_callback(unsigned long unused) | ||
| 144 | { | ||
| 145 | int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0; | ||
| 146 | |||
| 147 | printk(KERN_ERR MV_CESA | ||
| 148 | "completion timer expired (CESA %sactive), cleaning up.\n", | ||
| 149 | active ? "" : "in"); | ||
| 150 | |||
| 151 | del_timer(&cpg->completion_timer); | ||
| 152 | writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD); | ||
| 153 | while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC) | ||
| 154 | printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__); | ||
| 155 | cpg->eng_st = ENGINE_W_DEQUEUE; | ||
| 156 | wake_up_process(cpg->queue_th); | ||
| 157 | } | ||
| 158 | |||
| 159 | static void mv_setup_timer(void) | ||
| 160 | { | ||
| 161 | setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0); | ||
| 162 | mod_timer(&cpg->completion_timer, | ||
| 163 | jiffies + msecs_to_jiffies(MV_CESA_EXPIRE)); | ||
| 164 | } | ||
| 165 | |||
| 141 | static void compute_aes_dec_key(struct mv_ctx *ctx) | 166 | static void compute_aes_dec_key(struct mv_ctx *ctx) |
| 142 | { | 167 | { |
| 143 | struct crypto_aes_ctx gen_aes_key; | 168 | struct crypto_aes_ctx gen_aes_key; |
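
This change finally replaces the two "XXX: add timer" notes below with a real watchdog: mv_setup_timer() arms a 500 ms one-shot immediately before each "GO" write, the completion interrupt cancels it, and if it ever fires the engine is presumed wedged -- it is disabled, drained, and the queue thread woken in ENGINE_W_DEQUEUE state to clean up. The pattern in miniature:

/* Watchdog pattern: arm just before starting the engine, disarm in
 * the completion IRQ; expiry means the interrupt was lost. */
mv_setup_timer();                                        /* arm */
writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);  /* GO */

/* in crypto_int(): del_timer() returns 0 if no timer was pending,
 * i.e. an interrupt arrived that the driver never armed for. */
if (!del_timer(&cpg->completion_timer))
        printk(KERN_WARNING MV_CESA
               "got an interrupt but no pending timer?\n");
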
| @@ -273,12 +298,8 @@ static void mv_process_current_q(int first_block) | |||
| 273 | sizeof(struct sec_accel_config)); | 298 | sizeof(struct sec_accel_config)); |
| 274 | 299 | ||
| 275 | /* GO */ | 300 | /* GO */ |
| 301 | mv_setup_timer(); | ||
| 276 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 302 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); |
| 277 | |||
| 278 | /* | ||
| 279 | * XXX: add timer if the interrupt does not occur for some mystery | ||
| 280 | * reason | ||
| 281 | */ | ||
| 282 | } | 303 | } |
| 283 | 304 | ||
| 284 | static void mv_crypto_algo_completion(void) | 305 | static void mv_crypto_algo_completion(void) |
| @@ -357,12 +378,8 @@ static void mv_process_hash_current(int first_block) | |||
| 357 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | 378 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); |
| 358 | 379 | ||
| 359 | /* GO */ | 380 | /* GO */ |
| 381 | mv_setup_timer(); | ||
| 360 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 382 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); |
| 361 | |||
| 362 | /* | ||
| 363 | * XXX: add timer if the interrupt does not occur for some mystery | ||
| 364 | * reason | ||
| 365 | */ | ||
| 366 | } | 383 | } |
| 367 | 384 | ||
| 368 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, | 385 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, |
| @@ -406,6 +423,15 @@ out: | |||
| 406 | return rc; | 423 | return rc; |
| 407 | } | 424 | } |
| 408 | 425 | ||
| 426 | static void mv_save_digest_state(struct mv_req_hash_ctx *ctx) | ||
| 427 | { | ||
| 428 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | ||
| 429 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | ||
| 430 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | ||
| 431 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | ||
| 432 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | ||
| 433 | } | ||
| 434 | |||
| 409 | static void mv_hash_algo_completion(void) | 435 | static void mv_hash_algo_completion(void) |
| 410 | { | 436 | { |
| 411 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | 437 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); |
| @@ -420,14 +446,12 @@ static void mv_hash_algo_completion(void) | |||
| 420 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, | 446 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, |
| 421 | crypto_ahash_digestsize(crypto_ahash_reqtfm | 447 | crypto_ahash_digestsize(crypto_ahash_reqtfm |
| 422 | (req))); | 448 | (req))); |
| 423 | } else | 449 | } else { |
| 450 | mv_save_digest_state(ctx); | ||
| 424 | mv_hash_final_fallback(req); | 451 | mv_hash_final_fallback(req); |
| 452 | } | ||
| 425 | } else { | 453 | } else { |
| 426 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | 454 | mv_save_digest_state(ctx); |
| 427 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | ||
| 428 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | ||
| 429 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | ||
| 430 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | ||
| 431 | } | 455 | } |
| 432 | } | 456 | } |
| 433 | 457 | ||
| @@ -888,6 +912,10 @@ irqreturn_t crypto_int(int irq, void *priv) | |||
| 888 | if (!(val & SEC_INT_ACCEL0_DONE)) | 912 | if (!(val & SEC_INT_ACCEL0_DONE)) |
| 889 | return IRQ_NONE; | 913 | return IRQ_NONE; |
| 890 | 914 | ||
| 915 | if (!del_timer(&cpg->completion_timer)) { | ||
| 916 | printk(KERN_WARNING MV_CESA | ||
| 917 | "got an interrupt but no pending timer?\n"); | ||
| 918 | } | ||
| 891 | val &= ~SEC_INT_ACCEL0_DONE; | 919 | val &= ~SEC_INT_ACCEL0_DONE; |
| 892 | writel(val, cpg->reg + FPGA_INT_STATUS); | 920 | writel(val, cpg->reg + FPGA_INT_STATUS); |
| 893 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); | 921 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); |
| @@ -1061,6 +1089,7 @@ static int mv_probe(struct platform_device *pdev) | |||
| 1061 | if (!IS_ERR(cp->clk)) | 1089 | if (!IS_ERR(cp->clk)) |
| 1062 | clk_prepare_enable(cp->clk); | 1090 | clk_prepare_enable(cp->clk); |
| 1063 | 1091 | ||
| 1092 | writel(0, cpg->reg + SEC_ACCEL_INT_STATUS); | ||
| 1064 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | 1093 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); |
| 1065 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | 1094 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); |
| 1066 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | 1095 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 921039e56f87..efff788d2f1d 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
| @@ -53,117 +53,6 @@ | |||
| 53 | 53 | ||
| 54 | #include "talitos.h" | 54 | #include "talitos.h" |
| 55 | 55 | ||
| 56 | #define TALITOS_TIMEOUT 100000 | ||
| 57 | #define TALITOS_MAX_DATA_LEN 65535 | ||
| 58 | |||
| 59 | #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) | ||
| 60 | #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) | ||
| 61 | #define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf) | ||
| 62 | |||
| 63 | /* descriptor pointer entry */ | ||
| 64 | struct talitos_ptr { | ||
| 65 | __be16 len; /* length */ | ||
| 66 | u8 j_extent; /* jump to sg link table and/or extent */ | ||
| 67 | u8 eptr; /* extended address */ | ||
| 68 | __be32 ptr; /* address */ | ||
| 69 | }; | ||
| 70 | |||
| 71 | static const struct talitos_ptr zero_entry = { | ||
| 72 | .len = 0, | ||
| 73 | .j_extent = 0, | ||
| 74 | .eptr = 0, | ||
| 75 | .ptr = 0 | ||
| 76 | }; | ||
| 77 | |||
| 78 | /* descriptor */ | ||
| 79 | struct talitos_desc { | ||
| 80 | __be32 hdr; /* header high bits */ | ||
| 81 | __be32 hdr_lo; /* header low bits */ | ||
| 82 | struct talitos_ptr ptr[7]; /* ptr/len pair array */ | ||
| 83 | }; | ||
| 84 | |||
| 85 | /** | ||
| 86 | * talitos_request - descriptor submission request | ||
| 87 | * @desc: descriptor pointer (kernel virtual) | ||
| 88 | * @dma_desc: descriptor's physical bus address | ||
| 89 | * @callback: whom to call when descriptor processing is done | ||
| 90 | * @context: caller context (optional) | ||
| 91 | */ | ||
| 92 | struct talitos_request { | ||
| 93 | struct talitos_desc *desc; | ||
| 94 | dma_addr_t dma_desc; | ||
| 95 | void (*callback) (struct device *dev, struct talitos_desc *desc, | ||
| 96 | void *context, int error); | ||
| 97 | void *context; | ||
| 98 | }; | ||
| 99 | |||
| 100 | /* per-channel fifo management */ | ||
| 101 | struct talitos_channel { | ||
| 102 | void __iomem *reg; | ||
| 103 | |||
| 104 | /* request fifo */ | ||
| 105 | struct talitos_request *fifo; | ||
| 106 | |||
| 107 | /* number of requests pending in channel h/w fifo */ | ||
| 108 | atomic_t submit_count ____cacheline_aligned; | ||
| 109 | |||
| 110 | /* request submission (head) lock */ | ||
| 111 | spinlock_t head_lock ____cacheline_aligned; | ||
| 112 | /* index to next free descriptor request */ | ||
| 113 | int head; | ||
| 114 | |||
| 115 | /* request release (tail) lock */ | ||
| 116 | spinlock_t tail_lock ____cacheline_aligned; | ||
| 117 | /* index to next in-progress/done descriptor request */ | ||
| 118 | int tail; | ||
| 119 | }; | ||
| 120 | |||
| 121 | struct talitos_private { | ||
| 122 | struct device *dev; | ||
| 123 | struct platform_device *ofdev; | ||
| 124 | void __iomem *reg; | ||
| 125 | int irq[2]; | ||
| 126 | |||
| 127 | /* SEC global registers lock */ | ||
| 128 | spinlock_t reg_lock ____cacheline_aligned; | ||
| 129 | |||
| 130 | /* SEC version geometry (from device tree node) */ | ||
| 131 | unsigned int num_channels; | ||
| 132 | unsigned int chfifo_len; | ||
| 133 | unsigned int exec_units; | ||
| 134 | unsigned int desc_types; | ||
| 135 | |||
| 136 | /* SEC Compatibility info */ | ||
| 137 | unsigned long features; | ||
| 138 | |||
| 139 | /* | ||
| 140 | * length of the request fifo | ||
| 141 | * fifo_len is chfifo_len rounded up to next power of 2 | ||
| 142 | * so we can use bitwise ops to wrap | ||
| 143 | */ | ||
| 144 | unsigned int fifo_len; | ||
| 145 | |||
| 146 | struct talitos_channel *chan; | ||
| 147 | |||
| 148 | /* next channel to be assigned next incoming descriptor */ | ||
| 149 | atomic_t last_chan ____cacheline_aligned; | ||
| 150 | |||
| 151 | /* request callback tasklet */ | ||
| 152 | struct tasklet_struct done_task[2]; | ||
| 153 | |||
| 154 | /* list of registered algorithms */ | ||
| 155 | struct list_head alg_list; | ||
| 156 | |||
| 157 | /* hwrng device */ | ||
| 158 | struct hwrng rng; | ||
| 159 | }; | ||
| 160 | |||
| 161 | /* .features flag */ | ||
| 162 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | ||
| 163 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | ||
| 164 | #define TALITOS_FTR_SHA224_HWINIT 0x00000004 | ||
| 165 | #define TALITOS_FTR_HMAC_OK 0x00000008 | ||
| 166 | |||
| 167 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) | 56 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) |
| 168 | { | 57 | { |
| 169 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); | 58 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); |
| @@ -303,11 +192,11 @@ static int init_device(struct device *dev) | |||
| 303 | * callback must check err and feedback in descriptor header | 192 | * callback must check err and feedback in descriptor header |
| 304 | * for device processing status. | 193 | * for device processing status. |
| 305 | */ | 194 | */ |
| 306 | static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | 195 | int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, |
| 307 | void (*callback)(struct device *dev, | 196 | void (*callback)(struct device *dev, |
| 308 | struct talitos_desc *desc, | 197 | struct talitos_desc *desc, |
| 309 | void *context, int error), | 198 | void *context, int error), |
| 310 | void *context) | 199 | void *context) |
| 311 | { | 200 | { |
| 312 | struct talitos_private *priv = dev_get_drvdata(dev); | 201 | struct talitos_private *priv = dev_get_drvdata(dev); |
| 313 | struct talitos_request *request; | 202 | struct talitos_request *request; |
| @@ -348,6 +237,7 @@ static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
| 348 | 237 | ||
| 349 | return -EINPROGRESS; | 238 | return -EINPROGRESS; |
| 350 | } | 239 | } |
| 240 | EXPORT_SYMBOL(talitos_submit); | ||
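With talitos_submit() exported, code outside talitos.c can enqueue descriptors directly. A minimal caller sketch, assuming the descriptor and its DMA mapping are already prepared; dev, ch, desc, my_done and my_ctx are illustrative names, not part of the patch:

static void my_done(struct device *dev, struct talitos_desc *desc,
                    void *context, int error)
{
        /* inspect error and the status the SEC fed back into desc->hdr */
}

        /* ...inside a request path, with a channel number ch in hand... */
        err = talitos_submit(dev, ch, desc, my_done, my_ctx);
        if (err != -EINPROGRESS)
                goto unmap;     /* e.g. the channel fifo was full */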
| 351 | 241 | ||
| 352 | /* | 242 | /* |
| 353 | * process what was done, notify callback of error if not | 243 | * process what was done, notify callback of error if not |
| @@ -733,7 +623,7 @@ static void talitos_unregister_rng(struct device *dev) | |||
| 733 | * crypto alg | 623 | * crypto alg |
| 734 | */ | 624 | */ |
| 735 | #define TALITOS_CRA_PRIORITY 3000 | 625 | #define TALITOS_CRA_PRIORITY 3000 |
| 736 | #define TALITOS_MAX_KEY_SIZE 64 | 626 | #define TALITOS_MAX_KEY_SIZE 96 |
| 737 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | 627 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ |
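The key-size bump from 64 to 96 bytes presumably makes room for the largest authenc() key pair added below: a 64-byte HMAC-SHA512 key concatenated with a 32-byte AES-256 key (64 + 32 = 96), stored back to back in the transform context by aead_setkey().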
| 738 | 628 | ||
| 739 | #define MD5_BLOCK_SIZE 64 | 629 | #define MD5_BLOCK_SIZE 64 |
| @@ -2066,6 +1956,59 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2066 | DESC_HDR_MODE1_MDEU_PAD | | 1956 | DESC_HDR_MODE1_MDEU_PAD | |
| 2067 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1957 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
| 2068 | }, | 1958 | }, |
| 1959 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
| 1960 | .alg.crypto = { | ||
| 1961 | .cra_name = "authenc(hmac(sha224),cbc(aes))", | ||
| 1962 | .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos", | ||
| 1963 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1964 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
| 1965 | .cra_type = &crypto_aead_type, | ||
| 1966 | .cra_aead = { | ||
| 1967 | .setkey = aead_setkey, | ||
| 1968 | .setauthsize = aead_setauthsize, | ||
| 1969 | .encrypt = aead_encrypt, | ||
| 1970 | .decrypt = aead_decrypt, | ||
| 1971 | .givencrypt = aead_givencrypt, | ||
| 1972 | .geniv = "<built-in>", | ||
| 1973 | .ivsize = AES_BLOCK_SIZE, | ||
| 1974 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 1975 | } | ||
| 1976 | }, | ||
| 1977 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
| 1978 | DESC_HDR_SEL0_AESU | | ||
| 1979 | DESC_HDR_MODE0_AESU_CBC | | ||
| 1980 | DESC_HDR_SEL1_MDEUA | | ||
| 1981 | DESC_HDR_MODE1_MDEU_INIT | | ||
| 1982 | DESC_HDR_MODE1_MDEU_PAD | | ||
| 1983 | DESC_HDR_MODE1_MDEU_SHA224_HMAC, | ||
| 1984 | }, | ||
| 1985 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
| 1986 | .alg.crypto = { | ||
| 1987 | .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", | ||
| 1988 | .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos", | ||
| 1989 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 1990 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
| 1991 | .cra_type = &crypto_aead_type, | ||
| 1992 | .cra_aead = { | ||
| 1993 | .setkey = aead_setkey, | ||
| 1994 | .setauthsize = aead_setauthsize, | ||
| 1995 | .encrypt = aead_encrypt, | ||
| 1996 | .decrypt = aead_decrypt, | ||
| 1997 | .givencrypt = aead_givencrypt, | ||
| 1998 | .geniv = "<built-in>", | ||
| 1999 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2000 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 2001 | } | ||
| 2002 | }, | ||
| 2003 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
| 2004 | DESC_HDR_SEL0_DEU | | ||
| 2005 | DESC_HDR_MODE0_DEU_CBC | | ||
| 2006 | DESC_HDR_MODE0_DEU_3DES | | ||
| 2007 | DESC_HDR_SEL1_MDEUA | | ||
| 2008 | DESC_HDR_MODE1_MDEU_INIT | | ||
| 2009 | DESC_HDR_MODE1_MDEU_PAD | | ||
| 2010 | DESC_HDR_MODE1_MDEU_SHA224_HMAC, | ||
| 2011 | }, | ||
| 2069 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2012 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2070 | .alg.crypto = { | 2013 | .alg.crypto = { |
| 2071 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 2014 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
| @@ -2121,6 +2064,112 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2121 | }, | 2064 | }, |
| 2122 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2065 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2123 | .alg.crypto = { | 2066 | .alg.crypto = { |
| 2067 | .cra_name = "authenc(hmac(sha384),cbc(aes))", | ||
| 2068 | .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos", | ||
| 2069 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 2070 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
| 2071 | .cra_type = &crypto_aead_type, | ||
| 2072 | .cra_aead = { | ||
| 2073 | .setkey = aead_setkey, | ||
| 2074 | .setauthsize = aead_setauthsize, | ||
| 2075 | .encrypt = aead_encrypt, | ||
| 2076 | .decrypt = aead_decrypt, | ||
| 2077 | .givencrypt = aead_givencrypt, | ||
| 2078 | .geniv = "<built-in>", | ||
| 2079 | .ivsize = AES_BLOCK_SIZE, | ||
| 2080 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
| 2081 | } | ||
| 2082 | }, | ||
| 2083 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
| 2084 | DESC_HDR_SEL0_AESU | | ||
| 2085 | DESC_HDR_MODE0_AESU_CBC | | ||
| 2086 | DESC_HDR_SEL1_MDEUB | | ||
| 2087 | DESC_HDR_MODE1_MDEU_INIT | | ||
| 2088 | DESC_HDR_MODE1_MDEU_PAD | | ||
| 2089 | DESC_HDR_MODE1_MDEUB_SHA384_HMAC, | ||
| 2090 | }, | ||
| 2091 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
| 2092 | .alg.crypto = { | ||
| 2093 | .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", | ||
| 2094 | .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos", | ||
| 2095 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 2096 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
| 2097 | .cra_type = &crypto_aead_type, | ||
| 2098 | .cra_aead = { | ||
| 2099 | .setkey = aead_setkey, | ||
| 2100 | .setauthsize = aead_setauthsize, | ||
| 2101 | .encrypt = aead_encrypt, | ||
| 2102 | .decrypt = aead_decrypt, | ||
| 2103 | .givencrypt = aead_givencrypt, | ||
| 2104 | .geniv = "<built-in>", | ||
| 2105 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2106 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
| 2107 | } | ||
| 2108 | }, | ||
| 2109 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
| 2110 | DESC_HDR_SEL0_DEU | | ||
| 2111 | DESC_HDR_MODE0_DEU_CBC | | ||
| 2112 | DESC_HDR_MODE0_DEU_3DES | | ||
| 2113 | DESC_HDR_SEL1_MDEUB | | ||
| 2114 | DESC_HDR_MODE1_MDEU_INIT | | ||
| 2115 | DESC_HDR_MODE1_MDEU_PAD | | ||
| 2116 | DESC_HDR_MODE1_MDEUB_SHA384_HMAC, | ||
| 2117 | }, | ||
| 2118 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
| 2119 | .alg.crypto = { | ||
| 2120 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | ||
| 2121 | .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos", | ||
| 2122 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 2123 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
| 2124 | .cra_type = &crypto_aead_type, | ||
| 2125 | .cra_aead = { | ||
| 2126 | .setkey = aead_setkey, | ||
| 2127 | .setauthsize = aead_setauthsize, | ||
| 2128 | .encrypt = aead_encrypt, | ||
| 2129 | .decrypt = aead_decrypt, | ||
| 2130 | .givencrypt = aead_givencrypt, | ||
| 2131 | .geniv = "<built-in>", | ||
| 2132 | .ivsize = AES_BLOCK_SIZE, | ||
| 2133 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 2134 | } | ||
| 2135 | }, | ||
| 2136 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
| 2137 | DESC_HDR_SEL0_AESU | | ||
| 2138 | DESC_HDR_MODE0_AESU_CBC | | ||
| 2139 | DESC_HDR_SEL1_MDEUB | | ||
| 2140 | DESC_HDR_MODE1_MDEU_INIT | | ||
| 2141 | DESC_HDR_MODE1_MDEU_PAD | | ||
| 2142 | DESC_HDR_MODE1_MDEUB_SHA512_HMAC, | ||
| 2143 | }, | ||
| 2144 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
| 2145 | .alg.crypto = { | ||
| 2146 | .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", | ||
| 2147 | .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos", | ||
| 2148 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 2149 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
| 2150 | .cra_type = &crypto_aead_type, | ||
| 2151 | .cra_aead = { | ||
| 2152 | .setkey = aead_setkey, | ||
| 2153 | .setauthsize = aead_setauthsize, | ||
| 2154 | .encrypt = aead_encrypt, | ||
| 2155 | .decrypt = aead_decrypt, | ||
| 2156 | .givencrypt = aead_givencrypt, | ||
| 2157 | .geniv = "<built-in>", | ||
| 2158 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2159 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 2160 | } | ||
| 2161 | }, | ||
| 2162 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | ||
| 2163 | DESC_HDR_SEL0_DEU | | ||
| 2164 | DESC_HDR_MODE0_DEU_CBC | | ||
| 2165 | DESC_HDR_MODE0_DEU_3DES | | ||
| 2166 | DESC_HDR_SEL1_MDEUB | | ||
| 2167 | DESC_HDR_MODE1_MDEU_INIT | | ||
| 2168 | DESC_HDR_MODE1_MDEU_PAD | | ||
| 2169 | DESC_HDR_MODE1_MDEUB_SHA512_HMAC, | ||
| 2170 | }, | ||
| 2171 | { .type = CRYPTO_ALG_TYPE_AEAD, | ||
| 2172 | .alg.crypto = { | ||
| 2124 | .cra_name = "authenc(hmac(md5),cbc(aes))", | 2173 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
| 2125 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", | 2174 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", |
| 2126 | .cra_blocksize = AES_BLOCK_SIZE, | 2175 | .cra_blocksize = AES_BLOCK_SIZE, |
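The new template entries register under the generic authenc() names, so callers reach the hardware through the ordinary crypto API; the driver is preferred over software fallbacks via TALITOS_CRA_PRIORITY (3000). A minimal kernel-side sketch, with the transform handling elided:

        struct crypto_aead *tfm;

        tfm = crypto_alloc_aead("authenc(hmac(sha512),cbc(aes))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        /* talitos is selected when present, thanks to its cra_priority */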
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 3c173954ef29..61a14054aa39 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
| @@ -28,6 +28,123 @@ | |||
| 28 | * | 28 | * |
| 29 | */ | 29 | */ |
| 30 | 30 | ||
| 31 | #define TALITOS_TIMEOUT 100000 | ||
| 32 | #define TALITOS_MAX_DATA_LEN 65535 | ||
| 33 | |||
| 34 | #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) | ||
| 35 | #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) | ||
| 36 | #define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf) | ||
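These helpers extract the descriptor-type and execution-unit fields from the big-endian header. A minimal sketch of how they compose with the DESC_HDR_TYPE_IPSEC_ESP template constant defined later in this header; is_ipsec_esp() itself is illustrative, not driver code:

static inline bool is_ipsec_esp(__be32 hdr)
{
        /* DESC_TYPE() isolates bits 3..7 of the native-endian header */
        return DESC_TYPE(hdr) == DESC_TYPE(DESC_HDR_TYPE_IPSEC_ESP);
}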
| 37 | |||
| 38 | /* descriptor pointer entry */ | ||
| 39 | struct talitos_ptr { | ||
| 40 | __be16 len; /* length */ | ||
| 41 | u8 j_extent; /* jump to sg link table and/or extent */ | ||
| 42 | u8 eptr; /* extended address */ | ||
| 43 | __be32 ptr; /* address */ | ||
| 44 | }; | ||
| 45 | |||
| 46 | static const struct talitos_ptr zero_entry = { | ||
| 47 | .len = 0, | ||
| 48 | .j_extent = 0, | ||
| 49 | .eptr = 0, | ||
| 50 | .ptr = 0 | ||
| 51 | }; | ||
| 52 | |||
| 53 | /* descriptor */ | ||
| 54 | struct talitos_desc { | ||
| 55 | __be32 hdr; /* header high bits */ | ||
| 56 | __be32 hdr_lo; /* header low bits */ | ||
| 57 | struct talitos_ptr ptr[7]; /* ptr/len pair array */ | ||
| 58 | }; | ||
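Each talitos_ptr packs into 8 bytes (2 + 1 + 1 + 4), so a descriptor is 4 + 4 + 7 * 8 = 64 bytes, matching the fixed descriptor size the SEC fetches. A compile-time guard along these lines (our addition, not part of the patch) would document that assumption:

static inline void talitos_desc_layout_check(void)
{
        BUILD_BUG_ON(sizeof(struct talitos_ptr) != 8);
        BUILD_BUG_ON(sizeof(struct talitos_desc) != 64);
}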
| 59 | |||
| 60 | /** | ||
| 61 | * talitos_request - descriptor submission request | ||
| 62 | * @desc: descriptor pointer (kernel virtual) | ||
| 63 | * @dma_desc: descriptor's physical bus address | ||
| 64 | * @callback: whom to call when descriptor processing is done | ||
| 65 | * @context: caller context (optional) | ||
| 66 | */ | ||
| 67 | struct talitos_request { | ||
| 68 | struct talitos_desc *desc; | ||
| 69 | dma_addr_t dma_desc; | ||
| 70 | void (*callback) (struct device *dev, struct talitos_desc *desc, | ||
| 71 | void *context, int error); | ||
| 72 | void *context; | ||
| 73 | }; | ||
| 74 | |||
| 75 | /* per-channel fifo management */ | ||
| 76 | struct talitos_channel { | ||
| 77 | void __iomem *reg; | ||
| 78 | |||
| 79 | /* request fifo */ | ||
| 80 | struct talitos_request *fifo; | ||
| 81 | |||
| 82 | /* number of requests pending in channel h/w fifo */ | ||
| 83 | atomic_t submit_count ____cacheline_aligned; | ||
| 84 | |||
| 85 | /* request submission (head) lock */ | ||
| 86 | spinlock_t head_lock ____cacheline_aligned; | ||
| 87 | /* index to next free descriptor request */ | ||
| 88 | int head; | ||
| 89 | |||
| 90 | /* request release (tail) lock */ | ||
| 91 | spinlock_t tail_lock ____cacheline_aligned; | ||
| 92 | /* index to next in-progress/done descriptor request */ | ||
| 93 | int tail; | ||
| 94 | }; | ||
| 95 | |||
| 96 | struct talitos_private { | ||
| 97 | struct device *dev; | ||
| 98 | struct platform_device *ofdev; | ||
| 99 | void __iomem *reg; | ||
| 100 | int irq[2]; | ||
| 101 | |||
| 102 | /* SEC global registers lock */ | ||
| 103 | spinlock_t reg_lock ____cacheline_aligned; | ||
| 104 | |||
| 105 | /* SEC version geometry (from device tree node) */ | ||
| 106 | unsigned int num_channels; | ||
| 107 | unsigned int chfifo_len; | ||
| 108 | unsigned int exec_units; | ||
| 109 | unsigned int desc_types; | ||
| 110 | |||
| 112 | /* SEC compatibility info */ | ||
| 112 | unsigned long features; | ||
| 113 | |||
| 114 | /* | ||
| 115 | * length of the request fifo | ||
| 116 | * fifo_len is chfifo_len rounded up to the next power of 2 | ||
| 117 | * so we can use bitwise ops to wrap | ||
| 118 | */ | ||
| 119 | unsigned int fifo_len; | ||
| 120 | |||
| 121 | struct talitos_channel *chan; | ||
| 122 | |||
| 124 | /* next channel to be assigned the next incoming descriptor */ | ||
| 124 | atomic_t last_chan ____cacheline_aligned; | ||
| 125 | |||
| 126 | /* request callback tasklet */ | ||
| 127 | struct tasklet_struct done_task[2]; | ||
| 128 | |||
| 129 | /* list of registered algorithms */ | ||
| 130 | struct list_head alg_list; | ||
| 131 | |||
| 132 | /* hwrng device */ | ||
| 133 | struct hwrng rng; | ||
| 134 | }; | ||
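Because fifo_len is a power of two, the head and tail indices wrap with a bitwise AND instead of a modulo. A sketch of the idiom as the submit path would use it, assuming the head lock is held as documented in talitos_channel:

        /* claim the next free slot, then wrap head with a mask */
        request = &priv->chan[ch].fifo[priv->chan[ch].head];
        priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);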
| 135 | |||
| 136 | extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | ||
| 137 | void (*callback)(struct device *dev, | ||
| 138 | struct talitos_desc *desc, | ||
| 139 | void *context, int error), | ||
| 140 | void *context); | ||
| 141 | |||
| 142 | /* .features flags */ | ||
| 143 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | ||
| 144 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | ||
| 145 | #define TALITOS_FTR_SHA224_HWINIT 0x00000004 | ||
| 146 | #define TALITOS_FTR_HMAC_OK 0x00000008 | ||
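Probe code ORs these into priv->features based on the device-tree compatible entry, and registration paths test them with a simple mask. A hypothetical gate inside the algorithm-registration loop (the alg variable and surrounding loop are assumed context, not driver code):

        /* skip SHA-224 algorithms when the MDEU cannot hw-init that state */
        if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
            strstr(alg->cra_name, "sha224"))
                continue;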
| 147 | |||
| 31 | /* | 148 | /* |
| 32 | * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register | 149 | * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register |
| 33 | */ | 150 | */ |
| @@ -209,6 +326,12 @@ | |||
| 209 | DESC_HDR_MODE1_MDEU_HMAC) | 326 | DESC_HDR_MODE1_MDEU_HMAC) |
| 210 | #define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \ | 327 | #define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \ |
| 211 | DESC_HDR_MODE1_MDEU_HMAC) | 328 | DESC_HDR_MODE1_MDEU_HMAC) |
| 329 | #define DESC_HDR_MODE1_MDEU_SHA224_HMAC (DESC_HDR_MODE1_MDEU_SHA224 | \ | ||
| 330 | DESC_HDR_MODE1_MDEU_HMAC) | ||
| 331 | #define DESC_HDR_MODE1_MDEUB_SHA384_HMAC (DESC_HDR_MODE1_MDEUB_SHA384 | \ | ||
| 332 | DESC_HDR_MODE1_MDEU_HMAC) | ||
| 333 | #define DESC_HDR_MODE1_MDEUB_SHA512_HMAC (DESC_HDR_MODE1_MDEUB_SHA512 | \ | ||
| 334 | DESC_HDR_MODE1_MDEU_HMAC) | ||
| 212 | 335 | ||
| 213 | /* direction of overall data flow (DIR) */ | 336 | /* direction of overall data flow (DIR) */ |
| 214 | #define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002) | 337 | #define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002) |
diff --git a/include/linux/platform_data/atmel-aes.h b/include/linux/platform_data/atmel-aes.h new file mode 100644 index 000000000000..e7a1949bad26 --- /dev/null +++ b/include/linux/platform_data/atmel-aes.h | |||
| @@ -0,0 +1,22 @@ | |||
| 1 | #ifndef __LINUX_ATMEL_AES_H | ||
| 2 | #define __LINUX_ATMEL_AES_H | ||
| 3 | |||
| 4 | #include <mach/at_hdmac.h> | ||
| 5 | |||
| 6 | /** | ||
| 7 | * struct aes_dma_data - DMA slave data for the AES TX and RX channels | ||
| 8 | */ | ||
| 9 | struct aes_dma_data { | ||
| 10 | struct at_dma_slave txdata; | ||
| 11 | struct at_dma_slave rxdata; | ||
| 12 | }; | ||
| 13 | |||
| 14 | /** | ||
| 15 | * struct aes_platform_data - board-specific AES configuration | ||
| 16 | * @dma_slave: DMA slave interface to use in data transfers. | ||
| 17 | */ | ||
| 18 | struct aes_platform_data { | ||
| 19 | struct aes_dma_data *dma_slave; | ||
| 20 | }; | ||
| 21 | |||
| 22 | #endif /* __LINUX_ATMEL_AES_H */ | ||
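A board file is expected to fill this in with at_hdmac slave parameters and hand it to a device-registration helper. A hedged sketch of that wiring; the at91_add_device_aes() name and the empty slave data are assumptions, mirroring how other at91_add_device_*() helpers are used:

static struct aes_dma_data aes_dma_data = {
        /* .txdata/.rxdata: at_dma_slave settings for the AES in/out FIFOs */
};

static struct aes_platform_data aes_pdata = {
        .dma_slave = &aes_dma_data,
};

        /* hypothetical board-init call */
        at91_add_device_aes(&aes_pdata);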
