author	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-23 18:59:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-23 18:59:10 -0400
commit	0bd3fbd4abeafa19ae0302d25194468b022d1a56 (patch)
tree	1fc34b25666c97b85dfb7199e48b2e074ffde264
parent	0b87da68a0f0a7bf7f7446cf64f92e672bd68ef8 (diff)
parent	ef45b834319f8a18f257a40ba4bce6b829ef1708 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 - New cipher/hash driver for ARM ux500.
 - Code clean-up for aesni-intel.
 - Misc fixes.

Fixed up conflicts in arch/arm/mach-ux500/devices-common.h, where quite
frankly some of it made no sense at all (the pull brought in a
declaration for the dbx500_add_platform_device_noirq() function, which
neither exists nor is used anywhere).

Also some trivial add-add context conflicts in the Kconfig file in
drivers/{char/hw_random,crypto}/

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: aesni-intel - move more common code to ablk_init_common
  crypto: aesni-intel - use crypto_[un]register_algs
  crypto: ux500 - Cleanup hardware identification
  crypto: ux500 - Update DMA handling for 3.4
  mach-ux500: crypto - core support for CRYP/HASH module.
  crypto: ux500 - Add driver for HASH hardware
  crypto: ux500 - Add driver for CRYP hardware
  hwrng: Kconfig - modify default state for atmel-rng driver
  hwrng: omap - use devm_request_and_ioremap
  crypto: crypto4xx - move up err_request_irq label
  crypto, xor: Sanitize checksumming function selection output
  crypto: caam - add backward compatible string sec4.0
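Two of the aesni-intel commits in this pull convert the driver from a dozen individual crypto_register_alg() calls, each with its own unwind label, to a single array registered with crypto_register_algs(). A minimal sketch of that pattern follows, with placeholder entries rather than the driver's real ones (the real array appears in the aesni diff below):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>

/* Placeholder entries for illustration only. */
static struct crypto_alg my_algs[] = { {
	.cra_name		= "cipher-a",
	.cra_driver_name	= "cipher-a-example",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= 16,
	.cra_module		= THIS_MODULE,
}, {
	.cra_name		= "cipher-b",
	.cra_driver_name	= "cipher-b-example",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= 16,
	.cra_module		= THIS_MODULE,
} };

static int __init my_init(void)
{
	/* Registers every entry; on failure the crypto core unregisters
	 * the entries it already registered, so no hand-rolled goto
	 * ladder is needed in the driver. */
	return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
}

static void __exit my_exit(void)
{
	crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");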
-rw-r--r--	arch/arm/mach-ux500/board-mop500.c	48
-rw-r--r--	arch/arm/mach-ux500/clock.c	18
-rw-r--r--	arch/arm/mach-ux500/devices-common.h	50
-rw-r--r--	arch/arm/mach-ux500/devices-db8500.c	3
-rw-r--r--	arch/arm/mach-ux500/devices-db8500.h	4
-rw-r--r--	arch/arm/mach-ux500/include/mach/crypto-ux500.h	22
-rw-r--r--	arch/arm/mach-ux500/include/mach/devices.h	3
-rw-r--r--	arch/arm/mach-ux500/include/mach/hardware.h	3
-rw-r--r--	arch/x86/crypto/aesni-intel_glue.c	793
-rw-r--r--	crypto/xor.c	5
-rw-r--r--	drivers/char/hw_random/Kconfig	2
-rw-r--r--	drivers/char/hw_random/omap-rng.c	22
-rw-r--r--	drivers/crypto/Kconfig	11
-rw-r--r--	drivers/crypto/Makefile	1
-rw-r--r--	drivers/crypto/amcc/crypto4xx_core.c	2
-rw-r--r--	drivers/crypto/caam/caamalg.c	14
-rw-r--r--	drivers/crypto/caam/ctrl.c	16
-rw-r--r--	drivers/crypto/ux500/Kconfig	30
-rw-r--r--	drivers/crypto/ux500/Makefile	8
-rw-r--r--	drivers/crypto/ux500/cryp/Makefile	13
-rw-r--r--	drivers/crypto/ux500/cryp/cryp.c	389
-rw-r--r--	drivers/crypto/ux500/cryp/cryp.h	308
-rw-r--r--	drivers/crypto/ux500/cryp/cryp_core.c	1784
-rw-r--r--	drivers/crypto/ux500/cryp/cryp_irq.c	45
-rw-r--r--	drivers/crypto/ux500/cryp/cryp_irq.h	31
-rw-r--r--	drivers/crypto/ux500/cryp/cryp_irqp.h	125
-rw-r--r--	drivers/crypto/ux500/cryp/cryp_p.h	123
-rw-r--r--	drivers/crypto/ux500/hash/Makefile	11
-rw-r--r--	drivers/crypto/ux500/hash/hash_alg.h	395
-rw-r--r--	drivers/crypto/ux500/hash/hash_core.c	2009
30 files changed, 5777 insertions, 511 deletions
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 4bc0cbc5f071..f943687acaf0 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -47,6 +47,7 @@
 #include <mach/setup.h>
 #include <mach/devices.h>
 #include <mach/irqs.h>
+#include <mach/crypto-ux500.h>
 
 #include "ste-dma40-db8500.h"
 #include "devices-db8500.h"
@@ -417,6 +418,45 @@ static void mop500_prox_deactivate(struct device *dev)
 	regulator_put(prox_regulator);
 }
 
+static struct cryp_platform_data u8500_cryp1_platform_data = {
+	.mem_to_engine = {
+		.dir = STEDMA40_MEM_TO_PERIPH,
+		.src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+		.dst_dev_type = DB8500_DMA_DEV48_CAC1_TX,
+		.src_info.data_width = STEDMA40_WORD_WIDTH,
+		.dst_info.data_width = STEDMA40_WORD_WIDTH,
+		.mode = STEDMA40_MODE_LOGICAL,
+		.src_info.psize = STEDMA40_PSIZE_LOG_4,
+		.dst_info.psize = STEDMA40_PSIZE_LOG_4,
+	},
+	.engine_to_mem = {
+		.dir = STEDMA40_PERIPH_TO_MEM,
+		.src_dev_type = DB8500_DMA_DEV48_CAC1_RX,
+		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+		.src_info.data_width = STEDMA40_WORD_WIDTH,
+		.dst_info.data_width = STEDMA40_WORD_WIDTH,
+		.mode = STEDMA40_MODE_LOGICAL,
+		.src_info.psize = STEDMA40_PSIZE_LOG_4,
+		.dst_info.psize = STEDMA40_PSIZE_LOG_4,
+	}
+};
+
+static struct stedma40_chan_cfg u8500_hash_dma_cfg_tx = {
+	.dir = STEDMA40_MEM_TO_PERIPH,
+	.src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+	.dst_dev_type = DB8500_DMA_DEV50_HAC1_TX,
+	.src_info.data_width = STEDMA40_WORD_WIDTH,
+	.dst_info.data_width = STEDMA40_WORD_WIDTH,
+	.mode = STEDMA40_MODE_LOGICAL,
+	.src_info.psize = STEDMA40_PSIZE_LOG_16,
+	.dst_info.psize = STEDMA40_PSIZE_LOG_16,
+};
+
+static struct hash_platform_data u8500_hash1_platform_data = {
+	.mem_to_engine = &u8500_hash_dma_cfg_tx,
+	.dma_filter = stedma40_filter,
+};
+
 /* add any platform devices here - TODO */
 static struct platform_device *mop500_platform_devs[] __initdata = {
 	&mop500_gpio_keys_device,
@@ -624,6 +664,12 @@ static void __init mop500_uart_init(struct device *parent)
 	db8500_add_uart2(parent, &uart2_plat);
 }
 
+static void __init u8500_cryp1_hash1_init(struct device *parent)
+{
+	db8500_add_cryp1(parent, &u8500_cryp1_platform_data);
+	db8500_add_hash1(parent, &u8500_hash1_platform_data);
+}
+
 static struct platform_device *snowball_platform_devs[] __initdata = {
 	&snowball_led_dev,
 	&snowball_key_dev,
@@ -654,6 +700,8 @@ static void __init mop500_init_machine(void)
 	mop500_msp_init(parent);
 	mop500_uart_init(parent);
 
+	u8500_cryp1_hash1_init(parent);
+
 	i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
 
 	i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
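The stedma40_chan_cfg structures above only describe the DMA channels; a consumer driver still has to claim them through the dmaengine API. A hedged sketch of how that typically looks on ux500 (the function name is illustrative; the cryp/hash drivers added later in this pull do the equivalent internally):

#include <linux/dmaengine.h>
#include <plat/ste_dma40.h>
#include <mach/crypto-ux500.h>

static struct dma_chan *claim_mem2engine_chan(struct cryp_platform_data *pdata)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* stedma40_filter() picks a DMA40 channel matching this config */
	return dma_request_channel(mask, stedma40_filter,
				   &pdata->mem_to_engine);
}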
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index a121cb472dd6..1762c4728f1e 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -381,14 +381,15 @@ static DEFINE_PRCC_CLK(5, usb, 0, 0, NULL);
 /* Peripheral Cluster #6 */
 
 /* MTU ID in data */
-static DEFINE_PRCC_CLK_CUSTOM(6, mtu1, 8, -1, NULL, clk_mtu_get_rate, 1);
-static DEFINE_PRCC_CLK_CUSTOM(6, mtu0, 7, -1, NULL, clk_mtu_get_rate, 0);
-static DEFINE_PRCC_CLK(6, cfgreg, 6, 6, NULL);
-static DEFINE_PRCC_CLK(6, hash1, 5, -1, NULL);
-static DEFINE_PRCC_CLK(6, unipro, 4, 1, &clk_uniproclk);
-static DEFINE_PRCC_CLK(6, pka, 3, -1, NULL);
-static DEFINE_PRCC_CLK(6, hash0, 2, -1, NULL);
-static DEFINE_PRCC_CLK(6, cryp0, 1, -1, NULL);
+static DEFINE_PRCC_CLK_CUSTOM(6, mtu1, 9, -1, NULL, clk_mtu_get_rate, 1);
+static DEFINE_PRCC_CLK_CUSTOM(6, mtu0, 8, -1, NULL, clk_mtu_get_rate, 0);
+static DEFINE_PRCC_CLK(6, cfgreg, 7, 7, NULL);
+static DEFINE_PRCC_CLK(6, hash1, 6, -1, NULL);
+static DEFINE_PRCC_CLK(6, unipro, 5, 1, &clk_uniproclk);
+static DEFINE_PRCC_CLK(6, pka, 4, -1, NULL);
+static DEFINE_PRCC_CLK(6, hash0, 3, -1, NULL);
+static DEFINE_PRCC_CLK(6, cryp0, 2, -1, NULL);
+static DEFINE_PRCC_CLK(6, cryp1, 1, -1, NULL);
 static DEFINE_PRCC_CLK(6, rng, 0, 0, &clk_rngclk);
 
 static struct clk clk_dummy_apb_pclk = {
@@ -430,6 +431,7 @@ static struct clk_lookup u8500_clks[] = {
 	CLK(pka, "pka", NULL),
 	CLK(hash0, "hash0", NULL),
 	CLK(cryp0, "cryp0", NULL),
+	CLK(cryp1, "cryp1", NULL),
 
 	/* PRCMU level clock gating */
 
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index 7cbccfd9e158..6e4706560266 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -13,6 +13,7 @@
 #include <linux/sys_soc.h>
 #include <linux/amba/bus.h>
 #include <plat/i2c.h>
+#include <mach/crypto-ux500.h>
 
 struct spi_master_cntlr;
 
@@ -85,6 +86,55 @@ dbx500_add_rtc(struct device *parent, resource_size_t base, int irq)
 			0, NULL, 0);
 }
 
+struct cryp_platform_data;
+
+static inline struct platform_device *
+dbx500_add_cryp1(struct device *parent, int id, resource_size_t base, int irq,
+		 struct cryp_platform_data *pdata)
+{
+	struct resource res[] = {
+		DEFINE_RES_MEM(base, SZ_4K),
+		DEFINE_RES_IRQ(irq),
+	};
+
+	struct platform_device_info pdevinfo = {
+		.parent = parent,
+		.name = "cryp1",
+		.id = id,
+		.res = res,
+		.num_res = ARRAY_SIZE(res),
+		.data = pdata,
+		.size_data = sizeof(*pdata),
+		.dma_mask = DMA_BIT_MASK(32),
+	};
+
+	return platform_device_register_full(&pdevinfo);
+}
+
+struct hash_platform_data;
+
+static inline struct platform_device *
+dbx500_add_hash1(struct device *parent, int id, resource_size_t base,
+		 struct hash_platform_data *pdata)
+{
+	struct resource res[] = {
+		DEFINE_RES_MEM(base, SZ_4K),
+	};
+
+	struct platform_device_info pdevinfo = {
+		.parent = parent,
+		.name = "hash1",
+		.id = id,
+		.res = res,
+		.num_res = ARRAY_SIZE(res),
+		.data = pdata,
+		.size_data = sizeof(*pdata),
+		.dma_mask = DMA_BIT_MASK(32),
+	};
+
+	return platform_device_register_full(&pdevinfo);
+}
+
 struct nmk_gpio_platform_data;
 
 void dbx500_add_gpios(struct device *parent, resource_size_t *base, int num,
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 6e66d3777ed5..91754a8a0d49 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -104,6 +104,8 @@ static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV] = {
 	[DB8500_DMA_DEV14_MSP2_TX] = U8500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
 	[DB8500_DMA_DEV30_MSP1_TX] = U8500_MSP1_BASE + MSP_TX_RX_REG_OFFSET,
 	[DB8500_DMA_DEV31_MSP0_TX_SLIM0_CH0_TX] = U8500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+	[DB8500_DMA_DEV48_CAC1_TX] = U8500_CRYP1_BASE + CRYP1_TX_REG_OFFSET,
+	[DB8500_DMA_DEV50_HAC1_TX] = U8500_HASH1_BASE + HASH1_TX_REG_OFFSET,
 };
 
 /* Mapping between source event lines and physical device address */
@@ -139,6 +141,7 @@ static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV] = {
 	[DB8500_DMA_DEV14_MSP2_RX] = U8500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
 	[DB8500_DMA_DEV30_MSP3_RX] = U8500_MSP3_BASE + MSP_TX_RX_REG_OFFSET,
 	[DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX] = U8500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+	[DB8500_DMA_DEV48_CAC1_RX] = U8500_CRYP1_BASE + CRYP1_RX_REG_OFFSET,
 };
 
 /* Reserved event lines for memcpy only */
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index 0b9677a95bbc..3c8010f4fb3f 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -114,4 +114,8 @@ db8500_add_ssp(struct device *parent, const char *name, resource_size_t base,
 	dbx500_add_uart(parent, "uart2", U8500_UART2_BASE, \
 			IRQ_DB8500_UART2, pdata)
 
+#define db8500_add_cryp1(parent, pdata) \
+	dbx500_add_cryp1(parent, -1, U8500_CRYP1_BASE, IRQ_DB8500_CRYP1, pdata)
+#define db8500_add_hash1(parent, pdata) \
+	dbx500_add_hash1(parent, -1, U8500_HASH1_BASE, pdata)
 #endif
diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h
new file mode 100644
index 000000000000..5b2d0817e26a
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _CRYPTO_UX500_H
+#define _CRYPTO_UX500_H
+#include <linux/dmaengine.h>
+#include <plat/ste_dma40.h>
+
+struct hash_platform_data {
+	void *mem_to_engine;
+	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+struct cryp_platform_data {
+	struct stedma40_chan_cfg mem_to_engine;
+	struct stedma40_chan_cfg engine_to_mem;
+};
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index 9b5eb69a0154..cbc6f1e4104d 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -14,6 +14,9 @@ extern struct platform_device u8500_gpio_devs[];
 
 extern struct amba_device ux500_pl031_device;
 
+extern struct platform_device ux500_hash1_device;
+extern struct platform_device ux500_cryp1_device;
+
 extern struct platform_device u8500_dma40_device;
 extern struct platform_device ux500_ske_keypad_device;
 
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 808c1d6601c5..28d16e744bfd 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -33,6 +33,9 @@
 #include <mach/db8500-regs.h>
 
 #define MSP_TX_RX_REG_OFFSET	0
+#define CRYP1_RX_REG_OFFSET	0x10
+#define CRYP1_TX_REG_OFFSET	0x8
+#define HASH1_TX_REG_OFFSET	0x4
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index c799352e24fc..ac7f5cd019e8 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -222,27 +222,6 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	}
 }
 
-static struct crypto_alg aesni_alg = {
-	.cra_name = "aes",
-	.cra_driver_name = "aes-aesni",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
-	.cra_alignmask = 0,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(aesni_alg.cra_list),
-	.cra_u = {
-		.cipher = {
-			.cia_min_keysize = AES_MIN_KEY_SIZE,
-			.cia_max_keysize = AES_MAX_KEY_SIZE,
-			.cia_setkey = aes_set_key,
-			.cia_encrypt = aes_encrypt,
-			.cia_decrypt = aes_decrypt
-		}
-	}
-};
-
 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
@@ -257,27 +236,6 @@ static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	aesni_dec(ctx, dst, src);
 }
 
-static struct crypto_alg __aesni_alg = {
-	.cra_name = "__aes-aesni",
-	.cra_driver_name = "__driver-aes-aesni",
-	.cra_priority = 0,
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
-	.cra_alignmask = 0,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
-	.cra_u = {
-		.cipher = {
-			.cia_min_keysize = AES_MIN_KEY_SIZE,
-			.cia_max_keysize = AES_MAX_KEY_SIZE,
-			.cia_setkey = aes_set_key,
-			.cia_encrypt = __aes_encrypt,
-			.cia_decrypt = __aes_decrypt
-		}
-	}
-};
-
 static int ecb_encrypt(struct blkcipher_desc *desc,
 		struct scatterlist *dst, struct scatterlist *src,
 		unsigned int nbytes)
@@ -326,28 +284,6 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
 	return err;
 }
 
-static struct crypto_alg blk_ecb_alg = {
-	.cra_name = "__ecb-aes-aesni",
-	.cra_driver_name = "__driver-ecb-aes-aesni",
-	.cra_priority = 0,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.setkey = aes_set_key,
-			.encrypt = ecb_encrypt,
-			.decrypt = ecb_decrypt,
-		},
-	},
-};
-
 static int cbc_encrypt(struct blkcipher_desc *desc,
 		struct scatterlist *dst, struct scatterlist *src,
 		unsigned int nbytes)
@@ -396,28 +332,6 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 	return err;
 }
 
-static struct crypto_alg blk_cbc_alg = {
-	.cra_name = "__cbc-aes-aesni",
-	.cra_driver_name = "__driver-cbc-aes-aesni",
-	.cra_priority = 0,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.setkey = aes_set_key,
-			.encrypt = cbc_encrypt,
-			.decrypt = cbc_decrypt,
-		},
-	},
-};
-
 #ifdef CONFIG_X86_64
 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 			    struct blkcipher_walk *walk)
@@ -461,29 +375,6 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 
 	return err;
 }
-
-static struct crypto_alg blk_ctr_alg = {
-	.cra_name = "__ctr-aes-aesni",
-	.cra_driver_name = "__driver-ctr-aes-aesni",
-	.cra_priority = 0,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = 1,
-	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = aes_set_key,
-			.encrypt = ctr_crypt,
-			.decrypt = ctr_crypt,
-		},
-	},
-};
 #endif
 
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -551,281 +442,65 @@ static void ablk_exit(struct crypto_tfm *tfm)
 	cryptd_free_ablkcipher(ctx->cryptd_tfm);
 }
 
-static void ablk_init_common(struct crypto_tfm *tfm,
-		struct cryptd_ablkcipher *cryptd_tfm)
+static int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
 {
 	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
 
 	ctx->cryptd_tfm = cryptd_tfm;
 	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
 		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
+
+	return 0;
 }
 
 static int ablk_ecb_init(struct crypto_tfm *tfm)
 {
-	struct cryptd_ablkcipher *cryptd_tfm;
-
-	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-	ablk_init_common(tfm, cryptd_tfm);
-	return 0;
+	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
 }
 
-static struct crypto_alg ablk_ecb_alg = {
-	.cra_name = "ecb(aes)",
-	.cra_driver_name = "ecb-aes-aesni",
-	.cra_priority = 400,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct async_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
-	.cra_init = ablk_ecb_init,
-	.cra_exit = ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.setkey = ablk_set_key,
-			.encrypt = ablk_encrypt,
-			.decrypt = ablk_decrypt,
-		},
-	},
-};
-
 static int ablk_cbc_init(struct crypto_tfm *tfm)
 {
-	struct cryptd_ablkcipher *cryptd_tfm;
-
-	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-	ablk_init_common(tfm, cryptd_tfm);
-	return 0;
+	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
 }
 
-static struct crypto_alg ablk_cbc_alg = {
-	.cra_name = "cbc(aes)",
-	.cra_driver_name = "cbc-aes-aesni",
-	.cra_priority = 400,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct async_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
-	.cra_init = ablk_cbc_init,
-	.cra_exit = ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ablk_set_key,
-			.encrypt = ablk_encrypt,
-			.decrypt = ablk_decrypt,
-		},
-	},
-};
-
 #ifdef CONFIG_X86_64
 static int ablk_ctr_init(struct crypto_tfm *tfm)
 {
-	struct cryptd_ablkcipher *cryptd_tfm;
-
-	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-	ablk_init_common(tfm, cryptd_tfm);
-	return 0;
+	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
 }
 
-static struct crypto_alg ablk_ctr_alg = {
-	.cra_name = "ctr(aes)",
-	.cra_driver_name = "ctr-aes-aesni",
-	.cra_priority = 400,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize = 1,
-	.cra_ctxsize = sizeof(struct async_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
-	.cra_init = ablk_ctr_init,
-	.cra_exit = ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ablk_set_key,
-			.encrypt = ablk_encrypt,
-			.decrypt = ablk_encrypt,
-			.geniv = "chainiv",
-		},
-	},
-};
-
 #ifdef HAS_CTR
 static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
 {
-	struct cryptd_ablkcipher *cryptd_tfm;
-
-	cryptd_tfm = cryptd_alloc_ablkcipher(
-		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-	ablk_init_common(tfm, cryptd_tfm);
-	return 0;
+	return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
 }
-
-static struct crypto_alg ablk_rfc3686_ctr_alg = {
-	.cra_name = "rfc3686(ctr(aes))",
-	.cra_driver_name = "rfc3686-ctr-aes-aesni",
-	.cra_priority = 400,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize = 1,
-	.cra_ctxsize = sizeof(struct async_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
-	.cra_init = ablk_rfc3686_ctr_init,
-	.cra_exit = ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.setkey = ablk_set_key,
-			.encrypt = ablk_encrypt,
-			.decrypt = ablk_decrypt,
-			.geniv = "seqiv",
-		},
-	},
-};
 #endif
 #endif
 
 #ifdef HAS_LRW
 static int ablk_lrw_init(struct crypto_tfm *tfm)
 {
-	struct cryptd_ablkcipher *cryptd_tfm;
-
-	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
-					     0, 0);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-	ablk_init_common(tfm, cryptd_tfm);
-	return 0;
+	return ablk_init_common(tfm, "fpu(lrw(__driver-aes-aesni))");
 }
-
-static struct crypto_alg ablk_lrw_alg = {
-	.cra_name = "lrw(aes)",
-	.cra_driver_name = "lrw-aes-aesni",
-	.cra_priority = 400,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct async_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
-	.cra_init = ablk_lrw_init,
-	.cra_exit = ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ablk_set_key,
-			.encrypt = ablk_encrypt,
-			.decrypt = ablk_decrypt,
-		},
-	},
-};
 #endif
 
 #ifdef HAS_PCBC
 static int ablk_pcbc_init(struct crypto_tfm *tfm)
 {
-	struct cryptd_ablkcipher *cryptd_tfm;
-
-	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
-					     0, 0);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-	ablk_init_common(tfm, cryptd_tfm);
-	return 0;
+	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
 }
-
-static struct crypto_alg ablk_pcbc_alg = {
-	.cra_name = "pcbc(aes)",
-	.cra_driver_name = "pcbc-aes-aesni",
-	.cra_priority = 400,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct async_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
-	.cra_init = ablk_pcbc_init,
-	.cra_exit = ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ablk_set_key,
-			.encrypt = ablk_encrypt,
-			.decrypt = ablk_decrypt,
-		},
-	},
-};
 #endif
 
 #ifdef HAS_XTS
 static int ablk_xts_init(struct crypto_tfm *tfm)
 {
-	struct cryptd_ablkcipher *cryptd_tfm;
-
-	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
-					     0, 0);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-	ablk_init_common(tfm, cryptd_tfm);
-	return 0;
+	return ablk_init_common(tfm, "fpu(xts(__driver-aes-aesni))");
 }
-
-static struct crypto_alg ablk_xts_alg = {
-	.cra_name = "xts(aes)",
-	.cra_driver_name = "xts-aes-aesni",
-	.cra_priority = 400,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct async_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
-	.cra_init = ablk_xts_init,
-	.cra_exit = ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = 2 * AES_MIN_KEY_SIZE,
-			.max_keysize = 2 * AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ablk_set_key,
-			.encrypt = ablk_encrypt,
-			.decrypt = ablk_decrypt,
-		},
-	},
-};
 #endif
 
 #ifdef CONFIG_X86_64
@@ -1050,32 +725,6 @@ static int rfc4106_decrypt(struct aead_request *req)
 	}
 }
 
-static struct crypto_alg rfc4106_alg = {
-	.cra_name = "rfc4106(gcm(aes))",
-	.cra_driver_name = "rfc4106-gcm-aesni",
-	.cra_priority = 400,
-	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = 1,
-	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
-	.cra_alignmask = 0,
-	.cra_type = &crypto_nivaead_type,
-	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
-	.cra_init = rfc4106_init,
-	.cra_exit = rfc4106_exit,
-	.cra_u = {
-		.aead = {
-			.setkey = rfc4106_set_key,
-			.setauthsize = rfc4106_set_authsize,
-			.encrypt = rfc4106_encrypt,
-			.decrypt = rfc4106_decrypt,
-			.geniv = "seqiv",
-			.ivsize = 8,
-			.maxauthsize = 16,
-		},
-	},
-};
-
 static int __driver_rfc4106_encrypt(struct aead_request *req)
 {
 	u8 one_entry_in_sg = 0;
@@ -1233,26 +882,316 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 	}
 	return retval;
 }
+#endif
 
-static struct crypto_alg __rfc4106_alg = {
+static struct crypto_alg aesni_algs[] = { {
+	.cra_name = "aes",
+	.cra_driver_name = "aes-aesni",
+	.cra_priority = 300,
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
+		       AESNI_ALIGN - 1,
+	.cra_alignmask = 0,
+	.cra_module = THIS_MODULE,
+	.cra_u = {
+		.cipher = {
+			.cia_min_keysize = AES_MIN_KEY_SIZE,
+			.cia_max_keysize = AES_MAX_KEY_SIZE,
+			.cia_setkey = aes_set_key,
+			.cia_encrypt = aes_encrypt,
+			.cia_decrypt = aes_decrypt
+		}
+	}
+}, {
+	.cra_name = "__aes-aesni",
+	.cra_driver_name = "__driver-aes-aesni",
+	.cra_priority = 0,
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
+		       AESNI_ALIGN - 1,
+	.cra_alignmask = 0,
+	.cra_module = THIS_MODULE,
+	.cra_u = {
+		.cipher = {
+			.cia_min_keysize = AES_MIN_KEY_SIZE,
+			.cia_max_keysize = AES_MAX_KEY_SIZE,
+			.cia_setkey = aes_set_key,
+			.cia_encrypt = __aes_encrypt,
+			.cia_decrypt = __aes_decrypt
+		}
+	}
+}, {
+	.cra_name = "__ecb-aes-aesni",
+	.cra_driver_name = "__driver-ecb-aes-aesni",
+	.cra_priority = 0,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
+		       AESNI_ALIGN - 1,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = aes_set_key,
+			.encrypt = ecb_encrypt,
+			.decrypt = ecb_decrypt,
+		},
+	},
+}, {
+	.cra_name = "__cbc-aes-aesni",
+	.cra_driver_name = "__driver-cbc-aes-aesni",
+	.cra_priority = 0,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
+		       AESNI_ALIGN - 1,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = aes_set_key,
+			.encrypt = cbc_encrypt,
+			.decrypt = cbc_decrypt,
+		},
+	},
+}, {
+	.cra_name = "ecb(aes)",
+	.cra_driver_name = "ecb-aes-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct async_aes_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = ablk_ecb_init,
+	.cra_exit = ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = ablk_set_key,
+			.encrypt = ablk_encrypt,
+			.decrypt = ablk_decrypt,
+		},
+	},
+}, {
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "cbc-aes-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct async_aes_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = ablk_cbc_init,
+	.cra_exit = ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = ablk_set_key,
+			.encrypt = ablk_encrypt,
+			.decrypt = ablk_decrypt,
+		},
+	},
+#ifdef CONFIG_X86_64
+}, {
+	.cra_name = "__ctr-aes-aesni",
+	.cra_driver_name = "__driver-ctr-aes-aesni",
+	.cra_priority = 0,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
+		       AESNI_ALIGN - 1,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = aes_set_key,
+			.encrypt = ctr_crypt,
+			.decrypt = ctr_crypt,
+		},
+	},
+}, {
+	.cra_name = "ctr(aes)",
+	.cra_driver_name = "ctr-aes-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct async_aes_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = ablk_ctr_init,
+	.cra_exit = ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = ablk_set_key,
+			.encrypt = ablk_encrypt,
+			.decrypt = ablk_encrypt,
+			.geniv = "chainiv",
+		},
+	},
+}, {
 	.cra_name = "__gcm-aes-aesni",
 	.cra_driver_name = "__driver-gcm-aes-aesni",
 	.cra_priority = 0,
 	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 	.cra_blocksize = 1,
-	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
+		       AESNI_ALIGN,
 	.cra_alignmask = 0,
 	.cra_type = &crypto_aead_type,
 	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
 	.cra_u = {
 		.aead = {
 			.encrypt = __driver_rfc4106_encrypt,
 			.decrypt = __driver_rfc4106_decrypt,
 		},
 	},
-};
+}, {
+	.cra_name = "rfc4106(gcm(aes))",
+	.cra_driver_name = "rfc4106-gcm-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
+		       AESNI_ALIGN,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_nivaead_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = rfc4106_init,
+	.cra_exit = rfc4106_exit,
+	.cra_u = {
+		.aead = {
+			.setkey = rfc4106_set_key,
+			.setauthsize = rfc4106_set_authsize,
+			.encrypt = rfc4106_encrypt,
+			.decrypt = rfc4106_decrypt,
+			.geniv = "seqiv",
+			.ivsize = 8,
+			.maxauthsize = 16,
+		},
+	},
+#ifdef HAS_CTR
+}, {
+	.cra_name = "rfc3686(ctr(aes))",
+	.cra_driver_name = "rfc3686-ctr-aes-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct async_aes_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = ablk_rfc3686_ctr_init,
+	.cra_exit = ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.setkey = ablk_set_key,
+			.encrypt = ablk_encrypt,
+			.decrypt = ablk_decrypt,
+			.geniv = "seqiv",
+		},
+	},
+#endif
+#endif
+#ifdef HAS_LRW
+}, {
+	.cra_name = "lrw(aes)",
+	.cra_driver_name = "lrw-aes-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct async_aes_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = ablk_lrw_init,
+	.cra_exit = ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = ablk_set_key,
+			.encrypt = ablk_encrypt,
+			.decrypt = ablk_decrypt,
+		},
+	},
+#endif
+#ifdef HAS_PCBC
+}, {
+	.cra_name = "pcbc(aes)",
+	.cra_driver_name = "pcbc-aes-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct async_aes_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = ablk_pcbc_init,
+	.cra_exit = ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = ablk_set_key,
+			.encrypt = ablk_encrypt,
+			.decrypt = ablk_decrypt,
+		},
+	},
 #endif
+#ifdef HAS_XTS
+}, {
+	.cra_name = "xts(aes)",
+	.cra_driver_name = "xts-aes-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct async_aes_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = ablk_xts_init,
+	.cra_exit = ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = ablk_set_key,
+			.encrypt = ablk_encrypt,
+			.decrypt = ablk_decrypt,
+		},
+	},
+#endif
+} };
 
 
 static const struct x86_cpu_id aesni_cpu_id[] = {
@@ -1263,120 +1202,24 @@ MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
 
 static int __init aesni_init(void)
 {
-	int err;
+	int err, i;
 
 	if (!x86_match_cpu(aesni_cpu_id))
 		return -ENODEV;
 
-	if ((err = crypto_fpu_init()))
-		goto fpu_err;
-	if ((err = crypto_register_alg(&aesni_alg)))
-		goto aes_err;
-	if ((err = crypto_register_alg(&__aesni_alg)))
-		goto __aes_err;
-	if ((err = crypto_register_alg(&blk_ecb_alg)))
-		goto blk_ecb_err;
-	if ((err = crypto_register_alg(&blk_cbc_alg)))
-		goto blk_cbc_err;
-	if ((err = crypto_register_alg(&ablk_ecb_alg)))
-		goto ablk_ecb_err;
-	if ((err = crypto_register_alg(&ablk_cbc_alg)))
-		goto ablk_cbc_err;
-#ifdef CONFIG_X86_64
-	if ((err = crypto_register_alg(&blk_ctr_alg)))
-		goto blk_ctr_err;
-	if ((err = crypto_register_alg(&ablk_ctr_alg)))
-		goto ablk_ctr_err;
-	if ((err = crypto_register_alg(&__rfc4106_alg)))
-		goto __aead_gcm_err;
-	if ((err = crypto_register_alg(&rfc4106_alg)))
-		goto aead_gcm_err;
-#ifdef HAS_CTR
-	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
-		goto ablk_rfc3686_ctr_err;
-#endif
-#endif
-#ifdef HAS_LRW
-	if ((err = crypto_register_alg(&ablk_lrw_alg)))
-		goto ablk_lrw_err;
-#endif
-#ifdef HAS_PCBC
-	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
-		goto ablk_pcbc_err;
-#endif
-#ifdef HAS_XTS
-	if ((err = crypto_register_alg(&ablk_xts_alg)))
-		goto ablk_xts_err;
-#endif
-	return err;
+	err = crypto_fpu_init();
+	if (err)
+		return err;
 
-#ifdef HAS_XTS
-ablk_xts_err:
-#endif
-#ifdef HAS_PCBC
-	crypto_unregister_alg(&ablk_pcbc_alg);
-ablk_pcbc_err:
-#endif
-#ifdef HAS_LRW
-	crypto_unregister_alg(&ablk_lrw_alg);
-ablk_lrw_err:
-#endif
-#ifdef CONFIG_X86_64
-#ifdef HAS_CTR
-	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
-ablk_rfc3686_ctr_err:
-#endif
-	crypto_unregister_alg(&rfc4106_alg);
-aead_gcm_err:
-	crypto_unregister_alg(&__rfc4106_alg);
-__aead_gcm_err:
-	crypto_unregister_alg(&ablk_ctr_alg);
-ablk_ctr_err:
-	crypto_unregister_alg(&blk_ctr_alg);
-blk_ctr_err:
-#endif
-	crypto_unregister_alg(&ablk_cbc_alg);
-ablk_cbc_err:
-	crypto_unregister_alg(&ablk_ecb_alg);
-ablk_ecb_err:
-	crypto_unregister_alg(&blk_cbc_alg);
-blk_cbc_err:
-	crypto_unregister_alg(&blk_ecb_alg);
-blk_ecb_err:
-	crypto_unregister_alg(&__aesni_alg);
-__aes_err:
-	crypto_unregister_alg(&aesni_alg);
-aes_err:
-fpu_err:
-	return err;
+	for (i = 0; i < ARRAY_SIZE(aesni_algs); i++)
+		INIT_LIST_HEAD(&aesni_algs[i].cra_list);
+
+	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
 }
 
 static void __exit aesni_exit(void)
 {
-#ifdef HAS_XTS
-	crypto_unregister_alg(&ablk_xts_alg);
-#endif
-#ifdef HAS_PCBC
-	crypto_unregister_alg(&ablk_pcbc_alg);
-#endif
-#ifdef HAS_LRW
-	crypto_unregister_alg(&ablk_lrw_alg);
-#endif
-#ifdef CONFIG_X86_64
-#ifdef HAS_CTR
-	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
-#endif
-	crypto_unregister_alg(&rfc4106_alg);
-	crypto_unregister_alg(&__rfc4106_alg);
-	crypto_unregister_alg(&ablk_ctr_alg);
-	crypto_unregister_alg(&blk_ctr_alg);
-#endif
-	crypto_unregister_alg(&ablk_cbc_alg);
-	crypto_unregister_alg(&ablk_ecb_alg);
-	crypto_unregister_alg(&blk_cbc_alg);
-	crypto_unregister_alg(&blk_ecb_alg);
-	crypto_unregister_alg(&__aesni_alg);
-	crypto_unregister_alg(&aesni_alg);
+	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
 
 	crypto_fpu_exit();
 }
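One detail of the new aesni_algs[] worth noting: conditional entries are spliced into a single initializer by placing each "}, {" separator under the corresponding #ifdef, so an entry disappears along with its guard. A stripped-down illustration of the shape (field values are placeholders, not the driver's):

#include <linux/crypto.h>

static struct crypto_alg algs[] = { {
	.cra_name = "always-built",
#ifdef CONFIG_X86_64
}, {
	.cra_name = "only-on-x86-64",
#endif
} };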
diff --git a/crypto/xor.c b/crypto/xor.c
index b75182d8ab14..664b6dfa9e2c 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -129,9 +129,9 @@ calibrate_xor_blocks(void)
 
 	if (fastest) {
 		printk(KERN_INFO "xor: automatically using best "
-			"checksumming function: %s\n",
-			fastest->name);
+			"checksumming function:\n");
 		xor_speed(fastest);
+		goto out;
 	} else {
 		printk(KERN_INFO "xor: measuring software checksum speed\n");
 		XOR_TRY_TEMPLATES;
@@ -146,6 +146,7 @@ calibrate_xor_blocks(void)
 
 #undef xor_speed
 
+ out:
 	free_pages((unsigned long)b1, 2);
 
 	active_template = fastest;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c225314468ee..f45dad39a18b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -63,7 +63,7 @@ config HW_RANDOM_AMD
 config HW_RANDOM_ATMEL
 	tristate "Atmel Random Number Generator support"
 	depends on HW_RANDOM && HAVE_CLK
-	default HW_RANDOM
+	default (HW_RANDOM && ARCH_AT91)
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on Atmel AT91 devices.
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index a07a5caa599c..1412565c01af 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -115,22 +115,12 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-	if (!res) {
-		ret = -ENOENT;
-		goto err_region;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-		ret = -EBUSY;
-		goto err_region;
-	}
-
-	dev_set_drvdata(&pdev->dev, res);
-	rng_base = ioremap(res->start, resource_size(res));
+	rng_base = devm_request_and_ioremap(&pdev->dev, res);
 	if (!rng_base) {
 		ret = -ENOMEM;
 		goto err_ioremap;
 	}
+	dev_set_drvdata(&pdev->dev, res);
 
 	ret = hwrng_register(&omap_rng_ops);
 	if (ret)
@@ -145,11 +135,8 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
 	return 0;
 
 err_register:
-	iounmap(rng_base);
 	rng_base = NULL;
 err_ioremap:
-	release_mem_region(res->start, resource_size(res));
-err_region:
 	if (cpu_is_omap24xx()) {
 		clk_disable(rng_ick);
 		clk_put(rng_ick);
@@ -159,20 +146,15 @@ err_region:
 
 static int __exit omap_rng_remove(struct platform_device *pdev)
 {
-	struct resource *res = dev_get_drvdata(&pdev->dev);
-
 	hwrng_unregister(&omap_rng_ops);
 
 	omap_rng_write_reg(RNG_MASK_REG, 0x0);
 
-	iounmap(rng_base);
-
 	if (cpu_is_omap24xx()) {
 		clk_disable(rng_ick);
 		clk_put(rng_ick);
 	}
 
-	release_mem_region(res->start, resource_size(res));
 	rng_base = NULL;
 
 	return 0;
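The omap-rng conversion above is the standard devm_request_and_ioremap() pattern: one managed call replaces the request_mem_region()/ioremap() pair, and the mapping is torn down automatically when the device goes away, which is why the error labels and the remove() path shrink. A hedged sketch of the idiom (driver and function names are illustrative):

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)		/* NULL return also covers res == NULL */
		return -ENOMEM;

	/* no iounmap()/release_mem_region() needed on any exit path */
	return 0;
}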
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 6373fa0ddb65..1092a770482e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -314,4 +314,15 @@ config CRYPTO_DEV_NX
 	  module supports acceleration for AES and SHA2 algorithms. If you
 	  choose 'M' here, this module will be called nx_crypto.
 
+config CRYPTO_DEV_UX500
+	tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
+	depends on ARCH_U8500
+	select CRYPTO_ALGAPI
+	help
+	  Driver for ST-Ericsson UX500 crypto engine.
+
+if CRYPTO_DEV_UX500
+	source "drivers/crypto/ux500/Kconfig"
+endif # if CRYPTO_DEV_UX500
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f3e64eadd7af..01390325d72d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
+obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
\ No newline at end of file
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 13f8e1a14988..802e85102c32 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1244,9 +1244,9 @@ err_start_dev:
 	iounmap(core_dev->dev->ce_base);
 err_iomap:
 	free_irq(core_dev->irq, dev);
+err_request_irq:
 	irq_dispose_mapping(core_dev->irq);
 	tasklet_kill(&core_dev->tasklet);
-err_request_irq:
 	crypto4xx_destroy_sdr(core_dev->dev);
 err_build_sdr:
 	crypto4xx_destroy_gdr(core_dev->dev);
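The crypto4xx change is a pure label move, but it illustrates the rule behind goto-style unwinding: each label must undo exactly what was acquired before the failing step, so labels appear in reverse acquisition order. A generic sketch of that idiom (step_*/undo_* are hypothetical helpers, not crypto4xx functions):

int step_a(void), step_b(void), step_c(void);	/* hypothetical */
void undo_a(void), undo_b(void);

static int example_init(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto err_b;		/* undo A only */
	err = step_c();
	if (err)
		goto err_c;		/* undo B, then A */
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
	return err;
}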
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 534a36469d57..4eec389184d3 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2267,8 +2267,11 @@ static void __exit caam_algapi_exit(void)
 	int i, err;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node)
-		return;
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return;
+	}
 
 	pdev = of_find_device_by_node(dev_node);
 	if (!pdev)
@@ -2350,8 +2353,11 @@ static int __init caam_algapi_init(void)
 	int i = 0, err = 0;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node)
-		return -ENODEV;
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
 
 	pdev = of_find_device_by_node(dev_node);
 	if (!pdev)
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index c5f61c55d923..77557ebcd337 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -98,6 +98,12 @@ static int caam_probe(struct platform_device *pdev)
 	rspec = 0;
 	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
 		rspec++;
+	if (!rspec) {
+		/* for backward compatible with device trees */
+		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
+			rspec++;
+	}
+
 	ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
 	if (ctrlpriv->jrdev == NULL) {
 		iounmap(&topregs->ctrl);
@@ -111,6 +117,13 @@ static int caam_probe(struct platform_device *pdev)
 		ctrlpriv->total_jobrs++;
 		ring++;
 	}
+	if (!ring) {
+		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
+			caam_jr_probe(pdev, np, ring);
+			ctrlpriv->total_jobrs++;
+			ring++;
+		}
+	}
 
 	/* Check to see if QI present. If so, enable */
 	ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
@@ -226,6 +239,9 @@ static struct of_device_id caam_match[] = {
 	{
 		.compatible = "fsl,sec-v4.0",
 	},
+	{
+		.compatible = "fsl,sec4.0",
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, caam_match);
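The caam changes implement a simple device-tree compatibility fallback: probe the current "fsl,sec-v4.0" string first and only then try the legacy "fsl,sec4.0" spelling, so older device trees keep binding. Condensed from the diff above (the wrapper name is illustrative):

#include <linux/of.h>

static struct device_node *caam_find_node(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!np)	/* fall back to the legacy spelling */
		np = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
	return np;
}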
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
new file mode 100644
index 000000000000..b35e5c4b025a
--- /dev/null
+++ b/drivers/crypto/ux500/Kconfig
@@ -0,0 +1,30 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+config CRYPTO_DEV_UX500_CRYP
+	tristate "UX500 crypto driver for CRYP block"
+	depends on CRYPTO_DEV_UX500
+	select CRYPTO_DES
+	help
+	  This selects the crypto driver for the UX500_CRYP hardware. It supports
+	  AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
+
+config CRYPTO_DEV_UX500_HASH
+	tristate "UX500 crypto driver for HASH block"
+	depends on CRYPTO_DEV_UX500
+	select CRYPTO_HASH
+	select CRYPTO_HMAC
+	help
+	  This selects the hash driver for the UX500_HASH hardware.
+	  Depends on UX500/STM DMA if running in DMA mode.
+
+config CRYPTO_DEV_UX500_DEBUG
+	bool "Activate ux500 platform debug-mode for crypto and hash block"
+	depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
+	default n
+	help
+	  Say Y if you want to add debug prints to ux500_hash and
+	  ux500_cryp devices.
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
new file mode 100644
index 000000000000..b9a365bade86
--- /dev/null
+++ b/drivers/crypto/ux500/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
new file mode 100644
index 000000000000..e5d362a6f680
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -0,0 +1,13 @@
+#/*
+# * Copyright (C) ST-Ericsson SA 2010
+# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
+# * License terms: GNU General Public License (GPL) version 2 */
+
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_cryp_core.o := -DDEBUG -O0
+CFLAGS_cryp.o := -DDEBUG -O0
+CFLAGS_cryp_irq.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
+ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
new file mode 100644
index 000000000000..e208ceaf81c9
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -0,0 +1,389 @@
1/**
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
4 * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
5 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
6 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
7 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
8 * License terms: GNU General Public License (GPL) version 2
9 */
10
11#include <linux/errno.h>
12#include <linux/kernel.h>
13#include <linux/types.h>
14
15#include <mach/hardware.h>
16
17#include "cryp_p.h"
18#include "cryp.h"
19
20/**
21 * cryp_wait_until_done - wait until the device logic is not busy
22 */
23void cryp_wait_until_done(struct cryp_device_data *device_data)
24{
25 while (cryp_is_logic_busy(device_data))
26 cpu_relax();
27}
28
29/**
30 * cryp_check - This routine checks Peripheral and PCell Id
31 * @device_data: Pointer to the device data struct for base address.
32 */
33int cryp_check(struct cryp_device_data *device_data)
34{
35 int peripheralid2 = 0;
36
37 if (NULL == device_data)
38 return -EINVAL;
39
40 peripheralid2 = readl_relaxed(&device_data->base->periphId2);
41
42 if (peripheralid2 != CRYP_PERIPHERAL_ID2_DB8500)
43 return -EPERM;
44
45 /* Check Peripheral and Pcell Id Register for CRYP */
46 if ((CRYP_PERIPHERAL_ID0 ==
47 readl_relaxed(&device_data->base->periphId0))
48 && (CRYP_PERIPHERAL_ID1 ==
49 readl_relaxed(&device_data->base->periphId1))
50 && (CRYP_PERIPHERAL_ID3 ==
51 readl_relaxed(&device_data->base->periphId3))
52 && (CRYP_PCELL_ID0 ==
53 readl_relaxed(&device_data->base->pcellId0))
54 && (CRYP_PCELL_ID1 ==
55 readl_relaxed(&device_data->base->pcellId1))
56 && (CRYP_PCELL_ID2 ==
57 readl_relaxed(&device_data->base->pcellId2))
58 && (CRYP_PCELL_ID3 ==
59 readl_relaxed(&device_data->base->pcellId3))) {
60 return 0;
61 }
62
63 return -EPERM;
64}
65
66/**
67 * cryp_activity - This routine enables/disables the cryptography function.
68 * @device_data: Pointer to the device data struct for base address.
69 * @cryp_crypen: Enable/Disable functionality
70 */
71void cryp_activity(struct cryp_device_data *device_data,
72 enum cryp_crypen cryp_crypen)
73{
74 CRYP_PUT_BITS(&device_data->base->cr,
75 cryp_crypen,
76 CRYP_CR_CRYPEN_POS,
77 CRYP_CR_CRYPEN_MASK);
78}
79
80/**
81 * cryp_flush_inoutfifo - Resets both the input and the output FIFOs
82 * @device_data: Pointer to the device data struct for base address.
83 */
84void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
85{
86 /*
87	 * We always need to disable the hardware before trying to flush the
88 * FIFO. This is something that isn't written in the design
89 * specification, but we have been informed by the hardware designers
90 * that this must be done.
91 */
92 cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
93 cryp_wait_until_done(device_data);
94
95 CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK);
96 /*
97	 * CRYP_SR_INFIFO_READY_MASK is the expected value on the status
98	 * register when starting a new calculation: the input FIFO is
99	 * not full and the input FIFO is empty.
100 */
101 while (readl_relaxed(&device_data->base->sr) !=
102 CRYP_SR_INFIFO_READY_MASK)
103 cpu_relax();
104}
105
106/**
107 * cryp_set_configuration - This routine sets the control register of the CRYP IP
108 * @device_data: Pointer to the device data struct for base address.
109 * @cryp_config: Pointer to the configuration parameter
110 * @control_register: The control register to be written later on.
111 */
112int cryp_set_configuration(struct cryp_device_data *device_data,
113 struct cryp_config *cryp_config,
114 u32 *control_register)
115{
116 u32 cr_for_kse;
117
118 if (NULL == device_data || NULL == cryp_config)
119 return -EINVAL;
120
121 *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS);
122
123 /* Prepare key for decryption in AES_ECB and AES_CBC mode. */
124 if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) &&
125 ((CRYP_ALGO_AES_ECB == cryp_config->algomode) ||
126 (CRYP_ALGO_AES_CBC == cryp_config->algomode))) {
127 cr_for_kse = *control_register;
128 /*
129 * This seems a bit odd, but it is indeed needed to set this to
130		 * encrypt even though it is a decryption that we are doing. It is
131		 * also mentioned in the design spec that you need to do this.
132		 * After the key preparation for decrypting is done you should set
133 * algodir back to decryption, which is done outside this if
134 * statement.
135 *
136 * According to design specification we should set mode ECB
137 * during key preparation even though we might be running CBC
138		 * when entering this function.
139 *
140 * Writing to KSE_ENABLED will drop CRYPEN when key preparation
141 * is done. Therefore we need to set CRYPEN again outside this
142 * if statement when running decryption.
143 */
144 cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) |
145 (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) |
146 (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) |
147 (KSE_ENABLED << CRYP_CR_KSE_POS));
148
149 writel_relaxed(cr_for_kse, &device_data->base->cr);
150 cryp_wait_until_done(device_data);
151 }
152
153 *control_register |=
154 ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) |
155 (cryp_config->algodir << CRYP_CR_ALGODIR_POS));
156
157 return 0;
158}
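A hedged caller sketch of the decrypt key-preparation dance described above (not part of this patch; cryp_setup_context() in cryp_core.c does the equivalent): cryp_set_configuration() internally runs the key schedule with KSE/encrypt/ECB, and the caller then re-enables CRYPEN with the real (decrypt) control word:

	u32 cr = CRYP_CR_DEFAULT;
	struct cryp_config cfg = {
		.keysize  = CRYP_KEY_SIZE_128,
		.algomode = CRYP_ALGO_AES_ECB,
		.algodir  = CRYP_ALGORITHM_DECRYPT,
	};

	if (cryp_set_configuration(device_data, &cfg, &cr) == 0)
		writel(cr | (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
		       &device_data->base->cr);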
159
160/**
161 * cryp_configure_protection - set the protection bits in the CRYP logic.
162 * @device_data: Pointer to the device data struct for base address.
163 * @p_protect_config: Pointer to the protection mode and
164 * secure mode configuration
165 */
166int cryp_configure_protection(struct cryp_device_data *device_data,
167 struct cryp_protection_config *p_protect_config)
168{
169 if (NULL == p_protect_config)
170 return -EINVAL;
171
172 CRYP_WRITE_BIT(&device_data->base->cr,
173 (u32) p_protect_config->secure_access,
174 CRYP_CR_SECURE_MASK);
175 CRYP_PUT_BITS(&device_data->base->cr,
176 p_protect_config->privilege_access,
177 CRYP_CR_PRLG_POS,
178 CRYP_CR_PRLG_MASK);
179
180 return 0;
181}
182
183/**
184 * cryp_is_logic_busy - returns the busy status of the CRYP logic
185 * @device_data: Pointer to the device data struct for base address.
186 */
187int cryp_is_logic_busy(struct cryp_device_data *device_data)
188{
189 return CRYP_TEST_BITS(&device_data->base->sr,
190 CRYP_SR_BUSY_MASK);
191}
192
193/**
194 * cryp_configure_for_dma - configures the CRYP IP for DMA operation
195 * @device_data: Pointer to the device data struct for base address.
196 * @dma_req: Specifies the DMA request type value.
197 */
198void cryp_configure_for_dma(struct cryp_device_data *device_data,
199 enum cryp_dma_req_type dma_req)
200{
201 CRYP_SET_BITS(&device_data->base->dmacr,
202 (u32) dma_req);
203}
204
205/**
206 * cryp_configure_key_values - configures the key values for CRYP operations
207 * @device_data: Pointer to the device data struct for base address.
208 * @key_reg_index: Key value index register
209 * @key_value: The key value struct
210 */
211int cryp_configure_key_values(struct cryp_device_data *device_data,
212 enum cryp_key_reg_index key_reg_index,
213 struct cryp_key_value key_value)
214{
215 while (cryp_is_logic_busy(device_data))
216 cpu_relax();
217
218 switch (key_reg_index) {
219 case CRYP_KEY_REG_1:
220 writel_relaxed(key_value.key_value_left,
221 &device_data->base->key_1_l);
222 writel_relaxed(key_value.key_value_right,
223 &device_data->base->key_1_r);
224 break;
225 case CRYP_KEY_REG_2:
226 writel_relaxed(key_value.key_value_left,
227 &device_data->base->key_2_l);
228 writel_relaxed(key_value.key_value_right,
229 &device_data->base->key_2_r);
230 break;
231 case CRYP_KEY_REG_3:
232 writel_relaxed(key_value.key_value_left,
233 &device_data->base->key_3_l);
234 writel_relaxed(key_value.key_value_right,
235 &device_data->base->key_3_r);
236 break;
237 case CRYP_KEY_REG_4:
238 writel_relaxed(key_value.key_value_left,
239 &device_data->base->key_4_l);
240 writel_relaxed(key_value.key_value_right,
241 &device_data->base->key_4_r);
242 break;
243 default:
244 return -EINVAL;
245 }
246
247 return 0;
248}
249
250/**
251 * cryp_configure_init_vector - configures the initialization vector register
252 * @device_data: Pointer to the device data struct for base address.
253 * @init_vector_index: Specifies the index of the init vector.
254 * @init_vector_value: Specifies the value for the init vector.
255 */
256int cryp_configure_init_vector(struct cryp_device_data *device_data,
257 enum cryp_init_vector_index
258 init_vector_index,
259 struct cryp_init_vector_value
260 init_vector_value)
261{
262 while (cryp_is_logic_busy(device_data))
263 cpu_relax();
264
265 switch (init_vector_index) {
266 case CRYP_INIT_VECTOR_INDEX_0:
267 writel_relaxed(init_vector_value.init_value_left,
268 &device_data->base->init_vect_0_l);
269 writel_relaxed(init_vector_value.init_value_right,
270 &device_data->base->init_vect_0_r);
271 break;
272 case CRYP_INIT_VECTOR_INDEX_1:
273 writel_relaxed(init_vector_value.init_value_left,
274 &device_data->base->init_vect_1_l);
275 writel_relaxed(init_vector_value.init_value_right,
276 &device_data->base->init_vect_1_r);
277 break;
278 default:
279 return -EINVAL;
280 }
281
282 return 0;
283}
284
285/**
286 * cryp_save_device_context - Store hardware registers and
287 * other device context parameters
288 * @device_data: Pointer to the device data struct for base address.
289 * @ctx: Crypto device context
290 */
291void cryp_save_device_context(struct cryp_device_data *device_data,
292 struct cryp_device_context *ctx,
293 int cryp_mode)
294{
295 enum cryp_algo_mode algomode;
296 struct cryp_register *src_reg = device_data->base;
297 struct cryp_config *config =
298 (struct cryp_config *)device_data->current_ctx;
299
300 /*
301 * Always start by disable the hardware and wait for it to finish the
302 * ongoing calculations before trying to reprogram it.
303 */
304 cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
305 cryp_wait_until_done(device_data);
306
307 if (cryp_mode == CRYP_MODE_DMA)
308 cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH);
309
310 if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0)
311 ctx->din = readl_relaxed(&src_reg->din);
312
313 ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK;
314
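	/*
	 * Note: like the restore function below, this switch falls
	 * through so that larger key sizes also save the lower key
	 * registers (DES is captured in the default).
	 */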
315 switch (config->keysize) {
316 case CRYP_KEY_SIZE_256:
317 ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
318 ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
319
320 case CRYP_KEY_SIZE_192:
321 ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
322 ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
323
324 case CRYP_KEY_SIZE_128:
325 ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
326 ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
327
328 default:
329 ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
330 ctx->key_1_r = readl_relaxed(&src_reg->key_1_r);
331 }
332
333 /* Save IV for CBC mode for both AES and DES. */
334 algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS);
335 if (algomode == CRYP_ALGO_TDES_CBC ||
336 algomode == CRYP_ALGO_DES_CBC ||
337 algomode == CRYP_ALGO_AES_CBC) {
338 ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l);
339 ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r);
340 ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l);
341 ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r);
342 }
343}
344
345/**
346 * cryp_restore_device_context - Restore hardware registers and
347 * other device context parameters
348 * @device_data: Pointer to the device data struct for base address.
349 * @ctx: Crypto device context
350 */
351void cryp_restore_device_context(struct cryp_device_data *device_data,
352 struct cryp_device_context *ctx)
353{
354 struct cryp_register *reg = device_data->base;
355 struct cryp_config *config =
356 (struct cryp_config *)device_data->current_ctx;
357
358 /*
359 * Fall through for all items in switch statement. DES is captured in
360 * the default.
361 */
362 switch (config->keysize) {
363 case CRYP_KEY_SIZE_256:
364 writel_relaxed(ctx->key_4_l, &reg->key_4_l);
365 writel_relaxed(ctx->key_4_r, &reg->key_4_r);
366
367 case CRYP_KEY_SIZE_192:
368 writel_relaxed(ctx->key_3_l, &reg->key_3_l);
369 writel_relaxed(ctx->key_3_r, &reg->key_3_r);
370
371 case CRYP_KEY_SIZE_128:
372 writel_relaxed(ctx->key_2_l, &reg->key_2_l);
373 writel_relaxed(ctx->key_2_r, &reg->key_2_r);
374
375 default:
376 writel_relaxed(ctx->key_1_l, &reg->key_1_l);
377 writel_relaxed(ctx->key_1_r, &reg->key_1_r);
378 }
379
380 /* Restore IV for CBC mode for AES and DES. */
381 if (config->algomode == CRYP_ALGO_TDES_CBC ||
382 config->algomode == CRYP_ALGO_DES_CBC ||
383 config->algomode == CRYP_ALGO_AES_CBC) {
384 writel_relaxed(ctx->init_vect_0_l, &reg->init_vect_0_l);
385 writel_relaxed(ctx->init_vect_0_r, &reg->init_vect_0_r);
386 writel_relaxed(ctx->init_vect_1_l, &reg->init_vect_1_l);
387 writel_relaxed(ctx->init_vect_1_r, &reg->init_vect_1_r);
388 }
389}
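To make the pairing explicit, here is a minimal sketch of how the two context helpers above are meant to bracket a power cycle; the clock calls stand in for the fuller cryp_disable_power()/cryp_enable_power() logic in cryp_core.c:

	/* Going down: park the hardware state in the software context. */
	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
	clk_disable(device_data->clk);

	/* Coming back up: re-enable and replay the saved registers. */
	clk_enable(device_data->clk);
	cryp_restore_device_context(device_data, &ctx->dev_ctx);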
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
new file mode 100644
index 000000000000..14cfd05b777a
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -0,0 +1,308 @@
1/**
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
4 * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
5 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
6 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
7 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
8 * License terms: GNU General Public License (GPL) version 2
9 */
10
11#ifndef _CRYP_H_
12#define _CRYP_H_
13
14#include <linux/completion.h>
15#include <linux/dmaengine.h>
16#include <linux/klist.h>
17#include <linux/mutex.h>
18
19#define DEV_DBG_NAME "crypX crypX:"
20
21/* CRYP enable/disable */
22enum cryp_crypen {
23 CRYP_CRYPEN_DISABLE = 0,
24 CRYP_CRYPEN_ENABLE = 1
25};
26
27/* CRYP Start Computation enable/disable */
28enum cryp_start {
29 CRYP_START_DISABLE = 0,
30 CRYP_START_ENABLE = 1
31};
32
33/* CRYP Init Signal enable/disable */
34enum cryp_init {
35 CRYP_INIT_DISABLE = 0,
36 CRYP_INIT_ENABLE = 1
37};
38
39/* Cryp State enable/disable */
40enum cryp_state {
41 CRYP_STATE_DISABLE = 0,
42 CRYP_STATE_ENABLE = 1
43};
44
45/* Key preparation bit enable */
46enum cryp_key_prep {
47 KSE_DISABLED = 0,
48 KSE_ENABLED = 1
49};
50
51/* Key size for AES */
52#define CRYP_KEY_SIZE_128 (0)
53#define CRYP_KEY_SIZE_192 (1)
54#define CRYP_KEY_SIZE_256 (2)
55
56/* AES modes */
57enum cryp_algo_mode {
58 CRYP_ALGO_TDES_ECB,
59 CRYP_ALGO_TDES_CBC,
60 CRYP_ALGO_DES_ECB,
61 CRYP_ALGO_DES_CBC,
62 CRYP_ALGO_AES_ECB,
63 CRYP_ALGO_AES_CBC,
64 CRYP_ALGO_AES_CTR,
65 CRYP_ALGO_AES_XTS
66};
67
68/* Cryp Encryption or Decryption */
69enum cryp_algorithm_dir {
70 CRYP_ALGORITHM_ENCRYPT,
71 CRYP_ALGORITHM_DECRYPT
72};
73
74/* Hardware access method */
75enum cryp_mode {
76 CRYP_MODE_POLLING,
77 CRYP_MODE_INTERRUPT,
78 CRYP_MODE_DMA
79};
80
81/**
82 * struct cryp_config -
83 * @keysize: Key size for AES
84 * @algomode: AES modes
85 * @algodir: Cryp Encryption or Decryption
86 *
87 * CRYP configuration structure to be passed to set configuration
88 */
89struct cryp_config {
90 int keysize;
91 enum cryp_algo_mode algomode;
92 enum cryp_algorithm_dir algodir;
93};
94
95/**
96 * struct cryp_protection_config -
97 * @privilege_access: Privileged cryp state enable/disable
98 * @secure_access: Secure cryp state enable/disable
99 *
100 * Protection configuration structure for setting privilege access
101 */
102struct cryp_protection_config {
103 enum cryp_state privilege_access;
104 enum cryp_state secure_access;
105};
106
107/* Cryp status */
108enum cryp_status_id {
109 CRYP_STATUS_BUSY = 0x10,
110 CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08,
111 CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04,
112 CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02,
113 CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01
114};
115
116/* Cryp DMA interface */
117enum cryp_dma_req_type {
118 CRYP_DMA_DISABLE_BOTH,
119 CRYP_DMA_ENABLE_IN_DATA,
120 CRYP_DMA_ENABLE_OUT_DATA,
121 CRYP_DMA_ENABLE_BOTH_DIRECTIONS
122};
123
124enum cryp_dma_channel {
125 CRYP_DMA_RX = 0,
126 CRYP_DMA_TX
127};
128
129/* Key registers */
130enum cryp_key_reg_index {
131 CRYP_KEY_REG_1,
132 CRYP_KEY_REG_2,
133 CRYP_KEY_REG_3,
134 CRYP_KEY_REG_4
135};
136
137/* Key register left and right */
138struct cryp_key_value {
139 u32 key_value_left;
140 u32 key_value_right;
141};
142
143/* Cryp Initialization structure */
144enum cryp_init_vector_index {
145 CRYP_INIT_VECTOR_INDEX_0,
146 CRYP_INIT_VECTOR_INDEX_1
147};
148
149/* struct cryp_init_vector_value -
150 * @init_value_left: Left 32-bit half of the init vector.
151 * @init_value_right: Right 32-bit half of the init vector.
152 */
153struct cryp_init_vector_value {
154 u32 init_value_left;
155 u32 init_value_right;
156};
157
158/**
159 * struct cryp_device_context - structure for a cryp context.
160 * @cr: control register
161 * @dmacr: DMA control register
162 * @imsc: Interrupt mask set/clear register
163 * @key_1_l: Key 1l register
164 * @key_1_r: Key 1r register
165 * @key_2_l: Key 2l register
166 * @key_2_r: Key 2r register
167 * @key_3_l: Key 3l register
168 * @key_3_r: Key 3r register
169 * @key_4_l: Key 4l register
170 * @key_4_r: Key 4r register
171 * @init_vect_0_l: Initialization vector 0l register
172 * @init_vect_0_r: Initialization vector 0r register
173 * @init_vect_1_l: Initialization vector 1l register
174 * @init_vect_1_r: Initialization vector 1r register
175 * @din: Data in register
176 * @dout: Data out register
177 *
178 * CRYP power management specific structure.
179 */
180struct cryp_device_context {
181 u32 cr;
182 u32 dmacr;
183 u32 imsc;
184
185 u32 key_1_l;
186 u32 key_1_r;
187 u32 key_2_l;
188 u32 key_2_r;
189 u32 key_3_l;
190 u32 key_3_r;
191 u32 key_4_l;
192 u32 key_4_r;
193
194 u32 init_vect_0_l;
195 u32 init_vect_0_r;
196 u32 init_vect_1_l;
197 u32 init_vect_1_r;
198
199 u32 din;
200 u32 dout;
201};
202
203struct cryp_dma {
204 dma_cap_mask_t mask;
205 struct completion cryp_dma_complete;
206 struct dma_chan *chan_cryp2mem;
207 struct dma_chan *chan_mem2cryp;
208 struct stedma40_chan_cfg *cfg_cryp2mem;
209 struct stedma40_chan_cfg *cfg_mem2cryp;
210 int sg_src_len;
211 int sg_dst_len;
212 struct scatterlist *sg_src;
213 struct scatterlist *sg_dst;
214 int nents_src;
215 int nents_dst;
216};
217
218/**
219 * struct cryp_device_data - structure for a cryp device.
220 * @base: Pointer to the hardware base address.
221 * @dev: Pointer to the device's dev structure.
222 * @clk: Pointer to the device's clock control.
223 * @pwr_regulator: Pointer to the device's power control.
224 * @power_status: Current status of the power.
225 * @ctx_lock: Lock for current_ctx.
226 * @current_ctx: Pointer to the currently allocated context.
227 * @list_node: For inclusion into a klist.
228 * @dma: The dma structure holding channel configuration.
229 * @power_state: TRUE = power state on, FALSE = power state off.
230 * @power_state_spinlock: Spinlock for power_state.
231 * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx.
232 */
233struct cryp_device_data {
234 struct cryp_register __iomem *base;
235 struct device *dev;
236 struct clk *clk;
237 struct regulator *pwr_regulator;
238 int power_status;
239 struct spinlock ctx_lock;
240 struct cryp_ctx *current_ctx;
241 struct klist_node list_node;
242 struct cryp_dma dma;
243 bool power_state;
244 struct spinlock power_state_spinlock;
245 bool restore_dev_ctx;
246};
247
248void cryp_wait_until_done(struct cryp_device_data *device_data);
249
250/* Initialization functions */
251
252int cryp_check(struct cryp_device_data *device_data);
253
254void cryp_activity(struct cryp_device_data *device_data,
255 enum cryp_crypen cryp_crypen);
256
257void cryp_flush_inoutfifo(struct cryp_device_data *device_data);
258
259int cryp_set_configuration(struct cryp_device_data *device_data,
260 struct cryp_config *cryp_config,
261 u32 *control_register);
262
263void cryp_configure_for_dma(struct cryp_device_data *device_data,
264 enum cryp_dma_req_type dma_req);
265
266int cryp_configure_key_values(struct cryp_device_data *device_data,
267 enum cryp_key_reg_index key_reg_index,
268 struct cryp_key_value key_value);
269
270int cryp_configure_init_vector(struct cryp_device_data *device_data,
271 enum cryp_init_vector_index
272 init_vector_index,
273 struct cryp_init_vector_value
274 init_vector_value);
275
276int cryp_configure_protection(struct cryp_device_data *device_data,
277 struct cryp_protection_config *p_protect_config);
278
279/* Power management functions */
280void cryp_save_device_context(struct cryp_device_data *device_data,
281 struct cryp_device_context *ctx,
282 int cryp_mode);
283
284void cryp_restore_device_context(struct cryp_device_data *device_data,
285 struct cryp_device_context *ctx);
286
287/* Data transfer and status bits. */
288int cryp_is_logic_busy(struct cryp_device_data *device_data);
289
290int cryp_get_status(struct cryp_device_data *device_data);
291
292/**
293 * cryp_write_indata - This routine writes a 32-bit word into the data
294 * input register of the cryptography IP.
295 * @device_data: Pointer to the device data struct for base address.
296 * @write_data: Data to write.
297 */
298int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data);
299
300/**
301 * cryp_read_outdata - This routine reads the data from the data output
302 * register of the CRYP logic
303 * @device_data: Pointer to the device data struct for base address.
304 * @read_data: Read the data from the output FIFO.
305 */
306int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data);
307
308#endif /* _CRYP_H_ */
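A minimal polling-mode sketch using the data-path helpers declared above; it assumes one 16-byte (four-word) AES block in in[]/out[], a device that is already configured and enabled, and elides the int error returns:

	u32 in[4], out[4];
	int i;

	for (i = 0; i < 4; i++)
		cryp_write_indata(device_data, in[i]);
	cryp_wait_until_done(device_data);
	for (i = 0; i < 4; i++)
		cryp_read_outdata(device_data, &out[i]);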
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
new file mode 100644
index 000000000000..7cac12793a4b
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -0,0 +1,1784 @@
1/**
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
4 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
5 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
6 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
7 * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
8 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
9 * License terms: GNU General Public License (GPL) version 2
10 */
11
12#include <linux/clk.h>
13#include <linux/completion.h>
14#include <linux/crypto.h>
15#include <linux/dmaengine.h>
16#include <linux/err.h>
17#include <linux/errno.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/irqreturn.h>
21#include <linux/klist.h>
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/regulator/consumer.h>
25#include <linux/semaphore.h>
26
27#include <crypto/aes.h>
28#include <crypto/algapi.h>
29#include <crypto/ctr.h>
30#include <crypto/des.h>
31#include <crypto/scatterwalk.h>
32
33#include <plat/ste_dma40.h>
34
35#include <mach/crypto-ux500.h>
36#include <mach/hardware.h>
37
38#include "cryp_p.h"
39#include "cryp.h"
40
41#define CRYP_MAX_KEY_SIZE 32
42#define BYTES_PER_WORD 4
43
44static int cryp_mode;
45static atomic_t session_id;
46
47static struct stedma40_chan_cfg *mem_to_engine;
48static struct stedma40_chan_cfg *engine_to_mem;
49
50/**
51 * struct cryp_driver_data - data specific to the driver.
52 *
53 * @device_list: A list of registered devices to choose from.
54 * @device_allocation: A semaphore initialized with number of devices.
55 */
56struct cryp_driver_data {
57 struct klist device_list;
58 struct semaphore device_allocation;
59};
60
61/**
62 * struct cryp_ctx - Crypto context
63 * @config: Crypto mode.
64 * @key[CRYP_MAX_KEY_SIZE]: Key.
65 * @keylen: Length of key.
66 * @iv: Pointer to initialization vector.
67 * @indata: Pointer to indata.
68 * @outdata: Pointer to outdata.
69 * @datalen: Length of indata.
70 * @outlen: Length of outdata.
71 * @blocksize: Size of blocks.
72 * @updated: Updated flag.
73 * @dev_ctx: Device dependent context.
74 * @device: Pointer to the device.
75 */
76struct cryp_ctx {
77 struct cryp_config config;
78 u8 key[CRYP_MAX_KEY_SIZE];
79 u32 keylen;
80 u8 *iv;
81 const u8 *indata;
82 u8 *outdata;
83 u32 datalen;
84 u32 outlen;
85 u32 blocksize;
86 u8 updated;
87 struct cryp_device_context dev_ctx;
88 struct cryp_device_data *device;
89 u32 session_id;
90};
91
92static struct cryp_driver_data driver_data;
93
94/**
95 * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
96 * @in: Data to convert.
97 */
98static inline u32 uint8p_to_uint32_be(u8 *in)
99{
100 u32 *data = (u32 *)in;
101
102 return cpu_to_be32p(data);
103}
104
105/**
106 * swap_bits_in_byte - mirror the bits in a byte
107 * @b: the byte to be mirrored
108 *
109 * The bits are swapped the following way:
110 * Byte b includes bits 0-7, nibble 1 (n1) includes bits 0-3 and
111 * nibble 2 (n2) bits 4-7.
112 *
113 * Nibble 1 (n1):
114 * (The "old" (moved) bit is replaced with a zero)
115 * 1. Move bit 6 and 7, 4 positions to the left.
116 * 2. Move bit 3 and 5, 2 positions to the left.
117 * 3. Move bit 1-4, 1 position to the left.
118 *
119 * Nibble 2 (n2):
120 * 1. Move bit 0 and 1, 4 positions to the right.
121 * 2. Move bit 2 and 4, 2 positions to the right.
122 * 3. Move bit 3-6, 1 position to the right.
123 *
124 * Combine the two nibbles to a complete and swapped byte.
125 */
126
127static inline u8 swap_bits_in_byte(u8 b)
128{
129#define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */
130#define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5,
131 right shift 2 */
132#define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4,
133 right shift 1 */
134#define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */
135#define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4,
136 left shift 2 */
137#define L_SHIFT_1_MASK 0x78 /* (After left shift 2) Bits 3-6,
138 left shift 1 */
139
140 u8 n1;
141 u8 n2;
142
143 /* Swap most significant nibble */
144 /* Right shift 4, bits 6 and 7 */
145 n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4));
146 /* Right shift 2, bits 3 and 5 */
147 n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
148 /* Right shift 1, bits 1-4 */
149 n1 = (n1 & R_SHIFT_1_MASK) >> 1;
150
151 /* Swap least significant nibble */
152 /* Left shift 4, bits 0 and 1 */
153 n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4));
154 /* Left shift 2, bits 2 and 4 */
155 n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
156 /* Left shift 1, bits 3-6 */
157 n2 = (n2 & L_SHIFT_1_MASK) << 1;
158
159 return n1 | n2;
160}
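A quick worked example of the mirroring described above (illustrative only): 0x01 (0b00000001) maps to 0x80 (0b10000000), and 0xd2 (0b11010010) maps to 0x4b (0b01001011):

	BUG_ON(swap_bits_in_byte(0x01) != 0x80);
	BUG_ON(swap_bits_in_byte(0xd2) != 0x4b);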
161
162static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
163 u8 *out, u32 len)
164{
165 unsigned int i = 0;
166 int j;
167 int index = 0;
168
169 j = len - BYTES_PER_WORD;
170 while (j >= 0) {
171 for (i = 0; i < BYTES_PER_WORD; i++) {
172 index = len - j - BYTES_PER_WORD + i;
173 out[j + i] =
174 swap_bits_in_byte(in[index]);
175 }
176 j -= BYTES_PER_WORD;
177 }
178}
179
180static void add_session_id(struct cryp_ctx *ctx)
181{
182 /*
183 * We never want 0 to be a valid value, since this is the default value
184 * for the software context.
185 */
186 if (unlikely(atomic_inc_and_test(&session_id)))
187 atomic_inc(&session_id);
188
189 ctx->session_id = atomic_read(&session_id);
190}
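The zero-skipping trick above relies on atomic_inc_and_test() returning true only when the increment lands exactly on 0; in that case the counter is bumped once more, so a session id of 0 is never handed out, even after the counter wraps. An illustrative check (not in the driver):

	atomic_set(&session_id, -1);	/* next increment would yield 0 */
	add_session_id(ctx);
	BUG_ON(ctx->session_id == 0);	/* it is given 1 instead */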
191
192static irqreturn_t cryp_interrupt_handler(int irq, void *param)
193{
194 struct cryp_ctx *ctx;
195 int i;
196 struct cryp_device_data *device_data;
197
198 if (param == NULL) {
199 BUG_ON(!param);
200 return IRQ_HANDLED;
201 }
202
203 /* The device is coming from the one found in hw_crypt_noxts. */
204 device_data = (struct cryp_device_data *)param;
205
206 ctx = device_data->current_ctx;
207
208 if (ctx == NULL) {
209 BUG_ON(!ctx);
210 return IRQ_HANDLED;
211 }
212
213 dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
214 cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
215 "out" : "in");
216
217 if (cryp_pending_irq_src(device_data,
218 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
219 if (ctx->outlen / ctx->blocksize > 0) {
220 for (i = 0; i < ctx->blocksize / 4; i++) {
221 *(ctx->outdata) = readl_relaxed(
222 &device_data->base->dout);
223 ctx->outdata += 4;
224 ctx->outlen -= 4;
225 }
226
227 if (ctx->outlen == 0) {
228 cryp_disable_irq_src(device_data,
229 CRYP_IRQ_SRC_OUTPUT_FIFO);
230 }
231 }
232 } else if (cryp_pending_irq_src(device_data,
233 CRYP_IRQ_SRC_INPUT_FIFO)) {
234 if (ctx->datalen / ctx->blocksize > 0) {
235 for (i = 0 ; i < ctx->blocksize / 4; i++) {
236 writel_relaxed(ctx->indata,
237 &device_data->base->din);
238 ctx->indata += 4;
239 ctx->datalen -= 4;
240 }
241
242 if (ctx->datalen == 0)
243 cryp_disable_irq_src(device_data,
244 CRYP_IRQ_SRC_INPUT_FIFO);
245
246 if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
247 CRYP_PUT_BITS(&device_data->base->cr,
248 CRYP_START_ENABLE,
249 CRYP_CR_START_POS,
250 CRYP_CR_START_MASK);
251
252 cryp_wait_until_done(device_data);
253 }
254 }
255 }
256
257 return IRQ_HANDLED;
258}
259
260static int mode_is_aes(enum cryp_algo_mode mode)
261{
262 return CRYP_ALGO_AES_ECB == mode ||
263 CRYP_ALGO_AES_CBC == mode ||
264 CRYP_ALGO_AES_CTR == mode ||
265 CRYP_ALGO_AES_XTS == mode;
266}
267
268static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
269 enum cryp_init_vector_index index)
270{
271 struct cryp_init_vector_value vector_value;
272
273 dev_dbg(device_data->dev, "[%s]", __func__);
274
275 vector_value.init_value_left = left;
276 vector_value.init_value_right = right;
277
278 return cryp_configure_init_vector(device_data,
279 index,
280 vector_value);
281}
282
283static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
284{
285 int i;
286 int status = 0;
287 int num_of_regs = ctx->blocksize / 8;
288 u32 iv[AES_BLOCK_SIZE / 4];
289
290 dev_dbg(device_data->dev, "[%s]", __func__);
291
292 /*
293	 * Since we loop on num_of_regs we need a check in case someone
294	 * provides an incorrect blocksize, which would force calling
295	 * cfg_iv with an index greater than 1, which is an error.
296 */
297 if (num_of_regs > 2) {
298 dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
299 __func__, ctx->blocksize);
300 return -EINVAL;
301 }
302
303 for (i = 0; i < ctx->blocksize / 4; i++)
304 iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
305
306 for (i = 0; i < num_of_regs; i++) {
307 status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
308 (enum cryp_init_vector_index) i);
309 if (status != 0)
310 return status;
311 }
312 return status;
313}
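For a 16-byte AES IV the mapping works out as follows: blocksize = 16 gives num_of_regs = 2, and the four big-endian words land as

	iv[0]/iv[1] -> CRYP_INIT_VECTOR_INDEX_0 (left/right)
	iv[2]/iv[3] -> CRYP_INIT_VECTOR_INDEX_1 (left/right)

while DES/3DES (blocksize 8) only ever writes index 0.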
314
315static int set_key(struct cryp_device_data *device_data,
316 u32 left_key,
317 u32 right_key,
318 enum cryp_key_reg_index index)
319{
320 struct cryp_key_value key_value;
321 int cryp_error;
322
323 dev_dbg(device_data->dev, "[%s]", __func__);
324
325 key_value.key_value_left = left_key;
326 key_value.key_value_right = right_key;
327
328 cryp_error = cryp_configure_key_values(device_data,
329 index,
330 key_value);
331 if (cryp_error != 0)
332 dev_err(device_data->dev, "[%s]: "
333 "cryp_configure_key_values() failed!", __func__);
334
335 return cryp_error;
336}
337
338static int cfg_keys(struct cryp_ctx *ctx)
339{
340 int i;
341 int num_of_regs = ctx->keylen / 8;
342 u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
343 int cryp_error = 0;
344
345 dev_dbg(ctx->device->dev, "[%s]", __func__);
346
347 if (mode_is_aes(ctx->config.algomode)) {
348 swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
349 (u8 *)swapped_key,
350 ctx->keylen);
351 } else {
352 for (i = 0; i < ctx->keylen / 4; i++)
353 swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
354 }
355
356 for (i = 0; i < num_of_regs; i++) {
357 cryp_error = set_key(ctx->device,
358 *(((u32 *)swapped_key)+i*2),
359 *(((u32 *)swapped_key)+i*2+1),
360 (enum cryp_key_reg_index) i);
361
362 if (cryp_error != 0) {
363 dev_err(ctx->device->dev, "[%s]: set_key() failed!",
364 __func__);
365 return cryp_error;
366 }
367 }
368 return cryp_error;
369}
370
371static int cryp_setup_context(struct cryp_ctx *ctx,
372 struct cryp_device_data *device_data)
373{
374 u32 control_register = CRYP_CR_DEFAULT;
375
376 switch (cryp_mode) {
377 case CRYP_MODE_INTERRUPT:
378 writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
379 break;
380
381 case CRYP_MODE_DMA:
382 writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
383 break;
384
385 default:
386 break;
387 }
388
389 if (ctx->updated == 0) {
390 cryp_flush_inoutfifo(device_data);
391 if (cfg_keys(ctx) != 0) {
392 dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
393 __func__);
394 return -EINVAL;
395 }
396
397 if (ctx->iv &&
398 CRYP_ALGO_AES_ECB != ctx->config.algomode &&
399 CRYP_ALGO_DES_ECB != ctx->config.algomode &&
400 CRYP_ALGO_TDES_ECB != ctx->config.algomode) {
401 if (cfg_ivs(device_data, ctx) != 0)
402 return -EPERM;
403 }
404
405 cryp_set_configuration(device_data, &ctx->config,
406 &control_register);
407 add_session_id(ctx);
408 } else if (ctx->updated == 1 &&
409 ctx->session_id != atomic_read(&session_id)) {
410 cryp_flush_inoutfifo(device_data);
411 cryp_restore_device_context(device_data, &ctx->dev_ctx);
412
413 add_session_id(ctx);
414 control_register = ctx->dev_ctx.cr;
415 } else
416 control_register = ctx->dev_ctx.cr;
417
418 writel(control_register |
419 (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
420 &device_data->base->cr);
421
422 return 0;
423}
424
425static int cryp_get_device_data(struct cryp_ctx *ctx,
426 struct cryp_device_data **device_data)
427{
428 int ret;
429 struct klist_iter device_iterator;
430 struct klist_node *device_node;
431 struct cryp_device_data *local_device_data = NULL;
432 pr_debug(DEV_DBG_NAME " [%s]", __func__);
433
434 /* Wait until a device is available */
435 ret = down_interruptible(&driver_data.device_allocation);
436 if (ret)
437 return ret; /* Interrupted */
438
439 /* Select a device */
440 klist_iter_init(&driver_data.device_list, &device_iterator);
441
442 device_node = klist_next(&device_iterator);
443 while (device_node) {
444 local_device_data = container_of(device_node,
445 struct cryp_device_data, list_node);
446 spin_lock(&local_device_data->ctx_lock);
447 /* current_ctx allocates a device, NULL = unallocated */
448 if (local_device_data->current_ctx) {
449 device_node = klist_next(&device_iterator);
450 } else {
451 local_device_data->current_ctx = ctx;
452 ctx->device = local_device_data;
453 spin_unlock(&local_device_data->ctx_lock);
454 break;
455 }
456 spin_unlock(&local_device_data->ctx_lock);
457 }
458 klist_iter_exit(&device_iterator);
459
460 if (!device_node) {
461 /**
462 * No free device found.
463 * Since we allocated a device with down_interruptible, this
464 * should not be able to happen.
465 * Number of available devices, which are contained in
466 * device_allocation, is therefore decremented by not doing
467 * an up(device_allocation).
468 */
469 return -EBUSY;
470 }
471
472 *device_data = local_device_data;
473
474 return 0;
475}
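The allocation protocol this implies for callers, sketched below, is the pattern ablk_crypt() and ablk_dma_crypt() follow later in this file: cryp_get_device_data() performs the down_interruptible(), so the caller must clear current_ctx and up() the semaphore when it is done:

	ret = cryp_get_device_data(ctx, &device_data);
	if (ret)
		return ret;
	/* ... use the device ... */
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx = NULL;
	ctx->device = NULL;
	spin_unlock(&device_data->ctx_lock);
	up(&driver_data.device_allocation);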
476
477static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
478 struct device *dev)
479{
480 dma_cap_zero(device_data->dma.mask);
481 dma_cap_set(DMA_SLAVE, device_data->dma.mask);
482
483 device_data->dma.cfg_mem2cryp = mem_to_engine;
484 device_data->dma.chan_mem2cryp =
485 dma_request_channel(device_data->dma.mask,
486 stedma40_filter,
487 device_data->dma.cfg_mem2cryp);
488
489 device_data->dma.cfg_cryp2mem = engine_to_mem;
490 device_data->dma.chan_cryp2mem =
491 dma_request_channel(device_data->dma.mask,
492 stedma40_filter,
493 device_data->dma.cfg_cryp2mem);
494
495 init_completion(&device_data->dma.cryp_dma_complete);
496}
497
498static void cryp_dma_out_callback(void *data)
499{
500 struct cryp_ctx *ctx = (struct cryp_ctx *) data;
501 dev_dbg(ctx->device->dev, "[%s]: ", __func__);
502
503 complete(&ctx->device->dma.cryp_dma_complete);
504}
505
506static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
507 struct scatterlist *sg,
508 int len,
509 enum dma_data_direction direction)
510{
511 struct dma_async_tx_descriptor *desc;
512 struct dma_chan *channel = NULL;
513 dma_cookie_t cookie;
514
515 dev_dbg(ctx->device->dev, "[%s]: ", __func__);
516
517 if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
518 dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
519 "aligned! Addr: 0x%08x", __func__, (u32)sg);
520 return -EFAULT;
521 }
522
523 switch (direction) {
524 case DMA_TO_DEVICE:
525 channel = ctx->device->dma.chan_mem2cryp;
526 ctx->device->dma.sg_src = sg;
527 ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
528 ctx->device->dma.sg_src,
529 ctx->device->dma.nents_src,
530 direction);
531
532 if (!ctx->device->dma.sg_src_len) {
533 dev_dbg(ctx->device->dev,
534 "[%s]: Could not map the sg list (TO_DEVICE)",
535 __func__);
536 return -EFAULT;
537 }
538
539 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
540 "(TO_DEVICE)", __func__);
541
542 desc = channel->device->device_prep_slave_sg(channel,
543 ctx->device->dma.sg_src,
544 ctx->device->dma.sg_src_len,
545 direction, DMA_CTRL_ACK, NULL);
546 break;
547
548 case DMA_FROM_DEVICE:
549 channel = ctx->device->dma.chan_cryp2mem;
550 ctx->device->dma.sg_dst = sg;
551 ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
552 ctx->device->dma.sg_dst,
553 ctx->device->dma.nents_dst,
554 direction);
555
556 if (!ctx->device->dma.sg_dst_len) {
557 dev_dbg(ctx->device->dev,
558 "[%s]: Could not map the sg list (FROM_DEVICE)",
559 __func__);
560 return -EFAULT;
561 }
562
563 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
564 "(FROM_DEVICE)", __func__);
565
566 desc = channel->device->device_prep_slave_sg(channel,
567 ctx->device->dma.sg_dst,
568 ctx->device->dma.sg_dst_len,
569 direction,
570 DMA_CTRL_ACK |
571 DMA_PREP_INTERRUPT, NULL);
572
573 desc->callback = cryp_dma_out_callback;
574 desc->callback_param = ctx;
575 break;
576
577 default:
578 dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
579 __func__);
580 return -EFAULT;
581 }
582
583 cookie = desc->tx_submit(desc);
584 dma_async_issue_pending(channel);
585
586 return 0;
587}
588
589static void cryp_dma_done(struct cryp_ctx *ctx)
590{
591 struct dma_chan *chan;
592
593 dev_dbg(ctx->device->dev, "[%s]: ", __func__);
594
595 chan = ctx->device->dma.chan_mem2cryp;
596 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
597 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
598 ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
599
600 chan = ctx->device->dma.chan_cryp2mem;
601 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
602 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
603 ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
604}
605
606static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
607 int len)
608{
609 int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
610 dev_dbg(ctx->device->dev, "[%s]: ", __func__);
611
612 if (error) {
613 dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
614 "failed", __func__);
615 return error;
616 }
617
618 return len;
619}
620
621static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
622{
623 int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
624 if (error) {
625 dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
626 "failed", __func__);
627 return error;
628 }
629
630 return len;
631}
632
633static void cryp_polling_mode(struct cryp_ctx *ctx,
634 struct cryp_device_data *device_data)
635{
636 int len = ctx->blocksize / BYTES_PER_WORD;
637 int remaining_length = ctx->datalen;
638 u32 *indata = (u32 *)ctx->indata;
639 u32 *outdata = (u32 *)ctx->outdata;
640
641 while (remaining_length > 0) {
642 writesl(&device_data->base->din, indata, len);
643 indata += len;
644 remaining_length -= (len * BYTES_PER_WORD);
645 cryp_wait_until_done(device_data);
646
647 readsl(&device_data->base->dout, outdata, len);
648 outdata += len;
649 cryp_wait_until_done(device_data);
650 }
651}
652
653static int cryp_disable_power(struct device *dev,
654 struct cryp_device_data *device_data,
655 bool save_device_context)
656{
657 int ret = 0;
658
659 dev_dbg(dev, "[%s]", __func__);
660
661 spin_lock(&device_data->power_state_spinlock);
662 if (!device_data->power_state)
663 goto out;
664
665 spin_lock(&device_data->ctx_lock);
666 if (save_device_context && device_data->current_ctx) {
667 cryp_save_device_context(device_data,
668 &device_data->current_ctx->dev_ctx,
669 cryp_mode);
670 device_data->restore_dev_ctx = true;
671 }
672 spin_unlock(&device_data->ctx_lock);
673
674 clk_disable(device_data->clk);
675 ret = regulator_disable(device_data->pwr_regulator);
676 if (ret)
677 dev_err(dev, "[%s]: "
678 "regulator_disable() failed!",
679 __func__);
680
681 device_data->power_state = false;
682
683out:
684 spin_unlock(&device_data->power_state_spinlock);
685
686 return ret;
687}
688
689static int cryp_enable_power(
690 struct device *dev,
691 struct cryp_device_data *device_data,
692 bool restore_device_context)
693{
694 int ret = 0;
695
696 dev_dbg(dev, "[%s]", __func__);
697
698 spin_lock(&device_data->power_state_spinlock);
699 if (!device_data->power_state) {
700 ret = regulator_enable(device_data->pwr_regulator);
701 if (ret) {
702 dev_err(dev, "[%s]: regulator_enable() failed!",
703 __func__);
704 goto out;
705 }
706
707 ret = clk_enable(device_data->clk);
708 if (ret) {
709 dev_err(dev, "[%s]: clk_enable() failed!",
710 __func__);
711 regulator_disable(device_data->pwr_regulator);
712 goto out;
713 }
714 device_data->power_state = true;
715 }
716
717 if (device_data->restore_dev_ctx) {
718 spin_lock(&device_data->ctx_lock);
719 if (restore_device_context && device_data->current_ctx) {
720 device_data->restore_dev_ctx = false;
721 cryp_restore_device_context(device_data,
722 &device_data->current_ctx->dev_ctx);
723 }
724 spin_unlock(&device_data->ctx_lock);
725 }
726out:
727 spin_unlock(&device_data->power_state_spinlock);
728
729 return ret;
730}
731
732static int hw_crypt_noxts(struct cryp_ctx *ctx,
733 struct cryp_device_data *device_data)
734{
735 int ret = 0;
736
737 const u8 *indata = ctx->indata;
738 u8 *outdata = ctx->outdata;
739 u32 datalen = ctx->datalen;
740 u32 outlen = datalen;
741
742 pr_debug(DEV_DBG_NAME " [%s]", __func__);
743
744 ctx->outlen = ctx->datalen;
745
746 if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
747 pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
748 "0x%08x", __func__, (u32)indata);
749 return -EINVAL;
750 }
751
752 ret = cryp_setup_context(ctx, device_data);
753
754 if (ret)
755 goto out;
756
757 if (cryp_mode == CRYP_MODE_INTERRUPT) {
758 cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
759 CRYP_IRQ_SRC_OUTPUT_FIFO);
760
761 /*
762 * ctx->outlen is decremented in the cryp_interrupt_handler
763 * function. We had to add cpu_relax() (barrier) to make sure
764 * that gcc didn't optimze away this variable.
765 */
766 while (ctx->outlen > 0)
767 cpu_relax();
768 } else if (cryp_mode == CRYP_MODE_POLLING ||
769 cryp_mode == CRYP_MODE_DMA) {
770 /*
771		 * The reason for having DMA in this if case is that if we are
772		 * running cryp_mode = 2, then we use separate DMA routines for
773		 * handling cipher/plaintext larger than the blocksize, except when
774		 * running the normal CRYPTO_ALG_TYPE_CIPHER, where we still use
775		 * polling mode, since the overhead of the DMA setup eats up the
776		 * benefit of using it.
777 */
778 cryp_polling_mode(ctx, device_data);
779 } else {
780 dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
781 __func__);
782 ret = -EPERM;
783 goto out;
784 }
785
786 cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
787 ctx->updated = 1;
788
789out:
790 ctx->indata = indata;
791 ctx->outdata = outdata;
792 ctx->datalen = datalen;
793 ctx->outlen = outlen;
794
795 return ret;
796}
797
798static int get_nents(struct scatterlist *sg, int nbytes)
799{
800 int nents = 0;
801
802 while (nbytes > 0) {
803 nbytes -= sg->length;
804 sg = scatterwalk_sg_next(sg);
805 nents++;
806 }
807
808 return nents;
809}
810
811static int ablk_dma_crypt(struct ablkcipher_request *areq)
812{
813 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
814 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
815 struct cryp_device_data *device_data;
816
817 int bytes_written = 0;
818 int bytes_read = 0;
819 int ret;
820
821 pr_debug(DEV_DBG_NAME " [%s]", __func__);
822
823 ctx->datalen = areq->nbytes;
824 ctx->outlen = areq->nbytes;
825
826 ret = cryp_get_device_data(ctx, &device_data);
827 if (ret)
828 return ret;
829
830 ret = cryp_setup_context(ctx, device_data);
831 if (ret)
832 goto out;
833
834 /* We have the device now, so store the nents in the dma struct. */
835 ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
836 ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
837
838 /* Enable DMA in- and output. */
839 cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
840
841 bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
842 bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
843
844 wait_for_completion(&ctx->device->dma.cryp_dma_complete);
845 cryp_dma_done(ctx);
846
847 cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
848 ctx->updated = 1;
849
850out:
851 spin_lock(&device_data->ctx_lock);
852 device_data->current_ctx = NULL;
853 ctx->device = NULL;
854 spin_unlock(&device_data->ctx_lock);
855
856 /*
857 * The down_interruptible part for this semaphore is called in
858 * cryp_get_device_data.
859 */
860 up(&driver_data.device_allocation);
861
862 if (unlikely(bytes_written != bytes_read))
863 return -EPERM;
864
865 return 0;
866}
867
868static int ablk_crypt(struct ablkcipher_request *areq)
869{
870 struct ablkcipher_walk walk;
871 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
872 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
873 struct cryp_device_data *device_data;
874 unsigned long src_paddr;
875 unsigned long dst_paddr;
876 int ret;
877 int nbytes;
878
879 pr_debug(DEV_DBG_NAME " [%s]", __func__);
880
881 ret = cryp_get_device_data(ctx, &device_data);
882 if (ret)
883 goto out;
884
885 ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
886 ret = ablkcipher_walk_phys(areq, &walk);
887
888 if (ret) {
889 pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
890 __func__);
891 goto out;
892 }
893
894 while ((nbytes = walk.nbytes) > 0) {
895 ctx->iv = walk.iv;
896 src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
897 ctx->indata = phys_to_virt(src_paddr);
898
899 dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
900 ctx->outdata = phys_to_virt(dst_paddr);
901
902 ctx->datalen = nbytes - (nbytes % ctx->blocksize);
903
904 ret = hw_crypt_noxts(ctx, device_data);
905 if (ret)
906 goto out;
907
908 nbytes -= ctx->datalen;
909 ret = ablkcipher_walk_done(areq, &walk, nbytes);
910 if (ret)
911 goto out;
912 }
913 ablkcipher_walk_complete(&walk);
914
915out:
916 /* Release the device */
917 spin_lock(&device_data->ctx_lock);
918 device_data->current_ctx = NULL;
919 ctx->device = NULL;
920 spin_unlock(&device_data->ctx_lock);
921
922 /*
923 * The down_interruptible part for this semaphore is called in
924 * cryp_get_device_data.
925 */
926 up(&driver_data.device_allocation);
927
928 return ret;
929}
930
931static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
932 const u8 *key, unsigned int keylen)
933{
934 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
935 u32 *flags = &cipher->base.crt_flags;
936
937 pr_debug(DEV_DBG_NAME " [%s]", __func__);
938
939 switch (keylen) {
940 case AES_KEYSIZE_128:
941 ctx->config.keysize = CRYP_KEY_SIZE_128;
942 break;
943
944 case AES_KEYSIZE_192:
945 ctx->config.keysize = CRYP_KEY_SIZE_192;
946 break;
947
948 case AES_KEYSIZE_256:
949 ctx->config.keysize = CRYP_KEY_SIZE_256;
950 break;
951
952 default:
953 pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
954 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
955 return -EINVAL;
956 }
957
958 memcpy(ctx->key, key, keylen);
959 ctx->keylen = keylen;
960
961 ctx->updated = 0;
962
963 return 0;
964}
965
966static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
967 const u8 *key, unsigned int keylen)
968{
969 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
970 u32 *flags = &cipher->base.crt_flags;
971 u32 tmp[DES_EXPKEY_WORDS];
972 int ret;
973
974 pr_debug(DEV_DBG_NAME " [%s]", __func__);
975 if (keylen != DES_KEY_SIZE) {
976 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
977 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
978 __func__);
979 return -EINVAL;
980 }
981
982 ret = des_ekey(tmp, key);
983 if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
984 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
985 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
986 __func__);
987 return -EINVAL;
988 }
989
990 memcpy(ctx->key, key, keylen);
991 ctx->keylen = keylen;
992
993 ctx->updated = 0;
994 return 0;
995}
996
997static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
998 const u8 *key, unsigned int keylen)
999{
1000 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1001 u32 *flags = &cipher->base.crt_flags;
1002 const u32 *K = (const u32 *)key;
1003 u32 tmp[DES3_EDE_EXPKEY_WORDS];
1004 int i, ret;
1005
1006 pr_debug(DEV_DBG_NAME " [%s]", __func__);
1007 if (keylen != DES3_EDE_KEY_SIZE) {
1008 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
1009 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
1010 __func__);
1011 return -EINVAL;
1012 }
1013
1014	/* Check key interdependency for weak key detection, i.e. reject keys where K1 == K2 or K2 == K3. */
1015 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
1016 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
1017 (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
1018 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
1019 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
1020 __func__);
1021 return -EINVAL;
1022 }
1023 for (i = 0; i < 3; i++) {
1024 ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
1025 if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
1026 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
1027 pr_debug(DEV_DBG_NAME " [%s]: "
1028 "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
1029 return -EINVAL;
1030 }
1031 }
1032
1033 memcpy(ctx->key, key, keylen);
1034 ctx->keylen = keylen;
1035
1036 ctx->updated = 0;
1037 return 0;
1038}
1039
1040static int cryp_blk_encrypt(struct ablkcipher_request *areq)
1041{
1042 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1043 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1044
1045 pr_debug(DEV_DBG_NAME " [%s]", __func__);
1046
1047 ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
1048
1049	/* DMA does not work for DES due to a hw bug */
1051 if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
1052 return ablk_dma_crypt(areq);
1053
1054	/* For everything except DMA, we run the non-DMA version. */
1055 return ablk_crypt(areq);
1056}
1057
1058static int cryp_blk_decrypt(struct ablkcipher_request *areq)
1059{
1060 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1061 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1062
1063 pr_debug(DEV_DBG_NAME " [%s]", __func__);
1064
1065 ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
1066
1067 /* DMA does not work for DES due to a hw bug */
1068 if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
1069 return ablk_dma_crypt(areq);
1070
1071 /* For everything except DMA, we run the non DMA version. */
1072 return ablk_crypt(areq);
1073}
1074
1075struct cryp_algo_template {
1076 enum cryp_algo_mode algomode;
1077 struct crypto_alg crypto;
1078};
1079
1080static int cryp_cra_init(struct crypto_tfm *tfm)
1081{
1082 struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
1083 struct crypto_alg *alg = tfm->__crt_alg;
1084 struct cryp_algo_template *cryp_alg = container_of(alg,
1085 struct cryp_algo_template,
1086 crypto);
1087
1088 ctx->config.algomode = cryp_alg->algomode;
1089 ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
1090
1091 return 0;
1092}
1093
1094static struct cryp_algo_template cryp_algs[] = {
1095 {
1096 .algomode = CRYP_ALGO_AES_ECB,
1097 .crypto = {
1098 .cra_name = "aes",
1099 .cra_driver_name = "aes-ux500",
1100 .cra_priority = 300,
1101 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1102 CRYPTO_ALG_ASYNC,
1103 .cra_blocksize = AES_BLOCK_SIZE,
1104 .cra_ctxsize = sizeof(struct cryp_ctx),
1105 .cra_alignmask = 3,
1106 .cra_type = &crypto_ablkcipher_type,
1107 .cra_init = cryp_cra_init,
1108 .cra_module = THIS_MODULE,
1109 .cra_u = {
1110 .ablkcipher = {
1111 .min_keysize = AES_MIN_KEY_SIZE,
1112 .max_keysize = AES_MAX_KEY_SIZE,
1113 .setkey = aes_ablkcipher_setkey,
1114 .encrypt = cryp_blk_encrypt,
1115 .decrypt = cryp_blk_decrypt
1116 }
1117 }
1118 }
1119 },
1120 {
1121 .algomode = CRYP_ALGO_AES_ECB,
1122 .crypto = {
1123 .cra_name = "ecb(aes)",
1124 .cra_driver_name = "ecb-aes-ux500",
1125 .cra_priority = 300,
1126 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1127 CRYPTO_ALG_ASYNC,
1128 .cra_blocksize = AES_BLOCK_SIZE,
1129 .cra_ctxsize = sizeof(struct cryp_ctx),
1130 .cra_alignmask = 3,
1131 .cra_type = &crypto_ablkcipher_type,
1132 .cra_init = cryp_cra_init,
1133 .cra_module = THIS_MODULE,
1134 .cra_u = {
1135 .ablkcipher = {
1136 .min_keysize = AES_MIN_KEY_SIZE,
1137 .max_keysize = AES_MAX_KEY_SIZE,
1138 .setkey = aes_ablkcipher_setkey,
1139 .encrypt = cryp_blk_encrypt,
1140 .decrypt = cryp_blk_decrypt,
1141 }
1142 }
1143 }
1144 },
1145 {
1146 .algomode = CRYP_ALGO_AES_CBC,
1147 .crypto = {
1148 .cra_name = "cbc(aes)",
1149 .cra_driver_name = "cbc-aes-ux500",
1150 .cra_priority = 300,
1151 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1152 CRYPTO_ALG_ASYNC,
1153 .cra_blocksize = AES_BLOCK_SIZE,
1154 .cra_ctxsize = sizeof(struct cryp_ctx),
1155 .cra_alignmask = 3,
1156 .cra_type = &crypto_ablkcipher_type,
1157 .cra_init = cryp_cra_init,
1158 .cra_module = THIS_MODULE,
1159 .cra_u = {
1160 .ablkcipher = {
1161 .min_keysize = AES_MIN_KEY_SIZE,
1162 .max_keysize = AES_MAX_KEY_SIZE,
1163 .setkey = aes_ablkcipher_setkey,
1164 .encrypt = cryp_blk_encrypt,
1165 .decrypt = cryp_blk_decrypt,
1166 .ivsize = AES_BLOCK_SIZE,
1167 }
1168 }
1169 }
1170 },
1171 {
1172 .algomode = CRYP_ALGO_AES_CTR,
1173 .crypto = {
1174 .cra_name = "ctr(aes)",
1175 .cra_driver_name = "ctr-aes-ux500",
1176 .cra_priority = 300,
1177 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1178 CRYPTO_ALG_ASYNC,
1179 .cra_blocksize = AES_BLOCK_SIZE,
1180 .cra_ctxsize = sizeof(struct cryp_ctx),
1181 .cra_alignmask = 3,
1182 .cra_type = &crypto_ablkcipher_type,
1183 .cra_init = cryp_cra_init,
1184 .cra_module = THIS_MODULE,
1185 .cra_u = {
1186 .ablkcipher = {
1187 .min_keysize = AES_MIN_KEY_SIZE,
1188 .max_keysize = AES_MAX_KEY_SIZE,
1189 .setkey = aes_ablkcipher_setkey,
1190 .encrypt = cryp_blk_encrypt,
1191 .decrypt = cryp_blk_decrypt,
1192 .ivsize = AES_BLOCK_SIZE,
1193 }
1194 }
1195 }
1196 },
1197 {
1198 .algomode = CRYP_ALGO_DES_ECB,
1199 .crypto = {
1200 .cra_name = "des",
1201 .cra_driver_name = "des-ux500",
1202 .cra_priority = 300,
1203 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1204 CRYPTO_ALG_ASYNC,
1205 .cra_blocksize = DES_BLOCK_SIZE,
1206 .cra_ctxsize = sizeof(struct cryp_ctx),
1207 .cra_alignmask = 3,
1208 .cra_type = &crypto_ablkcipher_type,
1209 .cra_init = cryp_cra_init,
1210 .cra_module = THIS_MODULE,
1211 .cra_u = {
1212 .ablkcipher = {
1213 .min_keysize = DES_KEY_SIZE,
1214 .max_keysize = DES_KEY_SIZE,
1215 .setkey = des_ablkcipher_setkey,
1216 .encrypt = cryp_blk_encrypt,
1217 .decrypt = cryp_blk_decrypt
1218 }
1219 }
1220 }
1221
1222 },
1223 {
1224 .algomode = CRYP_ALGO_TDES_ECB,
1225 .crypto = {
1226 .cra_name = "des3_ede",
1227 .cra_driver_name = "des3_ede-ux500",
1228 .cra_priority = 300,
1229 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1230 CRYPTO_ALG_ASYNC,
1231 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1232 .cra_ctxsize = sizeof(struct cryp_ctx),
1233 .cra_alignmask = 3,
1234 .cra_type = &crypto_ablkcipher_type,
1235 .cra_init = cryp_cra_init,
1236 .cra_module = THIS_MODULE,
1237 .cra_u = {
1238 .ablkcipher = {
1239 .min_keysize = DES3_EDE_KEY_SIZE,
1240 .max_keysize = DES3_EDE_KEY_SIZE,
1241 .setkey = des_ablkcipher_setkey,
1242 .encrypt = cryp_blk_encrypt,
1243 .decrypt = cryp_blk_decrypt
1244 }
1245 }
1246 }
1247 },
1248 {
1249 .algomode = CRYP_ALGO_DES_ECB,
1250 .crypto = {
1251 .cra_name = "ecb(des)",
1252 .cra_driver_name = "ecb-des-ux500",
1253 .cra_priority = 300,
1254 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1255 CRYPTO_ALG_ASYNC,
1256 .cra_blocksize = DES_BLOCK_SIZE,
1257 .cra_ctxsize = sizeof(struct cryp_ctx),
1258 .cra_alignmask = 3,
1259 .cra_type = &crypto_ablkcipher_type,
1260 .cra_init = cryp_cra_init,
1261 .cra_module = THIS_MODULE,
1262 .cra_u = {
1263 .ablkcipher = {
1264 .min_keysize = DES_KEY_SIZE,
1265 .max_keysize = DES_KEY_SIZE,
1266 .setkey = des_ablkcipher_setkey,
1267 .encrypt = cryp_blk_encrypt,
1268 .decrypt = cryp_blk_decrypt,
1269 }
1270 }
1271 }
1272 },
1273 {
1274 .algomode = CRYP_ALGO_TDES_ECB,
1275 .crypto = {
1276 .cra_name = "ecb(des3_ede)",
1277 .cra_driver_name = "ecb-des3_ede-ux500",
1278 .cra_priority = 300,
1279 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1280 CRYPTO_ALG_ASYNC,
1281 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1282 .cra_ctxsize = sizeof(struct cryp_ctx),
1283 .cra_alignmask = 3,
1284 .cra_type = &crypto_ablkcipher_type,
1285 .cra_init = cryp_cra_init,
1286 .cra_module = THIS_MODULE,
1287 .cra_u = {
1288 .ablkcipher = {
1289 .min_keysize = DES3_EDE_KEY_SIZE,
1290 .max_keysize = DES3_EDE_KEY_SIZE,
1291 .setkey = des3_ablkcipher_setkey,
1292 .encrypt = cryp_blk_encrypt,
1293 .decrypt = cryp_blk_decrypt,
1294 }
1295 }
1296 }
1297 },
1298 {
1299 .algomode = CRYP_ALGO_DES_CBC,
1300 .crypto = {
1301 .cra_name = "cbc(des)",
1302 .cra_driver_name = "cbc-des-ux500",
1303 .cra_priority = 300,
1304 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1305 CRYPTO_ALG_ASYNC,
1306 .cra_blocksize = DES_BLOCK_SIZE,
1307 .cra_ctxsize = sizeof(struct cryp_ctx),
1308 .cra_alignmask = 3,
1309 .cra_type = &crypto_ablkcipher_type,
1310 .cra_init = cryp_cra_init,
1311 .cra_module = THIS_MODULE,
1312 .cra_u = {
1313 .ablkcipher = {
1314 .min_keysize = DES_KEY_SIZE,
1315 .max_keysize = DES_KEY_SIZE,
1316 .setkey = des_ablkcipher_setkey,
1317 .encrypt = cryp_blk_encrypt,
1318				.decrypt = cryp_blk_decrypt,
				.ivsize = DES_BLOCK_SIZE,
1319 }
1320 }
1321 }
1322 },
1323 {
1324 .algomode = CRYP_ALGO_TDES_CBC,
1325 .crypto = {
1326 .cra_name = "cbc(des3_ede)",
1327 .cra_driver_name = "cbc-des3_ede-ux500",
1328 .cra_priority = 300,
1329 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1330 CRYPTO_ALG_ASYNC,
1331 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1332 .cra_ctxsize = sizeof(struct cryp_ctx),
1333 .cra_alignmask = 3,
1334 .cra_type = &crypto_ablkcipher_type,
1335 .cra_init = cryp_cra_init,
1336 .cra_module = THIS_MODULE,
1337 .cra_u = {
1338 .ablkcipher = {
1339 .min_keysize = DES3_EDE_KEY_SIZE,
1340 .max_keysize = DES3_EDE_KEY_SIZE,
1341 .setkey = des3_ablkcipher_setkey,
1342 .encrypt = cryp_blk_encrypt,
1343 .decrypt = cryp_blk_decrypt,
1344 .ivsize = DES3_EDE_BLOCK_SIZE,
1345 }
1346 }
1347 }
1348 }
1349};
1350
1351/**
1352 * cryp_algs_register_all - Register all supported algorithms with the kernel crypto API.
1353 */
1354static int cryp_algs_register_all(void)
1355{
1356 int ret;
1357 int i;
1358 int count;
1359
1360 pr_debug("[%s]", __func__);
1361
1362 for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
1363 ret = crypto_register_alg(&cryp_algs[i].crypto);
1364 if (ret) {
1365 count = i;
1366 pr_err("[%s] alg registration failed",
1367 cryp_algs[i].crypto.cra_driver_name);
1368 goto unreg;
1369 }
1370 }
1371 return 0;
1372unreg:
1373 for (i = 0; i < count; i++)
1374 crypto_unregister_alg(&cryp_algs[i].crypto);
1375 return ret;
1376}
1377
1378/**
1379 * cryp_algs_unregister_all - Unregister all previously registered algorithms.
1380 */
1381static void cryp_algs_unregister_all(void)
1382{
1383 int i;
1384
1385 pr_debug(DEV_DBG_NAME " [%s]", __func__);
1386
1387 for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
1388 crypto_unregister_alg(&cryp_algs[i].crypto);
1389}
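For context, a sketch (not part of this patch) of how a kernel user reaches the transforms registered above through this era's ablkcipher API; error handling is trimmed, and key, src_sg, dst_sg, nbytes and iv are assumed to be prepared by the caller:

	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;

	/* "cbc(aes)" resolves to cbc-aes-ux500 when its cra_priority wins. */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
	/* May return -EINPROGRESS; the driver completes asynchronously. */
	crypto_ablkcipher_encrypt(req);
	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);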
1390
1391static int ux500_cryp_probe(struct platform_device *pdev)
1392{
1393 int ret;
1394 int cryp_error = 0;
1395 struct resource *res = NULL;
1396 struct resource *res_irq = NULL;
1397 struct cryp_device_data *device_data;
1398 struct cryp_protection_config prot = {
1399 .privilege_access = CRYP_STATE_ENABLE
1400 };
1401 struct device *dev = &pdev->dev;
1402
1403 dev_dbg(dev, "[%s]", __func__);
1404	device_data = kzalloc(sizeof(struct cryp_device_data), GFP_KERNEL);
1405 if (!device_data) {
1406 dev_err(dev, "[%s]: kzalloc() failed!", __func__);
1407 ret = -ENOMEM;
1408 goto out;
1409 }
1410
1411 device_data->dev = dev;
1412 device_data->current_ctx = NULL;
1413
1414 /* Grab the DMA configuration from platform data. */
1415 mem_to_engine = &((struct cryp_platform_data *)
1416 dev->platform_data)->mem_to_engine;
1417 engine_to_mem = &((struct cryp_platform_data *)
1418 dev->platform_data)->engine_to_mem;
1419
1420 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1421 if (!res) {
1422 dev_err(dev, "[%s]: platform_get_resource() failed",
1423 __func__);
1424 ret = -ENODEV;
1425 goto out_kfree;
1426 }
1427
1428 res = request_mem_region(res->start, resource_size(res), pdev->name);
1429 if (res == NULL) {
1430 dev_err(dev, "[%s]: request_mem_region() failed",
1431 __func__);
1432 ret = -EBUSY;
1433 goto out_kfree;
1434 }
1435
1436 device_data->base = ioremap(res->start, resource_size(res));
1437 if (!device_data->base) {
1438 dev_err(dev, "[%s]: ioremap failed!", __func__);
1439 ret = -ENOMEM;
1440 goto out_free_mem;
1441 }
1442
1443 spin_lock_init(&device_data->ctx_lock);
1444 spin_lock_init(&device_data->power_state_spinlock);
1445
1446 /* Enable power for CRYP hardware block */
1447 device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape");
1448 if (IS_ERR(device_data->pwr_regulator)) {
1449 dev_err(dev, "[%s]: could not get cryp regulator", __func__);
1450 ret = PTR_ERR(device_data->pwr_regulator);
1451 device_data->pwr_regulator = NULL;
1452 goto out_unmap;
1453 }
1454
1455 /* Enable the clk for CRYP hardware block */
1456 device_data->clk = clk_get(&pdev->dev, NULL);
1457 if (IS_ERR(device_data->clk)) {
1458 dev_err(dev, "[%s]: clk_get() failed!", __func__);
1459 ret = PTR_ERR(device_data->clk);
1460 goto out_regulator;
1461 }
1462
1463 /* Enable device power (and clock) */
1464 ret = cryp_enable_power(device_data->dev, device_data, false);
1465 if (ret) {
1466 dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1467 goto out_clk;
1468 }
1469
1470 cryp_error = cryp_check(device_data);
1471 if (cryp_error != 0) {
1472 dev_err(dev, "[%s]: cryp_init() failed!", __func__);
1473 ret = -EINVAL;
1474 goto out_power;
1475 }
1476
1477 cryp_error = cryp_configure_protection(device_data, &prot);
1478 if (cryp_error != 0) {
1479 dev_err(dev, "[%s]: cryp_configure_protection() failed!",
1480 __func__);
1481 ret = -EINVAL;
1482 goto out_power;
1483 }
1484
1485 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1486	if (!res_irq) {
1487		dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
1488			__func__);
		ret = -ENODEV;
1489		goto out_power;
1490	}
1491
1492 ret = request_irq(res_irq->start,
1493 cryp_interrupt_handler,
1494 0,
1495 "cryp1",
1496 device_data);
1497 if (ret) {
1498 dev_err(dev, "[%s]: Unable to request IRQ", __func__);
1499 goto out_power;
1500 }
1501
1502 if (cryp_mode == CRYP_MODE_DMA)
1503 cryp_dma_setup_channel(device_data, dev);
1504
1505 platform_set_drvdata(pdev, device_data);
1506
1507 /* Put the new device into the device list... */
1508 klist_add_tail(&device_data->list_node, &driver_data.device_list);
1509
1510 /* ... and signal that a new device is available. */
1511 up(&driver_data.device_allocation);
1512
1513 atomic_set(&session_id, 1);
1514
1515 ret = cryp_algs_register_all();
1516 if (ret) {
1517 dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
1518 __func__);
1519 goto out_power;
1520 }
1521
1522 return 0;
1523
1524out_power:
1525 cryp_disable_power(device_data->dev, device_data, false);
1526
1527out_clk:
1528 clk_put(device_data->clk);
1529
1530out_regulator:
1531 regulator_put(device_data->pwr_regulator);
1532
1533out_unmap:
1534 iounmap(device_data->base);
1535
1536out_free_mem:
1537 release_mem_region(res->start, resource_size(res));
1538
1539out_kfree:
1540 kfree(device_data);
1541out:
1542 return ret;
1543}
1544
1545static int ux500_cryp_remove(struct platform_device *pdev)
1546{
1547 struct resource *res = NULL;
1548 struct resource *res_irq = NULL;
1549 struct cryp_device_data *device_data;
1550
1551 dev_dbg(&pdev->dev, "[%s]", __func__);
1552 device_data = platform_get_drvdata(pdev);
1553 if (!device_data) {
1554 dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1555 __func__);
1556 return -ENOMEM;
1557 }
1558
1559 /* Try to decrease the number of available devices. */
1560 if (down_trylock(&driver_data.device_allocation))
1561 return -EBUSY;
1562
1563 /* Check that the device is free */
1564 spin_lock(&device_data->ctx_lock);
1565 /* current_ctx allocates a device, NULL = unallocated */
1566 if (device_data->current_ctx) {
1567 /* The device is busy */
1568 spin_unlock(&device_data->ctx_lock);
1569 /* Return the device to the pool. */
1570 up(&driver_data.device_allocation);
1571 return -EBUSY;
1572 }
1573
1574 spin_unlock(&device_data->ctx_lock);
1575
1576 /* Remove the device from the list */
1577 if (klist_node_attached(&device_data->list_node))
1578 klist_remove(&device_data->list_node);
1579
1580 /* If this was the last device, remove the services */
1581 if (list_empty(&driver_data.device_list.k_list))
1582 cryp_algs_unregister_all();
1583
1584 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1585 if (!res_irq)
1586 dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
1587 __func__);
1588 else {
1589 disable_irq(res_irq->start);
1590 free_irq(res_irq->start, device_data);
1591 }
1592
1593 if (cryp_disable_power(&pdev->dev, device_data, false))
1594 dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1595 __func__);
1596
1597 clk_put(device_data->clk);
1598 regulator_put(device_data->pwr_regulator);
1599
1600 iounmap(device_data->base);
1601
1602 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1603 if (res)
1604		release_mem_region(res->start, resource_size(res));
1605
1606 kfree(device_data);
1607
1608 return 0;
1609}
1610
1611static void ux500_cryp_shutdown(struct platform_device *pdev)
1612{
1613 struct resource *res_irq = NULL;
1614 struct cryp_device_data *device_data;
1615
1616 dev_dbg(&pdev->dev, "[%s]", __func__);
1617
1618 device_data = platform_get_drvdata(pdev);
1619 if (!device_data) {
1620 dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1621 __func__);
1622 return;
1623 }
1624
1625 /* Check that the device is free */
1626 spin_lock(&device_data->ctx_lock);
1627 /* current_ctx allocates a device, NULL = unallocated */
1628 if (!device_data->current_ctx) {
1629 if (down_trylock(&driver_data.device_allocation))
1630			dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
1631				"Shutting down anyway...", __func__);
1632		/*
1633		 * (Allocate the device)
1634		 * Need to set this to a non-null (dummy) value,
1635		 * to avoid usage during context switching.
1636		 */
1637 device_data->current_ctx++;
1638 }
1639 spin_unlock(&device_data->ctx_lock);
1640
1641 /* Remove the device from the list */
1642 if (klist_node_attached(&device_data->list_node))
1643 klist_remove(&device_data->list_node);
1644
1645 /* If this was the last device, remove the services */
1646 if (list_empty(&driver_data.device_list.k_list))
1647 cryp_algs_unregister_all();
1648
1649 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1650 if (!res_irq)
1651 dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
1652 __func__);
1653 else {
1654 disable_irq(res_irq->start);
1655 free_irq(res_irq->start, device_data);
1656 }
1657
1658 if (cryp_disable_power(&pdev->dev, device_data, false))
1659 dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1660 __func__);
1661
1662}
1663
1664static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
1665{
1666 int ret;
1667 struct cryp_device_data *device_data;
1668 struct resource *res_irq;
1669 struct cryp_ctx *temp_ctx = NULL;
1670
1671 dev_dbg(&pdev->dev, "[%s]", __func__);
1672
1673 /* Handle state? */
1674 device_data = platform_get_drvdata(pdev);
1675 if (!device_data) {
1676 dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1677 __func__);
1678 return -ENOMEM;
1679 }
1680
1681 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1682 if (!res_irq)
1683 dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
1684 __func__);
1685 else
1686 disable_irq(res_irq->start);
1687
1688 spin_lock(&device_data->ctx_lock);
1689 if (!device_data->current_ctx)
1690 device_data->current_ctx++;
1691 spin_unlock(&device_data->ctx_lock);
1692
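	/*
	 * ++temp_ctx turns the NULL pointer into the same dummy non-NULL
	 * value that the branch above may have stored in current_ctx; a
	 * match therefore means the device was idle and has now been
	 * claimed by suspend itself.
	 */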
1693 if (device_data->current_ctx == ++temp_ctx) {
1694 if (down_interruptible(&driver_data.device_allocation))
1695 dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
1696 "failed", __func__);
1697 ret = cryp_disable_power(&pdev->dev, device_data, false);
1698
1699 } else
1700 ret = cryp_disable_power(&pdev->dev, device_data, true);
1701
1702 if (ret)
1703 dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__);
1704
1705 return ret;
1706}
1707
1708static int ux500_cryp_resume(struct platform_device *pdev)
1709{
1710 int ret = 0;
1711 struct cryp_device_data *device_data;
1712 struct resource *res_irq;
1713 struct cryp_ctx *temp_ctx = NULL;
1714
1715 dev_dbg(&pdev->dev, "[%s]", __func__);
1716
1717 device_data = platform_get_drvdata(pdev);
1718 if (!device_data) {
1719 dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1720 __func__);
1721 return -ENOMEM;
1722 }
1723
1724 spin_lock(&device_data->ctx_lock);
1725 if (device_data->current_ctx == ++temp_ctx)
1726 device_data->current_ctx = NULL;
1727 spin_unlock(&device_data->ctx_lock);
1728
1729
1730 if (!device_data->current_ctx)
1731 up(&driver_data.device_allocation);
1732 else
1733 ret = cryp_enable_power(&pdev->dev, device_data, true);
1734
1735 if (ret)
1736 dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!",
1737 __func__);
1738 else {
1739 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1740 if (res_irq)
1741 enable_irq(res_irq->start);
1742 }
1743
1744 return ret;
1745}
1746
1747static struct platform_driver cryp_driver = {
1748 .probe = ux500_cryp_probe,
1749 .remove = ux500_cryp_remove,
1750 .shutdown = ux500_cryp_shutdown,
1751 .suspend = ux500_cryp_suspend,
1752 .resume = ux500_cryp_resume,
1753 .driver = {
1754 .owner = THIS_MODULE,
1755 .name = "cryp1"
1756 }
1757};
1758
1759static int __init ux500_cryp_mod_init(void)
1760{
1761 pr_debug("[%s] is called!", __func__);
1762 klist_init(&driver_data.device_list, NULL, NULL);
1763 /* Initialize the semaphore to 0 devices (locked state) */
1764 sema_init(&driver_data.device_allocation, 0);
1765 return platform_driver_register(&cryp_driver);
1766}
1767
1768static void __exit ux500_cryp_mod_fini(void)
1769{
1770 pr_debug("[%s] is called!", __func__);
1771 platform_driver_unregister(&cryp_driver);
1772 return;
1773}
1774
1775module_init(ux500_cryp_mod_init);
1776module_exit(ux500_cryp_mod_fini);
1777
1778module_param(cryp_mode, int, 0);
1779
1780MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
1781MODULE_ALIAS("aes-all");
1782MODULE_ALIAS("des-all");
1783
1784MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
new file mode 100644
index 000000000000..08d291cdbe6d
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -0,0 +1,45 @@
1/**
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
4 * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
5 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
6 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
7 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
8 * License terms: GNU General Public License (GPL) version 2.
9 */
10
11#include <linux/kernel.h>
12#include <linux/bitmap.h>
13#include <linux/device.h>
14
15#include "cryp.h"
16#include "cryp_p.h"
17#include "cryp_irq.h"
18#include "cryp_irqp.h"
19
20void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
21{
22 u32 i;
23
24 dev_dbg(device_data->dev, "[%s]", __func__);
25
26 i = readl_relaxed(&device_data->base->imsc);
27 i = i | irq_src;
28 writel_relaxed(i, &device_data->base->imsc);
29}
30
31void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
32{
33 u32 i;
34
35 dev_dbg(device_data->dev, "[%s]", __func__);
36
37 i = readl_relaxed(&device_data->base->imsc);
38 i = i & ~irq_src;
39 writel_relaxed(i, &device_data->base->imsc);
40}
41
42bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src)
43{
44 return (readl_relaxed(&device_data->base->mis) & irq_src) > 0;
45}
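A hypothetical sketch of how these three helpers combine in an interrupt handler; the driver's real cryp_interrupt_handler lives in cryp_core.c and is not part of this hunk, and irqreturn_t/IRQ_HANDLED come from <linux/interrupt.h>:

	static irqreturn_t example_cryp_isr(int irq, void *param)
	{
		struct cryp_device_data *device_data = param;

		if (!cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO))
			return IRQ_NONE;

		/* Silence the source until the output FIFO has been drained. */
		cryp_disable_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO);
		return IRQ_HANDLED;
	}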
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
new file mode 100644
index 000000000000..5a7837f1b8f9
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -0,0 +1,31 @@
1/**
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
4 * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
5 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
6 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
7 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
8 * License terms: GNU General Public License (GPL) version 2
9 */
10
11#ifndef _CRYP_IRQ_H_
12#define _CRYP_IRQ_H_
13
14#include "cryp.h"
15
16enum cryp_irq_src_id {
17 CRYP_IRQ_SRC_INPUT_FIFO = 0x1,
18 CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2,
19 CRYP_IRQ_SRC_ALL = 0x3
20};
21
22/**
23 * M0 Functions
24 */
25void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
26
27void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
28
29bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src);
30
31#endif /* _CRYP_IRQ_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
new file mode 100644
index 000000000000..8b339cc34bf8
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -0,0 +1,125 @@
1/**
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
4 * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
5 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
6 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
7 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
8 * License terms: GNU General Public License (GPL) version 2
9 */
10
11#ifndef __CRYP_IRQP_H_
12#define __CRYP_IRQP_H_
13
14#include "cryp_irq.h"
15
16/**
17 *
18 * CRYP Registers - Offset mapping
19 * +-----------------+
20 * 00h | CRYP_CR | Configuration register
21 * +-----------------+
22 * 04h | CRYP_SR | Status register
23 * +-----------------+
 * 08h | CRYP_DIN        | Data In register
 *     +-----------------+
 * 0ch | CRYP_DIN_SIZE   | Data In size register
 *     +-----------------+
 * 10h | CRYP_DOUT       | Data out register
 *     +-----------------+
 * 14h | CRYP_DOUT_SIZE  | Data out size register
 *     +-----------------+
 * 18h | CRYP_DMACR      | DMA control register
 *     +-----------------+
 * 1ch | CRYP_IMSC       | IMSC
 *     +-----------------+
 * 20h | CRYP_RIS        | Raw interrupt status
 *     +-----------------+
 * 24h | CRYP_MIS        | Masked interrupt status
35 * +-----------------+
36 * Key registers
37 * IVR registers
38 * Peripheral
39 * Cell IDs
40 *
41 * Refer to the data structure below for the remaining register map
42 */
43
44/**
45 * struct cryp_register
46 * @cr - Configuration register
47 * @status - Status register
48 * @din - Data input register
49 * @din_size - Data input size register
50 * @dout - Data output register
51 * @dout_size - Data output size register
52 * @dmacr - Dma control register
53 * @imsc - Interrupt mask set/clear register
54 * @ris - Raw interrupt status
55 * @mis - Masked interrupt status register
56 * @key_1_l - Key register 1 L
57 * @key_1_r - Key register 1 R
58 * @key_2_l - Key register 2 L
59 * @key_2_r - Key register 2 R
60 * @key_3_l - Key register 3 L
61 * @key_3_r - Key register 3 R
62 * @key_4_l - Key register 4 L
63 * @key_4_r - Key register 4 R
64 * @init_vect_0_l - init vector 0 L
65 * @init_vect_0_r - init vector 0 R
66 * @init_vect_1_l - init vector 1 L
67 * @init_vect_1_r - init vector 1 R
68 * @cryp_unused1 - unused registers
69 * @itcr - Integration test control register
70 * @itip - Integration test input register
71 * @itop - Integration test output register
72 * @cryp_unused2 - unused registers
73 * @periphId0 - FE0 CRYP Peripheral Identification Register
74 * @periphId1 - FE4
75 * @periphId2 - FE8
76 * @periphId3 - FEC
77 * @pcellId0 - FF0 CRYP PCell Identification Register
78 * @pcellId1 - FF4
79 * @pcellId2 - FF8
80 * @pcellId3 - FFC
81 */
82struct cryp_register {
83 u32 cr; /* Configuration register */
84 u32 sr; /* Status register */
85 u32 din; /* Data input register */
86 u32 din_size; /* Data input size register */
87 u32 dout; /* Data output register */
88 u32 dout_size; /* Data output size register */
89 u32 dmacr; /* Dma control register */
90 u32 imsc; /* Interrupt mask set/clear register */
91 u32 ris; /* Raw interrupt status */
92	u32 mis;	/* Masked interrupt status register */
93
94 u32 key_1_l; /*Key register 1 L */
95 u32 key_1_r; /*Key register 1 R */
96 u32 key_2_l; /*Key register 2 L */
97 u32 key_2_r; /*Key register 2 R */
98 u32 key_3_l; /*Key register 3 L */
99 u32 key_3_r; /*Key register 3 R */
100 u32 key_4_l; /*Key register 4 L */
101 u32 key_4_r; /*Key register 4 R */
102
103 u32 init_vect_0_l; /*init vector 0 L */
104 u32 init_vect_0_r; /*init vector 0 R */
105 u32 init_vect_1_l; /*init vector 1 L */
106 u32 init_vect_1_r; /*init vector 1 R */
107
108 u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */
109 u32 itcr; /*Integration test control register */
110 u32 itip; /*Integration test input register */
111 u32 itop; /*Integration test output register */
112 u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */
113
114	u32 periphId0;	/* FE0 CRYP Peripheral Identification Register */
115 u32 periphId1; /* FE4 */
116 u32 periphId2; /* FE8 */
117 u32 periphId3; /* FEC */
118
119	u32 pcellId0;	/* FF0 CRYP PCell Identification Register */
120 u32 pcellId1; /* FF4 */
121 u32 pcellId2; /* FF8 */
122 u32 pcellId3; /* FFC */
123};
124
125#endif
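The two unused-register arrays encode absolute offsets; a compile-time sanity check along these lines (not in the patch) would pin the structure layout to the offset map above:

	#include <linux/kernel.h>	/* BUILD_BUG_ON, offsetof */

	static inline void cryp_register_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct cryp_register, itcr) != 0x80);
		BUILD_BUG_ON(offsetof(struct cryp_register, periphId0) != 0xFE0);
		BUILD_BUG_ON(offsetof(struct cryp_register, pcellId3) != 0xFFC);
	}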
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
new file mode 100644
index 000000000000..6dcffe15c2bc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -0,0 +1,123 @@
1/**
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
4 * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
5 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
6 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
7 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
8 * License terms: GNU General Public License (GPL) version 2
9 */
10
11#ifndef _CRYP_P_H_
12#define _CRYP_P_H_
13
14#include <linux/io.h>
15#include <linux/bitops.h>
16
17#include "cryp.h"
18#include "cryp_irqp.h"
19
20/**
21 * Generic Macros
22 */
23#define CRYP_SET_BITS(reg_name, mask) \
24 writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
25
26#define CRYP_WRITE_BIT(reg_name, val, mask) \
27 writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\
28 ((val) & (mask))), reg_name)
29
30#define CRYP_TEST_BITS(reg_name, val) \
31 (readl_relaxed(reg_name) & (val))
32
33#define CRYP_PUT_BITS(reg, val, shift, mask) \
34 writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
35 (((u32)val << shift) & (mask))), reg)
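Worked example of the read-modify-write sequence these macros wrap, using the ALGOMODE field defined further down in this header (the CRYP_ALGO_AES_CBC enumerator is assumed from cryp.h):

	/*
	 *	CRYP_PUT_BITS(&reg->cr, CRYP_ALGO_AES_CBC,
	 *		      CRYP_CR_ALGOMODE_POS, CRYP_CR_ALGOMODE_MASK);
	 *
	 * reads CR, clears bits 5:3, ORs in the mode value shifted up to
	 * bit 3 and writes CR back, leaving every other field untouched.
	 */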
36
37/**
38 * CRYP specific Macros
39 */
40#define CRYP_PERIPHERAL_ID0 0xE3
41#define CRYP_PERIPHERAL_ID1 0x05
42
43#define CRYP_PERIPHERAL_ID2_DB8500 0x28
44#define CRYP_PERIPHERAL_ID3 0x00
45
46#define CRYP_PCELL_ID0 0x0D
47#define CRYP_PCELL_ID1 0xF0
48#define CRYP_PCELL_ID2 0x05
49#define CRYP_PCELL_ID3 0xB1
50
51/**
52 * CRYP register default values
53 */
54#define MAX_DEVICE_SUPPORT 2
55
56/* Priv set, keyrden set and datatype 8bits swapped set as default. */
57#define CRYP_CR_DEFAULT 0x0482
58#define CRYP_DMACR_DEFAULT 0x0
59#define CRYP_IMSC_DEFAULT 0x0
60#define CRYP_DIN_DEFAULT 0x0
61#define CRYP_DOUT_DEFAULT 0x0
62#define CRYP_KEY_DEFAULT 0x0
63#define CRYP_INIT_VECT_DEFAULT 0x0
64
65/**
66 * CRYP Control register specific mask
67 */
68#define CRYP_CR_SECURE_MASK BIT(0)
69#define CRYP_CR_PRLG_MASK BIT(1)
70#define CRYP_CR_ALGODIR_MASK BIT(2)
71#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3))
72#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6))
73#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8))
74#define CRYP_CR_KEYRDEN_MASK BIT(10)
75#define CRYP_CR_KSE_MASK BIT(11)
76#define CRYP_CR_START_MASK BIT(12)
77#define CRYP_CR_INIT_MASK BIT(13)
78#define CRYP_CR_FFLUSH_MASK BIT(14)
79#define CRYP_CR_CRYPEN_MASK BIT(15)
80#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\
81 CRYP_CR_PRLG_MASK |\
82 CRYP_CR_ALGODIR_MASK |\
83 CRYP_CR_ALGOMODE_MASK |\
84 CRYP_CR_DATATYPE_MASK |\
85 CRYP_CR_KEYSIZE_MASK |\
86 CRYP_CR_KEYRDEN_MASK |\
87 CRYP_CR_DATATYPE_MASK)
88
89
90#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1))
91#define CRYP_SR_IFEM_MASK BIT(0)
92#define CRYP_SR_BUSY_MASK BIT(4)
93
94/**
95 * Bit position used while setting bits in register
96 */
97#define CRYP_CR_PRLG_POS 1
98#define CRYP_CR_ALGODIR_POS 2
99#define CRYP_CR_ALGOMODE_POS 3
100#define CRYP_CR_DATATYPE_POS 6
101#define CRYP_CR_KEYSIZE_POS 8
102#define CRYP_CR_KEYRDEN_POS 10
103#define CRYP_CR_KSE_POS 11
104#define CRYP_CR_START_POS 12
105#define CRYP_CR_INIT_POS 13
106#define CRYP_CR_CRYPEN_POS 15
107
108#define CRYP_SR_BUSY_POS 4
109
110/**
111 * CRYP PCRs------PC_NAND control register
112 * BIT_MASK
113 */
114#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0))
115#define CRYP_DMA_REQ_MASK_POS 0
116
117
118struct cryp_system_context {
119 /* CRYP Register structure */
120 struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT];
121};
122
123#endif
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
new file mode 100644
index 000000000000..b2f90d9bac72
--- /dev/null
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -0,0 +1,11 @@
1#
2# Copyright (C) ST-Ericsson SA 2010
3# Author: Shujuan Chen (shujuan.chen@stericsson.com)
4# License terms: GNU General Public License (GPL) version 2
5#
6ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
7CFLAGS_hash_core.o := -DDEBUG -O0
8endif
9
10obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
11ux500_hash-objs := hash_core.o
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
new file mode 100644
index 000000000000..cd9351cb24df
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -0,0 +1,395 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Shujuan Chen (shujuan.chen@stericsson.com)
4 * Author: Joakim Bech (joakim.xx.bech@stericsson.com)
5 * Author: Berne Hebark (berne.hebark@stericsson.com))
6 * License terms: GNU General Public License (GPL) version 2
7 */
8#ifndef _HASH_ALG_H
9#define _HASH_ALG_H
10
11#include <linux/bitops.h>
12
13#define HASH_BLOCK_SIZE 64
14#define HASH_DMA_ALIGN_SIZE 4
15#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024
16#define HASH_BYTES_PER_WORD 4
17
18/* Maximum value of the length's high word */
19#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL
20
21/* Power on Reset values HASH registers */
22#define HASH_RESET_CR_VALUE 0x0
23#define HASH_RESET_STR_VALUE 0x0
24
25/* Number of context swap registers */
26#define HASH_CSR_COUNT 52
27
28#define HASH_RESET_CSRX_REG_VALUE 0x0
29#define HASH_RESET_CSFULL_REG_VALUE 0x0
30#define HASH_RESET_CSDATAIN_REG_VALUE 0x0
31
32#define HASH_RESET_INDEX_VAL 0x0
33#define HASH_RESET_BIT_INDEX_VAL 0x0
34#define HASH_RESET_BUFFER_VAL 0x0
35#define HASH_RESET_LEN_HIGH_VAL 0x0
36#define HASH_RESET_LEN_LOW_VAL 0x0
37
38/* Control register bitfields */
39#define HASH_CR_RESUME_MASK 0x11FCF
40
41#define HASH_CR_SWITCHON_POS 31
42#define HASH_CR_SWITCHON_MASK BIT(31)
43
44#define HASH_CR_EMPTYMSG_POS 20
45#define HASH_CR_EMPTYMSG_MASK BIT(20)
46
47#define HASH_CR_DINF_POS 12
48#define HASH_CR_DINF_MASK BIT(12)
49
50#define HASH_CR_NBW_POS 8
51#define HASH_CR_NBW_MASK 0x00000F00UL
52
53#define HASH_CR_LKEY_POS 16
54#define HASH_CR_LKEY_MASK BIT(16)
55
56#define HASH_CR_ALGO_POS 7
57#define HASH_CR_ALGO_MASK BIT(7)
58
59#define HASH_CR_MODE_POS 6
60#define HASH_CR_MODE_MASK BIT(6)
61
62#define HASH_CR_DATAFORM_POS 4
63#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5))
64
65#define HASH_CR_DMAE_POS 3
66#define HASH_CR_DMAE_MASK BIT(3)
67
68#define HASH_CR_INIT_POS 2
69#define HASH_CR_INIT_MASK BIT(2)
70
71#define HASH_CR_PRIVN_POS 1
72#define HASH_CR_PRIVN_MASK BIT(1)
73
74#define HASH_CR_SECN_POS 0
75#define HASH_CR_SECN_MASK BIT(0)
76
77/* Start register bitfields */
78#define HASH_STR_DCAL_POS 8
79#define HASH_STR_DCAL_MASK BIT(8)
80#define HASH_STR_DEFAULT 0x0
81
82#define HASH_STR_NBLW_POS 0
83#define HASH_STR_NBLW_MASK 0x0000001FUL
84
85#define HASH_NBLW_MAX_VAL 0x1F
86
87/* PrimeCell IDs */
88#define HASH_P_ID0 0xE0
89#define HASH_P_ID1 0x05
90#define HASH_P_ID2 0x38
91#define HASH_P_ID3 0x00
92#define HASH_CELL_ID0 0x0D
93#define HASH_CELL_ID1 0xF0
94#define HASH_CELL_ID2 0x05
95#define HASH_CELL_ID3 0xB1
96
97#define HASH_SET_BITS(reg_name, mask) \
98 writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
99
100#define HASH_CLEAR_BITS(reg_name, mask) \
101 writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name)
102
103#define HASH_PUT_BITS(reg, val, shift, mask) \
104 writel_relaxed(((readl(reg) & ~(mask)) | \
105 (((u32)val << shift) & (mask))), reg)
106
107#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len))
108
109#define HASH_INITIALIZE \
110 HASH_PUT_BITS( \
111 &device_data->base->cr, \
112 0x01, HASH_CR_INIT_POS, \
113 HASH_CR_INIT_MASK)
114
115#define HASH_SET_DATA_FORMAT(data_format) \
116 HASH_PUT_BITS( \
117 &device_data->base->cr, \
118 (u32) (data_format), HASH_CR_DATAFORM_POS, \
119 HASH_CR_DATAFORM_MASK)
120#define HASH_SET_NBLW(val) \
121 HASH_PUT_BITS( \
122 &device_data->base->str, \
123 (u32) (val), HASH_STR_NBLW_POS, \
124 HASH_STR_NBLW_MASK)
125#define HASH_SET_DCAL \
126 HASH_PUT_BITS( \
127 &device_data->base->str, \
128 0x01, HASH_STR_DCAL_POS, \
129 HASH_STR_DCAL_MASK)
130
131/* Hardware access method */
132enum hash_mode {
133 HASH_MODE_CPU,
134 HASH_MODE_DMA
135};
136
137/**
138 * struct uint64 - Structure to handle 64 bits integers.
139 * @high_word: Most significant bits.
140 * @low_word: Least significant bits.
141 *
142 * Used to handle 64 bits integers.
143 */
144struct uint64 {
145 u32 high_word;
146 u32 low_word;
147};
148
149/**
150 * struct hash_register - Contains all registers in ux500 hash hardware.
151 * @cr: HASH control register (0x000).
152 * @din: HASH data input register (0x004).
153 * @str: HASH start register (0x008).
154 * @hx: HASH digest register 0..7 (0x00c-0x01C).
155 * @padding0: Reserved (0x02C).
156 * @itcr: Integration test control register (0x080).
157 * @itip: Integration test input register (0x084).
158 * @itop: Integration test output register (0x088).
159 * @padding1: Reserved (0x08C).
160 * @csfull: HASH context full register (0x0F8).
161 * @csdatain: HASH context swap data input register (0x0FC).
162 * @csrx: HASH context swap register 0..51 (0x100-0x1CC).
163 * @padding2: Reserved (0x1D0).
164 * @periphid0: HASH peripheral identification register 0 (0xFE0).
165 * @periphid1: HASH peripheral identification register 1 (0xFE4).
166 * @periphid2: HASH peripheral identification register 2 (0xFE8).
167 * @periphid3: HASH peripheral identification register 3 (0xFEC).
168 * @cellid0: HASH PCell identification register 0 (0xFF0).
169 * @cellid1: HASH PCell identification register 1 (0xFF4).
170 * @cellid2: HASH PCell identification register 2 (0xFF8).
171 * @cellid3: HASH PCell identification register 3 (0xFFC).
172 *
173 * The device communicates with the HASH peripheral via 32-bit-wide control
174 * registers accessible over the 32-bit AMBA rev. 2.0 AHB bus. The structure
175 * below lists the registers used.
176 */
177struct hash_register {
178 u32 cr;
179 u32 din;
180 u32 str;
181 u32 hx[8];
182
183 u32 padding0[(0x080 - 0x02C) / sizeof(u32)];
184
185 u32 itcr;
186 u32 itip;
187 u32 itop;
188
189 u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)];
190
191 u32 csfull;
192 u32 csdatain;
193 u32 csrx[HASH_CSR_COUNT];
194
195 u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)];
196
197 u32 periphid0;
198 u32 periphid1;
199 u32 periphid2;
200 u32 periphid3;
201
202 u32 cellid0;
203 u32 cellid1;
204 u32 cellid2;
205 u32 cellid3;
206};
207
208/**
209 * struct hash_state - Hash context state.
210 * @temp_cr: Temporary HASH Control Register.
211 * @str_reg: HASH Start Register.
212 * @din_reg: HASH Data Input Register.
213 * @csr[52]: HASH Context Swap Registers 0-51.
214 * @csfull: HASH Context Swap Register 52, i.e. the status flags.
215 * @csdatain: HASH Context Swap Register 53, i.e. the input data.
216 * @buffer: Working buffer for messages going to the hardware.
217 * @length: Length of the part of message hashed so far (floor(N/64) * 64).
218 * @index: Valid number of bytes in buffer (N % 64).
219 * @bit_index: Valid number of bits in buffer (N % 8).
220 *
221 * This structure is used between context switches, i.e. when ongoing jobs are
222 * interrupted by new jobs. When this happens we need to store intermediate
223 * results in software.
224 *
225 * WARNING: The ordering of the members ensures that "buffer" is aligned
226 * on a 4-byte boundary. This is highly implementation dependent and MUST
227 * be checked whenever this code is ported to new platforms.
228 */
229struct hash_state {
230 u32 temp_cr;
231 u32 str_reg;
232 u32 din_reg;
233 u32 csr[52];
234 u32 csfull;
235 u32 csdatain;
236 u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)];
237 struct uint64 length;
238 u8 index;
239 u8 bit_index;
240};
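A sketch (not in the patch) of the porting check the WARNING above asks for; it fails the build if "buffer" ever loses its 4-byte alignment:

	#include <linux/kernel.h>	/* BUILD_BUG_ON, offsetof */

	static inline void hash_state_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct hash_state, buffer) %
			     HASH_DMA_ALIGN_SIZE != 0);
	}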
241
242/**
243 * enum hash_device_id - HASH device ID.
244 * @HASH_DEVICE_ID_0: Hash hardware with ID 0
245 * @HASH_DEVICE_ID_1: Hash hardware with ID 1
246 */
247enum hash_device_id {
248 HASH_DEVICE_ID_0 = 0,
249 HASH_DEVICE_ID_1 = 1
250};
251
252/**
253 * enum hash_data_format - HASH data format.
254 * @HASH_DATA_32_BITS: 32 bits data format
255 * @HASH_DATA_16_BITS: 16 bits data format
256 * @HASH_DATA_8_BITS: 8 bits data format.
257 * @HASH_DATA_1_BITS: 1 bit data format.
258 */
259enum hash_data_format {
260 HASH_DATA_32_BITS = 0x0,
261 HASH_DATA_16_BITS = 0x1,
262 HASH_DATA_8_BITS = 0x2,
263 HASH_DATA_1_BIT = 0x3
264};
265
266/**
267 * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm.
268 * @HASH_ALGO_SHA1: Indicates that SHA1 is used.
269 * @HASH_ALGO_SHA2: Indicates that SHA2 (SHA256) is used.
270 */
271enum hash_algo {
272 HASH_ALGO_SHA1 = 0x0,
273 HASH_ALGO_SHA256 = 0x1
274};
275
276/**
277 * enum hash_op - Enumeration for selecting between HASH or HMAC mode.
278 * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode.
279 * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC.
280 */
281enum hash_op {
282 HASH_OPER_MODE_HASH = 0x0,
283 HASH_OPER_MODE_HMAC = 0x1
284};
285
286/**
287 * struct hash_config - Configuration data for the hardware.
288 * @data_format: Format of data entered into the hash data in register.
289 * @algorithm: Algorithm selection bit.
290 * @oper_mode: Operating mode selection bit.
291 */
292struct hash_config {
293 int data_format;
294 int algorithm;
295 int oper_mode;
296};
297
298/**
299 * struct hash_dma - Structure used for dma.
300 * @mask: DMA capabilities bitmap mask.
301 * @complete: Used to maintain state for a "completion".
302 * @chan_mem2hash: DMA channel.
303 * @cfg_mem2hash: DMA channel configuration.
304 * @sg_len: Scatterlist length.
305 * @sg: Scatterlist.
306 * @nents: Number of sg entries.
307 */
308struct hash_dma {
309 dma_cap_mask_t mask;
310 struct completion complete;
311 struct dma_chan *chan_mem2hash;
312 void *cfg_mem2hash;
313 int sg_len;
314 struct scatterlist *sg;
315 int nents;
316};
317
318/**
319 * struct hash_ctx - The context used for hash calculations.
320 * @key: The key used in the operation.
321 * @keylen: The length of the key.
323 * @config: The current configuration.
324 * @digestsize: The size of current digest.
325 * @device: Pointer to the device structure.
326 */
327struct hash_ctx {
328 u8 *key;
329 u32 keylen;
330 struct hash_config config;
331 int digestsize;
332 struct hash_device_data *device;
333};
334
335/**
336 * struct hash_req_ctx - The request context used for hash calculations.
337 * @state: The state of the current calculations.
338 * @dma_mode: Used in special cases (workaround), e.g. need to change to
339 * cpu mode, if not supported/working in dma mode.
340 * @updated: Indicates if hardware is initialized for new operations.
341 */
342struct hash_req_ctx {
343 struct hash_state state;
344 bool dma_mode;
345 u8 updated;
346};
347
348/**
349 * struct hash_device_data - structure for a hash device.
350 * @base: Pointer to the hardware base address.
351 * @list_node: For inclusion in klist.
352 * @dev: Pointer to the device dev structure.
353 * @ctx_lock: Spinlock for current_ctx.
354 * @current_ctx: Pointer to the currently allocated context.
355 * @power_state: TRUE = power state on, FALSE = power state off.
356 * @power_state_lock: Spinlock for power_state.
357 * @regulator: Pointer to the device's power control.
358 * @clk: Pointer to the device's clock control.
359 * @restore_dev_state: TRUE = saved state, FALSE = no saved state.
360 * @dma: Structure used for dma.
361 */
362struct hash_device_data {
363 struct hash_register __iomem *base;
364 struct klist_node list_node;
365 struct device *dev;
366	spinlock_t ctx_lock;
367 struct hash_ctx *current_ctx;
368 bool power_state;
369	spinlock_t power_state_lock;
370 struct regulator *regulator;
371 struct clk *clk;
372 bool restore_dev_state;
373 struct hash_state state; /* Used for saving and resuming state */
374 struct hash_dma dma;
375};
376
377int hash_check_hw(struct hash_device_data *device_data);
378
379int hash_setconfiguration(struct hash_device_data *device_data,
380 struct hash_config *config);
381
382void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx);
383
384void hash_get_digest(struct hash_device_data *device_data,
385 u8 *digest, int algorithm);
386
387int hash_hw_update(struct ahash_request *req);
388
389int hash_save_state(struct hash_device_data *device_data,
390 struct hash_state *state);
391
392int hash_resume_state(struct hash_device_data *device_data,
393 const struct hash_state *state);
394
395#endif
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
new file mode 100644
index 000000000000..6dbb9ec709a3
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -0,0 +1,2009 @@
1/*
2 * Cryptographic API.
3 * Support for Nomadik hardware crypto engine.
4 *
5 * Copyright (C) ST-Ericsson SA 2010
6 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
7 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
8 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
9 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
10 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
11 * License terms: GNU General Public License (GPL) version 2
12 */
13
14#include <linux/clk.h>
15#include <linux/device.h>
16#include <linux/err.h>
17#include <linux/init.h>
18#include <linux/io.h>
19#include <linux/klist.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/crypto.h>
24
25#include <linux/regulator/consumer.h>
26#include <linux/dmaengine.h>
27#include <linux/bitops.h>
28
29#include <crypto/internal/hash.h>
30#include <crypto/sha.h>
31#include <crypto/scatterwalk.h>
32#include <crypto/algapi.h>
33
34#include <mach/crypto-ux500.h>
35#include <mach/hardware.h>
36
37#include "hash_alg.h"
38
39#define DEV_DBG_NAME "hashX hashX:"
40
41static int hash_mode;
42module_param(hash_mode, int, 0);
43MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
44
45/**
46 * Pre-calculated empty message digests.
47 */
48static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
49 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
50 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
51 0xaf, 0xd8, 0x07, 0x09
52};
53
54static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
55 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
56 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
57 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
58 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
59};
60
61/* HMAC-SHA1, no key */
62static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
63 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
64 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
65 0x70, 0x69, 0x0e, 0x1d
66};
67
68/* HMAC-SHA256, no key */
69static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
70 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
71 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
72 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
73 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
74};
75
76/**
77 * struct hash_driver_data - data specific to the driver.
78 *
79 * @device_list: A list of registered devices to choose from.
80 * @device_allocation: A semaphore initialized with number of devices.
81 */
82struct hash_driver_data {
83 struct klist device_list;
84 struct semaphore device_allocation;
85};
86
87static struct hash_driver_data driver_data;
88
89/* Declaration of functions */
90/**
91 * hash_messagepad - Pads a message and writes the NBLW bits.
92 * @device_data: Structure for the hash device.
93 * @message: Last word of a message
94 * @index_bytes: The number of bytes in the last message
95 *
96 * This function manages the final part of the digest calculation, when less
97 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
98 *
99 */
100static void hash_messagepad(struct hash_device_data *device_data,
101 const u32 *message, u8 index_bytes);
102
103/**
104 * release_hash_device - Releases a previously allocated hash device.
105 * @device_data: Structure for the hash device.
106 *
107 */
108static void release_hash_device(struct hash_device_data *device_data)
109{
110 spin_lock(&device_data->ctx_lock);
111 device_data->current_ctx->device = NULL;
112 device_data->current_ctx = NULL;
113 spin_unlock(&device_data->ctx_lock);
114
115 /*
116 * The down_interruptible part for this semaphore is called in
117 * hash_get_device_data.
118 */
119 up(&driver_data.device_allocation);
120}
121
122static void hash_dma_setup_channel(struct hash_device_data *device_data,
123 struct device *dev)
124{
125 struct hash_platform_data *platform_data = dev->platform_data;
126 dma_cap_zero(device_data->dma.mask);
127 dma_cap_set(DMA_SLAVE, device_data->dma.mask);
128
129 device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
130 device_data->dma.chan_mem2hash =
131 dma_request_channel(device_data->dma.mask,
132 platform_data->dma_filter,
133 device_data->dma.cfg_mem2hash);
134
135 init_completion(&device_data->dma.complete);
136}
137
138static void hash_dma_callback(void *data)
139{
140 struct hash_ctx *ctx = (struct hash_ctx *) data;
141
142 complete(&ctx->device->dma.complete);
143}
144
145static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
146 int len, enum dma_data_direction direction)
147{
148 struct dma_async_tx_descriptor *desc = NULL;
149 struct dma_chan *channel = NULL;
150 dma_cookie_t cookie;
151
152 if (direction != DMA_TO_DEVICE) {
153 dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
154 __func__);
155 return -EFAULT;
156 }
157
158 sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
159
160 channel = ctx->device->dma.chan_mem2hash;
161 ctx->device->dma.sg = sg;
162 ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
163 ctx->device->dma.sg, ctx->device->dma.nents,
164 direction);
165
166 if (!ctx->device->dma.sg_len) {
167 dev_err(ctx->device->dev,
168 "[%s]: Could not map the sg list (TO_DEVICE)",
169 __func__);
170 return -EFAULT;
171 }
172
173 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
174 "(TO_DEVICE)", __func__);
175 desc = channel->device->device_prep_slave_sg(channel,
176 ctx->device->dma.sg, ctx->device->dma.sg_len,
177 direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL);
178 if (!desc) {
179 dev_err(ctx->device->dev,
180 "[%s]: device_prep_slave_sg() failed!", __func__);
181 return -EFAULT;
182 }
183
184 desc->callback = hash_dma_callback;
185 desc->callback_param = ctx;
186
187 cookie = desc->tx_submit(desc);
188 dma_async_issue_pending(channel);
189
190 return 0;
191}
192
193static void hash_dma_done(struct hash_ctx *ctx)
194{
195 struct dma_chan *chan;
196
197 chan = ctx->device->dma.chan_mem2hash;
198 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
199 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
200 ctx->device->dma.sg_len, DMA_TO_DEVICE);
201
202}
203
204static int hash_dma_write(struct hash_ctx *ctx,
205 struct scatterlist *sg, int len)
206{
207 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
208 if (error) {
209 dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
210 "failed", __func__);
211 return error;
212 }
213
214 return len;
215}
216
217/**
218 * get_empty_message_digest - Returns a pre-calculated digest for
219 * the empty message.
220 * @device_data: Structure for the hash device.
221 * @zero_hash: Buffer to return the empty message digest.
222 * @zero_hash_size: Hash size of the empty message digest.
223 * @zero_digest: Set to true if a pre-calculated digest was returned.
224 */
225static int get_empty_message_digest(
226 struct hash_device_data *device_data,
227 u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
228{
229 int ret = 0;
230 struct hash_ctx *ctx = device_data->current_ctx;
231 *zero_digest = false;
232
233	/*
234	 * Caller is responsible for ctx != NULL.
235	 */
236
237 if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
238 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
239 memcpy(zero_hash, &zero_message_hash_sha1[0],
240 SHA1_DIGEST_SIZE);
241 *zero_hash_size = SHA1_DIGEST_SIZE;
242 *zero_digest = true;
243 } else if (HASH_ALGO_SHA256 ==
244 ctx->config.algorithm) {
245 memcpy(zero_hash, &zero_message_hash_sha256[0],
246 SHA256_DIGEST_SIZE);
247 *zero_hash_size = SHA256_DIGEST_SIZE;
248 *zero_digest = true;
249 } else {
250 dev_err(device_data->dev, "[%s] "
251 "Incorrect algorithm!"
252 , __func__);
253 ret = -EINVAL;
254 goto out;
255 }
256 } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
257 if (!ctx->keylen) {
258 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
259 memcpy(zero_hash, &zero_message_hmac_sha1[0],
260 SHA1_DIGEST_SIZE);
261 *zero_hash_size = SHA1_DIGEST_SIZE;
262 *zero_digest = true;
263 } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
264 memcpy(zero_hash, &zero_message_hmac_sha256[0],
265 SHA256_DIGEST_SIZE);
266 *zero_hash_size = SHA256_DIGEST_SIZE;
267 *zero_digest = true;
268 } else {
269 dev_err(device_data->dev, "[%s] "
270 "Incorrect algorithm!"
271 , __func__);
272 ret = -EINVAL;
273 goto out;
274 }
275 } else {
276			dev_dbg(device_data->dev, "[%s] Continuing hash "
277				"calculation, since an HMAC key is available",
278 __func__);
279 }
280 }
281out:
282
283 return ret;
284}
285
286/**
287 * hash_disable_power - Request to disable power and clock.
288 * @device_data: Structure for the hash device.
289 * @save_device_state: If true, saves the current hw state.
290 *
291 * This function requests that power (regulator) and clock be disabled,
292 * and can also save the current hw state.
293 */
294static int hash_disable_power(
295 struct hash_device_data *device_data,
296 bool save_device_state)
297{
298 int ret = 0;
299 struct device *dev = device_data->dev;
300
301 spin_lock(&device_data->power_state_lock);
302 if (!device_data->power_state)
303 goto out;
304
305 if (save_device_state) {
306 hash_save_state(device_data,
307 &device_data->state);
308 device_data->restore_dev_state = true;
309 }
310
311 clk_disable(device_data->clk);
312 ret = regulator_disable(device_data->regulator);
313 if (ret)
314 dev_err(dev, "[%s] regulator_disable() failed!", __func__);
315
316 device_data->power_state = false;
317
318out:
319 spin_unlock(&device_data->power_state_lock);
320
321 return ret;
322}
323
324/**
325 * hash_enable_power - Request to enable power and clock.
326 * @device_data: Structure for the hash device.
327 * @restore_device_state: If true, restores a previous saved hw state.
328 *
329 * This function requests that power (regulator) and clock be enabled,
330 * and can also restore a previously saved hw state.
331 */
332static int hash_enable_power(
333 struct hash_device_data *device_data,
334 bool restore_device_state)
335{
336 int ret = 0;
337 struct device *dev = device_data->dev;
338
339 spin_lock(&device_data->power_state_lock);
340 if (!device_data->power_state) {
341 ret = regulator_enable(device_data->regulator);
342 if (ret) {
343 dev_err(dev, "[%s]: regulator_enable() failed!",
344 __func__);
345 goto out;
346 }
347 ret = clk_enable(device_data->clk);
348 if (ret) {
349 dev_err(dev, "[%s]: clk_enable() failed!",
350 __func__);
351 ret = regulator_disable(
352 device_data->regulator);
353 goto out;
354 }
355 device_data->power_state = true;
356 }
357
358 if (device_data->restore_dev_state) {
359 if (restore_device_state) {
360 device_data->restore_dev_state = false;
361 hash_resume_state(device_data,
362 &device_data->state);
363 }
364 }
365out:
366 spin_unlock(&device_data->power_state_lock);
367
368 return ret;
369}
370
371/**
372 * hash_get_device_data - Checks for an available hash device and return it.
373 * @hash_ctx: Structure for the hash context.
374 * @device_data: Structure for the hash device.
375 *
376 * This function checks for an available hash device and returns it to
377 * the caller.
378 * Note! The caller must release the device by calling up().
379 */
380static int hash_get_device_data(struct hash_ctx *ctx,
381 struct hash_device_data **device_data)
382{
383 int ret;
384 struct klist_iter device_iterator;
385 struct klist_node *device_node;
386 struct hash_device_data *local_device_data = NULL;
387
388 /* Wait until a device is available */
389 ret = down_interruptible(&driver_data.device_allocation);
390 if (ret)
391 return ret; /* Interrupted */
392
393 /* Select a device */
394 klist_iter_init(&driver_data.device_list, &device_iterator);
395 device_node = klist_next(&device_iterator);
396 while (device_node) {
397 local_device_data = container_of(device_node,
398 struct hash_device_data, list_node);
399 spin_lock(&local_device_data->ctx_lock);
400 /* current_ctx allocates a device, NULL = unallocated */
401 if (local_device_data->current_ctx) {
402 device_node = klist_next(&device_iterator);
403 } else {
404 local_device_data->current_ctx = ctx;
405 ctx->device = local_device_data;
406 spin_unlock(&local_device_data->ctx_lock);
407 break;
408 }
409 spin_unlock(&local_device_data->ctx_lock);
410 }
411 klist_iter_exit(&device_iterator);
412
413 if (!device_node) {
414 /**
415 * No free device found.
416 * Since we allocated a device with down_interruptible, this
417 * should not be able to happen.
418 * Number of available devices, which are contained in
419 * device_allocation, is therefore decremented by not doing
420 * an up(device_allocation).
421 */
422 return -EBUSY;
423 }
424
425 *device_data = local_device_data;
426
427 return 0;
428}
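Usage sketch: every successful hash_get_device_data() must eventually be paired with a release that up()s the semaphore, which is what release_hash_device() above does. Shape of a typical caller (error paths trimmed):

	static int example_hash_job(struct hash_ctx *ctx)
	{
		struct hash_device_data *device_data;
		int ret;

		ret = hash_get_device_data(ctx, &device_data);	/* may sleep */
		if (ret)
			return ret;

		/* ... drive the hardware through device_data->base ... */

		release_hash_device(device_data);	/* up()s device_allocation */
		return 0;
	}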
429
430/**
431 * hash_hw_write_key - Writes the key to the hardware registers.
432 *
433 * @device_data: Structure for the hash device.
434 * @key: Key to be written.
435 * @keylen: The length of the key.
436 *
437 * Note! This function DOES NOT write to the NBLW register, even though
438 * specified in the hw design spec. Either due to incorrect info in the
439 * spec or due to a bug in the hw.
440 */
441static void hash_hw_write_key(struct hash_device_data *device_data,
442 const u8 *key, unsigned int keylen)
443{
444 u32 word = 0;
445 int nwords = 1;
446
447 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
448
449 while (keylen >= 4) {
450 u32 *key_word = (u32 *)key;
451
452 HASH_SET_DIN(key_word, nwords);
453 keylen -= 4;
454 key += 4;
455 }
456
457 /* Take care of the remaining bytes in the last word */
458 if (keylen) {
459 word = 0;
460 while (keylen) {
461 word |= (key[keylen - 1] << (8 * (keylen - 1)));
462 keylen--;
463 }
464
465 HASH_SET_DIN(&word, nwords);
466 }
467
468 while (device_data->base->str & HASH_STR_DCAL_MASK)
469 cpu_relax();
470
471 HASH_SET_DCAL;
472
473 while (device_data->base->str & HASH_STR_DCAL_MASK)
474 cpu_relax();
475}
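Worked example (illustrative only) of the tail packing above for a key length that is not a multiple of four; the bytes land little-endian in the DIN word:

	u32 word = 0;
	const u8 tail[3] = { 0xAA, 0xBB, 0xCC };
	unsigned int keylen = 3;

	while (keylen) {
		word |= (tail[keylen - 1] << (8 * (keylen - 1)));
		keylen--;
	}
	/* word == 0x00CCBBAA: key byte i occupies byte lane i. */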
476
477/**
478 * init_hash_hw - Initialise the hash hardware for a new calculation.
479 * @device_data: Structure for the hash device.
480 * @ctx: The hash context.
481 *
482 * This function will enable the bits needed to clear and start a new
483 * calculation.
484 */
485static int init_hash_hw(struct hash_device_data *device_data,
486 struct hash_ctx *ctx)
487{
488 int ret = 0;
489
490 ret = hash_setconfiguration(device_data, &ctx->config);
491 if (ret) {
492 dev_err(device_data->dev, "[%s] hash_setconfiguration() "
493 "failed!", __func__);
494 return ret;
495 }
496
497 hash_begin(device_data, ctx);
498
499 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
500 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
501
502 return ret;
503}
504
505/**
506 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
507 *
508 * @sg: Scatterlist.
509 * @size: Size in bytes.
510 * @aligned: True if sg data aligned to work in DMA mode.
511 *
512 */
513static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
514{
515 int nents = 0;
516 bool aligned_data = true;
517
518 while (size > 0 && sg) {
519 nents++;
520 size -= sg->length;
521
522 /* hash_set_dma_transfer will align last nent */
523 if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
524 || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) &&
525 size > 0))
526 aligned_data = false;
527
528 sg = sg_next(sg);
529 }
530
531 if (aligned)
532 *aligned = aligned_data;
533
534 if (size != 0)
535 return -EFAULT;
536
537 return nents;
538}
539
540/**
541 * hash_dma_valid_data - checks for dma valid sg data.
542 * @sg: Scatterlist.
543 * @datasize: Datasize in bytes.
544 *
545 * NOTE! This function checks for DMA-valid sg data, since the DMA
546 * engine only accepts data sizes that are a multiple of the word size.
547 */
548static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
549{
550 bool aligned;
551
552 /* Need to include at least one nent, else error */
553 if (hash_get_nents(sg, datasize, &aligned) < 1)
554 return false;
555
556 return aligned;
557}
558
559/**
560 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
561 * @req: The hash request for the job.
562 *
563 * Initialize structures.
564 */
565static int hash_init(struct ahash_request *req)
566{
567 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
568 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
569 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
570
571 if (!ctx->key)
572 ctx->keylen = 0;
573
574 memset(&req_ctx->state, 0, sizeof(struct hash_state));
575 req_ctx->updated = 0;
576 if (hash_mode == HASH_MODE_DMA) {
577 if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
578 req_ctx->dma_mode = false; /* Don't use DMA */
579
580			pr_debug(DEV_DBG_NAME " [%s] DMA mode, but falling "
581				"back to CPU mode for data size < %d",
582 __func__, HASH_DMA_ALIGN_SIZE);
583 } else {
584 if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
585 hash_dma_valid_data(req->src,
586 req->nbytes)) {
587 req_ctx->dma_mode = true;
588 } else {
589 req_ctx->dma_mode = false;
590				pr_debug(DEV_DBG_NAME " [%s] DMA mode, but using"
591					" CPU mode for data length < %d"
592					" or non-aligned data, except"
593					" in the last nent", __func__,
594 HASH_DMA_PERFORMANCE_MIN_SIZE);
595 }
596 }
597 }
598 return 0;
599}
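Condensed restatement (sketch, not driver code) of the decision hash_init() makes when the module is loaded with hash_mode == HASH_MODE_DMA; small or unaligned requests silently fall back to CPU mode:

	static bool hash_would_use_dma(struct ahash_request *req)
	{
		return req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
		       hash_dma_valid_data(req->src, req->nbytes);
	}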
600
601/**
602 * hash_processblock - This function processes a single block of 512 bits (64
603 * bytes), word aligned, starting at message.
604 * @device_data: Structure for the hash device.
605 * @message: Block (512 bits) of message to be written to
606 * the HASH hardware.
607 *
608 */
609static void hash_processblock(
610 struct hash_device_data *device_data,
611 const u32 *message, int length)
612{
613 int len = length / HASH_BYTES_PER_WORD;
614 /*
615 * NBLW bits. Reset the number of bits in last word (NBLW).
616 */
617 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
618
619 /*
620 * Write message data to the HASH_DIN register.
621 */
622 HASH_SET_DIN(message, len);
623}
624
625/**
626 * hash_messagepad - Pads a message and writes the NBLW bits.
627 * @device_data: Structure for the hash device.
628 * @message: Last word of a message.
629 * @index_bytes: The number of bytes in the last message.
630 *
631 * This function manages the final part of the digest calculation, when less
632 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
633 *
634 */
635static void hash_messagepad(struct hash_device_data *device_data,
636 const u32 *message, u8 index_bytes)
637{
638 int nwords = 1;
639
640 /*
641 * Clear hash str register, only clear NBLW
642 * since DCAL will be reset by hardware.
643 */
644 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
645
646 /* Main loop */
647 while (index_bytes >= 4) {
648 HASH_SET_DIN(message, nwords);
649 index_bytes -= 4;
650 message++;
651 }
652
653 if (index_bytes)
654 HASH_SET_DIN(message, nwords);
655
656 while (device_data->base->str & HASH_STR_DCAL_MASK)
657 cpu_relax();
658
659	/* index_bytes == 0 => NBLW = 0 (all 32 bits of DATAIN are valid) */
660 HASH_SET_NBLW(index_bytes * 8);
661 dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
662 readl_relaxed(&device_data->base->din),
663 (int)(readl_relaxed(&device_data->base->str) &
664 HASH_STR_NBLW_MASK));
665 HASH_SET_DCAL;
666 dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
667 __func__, readl_relaxed(&device_data->base->din),
668 (int)(readl_relaxed(&device_data->base->str) &
669 HASH_STR_NBLW_MASK));
670
671 while (device_data->base->str & HASH_STR_DCAL_MASK)
672 cpu_relax();
673}
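
/*
 * Note on the padding above: after the word loop, index_bytes is 0..3.
 * NBLW is programmed to index_bytes * 8, the number of valid bits in
 * the last word written to HASH_DIN; NBLW == 0 means all 32 bits of
 * the last word are valid.
 */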
674
675/**
676 * hash_incrementlength - Increments the length of the current message.
677 * @ctx: Hash context
678 * @incr: Number of bytes processed, to be added to the message length
679 *
680 * Overflow cannot occur, because conditions for overflow are checked in
681 * hash_hw_update.
682 */
683static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
684{
685 ctx->state.length.low_word += incr;
686
687 /* Check for wrap-around */
688 if (ctx->state.length.low_word < incr)
689 ctx->state.length.high_word++;
690}
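
/*
 * Sketch (hypothetical alternative, not part of the driver): the two
 * 32-bit words above form one 64-bit byte counter, so with explicit
 * 64-bit arithmetic the same update would be:
 */
static void __maybe_unused hash_incrementlength64(struct hash_req_ctx *ctx,
						  u32 incr)
{
	u64 length = ((u64)ctx->state.length.high_word << 32) |
		     ctx->state.length.low_word;

	length += incr;
	ctx->state.length.low_word = lower_32_bits(length);
	ctx->state.length.high_word = upper_32_bits(length);
}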
691
692/**
693 * hash_setconfiguration - Sets the required configuration for the hash
694 * hardware.
695 * @device_data: Structure for the hash device.
696 * @config: Pointer to a configuration structure.
697 */
698int hash_setconfiguration(struct hash_device_data *device_data,
699 struct hash_config *config)
700{
701 int ret = 0;
702
703 if (config->algorithm != HASH_ALGO_SHA1 &&
704 config->algorithm != HASH_ALGO_SHA256)
705 return -EPERM;
706
707	/*
708	 * DATAFORM bits. Set the DATAFORM bits according to
709	 * config->data_format (8-bit data in all the init paths below).
710	 */
711 HASH_SET_DATA_FORMAT(config->data_format);
712
713 /*
714 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
715 */
716 switch (config->algorithm) {
717 case HASH_ALGO_SHA1:
718 HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
719 break;
720
721 case HASH_ALGO_SHA256:
722 HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
723 break;
724
725 default:
726 dev_err(device_data->dev, "[%s] Incorrect algorithm.",
727 __func__);
728 return -EPERM;
729 }
730
731 /*
732 * MODE bit. This bit selects between HASH or HMAC mode for the
733 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
734 */
735 if (HASH_OPER_MODE_HASH == config->oper_mode)
736 HASH_CLEAR_BITS(&device_data->base->cr,
737 HASH_CR_MODE_MASK);
738 else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
739 HASH_SET_BITS(&device_data->base->cr,
740 HASH_CR_MODE_MASK);
741 if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
742 /* Truncate key to blocksize */
743 dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
744 HASH_SET_BITS(&device_data->base->cr,
745 HASH_CR_LKEY_MASK);
746 } else {
747 dev_dbg(device_data->dev, "[%s] LKEY cleared",
748 __func__);
749 HASH_CLEAR_BITS(&device_data->base->cr,
750 HASH_CR_LKEY_MASK);
751 }
752 } else { /* Wrong hash mode */
753 ret = -EPERM;
754 dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
755 __func__);
756 }
757 return ret;
758}
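
/*
 * Illustrative example: a configuration as consumed by
 * hash_setconfiguration() above, with the same values the SHA-256 HMAC
 * init path below programs into the context:
 *
 *	struct hash_config config = {
 *		.data_format = HASH_DATA_8_BITS,
 *		.algorithm   = HASH_ALGO_SHA256,
 *		.oper_mode   = HASH_OPER_MODE_HMAC,
 *	};
 */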
759
760/**
761 * hash_begin - This routine resets some globals and initializes the hash
762 * hardware.
763 * @device_data: Structure for the hash device.
764 * @ctx: Hash context.
765 */
766void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
767{
768 /* HW and SW initializations */
769 /* Note: there is no need to initialize buffer and digest members */
770
771 while (device_data->base->str & HASH_STR_DCAL_MASK)
772 cpu_relax();
773
774 /*
775 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
776	 * prepare the HASH accelerator to compute the message
777 * digest of a new message.
778 */
779 HASH_INITIALIZE;
780
781 /*
782 * NBLW bits. Reset the number of bits in last word (NBLW).
783 */
784 HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
785}
786
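/**
 * hash_process_data - Process data in 512-bit (64-byte) blocks, buffering
 *                     any trailing partial block in the request context.
 * @device_data:	Structure for the hash device.
 * @ctx:		Hash context.
 * @req_ctx:		Request context, holding the partial-block buffer.
 * @msg_length:		Length in bytes of the data to process.
 * @data_buffer:	The data to be processed.
 * @buffer:		Partial-block buffer (HASH_BLOCK_SIZE bytes).
 * @index:		Number of bytes currently held in @buffer; updated.
 */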
787int hash_process_data(
788 struct hash_device_data *device_data,
789 struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
790 int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
791{
792 int ret = 0;
793 u32 count;
794
795 do {
796 if ((*index + msg_length) < HASH_BLOCK_SIZE) {
797 for (count = 0; count < msg_length; count++) {
798 buffer[*index + count] =
799 *(data_buffer + count);
800 }
801 *index += msg_length;
802 msg_length = 0;
803 } else {
804 if (req_ctx->updated) {
805
806 ret = hash_resume_state(device_data,
807 &device_data->state);
808 memmove(req_ctx->state.buffer,
809 device_data->state.buffer,
810					HASH_BLOCK_SIZE);
811 if (ret) {
812 dev_err(device_data->dev, "[%s] "
813 "hash_resume_state()"
814 " failed!", __func__);
815 goto out;
816 }
817 } else {
818 ret = init_hash_hw(device_data, ctx);
819 if (ret) {
820 dev_err(device_data->dev, "[%s] "
821 "init_hash_hw()"
822 " failed!", __func__);
823 goto out;
824 }
825 req_ctx->updated = 1;
826 }
827			/*
828			 * If 'data_buffer' is four-byte aligned and the
829			 * local buffer does not hold any data, we can
830			 * write data directly from 'data_buffer' to the
831			 * HW peripheral; otherwise we first copy the data
832			 * to the local buffer.
833			 */
834 if ((0 == (((u32)data_buffer) % 4))
835 && (0 == *index))
836 hash_processblock(device_data,
837 (const u32 *)
838 data_buffer, HASH_BLOCK_SIZE);
839 else {
840 for (count = 0; count <
841 (u32)(HASH_BLOCK_SIZE -
842 *index);
843 count++) {
844 buffer[*index + count] =
845 *(data_buffer + count);
846 }
847 hash_processblock(device_data,
848 (const u32 *)buffer,
849 HASH_BLOCK_SIZE);
850 }
851 hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
852 data_buffer += (HASH_BLOCK_SIZE - *index);
853
854 msg_length -= (HASH_BLOCK_SIZE - *index);
855 *index = 0;
856
857 ret = hash_save_state(device_data,
858 &device_data->state);
859
860 memmove(device_data->state.buffer,
861 req_ctx->state.buffer,
862				HASH_BLOCK_SIZE);
863 if (ret) {
864 dev_err(device_data->dev, "[%s] "
865 "hash_save_state()"
866 " failed!", __func__);
867 goto out;
868 }
869 }
870 } while (msg_length != 0);
871out:
872
873 return ret;
874}
875
876/**
877 * hash_dma_final - The hash dma final function for SHA1/SHA256.
878 * @req: The hash request for the job.
879 */
880static int hash_dma_final(struct ahash_request *req)
881{
882 int ret = 0;
883 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
884 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
885 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
886 struct hash_device_data *device_data;
887 u8 digest[SHA256_DIGEST_SIZE];
888 int bytes_written = 0;
889
890 ret = hash_get_device_data(ctx, &device_data);
891 if (ret)
892 return ret;
893
894	dev_dbg(device_data->dev, "[%s] (ctx=%p)!", __func__, ctx);
895
896 if (req_ctx->updated) {
897 ret = hash_resume_state(device_data, &device_data->state);
898
899 if (ret) {
900 dev_err(device_data->dev, "[%s] hash_resume_state() "
901 "failed!", __func__);
902 goto out;
903 }
904
905 }
906
907 if (!req_ctx->updated) {
908 ret = hash_setconfiguration(device_data, &ctx->config);
909 if (ret) {
910 dev_err(device_data->dev, "[%s] "
911 "hash_setconfiguration() failed!",
912 __func__);
913 goto out;
914 }
915
916 /* Enable DMA input */
917 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
918 HASH_CLEAR_BITS(&device_data->base->cr,
919 HASH_CR_DMAE_MASK);
920 } else {
921 HASH_SET_BITS(&device_data->base->cr,
922 HASH_CR_DMAE_MASK);
923 HASH_SET_BITS(&device_data->base->cr,
924 HASH_CR_PRIVN_MASK);
925 }
926
927 HASH_INITIALIZE;
928
929 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
930 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
931
932 /* Number of bits in last word = (nbytes * 8) % 32 */
933 HASH_SET_NBLW((req->nbytes * 8) % 32);
934 req_ctx->updated = 1;
935 }
936
937	/* Store the nents in the dma struct. */
938	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
939	if (ctx->device->dma.nents < 1) {
940		dev_err(device_data->dev, "[%s] nents < 1!", __func__);
941		ret = ctx->device->dma.nents ? ctx->device->dma.nents : -EFAULT;
942		goto out;
943	}
944
945	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
946	if (bytes_written != req->nbytes) {
947		dev_err(device_data->dev, "[%s] hash_dma_write() failed!", __func__);
948		ret = bytes_written < 0 ? bytes_written : -EIO;
949		goto out;
950	}
951
952 wait_for_completion(&ctx->device->dma.complete);
953 hash_dma_done(ctx);
954
955 while (device_data->base->str & HASH_STR_DCAL_MASK)
956 cpu_relax();
957
958 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
959 unsigned int keylen = ctx->keylen;
960 u8 *key = ctx->key;
961
962 dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
963 ctx->keylen);
964 hash_hw_write_key(device_data, key, keylen);
965 }
966
967 hash_get_digest(device_data, digest, ctx->config.algorithm);
968 memcpy(req->result, digest, ctx->digestsize);
969
970out:
971 release_hash_device(device_data);
972
973	/*
974	 * Allocated in setkey, and only used in HMAC.
975	 */
976 kfree(ctx->key);
977
978 return ret;
979}
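
/*
 * Flow of the DMA final above, for reference: set configuration ->
 * HASH_INITIALIZE -> (HMAC) write the key -> program NBLW -> map the
 * sg list and start the DMA transfer -> wait for the DMA completion ->
 * poll DCAL until the digest is ready -> (HMAC) write the key again
 * for the outer hash -> read out the digest.
 */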
980
981/**
982 * hash_hw_final - The final hash calculation function
983 * @req: The hash request for the job.
984 */
985int hash_hw_final(struct ahash_request *req)
986{
987 int ret = 0;
988 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
989 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
990 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
991 struct hash_device_data *device_data;
992 u8 digest[SHA256_DIGEST_SIZE];
993
994 ret = hash_get_device_data(ctx, &device_data);
995 if (ret)
996 return ret;
997
998	dev_dbg(device_data->dev, "[%s] (ctx=%p)!", __func__, ctx);
999
1000 if (req_ctx->updated) {
1001 ret = hash_resume_state(device_data, &device_data->state);
1002
1003 if (ret) {
1004 dev_err(device_data->dev, "[%s] hash_resume_state() "
1005 "failed!", __func__);
1006 goto out;
1007 }
1008 } else if (req->nbytes == 0 && ctx->keylen == 0) {
1009 u8 zero_hash[SHA256_DIGEST_SIZE];
1010 u32 zero_hash_size = 0;
1011 bool zero_digest = false;
1012		/*
1013		 * Use a pre-calculated empty message digest
1014		 * (workaround since the hw returns zeroes, hw bug!?)
1015		 */
1016 ret = get_empty_message_digest(device_data, &zero_hash[0],
1017 &zero_hash_size, &zero_digest);
1018 if (!ret && likely(zero_hash_size == ctx->digestsize) &&
1019 zero_digest) {
1020 memcpy(req->result, &zero_hash[0], ctx->digestsize);
1021 goto out;
1022 } else if (!ret && !zero_digest) {
1023 dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
1024 "key, continue...", __func__);
1025 } else {
1026 dev_err(device_data->dev, "[%s] ret=%d, or wrong "
1027 "digest size? %s", __func__, ret,
1028 (zero_hash_size == ctx->digestsize) ?
1029 "true" : "false");
1030			/* Return error */
1031			ret = ret ? ret : -EPERM;
1032			goto out;
1032 }
1033	} else if (req->nbytes == 0 && ctx->keylen > 0) {
1034		ret = -EPERM;
1035		dev_err(device_data->dev, "[%s] Empty message with keylength > 0, NOT supported.", __func__);
1036		goto out;
1037	}
1038
1039 if (!req_ctx->updated) {
1040 ret = init_hash_hw(device_data, ctx);
1041 if (ret) {
1042 dev_err(device_data->dev, "[%s] init_hash_hw() "
1043 "failed!", __func__);
1044 goto out;
1045 }
1046 }
1047
1048 if (req_ctx->state.index) {
1049 hash_messagepad(device_data, req_ctx->state.buffer,
1050 req_ctx->state.index);
1051 } else {
1052 HASH_SET_DCAL;
1053 while (device_data->base->str & HASH_STR_DCAL_MASK)
1054 cpu_relax();
1055 }
1056
1057 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
1058 unsigned int keylen = ctx->keylen;
1059 u8 *key = ctx->key;
1060
1061 dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
1062 ctx->keylen);
1063 hash_hw_write_key(device_data, key, keylen);
1064 }
1065
1066 hash_get_digest(device_data, digest, ctx->config.algorithm);
1067 memcpy(req->result, digest, ctx->digestsize);
1068
1069out:
1070 release_hash_device(device_data);
1071
1072	/*
1073	 * Allocated in setkey, and only used in HMAC.
1074	 */
1075 kfree(ctx->key);
1076
1077 return ret;
1078}
1079
1080/**
1081 * hash_hw_update - Updates current HASH computation hashing another part of
1082 * the message.
1083 * @req:	The hash request holding the message to be hashed (caller
1084 *		allocated).
1085 */
1086int hash_hw_update(struct ahash_request *req)
1087{
1088 int ret = 0;
1089 u8 index = 0;
1090 u8 *buffer;
1091 struct hash_device_data *device_data;
1092 u8 *data_buffer;
1093 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1094 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1095 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1096 struct crypto_hash_walk walk;
1097 int msg_length = crypto_hash_walk_first(req, &walk);
1098
1099	/* An empty message ("") is valid input */
1100 if (msg_length == 0)
1101 return ret;
1102
1103 index = req_ctx->state.index;
1104 buffer = (u8 *)req_ctx->state.buffer;
1105
1106	/* Check whether ctx->state.length + msg_length would overflow
1107	 * the 64-bit message length counter */
1108 if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
1109 HASH_HIGH_WORD_MAX_VAL ==
1110 req_ctx->state.length.high_word) {
1111 pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
1112 __func__);
1113 return -EPERM;
1114 }
1115
1116 ret = hash_get_device_data(ctx, &device_data);
1117 if (ret)
1118 return ret;
1119
1120 /* Main loop */
1121 while (0 != msg_length) {
1122 data_buffer = walk.data;
1123 ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
1124 data_buffer, buffer, &index);
1125
1126 if (ret) {
1127			dev_err(device_data->dev, "[%s] hash_process_data() "
1128				"failed!", __func__);
1129 goto out;
1130 }
1131
1132 msg_length = crypto_hash_walk_done(&walk, 0);
1133 }
1134
1135 req_ctx->state.index = index;
1136	dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d",
1137 __func__, req_ctx->state.index,
1138 req_ctx->state.bit_index);
1139
1140out:
1141 release_hash_device(device_data);
1142
1143 return ret;
1144}
1145
1146/**
1147 * hash_resume_state - Function that resumes the state of a calculation.
1148 * @device_data: Pointer to the device structure.
1149 * @device_state: The state to be restored in the hash hardware
1150 */
1151int hash_resume_state(struct hash_device_data *device_data,
1152 const struct hash_state *device_state)
1153{
1154 u32 temp_cr;
1155 s32 count;
1156 int hash_mode = HASH_OPER_MODE_HASH;
1157
1158 if (NULL == device_state) {
1159 dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
1160 __func__);
1161 return -EPERM;
1162 }
1163
1164 /* Check correctness of index and length members */
1165 if (device_state->index > HASH_BLOCK_SIZE
1166 || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
1167 dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
1168 __func__);
1169 return -EPERM;
1170 }
1171
1172 /*
1173 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
1174	 * prepare the HASH accelerator to compute the message
1175 * digest of a new message.
1176 */
1177 HASH_INITIALIZE;
1178
1179 temp_cr = device_state->temp_cr;
1180 writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
1181
1182 if (device_data->base->cr & HASH_CR_MODE_MASK)
1183 hash_mode = HASH_OPER_MODE_HMAC;
1184 else
1185 hash_mode = HASH_OPER_MODE_HASH;
1186
1187 for (count = 0; count < HASH_CSR_COUNT; count++) {
1188 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1189 break;
1190
1191 writel_relaxed(device_state->csr[count],
1192 &device_data->base->csrx[count]);
1193 }
1194
1195 writel_relaxed(device_state->csfull, &device_data->base->csfull);
1196 writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
1197
1198 writel_relaxed(device_state->str_reg, &device_data->base->str);
1199 writel_relaxed(temp_cr, &device_data->base->cr);
1200
1201 return 0;
1202}
1203
1204/**
1205 * hash_save_state - Function that saves the state of hardware.
1206 * @device_data: Pointer to the device structure.
1207 * @device_state:	The structure where the hardware state should be saved.
1208 */
1209int hash_save_state(struct hash_device_data *device_data,
1210 struct hash_state *device_state)
1211{
1212 u32 temp_cr;
1213 u32 count;
1214 int hash_mode = HASH_OPER_MODE_HASH;
1215
1216 if (NULL == device_state) {
1217 dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
1218 __func__);
1219 return -ENOTSUPP;
1220 }
1221
1222	/* Poll the DCAL bit to make sure that there isn't any ongoing
1223	 * digest calculation in the hardware before the state is
1224	 * saved.
1225	 */
1226 while (device_data->base->str & HASH_STR_DCAL_MASK)
1227 cpu_relax();
1228
1229 temp_cr = readl_relaxed(&device_data->base->cr);
1230
1231 device_state->str_reg = readl_relaxed(&device_data->base->str);
1232
1233 device_state->din_reg = readl_relaxed(&device_data->base->din);
1234
1235 if (device_data->base->cr & HASH_CR_MODE_MASK)
1236 hash_mode = HASH_OPER_MODE_HMAC;
1237 else
1238 hash_mode = HASH_OPER_MODE_HASH;
1239
1240 for (count = 0; count < HASH_CSR_COUNT; count++) {
1241 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1242 break;
1243
1244 device_state->csr[count] =
1245 readl_relaxed(&device_data->base->csrx[count]);
1246 }
1247
1248 device_state->csfull = readl_relaxed(&device_data->base->csfull);
1249 device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
1250
1251 device_state->temp_cr = temp_cr;
1252
1253 return 0;
1254}
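
/*
 * Note: hash_save_state() and hash_resume_state() form the
 * context-switch pair used by hash_process_data() above, which saves
 * the CSR state after each processed block and resumes it before the
 * next one:
 *
 *	hash_save_state(device_data, &device_data->state);
 *	...
 *	hash_resume_state(device_data, &device_data->state);
 */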
1255
1256/**
1257 * hash_check_hw - This routine checks the peripheral and PCell IDs.
1258 * @device_data:	Structure for the hash device.
1259 *
1260 */
1261int hash_check_hw(struct hash_device_data *device_data)
1262{
1263 /* Checking Peripheral Ids */
1264 if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0)
1265 && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1)
1266 && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2)
1267 && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3)
1268 && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0)
1269 && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1)
1270 && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2)
1271 && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)
1272 ) {
1273 return 0;
1274 }
1275
1276 dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!",
1277 __func__);
1278 return -ENOTSUPP;
1279}
1280
1281/**
1282 * hash_get_digest - Gets the digest.
1283 * @device_data: Pointer to the device structure.
1284 * @digest: User allocated byte array for the calculated digest.
1285 * @algorithm: The algorithm in use.
1286 */
1287void hash_get_digest(struct hash_device_data *device_data,
1288 u8 *digest, int algorithm)
1289{
1290 u32 temp_hx_val, count;
1291 int loop_ctr;
1292
1293 if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
1294 dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
1295 __func__, algorithm);
1296 return;
1297 }
1298
1299 if (algorithm == HASH_ALGO_SHA1)
1300 loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
1301 else
1302 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1303
1304 dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
1305 __func__, (u32) digest);
1306
1307 /* Copy result into digest array */
1308 for (count = 0; count < loop_ctr; count++) {
1309 temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
1310 digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
1311 digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
1312 digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
1313 digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
1314 }
1315}
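
/*
 * Sketch (hypothetical alternative, not part of the driver): the byte
 * unpacking above is a big-endian store of each result word; with the
 * put_unaligned_be32() helper from <asm/unaligned.h> the copy loop
 * could equivalently be written as:
 *
 *	for (count = 0; count < loop_ctr; count++)
 *		put_unaligned_be32(readl_relaxed(&device_data->base->hx[count]),
 *				   digest + count * 4);
 */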
1316
1317/**
1318 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
1319 * @req: The hash request for the job.
1320 */
1321static int ahash_update(struct ahash_request *req)
1322{
1323 int ret = 0;
1324 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1325
1326 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
1327 ret = hash_hw_update(req);
1328 /* Skip update for DMA, all data will be passed to DMA in final */
1329
1330 if (ret) {
1331 pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
1332 __func__);
1333 }
1334
1335 return ret;
1336}
1337
1338/**
1339 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
1340 * @req: The hash request for the job.
1341 */
1342static int ahash_final(struct ahash_request *req)
1343{
1344 int ret = 0;
1345 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1346
1347 pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);
1348
1349 if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
1350 ret = hash_dma_final(req);
1351 else
1352 ret = hash_hw_final(req);
1353
1354 if (ret) {
1355 pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
1356 __func__);
1357 }
1358
1359 return ret;
1360}
1361
1362static int hash_setkey(struct crypto_ahash *tfm,
1363 const u8 *key, unsigned int keylen, int alg)
1364{
1365 int ret = 0;
1366 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1367
1368	/*
1369	 * Freed in final.
1370	 */
1371	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
1372	if (!ctx->key) {
1373		pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
1374			"for %d\n", __func__, alg);
1375		return -ENOMEM;
1376	}
1377
1379 ctx->keylen = keylen;
1380
1381 return ret;
1382}
1383
1384static int ahash_sha1_init(struct ahash_request *req)
1385{
1386 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1387 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1388
1389 ctx->config.data_format = HASH_DATA_8_BITS;
1390 ctx->config.algorithm = HASH_ALGO_SHA1;
1391 ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1392 ctx->digestsize = SHA1_DIGEST_SIZE;
1393
1394 return hash_init(req);
1395}
1396
1397static int ahash_sha256_init(struct ahash_request *req)
1398{
1399 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1400 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1401
1402 ctx->config.data_format = HASH_DATA_8_BITS;
1403 ctx->config.algorithm = HASH_ALGO_SHA256;
1404 ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1405 ctx->digestsize = SHA256_DIGEST_SIZE;
1406
1407 return hash_init(req);
1408}
1409
1410static int ahash_sha1_digest(struct ahash_request *req)
1411{
1412 int ret2, ret1;
1413
1414 ret1 = ahash_sha1_init(req);
1415 if (ret1)
1416 goto out;
1417
1418 ret1 = ahash_update(req);
1419 ret2 = ahash_final(req);
1420
1421out:
1422 return ret1 ? ret1 : ret2;
1423}
1424
1425static int ahash_sha256_digest(struct ahash_request *req)
1426{
1427 int ret2, ret1;
1428
1429 ret1 = ahash_sha256_init(req);
1430 if (ret1)
1431 goto out;
1432
1433 ret1 = ahash_update(req);
1434 ret2 = ahash_final(req);
1435
1436out:
1437 return ret1 ? ret1 : ret2;
1438}
1439
1440static int hmac_sha1_init(struct ahash_request *req)
1441{
1442 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1443 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1444
1445 ctx->config.data_format = HASH_DATA_8_BITS;
1446 ctx->config.algorithm = HASH_ALGO_SHA1;
1447 ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
1448 ctx->digestsize = SHA1_DIGEST_SIZE;
1449
1450 return hash_init(req);
1451}
1452
1453static int hmac_sha256_init(struct ahash_request *req)
1454{
1455 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1456 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1457
1458 ctx->config.data_format = HASH_DATA_8_BITS;
1459 ctx->config.algorithm = HASH_ALGO_SHA256;
1460 ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
1461 ctx->digestsize = SHA256_DIGEST_SIZE;
1462
1463 return hash_init(req);
1464}
1465
1466static int hmac_sha1_digest(struct ahash_request *req)
1467{
1468 int ret2, ret1;
1469
1470 ret1 = hmac_sha1_init(req);
1471 if (ret1)
1472 goto out;
1473
1474 ret1 = ahash_update(req);
1475 ret2 = ahash_final(req);
1476
1477out:
1478 return ret1 ? ret1 : ret2;
1479}
1480
1481static int hmac_sha256_digest(struct ahash_request *req)
1482{
1483 int ret2, ret1;
1484
1485 ret1 = hmac_sha256_init(req);
1486 if (ret1)
1487 goto out;
1488
1489 ret1 = ahash_update(req);
1490 ret2 = ahash_final(req);
1491
1492out:
1493 return ret1 ? ret1 : ret2;
1494}
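
/*
 * Sketch (hypothetical refactor, not part of the driver): the four
 * digest entry points above share the same init/update/final pattern
 * and could be collapsed into one helper taking the init function as
 * a parameter:
 */
static int __maybe_unused hash_digest_common(struct ahash_request *req,
					     int (*init_fn)(struct ahash_request *))
{
	int ret1, ret2;

	ret1 = init_fn(req);
	if (ret1)
		return ret1;

	ret1 = ahash_update(req);
	ret2 = ahash_final(req);

	return ret1 ? ret1 : ret2;
}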
1495
1496static int hmac_sha1_setkey(struct crypto_ahash *tfm,
1497 const u8 *key, unsigned int keylen)
1498{
1499 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
1500}
1501
1502static int hmac_sha256_setkey(struct crypto_ahash *tfm,
1503 const u8 *key, unsigned int keylen)
1504{
1505 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
1506}
1507
1508struct hash_algo_template {
1509 struct hash_config conf;
1510 struct ahash_alg hash;
1511};
1512
1513static int hash_cra_init(struct crypto_tfm *tfm)
1514{
1515 struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
1516 struct crypto_alg *alg = tfm->__crt_alg;
1517 struct hash_algo_template *hash_alg;
1518
1519 hash_alg = container_of(__crypto_ahash_alg(alg),
1520 struct hash_algo_template,
1521 hash);
1522
1523 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1524 sizeof(struct hash_req_ctx));
1525
1526 ctx->config.data_format = HASH_DATA_8_BITS;
1527 ctx->config.algorithm = hash_alg->conf.algorithm;
1528 ctx->config.oper_mode = hash_alg->conf.oper_mode;
1529
1530 ctx->digestsize = hash_alg->hash.halg.digestsize;
1531
1532 return 0;
1533}
1534
1535static struct hash_algo_template hash_algs[] = {
1536 {
1537 .conf.algorithm = HASH_ALGO_SHA1,
1538 .conf.oper_mode = HASH_OPER_MODE_HASH,
1539 .hash = {
1540 .init = hash_init,
1541 .update = ahash_update,
1542 .final = ahash_final,
1543 .digest = ahash_sha1_digest,
1544 .halg.digestsize = SHA1_DIGEST_SIZE,
1545 .halg.statesize = sizeof(struct hash_ctx),
1546 .halg.base = {
1547 .cra_name = "sha1",
1548 .cra_driver_name = "sha1-ux500",
1549 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1550 CRYPTO_ALG_ASYNC,
1551 .cra_blocksize = SHA1_BLOCK_SIZE,
1552				.cra_ctxsize = sizeof(struct hash_ctx),
1553				.cra_type = &crypto_ahash_type,
1554				.cra_init = hash_cra_init,
1554 .cra_module = THIS_MODULE,
1555 }
1556 }
1557 },
1558 {
1559 .conf.algorithm = HASH_ALGO_SHA256,
1560 .conf.oper_mode = HASH_OPER_MODE_HASH,
1561 .hash = {
1562 .init = hash_init,
1563 .update = ahash_update,
1564 .final = ahash_final,
1565 .digest = ahash_sha256_digest,
1566 .halg.digestsize = SHA256_DIGEST_SIZE,
1567 .halg.statesize = sizeof(struct hash_ctx),
1568 .halg.base = {
1569 .cra_name = "sha256",
1570 .cra_driver_name = "sha256-ux500",
1571 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1572 CRYPTO_ALG_ASYNC,
1573 .cra_blocksize = SHA256_BLOCK_SIZE,
1574 .cra_ctxsize = sizeof(struct hash_ctx),
1575 .cra_type = &crypto_ahash_type,
1576 .cra_init = hash_cra_init,
1577 .cra_module = THIS_MODULE,
1578 }
1579 }
1581 },
1582 {
1583 .conf.algorithm = HASH_ALGO_SHA1,
1584 .conf.oper_mode = HASH_OPER_MODE_HMAC,
1585 .hash = {
1586 .init = hash_init,
1587 .update = ahash_update,
1588 .final = ahash_final,
1589 .digest = hmac_sha1_digest,
1590 .setkey = hmac_sha1_setkey,
1591 .halg.digestsize = SHA1_DIGEST_SIZE,
1592 .halg.statesize = sizeof(struct hash_ctx),
1593 .halg.base = {
1594 .cra_name = "hmac(sha1)",
1595 .cra_driver_name = "hmac-sha1-ux500",
1596 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1597 CRYPTO_ALG_ASYNC,
1598 .cra_blocksize = SHA1_BLOCK_SIZE,
1599 .cra_ctxsize = sizeof(struct hash_ctx),
1600 .cra_type = &crypto_ahash_type,
1601 .cra_init = hash_cra_init,
1602 .cra_module = THIS_MODULE,
1603 }
1604 }
1605 },
1606 {
1607 .conf.algorithm = HASH_ALGO_SHA256,
1608 .conf.oper_mode = HASH_OPER_MODE_HMAC,
1609 .hash = {
1610 .init = hash_init,
1611 .update = ahash_update,
1612 .final = ahash_final,
1613 .digest = hmac_sha256_digest,
1614 .setkey = hmac_sha256_setkey,
1615 .halg.digestsize = SHA256_DIGEST_SIZE,
1616 .halg.statesize = sizeof(struct hash_ctx),
1617 .halg.base = {
1618 .cra_name = "hmac(sha256)",
1619 .cra_driver_name = "hmac-sha256-ux500",
1620 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1621 CRYPTO_ALG_ASYNC,
1622 .cra_blocksize = SHA256_BLOCK_SIZE,
1623 .cra_ctxsize = sizeof(struct hash_ctx),
1624 .cra_type = &crypto_ahash_type,
1625 .cra_init = hash_cra_init,
1626 .cra_module = THIS_MODULE,
1627 }
1628 }
1629 }
1630};
1631
1632/**
1633 * ahash_algs_register_all - Register the supported ahash algorithms.
1634 */
1635static int ahash_algs_register_all(struct hash_device_data *device_data)
1636{
1637 int ret;
1638 int i;
1639 int count;
1640
1641 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
1642 ret = crypto_register_ahash(&hash_algs[i].hash);
1643 if (ret) {
1644 count = i;
1645 dev_err(device_data->dev, "[%s] alg registration failed",
1646 hash_algs[i].hash.halg.base.cra_driver_name);
1647 goto unreg;
1648 }
1649 }
1650 return 0;
1651unreg:
1652 for (i = 0; i < count; i++)
1653 crypto_unregister_ahash(&hash_algs[i].hash);
1654 return ret;
1655}
1656
1657/**
1658 * ahash_algs_unregister_all - Unregister the supported ahash algorithms.
1659 */
1660static void ahash_algs_unregister_all(struct hash_device_data *device_data)
1661{
1662 int i;
1663
1664 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
1665 crypto_unregister_ahash(&hash_algs[i].hash);
1666}
1667
1668/**
1669 * ux500_hash_probe - Function that probes the hash hardware.
1670 * @pdev: The platform device.
1671 */
1672static int ux500_hash_probe(struct platform_device *pdev)
1673{
1674 int ret = 0;
1675 struct resource *res = NULL;
1676 struct hash_device_data *device_data;
1677 struct device *dev = &pdev->dev;
1678
1679	device_data = kzalloc(sizeof(struct hash_device_data), GFP_KERNEL);
1680 if (!device_data) {
1681 dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
1682 ret = -ENOMEM;
1683 goto out;
1684 }
1685
1686 device_data->dev = dev;
1687 device_data->current_ctx = NULL;
1688
1689 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1690 if (!res) {
1691 dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
1692 ret = -ENODEV;
1693 goto out_kfree;
1694 }
1695
1696 res = request_mem_region(res->start, resource_size(res), pdev->name);
1697 if (res == NULL) {
1698 dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
1699 ret = -EBUSY;
1700 goto out_kfree;
1701 }
1702
1703 device_data->base = ioremap(res->start, resource_size(res));
1704 if (!device_data->base) {
1705 dev_err(dev, "[%s] ioremap() failed!",
1706 __func__);
1707 ret = -ENOMEM;
1708 goto out_free_mem;
1709 }
1710 spin_lock_init(&device_data->ctx_lock);
1711 spin_lock_init(&device_data->power_state_lock);
1712
1713 /* Enable power for HASH1 hardware block */
1714 device_data->regulator = regulator_get(dev, "v-ape");
1715 if (IS_ERR(device_data->regulator)) {
1716 dev_err(dev, "[%s] regulator_get() failed!", __func__);
1717 ret = PTR_ERR(device_data->regulator);
1718 device_data->regulator = NULL;
1719 goto out_unmap;
1720 }
1721
1722 /* Enable the clock for HASH1 hardware block */
1723 device_data->clk = clk_get(dev, NULL);
1724 if (IS_ERR(device_data->clk)) {
1725 dev_err(dev, "[%s] clk_get() failed!", __func__);
1726 ret = PTR_ERR(device_data->clk);
1727 goto out_regulator;
1728 }
1729
1730 /* Enable device power (and clock) */
1731 ret = hash_enable_power(device_data, false);
1732 if (ret) {
1733 dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
1734 goto out_clk;
1735 }
1736
1737 ret = hash_check_hw(device_data);
1738 if (ret) {
1739 dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
1740 goto out_power;
1741 }
1742
1743 if (hash_mode == HASH_MODE_DMA)
1744 hash_dma_setup_channel(device_data, dev);
1745
1746 platform_set_drvdata(pdev, device_data);
1747
1748 /* Put the new device into the device list... */
1749 klist_add_tail(&device_data->list_node, &driver_data.device_list);
1750 /* ... and signal that a new device is available. */
1751 up(&driver_data.device_allocation);
1752
1753 ret = ahash_algs_register_all(device_data);
1754 if (ret) {
1755 dev_err(dev, "[%s] ahash_algs_register_all() "
1756 "failed!", __func__);
1757 goto out_power;
1758 }
1759
1760 dev_info(dev, "[%s] successfully probed\n", __func__);
1761 return 0;
1762
1763out_power:
1764 hash_disable_power(device_data, false);
1765
1766out_clk:
1767 clk_put(device_data->clk);
1768
1769out_regulator:
1770 regulator_put(device_data->regulator);
1771
1772out_unmap:
1773 iounmap(device_data->base);
1774
1775out_free_mem:
1776 release_mem_region(res->start, resource_size(res));
1777
1778out_kfree:
1779 kfree(device_data);
1780out:
1781 return ret;
1782}
1783
1784/**
1785 * ux500_hash_remove - Function that removes the hash device from the platform.
1786 * @pdev: The platform device.
1787 */
1788static int ux500_hash_remove(struct platform_device *pdev)
1789{
1790 struct resource *res;
1791 struct hash_device_data *device_data;
1792 struct device *dev = &pdev->dev;
1793
1794 device_data = platform_get_drvdata(pdev);
1795 if (!device_data) {
1796 dev_err(dev, "[%s]: platform_get_drvdata() failed!",
1797 __func__);
1798 return -ENOMEM;
1799 }
1800
1801 /* Try to decrease the number of available devices. */
1802 if (down_trylock(&driver_data.device_allocation))
1803 return -EBUSY;
1804
1805 /* Check that the device is free */
1806 spin_lock(&device_data->ctx_lock);
1807 /* current_ctx allocates a device, NULL = unallocated */
1808 if (device_data->current_ctx) {
1809 /* The device is busy */
1810 spin_unlock(&device_data->ctx_lock);
1811 /* Return the device to the pool. */
1812 up(&driver_data.device_allocation);
1813 return -EBUSY;
1814 }
1815
1816 spin_unlock(&device_data->ctx_lock);
1817
1818 /* Remove the device from the list */
1819 if (klist_node_attached(&device_data->list_node))
1820 klist_remove(&device_data->list_node);
1821
1822 /* If this was the last device, remove the services */
1823 if (list_empty(&driver_data.device_list.k_list))
1824 ahash_algs_unregister_all(device_data);
1825
1826 if (hash_disable_power(device_data, false))
1827 dev_err(dev, "[%s]: hash_disable_power() failed",
1828 __func__);
1829
1830 clk_put(device_data->clk);
1831 regulator_put(device_data->regulator);
1832
1833 iounmap(device_data->base);
1834
1835 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1836 if (res)
1837 release_mem_region(res->start, resource_size(res));
1838
1839 kfree(device_data);
1840
1841 return 0;
1842}
1843
1844/**
1845 * ux500_hash_shutdown - Function that shuts down the hash device.
1846 * @pdev: The platform device
1847 */
1848static void ux500_hash_shutdown(struct platform_device *pdev)
1849{
1850 struct resource *res = NULL;
1851 struct hash_device_data *device_data;
1852
1853 device_data = platform_get_drvdata(pdev);
1854 if (!device_data) {
1855 dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
1856 __func__);
1857 return;
1858 }
1859
1860 /* Check that the device is free */
1861 spin_lock(&device_data->ctx_lock);
1862 /* current_ctx allocates a device, NULL = unallocated */
1863 if (!device_data->current_ctx) {
1864 if (down_trylock(&driver_data.device_allocation))
1865			dev_dbg(&pdev->dev, "[%s]: Hash still in use! "
1866				"Shutting down anyway...", __func__);
1867		/*
1868		 * (Allocate the device)
1869		 * Need to set this to a non-null (dummy) value,
1870		 * to avoid usage during context switching.
1871		 */
1872 device_data->current_ctx++;
1873 }
1874 spin_unlock(&device_data->ctx_lock);
1875
1876 /* Remove the device from the list */
1877 if (klist_node_attached(&device_data->list_node))
1878 klist_remove(&device_data->list_node);
1879
1880 /* If this was the last device, remove the services */
1881 if (list_empty(&driver_data.device_list.k_list))
1882 ahash_algs_unregister_all(device_data);
1883
1884 iounmap(device_data->base);
1885
1886 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1887 if (res)
1888 release_mem_region(res->start, resource_size(res));
1889
1890 if (hash_disable_power(device_data, false))
1891 dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
1892 __func__);
1893}
1894
1895/**
1896 * ux500_hash_suspend - Function that suspends the hash device.
1897 * @pdev: The platform device.
1898 * @state:	The power management event (unused).
1899 */
1900static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
1901{
1902 int ret;
1903 struct hash_device_data *device_data;
1904 struct hash_ctx *temp_ctx = NULL;
1905
1906 device_data = platform_get_drvdata(pdev);
1907 if (!device_data) {
1908 dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
1909 __func__);
1910 return -ENOMEM;
1911 }
1912
1913 spin_lock(&device_data->ctx_lock);
1914 if (!device_data->current_ctx)
1915 device_data->current_ctx++;
1916 spin_unlock(&device_data->ctx_lock);
1917
1918 if (device_data->current_ctx == ++temp_ctx) {
1919 if (down_interruptible(&driver_data.device_allocation))
1920 dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
1921 "failed", __func__);
1922 ret = hash_disable_power(device_data, false);
1923
1924 } else
1925 ret = hash_disable_power(device_data, true);
1926
1927 if (ret)
1928		dev_err(&pdev->dev, "[%s]: hash_disable_power() failed", __func__);
1929
1930 return ret;
1931}
1932
1933/**
1934 * ux500_hash_resume - Function that resumes the hash device.
1935 * @pdev: The platform device.
1936 */
1937static int ux500_hash_resume(struct platform_device *pdev)
1938{
1939 int ret = 0;
1940 struct hash_device_data *device_data;
1941 struct hash_ctx *temp_ctx = NULL;
1942
1943 device_data = platform_get_drvdata(pdev);
1944 if (!device_data) {
1945 dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
1946 __func__);
1947 return -ENOMEM;
1948 }
1949
1950 spin_lock(&device_data->ctx_lock);
1951 if (device_data->current_ctx == ++temp_ctx)
1952 device_data->current_ctx = NULL;
1953 spin_unlock(&device_data->ctx_lock);
1954
1955 if (!device_data->current_ctx)
1956 up(&driver_data.device_allocation);
1957 else
1958 ret = hash_enable_power(device_data, true);
1959
1960 if (ret)
1961 dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
1962 __func__);
1963
1964 return ret;
1965}
1966
1967static struct platform_driver hash_driver = {
1968 .probe = ux500_hash_probe,
1969 .remove = ux500_hash_remove,
1970 .shutdown = ux500_hash_shutdown,
1971 .suspend = ux500_hash_suspend,
1972 .resume = ux500_hash_resume,
1973 .driver = {
1974 .owner = THIS_MODULE,
1975 .name = "hash1",
1976 }
1977};
1978
1979/**
1980 * ux500_hash_mod_init - The kernel module init function.
1981 */
1982static int __init ux500_hash_mod_init(void)
1983{
1984 klist_init(&driver_data.device_list, NULL, NULL);
1985 /* Initialize the semaphore to 0 devices (locked state) */
1986 sema_init(&driver_data.device_allocation, 0);
1987
1988 return platform_driver_register(&hash_driver);
1989}
1990
1991/**
1992 * ux500_hash_mod_fini - The kernel module exit function.
1993 */
1994static void __exit ux500_hash_mod_fini(void)
1995{
1996 platform_driver_unregister(&hash_driver);
1998}
1999
2000module_init(ux500_hash_mod_init);
2001module_exit(ux500_hash_mod_fini);
2002
2003MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
2004MODULE_LICENSE("GPL");
2005
2006MODULE_ALIAS("sha1-all");
2007MODULE_ALIAS("sha256-all");
2008MODULE_ALIAS("hmac-sha1-all");
2009MODULE_ALIAS("hmac-sha256-all");