Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig     |    2
-rw-r--r--  drivers/char/hw_random/amd-rng.c   |    9
-rw-r--r--  drivers/crypto/Kconfig             |   65
-rw-r--r--  drivers/crypto/Makefile            |    2
-rw-r--r--  drivers/crypto/caam/Kconfig        |   72
-rw-r--r--  drivers/crypto/caam/Makefile       |    8
-rw-r--r--  drivers/crypto/caam/caamalg.c      | 1268
-rw-r--r--  drivers/crypto/caam/compat.h       |   35
-rw-r--r--  drivers/crypto/caam/ctrl.c         |  269
-rw-r--r--  drivers/crypto/caam/desc.h         | 1605
-rw-r--r--  drivers/crypto/caam/desc_constr.h  |  205
-rw-r--r--  drivers/crypto/caam/error.c        |  248
-rw-r--r--  drivers/crypto/caam/error.h        |   11
-rw-r--r--  drivers/crypto/caam/intern.h       |  113
-rw-r--r--  drivers/crypto/caam/jr.c           |  517
-rw-r--r--  drivers/crypto/caam/jr.h           |   21
-rw-r--r--  drivers/crypto/caam/regs.h         |  663
-rw-r--r--  drivers/crypto/mv_cesa.c           |   97
-rw-r--r--  drivers/crypto/omap-sham.c         |   78
-rw-r--r--  drivers/crypto/padlock-sha.c       |  269
-rw-r--r--  drivers/crypto/picoxcell_crypto.c  |   64
-rw-r--r--  drivers/crypto/s5p-sss.c           |  701
22 files changed, 6167 insertions(+), 155 deletions(-)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index beecd1cf9b99..a60043b3e409 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -49,7 +49,7 @@ config HW_RANDOM_INTEL
 
 config HW_RANDOM_AMD
 	tristate "AMD HW Random Number Generator support"
-	depends on HW_RANDOM && X86 && PCI
+	depends on HW_RANDOM && (X86 || PPC_MAPLE) && PCI
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 0d8c5788b8e4..c6af038682f1 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -133,6 +133,12 @@ found:
 	pmbase &= 0x0000FF00;
 	if (pmbase == 0)
 		goto out;
+	if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
+		dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
+			pmbase + 0xF0);
+		err = -EBUSY;
+		goto out;
+	}
 	amd_rng.priv = (unsigned long)pmbase;
 	amd_pdev = pdev;
 
@@ -141,6 +147,7 @@ found:
 	if (err) {
 		printk(KERN_ERR PFX "RNG registering failed (%d)\n",
 		       err);
+		release_region(pmbase + 0xF0, 8);
 		goto out;
 	}
 out:
@@ -149,6 +156,8 @@ out:
 
 static void __exit mod_exit(void)
 {
+	u32 pmbase = (unsigned long)amd_rng.priv;
+	release_region(pmbase + 0xF0, 8);
 	hwrng_unregister(&amd_rng);
 }
 
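The fix above follows the standard request_region()/release_region() pairing: the I/O range is claimed before the RNG is registered and released on every exit path, including registration failure. A minimal standalone sketch of that pattern, with hypothetical helper names (not the driver's actual code):

	#include <linux/ioport.h>

	/* claim the 8 RNG I/O ports at pmbase + 0xF0 before touching them */
	static int claim_hwrng_ports(u32 pmbase)
	{
		if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG"))
			return -EBUSY;	/* already owned by another driver */
		return 0;
	}

	/* every successful request_region() needs a matching release */
	static void release_hwrng_ports(u32 pmbase)
	{
		release_region(pmbase + 0xF0, 8);
	}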
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e54185223c8c..c64c3807f516 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -91,6 +91,8 @@ config CRYPTO_SHA1_S390
 	  This is the s390 hardware accelerated implementation of the
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
+	  It is available as of z990.
+
 config CRYPTO_SHA256_S390
 	tristate "SHA256 digest algorithm"
 	depends on S390
@@ -99,8 +101,7 @@ config CRYPTO_SHA256_S390
 	  This is the s390 hardware accelerated implementation of the
 	  SHA256 secure hash standard (DFIPS 180-2).
 
-	  This version of SHA implements a 256 bit hash with 128 bits of
-	  security against collision attacks.
+	  It is available as of z9.
 
 config CRYPTO_SHA512_S390
 	tristate "SHA384 and SHA512 digest algorithm"
@@ -110,10 +111,7 @@ config CRYPTO_SHA512_S390
 	  This is the s390 hardware accelerated implementation of the
 	  SHA512 secure hash standard.
 
-	  This version of SHA implements a 512 bit hash with 256 bits of
-	  security against collision attacks. The code also includes SHA-384,
-	  a 384 bit hash with 192 bits of security against collision attacks.
-
+	  It is available as of z10.
 
 config CRYPTO_DES_S390
 	tristate "DES and Triple DES cipher algorithms"
@@ -121,9 +119,12 @@ config CRYPTO_DES_S390
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
 	help
-	  This us the s390 hardware accelerated implementation of the
+	  This is the s390 hardware accelerated implementation of the
 	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
 
+	  As of z990 the ECB and CBC mode are hardware accelerated.
+	  As of z196 the CTR mode is hardware accelerated.
+
 config CRYPTO_AES_S390
 	tristate "AES cipher algorithms"
 	depends on S390
@@ -131,20 +132,15 @@ config CRYPTO_AES_S390
 	select CRYPTO_BLKCIPHER
 	help
 	  This is the s390 hardware accelerated implementation of the
-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-	  algorithm.
-
-	  Rijndael appears to be consistently a very good performer in
-	  both hardware and software across a wide range of computing
-	  environments regardless of its use in feedback or non-feedback
-	  modes. Its key setup time is excellent, and its key agility is
-	  good. Rijndael's very low memory requirements make it very well
-	  suited for restricted-space environments, in which it also
-	  demonstrates excellent performance. Rijndael's operations are
-	  among the easiest to defend against power and timing attacks.
+	  AES cipher algorithms (FIPS-197).
 
-	  On s390 the System z9-109 currently only supports the key size
-	  of 128 bit.
+	  As of z9 the ECB and CBC modes are hardware accelerated
+	  for 128 bit keys.
+	  As of z10 the ECB and CBC modes are hardware accelerated
+	  for all AES key sizes.
+	  As of z196 the CTR mode is hardware accelerated for all AES
+	  key sizes and XTS mode is hardware accelerated for 256 and
+	  512 bit keys.
 
 config S390_PRNG
 	tristate "Pseudo random number generator device driver"
@@ -154,8 +150,20 @@ config S390_PRNG
 	  Select this option if you want to use the s390 pseudo random number
 	  generator. The PRNG is part of the cryptographic processor functions
 	  and uses triple-DES to generate secure random numbers like the
-	  ANSI X9.17 standard. The PRNG is usable via the char device
-	  /dev/prandom.
+	  ANSI X9.17 standard. User-space programs access the
+	  pseudo-random-number device through the char device /dev/prandom.
+
+	  It is available as of z9.
+
+config CRYPTO_GHASH_S390
+	tristate "GHASH digest algorithm"
+	depends on S390
+	select CRYPTO_HASH
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  GHASH message digest algorithm for GCM (Galois/Counter Mode).
+
+	  It is available as of z196.
 
 config CRYPTO_DEV_MV_CESA
 	tristate "Marvell's Cryptographic Engine"
@@ -200,6 +208,8 @@ config CRYPTO_DEV_HIFN_795X_RNG
 	  Select this option if you want to enable the random number generator
 	  on the HIFN 795x crypto adapters.
 
+source drivers/crypto/caam/Kconfig
+
 config CRYPTO_DEV_TALITOS
 	tristate "Talitos Freescale Security Engine (SEC)"
 	select CRYPTO_ALGAPI
@@ -269,4 +279,15 @@ config CRYPTO_DEV_PICOXCELL
 
 	  Saying m here will build a module named pipcoxcell_crypto.
 
+config CRYPTO_DEV_S5P
+	tristate "Support for Samsung S5PV210 crypto accelerator"
+	depends on ARCH_S5PV210
+	select CRYPTO_AES
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  This option allows you to have support for S5P crypto acceleration.
+	  Select this to offload Samsung S5PV210 or S5PC110 from AES
+	  algorithms execution.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 5203e34248d7..53ea50155319 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,8 +6,10 @@ n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644
index 000000000000..2d876bb98ff4
--- /dev/null
+++ b/drivers/crypto/caam/Kconfig
@@ -0,0 +1,72 @@
1config CRYPTO_DEV_FSL_CAAM
2 tristate "Freescale CAAM-Multicore driver backend"
3 depends on FSL_SOC
4 help
5 Enables the driver module for Freescale's Cryptographic Accelerator
6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
7 This module adds a job ring operation interface, and configures h/w
8 to operate as a DPAA component automatically, depending
9 on h/w feature availability.
10
11 To compile this driver as a module, choose M here: the module
12 will be called caam.
13
14config CRYPTO_DEV_FSL_CAAM_RINGSIZE
15 int "Job Ring size"
16 depends on CRYPTO_DEV_FSL_CAAM
17 range 2 9
18 default "9"
19 help
20 Select size of Job Rings as a power of 2, within the
21 range 2-9 (ring size 4-512).
22 Examples:
23 2 => 4
24 3 => 8
25 4 => 16
26 5 => 32
27 6 => 64
28 7 => 128
29 8 => 256
30 9 => 512
31
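The ring-size option feeds a power-of-two computation in the driver. A minimal sketch of that mapping, assuming the conventional JOBR_DEPTH idiom (the macro name is an assumption for illustration, not taken from this patch):

	/* hypothetical illustration: ring depth derived from the Kconfig value */
	#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
	/* e.g. the default of 9 gives 1 << 9 = 512 ring entries */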
32config CRYPTO_DEV_FSL_CAAM_INTC
33 bool "Job Ring interrupt coalescing"
34 depends on CRYPTO_DEV_FSL_CAAM
35 default y
36 help
37 Enable the Job Ring's interrupt coalescing feature.
38
39config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
40 int "Job Ring interrupt coalescing count threshold"
41 depends on CRYPTO_DEV_FSL_CAAM_INTC
42 range 1 255
43 default 255
44 help
45 Select number of descriptor completions to queue before
46 raising an interrupt, in the range 1-255. Note that a selection
47 of 1 functionally defeats the coalescing feature, and a selection
48 equal or greater than the job ring size will force timeouts.
49
50config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
51 int "Job Ring interrupt coalescing timer threshold"
52 depends on CRYPTO_DEV_FSL_CAAM_INTC
53 range 1 65535
54 default 2048
55 help
56 Select number of bus clocks/64 to timeout in the case that one or
57 more descriptor completions are queued without reaching the count
58 threshold. Range is 1-65535.
59
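Taken together, the two thresholds bound interrupt latency: an interrupt fires after the count threshold is reached, or after the time threshold elapses with completions still queued. A worked example with the defaults, assuming a hypothetical 400 MHz platform bus (the clock rate is an assumption for illustration only):

	/* hypothetical illustration of the coalescing timeout */
	unsigned long thld   = 2048;		/* default time threshold */
	unsigned long bus_hz = 400000000;	/* assumed 400 MHz bus clock */
	/* timeout = thld * 64 / bus_hz = 131072 / 400000000 s ~= 328 us */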
60config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
61 tristate "Register algorithm implementations with the Crypto API"
62 depends on CRYPTO_DEV_FSL_CAAM
63 default y
64 select CRYPTO_ALGAPI
65 select CRYPTO_AUTHENC
66 help
67 Selecting this will offload crypto for users of the
68 scatterlist crypto API (such as the linux native IPSec
69 stack) to the SEC4 via job ring.
70
71 To compile this as a module, choose M here: the module
72 will be called caamalg.
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644
index 000000000000..ef39011b4505
--- /dev/null
+++ b/drivers/crypto/caam/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the CAAM backend and dependent components
3#
4
5obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
6obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7
8caam-objs := ctrl.o jr.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644
index 000000000000..d0e65d6ddc77
--- /dev/null
+++ b/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,1268 @@
1/*
2 * caam - Freescale FSL CAAM support for crypto API
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Based on talitos crypto API driver.
7 *
8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9 *
10 * --------------- ---------------
11 * | JobDesc #1 |-------------------->| ShareDesc |
12 * | *(packet 1) | | (PDB) |
13 * --------------- |------------->| (hashKey) |
14 * . | | (cipherKey) |
15 * . | |-------->| (operation) |
16 * --------------- | | ---------------
17 * | JobDesc #2 |------| |
18 * | *(packet 2) | |
19 * --------------- |
20 * . |
21 * . |
22 * --------------- |
23 * | JobDesc #3 |------------
24 * | *(packet 3) |
25 * ---------------
26 *
27 * The SharedDesc never changes for a connection unless rekeyed, but
28 * each packet will likely be in a different place. So all we need
29 * to know to process the packet is where the input is, where the
30 * output goes, and what context we want to process with. Context is
31 * in the SharedDesc, packet references in the JobDesc.
32 *
33 * So, a job desc looks like:
34 *
35 * ---------------------
36 * | Header |
37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR |
39 * | (output buffer) |
40 * | SEQ_IN_PTR |
41 * | (input buffer) |
42 * | LOAD (to DECO) |
43 * ---------------------
44 */
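/*
 * A rough sketch of how such a job descriptor is assembled with the
 * desc_constr.h helpers used later in this file (DMA mapping and error
 * handling omitted; an illustration only, not the driver's actual
 * request path):
 *
 *	init_job_desc_shared(desc, ctx->shared_desc_phys,
 *			     desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_load(desc, iv_dma, ivsize,
 *		    LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
 */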
45
46#include "compat.h"
47
48#include "regs.h"
49#include "intern.h"
50#include "desc_constr.h"
51#include "jr.h"
52#include "error.h"
53
54/*
55 * crypto alg
56 */
57#define CAAM_CRA_PRIORITY 3000
58/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
59#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
60 SHA512_DIGEST_SIZE * 2)
61/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
62#define CAAM_MAX_IV_LENGTH 16
63
64/* length of descriptors text */
65#define DESC_AEAD_SHARED_TEXT_LEN 4
66#define DESC_AEAD_ENCRYPT_TEXT_LEN 21
67#define DESC_AEAD_DECRYPT_TEXT_LEN 24
68#define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27
69
70#ifdef DEBUG
71/* for print_hex_dumps with line references */
72#define xstr(s) str(s)
73#define str(s) #s
74#define debug(format, arg...) printk(format, arg)
75#else
76#define debug(format, arg...)
77#endif
78
79/*
80 * per-session context
81 */
82struct caam_ctx {
83 struct device *jrdev;
84 u32 *sh_desc;
85 dma_addr_t shared_desc_phys;
86 u32 class1_alg_type;
87 u32 class2_alg_type;
88 u32 alg_op;
89 u8 *key;
90 dma_addr_t key_phys;
91 unsigned int enckeylen;
92 unsigned int split_key_len;
93 unsigned int split_key_pad_len;
94 unsigned int authsize;
95};
96
97static int aead_authenc_setauthsize(struct crypto_aead *authenc,
98 unsigned int authsize)
99{
100 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
101
102 ctx->authsize = authsize;
103
104 return 0;
105}
106
107struct split_key_result {
108 struct completion completion;
109 int err;
110};
111
112static void split_key_done(struct device *dev, u32 *desc, u32 err,
113 void *context)
114{
115 struct split_key_result *res = context;
116
117#ifdef DEBUG
118 dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
119#endif
120 if (err) {
121 char tmp[CAAM_ERROR_STR_MAX];
122
123 dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
124 }
125
126 res->err = err;
127
128 complete(&res->completion);
129}
130
131/*
132get a split ipad/opad key
133
134Split key generation-----------------------------------------------
135
136[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
137[01] 0x04000014 key: class2->keyreg len=20
138 @0xffe01000
139[03] 0x84410014 operation: cls2-op sha1 hmac init dec
140[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
141[05] 0xa4000001 jump: class2 local all ->1 [06]
142[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
143 @0xffe04000
144*/
145static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
146{
147 struct device *jrdev = ctx->jrdev;
148 u32 *desc;
149 struct split_key_result result;
150 dma_addr_t dma_addr_in, dma_addr_out;
151 int ret = 0;
152
153 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
154
155 init_job_desc(desc, 0);
156
157 dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
158 DMA_TO_DEVICE);
159 if (dma_mapping_error(jrdev, dma_addr_in)) {
160 dev_err(jrdev, "unable to map key input memory\n");
161 kfree(desc);
162 return -ENOMEM;
163 }
164 append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
165 KEY_DEST_CLASS_REG);
166
167 /* Sets MDHA up into an HMAC-INIT */
168 append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
169 OP_ALG_AS_INIT);
170
171 /*
172 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
173 into both pads inside MDHA
174 */
175 append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
176 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
177
178 /*
179 * FIFO_STORE with the explicit split-key content store
180 * (0x26 output type)
181 */
182 dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
183 DMA_FROM_DEVICE);
184 if (dma_mapping_error(jrdev, dma_addr_out)) {
185 dev_err(jrdev, "unable to map key output memory\n");
186 kfree(desc);
187 return -ENOMEM;
188 }
189 append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
190 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
191
192#ifdef DEBUG
193 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
194 DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
195 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
196 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
197#endif
198
199 result.err = 0;
200 init_completion(&result.completion);
201
202 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
203 if (!ret) {
204 /* in progress */
205 wait_for_completion_interruptible(&result.completion);
206 ret = result.err;
207#ifdef DEBUG
208 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
209 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
210 ctx->split_key_pad_len, 1);
211#endif
212 }
213
214 dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
215 DMA_FROM_DEVICE);
216 dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
217
218 kfree(desc);
219
220 return ret;
221}
222
223static int build_sh_desc_ipsec(struct caam_ctx *ctx)
224{
225 struct device *jrdev = ctx->jrdev;
226 u32 *sh_desc;
227 u32 *jump_cmd;
228 bool keys_fit_inline = 0;
229
230 /*
231 * largest Job Descriptor and its Shared Descriptor
232 * must both fit into the 64-word Descriptor h/w Buffer
233 */
234 if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
235 DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
236 ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
237 keys_fit_inline = 1;
238
239 /* build shared descriptor for this session */
240 sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
241 keys_fit_inline ?
242 ctx->split_key_pad_len + ctx->enckeylen :
243 CAAM_PTR_SZ * 2, GFP_DMA | GFP_KERNEL);
244 if (!sh_desc) {
245 dev_err(jrdev, "could not allocate shared descriptor\n");
246 return -ENOMEM;
247 }
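	/*
	 * Note on the kmalloc size above: C's conditional operator binds
	 * more loosely than '+', so the expression parses as
	 * (CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN + keys_fit_inline) ? a : b.
	 * The intended size needs explicit parentheses:
	 *
	 *	CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
	 *		(keys_fit_inline ?
	 *		 ctx->split_key_pad_len + ctx->enckeylen :
	 *		 CAAM_PTR_SZ * 2)
	 */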
248
249 init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
250
251 jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
252 JUMP_COND_SHRD | JUMP_COND_SELF);
253
254 /*
255 * process keys, starting with class 2/authentication.
256 */
257 if (keys_fit_inline) {
258 append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
259 ctx->split_key_len,
260 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
261
262 append_key_as_imm(sh_desc, (void *)ctx->key +
263 ctx->split_key_pad_len, ctx->enckeylen,
264 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
265 } else {
266 append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
267 KEY_DEST_MDHA_SPLIT | KEY_ENC);
268 append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
269 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
270 }
271
272 /* update jump cmd now that we are at the jump target */
273 set_jump_tgt_here(sh_desc, jump_cmd);
274
275 ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
276 desc_bytes(sh_desc),
277 DMA_TO_DEVICE);
278 if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
279 dev_err(jrdev, "unable to map shared descriptor\n");
280 kfree(sh_desc);
281 return -ENOMEM;
282 }
283
284 ctx->sh_desc = sh_desc;
285
286 return 0;
287}
288
289static int aead_authenc_setkey(struct crypto_aead *aead,
290 const u8 *key, unsigned int keylen)
291{
292 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
293 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
294 struct caam_ctx *ctx = crypto_aead_ctx(aead);
295 struct device *jrdev = ctx->jrdev;
296 struct rtattr *rta = (void *)key;
297 struct crypto_authenc_key_param *param;
298 unsigned int authkeylen;
299 unsigned int enckeylen;
300 int ret = 0;
301
302 param = RTA_DATA(rta);
303 enckeylen = be32_to_cpu(param->enckeylen);
304
305 key += RTA_ALIGN(rta->rta_len);
306 keylen -= RTA_ALIGN(rta->rta_len);
307
308 if (keylen < enckeylen)
309 goto badkey;
310
311 authkeylen = keylen - enckeylen;
312
313 if (keylen > CAAM_MAX_KEY_SIZE)
314 goto badkey;
315
316 /* Pick class 2 key length from algorithm submask */
317 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
318 OP_ALG_ALGSEL_SHIFT] * 2;
319 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
320
321#ifdef DEBUG
322 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
323 keylen, enckeylen, authkeylen);
324 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
325 ctx->split_key_len, ctx->split_key_pad_len);
326 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
327 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
328#endif
329 ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
330 GFP_KERNEL | GFP_DMA);
331 if (!ctx->key) {
332 dev_err(jrdev, "could not allocate key output memory\n");
333 return -ENOMEM;
334 }
335
336 ret = gen_split_key(ctx, key, authkeylen);
337 if (ret) {
338 kfree(ctx->key);
339 goto badkey;
340 }
341
342 /* postpend encryption key to auth split key */
343 memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
344
345 ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
346 enckeylen, DMA_TO_DEVICE);
347 if (dma_mapping_error(jrdev, ctx->key_phys)) {
348 dev_err(jrdev, "unable to map key i/o memory\n");
349 kfree(ctx->key);
350 return -ENOMEM;
351 }
352#ifdef DEBUG
353 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
354 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
355 ctx->split_key_pad_len + enckeylen, 1);
356#endif
357
358 ctx->enckeylen = enckeylen;
359
360 ret = build_sh_desc_ipsec(ctx);
361 if (ret) {
362 dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
363 enckeylen, DMA_TO_DEVICE);
364 kfree(ctx->key);
365 }
366
367 return ret;
368badkey:
369 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
370 return -EINVAL;
371}
372
373struct link_tbl_entry {
374 u64 ptr;
375 u32 len;
376 u8 reserved;
377 u8 buf_pool_id;
378 u16 offset;
379};
380
381/*
382 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
383 * @src_nents: number of segments in input scatterlist
384 * @dst_nents: number of segments in output scatterlist
385 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
386 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
387 * @link_tbl_bytes: length of dma mapped link_tbl space
388 * @link_tbl_dma: bus physical mapped address of h/w link table
389 * @hw_desc: the h/w job descriptor followed by any referenced link tables
390 */
391struct ipsec_esp_edesc {
392 int assoc_nents;
393 int src_nents;
394 int dst_nents;
395 int link_tbl_bytes;
396 dma_addr_t link_tbl_dma;
397 struct link_tbl_entry *link_tbl;
398 u32 hw_desc[0];
399};
400
401static void ipsec_esp_unmap(struct device *dev,
402 struct ipsec_esp_edesc *edesc,
403 struct aead_request *areq)
404{
405 dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
406
407 if (unlikely(areq->dst != areq->src)) {
408 dma_unmap_sg(dev, areq->src, edesc->src_nents,
409 DMA_TO_DEVICE);
410 dma_unmap_sg(dev, areq->dst, edesc->dst_nents,
411 DMA_FROM_DEVICE);
412 } else {
413 dma_unmap_sg(dev, areq->src, edesc->src_nents,
414 DMA_BIDIRECTIONAL);
415 }
416
417 if (edesc->link_tbl_bytes)
418 dma_unmap_single(dev, edesc->link_tbl_dma,
419 edesc->link_tbl_bytes,
420 DMA_TO_DEVICE);
421}
422
423/*
424 * ipsec_esp descriptor callbacks
425 */
426static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
427 void *context)
428{
429 struct aead_request *areq = context;
430 struct ipsec_esp_edesc *edesc;
431#ifdef DEBUG
432 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
433 int ivsize = crypto_aead_ivsize(aead);
434 struct caam_ctx *ctx = crypto_aead_ctx(aead);
435
436 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
437#endif
438 edesc = (struct ipsec_esp_edesc *)((char *)desc -
439 offsetof(struct ipsec_esp_edesc, hw_desc));
440
441 if (err) {
442 char tmp[CAAM_ERROR_STR_MAX];
443
444 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
445 }
446
447 ipsec_esp_unmap(jrdev, edesc, areq);
448
449#ifdef DEBUG
450 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
451 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
452 areq->assoclen , 1);
453 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
454 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
455 edesc->src_nents ? 100 : ivsize, 1);
456 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
457 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
458 edesc->src_nents ? 100 : areq->cryptlen +
459 ctx->authsize + 4, 1);
460#endif
461
462 kfree(edesc);
463
464 aead_request_complete(areq, err);
465}
466
467static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
468 void *context)
469{
470 struct aead_request *areq = context;
471 struct ipsec_esp_edesc *edesc;
472#ifdef DEBUG
473 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
474 struct caam_ctx *ctx = crypto_aead_ctx(aead);
475
476 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
477#endif
478 edesc = (struct ipsec_esp_edesc *)((char *)desc -
479 offsetof(struct ipsec_esp_edesc, hw_desc));
480
481 if (err) {
482 char tmp[CAAM_ERROR_STR_MAX];
483
484 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
485 }
486
487 ipsec_esp_unmap(jrdev, edesc, areq);
488
489 /*
490 * verify hw auth check passed else return -EBADMSG
491 */
492 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
493 err = -EBADMSG;
494
495#ifdef DEBUG
496 print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
497 DUMP_PREFIX_ADDRESS, 16, 4,
498 ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
499 sizeof(struct iphdr) + areq->assoclen +
500 ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
501 ctx->authsize + 36, 1);
502 if (!err && edesc->link_tbl_bytes) {
503 struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
504 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
505 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
506 sg->length + ctx->authsize + 16, 1);
507 }
508#endif
509 kfree(edesc);
510
511 aead_request_complete(areq, err);
512}
513
514/*
515 * convert scatterlist to h/w link table format
516 * scatterlist must have been previously dma mapped
517 */
518static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
519 struct link_tbl_entry *link_tbl_ptr, u32 offset)
520{
521 while (sg_count) {
522 link_tbl_ptr->ptr = sg_dma_address(sg);
523 link_tbl_ptr->len = sg_dma_len(sg);
524 link_tbl_ptr->reserved = 0;
525 link_tbl_ptr->buf_pool_id = 0;
526 link_tbl_ptr->offset = offset;
527 link_tbl_ptr++;
528 sg = sg_next(sg);
529 sg_count--;
530 }
531
532 /* set Final bit (marks end of link table) */
533 link_tbl_ptr--;
534 link_tbl_ptr->len |= 0x40000000;
535}
536
537/*
538 * fill in and submit ipsec_esp job descriptor
539 */
540static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
541 u32 encrypt,
542 void (*callback) (struct device *dev, u32 *desc,
543 u32 err, void *context))
544{
545 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
546 struct caam_ctx *ctx = crypto_aead_ctx(aead);
547 struct device *jrdev = ctx->jrdev;
548 u32 *desc = edesc->hw_desc, options;
549 int ret, sg_count, assoc_sg_count;
550 int ivsize = crypto_aead_ivsize(aead);
551 int authsize = ctx->authsize;
552 dma_addr_t ptr, dst_dma, src_dma;
553#ifdef DEBUG
554 u32 *sh_desc = ctx->sh_desc;
555
556 debug("assoclen %d cryptlen %d authsize %d\n",
557 areq->assoclen, areq->cryptlen, authsize);
558 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
559 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
560 areq->assoclen , 1);
561 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
562 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
563 edesc->src_nents ? 100 : ivsize, 1);
564 print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
565 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
566 edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
567 print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
568 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
569 desc_bytes(sh_desc), 1);
570#endif
571 assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
572 DMA_TO_DEVICE);
573 if (areq->src == areq->dst)
574 sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
575 DMA_BIDIRECTIONAL);
576 else
577 sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
578 DMA_TO_DEVICE);
579
580 /* start auth operation */
581 append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
582 (encrypt ? : OP_ALG_ICV_ON));
583
584 /* Load FIFO with data for Class 2 CHA */
585 options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
586 if (!edesc->assoc_nents) {
587 ptr = sg_dma_address(areq->assoc);
588 } else {
589 sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
590 edesc->link_tbl, 0);
591 ptr = edesc->link_tbl_dma;
592 options |= LDST_SGF;
593 }
594 append_fifo_load(desc, ptr, areq->assoclen, options);
595
596 /* copy iv from cipher/class1 input context to class2 infifo */
597 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
598
599 if (!encrypt) {
600 u32 *jump_cmd, *uncond_jump_cmd;
601
602 /* JUMP if shared */
603 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
604
605 /* start class 1 (cipher) operation, non-shared version */
606 append_operation(desc, ctx->class1_alg_type |
607 OP_ALG_AS_INITFINAL);
608
609 uncond_jump_cmd = append_jump(desc, 0);
610
611 set_jump_tgt_here(desc, jump_cmd);
612
613 /* start class 1 (cipher) operation, shared version */
614 append_operation(desc, ctx->class1_alg_type |
615 OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
616 set_jump_tgt_here(desc, uncond_jump_cmd);
617 } else
618 append_operation(desc, ctx->class1_alg_type |
619 OP_ALG_AS_INITFINAL | encrypt);
620
621 /* load payload & instruct to class2 to snoop class 1 if encrypting */
622 options = 0;
623 if (!edesc->src_nents) {
624 src_dma = sg_dma_address(areq->src);
625 } else {
626 sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
627 edesc->assoc_nents, 0);
628 src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
629 sizeof(struct link_tbl_entry);
630 options |= LDST_SGF;
631 }
632 append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
633 append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
634 FIFOLD_TYPE_LASTBOTH |
635 (encrypt ? FIFOLD_TYPE_MSG1OUT2
636 : FIFOLD_TYPE_MSG));
637
638 /* specify destination */
639 if (areq->src == areq->dst) {
640 dst_dma = src_dma;
641 } else {
642 sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
643 DMA_FROM_DEVICE);
644 if (!edesc->dst_nents) {
645 dst_dma = sg_dma_address(areq->dst);
646 options = 0;
647 } else {
648 sg_to_link_tbl(areq->dst, edesc->dst_nents,
649 edesc->link_tbl + edesc->assoc_nents +
650 edesc->src_nents, 0);
651 dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
652 edesc->src_nents) *
653 sizeof(struct link_tbl_entry);
654 options = LDST_SGF;
655 }
656 }
657 append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
658 append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
659
660 /* ICV */
661 if (encrypt)
662 append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
663 LDST_SRCDST_BYTE_CONTEXT);
664 else
665 append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
666 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
667
668#ifdef DEBUG
669 debug("job_desc_len %d\n", desc_len(desc));
670 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
671 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1);
672 print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
673 DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
674 edesc->link_tbl_bytes, 1);
675#endif
676
677 ret = caam_jr_enqueue(jrdev, desc, callback, areq);
678 if (!ret)
679 ret = -EINPROGRESS;
680 else {
681 ipsec_esp_unmap(jrdev, edesc, areq);
682 kfree(edesc);
683 }
684
685 return ret;
686}
687
688/*
689 * derive number of elements in scatterlist
690 */
691static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
692{
693 struct scatterlist *sg = sg_list;
694 int sg_nents = 0;
695
696 *chained = 0;
697 while (nbytes > 0) {
698 sg_nents++;
699 nbytes -= sg->length;
700 if (!sg_is_last(sg) && (sg + 1)->length == 0)
701 *chained = 1;
702 sg = scatterwalk_sg_next(sg);
703 }
704
705 return sg_nents;
706}
707
708/*
709 * allocate and map the ipsec_esp extended descriptor
710 */
711static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
712 int desc_bytes)
713{
714 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
715 struct caam_ctx *ctx = crypto_aead_ctx(aead);
716 struct device *jrdev = ctx->jrdev;
717 gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
718 GFP_ATOMIC;
719 int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
720 struct ipsec_esp_edesc *edesc;
721
722 assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
723 BUG_ON(chained);
724 if (likely(assoc_nents == 1))
725 assoc_nents = 0;
726
727 src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
728 &chained);
729 BUG_ON(chained);
730 if (src_nents == 1)
731 src_nents = 0;
732
733 if (unlikely(areq->dst != areq->src)) {
734 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
735 &chained);
736 BUG_ON(chained);
737 if (dst_nents == 1)
738 dst_nents = 0;
739 }
740
741 link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
742 sizeof(struct link_tbl_entry);
743 debug("link_tbl_bytes %d\n", link_tbl_bytes);
744
745 /* allocate space for base edesc and hw desc commands, link tables */
746 edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
747 link_tbl_bytes, GFP_DMA | flags);
748 if (!edesc) {
749 dev_err(jrdev, "could not allocate extended descriptor\n");
750 return ERR_PTR(-ENOMEM);
751 }
752
753 edesc->assoc_nents = assoc_nents;
754 edesc->src_nents = src_nents;
755 edesc->dst_nents = dst_nents;
756 edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
757 desc_bytes;
758 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
759 link_tbl_bytes, DMA_TO_DEVICE);
760 edesc->link_tbl_bytes = link_tbl_bytes;
761
762 return edesc;
763}
764
765static int aead_authenc_encrypt(struct aead_request *areq)
766{
767 struct ipsec_esp_edesc *edesc;
768 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
769 struct caam_ctx *ctx = crypto_aead_ctx(aead);
770 struct device *jrdev = ctx->jrdev;
771 int ivsize = crypto_aead_ivsize(aead);
772 u32 *desc;
773 dma_addr_t iv_dma;
774
775 /* allocate extended descriptor */
776 edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN *
777 CAAM_CMD_SZ);
778 if (IS_ERR(edesc))
779 return PTR_ERR(edesc);
780
781 desc = edesc->hw_desc;
782
783 /* insert shared descriptor pointer */
784 init_job_desc_shared(desc, ctx->shared_desc_phys,
785 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
786
787 iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
788 /* check dma error */
789
790 append_load(desc, iv_dma, ivsize,
791 LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
792
793 return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
794}
795
796static int aead_authenc_decrypt(struct aead_request *req)
797{
798 struct crypto_aead *aead = crypto_aead_reqtfm(req);
799 int ivsize = crypto_aead_ivsize(aead);
800 struct caam_ctx *ctx = crypto_aead_ctx(aead);
801 struct device *jrdev = ctx->jrdev;
802 struct ipsec_esp_edesc *edesc;
803 u32 *desc;
804 dma_addr_t iv_dma;
805
806 req->cryptlen -= ctx->authsize;
807
808 /* allocate extended descriptor */
809 edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
810 CAAM_CMD_SZ);
811 if (IS_ERR(edesc))
812 return PTR_ERR(edesc);
813
814 desc = edesc->hw_desc;
815
816 /* insert shared descriptor pointer */
817 init_job_desc_shared(desc, ctx->shared_desc_phys,
818 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
819
820 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
821 /* check dma error */
822
823 append_load(desc, iv_dma, ivsize,
824 LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
825
826 return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
827}
828
829static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
830{
831 struct aead_request *areq = &req->areq;
832 struct ipsec_esp_edesc *edesc;
833 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
834 struct caam_ctx *ctx = crypto_aead_ctx(aead);
835 struct device *jrdev = ctx->jrdev;
836 int ivsize = crypto_aead_ivsize(aead);
837 dma_addr_t iv_dma;
838 u32 *desc;
839
840 iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);
841
842 debug("%s: giv %p\n", __func__, req->giv);
843
844 /* allocate extended descriptor */
845 edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
846 CAAM_CMD_SZ);
847 if (IS_ERR(edesc))
848 return PTR_ERR(edesc);
849
850 desc = edesc->hw_desc;
851
852 /* insert shared descriptor pointer */
853 init_job_desc_shared(desc, ctx->shared_desc_phys,
854 desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
855
856 /*
857 * LOAD IMM Info FIFO
858 * to DECO, Last, Padding, Random, Message, 16 bytes
859 */
860 append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
861 NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
862 NFIFOENTRY_PTYPE_RND | ivsize,
863 LDST_SRCDST_WORD_INFO_FIFO);
864
865 /*
866 * disable info fifo entries since the above serves as the entry
867 * this way, the MOVE command won't generate an entry.
868 * Note that this isn't required in more recent versions of
869 * SEC as a MOVE that doesn't do info FIFO entries is available.
870 */
871 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
872
873 /* MOVE DECO Alignment -> C1 Context 16 bytes */
874 append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);
875
876 /* re-enable info fifo entries */
877 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
878
879 /* MOVE C1 Context -> OFIFO 16 bytes */
880 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);
881
882 append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
883
884 return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
885}
886
887struct caam_alg_template {
888 char name[CRYPTO_MAX_ALG_NAME];
889 char driver_name[CRYPTO_MAX_ALG_NAME];
890 unsigned int blocksize;
891 struct aead_alg aead;
892 u32 class1_alg_type;
893 u32 class2_alg_type;
894 u32 alg_op;
895};
896
897static struct caam_alg_template driver_algs[] = {
898 /* single-pass ipsec_esp descriptor */
899 {
900 .name = "authenc(hmac(sha1),cbc(aes))",
901 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
902 .blocksize = AES_BLOCK_SIZE,
903 .aead = {
904 .setkey = aead_authenc_setkey,
905 .setauthsize = aead_authenc_setauthsize,
906 .encrypt = aead_authenc_encrypt,
907 .decrypt = aead_authenc_decrypt,
908 .givencrypt = aead_authenc_givencrypt,
909 .geniv = "<built-in>",
910 .ivsize = AES_BLOCK_SIZE,
911 .maxauthsize = SHA1_DIGEST_SIZE,
912 },
913 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
914 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
915 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
916 },
917 {
918 .name = "authenc(hmac(sha256),cbc(aes))",
919 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
920 .blocksize = AES_BLOCK_SIZE,
921 .aead = {
922 .setkey = aead_authenc_setkey,
923 .setauthsize = aead_authenc_setauthsize,
924 .encrypt = aead_authenc_encrypt,
925 .decrypt = aead_authenc_decrypt,
926 .givencrypt = aead_authenc_givencrypt,
927 .geniv = "<built-in>",
928 .ivsize = AES_BLOCK_SIZE,
929 .maxauthsize = SHA256_DIGEST_SIZE,
930 },
931 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
932 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
933 OP_ALG_AAI_HMAC_PRECOMP,
934 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
935 },
936 {
937 .name = "authenc(hmac(sha512),cbc(aes))",
938 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
939 .blocksize = AES_BLOCK_SIZE,
940 .aead = {
941 .setkey = aead_authenc_setkey,
942 .setauthsize = aead_authenc_setauthsize,
943 .encrypt = aead_authenc_encrypt,
944 .decrypt = aead_authenc_decrypt,
945 .givencrypt = aead_authenc_givencrypt,
946 .geniv = "<built-in>",
947 .ivsize = AES_BLOCK_SIZE,
948 .maxauthsize = SHA512_DIGEST_SIZE,
949 },
950 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
951 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
952 OP_ALG_AAI_HMAC_PRECOMP,
953 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
954 },
955 {
956 .name = "authenc(hmac(sha1),cbc(des3_ede))",
957 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
958 .blocksize = DES3_EDE_BLOCK_SIZE,
959 .aead = {
960 .setkey = aead_authenc_setkey,
961 .setauthsize = aead_authenc_setauthsize,
962 .encrypt = aead_authenc_encrypt,
963 .decrypt = aead_authenc_decrypt,
964 .givencrypt = aead_authenc_givencrypt,
965 .geniv = "<built-in>",
966 .ivsize = DES3_EDE_BLOCK_SIZE,
967 .maxauthsize = SHA1_DIGEST_SIZE,
968 },
969 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
970 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
971 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
972 },
973 {
974 .name = "authenc(hmac(sha256),cbc(des3_ede))",
975 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
976 .blocksize = DES3_EDE_BLOCK_SIZE,
977 .aead = {
978 .setkey = aead_authenc_setkey,
979 .setauthsize = aead_authenc_setauthsize,
980 .encrypt = aead_authenc_encrypt,
981 .decrypt = aead_authenc_decrypt,
982 .givencrypt = aead_authenc_givencrypt,
983 .geniv = "<built-in>",
984 .ivsize = DES3_EDE_BLOCK_SIZE,
985 .maxauthsize = SHA256_DIGEST_SIZE,
986 },
987 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
988 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
989 OP_ALG_AAI_HMAC_PRECOMP,
990 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
991 },
992 {
993 .name = "authenc(hmac(sha512),cbc(des3_ede))",
994 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
995 .blocksize = DES3_EDE_BLOCK_SIZE,
996 .aead = {
997 .setkey = aead_authenc_setkey,
998 .setauthsize = aead_authenc_setauthsize,
999 .encrypt = aead_authenc_encrypt,
1000 .decrypt = aead_authenc_decrypt,
1001 .givencrypt = aead_authenc_givencrypt,
1002 .geniv = "<built-in>",
1003 .ivsize = DES3_EDE_BLOCK_SIZE,
1004 .maxauthsize = SHA512_DIGEST_SIZE,
1005 },
1006 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1007 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1008 OP_ALG_AAI_HMAC_PRECOMP,
1009 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1010 },
1011 {
1012 .name = "authenc(hmac(sha1),cbc(des))",
1013 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1014 .blocksize = DES_BLOCK_SIZE,
1015 .aead = {
1016 .setkey = aead_authenc_setkey,
1017 .setauthsize = aead_authenc_setauthsize,
1018 .encrypt = aead_authenc_encrypt,
1019 .decrypt = aead_authenc_decrypt,
1020 .givencrypt = aead_authenc_givencrypt,
1021 .geniv = "<built-in>",
1022 .ivsize = DES_BLOCK_SIZE,
1023 .maxauthsize = SHA1_DIGEST_SIZE,
1024 },
1025 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1026 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1027 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1028 },
1029 {
1030 .name = "authenc(hmac(sha256),cbc(des))",
1031 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1032 .blocksize = DES_BLOCK_SIZE,
1033 .aead = {
1034 .setkey = aead_authenc_setkey,
1035 .setauthsize = aead_authenc_setauthsize,
1036 .encrypt = aead_authenc_encrypt,
1037 .decrypt = aead_authenc_decrypt,
1038 .givencrypt = aead_authenc_givencrypt,
1039 .geniv = "<built-in>",
1040 .ivsize = DES_BLOCK_SIZE,
1041 .maxauthsize = SHA256_DIGEST_SIZE,
1042 },
1043 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1044 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1045 OP_ALG_AAI_HMAC_PRECOMP,
1046 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1047 },
1048 {
1049 .name = "authenc(hmac(sha512),cbc(des))",
1050 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1051 .blocksize = DES_BLOCK_SIZE,
1052 .aead = {
1053 .setkey = aead_authenc_setkey,
1054 .setauthsize = aead_authenc_setauthsize,
1055 .encrypt = aead_authenc_encrypt,
1056 .decrypt = aead_authenc_decrypt,
1057 .givencrypt = aead_authenc_givencrypt,
1058 .geniv = "<built-in>",
1059 .ivsize = DES_BLOCK_SIZE,
1060 .maxauthsize = SHA512_DIGEST_SIZE,
1061 },
1062 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1063 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1064 OP_ALG_AAI_HMAC_PRECOMP,
1065 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1066 },
1067};
1068
1069struct caam_crypto_alg {
1070 struct list_head entry;
1071 struct device *ctrldev;
1072 int class1_alg_type;
1073 int class2_alg_type;
1074 int alg_op;
1075 struct crypto_alg crypto_alg;
1076};
1077
1078static int caam_cra_init(struct crypto_tfm *tfm)
1079{
1080 struct crypto_alg *alg = tfm->__crt_alg;
1081 struct caam_crypto_alg *caam_alg =
1082 container_of(alg, struct caam_crypto_alg, crypto_alg);
1083 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
1084 struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
1085 int tgt_jr = atomic_inc_return(&priv->tfm_count);
1086
1087 /*
1088 * distribute tfms across job rings to ensure in-order
1089 * crypto request processing per tfm
1090 */
1091 ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
1092
1093 /* copy descriptor header template value */
1094 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
1095 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
1096 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
1097
1098 return 0;
1099}
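/*
 * Worked example of the ring assignment above: tfm_count starts at -1,
 * so successive tfms see tgt_jr = 0, 1, 2, 3, ... With two job rings,
 * (tgt_jr / 2) % 2 yields ring 0, 0, 1, 1, 0, 0, ... - consecutive
 * pairs of tfms share a ring, and each tfm stays on one ring so its
 * requests complete in order.
 */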
1100
1101static void caam_cra_exit(struct crypto_tfm *tfm)
1102{
1103 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
1104
1105 if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
1106 dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
1107 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
1108 kfree(ctx->sh_desc);
1109
1110 if (!dma_mapping_error(ctx->jrdev, ctx->key_phys))
1111 dma_unmap_single(ctx->jrdev, ctx->key_phys,
1112 ctx->split_key_pad_len + ctx->enckeylen,
1113 DMA_TO_DEVICE);
1114 kfree(ctx->key);
1115}
1116
1117static void __exit caam_algapi_exit(void)
1118{
1119
1120 struct device_node *dev_node;
1121 struct platform_device *pdev;
1122 struct device *ctrldev;
1123 struct caam_drv_private *priv;
1124 struct caam_crypto_alg *t_alg, *n;
1125 int i, err;
1126
1127 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1128 if (!dev_node)
1129 return;
1130
1131 pdev = of_find_device_by_node(dev_node);
1132 if (!pdev)
1133 return;
1134
1135 ctrldev = &pdev->dev;
1136 of_node_put(dev_node);
1137 priv = dev_get_drvdata(ctrldev);
1138
1139 if (!priv->alg_list.next)
1140 return;
1141
1142 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1143 crypto_unregister_alg(&t_alg->crypto_alg);
1144 list_del(&t_alg->entry);
1145 kfree(t_alg);
1146 }
1147
1148 for (i = 0; i < priv->total_jobrs; i++) {
1149 err = caam_jr_deregister(priv->algapi_jr[i]);
1150 if (err < 0)
1151 break;
1152 }
1153 kfree(priv->algapi_jr);
1154}
1155
1156static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
1157 struct caam_alg_template
1158 *template)
1159{
1160 struct caam_crypto_alg *t_alg;
1161 struct crypto_alg *alg;
1162
1163 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
1164 if (!t_alg) {
1165 dev_err(ctrldev, "failed to allocate t_alg\n");
1166 return ERR_PTR(-ENOMEM);
1167 }
1168
1169 alg = &t_alg->crypto_alg;
1170
1171 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1172 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1173 template->driver_name);
1174 alg->cra_module = THIS_MODULE;
1175 alg->cra_init = caam_cra_init;
1176 alg->cra_exit = caam_cra_exit;
1177 alg->cra_priority = CAAM_CRA_PRIORITY;
1178 alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
1179 alg->cra_blocksize = template->blocksize;
1180 alg->cra_alignmask = 0;
1181 alg->cra_type = &crypto_aead_type;
1182 alg->cra_ctxsize = sizeof(struct caam_ctx);
1183 alg->cra_u.aead = template->aead;
1184
1185 t_alg->class1_alg_type = template->class1_alg_type;
1186 t_alg->class2_alg_type = template->class2_alg_type;
1187 t_alg->alg_op = template->alg_op;
1188 t_alg->ctrldev = ctrldev;
1189
1190 return t_alg;
1191}
1192
1193static int __init caam_algapi_init(void)
1194{
1195 struct device_node *dev_node;
1196 struct platform_device *pdev;
1197 struct device *ctrldev, **jrdev;
1198 struct caam_drv_private *priv;
1199 int i = 0, err = 0;
1200
1201 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1202 if (!dev_node)
1203 return -ENODEV;
1204
1205 pdev = of_find_device_by_node(dev_node);
1206 if (!pdev)
1207 return -ENODEV;
1208
1209 ctrldev = &pdev->dev;
1210 priv = dev_get_drvdata(ctrldev);
1211 of_node_put(dev_node);
1212
1213 INIT_LIST_HEAD(&priv->alg_list);
1214
1215 jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
1216 if (!jrdev)
1217 return -ENOMEM;
1218
1219 for (i = 0; i < priv->total_jobrs; i++) {
1220 err = caam_jr_register(ctrldev, &jrdev[i]);
1221 if (err < 0)
1222 break;
1223 }
1224 if (err < 0 && i == 0) {
1225 dev_err(ctrldev, "algapi error in job ring registration: %d\n",
1226 err);
1227 kfree(jrdev);
1228 return err;
1229 }
1230
1231 priv->num_jrs_for_algapi = i;
1232 priv->algapi_jr = jrdev;
1233 atomic_set(&priv->tfm_count, -1);
1234
1235 /* register crypto algorithms the device supports */
1236 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1237 /* TODO: check if h/w supports alg */
1238 struct caam_crypto_alg *t_alg;
1239
1240 t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
1241 if (IS_ERR(t_alg)) {
1242 err = PTR_ERR(t_alg);
1243 dev_warn(ctrldev, "%s alg allocation failed\n",
1244 driver_algs[i].driver_name);
1245 continue;
1246 }
1247
1248 err = crypto_register_alg(&t_alg->crypto_alg);
1249 if (err) {
1250 dev_warn(ctrldev, "%s alg registration failed\n",
1251 t_alg->crypto_alg.cra_driver_name);
1252 kfree(t_alg);
1253 } else {
1254 list_add_tail(&t_alg->entry, &priv->alg_list);
1255 dev_info(ctrldev, "%s\n",
1256 t_alg->crypto_alg.cra_driver_name);
1257 }
1258 }
1259
1260 return err;
1261}
1262
1263module_init(caam_algapi_init);
1264module_exit(caam_algapi_exit);
1265
1266MODULE_LICENSE("GPL");
1267MODULE_DESCRIPTION("FSL CAAM support for crypto API");
1268MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
new file mode 100644
index 000000000000..950450346f70
--- /dev/null
+++ b/drivers/crypto/caam/compat.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2008-2011 Freescale Semiconductor, Inc.
3 */
4
5#ifndef CAAM_COMPAT_H
6#define CAAM_COMPAT_H
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/mod_devicetable.h>
11#include <linux/device.h>
12#include <linux/interrupt.h>
13#include <linux/crypto.h>
14#include <linux/hw_random.h>
15#include <linux/of_platform.h>
16#include <linux/dma-mapping.h>
17#include <linux/io.h>
18#include <linux/spinlock.h>
19#include <linux/rtnetlink.h>
20#include <linux/in.h>
21#include <linux/slab.h>
22#include <linux/types.h>
23#include <linux/debugfs.h>
24#include <linux/circ_buf.h>
25#include <net/xfrm.h>
26
27#include <crypto/algapi.h>
28#include <crypto/aes.h>
29#include <crypto/des.h>
30#include <crypto/sha.h>
31#include <crypto/aead.h>
32#include <crypto/authenc.h>
33#include <crypto/scatterwalk.h>
34
35#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
new file mode 100644
index 000000000000..9009713a3c2e
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.c
@@ -0,0 +1,269 @@
1/*
2 * CAAM control-plane driver backend
3 * Controller-level driver, kernel property detection, initialization
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#include "compat.h"
9#include "regs.h"
10#include "intern.h"
11#include "jr.h"
12
13static int caam_remove(struct platform_device *pdev)
14{
15 struct device *ctrldev;
16 struct caam_drv_private *ctrlpriv;
17 struct caam_drv_private_jr *jrpriv;
18 struct caam_full __iomem *topregs;
19 int ring, ret = 0;
20
21 ctrldev = &pdev->dev;
22 ctrlpriv = dev_get_drvdata(ctrldev);
23 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
24
25 /* shut down JobRs */
26 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
27 ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
28 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
29 irq_dispose_mapping(jrpriv->irq);
30 }
31
32 /* Shut down debug views */
33#ifdef CONFIG_DEBUG_FS
34 debugfs_remove_recursive(ctrlpriv->dfs_root);
35#endif
36
37 /* Unmap controller region */
38 iounmap(&topregs->ctrl);
39
40 kfree(ctrlpriv->jrdev);
41 kfree(ctrlpriv);
42
43 return ret;
44}
45
46/* Probe routine for CAAM top (controller) level */
47static int caam_probe(struct platform_device *pdev)
48{
49 int d, ring, rspec;
50 struct device *dev;
51 struct device_node *nprop, *np;
52 struct caam_ctrl __iomem *ctrl;
53 struct caam_full __iomem *topregs;
54 struct caam_drv_private *ctrlpriv;
55 struct caam_perfmon *perfmon;
56 struct caam_deco **deco;
57 u32 deconum;
58
59 ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
60 if (!ctrlpriv)
61 return -ENOMEM;
62
63 dev = &pdev->dev;
64 dev_set_drvdata(dev, ctrlpriv);
65 ctrlpriv->pdev = pdev;
66 nprop = pdev->dev.of_node;
67
68 /* Get configuration properties from device tree */
69 /* First, get register page */
70 ctrl = of_iomap(nprop, 0);
71 if (ctrl == NULL) {
72 dev_err(dev, "caam: of_iomap() failed\n");
73 return -ENOMEM;
74 }
75 ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
76
77 /* topregs used to derive pointers to CAAM sub-blocks only */
78 topregs = (struct caam_full __iomem *)ctrl;
79
80 /* Get the IRQ of the controller (for security violations only) */
81 ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
82
83 /*
84 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
85 * 36-bit pointers in master configuration register
86 */
87 setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
88 (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
89
90 if (sizeof(dma_addr_t) == sizeof(u64))
91 dma_set_mask(dev, DMA_BIT_MASK(36));
92
93 /* Find out how many DECOs are present */
94 deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
95 CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
96
97 ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
98 GFP_KERNEL);
99
100 deco = (struct caam_deco __force **)&topregs->deco;
101 for (d = 0; d < deconum; d++)
102 ctrlpriv->deco[d] = deco[d];
103
104 /*
105 * Detect and enable JobRs
106 * First, find out how many ring spec'ed, allocate references
107 * for all, then go probe each one.
108 */
109 rspec = 0;
110 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
111 rspec++;
112 ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
113	if (ctrlpriv->jrdev == NULL) {
114		iounmap(&topregs->ctrl);
		kfree(ctrlpriv->deco);
		kfree(ctrlpriv);
115		return -ENOMEM;
116	}
117
118 ring = 0;
119 ctrlpriv->total_jobrs = 0;
120 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
121 caam_jr_probe(pdev, np, ring);
122 ctrlpriv->total_jobrs++;
123 ring++;
124 }
125
126 /* Check to see if QI present. If so, enable */
127 ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
128 CTPR_QI_MASK);
129 if (ctrlpriv->qi_present) {
130 ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
131 /* This is all that's required to physically enable QI */
132 wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
133 }
134
135 /* If no QI and no rings specified, quit and go home */
136 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
137 dev_err(dev, "no queues configured, terminating\n");
138 caam_remove(pdev);
139 return -ENOMEM;
140 }
141
142 /* NOTE: RTIC detection ought to go here, around Si time */
143
144 /* Initialize queue allocator lock */
145 spin_lock_init(&ctrlpriv->jr_alloc_lock);
146
147 /* Report "alive" for developer to see */
148 dev_info(dev, "device ID = 0x%016llx\n",
149 rd_reg64(&topregs->ctrl.perfmon.caam_id));
150 dev_info(dev, "job rings = %d, qi = %d\n",
151 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
152
153#ifdef CONFIG_DEBUG_FS
154 /*
155 * FIXME: needs better naming distinction, as some amalgamation of
156 * "caam" and nprop->full_name. The OF name isn't distinctive,
157 * but does separate instances
158 */
159 perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
160
161 ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
162 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
163
164 /* Controller-level - performance monitor counters */
165 ctrlpriv->ctl_rq_dequeued =
166 debugfs_create_u64("rq_dequeued",
167 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
168 ctrlpriv->ctl, &perfmon->req_dequeued);
169 ctrlpriv->ctl_ob_enc_req =
170 debugfs_create_u64("ob_rq_encrypted",
171 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
172 ctrlpriv->ctl, &perfmon->ob_enc_req);
173 ctrlpriv->ctl_ib_dec_req =
174 debugfs_create_u64("ib_rq_decrypted",
175 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
176 ctrlpriv->ctl, &perfmon->ib_dec_req);
177 ctrlpriv->ctl_ob_enc_bytes =
178 debugfs_create_u64("ob_bytes_encrypted",
179 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
180 ctrlpriv->ctl, &perfmon->ob_enc_bytes);
181 ctrlpriv->ctl_ob_prot_bytes =
182 debugfs_create_u64("ob_bytes_protected",
183 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
184 ctrlpriv->ctl, &perfmon->ob_prot_bytes);
185 ctrlpriv->ctl_ib_dec_bytes =
186 debugfs_create_u64("ib_bytes_decrypted",
187 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
188 ctrlpriv->ctl, &perfmon->ib_dec_bytes);
189 ctrlpriv->ctl_ib_valid_bytes =
190 debugfs_create_u64("ib_bytes_validated",
191 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
192 ctrlpriv->ctl, &perfmon->ib_valid_bytes);
193
194 /* Controller level - global status values */
195 ctrlpriv->ctl_faultaddr =
196 debugfs_create_u64("fault_addr",
197 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
198 ctrlpriv->ctl, &perfmon->faultaddr);
199 ctrlpriv->ctl_faultdetail =
200 debugfs_create_u32("fault_detail",
201 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
202 ctrlpriv->ctl, &perfmon->faultdetail);
203 ctrlpriv->ctl_faultstatus =
204 debugfs_create_u32("fault_status",
205 S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
206 ctrlpriv->ctl, &perfmon->status);
207
208 /* Internal covering keys (useful in non-secure mode only) */
209 ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
210 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
211 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
212 S_IFCHR | S_IRUSR |
213 S_IRGRP | S_IROTH,
214 ctrlpriv->ctl,
215 &ctrlpriv->ctl_kek_wrap);
216
217 ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
218 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
219 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
220 S_IFCHR | S_IRUSR |
221 S_IRGRP | S_IROTH,
222 ctrlpriv->ctl,
223 &ctrlpriv->ctl_tkek_wrap);
224
225 ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
226 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
227 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
228 S_IFCHR | S_IRUSR |
229 S_IRGRP | S_IROTH,
230 ctrlpriv->ctl,
231 &ctrlpriv->ctl_tdsk_wrap);
232#endif
233 return 0;
234}
235
236static const struct of_device_id caam_match[] = {
237 {
238 .compatible = "fsl,sec-v4.0",
239 },
240 {},
241};
242MODULE_DEVICE_TABLE(of, caam_match);
243
244static struct platform_driver caam_driver = {
245 .driver = {
246 .name = "caam",
247 .owner = THIS_MODULE,
248 .of_match_table = caam_match,
249 },
250 .probe = caam_probe,
251 .remove = __devexit_p(caam_remove),
252};
253
254static int __init caam_base_init(void)
255{
256 return platform_driver_register(&caam_driver);
257}
258
259static void __exit caam_base_exit(void)
260{
261	platform_driver_unregister(&caam_driver);
262}
263
264module_init(caam_base_init);
265module_exit(caam_base_exit);
266
267MODULE_LICENSE("GPL");
268MODULE_DESCRIPTION("FSL CAAM request backend");
269MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
new file mode 100644
index 000000000000..974a75842da9
--- /dev/null
+++ b/drivers/crypto/caam/desc.h
@@ -0,0 +1,1605 @@
1/*
2 * CAAM descriptor composition header
3 * Definitions to support CAAM descriptor instruction generation
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#ifndef DESC_H
9#define DESC_H
10
11/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
12#define MAX_CAAM_DESCSIZE 64
13
14/* Block size of any entity covered/uncovered with a KEK/TKEK */
15#define KEK_BLOCKSIZE 16
16
17/*
18 * Supported descriptor command types as they show up
19 * inside a descriptor command word.
20 */
21#define CMD_SHIFT 27
22#define CMD_MASK 0xf8000000
23
24#define CMD_KEY (0x00 << CMD_SHIFT)
25#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
26#define CMD_LOAD (0x02 << CMD_SHIFT)
27#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
28#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
29#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
30#define CMD_STORE (0x0a << CMD_SHIFT)
31#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
32#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
33#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
34#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
35#define CMD_MOVE (0x0f << CMD_SHIFT)
36#define CMD_OPERATION (0x10 << CMD_SHIFT)
37#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
38#define CMD_JUMP (0x14 << CMD_SHIFT)
39#define CMD_MATH (0x15 << CMD_SHIFT)
40#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
41#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
42#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
43#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
44
45/* General-purpose class selector for all commands */
46#define CLASS_SHIFT 25
47#define CLASS_MASK (0x03 << CLASS_SHIFT)
48
49#define CLASS_NONE (0x00 << CLASS_SHIFT)
50#define CLASS_1 (0x01 << CLASS_SHIFT)
51#define CLASS_2 (0x02 << CLASS_SHIFT)
52#define CLASS_BOTH (0x03 << CLASS_SHIFT)
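
/*
 * Editor's sketch (not part of the original patch; the example_* name
 * is hypothetical): a descriptor word is always a CMD_* opcode OR'ed
 * with a CLASS_* selector plus the command-specific fields defined in
 * the sections below.
 */
static inline u32 example_cmd_word(u32 cmd, u32 class_sel, u32 fields)
{
	return (cmd & CMD_MASK) | (class_sel & CLASS_MASK) | fields;
}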
53
54/*
55 * Descriptor header command constructs
56 * Covers shared, job, and trusted descriptor headers
57 */
58
59/*
60 * Do Not Run - marks a descriptor inexecutable if there was
61 * a preceding error somewhere
62 */
63#define HDR_DNR 0x01000000
64
65/*
66 * ONE - should always be set. Combination of ONE (always
67 * set) and ZRO (always clear) forms an endianness sanity check
68 */
69#define HDR_ONE 0x00800000
70#define HDR_ZRO 0x00008000
71
72/* Start Index or SharedDesc Length */
73#define HDR_START_IDX_MASK 0x3f
74#define HDR_START_IDX_SHIFT 16
75
76/* If shared descriptor header, 6-bit length */
77#define HDR_DESCLEN_SHR_MASK 0x3f
78
79/* If non-shared header, 7-bit length */
80#define HDR_DESCLEN_MASK 0x7f
81
82/* This is a TrustedDesc (if not SharedDesc) */
83#define HDR_TRUSTED 0x00004000
84
85/* Make into TrustedDesc (if not SharedDesc) */
86#define HDR_MAKE_TRUSTED 0x00002000
87
88/* Save context if self-shared (if SharedDesc) */
89#define HDR_SAVECTX 0x00001000
90
91/* Next item points to SharedDesc */
92#define HDR_SHARED 0x00001000
93
94/*
95 * Reverse Execution Order - execute JobDesc first, then
96 * execute SharedDesc (normally SharedDesc goes first).
97 */
98#define HDR_REVERSE 0x00000800
99
100/* Propagate DNR property to SharedDesc */
101#define HDR_PROP_DNR 0x00000800
102
103/* JobDesc/SharedDesc share property */
104#define HDR_SD_SHARE_MASK 0x03
105#define HDR_SD_SHARE_SHIFT 8
106#define HDR_JD_SHARE_MASK 0x07
107#define HDR_JD_SHARE_SHIFT 8
108
109#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
110#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
111#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
112#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
113#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
114
115/* JobDesc/SharedDesc descriptor length */
116#define HDR_JD_LENGTH_MASK 0x7f
117#define HDR_SD_LENGTH_MASK 0x3f
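
/*
 * Editor's sketch (hypothetical helper, not in the original patch):
 * a job descriptor header for a len-word descriptor whose next
 * pointer references a shared descriptor.
 */
static inline u32 example_job_hdr(unsigned int len)
{
	return CMD_DESC_HDR | HDR_ONE | HDR_SHARED |
	       (len & HDR_JD_LENGTH_MASK);
}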
118
119/*
120 * KEY/SEQ_KEY Command Constructs
121 */
122
123/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
124#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
125#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
126
127/* Scatter-Gather Table/Variable Length Field */
128#define KEY_SGF 0x01000000
129#define KEY_VLF 0x01000000
130
131/* Immediate - Key follows command in the descriptor */
132#define KEY_IMM 0x00800000
133
134/*
135 * Encrypted - Key is encrypted either with the KEK, or
136 * with the TDKEK if TK is set
137 */
138#define KEY_ENC 0x00400000
139
140/*
141 * No Write Back - Do not allow key to be FIFO STOREd
142 */
143#define KEY_NWB 0x00200000
144
145/*
146 * Enhanced Encryption of Key
147 */
148#define KEY_EKT 0x00100000
149
150/*
151 * Encrypted with Trusted Key
152 */
153#define KEY_TK 0x00008000
154
155/*
156 * KDEST - Key Destination: 0 - class key register,
157 * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
158 */
159#define KEY_DEST_SHIFT 16
160#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
161
162#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
163#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
164#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
165#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
166
167/* Length in bytes */
168#define KEY_LENGTH_MASK 0x000003ff
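
/*
 * Editor's sketch (hypothetical helper): a class 1 KEY command for an
 * immediate (inlined), unencrypted key of keylen bytes, destined for
 * the class key register.
 */
static inline u32 example_key_cmd(unsigned int keylen)
{
	return CMD_KEY | CLASS_1 | KEY_IMM | KEY_DEST_CLASS_REG |
	       (keylen & KEY_LENGTH_MASK);
}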
169
170/*
171 * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
172 */
173
174/*
175 * Load/Store Destination: 0 = class independent CCB,
176 * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
177 */
178#define LDST_CLASS_SHIFT 25
179#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
180#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
181#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
182#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
183#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
184
185/* Scatter-Gather Table/Variable Length Field */
186#define LDST_SGF 0x01000000
187#define LDST_VLF LDST_SGF
188
189/* Immediate - Key follows this command in descriptor */
190#define LDST_IMM_MASK 1
191#define LDST_IMM_SHIFT 23
192#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
193
194/* SRC/DST - Destination for LOAD, Source for STORE */
195#define LDST_SRCDST_SHIFT 16
196#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
197
198#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
199#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
200#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
201#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
202
203#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
204#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
205#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
206#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
207#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
208#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
209#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
210#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
211#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
212#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
213#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
214#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
215#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
216#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
217#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
218#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
219#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
220#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
221#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
222#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
223#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
224#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
225#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
226
227/* Offset in source/destination */
228#define LDST_OFFSET_SHIFT 8
229#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
230
231/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
232/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
233#define LDOFF_CHG_SHARE_SHIFT 0
234#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
235#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
236#define LDOFF_CHG_SHARE_OK_NO_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
237#define LDOFF_CHG_SHARE_OK_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
238
239#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
240#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
241
242#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
243#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
244#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
245#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
246#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
247
248#define LDOFF_CHG_SEQLIODN_SHIFT 6
249#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
250#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
251#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
252#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
253
254/* Data length in bytes */
255#define LDST_LEN_SHIFT 0
256#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
257
258/* Special Length definitions when dst=deco-ctrl */
259#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
260#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
261#define LDLEN_RST_OFIFO (1 << 5)
262#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
263#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
264#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
265#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
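
/*
 * Editor's sketch (hypothetical helper): a LOAD of len immediate
 * bytes into the class 1 context register at byte offset off.
 */
static inline u32 example_load_cmd(unsigned int off, unsigned int len)
{
	return CMD_LOAD | LDST_CLASS_1_CCB | LDST_IMM |
	       LDST_SRCDST_BYTE_CONTEXT |
	       ((off << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
	       (len & LDST_LEN_MASK);
}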
266
267/*
268 * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
269 * Command Constructs
270 */
271
272/*
273 * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
274 * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
275 * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
276 */
277#define FIFOLD_CLASS_SHIFT 25
278#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
279#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
280#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
281#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
282#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
283
284#define FIFOST_CLASS_SHIFT 25
285#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
286#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
287#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
288#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
289
290/*
291 * Scatter-Gather Table/Variable Length Field
292 * If set for FIFO_LOAD, refers to a SG table. Within
293 * SEQ_FIFO_LOAD, is variable input sequence
294 */
295#define FIFOLDST_SGF_SHIFT 24
296#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
297#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
298#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
299#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
300
301/* Immediate - Data follows command in descriptor */
302#define FIFOLD_IMM_SHIFT 23
303#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
304#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
305
306/* Continue - Not the last FIFO store to come */
307#define FIFOST_CONT_SHIFT 23
308#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
309#define FIFOST_CONT (1 << FIFOST_CONT_SHIFT)
310
311/*
312 * Extended Length - use 32-bit extended length that
313 * follows the pointer field. Illegal with IMM set
314 */
315#define FIFOLDST_EXT_SHIFT 22
316#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
317#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
318
319/* Input data type */
320#define FIFOLD_TYPE_SHIFT 16
321#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
322#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
323
324/* PK types */
325#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
326#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
327#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
328#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
329#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
330#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
331#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
332#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
333#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
334#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
335#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
336#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
337#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
338#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
339
340/* Other types. Need to OR in last/flush bits as desired */
341#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
342#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
343#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
344#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
345#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
346#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
347#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
348
349/* Last/Flush bits for use with "other" types above */
350#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
351#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
352#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
353#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
354#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
355#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
356#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
357#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
358#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
359
360#define FIFOLDST_LEN_MASK 0xffff
361#define FIFOLDST_EXT_LEN_MASK 0xffffffff
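
/*
 * Editor's sketch (hypothetical helper): FIFO LOAD of the final
 * len-byte message block into the class 1 CCB, flushing it through
 * afterwards.
 */
static inline u32 example_fifo_load(unsigned int len)
{
	return CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
	       FIFOLD_TYPE_LAST1 | (len & FIFOLDST_LEN_MASK);
}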
362
363/* Output data types */
364#define FIFOST_TYPE_SHIFT 16
365#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
366
367#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
368#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
369#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
370#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
371#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
372#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
373#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
374#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
375#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
376#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
377#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
378#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
379#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
380#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
381#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
382#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
383#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
384#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
385#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
386#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
387#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
388#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
389#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
390#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
391#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
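
/*
 * Editor's sketch (hypothetical helper): FIFO STORE of len bytes of
 * message data to the output pointer.
 */
static inline u32 example_fifo_store(unsigned int len)
{
	return CMD_FIFO_STORE | FIFOST_TYPE_MESSAGE_DATA |
	       (len & FIFOLDST_LEN_MASK);
}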
392
393/*
394 * OPERATION Command Constructs
395 */
396
397/* Operation type selectors - OP TYPE */
398#define OP_TYPE_SHIFT 24
399#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
400
401#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
402#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
403#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
404#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
405#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
406#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
407
408/* ProtocolID selectors - PROTID */
409#define OP_PCLID_SHIFT 16
410#define OP_PCLID_MASK (0xff << OP_PCLID_SHIFT)
411
412/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
413#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
414#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
415#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
416#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
417#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
418#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
419#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
420#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
421#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
422#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
423#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
424#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
425
426/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
427#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
428#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
429#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
430#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
431#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
432#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
433#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
434#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
435#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
436#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
437
438/*
439 * ProtocolInfo selectors
440 */
441#define OP_PCLINFO_MASK 0xffff
442
443/* for OP_PCLID_IPSEC */
444#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
445#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
446
447#define OP_PCL_IPSEC_DES_IV64 0x0100
448#define OP_PCL_IPSEC_DES 0x0200
449#define OP_PCL_IPSEC_3DES 0x0300
450#define OP_PCL_IPSEC_AES_CBC 0x0c00
451#define OP_PCL_IPSEC_AES_CTR 0x0d00
452#define OP_PCL_IPSEC_AES_XTS 0x1600
453#define OP_PCL_IPSEC_AES_CCM8 0x0e00
454#define OP_PCL_IPSEC_AES_CCM12 0x0f00
455#define OP_PCL_IPSEC_AES_CCM16 0x1000
456#define OP_PCL_IPSEC_AES_GCM8 0x1200
457#define OP_PCL_IPSEC_AES_GCM12 0x1300
458#define OP_PCL_IPSEC_AES_GCM16 0x1400
459
460#define OP_PCL_IPSEC_HMAC_NULL 0x0000
461#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
462#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
463#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
464#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
465#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
466#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
467#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
468#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
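
/*
 * Editor's sketch (hypothetical helper): a protocol OPERATION word
 * selecting IPsec ESP encapsulation with AES-CBC encryption and
 * HMAC-SHA1-96 authentication.
 */
static inline u32 example_ipsec_encap_op(void)
{
	return CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_IPSEC |
	       OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96;
}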
469
470/* For SRTP - OP_PCLID_SRTP */
471#define OP_PCL_SRTP_CIPHER_MASK 0xff00
472#define OP_PCL_SRTP_AUTH_MASK 0x00ff
473
474#define OP_PCL_SRTP_AES_CTR 0x0d00
475
476#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
477
478/* For SSL 3.0 - OP_PCLID_SSL30 */
479#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
480#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
481#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
482#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
483#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
484#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
485#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
486#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
487#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
488#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
489#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
490#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
491#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
492#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
493#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
494#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
495#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
496
497#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
498#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
499#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
500#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
501#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
502#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
503#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
504#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
505#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
506#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
507#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
508#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
509#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
510#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
511#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
512#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
513#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
514
515#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
516
517#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
518#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
519#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
520#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
521#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
522#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
523#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
524#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
525#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
526#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
527#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
528#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
529#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
530#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
531#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
532#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
533#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
534#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
535
536#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
537
538#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
539
540#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
541#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
542#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
543#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
544#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
545#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
546#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
547
548#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
549#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
550#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
551#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
552#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
553#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
554#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
555
556#define OP_PCL_SSL30_RC4_128_MD5 0x0024
557#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
558#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
559
560#define OP_PCL_SSL30_RC4_40_MD5 0x002b
561#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
562#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
563
564#define OP_PCL_SSL30_RC4_128_SHA 0x0020
565#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
566#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
567#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
568#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
569#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
570#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
571#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
572#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
573#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
574
575#define OP_PCL_SSL30_RC4_40_SHA 0x0028
576
577
578/* For TLS 1.0 - OP_PCLID_TLS10 */
579#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
580#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
581#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
582#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
583#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
584#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
585#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
586#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
587#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
588#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
589#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
590#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
591#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
592#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
593#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
594#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
595#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
596
597#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
598#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
599#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
600#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
601#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
602#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
603#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
604#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
605#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
606#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
607#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
608#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
609#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
610#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
611#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
612#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
613#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
614
615/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
616
617#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
618#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
619#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
620#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
621#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
622#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
623#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
624#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
625#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
626#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
627#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
628#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
629#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
630#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
631#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
632#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
633#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
634#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
635
636#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
637
638#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
639
640#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
641#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
642#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
643#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
644#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
645#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
646#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
647
648
649#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
650#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
651#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
652#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
653#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
654#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
655#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
656
657#define OP_PCL_TLS10_RC4_128_MD5 0x0024
658#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
659#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
660
661#define OP_PCL_TLS10_RC4_40_MD5 0x002b
662#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
663#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
664
665#define OP_PCL_TLS10_RC4_128_SHA 0x0020
666#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
667#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
668#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
669#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
670#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
671#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
672#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
673#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
674#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
675
676#define OP_PCL_TLS10_RC4_40_SHA 0x0028
677
678#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
679#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
680#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
681#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
682#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
683#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
684#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
685#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
686#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
687#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
688#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
689#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
690#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
691#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
692#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
693#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
694#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
695#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
696#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
697#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
698#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
699
700
701
702/* For TLS 1.1 - OP_PCLID_TLS11 */
703#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
704#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
705#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
706#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
707#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
708#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
709#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
710#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
711#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
712#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
713#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
714#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
715#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
716#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
717#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
718#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
719#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
720
721#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
722#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
723#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
724#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
725#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
726#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
727#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
728#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
729#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
730#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
731#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
732#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
733#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
734#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
735#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
736#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
737#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
738
739/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
740
741#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
742#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
743#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
744#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
745#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
746#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
747#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
748#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
749#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
750#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
751#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
752#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
753#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
754#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
755#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
756#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
757#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
758#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
759
760#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
761
762#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
763
764#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
765#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
766#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
767#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
768#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
769#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
770#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
771
772#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
773#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
774#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
775#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
776#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
777#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
778#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
779
780#define OP_PCL_TLS11_RC4_128_MD5 0x0024
781#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
782#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
783
784#define OP_PCL_TLS11_RC4_40_MD5 0x002b
785#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
786#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
787
788#define OP_PCL_TLS11_RC4_128_SHA 0x0020
789#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
790#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
791#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
792#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
793#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
794#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
795#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
796#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
797#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
798
799#define OP_PCL_TLS11_RC4_40_SHA 0x0028
800
801#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
802#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
803#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
804#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
805#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
806#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
807#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
808#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
809#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
810#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
811#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
812#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
813#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
814#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
815#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
816#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
817#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
818#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
819#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
820#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
821#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
822
823
824/* For TLS 1.2 - OP_PCLID_TLS12 */
825#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
826#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
827#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
828#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
829#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
830#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
831#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
832#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
833#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
834#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
835#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
836#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
837#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
838#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
839#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
840#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
841#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
842
843#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
844#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
845#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
846#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
847#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
848#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
849#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
850#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
851#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
852#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
853#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
854#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
855#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
856#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
857#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
858#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
859#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
860
861/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
862
863#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
864#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
865#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
866#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
867#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
868#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
869#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
870#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
871#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
872#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
873#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
874#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
875#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
876#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
877#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
878#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
879#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
880#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
881
882#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
883
884#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
885
886#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
887#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
888#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
889#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
890#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
891#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
892#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
893
894#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
895#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
896#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
897#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
898#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
899#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
900#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
901
902#define OP_PCL_TLS12_RC4_128_MD5 0x0024
903#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
904#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
905
906#define OP_PCL_TLS12_RC4_40_MD5 0x002b
907#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
908#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
909
910#define OP_PCL_TLS12_RC4_128_SHA 0x0020
911#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
912#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
913#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
914#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
915#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
916#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
917#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
918#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
919#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
920
921#define OP_PCL_TLS12_RC4_40_SHA 0x0028
922
923/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
924#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
925#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
926#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
927#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
928#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
929
930/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
931#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
932#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
933#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
934#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
935#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
936
937/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
938
939#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
940#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
941#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
942#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
943#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
944#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
945#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
946#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
947#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
948#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
949#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
950#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
951#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
952#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
953#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
954#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
955#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
956#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
957#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
958#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
959#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
960
961/* For DTLS - OP_PCLID_DTLS */
962
963#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
964#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
965#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
966#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
967#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
968#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
969#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
970#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
971#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
972#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
973#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
974#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
975#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
976#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
977#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
978#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
979#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
980
981#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
982#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
983#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
984#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
985#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
986#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
987#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
988#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
989#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
990#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
991#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
992#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
993#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
994#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
995#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
996#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
997#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
998
999/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
1000
1001#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
1002#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
1003#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
1004#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
1005#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
1006#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
1007#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
1008#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
1009#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
1010#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
1011#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
1012#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
1013#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
1014#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
1015#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
1016#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
1017#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
1018#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
1019
1020#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
1021
1022#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
1023
1024#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
1025#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
1026#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
1027#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
1028#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
1029#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
1030#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
1031
1032
1033#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
1034#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
1035#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
1036#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
1037#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
1038#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
1039#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
1040
1041
1042#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
1043#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
1044#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
1045#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
1046#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
1047#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
1048#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
1049#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
1050#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
1051#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
1052#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
1053#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
1054#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
1055#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
1056#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
1057#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
1058#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
1059#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
1060#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
1061#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
1062#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
1063
1064/* 802.16 WiMAX protinfos */
1065#define OP_PCL_WIMAX_OFDM 0x0201
1066#define OP_PCL_WIMAX_OFDMA 0x0231
1067
1068/* 802.11 WiFi protinfos */
1069#define OP_PCL_WIFI 0xac04
1070
1071/* MacSec protinfos */
1072#define OP_PCL_MACSEC 0x0001
1073
1074/* PKI unidirectional protocol protinfo bits */
1075#define OP_PCL_PKPROT_TEST 0x0008
1076#define OP_PCL_PKPROT_DECRYPT 0x0004
1077#define OP_PCL_PKPROT_ECC 0x0002
1078#define OP_PCL_PKPROT_F2M 0x0001
1079
1080/* For non-protocol/alg-only op commands */
1081#define OP_ALG_TYPE_SHIFT 24
1082#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
1083#define OP_ALG_TYPE_CLASS1 2
1084#define OP_ALG_TYPE_CLASS2 4
1085
1086#define OP_ALG_ALGSEL_SHIFT 16
1087#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
1088#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
1089#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
1090#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
1091#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
1092#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
1093#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
1094#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
1095#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
1096#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
1097#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
1098#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
1099#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
1100#define OP_ALG_ALGSEL_SNOW (0x60 << OP_ALG_ALGSEL_SHIFT)
1101#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
1102#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
1103#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
1104#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
1105
1106#define OP_ALG_AAI_SHIFT 4
1107#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
1108
1109/* blockcipher AAI set */
1110#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
1111#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
1112#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
1113#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
1114#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
1115#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
1116#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
1117#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
1118#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
1119#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
1120#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
1121#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
1122#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
1123#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
1124#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
1125#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
1126#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
1127#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
1128#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
1129#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
1130#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
1131#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
1132#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
1133#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
1134#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
1135#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
1136#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
1137#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
1138#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
1139
1140/* randomizer AAI set */
1141#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
1142#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT)
1143#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT)
1144
1145/* hmac/smac AAI set */
1146#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
1147#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
1148#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
1149#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
1150
1151/* CRC AAI set */
1152#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
1153#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
1154#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
1155#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
1156#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
1157#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
1158
1159/* Kasumi/SNOW AAI set */
1160#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
1161#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
1162#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
1163#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
1164
1165
1166#define OP_ALG_AS_SHIFT 2
1167#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
1168#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
1169#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
1170#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
1171#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
1172
1173#define OP_ALG_ICV_SHIFT 1
1174#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
1175#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT)
1176#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT)
1177
1178#define OP_ALG_DIR_SHIFT 0
1179#define OP_ALG_DIR_MASK 1
1180#define OP_ALG_DECRYPT 0
1181#define OP_ALG_ENCRYPT 1
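
/*
 * Editor's sketch (hypothetical helper): a class 1 algorithm
 * OPERATION for one-shot (init+finalize) AES-CBC encryption with no
 * ICV comparison.
 */
static inline u32 example_aes_cbc_encrypt_op(void)
{
	return CMD_OPERATION | OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
	       OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL | OP_ALG_ICV_OFF |
	       OP_ALG_ENCRYPT;
}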
1182
1183/* PKHA algorithm type set */
1184#define OP_ALG_PK 0x00800000
1185#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
1186
1187/* PKHA mode clear memory functions */
1188#define OP_ALG_PKMODE_A_RAM 0x80000
1189#define OP_ALG_PKMODE_B_RAM 0x40000
1190#define OP_ALG_PKMODE_E_RAM 0x20000
1191#define OP_ALG_PKMODE_N_RAM 0x10000
1192#define OP_ALG_PKMODE_CLEARMEM 0x00001
1193
1194/* PKHA mode modular-arithmetic functions */
1195#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
1196#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
1197#define OP_ALG_PKMODE_MOD_F2M 0x20000
1198#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
1199#define OP_ALG_PKMODE_PRJECTV 0x00800
1200#define OP_ALG_PKMODE_TIME_EQ 0x400
1201#define OP_ALG_PKMODE_OUT_B 0x000
1202#define OP_ALG_PKMODE_OUT_A 0x100
1203#define OP_ALG_PKMODE_MOD_ADD 0x002
1204#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
1205#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
1206#define OP_ALG_PKMODE_MOD_MULT 0x005
1207#define OP_ALG_PKMODE_MOD_EXPO 0x006
1208#define OP_ALG_PKMODE_MOD_REDUCT 0x007
1209#define OP_ALG_PKMODE_MOD_INV 0x008
1210#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
1211#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
1212#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
1213#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
1214#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
1215#define OP_ALG_PKMODE_MOD_GCD 0x00e
1216#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
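
/*
 * Editor's sketch (hypothetical helper; encoding assumed from the
 * field definitions above): a PKHA OPERATION performing modular
 * exponentiation with the result left in register B.
 */
static inline u32 example_pkha_mod_expo_op(void)
{
	return CMD_OPERATION | OP_TYPE_PK | OP_ALG_PK |
	       OP_ALG_PKMODE_MOD_EXPO | OP_ALG_PKMODE_OUT_B;
}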
1217
1218/* PKHA mode copy-memory functions */
1219#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
1220#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1221#define OP_ALG_PKMODE_DST_REG_SHIFT 10
1222#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
1223#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
1224#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1225#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
1226#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1227
1228#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1229#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1230#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
1231#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
1232#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
1233#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
1234#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
1235#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1236#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1237#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1238#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
1239#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1240#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1241#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1242#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
1243#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
1244#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
1245
1246/*
1247 * SEQ_IN_PTR Command Constructs
1248 */
1249
1250/* Release Buffers */
1251#define SQIN_RBS 0x04000000
1252
1253/* Sequence pointer is really a descriptor */
1254#define SQIN_INL 0x02000000
1255
1256/* Sequence pointer is a scatter-gather table */
1257#define SQIN_SGF 0x01000000
1258
1259/* Appends to a previous pointer */
1260#define SQIN_PRE 0x00800000
1261
1262/* Use extended length following pointer */
1263#define SQIN_EXT 0x00400000
1264
1265/* Restore sequence with pointer/length */
1266#define SQIN_RTO 0x00200000
1267
1268/* Replace job descriptor */
1269#define SQIN_RJD 0x00100000
1270
1271#define SQIN_LEN_SHIFT 0
1272#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
1273
1274/*
1275 * SEQ_OUT_PTR Command Constructs
1276 */
1277
1278/* Sequence pointer is a scatter-gather table */
1279#define SQOUT_SGF 0x01000000
1280
1281/* Appends to a previous pointer */
1282#define SQOUT_PRE 0x00800000
1283
1284/* Restore sequence with pointer/length */
1285#define SQOUT_RTO 0x00200000
1286
1287/* Use extended length following pointer */
1288#define SQOUT_EXT 0x00400000
1289
1290#define SQOUT_LEN_SHIFT 0
1291#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
1292
1293
1294/*
1295 * SIGNATURE Command Constructs
1296 */
1297
1298/* TYPE field is all that's relevant */
1299#define SIGN_TYPE_SHIFT 16
1300#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
1301
1302#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
1303#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
1304#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
1305#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
1306#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
1307#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
1308
1309/*
1310 * MOVE Command Constructs
1311 */
1312
1313#define MOVE_AUX_SHIFT 25
1314#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
1315#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
1316#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
1317
1318#define MOVE_WAITCOMP_SHIFT 24
1319#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
1320#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
1321
1322#define MOVE_SRC_SHIFT 20
1323#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
1324#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
1325#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
1326#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
1327#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
1328#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
1329#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
1330#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
1331#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
1332#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
1333#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
1334
1335#define MOVE_DEST_SHIFT 16
1336#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
1337#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
1338#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
1339#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
1340#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
1341#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
1342#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
1343#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
1344#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
1345#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
1346#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
1347#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
1348#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
1349#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
1350
1351#define MOVE_OFFSET_SHIFT 8
1352#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
1353
1354#define MOVE_LEN_SHIFT 0
1355#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
1356
1357#define MOVELEN_MRSEL_SHIFT 0
1358#define MOVELEN_MRSEL_MASK (0x3 << MOVELEN_MRSEL_SHIFT)
1359
1360/*
1361 * MATH Command Constructs
1362 */
1363
1364#define MATH_IFB_SHIFT 26
1365#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
1366#define MATH_IFB (1 << MATH_IFB_SHIFT)
1367
1368#define MATH_NFU_SHIFT 25
1369#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
1370#define MATH_NFU (1 << MATH_NFU_SHIFT)
1371
1372#define MATH_STL_SHIFT 24
1373#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
1374#define MATH_STL (1 << MATH_STL_SHIFT)
1375
1376/* Function selectors */
1377#define MATH_FUN_SHIFT 20
1378#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
1379#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
1380#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
1381#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
1382#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
1383#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
1384#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
1385#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
1386#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
1387#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
1388#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
1389#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
1390
1391/* Source 0 selectors */
1392#define MATH_SRC0_SHIFT 16
1393#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
1394#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
1395#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
1396#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
1397#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
1398#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
1399#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
1400#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
1401#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
1402#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
1403#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
1404
1405/* Source 1 selectors */
1406#define MATH_SRC1_SHIFT 12
1407#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
1408#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
1409#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
1410#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
1411#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
1412#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
1413#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
1414#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
1415#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
1416
1417/* Destination selectors */
1418#define MATH_DEST_SHIFT 8
1419#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
1420#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
1421#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
1422#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
1423#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
1424#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
1425#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
1426#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
1427#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
1428#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
1429
1430/* Length selectors */
1431#define MATH_LEN_SHIFT 0
1432#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
1433#define MATH_LEN_1BYTE 0x01
1434#define MATH_LEN_2BYTE 0x02
1435#define MATH_LEN_4BYTE 0x04
1436#define MATH_LEN_8BYTE 0x08
1437
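As with the OPERATION word, a MATH command is assembled by OR-ing one selector from each field group above. A sketch, assuming the CMD_MATH opcode defined earlier in this header:

/* Sketch: MATH0 = SEQINLEN - <imm>, on 4-byte operands. CMD_MATH is
 * assumed from earlier in desc.h; the immediate operand word would
 * follow this command in the descriptor. */
static const u32 math_word = CMD_MATH | MATH_FUN_SUB | MATH_SRC0_SEQINLEN |
			     MATH_SRC1_IMM | MATH_DEST_REG0 | MATH_LEN_4BYTE;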
1438/*
1439 * JUMP Command Constructs
1440 */
1441
1442#define JUMP_CLASS_SHIFT 25
1443#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
1444#define JUMP_CLASS_NONE 0
1445#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
1446#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
1447#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
1448
1449#define JUMP_JSL_SHIFT 24
1450#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
1451#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
1452
1453#define JUMP_TYPE_SHIFT 22
1454#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
1455#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
1456#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
1457#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
1458#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
1459
1460#define JUMP_TEST_SHIFT 16
1461#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
1462#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
1463#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
1464#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
1465#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
1466
1467/* Condition codes. JSL bit is factored in */
1468#define JUMP_COND_SHIFT 8
1469#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
1470#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
1471#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
1472#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
1473#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
1474#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
1475#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
1476#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
1477
1478#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
1479#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
1480#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
1481#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
1482#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
1483#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
1484#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
1485#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
1486
1487#define JUMP_OFFSET_SHIFT 0
1488#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
1489
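A conditional JUMP word combines a type, a test, a condition set and a word offset. A sketch, assuming the CMD_JUMP opcode defined earlier in this header:

/* Sketch: jump two descriptor words ahead when the MATH zero flag is
 * set; CMD_JUMP is assumed from earlier in desc.h. */
static const u32 jump_word = CMD_JUMP | JUMP_TYPE_LOCAL | JUMP_TEST_ALL |
			     JUMP_COND_MATH_Z | (2 << JUMP_OFFSET_SHIFT);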
1490/*
1491 * NFIFO ENTRY
1492 * Data Constructs
1493 *
1494 */
1495#define NFIFOENTRY_DEST_SHIFT 30
1496#define NFIFOENTRY_DEST_MASK (3 << NFIFOENTRY_DEST_SHIFT)
1497#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
1498#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
1499#define NFIFOENTRY_DEST_CLASS2 (2 << NFIFOENTRY_DEST_SHIFT)
1500#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT)
1501
1502#define NFIFOENTRY_LC2_SHIFT 29
1503#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
1504#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
1505
1506#define NFIFOENTRY_LC1_SHIFT 28
1507#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
1508#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
1509
1510#define NFIFOENTRY_FC2_SHIFT 27
1511#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
1512#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
1513
1514#define NFIFOENTRY_FC1_SHIFT 26
1515#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
1516#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
1517
1518#define NFIFOENTRY_STYPE_SHIFT 24
1519#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
1520#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
1521#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
1522#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
1523#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
1524
1525#define NFIFOENTRY_DTYPE_SHIFT 20
1526#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
1527
1528#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
1529#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
1530#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
1531#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
1532#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
1533#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
1534#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
1535
1536#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
1537#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
1538#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
1539#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
1540#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
1541#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
1542#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
1543#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
1544#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
1545#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
1546#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
1547#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
1548
1549
1550#define NFIFOENTRY_BND_SHIFT 19
1551#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
1552#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
1553
1554#define NFIFOENTRY_PTYPE_SHIFT 16
1555#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
1556
1557#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
1558#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
1559#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
1560#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
1561#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
1562#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
1563#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
1564#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
1565
1566#define NFIFOENTRY_OC_SHIFT 15
1567#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
1568#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
1569
1570#define NFIFOENTRY_AST_SHIFT 14
1571#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT)
1572#define NFIFOENTRY_AST (1 << NFIFOENTRY_AST_SHIFT)
1573
1574#define NFIFOENTRY_BM_SHIFT 11
1575#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
1576#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
1577
1578#define NFIFOENTRY_PS_SHIFT 10
1579#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
1580#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
1581
1582
1583#define NFIFOENTRY_DLEN_SHIFT 0
1584#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
1585
1586#define NFIFOENTRY_PLEN_SHIFT 0
1587#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
1588
1589/*
1590 * PDB internal definitions
1591 */
1592
1593/* IPSec ESP CBC Encap/Decap Options */
1594#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
1595#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
1596#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
1597#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
1598#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
1599#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
1600#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
1601#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
1602#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
1603#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
1604
1605#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
new file mode 100644
index 000000000000..46915800c26f
--- /dev/null
+++ b/drivers/crypto/caam/desc_constr.h
@@ -0,0 +1,205 @@
1/*
2 * caam descriptor construction helper functions
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 */
6
7#include "desc.h"
8
9#define IMMEDIATE (1 << 23)
10#define CAAM_CMD_SZ sizeof(u32)
11#define CAAM_PTR_SZ sizeof(dma_addr_t)
12#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)
13
14#ifdef DEBUG
15#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
16 &__func__[sizeof("append")]); } while (0)
17#else
18#define PRINT_POS
19#endif
20
21#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
22 LDST_SRCDST_WORD_DECOCTRL | \
23 (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
24#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
25 LDST_SRCDST_WORD_DECOCTRL | \
26 (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
27
28static inline int desc_len(u32 *desc)
29{
30 return *desc & HDR_DESCLEN_MASK;
31}
32
33static inline int desc_bytes(void *desc)
34{
35 return desc_len(desc) * CAAM_CMD_SZ;
36}
37
38static inline u32 *desc_end(u32 *desc)
39{
40 return desc + desc_len(desc);
41}
42
43static inline void *sh_desc_pdb(u32 *desc)
44{
45 return desc + 1;
46}
47
48static inline void init_desc(u32 *desc, u32 options)
49{
50 *desc = options | HDR_ONE | 1;
51}
52
53static inline void init_sh_desc(u32 *desc, u32 options)
54{
55 PRINT_POS;
56 init_desc(desc, CMD_SHARED_DESC_HDR | options);
57}
58
59static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
60{
61 u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1;
62
63 init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) |
64 options);
65}
66
67static inline void init_job_desc(u32 *desc, u32 options)
68{
69 init_desc(desc, CMD_DESC_HDR | options);
70}
71
72static inline void append_ptr(u32 *desc, dma_addr_t ptr)
73{
74 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
75
76 *offset = ptr;
77
78 (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
79}
80
81static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
82 u32 options)
83{
84 PRINT_POS;
85 init_job_desc(desc, HDR_SHARED | options |
86 (len << HDR_START_IDX_SHIFT));
87 append_ptr(desc, ptr);
88}
89
90static inline void append_data(u32 *desc, void *data, int len)
91{
92 u32 *offset = desc_end(desc);
93
94 if (len) /* avoid sparse warning: memcpy with byte count of 0 */
95 memcpy(offset, data, len);
96
97 (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
98}
99
100static inline void append_cmd(u32 *desc, u32 command)
101{
102 u32 *cmd = desc_end(desc);
103
104 *cmd = command;
105
106 (*desc)++;
107}
108
109static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
110 u32 command)
111{
112 append_cmd(desc, command | len);
113 append_ptr(desc, ptr);
114}
115
116static inline void append_cmd_data(u32 *desc, void *data, int len,
117 u32 command)
118{
119 append_cmd(desc, command | IMMEDIATE | len);
120 append_data(desc, data, len);
121}
122
123static inline u32 *append_jump(u32 *desc, u32 options)
124{
125 u32 *cmd = desc_end(desc);
126
127 PRINT_POS;
128 append_cmd(desc, CMD_JUMP | options);
129
130 return cmd;
131}
132
133static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
134{
135 *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
136}
137
138#define APPEND_CMD(cmd, op) \
139static inline void append_##cmd(u32 *desc, u32 options) \
140{ \
141 PRINT_POS; \
142 append_cmd(desc, CMD_##op | options); \
143}
144APPEND_CMD(operation, OPERATION)
145APPEND_CMD(move, MOVE)
146
147#define APPEND_CMD_LEN(cmd, op) \
148static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
149{ \
150 PRINT_POS; \
151 append_cmd(desc, CMD_##op | len | options); \
152}
153APPEND_CMD_LEN(seq_store, SEQ_STORE)
154APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
155APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
156
157#define APPEND_CMD_PTR(cmd, op) \
158static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
159 u32 options) \
160{ \
161 PRINT_POS; \
162 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
163}
164APPEND_CMD_PTR(key, KEY)
165APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
166APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
167APPEND_CMD_PTR(load, LOAD)
168APPEND_CMD_PTR(store, STORE)
169APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
170APPEND_CMD_PTR(fifo_store, FIFO_STORE)
171
172#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
173static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
174 unsigned int len, u32 options) \
175{ \
176 PRINT_POS; \
177 append_cmd_data(desc, data, len, CMD_##op | options); \
178}
179APPEND_CMD_PTR_TO_IMM(load, LOAD);
180APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
181
182/*
183 * 2nd variant for commands whose specified immediate length differs
184 * from length of immediate data provided, e.g., split keys
185 */
186#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
187static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
188 unsigned int data_len, \
189 unsigned int len, u32 options) \
190{ \
191 PRINT_POS; \
192 append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
193 append_data(desc, data, data_len); \
194}
195APPEND_CMD_PTR_TO_IMM2(key, KEY);
196
197#define APPEND_CMD_RAW_IMM(cmd, op, type) \
198static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
199 u32 options) \
200{ \
201 PRINT_POS; \
202 append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
203 append_cmd(desc, immediate); \
204}
205APPEND_CMD_RAW_IMM(load, LOAD, u32);
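For illustration, a minimal sketch of how these helpers compose a job descriptor. The option constants are assumed from desc.h, the DMA addresses are assumed already mapped by the caller, and the class 1 context (IV) load is omitted for brevity:

/* Sketch only: build a one-shot AES-CBC encrypt job descriptor over a
 * 64-byte buffer. key_dma/src_dma/dst_dma are hypothetical pre-mapped
 * DMA addresses; the option constants are assumed from desc.h. */
static void build_aes_cbc_desc(u32 *desc, dma_addr_t key_dma,
			       dma_addr_t src_dma, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);
	append_key(desc, key_dma, 16, CLASS_1 | KEY_DEST_CLASS_REG);
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			 OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);
	append_seq_in_ptr(desc, src_dma, 64, 0);
	append_seq_out_ptr(desc, dst_dma, 64, 0);
	append_seq_fifo_load(desc, 64, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 64, FIFOST_TYPE_MESSAGE_DATA);
}

caam_jr_enqueue() (jr.c, below) DMA-maps the finished buffer itself, deriving its size from the header's length field.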
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
new file mode 100644
index 000000000000..7e2d54bffad6
--- /dev/null
+++ b/drivers/crypto/caam/error.c
@@ -0,0 +1,248 @@
1/*
2 * CAAM Error Reporting
3 *
4 * Copyright 2009-2011 Freescale Semiconductor, Inc.
5 */
6
7#include "compat.h"
8#include "regs.h"
9#include "intern.h"
10#include "desc.h"
11#include "jr.h"
12#include "error.h"
13
14#define SPRINTFCAT(str, format, param, max_alloc) \
15{ \
16 char *tmp; \
17 \
18 tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
19 sprintf(tmp, format, param); \
20 strcat(str, tmp); \
21 kfree(tmp); \
22}
23
24static void report_jump_idx(u32 status, char *outstr)
25{
26 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
27 JRSTA_DECOERR_INDEX_SHIFT;
28
29 if (status & JRSTA_DECOERR_JUMP)
30 strcat(outstr, "jump tgt desc idx ");
31 else
32 strcat(outstr, "desc idx ");
33
34 SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
35}
36
37static void report_ccb_status(u32 status, char *outstr)
38{
39 char *cha_id_list[] = {
40 "",
41 "AES",
42 "DES, 3DES",
43 "ARC4",
44 "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512",
45 "RNG",
46 "SNOW f8",
47 "Kasumi f8, f9",
48 "All Public Key Algorithms",
49 "CRC",
50 "SNOW f9",
51 };
52 char *err_id_list[] = {
53 "None. No error.",
54 "Mode error.",
55 "Data size error.",
56 "Key size error.",
57 "PKHA A memory size error.",
58 "PKHA B memory size error.",
59 "Data arrived out of sequence error.",
60 "PKHA divide-by-zero error.",
61 "PKHA modulus even error.",
62 "DES key parity error.",
63 "ICV check failed.",
64 "Hardware error.",
65 "Unsupported CCM AAD size.",
66 "Class 1 CHA is not reset",
67 "Invalid CHA combination was selected",
68 "Invalid CHA selected.",
69 };
70 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
71 JRSTA_CCBERR_CHAID_SHIFT;
72 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
73
74 report_jump_idx(status, outstr);
75
76 if (cha_id < ARRAY_SIZE(cha_id_list)) {
77 SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id],
78 strlen(cha_id_list[cha_id]));
79 } else {
80 SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
81 cha_id, sizeof("ff"));
82 }
83
84 if (err_id < ARRAY_SIZE(err_id_list)) {
85 SPRINTFCAT(outstr, "%s", err_id_list[err_id],
86 strlen(err_id_list[err_id]));
87 } else {
88 SPRINTFCAT(outstr, "unidentified err_id value 0x%02x",
89 err_id, sizeof("ff"));
90 }
91}
92
93static void report_jump_status(u32 status, char *outstr)
94{
95 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
96}
97
98static void report_deco_status(u32 status, char *outstr)
99{
100 const struct {
101 u8 value;
102 char *error_text;
103 } desc_error_list[] = {
104 { 0x00, "None. No error." },
105 { 0x01, "SGT Length Error. The descriptor is trying to read "
106 "more data than is contained in the SGT table." },
107 { 0x02, "Reserved." },
108 { 0x03, "Job Ring Control Error. There is a bad value in the "
109 "Job Ring Control register." },
110 { 0x04, "Invalid Descriptor Command. The Descriptor Command "
111 "field is invalid." },
112 { 0x05, "Reserved." },
113 { 0x06, "Invalid KEY Command" },
114 { 0x07, "Invalid LOAD Command" },
115 { 0x08, "Invalid STORE Command" },
116 { 0x09, "Invalid OPERATION Command" },
117 { 0x0A, "Invalid FIFO LOAD Command" },
118 { 0x0B, "Invalid FIFO STORE Command" },
119 { 0x0C, "Invalid MOVE Command" },
120 { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
121 "invalid because the target is not a Job Header "
122 "Command, or the jump is from a Trusted Descriptor to "
123 "a Job Descriptor, or because the target Descriptor "
124 "contains a Shared Descriptor." },
125 { 0x0E, "Invalid MATH Command" },
126 { 0x0F, "Invalid SIGNATURE Command" },
127 { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
128 "Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
129 "LOAD, or SEQ FIFO STORE decremented the input or "
130 "output sequence length below 0. This error may result "
131 "if a built-in PROTOCOL Command has encountered a "
132 "malformed PDU." },
133 { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
134 { 0x12, "Shared Descriptor Header Error" },
135 { 0x13, "Header Error. Invalid length or parity, or certain "
136 "other problems." },
137 { 0x14, "Burster Error. Burster has gotten to an illegal "
138 "state" },
139 { 0x15, "Context Register Length Error. The descriptor is "
140 "trying to read or write past the end of the Context "
141 "Register. A SEQ LOAD or SEQ STORE with the VLF bit "
142 "set was executed with too large a length in the "
143 "variable length register (VSOL for SEQ STORE or VSIL "
144 "for SEQ LOAD)." },
145 { 0x16, "DMA Error" },
146 { 0x17, "Reserved." },
147 { 0x1A, "Job failed due to JR reset" },
148 { 0x1B, "Job failed due to Fail Mode" },
149 { 0x1C, "DECO Watchdog timer timeout error" },
150 { 0x1D, "DECO tried to copy a key from another DECO but the "
151 "other DECO's Key Registers were locked" },
152 { 0x1E, "DECO attempted to copy data from a DECO that had an "
153 "unmasked Descriptor error" },
154 { 0x1F, "LIODN error. DECO was trying to share from itself or "
155 "from another DECO but the two Non-SEQ LIODN values "
156 "didn't match or the 'shared from' DECO's Descriptor "
157 "required that the SEQ LIODNs be the same and they "
158 "aren't." },
159 { 0x20, "DECO has completed a reset initiated via the DRR "
160 "register" },
161 { 0x21, "Nonce error. When using EKT (CCM) key encryption "
162 "option in the FIFO STORE Command, the Nonce counter "
163 "reached its maximum value and this encryption mode "
164 "can no longer be used." },
165 { 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
166 "(input frame; block ciphers) and IPsec decap (output "
167 "frame, when doing the next header byte update) and "
168 "DCRC (output frame)." },
169 { 0x80, "DNR (do not run) error" },
170 { 0x81, "undefined protocol command" },
171 { 0x82, "invalid setting in PDB" },
172 { 0x83, "Anti-replay LATE error" },
173 { 0x84, "Anti-replay REPLAY error" },
174 { 0x85, "Sequence number overflow" },
175 { 0x86, "Sigver invalid signature" },
176 { 0x87, "DSA Sign Illegal test descriptor" },
177 { 0x88, "Protocol Format Error - A protocol has seen an error "
178 "in the format of data received. When running RSA, "
179 "this means that formatting with random padding was "
180 "used, and did not follow the form: 0x00, 0x02, 8-to-N "
181 "bytes of non-zero pad, 0x00, F data." },
182 { 0x89, "Protocol Size Error - A protocol has seen an error in "
183 "size. When running RSA, pdb size N < (size of F) when "
184 "no formatting is used; or pdb size N < (F + 11) when "
185 "formatting is used." },
186 { 0xC1, "Blob Command error: Undefined mode" },
187 { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
188 { 0xC4, "Blob Command error: Black Blob key or input size "
189 "error" },
190 { 0xC5, "Blob Command error: Invalid key destination" },
191 { 0xC8, "Blob Command error: Trusted/Secure mode error" },
192 { 0xF0, "IPsec TTL or hop limit field either came in as 0, "
193 "or was decremented to 0" },
194 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
195 };
196 u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
197 int i;
198
199 report_jump_idx(status, outstr);
200
201 for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
202 if (desc_error_list[i].value == desc_error)
203 break;
204
205 if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) {
206 SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text,
207 strlen(desc_error_list[i].error_text));
208 } else {
209 SPRINTFCAT(outstr, "unidentified error value 0x%02x",
210 desc_error, sizeof("ff"));
211 }
212}
213
214static void report_jr_status(u32 status, char *outstr)
215{
216 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
217}
218
219static void report_cond_code_status(u32 status, char *outstr)
220{
221 SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
222}
223
224char *caam_jr_strstatus(char *outstr, u32 status)
225{
226 struct stat_src {
227 void (*report_ssed)(u32 status, char *outstr);
228 char *error;
229 } status_src[] = {
230 { NULL, "No error" },
231 { NULL, NULL },
232 { report_ccb_status, "CCB" },
233 { report_jump_status, "Jump" },
234 { report_deco_status, "DECO" },
235 { NULL, NULL },
236 { report_jr_status, "Job Ring" },
237 { report_cond_code_status, "Condition Code" },
238 };
239 u32 ssrc = status >> JRSTA_SSRC_SHIFT;
240
241 sprintf(outstr, "%s: ", status_src[ssrc].error);
242
243 if (status_src[ssrc].report_ssed)
244 status_src[ssrc].report_ssed(status, outstr);
245
246 return outstr;
247}
248EXPORT_SYMBOL(caam_jr_strstatus);
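A typical consumer is a job completion callback, which turns the raw status word into readable text before logging it. A sketch (the callback name and its registration are hypothetical; CAAM_ERROR_STR_MAX is from error.h):

/* Sketch: decode a CAAM status word inside a hypothetical job
 * completion callback. */
static void my_done_cbk(struct device *dev, u32 *desc, u32 status,
			void *context)
{
	char err[CAAM_ERROR_STR_MAX];

	if (status)
		dev_err(dev, "job failed: %s\n",
			caam_jr_strstatus(err, status));
}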
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
new file mode 100644
index 000000000000..02c7baa1748e
--- /dev/null
+++ b/drivers/crypto/caam/error.h
@@ -0,0 +1,11 @@
1/*
2 * CAAM Error Reporting code header
3 *
4 * Copyright 2009-2011 Freescale Semiconductor, Inc.
5 */
6
7#ifndef CAAM_ERROR_H
8#define CAAM_ERROR_H
9#define CAAM_ERROR_STR_MAX 302
10extern char *caam_jr_strstatus(char *outstr, u32 status);
11#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
new file mode 100644
index 000000000000..a34be01b0b29
--- /dev/null
+++ b/drivers/crypto/caam/intern.h
@@ -0,0 +1,113 @@
1/*
2 * CAAM/SEC 4.x driver backend
3 * Private/internal definitions between modules
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 *
7 */
8
9#ifndef INTERN_H
10#define INTERN_H
11
12#define JOBR_UNASSIGNED 0
13#define JOBR_ASSIGNED 1
14
15/* Currently comes from Kconfig param as a ^2 (driver-required) */
16#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
17
18/* Kconfig params for interrupt coalescing if selected (else zero) */
19#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
20#define JOBR_INTC JRCFG_ICEN
21#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
22#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
23#else
24#define JOBR_INTC 0
25#define JOBR_INTC_TIME_THLD 0
26#define JOBR_INTC_COUNT_THLD 0
27#endif
28
29/*
30 * Storage for tracking each in-process entry moving across a ring
31 * Each entry on an output ring needs one of these
32 */
33struct caam_jrentry_info {
34 void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
35 void *cbkarg; /* Argument per ring entry */
36 u32 *desc_addr_virt; /* Stored virt addr for postprocessing */
37 dma_addr_t desc_addr_dma; /* Stored bus addr for done matching */
38 u32 desc_size; /* Stored size for postprocessing, header derived */
39};
40
41/* Private sub-storage for a single JobR */
42struct caam_drv_private_jr {
43 struct device *parentdev; /* points back to controller dev */
44 int ridx;
45 struct caam_job_ring __iomem *rregs; /* JobR's register space */
46 struct tasklet_struct irqtask[NR_CPUS];
47 int irq; /* One per queue */
48 int assign; /* busy/free */
49
50 /* Job ring info */
51 int ringsize; /* Size of rings (assume input = output) */
52 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
53 spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
54 int inp_ring_write_index; /* Input index "tail" */
55 int head; /* entinfo (s/w ring) head index */
56 dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
57 spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
58 int out_ring_read_index; /* Output index "tail" */
59 int tail; /* entinfo (s/w ring) tail index */
60 struct jr_outentry *outring; /* Base of output ring, DMA-safe */
61};
62
63/*
64 * Driver-private storage for a single CAAM block instance
65 */
66struct caam_drv_private {
67
68 struct device *dev;
69 struct device **jrdev; /* Alloc'ed array per sub-device */
70 spinlock_t jr_alloc_lock;
71 struct platform_device *pdev;
72
73 /* Physical-presence section */
74 struct caam_ctrl *ctrl; /* controller region */
75 struct caam_deco **deco; /* DECO/CCB views */
76 struct caam_assurance *ac;
77 struct caam_queue_if *qi; /* QI control region */
78
79 /*
80 * Detected geometry block. Filled in from device tree if powerpc,
81 * or from register-based version detection code
82 */
83 u8 total_jobrs; /* Total Job Rings in device */
84 u8 qi_present; /* Nonzero if QI present in device */
85 int secvio_irq; /* Security violation interrupt number */
86
87 /* which jr allocated to scatterlist crypto */
88 atomic_t tfm_count ____cacheline_aligned;
89 int num_jrs_for_algapi;
90 struct device **algapi_jr;
91 /* list of registered crypto algorithms (mk generic context handle?) */
92 struct list_head alg_list;
93
94 /*
95 * debugfs entries for developer view into driver/device
96 * variables at runtime.
97 */
98#ifdef CONFIG_DEBUG_FS
99 struct dentry *dfs_root;
100 struct dentry *ctl; /* controller dir */
101 struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
102 struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
103 struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
104 struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
105
106 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
107 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
108#endif
109};
110
111void caam_jr_algapi_init(struct device *dev);
112void caam_jr_algapi_remove(struct device *dev);
113#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
new file mode 100644
index 000000000000..340fa322c0f0
--- /dev/null
+++ b/drivers/crypto/caam/jr.c
@@ -0,0 +1,517 @@
1/*
2 * CAAM/SEC 4.x transport/backend driver
3 * JobR backend functionality
4 *
5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
6 */
7
8#include "compat.h"
9#include "regs.h"
10#include "jr.h"
11#include "desc.h"
12#include "intern.h"
13
14/* Main per-ring interrupt handler */
15static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
16{
17 struct device *dev = st_dev;
18 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
19 u32 irqstate;
20
21 /*
22 * Check the output ring for ready responses, kick
23 * tasklet if jobs done.
24 */
25 irqstate = rd_reg32(&jrp->rregs->jrintstatus);
26 if (!irqstate)
27 return IRQ_NONE;
28
29 /*
30 * If JobR error, we've got more development work to do.
31 * Flag a bug now, but we really need to shut down and
32 * restart the queue (and fix code).
33 */
34 if (irqstate & JRINT_JR_ERROR) {
35 dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
36 BUG();
37 }
38
39 /* mask valid interrupts */
40 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
41
42 /* Have valid interrupt at this point, just ACK and trigger */
43 wr_reg32(&jrp->rregs->jrintstatus, irqstate);
44
45 preempt_disable();
46 tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
47 preempt_enable();
48
49 return IRQ_HANDLED;
50}
51
52/* Deferred service handler, run as interrupt-fired tasklet */
53static void caam_jr_dequeue(unsigned long devarg)
54{
55 int hw_idx, sw_idx, i, head, tail;
56 struct device *dev = (struct device *)devarg;
57 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
58 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
59 u32 *userdesc, userstatus;
60 void *userarg;
61 unsigned long flags;
62
63 spin_lock_irqsave(&jrp->outlock, flags);
64
65 head = ACCESS_ONCE(jrp->head);
66 sw_idx = tail = jrp->tail;
67
68 while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
69 rd_reg32(&jrp->rregs->outring_used)) {
70
71 hw_idx = jrp->out_ring_read_index;
72 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
73 sw_idx = (tail + i) & (JOBR_DEPTH - 1);
74
75 smp_read_barrier_depends();
76
77 if (jrp->outring[hw_idx].desc ==
78 jrp->entinfo[sw_idx].desc_addr_dma)
79 break; /* found */
80 }
81 /* we should never fail to find a matching descriptor */
82 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
83
84 /* Unmap just-run descriptor so we can post-process */
85 dma_unmap_single(dev, jrp->outring[hw_idx].desc,
86 jrp->entinfo[sw_idx].desc_size,
87 DMA_TO_DEVICE);
88
89 /* mark completed, avoid matching on a recycled desc addr */
90 jrp->entinfo[sw_idx].desc_addr_dma = 0;
91
92 /* Stash callback params for use outside of lock */
93 usercall = jrp->entinfo[sw_idx].callbk;
94 userarg = jrp->entinfo[sw_idx].cbkarg;
95 userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
96 userstatus = jrp->outring[hw_idx].jrstatus;
97
98 smp_mb();
99
100 jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
101 (JOBR_DEPTH - 1);
102
103 /*
104 * if this job completed out-of-order, do not increment
105 * the tail. Otherwise, increment tail by 1 plus the
106 * number of subsequent jobs already completed out-of-order
107 */
108 if (sw_idx == tail) {
109 do {
110 tail = (tail + 1) & (JOBR_DEPTH - 1);
111 smp_read_barrier_depends();
112 } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
113 jrp->entinfo[tail].desc_addr_dma == 0);
114
115 jrp->tail = tail;
116 }
117
118 /* set done */
119 wr_reg32(&jrp->rregs->outring_rmvd, 1);
120
121 spin_unlock_irqrestore(&jrp->outlock, flags);
122
123 /* Finally, execute user's callback */
124 usercall(dev, userdesc, userstatus, userarg);
125
126 spin_lock_irqsave(&jrp->outlock, flags);
127
128 head = ACCESS_ONCE(jrp->head);
129 sw_idx = tail = jrp->tail;
130 }
131
132 spin_unlock_irqrestore(&jrp->outlock, flags);
133
134 /* reenable / unmask IRQs */
135 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
136}
137
138/**
139 * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
140 * the ordinal of the ring allocated, or -ENODEV if no rings
141 * are available.
142 * @ctrldev: points to the controller level dev (parent) that
143 * owns rings available for use.
144 * @rdev: points to where a pointer to the newly allocated queue's
145 * dev can be written to if successful.
146 **/
147int caam_jr_register(struct device *ctrldev, struct device **rdev)
148{
149 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
150 struct caam_drv_private_jr *jrpriv = NULL;
151 unsigned long flags;
152 int ring;
153
154 /* Lock, if free ring - assign, unlock */
155 spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
156 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
157 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
158 if (jrpriv->assign == JOBR_UNASSIGNED) {
159 jrpriv->assign = JOBR_ASSIGNED;
160 *rdev = ctrlpriv->jrdev[ring];
161 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
162 return ring;
163 }
164 }
165
166 /* No free rings; unlock and signal failure to the caller */
167 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
168 *rdev = NULL;
169
170 return -ENODEV;
171}
172EXPORT_SYMBOL(caam_jr_register);
173
174/**
175 * caam_jr_deregister() - Deregister an API and release the queue.
176 * Returns 0 if OK, -EBUSY if the queue still contains pending entries
177 * or unprocessed results at the time of the call.
178 * @rdev: points to the dev that identifies the queue to
179 * be released.
180 **/
181int caam_jr_deregister(struct device *rdev)
182{
183 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
184 struct caam_drv_private *ctrlpriv;
185 unsigned long flags;
186
187 /* Get the owning controller's private space */
188 ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
189
190 /*
191 * Make sure ring empty before release
192 */
193 if (rd_reg32(&jrpriv->rregs->outring_used) ||
194 (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
195 return -EBUSY;
196
197 /* Release ring */
198 spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
199 jrpriv->assign = JOBR_UNASSIGNED;
200 spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
201
202 return 0;
203}
204EXPORT_SYMBOL(caam_jr_deregister);
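A client module would typically bracket its working lifetime with these two calls. A minimal sketch, where my_ctrldev, my_init and my_exit stand in for the caller's own controller handle and lifecycle hooks:

/* Sketch: claim a free job ring at init, release it at teardown. */
static struct device *jrdev;

static int my_init(struct device *my_ctrldev)
{
	int ring = caam_jr_register(my_ctrldev, &jrdev);

	if (ring < 0)
		return ring;	/* -ENODEV: no free rings */
	/* ... submit jobs to jrdev via caam_jr_enqueue() ... */
	return 0;
}

static void my_exit(void)
{
	if (caam_jr_deregister(jrdev))
		pr_err("job ring still busy at teardown\n");
}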
205
206/**
207 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
208 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
209 * descriptor.
210 * @dev: device of the job ring to be used. This device should have
211 * been assigned prior by caam_jr_register().
212 * @desc: points to a job descriptor that executes our request. All
213 * descriptors (and all referenced data) must be in a DMAable
214 * region, and all data references must be physical addresses
215 * accessible to CAAM (i.e. within a PAMU window granted
216 * to it).
217 * @cbk: pointer to a callback function to be invoked upon completion
218 * of this request. This has the form:
219 * callback(struct device *dev, u32 *desc, u32 stat, void *arg)
220 * where:
221 * @dev: contains the job ring device that processed this
222 * response.
223 * @desc: descriptor that initiated the request, same as
224 * the "desc" argument passed to caam_jr_enqueue().
225 * @status: untranslated status received from CAAM. See the
226 * reference manual for a detailed description of
227 * error meaning, or see the JRSTA definitions in the
228 * register header file
229 * @areq: the argument originally supplied to
230 * caam_jr_enqueue() with this request
231 * @areq: optional pointer to a user argument for use at callback
232 * time.
233 **/
234int caam_jr_enqueue(struct device *dev, u32 *desc,
235 void (*cbk)(struct device *dev, u32 *desc,
236 u32 status, void *areq),
237 void *areq)
238{
239 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
240 struct caam_jrentry_info *head_entry;
241 unsigned long flags;
242 int head, tail, desc_size;
243 dma_addr_t desc_dma;
244
245 desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
246 desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
247 if (dma_mapping_error(dev, desc_dma)) {
248 dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
249 return -EIO;
250 }
251
252 spin_lock_irqsave(&jrp->inplock, flags);
253
254 head = jrp->head;
255 tail = ACCESS_ONCE(jrp->tail);
256
257 if (!rd_reg32(&jrp->rregs->inpring_avail) ||
258 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
259 spin_unlock_irqrestore(&jrp->inplock, flags);
260 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
261 return -EBUSY;
262 }
263
264 head_entry = &jrp->entinfo[head];
265 head_entry->desc_addr_virt = desc;
266 head_entry->desc_size = desc_size;
267 head_entry->callbk = (void *)cbk;
268 head_entry->cbkarg = areq;
269 head_entry->desc_addr_dma = desc_dma;
270
271 jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
272
273 smp_wmb();
274
275 jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
276 (JOBR_DEPTH - 1);
277 jrp->head = (head + 1) & (JOBR_DEPTH - 1);
278
279 wmb();
280
281 wr_reg32(&jrp->rregs->inpring_jobadd, 1);
282
283 spin_unlock_irqrestore(&jrp->inplock, flags);
284
285 return 0;
286}
287EXPORT_SYMBOL(caam_jr_enqueue);
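Submission is asynchronous: the caller hands over a finished descriptor (all referenced data already DMA-mapped) and learns the outcome in the callback. A sketch under those assumptions; struct my_req and both function names are hypothetical:

/* Sketch: synchronous wrapper around the asynchronous enqueue API. */
struct my_req {			/* hypothetical per-request state */
	struct completion done;
	u32 result;
};

static void my_done(struct device *dev, u32 *desc, u32 status, void *areq)
{
	struct my_req *req = areq;

	req->result = status;	/* 0 on success; else decode via error.c */
	complete(&req->done);
}

static int my_submit(struct device *jrdev, u32 *desc, struct my_req *req)
{
	int ret;

	init_completion(&req->done);
	ret = caam_jr_enqueue(jrdev, desc, my_done, req);
	if (ret)
		return ret;	/* -EBUSY: input ring full, retry later */
	wait_for_completion(&req->done);
	return req->result ? -EIO : 0;
}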
288
289static int caam_reset_hw_jr(struct device *dev)
290{
291 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
292 unsigned int timeout = 100000;
293
294 /*
295 * mask interrupts since we are going to poll
296 * for reset completion status
297 */
298 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
299
300 /* initiate flush (required prior to reset) */
301 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
302 while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
303 JRINT_ERR_HALT_INPROGRESS) && --timeout)
304 cpu_relax();
305
306 if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
307 JRINT_ERR_HALT_COMPLETE || timeout == 0) {
308 dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
309 return -EIO;
310 }
311
312 /* initiate reset */
313 timeout = 100000;
314 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
315 while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
316 cpu_relax();
317
318 if (timeout == 0) {
319 dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
320 return -EIO;
321 }
322
323 /* unmask interrupts */
324 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
325
326 return 0;
327}
328
329/*
330 * Init JobR independent of platform property detection
331 */
332static int caam_jr_init(struct device *dev)
333{
334 struct caam_drv_private_jr *jrp;
335 dma_addr_t inpbusaddr, outbusaddr;
336 int i, error;
337
338 jrp = dev_get_drvdata(dev);
339
340 /* Connect job ring interrupt handler. */
341 for_each_possible_cpu(i)
342 tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
343 (unsigned long)dev);
344
345 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
346 "caam-jobr", dev);
347 if (error) {
348 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
349 jrp->ridx, jrp->irq);
350 irq_dispose_mapping(jrp->irq);
351 jrp->irq = 0;
352 return -EINVAL;
353 }
354
355 error = caam_reset_hw_jr(dev);
356 if (error)
357 return error;
358
359 jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
360 GFP_KERNEL | GFP_DMA);
361 jrp->outring = kzalloc(sizeof(struct jr_outentry) *
362 JOBR_DEPTH, GFP_KERNEL | GFP_DMA);
363
364 jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
365 GFP_KERNEL);
366
367 if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
368 (jrp->entinfo == NULL)) {
369 dev_err(dev, "can't allocate job rings for %d\n",
370 jrp->ridx);
371 return -ENOMEM;
372 }
373
374 for (i = 0; i < JOBR_DEPTH; i++)
375 jrp->entinfo[i].desc_addr_dma = !0;
376
377 /* Setup rings */
378 inpbusaddr = dma_map_single(dev, jrp->inpring,
379 sizeof(u32 *) * JOBR_DEPTH,
380 DMA_BIDIRECTIONAL);
381 if (dma_mapping_error(dev, inpbusaddr)) {
382 dev_err(dev, "caam_jr_init(): can't map input ring\n");
383 kfree(jrp->inpring);
384 kfree(jrp->outring);
385 kfree(jrp->entinfo);
386 return -EIO;
387 }
388
389 outbusaddr = dma_map_single(dev, jrp->outring,
390 sizeof(struct jr_outentry) * JOBR_DEPTH,
391 DMA_BIDIRECTIONAL);
392 if (dma_mapping_error(dev, outbusaddr)) {
393 dev_err(dev, "caam_jr_init(): can't map output ring\n");
394 dma_unmap_single(dev, inpbusaddr,
395 sizeof(u32 *) * JOBR_DEPTH,
396 DMA_BIDIRECTIONAL);
397 kfree(jrp->inpring);
398 kfree(jrp->outring);
399 kfree(jrp->entinfo);
400 return -EIO;
401 }
402
403 jrp->inp_ring_write_index = 0;
404 jrp->out_ring_read_index = 0;
405 jrp->head = 0;
406 jrp->tail = 0;
407
408 wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
409 wr_reg64(&jrp->rregs->outring_base, outbusaddr);
410 wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
411 wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
412
413 jrp->ringsize = JOBR_DEPTH;
414
415 spin_lock_init(&jrp->inplock);
416 spin_lock_init(&jrp->outlock);
417
418 /* Select interrupt coalescing parameters */
419 setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
420 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
421 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
422
423 jrp->assign = JOBR_UNASSIGNED;
424 return 0;
425}
426
427/*
428 * Shutdown JobR independent of platform property code
429 */
430int caam_jr_shutdown(struct device *dev)
431{
432 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
433 dma_addr_t inpbusaddr, outbusaddr;
434 int ret, i;
435
436 ret = caam_reset_hw_jr(dev);
437
438 for_each_possible_cpu(i)
439 tasklet_kill(&jrp->irqtask[i]);
440
441 /* Release interrupt */
442 free_irq(jrp->irq, dev);
443
444 /* Free rings */
445 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
446 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
447 dma_unmap_single(dev, outbusaddr,
448 sizeof(struct jr_outentry) * JOBR_DEPTH,
449 DMA_BIDIRECTIONAL);
450 dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
451 DMA_BIDIRECTIONAL);
452 kfree(jrp->outring);
453 kfree(jrp->inpring);
454 kfree(jrp->entinfo);
455
456 return ret;
457}
458
459/*
460 * Probe routine for each detected JobR subsystem. It assumes that
461 * property detection was picked up externally.
462 */
463int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
464 int ring)
465{
466 struct device *ctrldev, *jrdev;
467 struct platform_device *jr_pdev;
468 struct caam_drv_private *ctrlpriv;
469 struct caam_drv_private_jr *jrpriv;
470 u32 *jroffset;
471 int error;
472
473 ctrldev = &pdev->dev;
474 ctrlpriv = dev_get_drvdata(ctrldev);
475
476 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
477 GFP_KERNEL);
478 if (jrpriv == NULL) {
479 dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
480 ring);
481 return -ENOMEM;
482 }
483 jrpriv->parentdev = ctrldev; /* point back to parent */
484 jrpriv->ridx = ring; /* save ring identity relative to detection */
485
486 /*
487 * Derive a pointer to the detected JobRs regs
488 * Driver has already iomapped the entire space, we just
489 * need to add in the offset to this JobR. Don't know if I
490 * like this long-term, but it'll run
491 */
492 jroffset = (u32 *)of_get_property(np, "reg", NULL);
493 jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
494 + *jroffset);
495
496 /* Build a local dev for each detected queue */
497 jr_pdev = of_platform_device_create(np, NULL, ctrldev);
498 if (jr_pdev == NULL) {
499 kfree(jrpriv);
500 return -EINVAL;
501 }
502 jrdev = &jr_pdev->dev;
503 dev_set_drvdata(jrdev, jrpriv);
504 ctrlpriv->jrdev[ring] = jrdev;
505
506 /* Identify the interrupt */
507 jrpriv->irq = of_irq_to_resource(np, 0, NULL);
508
509 /* Now do the platform independent part */
510 error = caam_jr_init(jrdev); /* now turn on hardware */
511 if (error) {
512 kfree(jrpriv);
513 return error;
514 }
515
516 return error;
517}
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
new file mode 100644
index 000000000000..c23df395b622
--- /dev/null
+++ b/drivers/crypto/caam/jr.h
@@ -0,0 +1,21 @@
1/*
2 * CAAM public-level include definitions for the JobR backend
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 */
6
7#ifndef JR_H
8#define JR_H
9
10/* Prototypes for backend-level services exposed to APIs */
11int caam_jr_register(struct device *ctrldev, struct device **rdev);
12int caam_jr_deregister(struct device *rdev);
13int caam_jr_enqueue(struct device *dev, u32 *desc,
14 void (*cbk)(struct device *dev, u32 *desc, u32 status,
15 void *areq),
16 void *areq);
17
18extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
19 int ring);
20extern int caam_jr_shutdown(struct device *dev);
21#endif /* JR_H */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
new file mode 100644
index 000000000000..aee394e39056
--- /dev/null
+++ b/drivers/crypto/caam/regs.h
@@ -0,0 +1,663 @@
1/*
2 * CAAM hardware register-level view
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 */
6
7#ifndef REGS_H
8#define REGS_H
9
10#include <linux/types.h>
11#include <linux/io.h>
12
13/*
14 * Architecture-specific register access methods
15 *
16 * CAAM's bus-addressable registers are 64 bits internally.
17 * They have been wired to be safely accessible on 32-bit
18 * architectures, however. Registers were organized such
19 * that (a) they can be contained in 32 bits, (b) if not, then they
20 * can be treated as two 32-bit entities, or finally (c) if they
21 * must be treated as a single 64-bit value, then this can safely
22 * be done with two 32-bit cycles.
23 *
24 * For 32-bit operations on 64-bit values, CAAM follows the same
25 * 64-bit register access conventions as its predecessors, in that
26 * writes are "triggered" by a write to the register at the numerically
27 * higher address; thus, a full 64-bit write cycle requires a write
28 * to the lower address, followed by a write to the higher address,
29 * which will latch/execute the write cycle.
30 *
31 * For example, let's assume a SW reset of CAAM through the master
32 * configuration register.
33 * - SWRST is in bit 31 of MCFG.
34 * - MCFG begins at base+0x0000.
35 * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
36 * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
37 *
38 * (and on Power, the convention is 0-31, 32-63, I know...)
39 *
40 * Assuming a 64-bit write to this MCFG to perform a software reset
41 * would then require a write of 0 to base+0x0000, followed by a
42 * write of 0x80000000 to base+0x0004, which would "execute" the
43 * reset.
44 *
45 * Of course, since MCFG 63-32 is all zero, we could cheat and simply
46 * write 0x80000000 to base+0x0004, and the reset would work fine.
47 * However, since CAAM does contain some write-and-read-intended
48 * 64-bit registers, this code defines 64-bit access methods for
49 * the sake of internal consistency and simplicity, and so that a
50 * clean transition to 64-bit is possible when it becomes necessary.
51 *
52 * There are limitations to this that the developer must recognize.
53 * 32-bit architectures cannot enforce an atomic 64-bit operation.
54 * Therefore:
55 *
56 * - On writes, since the HW is assumed to latch the cycle on the
57 * write of the higher-numeric-address word, then ordered
58 * writes work OK.
59 *
60 * - For reads, where a register contains a relevant value of more
61 * than 32 bits, the hardware employs logic to latch the other
62 * "half" of the data until read, ensuring an accurate value.
63 * This is of particular relevance when dealing with CAAM's
64 * performance counters.
65 *
66 */
67
68#ifdef __BIG_ENDIAN
69#define wr_reg32(reg, data) out_be32(reg, data)
70#define rd_reg32(reg) in_be32(reg)
71#ifdef CONFIG_64BIT
72#define wr_reg64(reg, data) out_be64(reg, data)
73#define rd_reg64(reg) in_be64(reg)
74#endif
75#else
76#ifdef __LITTLE_ENDIAN
77#define wr_reg32(reg, data) __raw_writel(data, reg)
78#define rd_reg32(reg) __raw_readl(reg)
79#ifdef CONFIG_64BIT
80#define wr_reg64(reg, data) __raw_writeq(data, reg)
81#define rd_reg64(reg) __raw_readq(reg)
82#endif
83#endif
84#endif
85
86#ifndef CONFIG_64BIT
87static inline void wr_reg64(u64 __iomem *reg, u64 data)
88{
89 wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
90 wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull);
91}
92
93static inline u64 rd_reg64(u64 __iomem *reg)
94{
95 return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
96 ((u64)rd_reg32((u32 __iomem *)reg + 1));
97}
98#endif
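With this fallback in place, callers program 64-bit registers identically on both configurations; on 32-bit builds the helper above issues the upper word (numerically lower address) first, so the second, lower-word write latches the cycle as described. For example (a fragment, with jrp as used in jr.c):

/* Sketch: one call programs a 64-bit ring base register; on 32-bit
 * builds it expands to the two ordered wr_reg32() cycles above. */
wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);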
99
100/*
101 * jr_outentry
102 * Represents each entry in a JobR output ring
103 */
104struct jr_outentry {
105 dma_addr_t desc;/* Pointer to completed descriptor */
106 u32 jrstatus; /* Status for completed descriptor */
107} __packed;
108
109/*
110 * caam_perfmon - Performance Monitor/Secure Memory Status/
111 * CAAM Global Status/Component Version IDs
112 *
113 * Spans f00-fff wherever instantiated
114 */
115
116/* Number of DECOs */
117#define CHA_NUM_DECONUM_SHIFT 56
118#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
119
120struct caam_perfmon {
121 /* Performance Monitor Registers f00-f9f */
122 u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
123 u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */
124 u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests */
125 u64 ob_enc_bytes; /* PC_OB_ENCRYPT - Outbound Bytes Encrypted */
126 u64 ob_prot_bytes; /* PC_OB_PROTECT - Outbound Bytes Protected */
127 u64 ib_dec_bytes; /* PC_IB_DECRYPT - Inbound Bytes Decrypted */
128 u64 ib_valid_bytes; /* PC_IB_VALIDATED Inbound Bytes Validated */
129 u64 rsvd[13];
130
131 /* CAAM Hardware Instantiation Parameters fa0-fbf */
132 u64 cha_rev; /* CRNR - CHA Revision Number */
133#define CTPR_QI_SHIFT 57
134#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT)
135 u64 comp_parms; /* CTPR - Compile Parameters Register */
136 u64 rsvd1[2];
137
138 /* CAAM Global Status fc0-fdf */
139 u64 faultaddr; /* FAR - Fault Address */
140 u32 faultliodn; /* FALR - Fault Address LIODN */
141 u32 faultdetail; /* FADR - Fault Addr Detail */
142 u32 rsvd2;
143 u32 status; /* CSTA - CAAM Status */
144 u64 rsvd3;
145
146 /* Component Instantiation Parameters fe0-fff */
147 u32 rtic_id; /* RVID - RTIC Version ID */
148 u32 ccb_id; /* CCBVID - CCB Version ID */
149 u64 cha_id; /* CHAVID - CHA Version ID */
150 u64 cha_num; /* CHANUM - CHA Number */
151 u64 caam_id; /* CAAMVID - CAAM Version ID */
152};
153
154/* LIODN programming for DMA configuration */
155#define MSTRID_LOCK_LIODN 0x80000000
156#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */
157
158#define MSTRID_LIODN_MASK 0x0fff
159struct masterid {
160 u32 liodn_ms; /* lock and make-trusted control bits */
161 u32 liodn_ls; /* LIODN for non-sequence and seq access */
162};
163
164/* Partition ID for DMA configuration */
165struct partid {
166 u32 rsvd1;
167 u32 pidr; /* partition ID, DECO */
168};
169
170/* RNG test mode (replicated twice in some configurations) */
171/* Padded out to 0x100 */
172struct rngtst {
173 u32 mode; /* RTSTMODEx - Test mode */
174 u32 rsvd1[3];
175 u32 reset; /* RTSTRESETx - Test reset control */
176 u32 rsvd2[3];
177 u32 status; /* RTSTSSTATUSx - Test status */
178 u32 rsvd3;
179 u32 errstat; /* RTSTERRSTATx - Test error status */
180 u32 rsvd4;
181 u32 errctl; /* RTSTERRCTLx - Test error control */
182 u32 rsvd5;
183 u32 entropy; /* RTSTENTROPYx - Test entropy */
184 u32 rsvd6[15];
185 u32 verifctl; /* RTSTVERIFCTLx - Test verification control */
186 u32 rsvd7;
187 u32 verifstat; /* RTSTVERIFSTATx - Test verification status */
188 u32 rsvd8;
189 u32 verifdata; /* RTSTVERIFDx - Test verification data */
190 u32 rsvd9;
191 u32 xkey; /* RTSTXKEYx - Test XKEY */
192 u32 rsvd10;
193 u32 oscctctl; /* RTSTOSCCTCTLx - Test osc. counter control */
194 u32 rsvd11;
195 u32 oscct; /* RTSTOSCCTx - Test oscillator counter */
196 u32 rsvd12;
197	u32 oscctstat;		/* RTSTOSCCTSTATx - Test osc counter status */
198 u32 rsvd13[2];
199 u32 ofifo[4]; /* RTSTOFIFOx - Test output FIFO */
200 u32 rsvd14[15];
201};
202
203/*
204 * caam_ctrl - basic core configuration
205 * starts base + 0x0000 padded out to 0x1000
206 */
207
208#define KEK_KEY_SIZE 8
209#define TKEK_KEY_SIZE 8
210#define TDSK_KEY_SIZE 8
211
212#define DECO_RESET 1 /* Use with DECO reset/availability regs */
213#define DECO_RESET_0 (DECO_RESET << 0)
214#define DECO_RESET_1 (DECO_RESET << 1)
215#define DECO_RESET_2 (DECO_RESET << 2)
216#define DECO_RESET_3 (DECO_RESET << 3)
217#define DECO_RESET_4 (DECO_RESET << 4)
218
219struct caam_ctrl {
220 /* Basic Configuration Section 000-01f */
221 /* Read/Writable */
222 u32 rsvd1;
223 u32 mcr; /* MCFG Master Config Register */
224 u32 rsvd2[2];
225
226 /* Bus Access Configuration Section 010-11f */
227 /* Read/Writable */
228 struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
229 u32 rsvd3[12];
230 struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
231 u32 rsvd4[7];
232 u32 deco_rq; /* DECORR - DECO Request */
233 struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
234 u32 rsvd5[22];
235
236 /* DECO Availability/Reset Section 120-3ff */
237 u32 deco_avail; /* DAR - DECO availability */
238 u32 deco_reset; /* DRR - DECO reset */
239 u32 rsvd6[182];
240
241 /* Key Encryption/Decryption Configuration 400-5ff */
242 /* Read/Writable only while in Non-secure mode */
243 u32 kek[KEK_KEY_SIZE]; /* JDKEKR - Key Encryption Key */
244 u32 tkek[TKEK_KEY_SIZE]; /* TDKEKR - Trusted Desc KEK */
245 u32 tdsk[TDSK_KEY_SIZE]; /* TDSKR - Trusted Desc Signing Key */
246 u32 rsvd7[32];
247 u64 sknonce; /* SKNR - Secure Key Nonce */
248 u32 rsvd8[70];
249
250 /* RNG Test/Verification/Debug Access 600-7ff */
251 /* (Useful in Test/Debug modes only...) */
252 struct rngtst rtst[2];
253
254 u32 rsvd9[448];
255
256 /* Performance Monitor f00-fff */
257 struct caam_perfmon perfmon;
258};
259
260/*
261 * Controller master config register defs
262 */
263#define MCFGR_SWRESET 0x80000000 /* software reset */
264#define MCFGR_WDENABLE 0x40000000 /* DECO watchdog enable */
265#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */
266#define MCFGR_DMA_RESET 0x10000000
267#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
268
269/* AXI read cache control */
270#define MCFGR_ARCACHE_SHIFT 12
271#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)
272
273/* AXI write cache control */
274#define MCFGR_AWCACHE_SHIFT 8
275#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
276
277/* AXI pipeline depth */
278#define MCFGR_AXIPIPE_SHIFT 4
279#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
280
281#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
282#define MCFGR_BURST_64 0x00000001 /* Max burst size */
283
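
These field masks imply the usual read-modify-write pattern on MCFG; a sketch,
assuming "ctrl" points at an ioremapped struct caam_ctrl (defined above), of
setting the AXI pipeline depth:

	static void caam_set_axipipe(struct caam_ctrl __iomem *ctrl, u32 depth)
	{
		u32 mcr = rd_reg32(&ctrl->mcr);

		mcr &= ~MCFGR_AXIPIPE_MASK;
		mcr |= (depth << MCFGR_AXIPIPE_SHIFT) & MCFGR_AXIPIPE_MASK;
		wr_reg32(&ctrl->mcr, mcr);
	}
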
284/*
285 * caam_job_ring - direct job ring setup
286 * 1-4 possible per instantiation, base + 1000/2000/3000/4000
287 * Padded out to 0x1000
288 */
289struct caam_job_ring {
290 /* Input ring */
291 u64 inpring_base; /* IRBAx - Input desc ring baseaddr */
292 u32 rsvd1;
293 u32 inpring_size; /* IRSx - Input ring size */
294 u32 rsvd2;
295 u32 inpring_avail; /* IRSAx - Input ring room remaining */
296 u32 rsvd3;
297 u32 inpring_jobadd; /* IRJAx - Input ring jobs added */
298
299 /* Output Ring */
300 u64 outring_base; /* ORBAx - Output status ring base addr */
301 u32 rsvd4;
302 u32 outring_size; /* ORSx - Output ring size */
303 u32 rsvd5;
304 u32 outring_rmvd; /* ORJRx - Output ring jobs removed */
305 u32 rsvd6;
306 u32 outring_used; /* ORSFx - Output ring slots full */
307
308 /* Status/Configuration */
309 u32 rsvd7;
310 u32 jroutstatus; /* JRSTAx - JobR output status */
311 u32 rsvd8;
312 u32 jrintstatus; /* JRINTx - JobR interrupt status */
313 u32 rconfig_hi; /* JRxCFG - Ring configuration */
314 u32 rconfig_lo;
315
316 /* Indices. CAAM maintains as "heads" of each queue */
317 u32 rsvd9;
318 u32 inp_rdidx; /* IRRIx - Input ring read index */
319 u32 rsvd10;
320 u32 out_wtidx; /* ORWIx - Output ring write index */
321
322 /* Command/control */
323 u32 rsvd11;
324 u32 jrcommand; /* JRCRx - JobR command */
325
326 u32 rsvd12[932];
327
328 /* Performance Monitor f00-fff */
329 struct caam_perfmon perfmon;
330};
331
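
The ring registers suggest the enqueue handshake: software checks IRSA for a
free slot, writes a descriptor address into the in-memory input ring, then
bumps IRJA. A hedged sketch, with "jr" assumed to be an ioremapped
struct caam_job_ring and the ring entry already written:

	static void caam_jr_notify_one(struct caam_job_ring __iomem *jr)
	{
		/* Wait for the controller to report room in the input ring */
		while (rd_reg32(&jr->inpring_avail) == 0)
			cpu_relax();

		/* Tell CAAM one more job was added */
		wr_reg32(&jr->inpring_jobadd, 1);
	}
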
332#define JR_RINGSIZE_MASK 0x03ff
333/*
334 * jrstatus - Job Ring Output Status
335 * All values in lo word
336 * Also note, same values written out as status through QI
337 * in the command/status field of a frame descriptor
338 */
339#define JRSTA_SSRC_SHIFT 28
340#define JRSTA_SSRC_MASK 0xf0000000
341
342#define JRSTA_SSRC_NONE 0x00000000
343#define JRSTA_SSRC_CCB_ERROR 0x20000000
344#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
345#define JRSTA_SSRC_DECO 0x40000000
346#define JRSTA_SSRC_JRERROR 0x60000000
347#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000
348
349#define JRSTA_DECOERR_JUMP 0x08000000
350#define JRSTA_DECOERR_INDEX_SHIFT 8
351#define JRSTA_DECOERR_INDEX_MASK 0xff00
352#define JRSTA_DECOERR_ERROR_MASK 0x00ff
353
354#define JRSTA_DECOERR_NONE 0x00
355#define JRSTA_DECOERR_LINKLEN 0x01
356#define JRSTA_DECOERR_LINKPTR 0x02
357#define JRSTA_DECOERR_JRCTRL 0x03
358#define JRSTA_DECOERR_DESCCMD 0x04
359#define JRSTA_DECOERR_ORDER 0x05
360#define JRSTA_DECOERR_KEYCMD 0x06
361#define JRSTA_DECOERR_LOADCMD 0x07
362#define JRSTA_DECOERR_STORECMD 0x08
363#define JRSTA_DECOERR_OPCMD 0x09
364#define JRSTA_DECOERR_FIFOLDCMD 0x0a
365#define JRSTA_DECOERR_FIFOSTCMD 0x0b
366#define JRSTA_DECOERR_MOVECMD 0x0c
367#define JRSTA_DECOERR_JUMPCMD 0x0d
368#define JRSTA_DECOERR_MATHCMD 0x0e
369#define JRSTA_DECOERR_SHASHCMD 0x0f
370#define JRSTA_DECOERR_SEQCMD 0x10
371#define JRSTA_DECOERR_DECOINTERNAL 0x11
372#define JRSTA_DECOERR_SHDESCHDR 0x12
373#define JRSTA_DECOERR_HDRLEN 0x13
374#define JRSTA_DECOERR_BURSTER 0x14
375#define JRSTA_DECOERR_DESCSIGNATURE 0x15
376#define JRSTA_DECOERR_DMA 0x16
377#define JRSTA_DECOERR_BURSTFIFO 0x17
378#define JRSTA_DECOERR_JRRESET 0x1a
379#define JRSTA_DECOERR_JOBFAIL 0x1b
380#define JRSTA_DECOERR_DNRERR 0x80
381#define JRSTA_DECOERR_UNDEFPCL 0x81
382#define JRSTA_DECOERR_PDBERR 0x82
383#define JRSTA_DECOERR_ANRPLY_LATE 0x83
384#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
385#define JRSTA_DECOERR_SEQOVF 0x85
386#define JRSTA_DECOERR_INVSIGN 0x86
387#define JRSTA_DECOERR_DSASIGN 0x87
388
389#define JRSTA_CCBERR_JUMP 0x08000000
390#define JRSTA_CCBERR_INDEX_MASK 0xff00
391#define JRSTA_CCBERR_INDEX_SHIFT 8
392#define JRSTA_CCBERR_CHAID_MASK 0x00f0
393#define JRSTA_CCBERR_CHAID_SHIFT 4
394#define JRSTA_CCBERR_ERRID_MASK 0x000f
395
396#define JRSTA_CCBERR_CHAID_AES (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
397#define JRSTA_CCBERR_CHAID_DES (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
398#define JRSTA_CCBERR_CHAID_ARC4 (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
399#define JRSTA_CCBERR_CHAID_MD (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
400#define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
401#define JRSTA_CCBERR_CHAID_SNOW (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
402#define JRSTA_CCBERR_CHAID_KASUMI (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
403#define JRSTA_CCBERR_CHAID_PK (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
404#define JRSTA_CCBERR_CHAID_CRC (0x09 << JRSTA_CCBERR_CHAID_SHIFT)
405
406#define JRSTA_CCBERR_ERRID_NONE 0x00
407#define JRSTA_CCBERR_ERRID_MODE 0x01
408#define JRSTA_CCBERR_ERRID_DATASIZ 0x02
409#define JRSTA_CCBERR_ERRID_KEYSIZ 0x03
410#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
411#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
412#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
413#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
414#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
415#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
416#define JRSTA_CCBERR_ERRID_ICVCHK 0x0a
417#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
418#define JRSTA_CCBERR_ERRID_CCMAAD 0x0c
419#define JRSTA_CCBERR_ERRID_INVCHA 0x0f
420
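
A hedged sketch of decoding a completed job's status word with the masks
above, handling only the DECO and CCB sources and reporting anything else
generically:

	static void caam_jr_decode_status(u32 status)
	{
		switch (status & JRSTA_SSRC_MASK) {
		case JRSTA_SSRC_NONE:
			break;	/* success */
		case JRSTA_SSRC_DECO:
			pr_err("DECO err 0x%02x at descriptor index %u\n",
			       status & JRSTA_DECOERR_ERROR_MASK,
			       (status & JRSTA_DECOERR_INDEX_MASK) >>
			       JRSTA_DECOERR_INDEX_SHIFT);
			break;
		case JRSTA_SSRC_CCB_ERROR:
			pr_err("CCB err 0x%x in CHA %u\n",
			       status & JRSTA_CCBERR_ERRID_MASK,
			       (status & JRSTA_CCBERR_CHAID_MASK) >>
			       JRSTA_CCBERR_CHAID_SHIFT);
			break;
		default:
			pr_err("status 0x%08x from another source\n", status);
		}
	}
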
421#define JRINT_ERR_INDEX_MASK 0x3fff0000
422#define JRINT_ERR_INDEX_SHIFT 16
423#define JRINT_ERR_TYPE_MASK 0xf00
424#define JRINT_ERR_TYPE_SHIFT 8
425#define JRINT_ERR_HALT_MASK 0xc
426#define JRINT_ERR_HALT_SHIFT 2
427#define JRINT_ERR_HALT_INPROGRESS 0x4
428#define JRINT_ERR_HALT_COMPLETE 0x8
429#define JRINT_JR_ERROR 0x02
430#define JRINT_JR_INT 0x01
431
432#define JRINT_ERR_TYPE_WRITE 1
433#define JRINT_ERR_TYPE_BAD_INPADDR 3
434#define JRINT_ERR_TYPE_BAD_OUTADDR 4
435#define JRINT_ERR_TYPE_INV_INPWRT 5
436#define JRINT_ERR_TYPE_INV_OUTWRT 6
437#define JRINT_ERR_TYPE_RESET 7
438#define JRINT_ERR_TYPE_REMOVE_OFL 8
439#define JRINT_ERR_TYPE_ADD_OFL 9
440
441#define JRCFG_SOE 0x04
442#define JRCFG_ICEN 0x02
443#define JRCFG_IMSK 0x01
444#define JRCFG_ICDCT_SHIFT 8
445#define JRCFG_ICTT_SHIFT 16
446
447#define JRCR_RESET 0x01
448
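
JRCR_RESET together with the JRINT halt bits gives a ring reset handshake; a
sketch under the assumption that a first JRCR write flushes the ring and a
second one resets it once the halt has completed:

	static int caam_jr_flush_and_reset(struct caam_job_ring __iomem *jr)
	{
		unsigned int timeout = 100000;

		wr_reg32(&jr->jrcommand, JRCR_RESET);	/* flush */
		while ((rd_reg32(&jr->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		       JRINT_ERR_HALT_INPROGRESS && --timeout)
			cpu_relax();
		if (!timeout)
			return -EIO;

		wr_reg32(&jr->jrcommand, JRCR_RESET);	/* reset */
		return 0;
	}
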
449/*
450 * caam_assurance - Assurance Controller View
451 * base + 0x6000 padded out to 0x1000
452 */
453
454struct rtic_element {
455 u64 address;
456 u32 rsvd;
457 u32 length;
458};
459
460struct rtic_block {
461 struct rtic_element element[2];
462};
463
464struct rtic_memhash {
465 u32 memhash_be[32];
466 u32 memhash_le[32];
467};
468
469struct caam_assurance {
470 /* Status/Command/Watchdog */
471 u32 rsvd1;
472 u32 status; /* RSTA - Status */
473 u32 rsvd2;
474 u32 cmd; /* RCMD - Command */
475 u32 rsvd3;
476 u32 ctrl; /* RCTL - Control */
477 u32 rsvd4;
478 u32 throttle; /* RTHR - Throttle */
479 u32 rsvd5[2];
480 u64 watchdog; /* RWDOG - Watchdog Timer */
481 u32 rsvd6;
482 u32 rend; /* REND - Endian corrections */
483 u32 rsvd7[50];
484
485 /* Block access/configuration @ 100/110/120/130 */
486 struct rtic_block memblk[4]; /* Memory Blocks A-D */
487 u32 rsvd8[32];
488
489 /* Block hashes @ 200/300/400/500 */
490 struct rtic_memhash hash[4]; /* Block hash values A-D */
491 u32 rsvd_3[640];
492};
493
494/*
495 * caam_queue_if - QI configuration and control
496 * starts base + 0x7000, padded out to 0x1000 long
497 */
498
499struct caam_queue_if {
500 u32 qi_control_hi; /* QICTL - QI Control */
501 u32 qi_control_lo;
502 u32 rsvd1;
503 u32 qi_status; /* QISTA - QI Status */
504 u32 qi_deq_cfg_hi; /* QIDQC - QI Dequeue Configuration */
505 u32 qi_deq_cfg_lo;
506 u32 qi_enq_cfg_hi; /* QISEQC - QI Enqueue Command */
507 u32 qi_enq_cfg_lo;
508 u32 rsvd2[1016];
509};
510
511/* QI control bits - low word */
512#define QICTL_DQEN 0x01 /* Enable frame pop */
513#define QICTL_STOP 0x02 /* Stop dequeue/enqueue */
514#define QICTL_SOE 0x04 /* Stop on error */
515
516/* QI control bits - high word */
517#define QICTL_MBSI 0x01
518#define QICTL_MHWSI 0x02
519#define QICTL_MWSI 0x04
520#define QICTL_MDWSI 0x08
521#define QICTL_CBSI 0x10 /* CtrlDataByteSwapInput */
522#define QICTL_CHWSI 0x20 /* CtrlDataHalfSwapInput */
523#define QICTL_CWSI 0x40 /* CtrlDataWordSwapInput */
524#define QICTL_CDWSI 0x80 /* CtrlDataDWordSwapInput */
525#define QICTL_MBSO 0x0100
526#define QICTL_MHWSO 0x0200
527#define QICTL_MWSO 0x0400
528#define QICTL_MDWSO 0x0800
529#define QICTL_CBSO 0x1000 /* CtrlDataByteSwapOutput */
530#define QICTL_CHWSO 0x2000 /* CtrlDataHalfSwapOutput */
531#define QICTL_CWSO 0x4000 /* CtrlDataWordSwapOutput */
532#define QICTL_CDWSO 0x8000 /* CtrlDataDWordSwapOutput */
533#define QICTL_DMBS 0x010000
534#define QICTL_EPO 0x020000
535
536/* QI status bits */
537#define QISTA_PHRDERR 0x01 /* PreHeader Read Error */
538#define QISTA_CFRDERR 0x02 /* Compound Frame Read Error */
539#define QISTA_OFWRERR 0x04 /* Output Frame Read Error */
540#define QISTA_BPDERR 0x08 /* Buffer Pool Depleted */
541#define QISTA_BTSERR 0x10 /* Buffer Undersize */
542#define QISTA_CFWRERR 0x20 /* Compound Frame Write Err */
543#define QISTA_STOPD 0x80000000 /* QI Stopped (see QICTL) */
544
545/* deco_sg_table - DECO view of scatter/gather table */
546struct deco_sg_table {
547 u64 addr; /* Segment Address */
548 u32 elen; /* E, F bits + 30-bit length */
549	u32 bpid_offset;	/* Buffer Pool ID + 16-bit offset */
550};
551
552/*
553 * caam_deco - descriptor controller - CHA cluster block
554 *
555 * Only accessible when direct DECO access is turned on
556 * (done in DECORR, via MID programmed in DECOxMID
557 *
558 * 5 typical, base + 0x8000/9000/a000/b000
559 * Padded out to 0x1000 long
560 */
561struct caam_deco {
562 u32 rsvd1;
563 u32 cls1_mode; /* CxC1MR - Class 1 Mode */
564 u32 rsvd2;
565 u32 cls1_keysize; /* CxC1KSR - Class 1 Key Size */
566 u32 cls1_datasize_hi; /* CxC1DSR - Class 1 Data Size */
567 u32 cls1_datasize_lo;
568 u32 rsvd3;
569 u32 cls1_icvsize; /* CxC1ICVSR - Class 1 ICV size */
570 u32 rsvd4[5];
571 u32 cha_ctrl; /* CCTLR - CHA control */
572 u32 rsvd5;
573	u32 irq_ctrl;		/* CxCIRQ - CCB interrupt done/error/clear */
574 u32 rsvd6;
575 u32 clr_written; /* CxCWR - Clear-Written */
576 u32 ccb_status_hi; /* CxCSTA - CCB Status/Error */
577 u32 ccb_status_lo;
578 u32 rsvd7[3];
579 u32 aad_size; /* CxAADSZR - Current AAD Size */
580 u32 rsvd8;
581 u32 cls1_iv_size; /* CxC1IVSZR - Current Class 1 IV Size */
582 u32 rsvd9[7];
583 u32 pkha_a_size; /* PKASZRx - Size of PKHA A */
584 u32 rsvd10;
585 u32 pkha_b_size; /* PKBSZRx - Size of PKHA B */
586 u32 rsvd11;
587 u32 pkha_n_size; /* PKNSZRx - Size of PKHA N */
588 u32 rsvd12;
589 u32 pkha_e_size; /* PKESZRx - Size of PKHA E */
590 u32 rsvd13[24];
591 u32 cls1_ctx[16]; /* CxC1CTXR - Class 1 Context @100 */
592 u32 rsvd14[48];
593 u32 cls1_key[8]; /* CxC1KEYR - Class 1 Key @200 */
594 u32 rsvd15[121];
595 u32 cls2_mode; /* CxC2MR - Class 2 Mode */
596 u32 rsvd16;
597 u32 cls2_keysize; /* CxX2KSR - Class 2 Key Size */
598 u32 cls2_datasize_hi; /* CxC2DSR - Class 2 Data Size */
599 u32 cls2_datasize_lo;
600 u32 rsvd17;
601 u32 cls2_icvsize; /* CxC2ICVSZR - Class 2 ICV Size */
602 u32 rsvd18[56];
603 u32 cls2_ctx[18]; /* CxC2CTXR - Class 2 Context @500 */
604 u32 rsvd19[46];
605 u32 cls2_key[32]; /* CxC2KEYR - Class2 Key @600 */
606 u32 rsvd20[84];
607 u32 inp_infofifo_hi; /* CxIFIFO - Input Info FIFO @7d0 */
608 u32 inp_infofifo_lo;
609 u32 rsvd21[2];
610 u64 inp_datafifo; /* CxDFIFO - Input Data FIFO */
611 u32 rsvd22[2];
612 u64 out_datafifo; /* CxOFIFO - Output Data FIFO */
613 u32 rsvd23[2];
614 u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
615 u32 jr_ctl_lo;
616 u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
617 u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
618 u32 op_status_lo;
619 u32 rsvd24[2];
620 u32 liodn; /* DxLSR - DECO LIODN Status - non-seq */
621 u32 td_liodn; /* DxLSR - DECO LIODN Status - trustdesc */
622 u32 rsvd26[6];
623 u64 math[4]; /* DxMTH - Math register */
624 u32 rsvd27[8];
625 struct deco_sg_table gthr_tbl[4]; /* DxGTR - Gather Tables */
626 u32 rsvd28[16];
627 struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */
628 u32 rsvd29[48];
629 u32 descbuf[64]; /* DxDESB - Descriptor buffer */
630 u32 rsvd30[320];
631};
632
633/*
634 * Current top-level view of memory map is:
635 *
636 * 0x0000 - 0x0fff - CAAM Top-Level Control
637 * 0x1000 - 0x1fff - Job Ring 0
638 * 0x2000 - 0x2fff - Job Ring 1
639 * 0x3000 - 0x3fff - Job Ring 2
640 * 0x4000 - 0x4fff - Job Ring 3
641 * 0x5000 - 0x5fff - (unused)
642 * 0x6000 - 0x6fff - Assurance Controller
643 * 0x7000 - 0x7fff - Queue Interface
644 * 0x8000 - 0x8fff - DECO-CCB 0
645 * 0x9000 - 0x9fff - DECO-CCB 1
646 * 0xa000 - 0xafff - DECO-CCB 2
647 * 0xb000 - 0xbfff - DECO-CCB 3
648 * 0xc000 - 0xcfff - DECO-CCB 4
649 *
650 * caam_full describes the full register view of CAAM if useful,
651 * although many configurations may choose to implement parts of
652 * the register map separately, in differing privilege regions
653 */
654struct caam_full {
655 struct caam_ctrl __iomem ctrl;
656 struct caam_job_ring jr[4];
657 u64 rsvd[512];
658 struct caam_assurance assure;
659 struct caam_queue_if qi;
660 struct caam_deco *deco;
661};
662
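
Given the 0x1000 stride in the map above, locating job ring "i" (0-3) from the
controller base is plain pointer arithmetic; a hedged illustration:

	static struct caam_job_ring __iomem *
	caam_jr_block(struct caam_ctrl __iomem *ctrl, int i)
	{
		/* Rings sit at base + 0x1000, 0x2000, 0x3000, 0x4000 */
		return (struct caam_job_ring __iomem *)
		       ((u8 __iomem *)ctrl + (i + 1) * 0x1000);
	}
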
663#endif /* REGS_H */
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index c99305afa58a..3cf303ee3fe3 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -133,7 +133,6 @@ struct mv_req_hash_ctx {
133 int extra_bytes; /* unprocessed bytes in buffer */ 133 int extra_bytes; /* unprocessed bytes in buffer */
134 enum hash_op op; 134 enum hash_op op;
135 int count_add; 135 int count_add;
136 struct scatterlist dummysg;
137}; 136};
138 137
139static void compute_aes_dec_key(struct mv_ctx *ctx) 138static void compute_aes_dec_key(struct mv_ctx *ctx)
@@ -187,9 +186,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
187{ 186{
188 int ret; 187 int ret;
189 void *sbuf; 188 void *sbuf;
190 int copied = 0; 189 int copy_len;
191 190
192 while (1) { 191 while (len) {
193 if (!p->sg_src_left) { 192 if (!p->sg_src_left) {
194 ret = sg_miter_next(&p->src_sg_it); 193 ret = sg_miter_next(&p->src_sg_it);
195 BUG_ON(!ret); 194 BUG_ON(!ret);
@@ -199,19 +198,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
199 198
200 sbuf = p->src_sg_it.addr + p->src_start; 199 sbuf = p->src_sg_it.addr + p->src_start;
201 200
202 if (p->sg_src_left <= len - copied) { 201 copy_len = min(p->sg_src_left, len);
203 memcpy(dbuf + copied, sbuf, p->sg_src_left); 202 memcpy(dbuf, sbuf, copy_len);
204 copied += p->sg_src_left; 203
205 p->sg_src_left = 0; 204 p->src_start += copy_len;
206 if (copied >= len) 205 p->sg_src_left -= copy_len;
207 break; 206
208 } else { 207 len -= copy_len;
209 int copy_len = len - copied; 208 dbuf += copy_len;
210 memcpy(dbuf + copied, sbuf, copy_len);
211 p->src_start += copy_len;
212 p->sg_src_left -= copy_len;
213 break;
214 }
215 } 209 }
216} 210}
217 211
@@ -275,7 +269,6 @@ static void mv_process_current_q(int first_block)
275 memcpy(cpg->sram + SRAM_CONFIG, &op, 269 memcpy(cpg->sram + SRAM_CONFIG, &op,
276 sizeof(struct sec_accel_config)); 270 sizeof(struct sec_accel_config));
277 271
278 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
279 /* GO */ 272 /* GO */
280 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); 273 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
281 274
@@ -302,6 +295,7 @@ static void mv_crypto_algo_completion(void)
302static void mv_process_hash_current(int first_block) 295static void mv_process_hash_current(int first_block)
303{ 296{
304 struct ahash_request *req = ahash_request_cast(cpg->cur_req); 297 struct ahash_request *req = ahash_request_cast(cpg->cur_req);
298 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
305 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); 299 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
306 struct req_progress *p = &cpg->p; 300 struct req_progress *p = &cpg->p;
307 struct sec_accel_config op = { 0 }; 301 struct sec_accel_config op = { 0 };
@@ -314,6 +308,8 @@ static void mv_process_hash_current(int first_block)
314 break; 308 break;
315 case COP_HMAC_SHA1: 309 case COP_HMAC_SHA1:
316 op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; 310 op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
311 memcpy(cpg->sram + SRAM_HMAC_IV_IN,
312 tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
317 break; 313 break;
318 } 314 }
319 315
@@ -345,11 +341,16 @@ static void mv_process_hash_current(int first_block)
345 op.config |= CFG_LAST_FRAG; 341 op.config |= CFG_LAST_FRAG;
346 else 342 else
347 op.config |= CFG_MID_FRAG; 343 op.config |= CFG_MID_FRAG;
344
345 writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
346 writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
347 writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
348 writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
349 writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
348 } 350 }
349 351
350 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); 352 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
351 353
352 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
353 /* GO */ 354 /* GO */
354 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); 355 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
355 356
@@ -409,12 +410,6 @@ static void mv_hash_algo_completion(void)
409 copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); 410 copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
410 sg_miter_stop(&cpg->p.src_sg_it); 411 sg_miter_stop(&cpg->p.src_sg_it);
411 412
412 ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
413 ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
414 ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
415 ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
416 ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
417
418 if (likely(ctx->last_chunk)) { 413 if (likely(ctx->last_chunk)) {
419 if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { 414 if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
420 memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, 415 memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
@@ -422,6 +417,12 @@ static void mv_hash_algo_completion(void)
422 (req))); 417 (req)));
423 } else 418 } else
424 mv_hash_final_fallback(req); 419 mv_hash_final_fallback(req);
420 } else {
421 ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
422 ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
423 ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
424 ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
425 ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
425 } 426 }
426} 427}
427 428
@@ -480,7 +481,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
480 int i = 0; 481 int i = 0;
481 size_t cur_len; 482 size_t cur_len;
482 483
483 while (1) { 484 while (sl) {
484 cur_len = sl[i].length; 485 cur_len = sl[i].length;
485 ++i; 486 ++i;
486 if (total_bytes > cur_len) 487 if (total_bytes > cur_len)
@@ -517,29 +518,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
517{ 518{
518 struct req_progress *p = &cpg->p; 519 struct req_progress *p = &cpg->p;
519 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); 520 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
520 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
521 int num_sgs, hw_bytes, old_extra_bytes, rc; 521 int num_sgs, hw_bytes, old_extra_bytes, rc;
522 cpg->cur_req = &req->base; 522 cpg->cur_req = &req->base;
523 memset(p, 0, sizeof(struct req_progress)); 523 memset(p, 0, sizeof(struct req_progress));
524 hw_bytes = req->nbytes + ctx->extra_bytes; 524 hw_bytes = req->nbytes + ctx->extra_bytes;
525 old_extra_bytes = ctx->extra_bytes; 525 old_extra_bytes = ctx->extra_bytes;
526 526
527 if (unlikely(ctx->extra_bytes)) {
528 memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
529 ctx->extra_bytes);
530 p->crypt_len = ctx->extra_bytes;
531 }
532
533 memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
534
535 if (unlikely(!ctx->first_hash)) {
536 writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
537 writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
538 writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
539 writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
540 writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
541 }
542
543 ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; 527 ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
544 if (ctx->extra_bytes != 0 528 if (ctx->extra_bytes != 0
545 && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) 529 && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
@@ -555,6 +539,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
555 p->complete = mv_hash_algo_completion; 539 p->complete = mv_hash_algo_completion;
556 p->process = mv_process_hash_current; 540 p->process = mv_process_hash_current;
557 541
542 if (unlikely(old_extra_bytes)) {
543 memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
544 old_extra_bytes);
545 p->crypt_len = old_extra_bytes;
546 }
547
558 mv_process_hash_current(1); 548 mv_process_hash_current(1);
559 } else { 549 } else {
560 copy_src_to_buf(p, ctx->buffer + old_extra_bytes, 550 copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
@@ -603,9 +593,7 @@ static int queue_manag(void *data)
603 if (async_req->tfm->__crt_alg->cra_type != 593 if (async_req->tfm->__crt_alg->cra_type !=
604 &crypto_ahash_type) { 594 &crypto_ahash_type) {
605 struct ablkcipher_request *req = 595 struct ablkcipher_request *req =
606 container_of(async_req, 596 ablkcipher_request_cast(async_req);
607 struct ablkcipher_request,
608 base);
609 mv_start_new_crypt_req(req); 597 mv_start_new_crypt_req(req);
610 } else { 598 } else {
611 struct ahash_request *req = 599 struct ahash_request *req =
@@ -722,19 +710,13 @@ static int mv_hash_update(struct ahash_request *req)
722static int mv_hash_final(struct ahash_request *req) 710static int mv_hash_final(struct ahash_request *req)
723{ 711{
724 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); 712 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
725 /* dummy buffer of 4 bytes */ 713
726 sg_init_one(&ctx->dummysg, ctx->buffer, 4);
727 /* I think I'm allowed to do that... */
728 ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
729 mv_update_hash_req_ctx(ctx, 1, 0); 714 mv_update_hash_req_ctx(ctx, 1, 0);
730 return mv_handle_req(&req->base); 715 return mv_handle_req(&req->base);
731} 716}
732 717
733static int mv_hash_finup(struct ahash_request *req) 718static int mv_hash_finup(struct ahash_request *req)
734{ 719{
735 if (!req->nbytes)
736 return mv_hash_final(req);
737
738 mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); 720 mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
739 return mv_handle_req(&req->base); 721 return mv_handle_req(&req->base);
740} 722}
@@ -1065,14 +1047,21 @@ static int mv_probe(struct platform_device *pdev)
1065 1047
1066 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); 1048 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
1067 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); 1049 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
1050 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
1068 1051
1069 ret = crypto_register_alg(&mv_aes_alg_ecb); 1052 ret = crypto_register_alg(&mv_aes_alg_ecb);
1070 if (ret) 1053 if (ret) {
1054 printk(KERN_WARNING MV_CESA
1055 "Could not register aes-ecb driver\n");
1071 goto err_irq; 1056 goto err_irq;
1057 }
1072 1058
1073 ret = crypto_register_alg(&mv_aes_alg_cbc); 1059 ret = crypto_register_alg(&mv_aes_alg_cbc);
1074 if (ret) 1060 if (ret) {
1061 printk(KERN_WARNING MV_CESA
1062 "Could not register aes-cbc driver\n");
1075 goto err_unreg_ecb; 1063 goto err_unreg_ecb;
1064 }
1076 1065
1077 ret = crypto_register_ahash(&mv_sha1_alg); 1066 ret = crypto_register_ahash(&mv_sha1_alg);
1078 if (ret == 0) 1067 if (ret == 0)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 465cde3e4f60..ba8f1ea84c5e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -78,7 +78,6 @@
78#define FLAGS_SHA1 0x0010 78#define FLAGS_SHA1 0x0010
79#define FLAGS_DMA_ACTIVE 0x0020 79#define FLAGS_DMA_ACTIVE 0x0020
80#define FLAGS_OUTPUT_READY 0x0040 80#define FLAGS_OUTPUT_READY 0x0040
81#define FLAGS_CLEAN 0x0080
82#define FLAGS_INIT 0x0100 81#define FLAGS_INIT 0x0100
83#define FLAGS_CPU 0x0200 82#define FLAGS_CPU 0x0200
84#define FLAGS_HMAC 0x0400 83#define FLAGS_HMAC 0x0400
@@ -511,26 +510,6 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
511 return 0; 510 return 0;
512} 511}
513 512
514static void omap_sham_cleanup(struct ahash_request *req)
515{
516 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
517 struct omap_sham_dev *dd = ctx->dd;
518 unsigned long flags;
519
520 spin_lock_irqsave(&dd->lock, flags);
521 if (ctx->flags & FLAGS_CLEAN) {
522 spin_unlock_irqrestore(&dd->lock, flags);
523 return;
524 }
525 ctx->flags |= FLAGS_CLEAN;
526 spin_unlock_irqrestore(&dd->lock, flags);
527
528 if (ctx->digcnt)
529 omap_sham_copy_ready_hash(req);
530
531 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
532}
533
534static int omap_sham_init(struct ahash_request *req) 513static int omap_sham_init(struct ahash_request *req)
535{ 514{
536 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 515 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -618,9 +597,8 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
618 return err; 597 return err;
619} 598}
620 599
621static int omap_sham_finish_req_hmac(struct ahash_request *req) 600static int omap_sham_finish_hmac(struct ahash_request *req)
622{ 601{
623 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
624 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 602 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
625 struct omap_sham_hmac_ctx *bctx = tctx->base; 603 struct omap_sham_hmac_ctx *bctx = tctx->base;
626 int bs = crypto_shash_blocksize(bctx->shash); 604 int bs = crypto_shash_blocksize(bctx->shash);
@@ -635,7 +613,24 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
635 613
636 return crypto_shash_init(&desc.shash) ?: 614 return crypto_shash_init(&desc.shash) ?:
637 crypto_shash_update(&desc.shash, bctx->opad, bs) ?: 615 crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
638 crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest); 616 crypto_shash_finup(&desc.shash, req->result, ds, req->result);
617}
618
619static int omap_sham_finish(struct ahash_request *req)
620{
621 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
622 struct omap_sham_dev *dd = ctx->dd;
623 int err = 0;
624
625 if (ctx->digcnt) {
626 omap_sham_copy_ready_hash(req);
627 if (ctx->flags & FLAGS_HMAC)
628 err = omap_sham_finish_hmac(req);
629 }
630
631 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
632
633 return err;
639} 634}
640 635
641static void omap_sham_finish_req(struct ahash_request *req, int err) 636static void omap_sham_finish_req(struct ahash_request *req, int err)
@@ -645,15 +640,12 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
645 640
646 if (!err) { 641 if (!err) {
647 omap_sham_copy_hash(ctx->dd->req, 1); 642 omap_sham_copy_hash(ctx->dd->req, 1);
648 if (ctx->flags & FLAGS_HMAC) 643 if (ctx->flags & FLAGS_FINAL)
649 err = omap_sham_finish_req_hmac(req); 644 err = omap_sham_finish(req);
650 } else { 645 } else {
651 ctx->flags |= FLAGS_ERROR; 646 ctx->flags |= FLAGS_ERROR;
652 } 647 }
653 648
654 if ((ctx->flags & FLAGS_FINAL) || err)
655 omap_sham_cleanup(req);
656
657 clk_disable(dd->iclk); 649 clk_disable(dd->iclk);
658 dd->flags &= ~FLAGS_BUSY; 650 dd->flags &= ~FLAGS_BUSY;
659 651
@@ -809,22 +801,21 @@ static int omap_sham_final_shash(struct ahash_request *req)
809static int omap_sham_final(struct ahash_request *req) 801static int omap_sham_final(struct ahash_request *req)
810{ 802{
811 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 803 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
812 int err = 0;
813 804
814 ctx->flags |= FLAGS_FINUP; 805 ctx->flags |= FLAGS_FINUP;
815 806
816 if (!(ctx->flags & FLAGS_ERROR)) { 807 if (ctx->flags & FLAGS_ERROR)
817 /* OMAP HW accel works only with buffers >= 9 */ 808 return 0; /* uncompleted hash is not needed */
818 /* HMAC is always >= 9 because of ipad */
819 if ((ctx->digcnt + ctx->bufcnt) < 9)
820 err = omap_sham_final_shash(req);
821 else if (ctx->bufcnt)
822 return omap_sham_enqueue(req, OP_FINAL);
823 }
824 809
825 omap_sham_cleanup(req); 810 /* OMAP HW accel works only with buffers >= 9 */
811 /* HMAC is always >= 9 because ipad == block size */
812 if ((ctx->digcnt + ctx->bufcnt) < 9)
813 return omap_sham_final_shash(req);
814 else if (ctx->bufcnt)
815 return omap_sham_enqueue(req, OP_FINAL);
826 816
827 return err; 817 /* copy ready hash (+ finalize hmac) */
818 return omap_sham_finish(req);
828} 819}
829 820
830static int omap_sham_finup(struct ahash_request *req) 821static int omap_sham_finup(struct ahash_request *req)
@@ -835,7 +826,7 @@ static int omap_sham_finup(struct ahash_request *req)
835 ctx->flags |= FLAGS_FINUP; 826 ctx->flags |= FLAGS_FINUP;
836 827
837 err1 = omap_sham_update(req); 828 err1 = omap_sham_update(req);
838 if (err1 == -EINPROGRESS) 829 if (err1 == -EINPROGRESS || err1 == -EBUSY)
839 return err1; 830 return err1;
840 /* 831 /*
841 * final() has to be always called to cleanup resources 832 * final() has to be always called to cleanup resources
@@ -890,8 +881,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
890 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); 881 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
891 const char *alg_name = crypto_tfm_alg_name(tfm); 882 const char *alg_name = crypto_tfm_alg_name(tfm);
892 883
893 pr_info("enter\n");
894
895 /* Allocate a fallback and abort if it failed. */ 884 /* Allocate a fallback and abort if it failed. */
896 tctx->fallback = crypto_alloc_shash(alg_name, 0, 885 tctx->fallback = crypto_alloc_shash(alg_name, 0,
897 CRYPTO_ALG_NEED_FALLBACK); 886 CRYPTO_ALG_NEED_FALLBACK);
@@ -1297,7 +1286,8 @@ static int __init omap_sham_mod_init(void)
1297 pr_info("loading %s driver\n", "omap-sham"); 1286 pr_info("loading %s driver\n", "omap-sham");
1298 1287
1299 if (!cpu_class_is_omap2() || 1288 if (!cpu_class_is_omap2() ||
1300 omap_type() != OMAP2_DEVICE_TYPE_SEC) { 1289 (omap_type() != OMAP2_DEVICE_TYPE_SEC &&
1290 omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
1301 pr_err("Unsupported cpu\n"); 1291 pr_err("Unsupported cpu\n");
1302 return -ENODEV; 1292 return -ENODEV;
1303 } 1293 }
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index adf075b6b9a8..06bdb4b2c6a6 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -288,9 +288,250 @@ static struct shash_alg sha256_alg = {
288 } 288 }
289}; 289};
290 290
291/* Add two shash_alg instances for the hardware-implemented *
292 * multi-part hash supported by the VIA Nano processor. */
293static int padlock_sha1_init_nano(struct shash_desc *desc)
294{
295 struct sha1_state *sctx = shash_desc_ctx(desc);
296
297 *sctx = (struct sha1_state){
298 .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
299 };
300
301 return 0;
302}
303
304static int padlock_sha1_update_nano(struct shash_desc *desc,
305 const u8 *data, unsigned int len)
306{
307 struct sha1_state *sctx = shash_desc_ctx(desc);
308 unsigned int partial, done;
309 const u8 *src;
310	/* The PHE requires the output buffer to be 128 bytes long and 16-byte aligned */
311 u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
312 ((aligned(STACK_ALIGN)));
313 u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
314 int ts_state;
315
316 partial = sctx->count & 0x3f;
317 sctx->count += len;
318 done = 0;
319 src = data;
320 memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
321
322 if ((partial + len) >= SHA1_BLOCK_SIZE) {
323
324		/* Complete the partial block in the state buffer and process it */
325 if (partial) {
326 done = -partial;
327 memcpy(sctx->buffer + partial, data,
328 done + SHA1_BLOCK_SIZE);
329 src = sctx->buffer;
330 ts_state = irq_ts_save();
331 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
332 : "+S"(src), "+D"(dst) \
333 : "a"((long)-1), "c"((unsigned long)1));
334 irq_ts_restore(ts_state);
335 done += SHA1_BLOCK_SIZE;
336 src = data + done;
337 }
338
339		/* Process the remaining bytes of the input data */
340 if (len - done >= SHA1_BLOCK_SIZE) {
341 ts_state = irq_ts_save();
342 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
343 : "+S"(src), "+D"(dst)
344 : "a"((long)-1),
345 "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
346 irq_ts_restore(ts_state);
347 done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
348 src = data + done;
349 }
350 partial = 0;
351 }
352 memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
353 memcpy(sctx->buffer + partial, src, len - done);
354
355 return 0;
356}
357
358static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
359{
360 struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
361 unsigned int partial, padlen;
362 __be64 bits;
363 static const u8 padding[64] = { 0x80, };
364
365 bits = cpu_to_be64(state->count << 3);
366
367 /* Pad out to 56 mod 64 */
368 partial = state->count & 0x3f;
369 padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
370 padlock_sha1_update_nano(desc, padding, padlen);
371
372 /* Append length field bytes */
373 padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));
374
375 /* Swap to output */
376 padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
377
378 return 0;
379}
380
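
To see the padding arithmetic concretely: with partial = 60 leftover bytes,
padlen = (64 + 56) - 60 = 60, so the padded count is 120, i.e. 56 mod 64;
appending the 8-byte big-endian bit length then ends exactly on a 64-byte
block boundary, as SHA-1 requires.
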
381static int padlock_sha256_init_nano(struct shash_desc *desc)
382{
383 struct sha256_state *sctx = shash_desc_ctx(desc);
384
385 *sctx = (struct sha256_state){
386 .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
387 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
388 };
389
390 return 0;
391}
392
393static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
394 unsigned int len)
395{
396 struct sha256_state *sctx = shash_desc_ctx(desc);
397 unsigned int partial, done;
398 const u8 *src;
399	/* The PHE requires the output buffer to be 128 bytes long and 16-byte aligned */
400 u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
401 ((aligned(STACK_ALIGN)));
402 u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
403 int ts_state;
404
405 partial = sctx->count & 0x3f;
406 sctx->count += len;
407 done = 0;
408 src = data;
409 memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
410
411 if ((partial + len) >= SHA256_BLOCK_SIZE) {
412
413		/* Complete the partial block in the state buffer and process it */
414 if (partial) {
415 done = -partial;
416 memcpy(sctx->buf + partial, data,
417 done + SHA256_BLOCK_SIZE);
418 src = sctx->buf;
419 ts_state = irq_ts_save();
420 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
421 : "+S"(src), "+D"(dst)
422 : "a"((long)-1), "c"((unsigned long)1));
423 irq_ts_restore(ts_state);
424 done += SHA256_BLOCK_SIZE;
425 src = data + done;
426 }
427
428		/* Process the remaining bytes of the input data */
429 if (len - done >= SHA256_BLOCK_SIZE) {
430 ts_state = irq_ts_save();
431 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
432 : "+S"(src), "+D"(dst)
433 : "a"((long)-1),
434 "c"((unsigned long)((len - done) / 64)));
435 irq_ts_restore(ts_state);
436 done += ((len - done) - (len - done) % 64);
437 src = data + done;
438 }
439 partial = 0;
440 }
441 memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
442 memcpy(sctx->buf + partial, src, len - done);
443
444 return 0;
445}
446
447static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
448{
449 struct sha256_state *state =
450 (struct sha256_state *)shash_desc_ctx(desc);
451 unsigned int partial, padlen;
452 __be64 bits;
453 static const u8 padding[64] = { 0x80, };
454
455 bits = cpu_to_be64(state->count << 3);
456
457 /* Pad out to 56 mod 64 */
458 partial = state->count & 0x3f;
459 padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
460 padlock_sha256_update_nano(desc, padding, padlen);
461
462 /* Append length field bytes */
463 padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));
464
465 /* Swap to output */
466 padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
467
468 return 0;
469}
470
471static int padlock_sha_export_nano(struct shash_desc *desc,
472 void *out)
473{
474 int statesize = crypto_shash_statesize(desc->tfm);
475 void *sctx = shash_desc_ctx(desc);
476
477 memcpy(out, sctx, statesize);
478 return 0;
479}
480
481static int padlock_sha_import_nano(struct shash_desc *desc,
482 const void *in)
483{
484 int statesize = crypto_shash_statesize(desc->tfm);
485 void *sctx = shash_desc_ctx(desc);
486
487 memcpy(sctx, in, statesize);
488 return 0;
489}
490
491static struct shash_alg sha1_alg_nano = {
492 .digestsize = SHA1_DIGEST_SIZE,
493 .init = padlock_sha1_init_nano,
494 .update = padlock_sha1_update_nano,
495 .final = padlock_sha1_final_nano,
496 .export = padlock_sha_export_nano,
497 .import = padlock_sha_import_nano,
498 .descsize = sizeof(struct sha1_state),
499 .statesize = sizeof(struct sha1_state),
500 .base = {
501 .cra_name = "sha1",
502 .cra_driver_name = "sha1-padlock-nano",
503 .cra_priority = PADLOCK_CRA_PRIORITY,
504 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
505 .cra_blocksize = SHA1_BLOCK_SIZE,
506 .cra_module = THIS_MODULE,
507 }
508};
509
510static struct shash_alg sha256_alg_nano = {
511 .digestsize = SHA256_DIGEST_SIZE,
512 .init = padlock_sha256_init_nano,
513 .update = padlock_sha256_update_nano,
514 .final = padlock_sha256_final_nano,
515 .export = padlock_sha_export_nano,
516 .import = padlock_sha_import_nano,
517 .descsize = sizeof(struct sha256_state),
518 .statesize = sizeof(struct sha256_state),
519 .base = {
520 .cra_name = "sha256",
521 .cra_driver_name = "sha256-padlock-nano",
522 .cra_priority = PADLOCK_CRA_PRIORITY,
523 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
524 .cra_blocksize = SHA256_BLOCK_SIZE,
525 .cra_module = THIS_MODULE,
526 }
527};
528
291static int __init padlock_init(void) 529static int __init padlock_init(void)
292{ 530{
293 int rc = -ENODEV; 531 int rc = -ENODEV;
532 struct cpuinfo_x86 *c = &cpu_data(0);
533 struct shash_alg *sha1;
534 struct shash_alg *sha256;
294 535
295 if (!cpu_has_phe) { 536 if (!cpu_has_phe) {
296 printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n"); 537 printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
@@ -302,11 +543,21 @@ static int __init padlock_init(void)
302 return -ENODEV; 543 return -ENODEV;
303 } 544 }
304 545
305 rc = crypto_register_shash(&sha1_alg); 546 /* Register the newly added algorithm module if on *
547 * VIA Nano processor, or else just do as before */
548 if (c->x86_model < 0x0f) {
549 sha1 = &sha1_alg;
550 sha256 = &sha256_alg;
551 } else {
552 sha1 = &sha1_alg_nano;
553 sha256 = &sha256_alg_nano;
554 }
555
556 rc = crypto_register_shash(sha1);
306 if (rc) 557 if (rc)
307 goto out; 558 goto out;
308 559
309 rc = crypto_register_shash(&sha256_alg); 560 rc = crypto_register_shash(sha256);
310 if (rc) 561 if (rc)
311 goto out_unreg1; 562 goto out_unreg1;
312 563
@@ -315,7 +566,8 @@ static int __init padlock_init(void)
315 return 0; 566 return 0;
316 567
317out_unreg1: 568out_unreg1:
318 crypto_unregister_shash(&sha1_alg); 569 crypto_unregister_shash(sha1);
570
319out: 571out:
320 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); 572 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
321 return rc; 573 return rc;
@@ -323,8 +575,15 @@ out:
323 575
324static void __exit padlock_fini(void) 576static void __exit padlock_fini(void)
325{ 577{
326 crypto_unregister_shash(&sha1_alg); 578 struct cpuinfo_x86 *c = &cpu_data(0);
327 crypto_unregister_shash(&sha256_alg); 579
580 if (c->x86_model >= 0x0f) {
581 crypto_unregister_shash(&sha1_alg_nano);
582 crypto_unregister_shash(&sha256_alg_nano);
583 } else {
584 crypto_unregister_shash(&sha1_alg);
585 crypto_unregister_shash(&sha256_alg);
586 }
328} 587}
329 588
330module_init(padlock_init); 589module_init(padlock_init);
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index b092d0a65837..230b5b8cda1f 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -176,6 +176,8 @@ struct spacc_aead_ctx {
176 u8 salt[AES_BLOCK_SIZE]; 176 u8 salt[AES_BLOCK_SIZE];
177}; 177};
178 178
179static int spacc_ablk_submit(struct spacc_req *req);
180
179static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg) 181static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
180{ 182{
181 return alg ? container_of(alg, struct spacc_alg, alg) : NULL; 183 return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
@@ -666,6 +668,24 @@ static int spacc_aead_submit(struct spacc_req *req)
666 return -EINPROGRESS; 668 return -EINPROGRESS;
667} 669}
668 670
671static int spacc_req_submit(struct spacc_req *req);
672
673static void spacc_push(struct spacc_engine *engine)
674{
675 struct spacc_req *req;
676
677 while (!list_empty(&engine->pending) &&
678 engine->in_flight + 1 <= engine->fifo_sz) {
679
680 ++engine->in_flight;
681 req = list_first_entry(&engine->pending, struct spacc_req,
682 list);
683 list_move_tail(&req->list, &engine->in_progress);
684
685 req->result = spacc_req_submit(req);
686 }
687}
688
669/* 689/*
670 * Setup an AEAD request for processing. This will configure the engine, load 690 * Setup an AEAD request for processing. This will configure the engine, load
671 * the context and then start the packet processing. 691 * the context and then start the packet processing.
@@ -698,7 +718,8 @@ static int spacc_aead_setup(struct aead_request *req, u8 *giv,
698 718
699 err = -EINPROGRESS; 719 err = -EINPROGRESS;
700 spin_lock_irqsave(&engine->hw_lock, flags); 720 spin_lock_irqsave(&engine->hw_lock, flags);
701 if (unlikely(spacc_fifo_cmd_full(engine))) { 721 if (unlikely(spacc_fifo_cmd_full(engine)) ||
722 engine->in_flight + 1 > engine->fifo_sz) {
702 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 723 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
703 err = -EBUSY; 724 err = -EBUSY;
704 spin_unlock_irqrestore(&engine->hw_lock, flags); 725 spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -706,9 +727,8 @@ static int spacc_aead_setup(struct aead_request *req, u8 *giv,
706 } 727 }
707 list_add_tail(&dev_req->list, &engine->pending); 728 list_add_tail(&dev_req->list, &engine->pending);
708 } else { 729 } else {
709 ++engine->in_flight; 730 list_add_tail(&dev_req->list, &engine->pending);
710 list_add_tail(&dev_req->list, &engine->in_progress); 731 spacc_push(engine);
711 spacc_aead_submit(dev_req);
712 } 732 }
713 spin_unlock_irqrestore(&engine->hw_lock, flags); 733 spin_unlock_irqrestore(&engine->hw_lock, flags);
714 734
@@ -1041,7 +1061,8 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
1041 * we either stick it on the end of a pending list if we can backlog, 1061 * we either stick it on the end of a pending list if we can backlog,
1042 * or bailout with an error if not. 1062 * or bailout with an error if not.
1043 */ 1063 */
1044 if (unlikely(spacc_fifo_cmd_full(engine))) { 1064 if (unlikely(spacc_fifo_cmd_full(engine)) ||
1065 engine->in_flight + 1 > engine->fifo_sz) {
1045 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 1066 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1046 err = -EBUSY; 1067 err = -EBUSY;
1047 spin_unlock_irqrestore(&engine->hw_lock, flags); 1068 spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -1049,9 +1070,8 @@ static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
1049 } 1070 }
1050 list_add_tail(&dev_req->list, &engine->pending); 1071 list_add_tail(&dev_req->list, &engine->pending);
1051 } else { 1072 } else {
1052 ++engine->in_flight; 1073 list_add_tail(&dev_req->list, &engine->pending);
1053 list_add_tail(&dev_req->list, &engine->in_progress); 1074 spacc_push(engine);
1054 spacc_ablk_submit(dev_req);
1055 } 1075 }
1056 spin_unlock_irqrestore(&engine->hw_lock, flags); 1076 spin_unlock_irqrestore(&engine->hw_lock, flags);
1057 1077
@@ -1139,6 +1159,7 @@ static void spacc_process_done(struct spacc_engine *engine)
1139 req = list_first_entry(&engine->in_progress, struct spacc_req, 1159 req = list_first_entry(&engine->in_progress, struct spacc_req,
1140 list); 1160 list);
1141 list_move_tail(&req->list, &engine->completed); 1161 list_move_tail(&req->list, &engine->completed);
1162 --engine->in_flight;
1142 1163
1143 /* POP the status register. */ 1164 /* POP the status register. */
1144 writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET); 1165 writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
@@ -1208,36 +1229,21 @@ static void spacc_spacc_complete(unsigned long data)
1208 struct spacc_engine *engine = (struct spacc_engine *)data; 1229 struct spacc_engine *engine = (struct spacc_engine *)data;
1209 struct spacc_req *req, *tmp; 1230 struct spacc_req *req, *tmp;
1210 unsigned long flags; 1231 unsigned long flags;
1211 int num_removed = 0;
1212 LIST_HEAD(completed); 1232 LIST_HEAD(completed);
1213 1233
1214 spin_lock_irqsave(&engine->hw_lock, flags); 1234 spin_lock_irqsave(&engine->hw_lock, flags);
1235
1215 list_splice_init(&engine->completed, &completed); 1236 list_splice_init(&engine->completed, &completed);
1237 spacc_push(engine);
1238 if (engine->in_flight)
1239 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
1240
1216 spin_unlock_irqrestore(&engine->hw_lock, flags); 1241 spin_unlock_irqrestore(&engine->hw_lock, flags);
1217 1242
1218 list_for_each_entry_safe(req, tmp, &completed, list) { 1243 list_for_each_entry_safe(req, tmp, &completed, list) {
1219 ++num_removed;
1220 req->complete(req); 1244 req->complete(req);
1245 list_del(&req->list);
1221 } 1246 }
1222
1223 /* Try and fill the engine back up again. */
1224 spin_lock_irqsave(&engine->hw_lock, flags);
1225
1226 engine->in_flight -= num_removed;
1227
1228 list_for_each_entry_safe(req, tmp, &engine->pending, list) {
1229 if (spacc_fifo_cmd_full(engine))
1230 break;
1231
1232 list_move_tail(&req->list, &engine->in_progress);
1233 ++engine->in_flight;
1234 req->result = spacc_req_submit(req);
1235 }
1236
1237 if (engine->in_flight)
1238 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
1239
1240 spin_unlock_irqrestore(&engine->hw_lock, flags);
1241} 1247}
1242 1248
1243#ifdef CONFIG_PM 1249#ifdef CONFIG_PM
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
new file mode 100644
index 000000000000..8115417a1c93
--- /dev/null
+++ b/drivers/crypto/s5p-sss.c
@@ -0,0 +1,701 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for Samsung S5PV210 HW acceleration.
5 *
6 * Copyright (C) 2011 NetUP Inc. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 */
13
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/clk.h>
21#include <linux/platform_device.h>
22#include <linux/scatterlist.h>
23#include <linux/dma-mapping.h>
24#include <linux/io.h>
25#include <linux/crypto.h>
26#include <linux/interrupt.h>
27
28#include <crypto/algapi.h>
29#include <crypto/aes.h>
30#include <crypto/ctr.h>
31
32#include <plat/cpu.h>
33#include <plat/dma.h>
34
35#define _SBF(s, v) ((v) << (s))
36#define _BIT(b) _SBF(b, 1)
37
38/* Feed control registers */
39#define SSS_REG_FCINTSTAT 0x0000
40#define SSS_FCINTSTAT_BRDMAINT _BIT(3)
41#define SSS_FCINTSTAT_BTDMAINT _BIT(2)
42#define SSS_FCINTSTAT_HRDMAINT _BIT(1)
43#define SSS_FCINTSTAT_PKDMAINT _BIT(0)
44
45#define SSS_REG_FCINTENSET 0x0004
46#define SSS_FCINTENSET_BRDMAINTENSET _BIT(3)
47#define SSS_FCINTENSET_BTDMAINTENSET _BIT(2)
48#define SSS_FCINTENSET_HRDMAINTENSET _BIT(1)
49#define SSS_FCINTENSET_PKDMAINTENSET _BIT(0)
50
51#define SSS_REG_FCINTENCLR 0x0008
52#define SSS_FCINTENCLR_BRDMAINTENCLR _BIT(3)
53#define SSS_FCINTENCLR_BTDMAINTENCLR _BIT(2)
54#define SSS_FCINTENCLR_HRDMAINTENCLR _BIT(1)
55#define SSS_FCINTENCLR_PKDMAINTENCLR _BIT(0)
56
57#define SSS_REG_FCINTPEND 0x000C
58#define SSS_FCINTPEND_BRDMAINTP _BIT(3)
59#define SSS_FCINTPEND_BTDMAINTP _BIT(2)
60#define SSS_FCINTPEND_HRDMAINTP _BIT(1)
61#define SSS_FCINTPEND_PKDMAINTP _BIT(0)
62
63#define SSS_REG_FCFIFOSTAT 0x0010
64#define SSS_FCFIFOSTAT_BRFIFOFUL _BIT(7)
65#define SSS_FCFIFOSTAT_BRFIFOEMP _BIT(6)
66#define SSS_FCFIFOSTAT_BTFIFOFUL _BIT(5)
67#define SSS_FCFIFOSTAT_BTFIFOEMP _BIT(4)
68#define SSS_FCFIFOSTAT_HRFIFOFUL _BIT(3)
69#define SSS_FCFIFOSTAT_HRFIFOEMP _BIT(2)
70#define SSS_FCFIFOSTAT_PKFIFOFUL _BIT(1)
71#define SSS_FCFIFOSTAT_PKFIFOEMP _BIT(0)
72
73#define SSS_REG_FCFIFOCTRL 0x0014
74#define SSS_FCFIFOCTRL_DESSEL _BIT(2)
75#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
76#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
77#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
78
79#define SSS_REG_FCBRDMAS 0x0020
80#define SSS_REG_FCBRDMAL 0x0024
81#define SSS_REG_FCBRDMAC 0x0028
82#define SSS_FCBRDMAC_BYTESWAP _BIT(1)
83#define SSS_FCBRDMAC_FLUSH _BIT(0)
84
85#define SSS_REG_FCBTDMAS 0x0030
86#define SSS_REG_FCBTDMAL 0x0034
87#define SSS_REG_FCBTDMAC 0x0038
88#define SSS_FCBTDMAC_BYTESWAP _BIT(1)
89#define SSS_FCBTDMAC_FLUSH _BIT(0)
90
91#define SSS_REG_FCHRDMAS 0x0040
92#define SSS_REG_FCHRDMAL 0x0044
93#define SSS_REG_FCHRDMAC 0x0048
94#define SSS_FCHRDMAC_BYTESWAP _BIT(1)
95#define SSS_FCHRDMAC_FLUSH _BIT(0)
96
97#define SSS_REG_FCPKDMAS 0x0050
98#define SSS_REG_FCPKDMAL 0x0054
99#define SSS_REG_FCPKDMAC 0x0058
100#define SSS_FCPKDMAC_BYTESWAP _BIT(3)
101#define SSS_FCPKDMAC_DESCEND _BIT(2)
102#define SSS_FCPKDMAC_TRANSMIT _BIT(1)
103#define SSS_FCPKDMAC_FLUSH _BIT(0)
104
105#define SSS_REG_FCPKDMAO 0x005C
106
107/* AES registers */
108#define SSS_REG_AES_CONTROL 0x4000
109#define SSS_AES_BYTESWAP_DI _BIT(11)
110#define SSS_AES_BYTESWAP_DO _BIT(10)
111#define SSS_AES_BYTESWAP_IV _BIT(9)
112#define SSS_AES_BYTESWAP_CNT _BIT(8)
113#define SSS_AES_BYTESWAP_KEY _BIT(7)
114#define SSS_AES_KEY_CHANGE_MODE _BIT(6)
115#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
116#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
117#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
118#define SSS_AES_FIFO_MODE _BIT(3)
119#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
120#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
121#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
122#define SSS_AES_MODE_DECRYPT _BIT(0)
123
124#define SSS_REG_AES_STATUS 0x4004
125#define SSS_AES_BUSY _BIT(2)
126#define SSS_AES_INPUT_READY _BIT(1)
127#define SSS_AES_OUTPUT_READY _BIT(0)
128
129#define SSS_REG_AES_IN_DATA(s) (0x4010 + (s << 2))
130#define SSS_REG_AES_OUT_DATA(s) (0x4020 + (s << 2))
131#define SSS_REG_AES_IV_DATA(s) (0x4030 + (s << 2))
132#define SSS_REG_AES_CNT_DATA(s) (0x4040 + (s << 2))
133#define SSS_REG_AES_KEY_DATA(s) (0x4080 + (s << 2))
134
135#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
136#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
137#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
138
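
The token-pasting accessors above let the rest of the driver name registers
symbolically; for example, clearing both block-DMA interrupt enables (as
s5p_aes_crypt_start does further down) reads:

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
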
139/* HW engine modes */
140#define FLAGS_AES_DECRYPT _BIT(0)
141#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
142#define FLAGS_AES_CBC _SBF(1, 0x01)
143#define FLAGS_AES_CTR _SBF(1, 0x02)
144
145#define AES_KEY_LEN 16
146#define CRYPTO_QUEUE_LEN 1
147
148struct s5p_aes_reqctx {
149 unsigned long mode;
150};
151
152struct s5p_aes_ctx {
153 struct s5p_aes_dev *dev;
154
155 uint8_t aes_key[AES_MAX_KEY_SIZE];
156 uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
157 int keylen;
158};
159
160struct s5p_aes_dev {
161 struct device *dev;
162 struct clk *clk;
163 void __iomem *ioaddr;
164 int irq_hash;
165 int irq_fc;
166
167 struct ablkcipher_request *req;
168 struct s5p_aes_ctx *ctx;
169 struct scatterlist *sg_src;
170 struct scatterlist *sg_dst;
171
172 struct tasklet_struct tasklet;
173 struct crypto_queue queue;
174 bool busy;
175 spinlock_t lock;
176};
177
178static struct s5p_aes_dev *s5p_dev;
179
180static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
181{
182 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
183 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
184}
185
186static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
187{
188 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
189 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
190}
191
192static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
193{
194	/* the caller is expected to hold dev->lock */
195 dev->req->base.complete(&dev->req->base, err);
196 dev->busy = false;
197}
198
199static void s5p_unset_outdata(struct s5p_aes_dev *dev)
200{
201 dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
202}
203
204static void s5p_unset_indata(struct s5p_aes_dev *dev)
205{
206 dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
207}
208
209static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
210{
211 int err;
212
213 if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
214 err = -EINVAL;
215 goto exit;
216 }
217 if (!sg_dma_len(sg)) {
218 err = -EINVAL;
219 goto exit;
220 }
221
222 err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
223 if (!err) {
224 err = -ENOMEM;
225 goto exit;
226 }
227
228 dev->sg_dst = sg;
229 err = 0;
230
231 exit:
232 return err;
233}
234
235static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
236{
237 int err;
238
239 if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
240 err = -EINVAL;
241 goto exit;
242 }
243 if (!sg_dma_len(sg)) {
244 err = -EINVAL;
245 goto exit;
246 }
247
248 err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
249 if (!err) {
250 err = -ENOMEM;
251 goto exit;
252 }
253
254 dev->sg_src = sg;
255 err = 0;
256
257 exit:
258 return err;
259}
260
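/*
 * DMA completion handlers: on a BTDMA (output) interrupt advance the
 * destination scatterlist, completing the request after the last entry;
 * on a BRDMA (input) interrupt advance the source scatterlist.
 */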
261static void s5p_aes_tx(struct s5p_aes_dev *dev)
262{
263 int err = 0;
264
265 s5p_unset_outdata(dev);
266
267 if (!sg_is_last(dev->sg_dst)) {
268 err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
269 if (err) {
270 s5p_aes_complete(dev, err);
271 return;
272 }
273
274 s5p_set_dma_outdata(dev, dev->sg_dst);
275 } else {
276 s5p_aes_complete(dev, err);
 }
277}
278
279static void s5p_aes_rx(struct s5p_aes_dev *dev)
280{
281 int err;
282
283 s5p_unset_indata(dev);
284
285 if (!sg_is_last(dev->sg_src)) {
286 err = s5p_set_indata(dev, sg_next(dev->sg_src));
287 if (err) {
288 s5p_aes_complete(dev, err);
289 return;
290 }
291
292 s5p_set_dma_indata(dev, dev->sg_src);
293 }
294}
295
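/*
 * Both the "hash" and "feed control" IRQs are requested with this
 * handler; only feed-control (DMA) events are serviced here.
 */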
296static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
297{
298 struct platform_device *pdev = dev_id;
299 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
300 uint32_t status;
301 unsigned long flags;
302
303 spin_lock_irqsave(&dev->lock, flags);
304
305 if (irq == dev->irq_fc) {
306 status = SSS_READ(dev, FCINTSTAT);
307 if (status & SSS_FCINTSTAT_BRDMAINT)
308 s5p_aes_rx(dev);
309 if (status & SSS_FCINTSTAT_BTDMAINT)
310 s5p_aes_tx(dev);
311
312 SSS_WRITE(dev, FCINTPEND, status);
313 }
314
315 spin_unlock_irqrestore(&dev->lock, flags);
316
317 return IRQ_HANDLED;
318}
319
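/*
 * Load the IV and key into the engine. Keys are aligned to the end of
 * the eight-word key register file: a 256-bit key starts at word 0, a
 * 192-bit key at word 2 and a 128-bit key at word 4.
 */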
320static void s5p_set_aes(struct s5p_aes_dev *dev,
321 uint8_t *key, uint8_t *iv, unsigned int keylen)
322{
323 void __iomem *keystart;
324
325 /* ECB requests carry no IV, so program it only when one is supplied */
 if (iv)
 memcpy_toio(dev->ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
326
327 if (keylen == AES_KEYSIZE_256)
328 keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(0);
329 else if (keylen == AES_KEYSIZE_192)
330 keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(2);
331 else
332 keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(4);
333
334 memcpy_toio(keystart, key, keylen);
335}
336
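/*
 * Build AES_CONTROL for the requested mode, map the first source and
 * destination scatterlist entries, program the DMA channels and unmask
 * the feed-control interrupts; completion is driven from the IRQ path.
 */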
337static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
338{
339 struct ablkcipher_request *req = dev->req;
340
341 uint32_t aes_control;
342 int err;
343 unsigned long flags;
344
345 aes_control = SSS_AES_KEY_CHANGE_MODE;
346 if (mode & FLAGS_AES_DECRYPT)
347 aes_control |= SSS_AES_MODE_DECRYPT;
348
349 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
350 aes_control |= SSS_AES_CHAIN_MODE_CBC;
351 else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
352 aes_control |= SSS_AES_CHAIN_MODE_CTR;
353
354 if (dev->ctx->keylen == AES_KEYSIZE_192)
355 aes_control |= SSS_AES_KEY_SIZE_192;
356 else if (dev->ctx->keylen == AES_KEYSIZE_256)
357 aes_control |= SSS_AES_KEY_SIZE_256;
358
359 aes_control |= SSS_AES_FIFO_MODE;
360
361 /* alternatively, byte swapping could be done on the DMA side instead */
362 aes_control |= SSS_AES_BYTESWAP_DI
363 | SSS_AES_BYTESWAP_DO
364 | SSS_AES_BYTESWAP_IV
365 | SSS_AES_BYTESWAP_KEY
366 | SSS_AES_BYTESWAP_CNT;
367
368 spin_lock_irqsave(&dev->lock, flags);
369
370 SSS_WRITE(dev, FCINTENCLR,
371 SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
372 SSS_WRITE(dev, FCFIFOCTRL, 0x00);
373
374 err = s5p_set_indata(dev, req->src);
375 if (err)
376 goto indata_error;
377
378 err = s5p_set_outdata(dev, req->dst);
379 if (err)
380 goto outdata_error;
381
382 SSS_WRITE(dev, AES_CONTROL, aes_control);
383 s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
384
385 s5p_set_dma_indata(dev, req->src);
386 s5p_set_dma_outdata(dev, req->dst);
387
388 SSS_WRITE(dev, FCINTENSET,
389 SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
390
391 spin_unlock_irqrestore(&dev->lock, flags);
392
393 return;
394
395 outdata_error:
396 s5p_unset_indata(dev);
397
398 indata_error:
399 s5p_aes_complete(dev, err);
400 spin_unlock_irqrestore(&dev->lock, flags);
401}
402
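/* dequeue the next queued request and kick off its processing */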
403static void s5p_tasklet_cb(unsigned long data)
404{
405 struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
406 struct crypto_async_request *async_req, *backlog;
407 struct s5p_aes_reqctx *reqctx;
408 unsigned long flags;
409
410 spin_lock_irqsave(&dev->lock, flags);
411 backlog = crypto_get_backlog(&dev->queue);
412 async_req = crypto_dequeue_request(&dev->queue);
413 spin_unlock_irqrestore(&dev->lock, flags);
414
415 if (!async_req)
416 return;
417
418 if (backlog)
419 backlog->complete(backlog, -EINPROGRESS);
420
421 dev->req = ablkcipher_request_cast(async_req);
422 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
423 reqctx = ablkcipher_request_ctx(dev->req);
424
425 s5p_aes_crypt_start(dev, reqctx->mode);
426}
427
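/*
 * Queue a request for the tasklet; -EAGAIN is returned when the
 * engine is already busy with another request.
 */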
428static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
429 struct ablkcipher_request *req)
430{
431 unsigned long flags;
432 int err;
433
434 spin_lock_irqsave(&dev->lock, flags);
435 if (dev->busy) {
436 err = -EAGAIN;
437 spin_unlock_irqrestore(&dev->lock, flags);
438 goto exit;
439 }
440 dev->busy = true;
441
442 err = ablkcipher_enqueue_request(&dev->queue, req);
443 spin_unlock_irqrestore(&dev->lock, flags);
444
445 tasklet_schedule(&dev->tasklet);
446
447 exit:
448 return err;
449}
450
451static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
452{
453 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
454 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
455 struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
456 struct s5p_aes_dev *dev = ctx->dev;
457
458 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
459 pr_err("request size is not a multiple of the AES block size\n");
460 return -EINVAL;
461 }
462
463 reqctx->mode = mode;
464
465 return s5p_aes_handle_req(dev, req);
466}
467
468static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
469 const uint8_t *key, unsigned int keylen)
470{
471 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
472 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
473
474 if (keylen != AES_KEYSIZE_128 &&
475 keylen != AES_KEYSIZE_192 &&
476 keylen != AES_KEYSIZE_256)
477 return -EINVAL;
478
479 memcpy(ctx->aes_key, key, keylen);
480 ctx->keylen = keylen;
481
482 return 0;
483}
484
485static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
486{
487 return s5p_aes_crypt(req, 0);
488}
489
490static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
491{
492 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
493}
494
495static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
496{
497 return s5p_aes_crypt(req, FLAGS_AES_CBC);
498}
499
500static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
501{
502 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
503}
504
505static int s5p_aes_cra_init(struct crypto_tfm *tfm)
506{
507 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
508
509 ctx->dev = s5p_dev;
510 tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
511
512 return 0;
513}
514
515static struct crypto_alg algs[] = {
516 {
517 .cra_name = "ecb(aes)",
518 .cra_driver_name = "ecb-aes-s5p",
519 .cra_priority = 100,
520 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
521 CRYPTO_ALG_ASYNC,
522 .cra_blocksize = AES_BLOCK_SIZE,
523 .cra_ctxsize = sizeof(struct s5p_aes_ctx),
524 .cra_alignmask = 0x0f,
525 .cra_type = &crypto_ablkcipher_type,
526 .cra_module = THIS_MODULE,
527 .cra_init = s5p_aes_cra_init,
528 .cra_u.ablkcipher = {
529 .min_keysize = AES_MIN_KEY_SIZE,
530 .max_keysize = AES_MAX_KEY_SIZE,
531 .setkey = s5p_aes_setkey,
532 .encrypt = s5p_aes_ecb_encrypt,
533 .decrypt = s5p_aes_ecb_decrypt,
534 }
535 },
536 {
537 .cra_name = "cbc(aes)",
538 .cra_driver_name = "cbc-aes-s5p",
539 .cra_priority = 100,
540 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
541 CRYPTO_ALG_ASYNC,
542 .cra_blocksize = AES_BLOCK_SIZE,
543 .cra_ctxsize = sizeof(struct s5p_aes_ctx),
544 .cra_alignmask = 0x0f,
545 .cra_type = &crypto_ablkcipher_type,
546 .cra_module = THIS_MODULE,
547 .cra_init = s5p_aes_cra_init,
548 .cra_u.ablkcipher = {
549 .min_keysize = AES_MIN_KEY_SIZE,
550 .max_keysize = AES_MAX_KEY_SIZE,
551 .ivsize = AES_BLOCK_SIZE,
552 .setkey = s5p_aes_setkey,
553 .encrypt = s5p_aes_cbc_encrypt,
554 .decrypt = s5p_aes_cbc_decrypt,
555 }
556 },
557};
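/*
 * Usage note (illustrative only, not part of the driver): a kernel
 * client of this era would reach the implementations above through the
 * ablkcipher API, roughly as follows; error handling is omitted and
 * names such as my_complete are placeholders:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * the request then completes asynchronously through req->base.complete(),
 * i.e. the callback installed above.
 */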
558
559static int s5p_aes_probe(struct platform_device *pdev)
560{
561 int i, j, err = -ENODEV;
562 struct s5p_aes_dev *pdata;
563 struct device *dev = &pdev->dev;
564 struct resource *res;
565
566 if (s5p_dev)
567 return -EEXIST;
568
569 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
570 if (!res)
571 return -ENODEV;
572
573 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
574 if (!pdata)
575 return -ENOMEM;
576
577 if (!devm_request_mem_region(dev, res->start,
578 resource_size(res), pdev->name))
579 return -EBUSY;
580
581 pdata->clk = clk_get(dev, "secss");
582 if (IS_ERR(pdata->clk)) {
583 dev_err(dev, "failed to find secss clock source\n");
584 return -ENOENT;
585 }
586
587 clk_enable(pdata->clk);
588
589 spin_lock_init(&pdata->lock);
590 pdata->ioaddr = devm_ioremap(dev, res->start,
591 resource_size(res));
 if (!pdata->ioaddr) {
 err = -ENOMEM;
 goto err_irq;
 }
592
593 pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
594 if (pdata->irq_hash < 0) {
595 err = pdata->irq_hash;
596 dev_warn(dev, "hash interrupt is not available.\n");
597 goto err_irq;
598 }
599 err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
600 IRQF_SHARED, pdev->name, pdev);
601 if (err < 0) {
602 dev_warn(dev, "hash interrupt is not available.\n");
603 goto err_irq;
604 }
605
606 pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
607 if (pdata->irq_fc < 0) {
608 err = pdata->irq_fc;
609 dev_warn(dev, "feed control interrupt is not available.\n");
610 goto err_irq;
611 }
612 err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
613 IRQF_SHARED, pdev->name, pdev);
614 if (err < 0) {
615 dev_warn(dev, "feed control interrupt is not available.\n");
616 goto err_irq;
617 }
618
619 pdata->dev = dev;
620 platform_set_drvdata(pdev, pdata);
621 s5p_dev = pdata;
622
623 tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
624 crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
625
626 for (i = 0; i < ARRAY_SIZE(algs); i++) {
627 INIT_LIST_HEAD(&algs[i].cra_list);
628 err = crypto_register_alg(&algs[i]);
629 if (err)
630 goto err_algs;
631 }
632
633 pr_info("s5p-sss driver registered\n");
634
635 return 0;
636
637 err_algs:
638 dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
639
640 for (j = 0; j < i; j++)
641 crypto_unregister_alg(&algs[j]);
642
643 tasklet_kill(&pdata->tasklet);
644
645 err_irq:
646 clk_disable(pdata->clk);
647 clk_put(pdata->clk);
648
649 s5p_dev = NULL;
650 platform_set_drvdata(pdev, NULL);
651
652 return err;
653}
654
655static int s5p_aes_remove(struct platform_device *pdev)
656{
657 struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
658 int i;
659
660 if (!pdata)
661 return -ENODEV;
662
663 for (i = 0; i < ARRAY_SIZE(algs); i++)
664 crypto_unregister_alg(&algs[i]);
665
666 tasklet_kill(&pdata->tasklet);
667
668 clk_disable(pdata->clk);
669 clk_put(pdata->clk);
670
671 s5p_dev = NULL;
672 platform_set_drvdata(pdev, NULL);
673
674 return 0;
675}
676
677static struct platform_driver s5p_aes_crypto = {
678 .probe = s5p_aes_probe,
679 .remove = s5p_aes_remove,
680 .driver = {
681 .owner = THIS_MODULE,
682 .name = "s5p-secss",
683 },
684};
685
686static int __init s5p_aes_mod_init(void)
687{
688 return platform_driver_register(&s5p_aes_crypto);
689}
690
691static void __exit s5p_aes_mod_exit(void)
692{
693 platform_driver_unregister(&s5p_aes_crypto);
694}
695
696module_init(s5p_aes_mod_init);
697module_exit(s5p_aes_mod_exit);
698
699MODULE_DESCRIPTION("S5PV210 AES hardware acceleration support.");
700MODULE_LICENSE("GPL v2");
701MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");