author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/crypto
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/crypto')
32 files changed, 9359 insertions, 345 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ea0b3863ad0f..e0b25de1e339 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -74,6 +74,8 @@ config ZCRYPT
 	  + PCI-X Cryptographic Coprocessor (PCIXCC)
 	  + Crypto Express2 Coprocessor (CEX2C)
 	  + Crypto Express2 Accelerator (CEX2A)
+	  + Crypto Express3 Coprocessor (CEX3C)
+	  + Crypto Express3 Accelerator (CEX3A)
 
 config ZCRYPT_MONOLITHIC
 	bool "Monolithic zcrypt module"
@@ -91,6 +93,8 @@ config CRYPTO_SHA1_S390
 	  This is the s390 hardware accelerated implementation of the
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
+	  It is available as of z990.
+
 config CRYPTO_SHA256_S390
 	tristate "SHA256 digest algorithm"
 	depends on S390
@@ -99,8 +103,7 @@ config CRYPTO_SHA256_S390
 	  This is the s390 hardware accelerated implementation of the
 	  SHA256 secure hash standard (DFIPS 180-2).
 
-	  This version of SHA implements a 256 bit hash with 128 bits of
-	  security against collision attacks.
+	  It is available as of z9.
 
 config CRYPTO_SHA512_S390
 	tristate "SHA384 and SHA512 digest algorithm"
@@ -110,10 +113,7 @@ config CRYPTO_SHA512_S390
 	  This is the s390 hardware accelerated implementation of the
 	  SHA512 secure hash standard.
 
-	  This version of SHA implements a 512 bit hash with 256 bits of
-	  security against collision attacks. The code also includes SHA-384,
-	  a 384 bit hash with 192 bits of security against collision attacks.
-
+	  It is available as of z10.
 
 config CRYPTO_DES_S390
 	tristate "DES and Triple DES cipher algorithms"
@@ -121,9 +121,12 @@ config CRYPTO_DES_S390
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
 	help
-	  This us the s390 hardware accelerated implementation of the
+	  This is the s390 hardware accelerated implementation of the
 	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
 
+	  As of z990 the ECB and CBC mode are hardware accelerated.
+	  As of z196 the CTR mode is hardware accelerated.
+
 config CRYPTO_AES_S390
 	tristate "AES cipher algorithms"
 	depends on S390
@@ -131,20 +134,15 @@ config CRYPTO_AES_S390
 	select CRYPTO_BLKCIPHER
 	help
 	  This is the s390 hardware accelerated implementation of the
-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-	  algorithm.
+	  AES cipher algorithms (FIPS-197).
 
-	  Rijndael appears to be consistently a very good performer in
-	  both hardware and software across a wide range of computing
-	  environments regardless of its use in feedback or non-feedback
-	  modes. Its key setup time is excellent, and its key agility is
-	  good. Rijndael's very low memory requirements make it very well
-	  suited for restricted-space environments, in which it also
-	  demonstrates excellent performance. Rijndael's operations are
-	  among the easiest to defend against power and timing attacks.
-
-	  On s390 the System z9-109 currently only supports the key size
-	  of 128 bit.
+	  As of z9 the ECB and CBC modes are hardware accelerated
+	  for 128 bit keys.
+	  As of z10 the ECB and CBC modes are hardware accelerated
+	  for all AES key sizes.
+	  As of z196 the CTR mode is hardware accelerated for all AES
+	  key sizes and XTS mode is hardware accelerated for 256 and
+	  512 bit keys.
 
 config S390_PRNG
 	tristate "Pseudo random number generator device driver"
@@ -154,8 +152,20 @@ config S390_PRNG
 	  Select this option if you want to use the s390 pseudo random number
 	  generator. The PRNG is part of the cryptographic processor functions
 	  and uses triple-DES to generate secure random numbers like the
-	  ANSI X9.17 standard. The PRNG is usable via the char device
-	  /dev/prandom.
+	  ANSI X9.17 standard. User-space programs access the
+	  pseudo-random-number device through the char device /dev/prandom.
+
+	  It is available as of z9.
+
+config CRYPTO_GHASH_S390
+	tristate "GHASH digest algorithm"
+	depends on S390
+	select CRYPTO_HASH
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  GHASH message digest algorithm for GCM (Galois/Counter Mode).
+
+	  It is available as of z196.
 
 config CRYPTO_DEV_MV_CESA
 	tristate "Marvell's Cryptographic Engine"
@@ -172,6 +182,7 @@ config CRYPTO_DEV_MV_CESA
 
 config CRYPTO_DEV_NIAGARA2
 	tristate "Niagara2 Stream Processing Unit driver"
+	select CRYPTO_DES
 	select CRYPTO_ALGAPI
 	depends on SPARC64
 	help
@@ -199,6 +210,8 @@ config CRYPTO_DEV_HIFN_795X_RNG
 	  Select this option if you want to enable the random number generator
 	  on the HIFN 795x crypto adapters.
 
+source drivers/crypto/caam/Kconfig
+
 config CRYPTO_DEV_TALITOS
 	tristate "Talitos Freescale Security Engine (SEC)"
 	select CRYPTO_ALGAPI
@@ -243,4 +256,40 @@ config CRYPTO_DEV_OMAP_SHAM
 	  OMAP processors have SHA1/MD5 hw accelerator. Select this if you
 	  want to use the OMAP module for SHA1/MD5 algorithms.
 
+config CRYPTO_DEV_OMAP_AES
+	tristate "Support for OMAP AES hw engine"
+	depends on ARCH_OMAP2 || ARCH_OMAP3
+	select CRYPTO_AES
+	help
+	  OMAP processors have AES module accelerator. Select this if you
+	  want to use the OMAP module for AES algorithms.
+
+config CRYPTO_DEV_PICOXCELL
+	tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
+	depends on ARCH_PICOXCELL
+	select CRYPTO_AES
+	select CRYPTO_AUTHENC
+	select CRYPTO_ALGAPI
+	select CRYPTO_DES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_SEQIV
+	help
+	  This option enables support for the hardware offload engines in the
+	  Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
+	  and for 3gpp Layer 2 ciphering support.
+
+	  Saying m here will build a module named pipcoxcell_crypto.
+
+config CRYPTO_DEV_S5P
+	tristate "Support for Samsung S5PV210 crypto accelerator"
+	depends on ARCH_S5PV210
+	select CRYPTO_AES
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  This option allows you to have support for S5P crypto acceleration.
+	  Select this to offload Samsung S5PV210 or S5PC110 from AES
+	  algorithms execution.
+
 endif # CRYPTO_HW
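Aside: the S390_PRNG help above says user space reads the generator through the char device /dev/prandom. A minimal sketch of such a consumer, using only ordinary POSIX I/O (nothing here is part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/prandom", O_RDONLY);	/* s390 PRNG char device */

	if (fd < 0) {
		perror("open /dev/prandom");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n != (ssize_t)sizeof(buf)) {
		perror("read");
		return 1;
	}
	for (n = 0; n < (ssize_t)sizeof(buf); n++)
		printf("%02x", buf[n]);	/* print the random bytes as hex */
	putchar('\n');
	return 0;
}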
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6dbbe00c4524..53ea50155319 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,11 +2,14 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-objs := n2_core.o n2_asm.o
+n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
-
+obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index aa376e8d5ed5..5c0c62b65d69 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-objs := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 2b1baee525bc..18912521a7a5 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1150,8 +1150,7 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
 /**
  * Module Initialization Routine
  */
-static int __init crypto4xx_probe(struct platform_device *ofdev,
-				  const struct of_device_id *match)
+static int __init crypto4xx_probe(struct platform_device *ofdev)
 {
 	int rc;
 	struct resource res;
@@ -1280,7 +1279,7 @@ static const struct of_device_id crypto4xx_match[] = {
 	{ },
 };
 
-static struct of_platform_driver crypto4xx_driver = {
+static struct platform_driver crypto4xx_driver = {
 	.driver = {
 		.name = "crypto4xx",
 		.owner = THIS_MODULE,
@@ -1292,12 +1291,12 @@ static struct of_platform_driver crypto4xx_driver = {
 
 static int __init crypto4xx_init(void)
 {
-	return of_register_platform_driver(&crypto4xx_driver);
+	return platform_driver_register(&crypto4xx_driver);
 }
 
 static void __exit crypto4xx_exit(void)
 {
-	of_unregister_platform_driver(&crypto4xx_driver);
+	platform_driver_unregister(&crypto4xx_driver);
 }
 
 module_init(crypto4xx_init);
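Aside: the probe() signature change above follows the tree-wide removal of struct of_platform_driver; the OF match entry is no longer passed in, so a driver that still needs it must look it up on demand. A minimal sketch of that pattern, reusing the crypto4xx_match table named in this file (the example function name is hypothetical; the real probe body is otherwise unchanged):

#include <linux/platform_device.h>
#include <linux/of_device.h>

static int __init example_probe(struct platform_device *ofdev)
{
	/* formerly delivered as a probe parameter; now fetched explicitly */
	const struct of_device_id *match =
		of_match_device(crypto4xx_match, &ofdev->dev);

	if (!match)
		return -EINVAL;
	/* ... remaining probe work ... */
	return 0;
}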
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
index 466fd94cd4a3..de8a7a48775a 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -17,7 +17,7 @@
  * @file crypto4xx_sa.c
  *
  * This file implements the security context
- * assoicate format.
+ * associate format.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index 4b83ed7e5570..1352d58d4e34 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -15,7 +15,7 @@
  * GNU General Public License for more details.
  *
  * This file defines the security context
- * assoicate format.
+ * associate format.
  */
 
 #ifndef __CRYPTO4XX_SA_H__
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644
index 000000000000..2d876bb98ff4
--- /dev/null
+++ b/drivers/crypto/caam/Kconfig
@@ -0,0 +1,72 @@
+config CRYPTO_DEV_FSL_CAAM
+	tristate "Freescale CAAM-Multicore driver backend"
+	depends on FSL_SOC
+	help
+	  Enables the driver module for Freescale's Cryptographic Accelerator
+	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+	  This module adds a job ring operation interface, and configures h/w
+	  to operate as a DPAA component automatically, depending
+	  on h/w feature availability.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam.
+
+config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+	int "Job Ring size"
+	depends on CRYPTO_DEV_FSL_CAAM
+	range 2 9
+	default "9"
+	help
+	  Select size of Job Rings as a power of 2, within the
+	  range 2-9 (ring size 4-512).
+	  Examples:
+		2 => 4
+		3 => 8
+		4 => 16
+		5 => 32
+		6 => 64
+		7 => 128
+		8 => 256
+		9 => 512
+
+config CRYPTO_DEV_FSL_CAAM_INTC
+	bool "Job Ring interrupt coalescing"
+	depends on CRYPTO_DEV_FSL_CAAM
+	default y
+	help
+	  Enable the Job Ring's interrupt coalescing feature.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+	int "Job Ring interrupt coalescing count threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 255
+	default 255
+	help
+	  Select number of descriptor completions to queue before
+	  raising an interrupt, in the range 1-255. Note that a selection
+	  of 1 functionally defeats the coalescing feature, and a selection
+	  equal or greater than the job ring size will force timeouts.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+	int "Job Ring interrupt coalescing timer threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 65535
+	default 2048
+	help
+	  Select number of bus clocks/64 to timeout in the case that one or
+	  more descriptor completions are queued without reaching the count
+	  threshold. Range is 1-65535.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+	tristate "Register algorithm implementations with the Crypto API"
+	depends on CRYPTO_DEV_FSL_CAAM
+	default y
+	select CRYPTO_ALGAPI
+	select CRYPTO_AUTHENC
+	help
+	  Selecting this will offload crypto for users of the
+	  scatterlist crypto API (such as the linux native IPSec
+	  stack) to the SEC4 via job ring.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamalg.
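Aside: CRYPTO_DEV_FSL_CAAM_RINGSIZE is an exponent, not an entry count. A minimal sketch of how such a value typically becomes a ring depth in C (the macro name here is illustrative, not taken from this patch):

/* Kconfig value n selects a ring of 2^n entries (range 2..9 -> 4..512). */
#define JOBR_DEPTH	(1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)

/* With the default of 9, JOBR_DEPTH is 512. Note that the interrupt-
 * coalescing count threshold must stay below this depth, otherwise
 * completions are only ever reaped by the timer, as the help text warns. */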
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644
index 000000000000..ef39011b4505
--- /dev/null
+++ b/drivers/crypto/caam/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the CAAM backend and dependent components
+#
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+
+caam-objs := ctrl.o jr.o error.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c new file mode 100644 index 000000000000..676d957c22b0 --- /dev/null +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -0,0 +1,1268 @@ | |||
1 | /* | ||
2 | * caam - Freescale FSL CAAM support for crypto API | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | * Based on talitos crypto API driver. | ||
7 | * | ||
8 | * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): | ||
9 | * | ||
10 | * --------------- --------------- | ||
11 | * | JobDesc #1 |-------------------->| ShareDesc | | ||
12 | * | *(packet 1) | | (PDB) | | ||
13 | * --------------- |------------->| (hashKey) | | ||
14 | * . | | (cipherKey) | | ||
15 | * . | |-------->| (operation) | | ||
16 | * --------------- | | --------------- | ||
17 | * | JobDesc #2 |------| | | ||
18 | * | *(packet 2) | | | ||
19 | * --------------- | | ||
20 | * . | | ||
21 | * . | | ||
22 | * --------------- | | ||
23 | * | JobDesc #3 |------------ | ||
24 | * | *(packet 3) | | ||
25 | * --------------- | ||
26 | * | ||
27 | * The SharedDesc never changes for a connection unless rekeyed, but | ||
28 | * each packet will likely be in a different place. So all we need | ||
29 | * to know to process the packet is where the input is, where the | ||
30 | * output goes, and what context we want to process with. Context is | ||
31 | * in the SharedDesc, packet references in the JobDesc. | ||
32 | * | ||
33 | * So, a job desc looks like: | ||
34 | * | ||
35 | * --------------------- | ||
36 | * | Header | | ||
37 | * | ShareDesc Pointer | | ||
38 | * | SEQ_OUT_PTR | | ||
39 | * | (output buffer) | | ||
40 | * | SEQ_IN_PTR | | ||
41 | * | (input buffer) | | ||
42 | * | LOAD (to DECO) | | ||
43 | * --------------------- | ||
44 | */ | ||
45 | |||
46 | #include "compat.h" | ||
47 | |||
48 | #include "regs.h" | ||
49 | #include "intern.h" | ||
50 | #include "desc_constr.h" | ||
51 | #include "jr.h" | ||
52 | #include "error.h" | ||
53 | |||
54 | /* | ||
55 | * crypto alg | ||
56 | */ | ||
57 | #define CAAM_CRA_PRIORITY 3000 | ||
58 | /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ | ||
59 | #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ | ||
60 | SHA512_DIGEST_SIZE * 2) | ||
61 | /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | ||
62 | #define CAAM_MAX_IV_LENGTH 16 | ||
63 | |||
64 | /* length of descriptors text */ | ||
65 | #define DESC_AEAD_SHARED_TEXT_LEN 4 | ||
66 | #define DESC_AEAD_ENCRYPT_TEXT_LEN 21 | ||
67 | #define DESC_AEAD_DECRYPT_TEXT_LEN 24 | ||
68 | #define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27 | ||
69 | |||
70 | #ifdef DEBUG | ||
71 | /* for print_hex_dumps with line references */ | ||
72 | #define xstr(s) str(s) | ||
73 | #define str(s) #s | ||
74 | #define debug(format, arg...) printk(format, arg) | ||
75 | #else | ||
76 | #define debug(format, arg...) | ||
77 | #endif | ||
78 | |||
79 | /* | ||
80 | * per-session context | ||
81 | */ | ||
82 | struct caam_ctx { | ||
83 | struct device *jrdev; | ||
84 | u32 *sh_desc; | ||
85 | dma_addr_t shared_desc_phys; | ||
86 | u32 class1_alg_type; | ||
87 | u32 class2_alg_type; | ||
88 | u32 alg_op; | ||
89 | u8 *key; | ||
90 | dma_addr_t key_phys; | ||
91 | unsigned int enckeylen; | ||
92 | unsigned int split_key_len; | ||
93 | unsigned int split_key_pad_len; | ||
94 | unsigned int authsize; | ||
95 | }; | ||
96 | |||
97 | static int aead_authenc_setauthsize(struct crypto_aead *authenc, | ||
98 | unsigned int authsize) | ||
99 | { | ||
100 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | ||
101 | |||
102 | ctx->authsize = authsize; | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | struct split_key_result { | ||
108 | struct completion completion; | ||
109 | int err; | ||
110 | }; | ||
111 | |||
112 | static void split_key_done(struct device *dev, u32 *desc, u32 err, | ||
113 | void *context) | ||
114 | { | ||
115 | struct split_key_result *res = context; | ||
116 | |||
117 | #ifdef DEBUG | ||
118 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
119 | #endif | ||
120 | if (err) { | ||
121 | char tmp[CAAM_ERROR_STR_MAX]; | ||
122 | |||
123 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
124 | } | ||
125 | |||
126 | res->err = err; | ||
127 | |||
128 | complete(&res->completion); | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | get a split ipad/opad key | ||
133 | |||
134 | Split key generation----------------------------------------------- | ||
135 | |||
136 | [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 | ||
137 | [01] 0x04000014 key: class2->keyreg len=20 | ||
138 | @0xffe01000 | ||
139 | [03] 0x84410014 operation: cls2-op sha1 hmac init dec | ||
140 | [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm | ||
141 | [05] 0xa4000001 jump: class2 local all ->1 [06] | ||
142 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | ||
143 | @0xffe04000 | ||
144 | */ | ||
145 | static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) | ||
146 | { | ||
147 | struct device *jrdev = ctx->jrdev; | ||
148 | u32 *desc; | ||
149 | struct split_key_result result; | ||
150 | dma_addr_t dma_addr_in, dma_addr_out; | ||
151 | int ret = 0; | ||
152 | |||
153 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | ||
154 | |||
155 | init_job_desc(desc, 0); | ||
156 | |||
157 | dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen, | ||
158 | DMA_TO_DEVICE); | ||
159 | if (dma_mapping_error(jrdev, dma_addr_in)) { | ||
160 | dev_err(jrdev, "unable to map key input memory\n"); | ||
161 | kfree(desc); | ||
162 | return -ENOMEM; | ||
163 | } | ||
164 | append_key(desc, dma_addr_in, authkeylen, CLASS_2 | | ||
165 | KEY_DEST_CLASS_REG); | ||
166 | |||
167 | /* Sets MDHA up into an HMAC-INIT */ | ||
168 | append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT | | ||
169 | OP_ALG_AS_INIT); | ||
170 | |||
171 | /* | ||
172 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion | ||
173 | into both pads inside MDHA | ||
174 | */ | ||
175 | append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | | ||
176 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); | ||
177 | |||
178 | /* | ||
179 | * FIFO_STORE with the explicit split-key content store | ||
180 | * (0x26 output type) | ||
181 | */ | ||
182 | dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | ||
183 | DMA_FROM_DEVICE); | ||
184 | if (dma_mapping_error(jrdev, dma_addr_out)) { | ||
185 | dev_err(jrdev, "unable to map key output memory\n"); | ||
186 | kfree(desc); | ||
187 | return -ENOMEM; | ||
188 | } | ||
189 | append_fifo_store(desc, dma_addr_out, ctx->split_key_len, | ||
190 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | ||
191 | |||
192 | #ifdef DEBUG | ||
193 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
194 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1); | ||
195 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
196 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
197 | #endif | ||
198 | |||
199 | result.err = 0; | ||
200 | init_completion(&result.completion); | ||
201 | |||
202 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | ||
203 | if (!ret) { | ||
204 | /* in progress */ | ||
205 | wait_for_completion_interruptible(&result.completion); | ||
206 | ret = result.err; | ||
207 | #ifdef DEBUG | ||
208 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
209 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | ||
210 | ctx->split_key_pad_len, 1); | ||
211 | #endif | ||
212 | } | ||
213 | |||
214 | dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len, | ||
215 | DMA_FROM_DEVICE); | ||
216 | dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE); | ||
217 | |||
218 | kfree(desc); | ||
219 | |||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | static int build_sh_desc_ipsec(struct caam_ctx *ctx) | ||
224 | { | ||
225 | struct device *jrdev = ctx->jrdev; | ||
226 | u32 *sh_desc; | ||
227 | u32 *jump_cmd; | ||
228 | bool keys_fit_inline = 0; | ||
229 | |||
230 | /* | ||
231 | * largest Job Descriptor and its Shared Descriptor | ||
232 | * must both fit into the 64-word Descriptor h/w Buffer | ||
233 | */ | ||
234 | if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN + | ||
235 | DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ + | ||
236 | ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | ||
237 | keys_fit_inline = 1; | ||
238 | |||
239 | /* build shared descriptor for this session */ | ||
240 | sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN + | ||
241 | (keys_fit_inline ? | ||
242 | ctx->split_key_pad_len + ctx->enckeylen : | ||
243 | CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL); | ||
244 | if (!sh_desc) { | ||
245 | dev_err(jrdev, "could not allocate shared descriptor\n"); | ||
246 | return -ENOMEM; | ||
247 | } | ||
248 | |||
249 | init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL); | ||
250 | |||
251 | jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL | | ||
252 | JUMP_COND_SHRD | JUMP_COND_SELF); | ||
253 | |||
254 | /* | ||
255 | * process keys, starting with class 2/authentication. | ||
256 | */ | ||
257 | if (keys_fit_inline) { | ||
258 | append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len, | ||
259 | ctx->split_key_len, | ||
260 | CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
261 | |||
262 | append_key_as_imm(sh_desc, (void *)ctx->key + | ||
263 | ctx->split_key_pad_len, ctx->enckeylen, | ||
264 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
265 | } else { | ||
266 | append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 | | ||
267 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
268 | append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len, | ||
269 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
270 | } | ||
271 | |||
272 | /* update jump cmd now that we are at the jump target */ | ||
273 | set_jump_tgt_here(sh_desc, jump_cmd); | ||
274 | |||
275 | ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc, | ||
276 | desc_bytes(sh_desc), | ||
277 | DMA_TO_DEVICE); | ||
278 | if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) { | ||
279 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
280 | kfree(sh_desc); | ||
281 | return -ENOMEM; | ||
282 | } | ||
283 | |||
284 | ctx->sh_desc = sh_desc; | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | static int aead_authenc_setkey(struct crypto_aead *aead, | ||
290 | const u8 *key, unsigned int keylen) | ||
291 | { | ||
292 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | ||
293 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | ||
294 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
295 | struct device *jrdev = ctx->jrdev; | ||
296 | struct rtattr *rta = (void *)key; | ||
297 | struct crypto_authenc_key_param *param; | ||
298 | unsigned int authkeylen; | ||
299 | unsigned int enckeylen; | ||
300 | int ret = 0; | ||
301 | |||
302 | param = RTA_DATA(rta); | ||
303 | enckeylen = be32_to_cpu(param->enckeylen); | ||
304 | |||
305 | key += RTA_ALIGN(rta->rta_len); | ||
306 | keylen -= RTA_ALIGN(rta->rta_len); | ||
307 | |||
308 | if (keylen < enckeylen) | ||
309 | goto badkey; | ||
310 | |||
311 | authkeylen = keylen - enckeylen; | ||
312 | |||
313 | if (keylen > CAAM_MAX_KEY_SIZE) | ||
314 | goto badkey; | ||
315 | |||
316 | /* Pick class 2 key length from algorithm submask */ | ||
317 | ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | ||
318 | OP_ALG_ALGSEL_SHIFT] * 2; | ||
319 | ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); | ||
320 | |||
321 | #ifdef DEBUG | ||
322 | printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", | ||
323 | keylen, enckeylen, authkeylen); | ||
324 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | ||
325 | ctx->split_key_len, ctx->split_key_pad_len); | ||
326 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | ||
327 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
328 | #endif | ||
329 | ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen, | ||
330 | GFP_KERNEL | GFP_DMA); | ||
331 | if (!ctx->key) { | ||
332 | dev_err(jrdev, "could not allocate key output memory\n"); | ||
333 | return -ENOMEM; | ||
334 | } | ||
335 | |||
336 | ret = gen_split_key(ctx, key, authkeylen); | ||
337 | if (ret) { | ||
338 | kfree(ctx->key); | ||
339 | goto badkey; | ||
340 | } | ||
341 | |||
342 | /* postpend encryption key to auth split key */ | ||
343 | memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); | ||
344 | |||
345 | ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + | ||
346 | enckeylen, DMA_TO_DEVICE); | ||
347 | if (dma_mapping_error(jrdev, ctx->key_phys)) { | ||
348 | dev_err(jrdev, "unable to map key i/o memory\n"); | ||
349 | kfree(ctx->key); | ||
350 | return -ENOMEM; | ||
351 | } | ||
352 | #ifdef DEBUG | ||
353 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | ||
354 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | ||
355 | ctx->split_key_pad_len + enckeylen, 1); | ||
356 | #endif | ||
357 | |||
358 | ctx->enckeylen = enckeylen; | ||
359 | |||
360 | ret = build_sh_desc_ipsec(ctx); | ||
361 | if (ret) { | ||
362 | dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len + | ||
363 | enckeylen, DMA_TO_DEVICE); | ||
364 | kfree(ctx->key); | ||
365 | } | ||
366 | |||
367 | return ret; | ||
368 | badkey: | ||
369 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
370 | return -EINVAL; | ||
371 | } | ||
372 | |||
373 | struct link_tbl_entry { | ||
374 | u64 ptr; | ||
375 | u32 len; | ||
376 | u8 reserved; | ||
377 | u8 buf_pool_id; | ||
378 | u16 offset; | ||
379 | }; | ||
380 | |||
381 | /* | ||
382 | * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor | ||
383 | * @src_nents: number of segments in input scatterlist | ||
384 | * @dst_nents: number of segments in output scatterlist | ||
385 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist | ||
386 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | ||
387 | * @link_tbl_bytes: length of dma mapped link_tbl space | ||
388 | * @link_tbl_dma: bus physical mapped address of h/w link table | ||
389 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | ||
390 | */ | ||
391 | struct ipsec_esp_edesc { | ||
392 | int assoc_nents; | ||
393 | int src_nents; | ||
394 | int dst_nents; | ||
395 | int link_tbl_bytes; | ||
396 | dma_addr_t link_tbl_dma; | ||
397 | struct link_tbl_entry *link_tbl; | ||
398 | u32 hw_desc[0]; | ||
399 | }; | ||
400 | |||
401 | static void ipsec_esp_unmap(struct device *dev, | ||
402 | struct ipsec_esp_edesc *edesc, | ||
403 | struct aead_request *areq) | ||
404 | { | ||
405 | dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE); | ||
406 | |||
407 | if (unlikely(areq->dst != areq->src)) { | ||
408 | dma_unmap_sg(dev, areq->src, edesc->src_nents, | ||
409 | DMA_TO_DEVICE); | ||
410 | dma_unmap_sg(dev, areq->dst, edesc->dst_nents, | ||
411 | DMA_FROM_DEVICE); | ||
412 | } else { | ||
413 | dma_unmap_sg(dev, areq->src, edesc->src_nents, | ||
414 | DMA_BIDIRECTIONAL); | ||
415 | } | ||
416 | |||
417 | if (edesc->link_tbl_bytes) | ||
418 | dma_unmap_single(dev, edesc->link_tbl_dma, | ||
419 | edesc->link_tbl_bytes, | ||
420 | DMA_TO_DEVICE); | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * ipsec_esp descriptor callbacks | ||
425 | */ | ||
426 | static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
427 | void *context) | ||
428 | { | ||
429 | struct aead_request *areq = context; | ||
430 | struct ipsec_esp_edesc *edesc; | ||
431 | #ifdef DEBUG | ||
432 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | ||
433 | int ivsize = crypto_aead_ivsize(aead); | ||
434 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
435 | |||
436 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
437 | #endif | ||
438 | edesc = (struct ipsec_esp_edesc *)((char *)desc - | ||
439 | offsetof(struct ipsec_esp_edesc, hw_desc)); | ||
440 | |||
441 | if (err) { | ||
442 | char tmp[CAAM_ERROR_STR_MAX]; | ||
443 | |||
444 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
445 | } | ||
446 | |||
447 | ipsec_esp_unmap(jrdev, edesc, areq); | ||
448 | |||
449 | #ifdef DEBUG | ||
450 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | ||
451 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), | ||
452 | areq->assoclen , 1); | ||
453 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | ||
454 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, | ||
455 | edesc->src_nents ? 100 : ivsize, 1); | ||
456 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | ||
457 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), | ||
458 | edesc->src_nents ? 100 : areq->cryptlen + | ||
459 | ctx->authsize + 4, 1); | ||
460 | #endif | ||
461 | |||
462 | kfree(edesc); | ||
463 | |||
464 | aead_request_complete(areq, err); | ||
465 | } | ||
466 | |||
467 | static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
468 | void *context) | ||
469 | { | ||
470 | struct aead_request *areq = context; | ||
471 | struct ipsec_esp_edesc *edesc; | ||
472 | #ifdef DEBUG | ||
473 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | ||
474 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
475 | |||
476 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
477 | #endif | ||
478 | edesc = (struct ipsec_esp_edesc *)((char *)desc - | ||
479 | offsetof(struct ipsec_esp_edesc, hw_desc)); | ||
480 | |||
481 | if (err) { | ||
482 | char tmp[CAAM_ERROR_STR_MAX]; | ||
483 | |||
484 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
485 | } | ||
486 | |||
487 | ipsec_esp_unmap(jrdev, edesc, areq); | ||
488 | |||
489 | /* | ||
490 | * verify hw auth check passed else return -EBADMSG | ||
491 | */ | ||
492 | if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) | ||
493 | err = -EBADMSG; | ||
494 | |||
495 | #ifdef DEBUG | ||
496 | print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", | ||
497 | DUMP_PREFIX_ADDRESS, 16, 4, | ||
498 | ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)), | ||
499 | sizeof(struct iphdr) + areq->assoclen + | ||
500 | ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) + | ||
501 | ctx->authsize + 36, 1); | ||
502 | if (!err && edesc->link_tbl_bytes) { | ||
503 | struct scatterlist *sg = sg_last(areq->src, edesc->src_nents); | ||
504 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", | ||
505 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), | ||
506 | sg->length + ctx->authsize + 16, 1); | ||
507 | } | ||
508 | #endif | ||
509 | kfree(edesc); | ||
510 | |||
511 | aead_request_complete(areq, err); | ||
512 | } | ||
513 | |||
514 | /* | ||
515 | * convert scatterlist to h/w link table format | ||
516 | * scatterlist must have been previously dma mapped | ||
517 | */ | ||
518 | static void sg_to_link_tbl(struct scatterlist *sg, int sg_count, | ||
519 | struct link_tbl_entry *link_tbl_ptr, u32 offset) | ||
520 | { | ||
521 | while (sg_count) { | ||
522 | link_tbl_ptr->ptr = sg_dma_address(sg); | ||
523 | link_tbl_ptr->len = sg_dma_len(sg); | ||
524 | link_tbl_ptr->reserved = 0; | ||
525 | link_tbl_ptr->buf_pool_id = 0; | ||
526 | link_tbl_ptr->offset = offset; | ||
527 | link_tbl_ptr++; | ||
528 | sg = sg_next(sg); | ||
529 | sg_count--; | ||
530 | } | ||
531 | |||
532 | /* set Final bit (marks end of link table) */ | ||
533 | link_tbl_ptr--; | ||
534 | link_tbl_ptr->len |= 0x40000000; | ||
535 | } | ||
536 | |||
537 | /* | ||
538 | * fill in and submit ipsec_esp job descriptor | ||
539 | */ | ||
540 | static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | ||
541 | u32 encrypt, | ||
542 | void (*callback) (struct device *dev, u32 *desc, | ||
543 | u32 err, void *context)) | ||
544 | { | ||
545 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | ||
546 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
547 | struct device *jrdev = ctx->jrdev; | ||
548 | u32 *desc = edesc->hw_desc, options; | ||
549 | int ret, sg_count, assoc_sg_count; | ||
550 | int ivsize = crypto_aead_ivsize(aead); | ||
551 | int authsize = ctx->authsize; | ||
552 | dma_addr_t ptr, dst_dma, src_dma; | ||
553 | #ifdef DEBUG | ||
554 | u32 *sh_desc = ctx->sh_desc; | ||
555 | |||
556 | debug("assoclen %d cryptlen %d authsize %d\n", | ||
557 | areq->assoclen, areq->cryptlen, authsize); | ||
558 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | ||
559 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), | ||
560 | areq->assoclen , 1); | ||
561 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | ||
562 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, | ||
563 | edesc->src_nents ? 100 : ivsize, 1); | ||
564 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", | ||
565 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), | ||
566 | edesc->src_nents ? 100 : areq->cryptlen + authsize, 1); | ||
567 | print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", | ||
568 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, | ||
569 | desc_bytes(sh_desc), 1); | ||
570 | #endif | ||
571 | assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1, | ||
572 | DMA_TO_DEVICE); | ||
573 | if (areq->src == areq->dst) | ||
574 | sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, | ||
575 | DMA_BIDIRECTIONAL); | ||
576 | else | ||
577 | sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, | ||
578 | DMA_TO_DEVICE); | ||
579 | |||
580 | /* start auth operation */ | ||
581 | append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | | ||
582 | (encrypt ? : OP_ALG_ICV_ON)); | ||
583 | |||
584 | /* Load FIFO with data for Class 2 CHA */ | ||
585 | options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG; | ||
586 | if (!edesc->assoc_nents) { | ||
587 | ptr = sg_dma_address(areq->assoc); | ||
588 | } else { | ||
589 | sg_to_link_tbl(areq->assoc, edesc->assoc_nents, | ||
590 | edesc->link_tbl, 0); | ||
591 | ptr = edesc->link_tbl_dma; | ||
592 | options |= LDST_SGF; | ||
593 | } | ||
594 | append_fifo_load(desc, ptr, areq->assoclen, options); | ||
595 | |||
596 | /* copy iv from cipher/class1 input context to class2 infifo */ | ||
597 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); | ||
598 | |||
599 | if (!encrypt) { | ||
600 | u32 *jump_cmd, *uncond_jump_cmd; | ||
601 | |||
602 | /* JUMP if shared */ | ||
603 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); | ||
604 | |||
605 | /* start class 1 (cipher) operation, non-shared version */ | ||
606 | append_operation(desc, ctx->class1_alg_type | | ||
607 | OP_ALG_AS_INITFINAL); | ||
608 | |||
609 | uncond_jump_cmd = append_jump(desc, 0); | ||
610 | |||
611 | set_jump_tgt_here(desc, jump_cmd); | ||
612 | |||
613 | /* start class 1 (cipher) operation, shared version */ | ||
614 | append_operation(desc, ctx->class1_alg_type | | ||
615 | OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK); | ||
616 | set_jump_tgt_here(desc, uncond_jump_cmd); | ||
617 | } else | ||
618 | append_operation(desc, ctx->class1_alg_type | | ||
619 | OP_ALG_AS_INITFINAL | encrypt); | ||
620 | |||
621 | /* load payload & instruct to class2 to snoop class 1 if encrypting */ | ||
622 | options = 0; | ||
623 | if (!edesc->src_nents) { | ||
624 | src_dma = sg_dma_address(areq->src); | ||
625 | } else { | ||
626 | sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl + | ||
627 | edesc->assoc_nents, 0); | ||
628 | src_dma = edesc->link_tbl_dma + edesc->assoc_nents * | ||
629 | sizeof(struct link_tbl_entry); | ||
630 | options |= LDST_SGF; | ||
631 | } | ||
632 | append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options); | ||
633 | append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH | | ||
634 | FIFOLD_TYPE_LASTBOTH | | ||
635 | (encrypt ? FIFOLD_TYPE_MSG1OUT2 | ||
636 | : FIFOLD_TYPE_MSG)); | ||
637 | |||
638 | /* specify destination */ | ||
639 | if (areq->src == areq->dst) { | ||
640 | dst_dma = src_dma; | ||
641 | } else { | ||
642 | sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1, | ||
643 | DMA_FROM_DEVICE); | ||
644 | if (!edesc->dst_nents) { | ||
645 | dst_dma = sg_dma_address(areq->dst); | ||
646 | options = 0; | ||
647 | } else { | ||
648 | sg_to_link_tbl(areq->dst, edesc->dst_nents, | ||
649 | edesc->link_tbl + edesc->assoc_nents + | ||
650 | edesc->src_nents, 0); | ||
651 | dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents + | ||
652 | edesc->src_nents) * | ||
653 | sizeof(struct link_tbl_entry); | ||
654 | options = LDST_SGF; | ||
655 | } | ||
656 | } | ||
657 | append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options); | ||
658 | append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA); | ||
659 | |||
660 | /* ICV */ | ||
661 | if (encrypt) | ||
662 | append_seq_store(desc, authsize, LDST_CLASS_2_CCB | | ||
663 | LDST_SRCDST_BYTE_CONTEXT); | ||
664 | else | ||
665 | append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 | | ||
666 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | ||
667 | |||
668 | #ifdef DEBUG | ||
669 | debug("job_desc_len %d\n", desc_len(desc)); | ||
670 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | ||
671 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1); | ||
672 | print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ", | ||
673 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, | ||
674 | edesc->link_tbl_bytes, 1); | ||
675 | #endif | ||
676 | |||
677 | ret = caam_jr_enqueue(jrdev, desc, callback, areq); | ||
678 | if (!ret) | ||
679 | ret = -EINPROGRESS; | ||
680 | else { | ||
681 | ipsec_esp_unmap(jrdev, edesc, areq); | ||
682 | kfree(edesc); | ||
683 | } | ||
684 | |||
685 | return ret; | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * derive number of elements in scatterlist | ||
690 | */ | ||
691 | static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) | ||
692 | { | ||
693 | struct scatterlist *sg = sg_list; | ||
694 | int sg_nents = 0; | ||
695 | |||
696 | *chained = 0; | ||
697 | while (nbytes > 0) { | ||
698 | sg_nents++; | ||
699 | nbytes -= sg->length; | ||
700 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | ||
701 | *chained = 1; | ||
702 | sg = scatterwalk_sg_next(sg); | ||
703 | } | ||
704 | |||
705 | return sg_nents; | ||
706 | } | ||
707 | |||
708 | /* | ||
709 | * allocate and map the ipsec_esp extended descriptor | ||
710 | */ | ||
711 | static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | ||
712 | int desc_bytes) | ||
713 | { | ||
714 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | ||
715 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
716 | struct device *jrdev = ctx->jrdev; | ||
717 | gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
718 | GFP_ATOMIC; | ||
719 | int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes; | ||
720 | struct ipsec_esp_edesc *edesc; | ||
721 | |||
722 | assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained); | ||
723 | BUG_ON(chained); | ||
724 | if (likely(assoc_nents == 1)) | ||
725 | assoc_nents = 0; | ||
726 | |||
727 | src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize, | ||
728 | &chained); | ||
729 | BUG_ON(chained); | ||
730 | if (src_nents == 1) | ||
731 | src_nents = 0; | ||
732 | |||
733 | if (unlikely(areq->dst != areq->src)) { | ||
734 | dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize, | ||
735 | &chained); | ||
736 | BUG_ON(chained); | ||
737 | if (dst_nents == 1) | ||
738 | dst_nents = 0; | ||
739 | } | ||
740 | |||
741 | link_tbl_bytes = (assoc_nents + src_nents + dst_nents) * | ||
742 | sizeof(struct link_tbl_entry); | ||
743 | debug("link_tbl_bytes %d\n", link_tbl_bytes); | ||
744 | |||
745 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
746 | edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes + | ||
747 | link_tbl_bytes, GFP_DMA | flags); | ||
748 | if (!edesc) { | ||
749 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
750 | return ERR_PTR(-ENOMEM); | ||
751 | } | ||
752 | |||
753 | edesc->assoc_nents = assoc_nents; | ||
754 | edesc->src_nents = src_nents; | ||
755 | edesc->dst_nents = dst_nents; | ||
756 | edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) + | ||
757 | desc_bytes; | ||
758 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | ||
759 | link_tbl_bytes, DMA_TO_DEVICE); | ||
760 | edesc->link_tbl_bytes = link_tbl_bytes; | ||
761 | |||
762 | return edesc; | ||
763 | } | ||
764 | |||
765 | static int aead_authenc_encrypt(struct aead_request *areq) | ||
766 | { | ||
767 | struct ipsec_esp_edesc *edesc; | ||
768 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | ||
769 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
770 | struct device *jrdev = ctx->jrdev; | ||
771 | int ivsize = crypto_aead_ivsize(aead); | ||
772 | u32 *desc; | ||
773 | dma_addr_t iv_dma; | ||
774 | |||
775 | /* allocate extended descriptor */ | ||
776 | edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN * | ||
777 | CAAM_CMD_SZ); | ||
778 | if (IS_ERR(edesc)) | ||
779 | return PTR_ERR(edesc); | ||
780 | |||
781 | desc = edesc->hw_desc; | ||
782 | |||
783 | /* insert shared descriptor pointer */ | ||
784 | init_job_desc_shared(desc, ctx->shared_desc_phys, | ||
785 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | ||
786 | |||
787 | iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE); | ||
788 | /* check dma error */ | ||
789 | |||
790 | append_load(desc, iv_dma, ivsize, | ||
791 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | ||
792 | |||
793 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | ||
794 | } | ||
795 | |||
796 | static int aead_authenc_decrypt(struct aead_request *req) | ||
797 | { | ||
798 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
799 | int ivsize = crypto_aead_ivsize(aead); | ||
800 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
801 | struct device *jrdev = ctx->jrdev; | ||
802 | struct ipsec_esp_edesc *edesc; | ||
803 | u32 *desc; | ||
804 | dma_addr_t iv_dma; | ||
805 | |||
806 | req->cryptlen -= ctx->authsize; | ||
807 | |||
808 | /* allocate extended descriptor */ | ||
809 | edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN * | ||
810 | CAAM_CMD_SZ); | ||
811 | if (IS_ERR(edesc)) | ||
812 | return PTR_ERR(edesc); | ||
813 | |||
814 | desc = edesc->hw_desc; | ||
815 | |||
816 | /* insert shared descriptor pointer */ | ||
817 | init_job_desc_shared(desc, ctx->shared_desc_phys, | ||
818 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | ||
819 | |||
820 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); | ||
821 | /* check dma error */ | ||
822 | |||
823 | append_load(desc, iv_dma, ivsize, | ||
824 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | ||
825 | |||
826 | return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done); | ||
827 | } | ||
828 | |||
829 | static int aead_authenc_givencrypt(struct aead_givcrypt_request *req) | ||
830 | { | ||
831 | struct aead_request *areq = &req->areq; | ||
832 | struct ipsec_esp_edesc *edesc; | ||
833 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | ||
834 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
835 | struct device *jrdev = ctx->jrdev; | ||
836 | int ivsize = crypto_aead_ivsize(aead); | ||
837 | dma_addr_t iv_dma; | ||
838 | u32 *desc; | ||
839 | |||
840 | iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE); | ||
841 | |||
842 | debug("%s: giv %p\n", __func__, req->giv); | ||
843 | |||
844 | /* allocate extended descriptor */ | ||
845 | edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN * | ||
846 | CAAM_CMD_SZ); | ||
847 | if (IS_ERR(edesc)) | ||
848 | return PTR_ERR(edesc); | ||
849 | |||
850 | desc = edesc->hw_desc; | ||
851 | |||
852 | /* insert shared descriptor pointer */ | ||
853 | init_job_desc_shared(desc, ctx->shared_desc_phys, | ||
854 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | ||
855 | |||
856 | /* | ||
857 | * LOAD IMM Info FIFO | ||
858 | * to DECO, Last, Padding, Random, Message, 16 bytes | ||
859 | */ | ||
860 | append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 | | ||
861 | NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | | ||
862 | NFIFOENTRY_PTYPE_RND | ivsize, | ||
863 | LDST_SRCDST_WORD_INFO_FIFO); | ||
864 | |||
865 | /* | ||
866 | * disable info fifo entries since the above serves as the entry | ||
867 | * this way, the MOVE command won't generate an entry. | ||
868 | * Note that this isn't required in more recent versions of | ||
869 | * SEC as a MOVE that doesn't do info FIFO entries is available. | ||
870 | */ | ||
871 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
872 | |||
873 | /* MOVE DECO Alignment -> C1 Context 16 bytes */ | ||
874 | append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize); | ||
875 | |||
876 | /* re-enable info fifo entries */ | ||
877 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
878 | |||
879 | /* MOVE C1 Context -> OFIFO 16 bytes */ | ||
880 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize); | ||
881 | |||
882 | append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA); | ||
883 | |||
884 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | ||
885 | } | ||
886 | |||
887 | struct caam_alg_template { | ||
888 | char name[CRYPTO_MAX_ALG_NAME]; | ||
889 | char driver_name[CRYPTO_MAX_ALG_NAME]; | ||
890 | unsigned int blocksize; | ||
891 | struct aead_alg aead; | ||
892 | u32 class1_alg_type; | ||
893 | u32 class2_alg_type; | ||
894 | u32 alg_op; | ||
895 | }; | ||
896 | |||
897 | static struct caam_alg_template driver_algs[] = { | ||
898 | /* single-pass ipsec_esp descriptor */ | ||
899 | { | ||
900 | .name = "authenc(hmac(sha1),cbc(aes))", | ||
901 | .driver_name = "authenc-hmac-sha1-cbc-aes-caam", | ||
902 | .blocksize = AES_BLOCK_SIZE, | ||
903 | .aead = { | ||
904 | .setkey = aead_authenc_setkey, | ||
905 | .setauthsize = aead_authenc_setauthsize, | ||
906 | .encrypt = aead_authenc_encrypt, | ||
907 | .decrypt = aead_authenc_decrypt, | ||
908 | .givencrypt = aead_authenc_givencrypt, | ||
909 | .geniv = "<built-in>", | ||
910 | .ivsize = AES_BLOCK_SIZE, | ||
911 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
912 | }, | ||
913 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
914 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | ||
915 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
916 | }, | ||
917 | { | ||
918 | .name = "authenc(hmac(sha256),cbc(aes))", | ||
919 | .driver_name = "authenc-hmac-sha256-cbc-aes-caam", | ||
920 | .blocksize = AES_BLOCK_SIZE, | ||
921 | .aead = { | ||
922 | .setkey = aead_authenc_setkey, | ||
923 | .setauthsize = aead_authenc_setauthsize, | ||
924 | .encrypt = aead_authenc_encrypt, | ||
925 | .decrypt = aead_authenc_decrypt, | ||
926 | .givencrypt = aead_authenc_givencrypt, | ||
927 | .geniv = "<built-in>", | ||
928 | .ivsize = AES_BLOCK_SIZE, | ||
929 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
930 | }, | ||
931 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
932 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
933 | OP_ALG_AAI_HMAC_PRECOMP, | ||
934 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
935 | }, | ||
936 | { | ||
937 | .name = "authenc(hmac(sha512),cbc(aes))", | ||
938 | .driver_name = "authenc-hmac-sha512-cbc-aes-caam", | ||
939 | .blocksize = AES_BLOCK_SIZE, | ||
940 | .aead = { | ||
941 | .setkey = aead_authenc_setkey, | ||
942 | .setauthsize = aead_authenc_setauthsize, | ||
943 | .encrypt = aead_authenc_encrypt, | ||
944 | .decrypt = aead_authenc_decrypt, | ||
945 | .givencrypt = aead_authenc_givencrypt, | ||
946 | .geniv = "<built-in>", | ||
947 | .ivsize = AES_BLOCK_SIZE, | ||
948 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
949 | }, | ||
950 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
951 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
952 | OP_ALG_AAI_HMAC_PRECOMP, | ||
953 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
954 | }, | ||
955 | { | ||
956 | .name = "authenc(hmac(sha1),cbc(des3_ede))", | ||
957 | .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", | ||
958 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
959 | .aead = { | ||
960 | .setkey = aead_authenc_setkey, | ||
961 | .setauthsize = aead_authenc_setauthsize, | ||
962 | .encrypt = aead_authenc_encrypt, | ||
963 | .decrypt = aead_authenc_decrypt, | ||
964 | .givencrypt = aead_authenc_givencrypt, | ||
965 | .geniv = "<built-in>", | ||
966 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
967 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
968 | }, | ||
969 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
970 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | ||
971 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
972 | }, | ||
973 | { | ||
974 | .name = "authenc(hmac(sha256),cbc(des3_ede))", | ||
975 | .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", | ||
976 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
977 | .aead = { | ||
978 | .setkey = aead_authenc_setkey, | ||
979 | .setauthsize = aead_authenc_setauthsize, | ||
980 | .encrypt = aead_authenc_encrypt, | ||
981 | .decrypt = aead_authenc_decrypt, | ||
982 | .givencrypt = aead_authenc_givencrypt, | ||
983 | .geniv = "<built-in>", | ||
984 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
985 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
986 | }, | ||
987 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
988 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
989 | OP_ALG_AAI_HMAC_PRECOMP, | ||
990 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
991 | }, | ||
992 | { | ||
993 | .name = "authenc(hmac(sha512),cbc(des3_ede))", | ||
994 | .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", | ||
995 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
996 | .aead = { | ||
997 | .setkey = aead_authenc_setkey, | ||
998 | .setauthsize = aead_authenc_setauthsize, | ||
999 | .encrypt = aead_authenc_encrypt, | ||
1000 | .decrypt = aead_authenc_decrypt, | ||
1001 | .givencrypt = aead_authenc_givencrypt, | ||
1002 | .geniv = "<built-in>", | ||
1003 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1004 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
1005 | }, | ||
1006 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
1007 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
1008 | OP_ALG_AAI_HMAC_PRECOMP, | ||
1009 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
1010 | }, | ||
1011 | { | ||
1012 | .name = "authenc(hmac(sha1),cbc(des))", | ||
1013 | .driver_name = "authenc-hmac-sha1-cbc-des-caam", | ||
1014 | .blocksize = DES_BLOCK_SIZE, | ||
1015 | .aead = { | ||
1016 | .setkey = aead_authenc_setkey, | ||
1017 | .setauthsize = aead_authenc_setauthsize, | ||
1018 | .encrypt = aead_authenc_encrypt, | ||
1019 | .decrypt = aead_authenc_decrypt, | ||
1020 | .givencrypt = aead_authenc_givencrypt, | ||
1021 | .geniv = "<built-in>", | ||
1022 | .ivsize = DES_BLOCK_SIZE, | ||
1023 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1024 | }, | ||
1025 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
1026 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | ||
1027 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
1028 | }, | ||
1029 | { | ||
1030 | .name = "authenc(hmac(sha256),cbc(des))", | ||
1031 | .driver_name = "authenc-hmac-sha256-cbc-des-caam", | ||
1032 | .blocksize = DES_BLOCK_SIZE, | ||
1033 | .aead = { | ||
1034 | .setkey = aead_authenc_setkey, | ||
1035 | .setauthsize = aead_authenc_setauthsize, | ||
1036 | .encrypt = aead_authenc_encrypt, | ||
1037 | .decrypt = aead_authenc_decrypt, | ||
1038 | .givencrypt = aead_authenc_givencrypt, | ||
1039 | .geniv = "<built-in>", | ||
1040 | .ivsize = DES_BLOCK_SIZE, | ||
1041 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
1042 | }, | ||
1043 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
1044 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
1045 | OP_ALG_AAI_HMAC_PRECOMP, | ||
1046 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
1047 | }, | ||
1048 | { | ||
1049 | .name = "authenc(hmac(sha512),cbc(des))", | ||
1050 | .driver_name = "authenc-hmac-sha512-cbc-des-caam", | ||
1051 | .blocksize = DES_BLOCK_SIZE, | ||
1052 | .aead = { | ||
1053 | .setkey = aead_authenc_setkey, | ||
1054 | .setauthsize = aead_authenc_setauthsize, | ||
1055 | .encrypt = aead_authenc_encrypt, | ||
1056 | .decrypt = aead_authenc_decrypt, | ||
1057 | .givencrypt = aead_authenc_givencrypt, | ||
1058 | .geniv = "<built-in>", | ||
1059 | .ivsize = DES_BLOCK_SIZE, | ||
1060 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
1061 | }, | ||
1062 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
1063 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
1064 | OP_ALG_AAI_HMAC_PRECOMP, | ||
1065 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
1066 | }, | ||
1067 | }; | ||
1068 | |||
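Every entry in the template table above becomes a registered crypto_alg, looked up by consumers through its cra_name. Below is a minimal, illustrative sketch (not part of this patch) of how a kernel user would reach one of these AEADs through the standard crypto API of this era (declarations in linux/crypto.h); the function name caam_aead_smoke_test and the key arguments are hypothetical, and the key must already be in the authenc key layout.

    #include <linux/err.h>
    #include <linux/crypto.h>

    /*
     * Illustrative only: 'key' is assumed to be an authenc-format blob
     * (crypto_authenc_key_param header + auth key + cipher key); 12 is
     * IPsec's truncated HMAC-SHA1 ICV length, chosen for illustration.
     */
    static int caam_aead_smoke_test(const u8 *key, unsigned int keylen)
    {
    	struct crypto_aead *tfm;
    	int err;

    	/* Resolved by cra_name; with this driver loaded, the
    	 * highest-priority match is authenc-hmac-sha1-cbc-aes-caam. */
    	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	err = crypto_aead_setkey(tfm, key, keylen); /* aead_authenc_setkey */
    	if (!err)
    		err = crypto_aead_setauthsize(tfm, 12);

    	crypto_free_aead(tfm);
    	return err;
    }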
1069 | struct caam_crypto_alg { | ||
1070 | struct list_head entry; | ||
1071 | struct device *ctrldev; | ||
1072 | int class1_alg_type; | ||
1073 | int class2_alg_type; | ||
1074 | int alg_op; | ||
1075 | struct crypto_alg crypto_alg; | ||
1076 | }; | ||
1077 | |||
1078 | static int caam_cra_init(struct crypto_tfm *tfm) | ||
1079 | { | ||
1080 | struct crypto_alg *alg = tfm->__crt_alg; | ||
1081 | struct caam_crypto_alg *caam_alg = | ||
1082 | container_of(alg, struct caam_crypto_alg, crypto_alg); | ||
1083 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1084 | struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); | ||
1085 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | ||
1086 | |||
1087 | /* | ||
1088 | * distribute tfms across job rings to ensure in-order | ||
1089 | * crypto request processing per tfm | ||
1090 | */ | ||
1091 | ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi]; | ||
1092 | |||
1093 | /* copy descriptor header template value */ | ||
1094 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; | ||
1095 | ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; | ||
1096 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; | ||
1097 | |||
1098 | return 0; | ||
1099 | } | ||
1100 | |||
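The job-ring assignment in caam_cra_init() is a simple pairwise round-robin: tgt_jr counts tfms (starting at 0, since the counter is initialized to -1 and atomic_inc_return pre-increments), and (tgt_jr / 2) % num_jrs hands each ring to two consecutive tfms before moving on. A toy user-space model of just that arithmetic, with an assumed ring count of 4:

    #include <stdio.h>

    /* Toy model of the tfm-to-job-ring mapping in caam_cra_init(). */
    int main(void)
    {
    	int num_jrs = 4;	/* assumed ring count, for illustration */
    	int tgt_jr;

    	for (tgt_jr = 0; tgt_jr < 10; tgt_jr++)
    		printf("tfm %d -> job ring %d\n",
    		       tgt_jr, (tgt_jr / 2) % num_jrs);
    	return 0;
    }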
1101 | static void caam_cra_exit(struct crypto_tfm *tfm) | ||
1102 | { | ||
1103 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1104 | |||
1105 | if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys)) | ||
1106 | dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys, | ||
1107 | desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); | ||
1108 | kfree(ctx->sh_desc); | ||
1109 | |||
1110 | if (!dma_mapping_error(ctx->jrdev, ctx->key_phys)) | ||
1111 | dma_unmap_single(ctx->jrdev, ctx->key_phys, | ||
1112 | ctx->split_key_pad_len + ctx->enckeylen, | ||
1113 | DMA_TO_DEVICE); | ||
1114 | kfree(ctx->key); | ||
1115 | } | ||
1116 | |||
1117 | static void __exit caam_algapi_exit(void) | ||
1118 | { | ||
1119 | |||
1120 | struct device_node *dev_node; | ||
1121 | struct platform_device *pdev; | ||
1122 | struct device *ctrldev; | ||
1123 | struct caam_drv_private *priv; | ||
1124 | struct caam_crypto_alg *t_alg, *n; | ||
1125 | int i, err; | ||
1126 | |||
1127 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
1128 | if (!dev_node) | ||
1129 | return; | ||
1130 | |||
1131 | pdev = of_find_device_by_node(dev_node); | ||
1132 | if (!pdev) | ||
1133 | return; | ||
1134 | |||
1135 | ctrldev = &pdev->dev; | ||
1136 | of_node_put(dev_node); | ||
1137 | priv = dev_get_drvdata(ctrldev); | ||
1138 | |||
1139 | if (!priv->alg_list.next) | ||
1140 | return; | ||
1141 | |||
1142 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { | ||
1143 | crypto_unregister_alg(&t_alg->crypto_alg); | ||
1144 | list_del(&t_alg->entry); | ||
1145 | kfree(t_alg); | ||
1146 | } | ||
1147 | |||
1148 | for (i = 0; i < priv->total_jobrs; i++) { | ||
1149 | err = caam_jr_deregister(priv->algapi_jr[i]); | ||
1150 | if (err < 0) | ||
1151 | break; | ||
1152 | } | ||
1153 | kfree(priv->algapi_jr); | ||
1154 | } | ||
1155 | |||
1156 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | ||
1157 | struct caam_alg_template | ||
1158 | *template) | ||
1159 | { | ||
1160 | struct caam_crypto_alg *t_alg; | ||
1161 | struct crypto_alg *alg; | ||
1162 | |||
1163 | t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); | ||
1164 | if (!t_alg) { | ||
1165 | dev_err(ctrldev, "failed to allocate t_alg\n"); | ||
1166 | return ERR_PTR(-ENOMEM); | ||
1167 | } | ||
1168 | |||
1169 | alg = &t_alg->crypto_alg; | ||
1170 | |||
1171 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); | ||
1172 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
1173 | template->driver_name); | ||
1174 | alg->cra_module = THIS_MODULE; | ||
1175 | alg->cra_init = caam_cra_init; | ||
1176 | alg->cra_exit = caam_cra_exit; | ||
1177 | alg->cra_priority = CAAM_CRA_PRIORITY; | ||
1178 | alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
1179 | alg->cra_blocksize = template->blocksize; | ||
1180 | alg->cra_alignmask = 0; | ||
1181 | alg->cra_type = &crypto_aead_type; | ||
1182 | alg->cra_ctxsize = sizeof(struct caam_ctx); | ||
1183 | alg->cra_u.aead = template->aead; | ||
1184 | |||
1185 | t_alg->class1_alg_type = template->class1_alg_type; | ||
1186 | t_alg->class2_alg_type = template->class2_alg_type; | ||
1187 | t_alg->alg_op = template->alg_op; | ||
1188 | t_alg->ctrldev = ctrldev; | ||
1189 | |||
1190 | return t_alg; | ||
1191 | } | ||
1192 | |||
1193 | static int __init caam_algapi_init(void) | ||
1194 | { | ||
1195 | struct device_node *dev_node; | ||
1196 | struct platform_device *pdev; | ||
1197 | struct device *ctrldev, **jrdev; | ||
1198 | struct caam_drv_private *priv; | ||
1199 | int i = 0, err = 0; | ||
1200 | |||
1201 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
1202 | if (!dev_node) | ||
1203 | return -ENODEV; | ||
1204 | |||
1205 | pdev = of_find_device_by_node(dev_node); | ||
1206 | if (!pdev) | ||
1207 | return -ENODEV; | ||
1208 | |||
1209 | ctrldev = &pdev->dev; | ||
1210 | priv = dev_get_drvdata(ctrldev); | ||
1211 | of_node_put(dev_node); | ||
1212 | |||
1213 | INIT_LIST_HEAD(&priv->alg_list); | ||
1214 | |||
1215 | jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL); | ||
1216 | if (!jrdev) | ||
1217 | return -ENOMEM; | ||
1218 | |||
1219 | for (i = 0; i < priv->total_jobrs; i++) { | ||
1220 | err = caam_jr_register(ctrldev, &jrdev[i]); | ||
1221 | if (err < 0) | ||
1222 | break; | ||
1223 | } | ||
1224 | if (err < 0 && i == 0) { | ||
1225 | dev_err(ctrldev, "algapi error in job ring registration: %d\n", | ||
1226 | err); | ||
1227 | kfree(jrdev); | ||
1228 | return err; | ||
1229 | } | ||
1230 | |||
1231 | priv->num_jrs_for_algapi = i; | ||
1232 | priv->algapi_jr = jrdev; | ||
1233 | atomic_set(&priv->tfm_count, -1); | ||
1234 | |||
1235 | /* register crypto algorithms the device supports */ | ||
1236 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | ||
1237 | /* TODO: check if h/w supports alg */ | ||
1238 | struct caam_crypto_alg *t_alg; | ||
1239 | |||
1240 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); | ||
1241 | if (IS_ERR(t_alg)) { | ||
1242 | err = PTR_ERR(t_alg); | ||
1243 | dev_warn(ctrldev, "%s alg allocation failed\n", | ||
1244 | driver_algs[i].driver_name); | ||
1245 | continue; | ||
1246 | } | ||
1247 | |||
1248 | err = crypto_register_alg(&t_alg->crypto_alg); | ||
1249 | if (err) { | ||
1250 | dev_warn(ctrldev, "%s alg registration failed\n", | ||
1251 | t_alg->crypto_alg.cra_driver_name); | ||
1252 | kfree(t_alg); | ||
1253 | } else { | ||
1254 | list_add_tail(&t_alg->entry, &priv->alg_list); | ||
1255 | dev_info(ctrldev, "%s\n", | ||
1256 | t_alg->crypto_alg.cra_driver_name); | ||
1257 | } | ||
1258 | } | ||
1259 | |||
1260 | return err; | ||
1261 | } | ||
1262 | |||
1263 | module_init(caam_algapi_init); | ||
1264 | module_exit(caam_algapi_exit); | ||
1265 | |||
1266 | MODULE_LICENSE("GPL"); | ||
1267 | MODULE_DESCRIPTION("FSL CAAM support for crypto API"); | ||
1268 | MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); | ||
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h new file mode 100644 index 000000000000..950450346f70 --- /dev/null +++ b/drivers/crypto/caam/compat.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
3 | */ | ||
4 | |||
5 | #ifndef CAAM_COMPAT_H | ||
6 | #define CAAM_COMPAT_H | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/mod_devicetable.h> | ||
11 | #include <linux/device.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/crypto.h> | ||
14 | #include <linux/hw_random.h> | ||
15 | #include <linux/of_platform.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/io.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/rtnetlink.h> | ||
20 | #include <linux/in.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/circ_buf.h> | ||
25 | #include <net/xfrm.h> | ||
26 | |||
27 | #include <crypto/algapi.h> | ||
28 | #include <crypto/aes.h> | ||
29 | #include <crypto/des.h> | ||
30 | #include <crypto/sha.h> | ||
31 | #include <crypto/aead.h> | ||
32 | #include <crypto/authenc.h> | ||
33 | #include <crypto/scatterwalk.h> | ||
34 | |||
35 | #endif /* !defined(CAAM_COMPAT_H) */ | ||
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c new file mode 100644 index 000000000000..9009713a3c2e --- /dev/null +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -0,0 +1,269 @@ | |||
1 | /* | ||
2 | * CAAM control-plane driver backend | ||
3 | * Controller-level driver, kernel property detection, initialization | ||
4 | * | ||
5 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
6 | */ | ||
7 | |||
8 | #include "compat.h" | ||
9 | #include "regs.h" | ||
10 | #include "intern.h" | ||
11 | #include "jr.h" | ||
12 | |||
13 | static int caam_remove(struct platform_device *pdev) | ||
14 | { | ||
15 | struct device *ctrldev; | ||
16 | struct caam_drv_private *ctrlpriv; | ||
17 | struct caam_drv_private_jr *jrpriv; | ||
18 | struct caam_full __iomem *topregs; | ||
19 | int ring, ret = 0; | ||
20 | |||
21 | ctrldev = &pdev->dev; | ||
22 | ctrlpriv = dev_get_drvdata(ctrldev); | ||
23 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
24 | |||
25 | /* shut down JobRs */ | ||
26 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | ||
27 | ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]); | ||
28 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); | ||
29 | irq_dispose_mapping(jrpriv->irq); | ||
30 | } | ||
31 | |||
32 | /* Shut down debug views */ | ||
33 | #ifdef CONFIG_DEBUG_FS | ||
34 | debugfs_remove_recursive(ctrlpriv->dfs_root); | ||
35 | #endif | ||
36 | |||
37 | /* Unmap controller region */ | ||
38 | iounmap(&topregs->ctrl); | ||
39 | |||
40 | kfree(ctrlpriv->jrdev); | ||
41 | kfree(ctrlpriv); | ||
42 | |||
43 | return ret; | ||
44 | } | ||
45 | |||
46 | /* Probe routine for CAAM top (controller) level */ | ||
47 | static int caam_probe(struct platform_device *pdev) | ||
48 | { | ||
49 | int d, ring, rspec; | ||
50 | struct device *dev; | ||
51 | struct device_node *nprop, *np; | ||
52 | struct caam_ctrl __iomem *ctrl; | ||
53 | struct caam_full __iomem *topregs; | ||
54 | struct caam_drv_private *ctrlpriv; | ||
55 | struct caam_perfmon *perfmon; | ||
56 | struct caam_deco **deco; | ||
57 | u32 deconum; | ||
58 | |||
59 | ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL); | ||
60 | if (!ctrlpriv) | ||
61 | return -ENOMEM; | ||
62 | |||
63 | dev = &pdev->dev; | ||
64 | dev_set_drvdata(dev, ctrlpriv); | ||
65 | ctrlpriv->pdev = pdev; | ||
66 | nprop = pdev->dev.of_node; | ||
67 | |||
68 | /* Get configuration properties from device tree */ | ||
69 | /* First, get register page */ | ||
70 | ctrl = of_iomap(nprop, 0); | ||
71 | if (ctrl == NULL) { | ||
72 | dev_err(dev, "caam: of_iomap() failed\n"); | ||
73 | return -ENOMEM; | ||
74 | } | ||
75 | ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl; | ||
76 | |||
77 | /* topregs used to derive pointers to CAAM sub-blocks only */ | ||
78 | topregs = (struct caam_full __iomem *)ctrl; | ||
79 | |||
80 | /* Get the IRQ of the controller (for security violations only) */ | ||
81 | ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL); | ||
82 | |||
83 | /* | ||
84 | * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, | ||
85 | * 36-bit pointers in master configuration register | ||
86 | */ | ||
87 | setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE | | ||
88 | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); | ||
89 | |||
90 | if (sizeof(dma_addr_t) == sizeof(u64)) | ||
91 | dma_set_mask(dev, DMA_BIT_MASK(36)); | ||
92 | |||
93 | /* Find out how many DECOs are present */ | ||
94 | deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) & | ||
95 | CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT; | ||
96 | |||
97 | ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *), | ||
98 | GFP_KERNEL); | ||
99 | |||
100 | deco = (struct caam_deco __force **)&topregs->deco; | ||
101 | for (d = 0; d < deconum; d++) | ||
102 | ctrlpriv->deco[d] = deco[d]; | ||
103 | |||
104 | /* | ||
105 | * Detect and enable JobRs | ||
106 | * First, find out how many ring spec'ed, allocate references | ||
107 | * for all, then go probe each one. | ||
108 | */ | ||
109 | rspec = 0; | ||
110 | for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") | ||
111 | rspec++; | ||
112 | ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL); | ||
113 | if (ctrlpriv->jrdev == NULL) { | ||
114 | iounmap(&topregs->ctrl); | ||
115 | return -ENOMEM; | ||
116 | } | ||
117 | |||
118 | ring = 0; | ||
119 | ctrlpriv->total_jobrs = 0; | ||
120 | for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { | ||
121 | caam_jr_probe(pdev, np, ring); | ||
122 | ctrlpriv->total_jobrs++; | ||
123 | ring++; | ||
124 | } | ||
125 | |||
126 | /* Check to see if QI present. If so, enable */ | ||
127 | ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) & | ||
128 | CTPR_QI_MASK); | ||
129 | if (ctrlpriv->qi_present) { | ||
130 | ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi; | ||
131 | /* This is all that's required to physically enable QI */ | ||
132 | wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN); | ||
133 | } | ||
134 | |||
135 | /* If no QI and no rings specified, quit and go home */ | ||
136 | if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { | ||
137 | dev_err(dev, "no queues configured, terminating\n"); | ||
138 | caam_remove(pdev); | ||
139 | return -ENOMEM; | ||
140 | } | ||
141 | |||
142 | /* NOTE: RTIC detection ought to go here, around Si time */ | ||
143 | |||
144 | /* Initialize queue allocator lock */ | ||
145 | spin_lock_init(&ctrlpriv->jr_alloc_lock); | ||
146 | |||
147 | /* Report "alive" for developer to see */ | ||
148 | dev_info(dev, "device ID = 0x%016llx\n", | ||
149 | rd_reg64(&topregs->ctrl.perfmon.caam_id)); | ||
150 | dev_info(dev, "job rings = %d, qi = %d\n", | ||
151 | ctrlpriv->total_jobrs, ctrlpriv->qi_present); | ||
152 | |||
153 | #ifdef CONFIG_DEBUG_FS | ||
154 | /* | ||
155 | * FIXME: needs better naming distinction, as some amalgamation of | ||
156 | * "caam" and nprop->full_name. The OF name isn't distinctive, | ||
157 | * but does separate instances | ||
158 | */ | ||
159 | perfmon = (struct caam_perfmon __force *)&ctrl->perfmon; | ||
160 | |||
161 | ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL); | ||
162 | ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); | ||
163 | |||
164 | /* Controller-level - performance monitor counters */ | ||
165 | ctrlpriv->ctl_rq_dequeued = | ||
166 | debugfs_create_u64("rq_dequeued", | ||
167 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
168 | ctrlpriv->ctl, &perfmon->req_dequeued); | ||
169 | ctrlpriv->ctl_ob_enc_req = | ||
170 | debugfs_create_u64("ob_rq_encrypted", | ||
171 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
172 | ctrlpriv->ctl, &perfmon->ob_enc_req); | ||
173 | ctrlpriv->ctl_ib_dec_req = | ||
174 | debugfs_create_u64("ib_rq_decrypted", | ||
175 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
176 | ctrlpriv->ctl, &perfmon->ib_dec_req); | ||
177 | ctrlpriv->ctl_ob_enc_bytes = | ||
178 | debugfs_create_u64("ob_bytes_encrypted", | ||
179 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
180 | ctrlpriv->ctl, &perfmon->ob_enc_bytes); | ||
181 | ctrlpriv->ctl_ob_prot_bytes = | ||
182 | debugfs_create_u64("ob_bytes_protected", | ||
183 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
184 | ctrlpriv->ctl, &perfmon->ob_prot_bytes); | ||
185 | ctrlpriv->ctl_ib_dec_bytes = | ||
186 | debugfs_create_u64("ib_bytes_decrypted", | ||
187 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
188 | ctrlpriv->ctl, &perfmon->ib_dec_bytes); | ||
189 | ctrlpriv->ctl_ib_valid_bytes = | ||
190 | debugfs_create_u64("ib_bytes_validated", | ||
191 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
192 | ctrlpriv->ctl, &perfmon->ib_valid_bytes); | ||
193 | |||
194 | /* Controller level - global status values */ | ||
195 | ctrlpriv->ctl_faultaddr = | ||
196 | debugfs_create_u64("fault_addr", | ||
197 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
198 | ctrlpriv->ctl, &perfmon->faultaddr); | ||
199 | ctrlpriv->ctl_faultdetail = | ||
200 | debugfs_create_u32("fault_detail", | ||
201 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
202 | ctrlpriv->ctl, &perfmon->faultdetail); | ||
203 | ctrlpriv->ctl_faultstatus = | ||
204 | debugfs_create_u32("fault_status", | ||
205 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | ||
206 | ctrlpriv->ctl, &perfmon->status); | ||
207 | |||
208 | /* Internal covering keys (useful in non-secure mode only) */ | ||
209 | ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0]; | ||
210 | ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); | ||
211 | ctrlpriv->ctl_kek = debugfs_create_blob("kek", | ||
212 | S_IFCHR | S_IRUSR | | ||
213 | S_IRGRP | S_IROTH, | ||
214 | ctrlpriv->ctl, | ||
215 | &ctrlpriv->ctl_kek_wrap); | ||
216 | |||
217 | ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0]; | ||
218 | ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); | ||
219 | ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", | ||
220 | S_IFCHR | S_IRUSR | | ||
221 | S_IRGRP | S_IROTH, | ||
222 | ctrlpriv->ctl, | ||
223 | &ctrlpriv->ctl_tkek_wrap); | ||
224 | |||
225 | ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0]; | ||
226 | ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); | ||
227 | ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", | ||
228 | S_IFCHR | S_IRUSR | | ||
229 | S_IRGRP | S_IROTH, | ||
230 | ctrlpriv->ctl, | ||
231 | &ctrlpriv->ctl_tdsk_wrap); | ||
232 | #endif | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | static struct of_device_id caam_match[] = { | ||
237 | { | ||
238 | .compatible = "fsl,sec-v4.0", | ||
239 | }, | ||
240 | {}, | ||
241 | }; | ||
242 | MODULE_DEVICE_TABLE(of, caam_match); | ||
243 | |||
244 | static struct platform_driver caam_driver = { | ||
245 | .driver = { | ||
246 | .name = "caam", | ||
247 | .owner = THIS_MODULE, | ||
248 | .of_match_table = caam_match, | ||
249 | }, | ||
250 | .probe = caam_probe, | ||
251 | .remove = __devexit_p(caam_remove), | ||
252 | }; | ||
253 | |||
254 | static int __init caam_base_init(void) | ||
255 | { | ||
256 | return platform_driver_register(&caam_driver); | ||
257 | } | ||
258 | |||
259 | static void __exit caam_base_exit(void) | ||
260 | { | ||
261 | return platform_driver_unregister(&caam_driver); | ||
262 | } | ||
263 | |||
264 | module_init(caam_base_init); | ||
265 | module_exit(caam_base_exit); | ||
266 | |||
267 | MODULE_LICENSE("GPL"); | ||
268 | MODULE_DESCRIPTION("FSL CAAM request backend"); | ||
269 | MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); | ||
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h new file mode 100644 index 000000000000..974a75842da9 --- /dev/null +++ b/drivers/crypto/caam/desc.h | |||
@@ -0,0 +1,1605 @@ | |||
1 | /* | ||
2 | * CAAM descriptor composition header | ||
3 | * Definitions to support CAAM descriptor instruction generation | ||
4 | * | ||
5 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
6 | */ | ||
7 | |||
8 | #ifndef DESC_H | ||
9 | #define DESC_H | ||
10 | |||
11 | /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ | ||
12 | #define MAX_CAAM_DESCSIZE 64 | ||
13 | |||
14 | /* Block size of any entity covered/uncovered with a KEK/TKEK */ | ||
15 | #define KEK_BLOCKSIZE 16 | ||
16 | |||
17 | /* | ||
18 | * Supported descriptor command types as they show up | ||
19 | * inside a descriptor command word. | ||
20 | */ | ||
21 | #define CMD_SHIFT 27 | ||
22 | #define CMD_MASK 0xf8000000 | ||
23 | |||
24 | #define CMD_KEY (0x00 << CMD_SHIFT) | ||
25 | #define CMD_SEQ_KEY (0x01 << CMD_SHIFT) | ||
26 | #define CMD_LOAD (0x02 << CMD_SHIFT) | ||
27 | #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT) | ||
28 | #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT) | ||
29 | #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT) | ||
30 | #define CMD_STORE (0x0a << CMD_SHIFT) | ||
31 | #define CMD_SEQ_STORE (0x0b << CMD_SHIFT) | ||
32 | #define CMD_FIFO_STORE (0x0c << CMD_SHIFT) | ||
33 | #define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT) | ||
34 | #define CMD_MOVE_LEN (0x0e << CMD_SHIFT) | ||
35 | #define CMD_MOVE (0x0f << CMD_SHIFT) | ||
36 | #define CMD_OPERATION (0x10 << CMD_SHIFT) | ||
37 | #define CMD_SIGNATURE (0x12 << CMD_SHIFT) | ||
38 | #define CMD_JUMP (0x14 << CMD_SHIFT) | ||
39 | #define CMD_MATH (0x15 << CMD_SHIFT) | ||
40 | #define CMD_DESC_HDR (0x16 << CMD_SHIFT) | ||
41 | #define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT) | ||
42 | #define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT) | ||
43 | #define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT) | ||
44 | |||
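Given those encodings, identifying a descriptor word is a mask-and-shift on its top five bits. A small self-contained sketch (constants copied from above, with 'u' suffixes added so the shifts stay well-defined in portable user-space C):

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_SHIFT	27
    #define CMD_MASK	0xf8000000u
    #define CMD_OPERATION	(0x10u << CMD_SHIFT)

    int main(void)
    {
    	/* 0x123 stands in for command-specific payload bits */
    	uint32_t word = CMD_OPERATION | 0x123u;

    	/* The top five bits select the command; the remaining
    	 * 27 bits are command-specific. */
    	printf("command field = 0x%02x\n",
    	       (unsigned)((word & CMD_MASK) >> CMD_SHIFT));
    	return 0;
    }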
45 | /* General-purpose class selector for all commands */ | ||
46 | #define CLASS_SHIFT 25 | ||
47 | #define CLASS_MASK (0x03 << CLASS_SHIFT) | ||
48 | |||
49 | #define CLASS_NONE (0x00 << CLASS_SHIFT) | ||
50 | #define CLASS_1 (0x01 << CLASS_SHIFT) | ||
51 | #define CLASS_2 (0x02 << CLASS_SHIFT) | ||
52 | #define CLASS_BOTH (0x03 << CLASS_SHIFT) | ||
53 | |||
54 | /* | ||
55 | * Descriptor header command constructs | ||
56 | * Covers shared, job, and trusted descriptor headers | ||
57 | */ | ||
58 | |||
59 | /* | ||
60 | * Do Not Run - marks a descriptor inexecutable if there was | ||
61 | * a preceding error somewhere | ||
62 | */ | ||
63 | #define HDR_DNR 0x01000000 | ||
64 | |||
65 | /* | ||
66 | * ONE - should always be set. Combination of ONE (always | ||
67 | * set) and ZRO (always clear) forms an endianness sanity check | ||
68 | */ | ||
69 | #define HDR_ONE 0x00800000 | ||
70 | #define HDR_ZRO 0x00008000 | ||
71 | |||
72 | /* Start Index or SharedDesc Length */ | ||
73 | #define HDR_START_IDX_MASK 0x3f | ||
74 | #define HDR_START_IDX_SHIFT 16 | ||
75 | |||
76 | /* If shared descriptor header, 6-bit length */ | ||
77 | #define HDR_DESCLEN_SHR_MASK 0x3f | ||
78 | |||
79 | /* If non-shared header, 7-bit length */ | ||
80 | #define HDR_DESCLEN_MASK 0x7f | ||
81 | |||
82 | /* This is a TrustedDesc (if not SharedDesc) */ | ||
83 | #define HDR_TRUSTED 0x00004000 | ||
84 | |||
85 | /* Make into TrustedDesc (if not SharedDesc) */ | ||
86 | #define HDR_MAKE_TRUSTED 0x00002000 | ||
87 | |||
88 | /* Save context if self-shared (if SharedDesc) */ | ||
89 | #define HDR_SAVECTX 0x00001000 | ||
90 | |||
91 | /* Next item points to SharedDesc */ | ||
92 | #define HDR_SHARED 0x00001000 | ||
93 | |||
94 | /* | ||
95 | * Reverse Execution Order - execute JobDesc first, then | ||
96 | * execute SharedDesc (normally SharedDesc goes first). | ||
97 | */ | ||
98 | #define HDR_REVERSE 0x00000800 | ||
99 | |||
100 | /* Propagate DNR property to SharedDesc */ | ||
101 | #define HDR_PROP_DNR 0x00000800 | ||
102 | |||
103 | /* JobDesc/SharedDesc share property */ | ||
104 | #define HDR_SD_SHARE_MASK 0x03 | ||
105 | #define HDR_SD_SHARE_SHIFT 8 | ||
106 | #define HDR_JD_SHARE_MASK 0x07 | ||
107 | #define HDR_JD_SHARE_SHIFT 8 | ||
108 | |||
109 | #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT) | ||
110 | #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT) | ||
111 | #define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT) | ||
112 | #define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT) | ||
113 | #define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT) | ||
114 | |||
115 | /* JobDesc/SharedDesc descriptor length */ | ||
116 | #define HDR_JD_LENGTH_MASK 0x7f | ||
117 | #define HDR_SD_LENGTH_MASK 0x3f | ||
118 | |||
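These header flags are meant to be OR-ed together onto the header command. A hedged sketch of building the first word of a shared descriptor of 'len' 32-bit words (constants copied from above; real descriptors also encode a start index, so treat this as the shape, not a verbatim routine from this driver):

    #include <stdint.h>

    #define CMD_SHIFT		27
    #define CMD_SHARED_DESC_HDR	(0x17u << CMD_SHIFT)
    #define HDR_ONE			0x00800000
    #define HDR_SD_SHARE_SHIFT	8
    #define HDR_SHARE_SERIAL	(0x02 << HDR_SD_SHARE_SHIFT)
    #define HDR_SD_LENGTH_MASK	0x3f

    /* First word of a serially-shared SharedDesc of 'len' words. */
    static uint32_t sh_desc_hdr(unsigned int len)
    {
    	return CMD_SHARED_DESC_HDR | HDR_ONE | HDR_SHARE_SERIAL |
    	       (len & HDR_SD_LENGTH_MASK);
    }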
119 | /* | ||
120 | * KEY/SEQ_KEY Command Constructs | ||
121 | */ | ||
122 | |||
123 | /* Key Destination Class: 01 = Class 1, 02 = Class 2 */ | ||
124 | #define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */ | ||
125 | #define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT) | ||
126 | |||
127 | /* Scatter-Gather Table/Variable Length Field */ | ||
128 | #define KEY_SGF 0x01000000 | ||
129 | #define KEY_VLF 0x01000000 | ||
130 | |||
131 | /* Immediate - Key follows command in the descriptor */ | ||
132 | #define KEY_IMM 0x00800000 | ||
133 | |||
134 | /* | ||
135 | * Encrypted - Key is encrypted either with the KEK, or | ||
136 | * with the TDKEK if TK is set | ||
137 | */ | ||
138 | #define KEY_ENC 0x00400000 | ||
139 | |||
140 | /* | ||
141 | * No Write Back - Do not allow key to be FIFO STOREd | ||
142 | */ | ||
143 | #define KEY_NWB 0x00200000 | ||
144 | |||
145 | /* | ||
146 | * Enhanced Encryption of Key | ||
147 | */ | ||
148 | #define KEY_EKT 0x00100000 | ||
149 | |||
150 | /* | ||
151 | * Encrypted with Trusted Key | ||
152 | */ | ||
153 | #define KEY_TK 0x00008000 | ||
154 | |||
155 | /* | ||
156 | * KDEST - Key Destination: 0 - class key register, | ||
157 | * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key | ||
158 | */ | ||
159 | #define KEY_DEST_SHIFT 16 | ||
160 | #define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT) | ||
161 | |||
162 | #define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT) | ||
163 | #define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT) | ||
164 | #define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT) | ||
165 | #define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT) | ||
166 | |||
167 | /* Length in bytes */ | ||
168 | #define KEY_LENGTH_MASK 0x000003ff | ||
169 | |||
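Putting the KEY fields together: a hedged sketch of the command word for loading an immediate key into the class 1 key register (constants copied from above; with KEY_IMM set, the key bytes follow this word inline in the descriptor):

    #include <stdint.h>

    #define CMD_SHIFT		27
    #define CMD_KEY			(0x00u << CMD_SHIFT)
    #define CLASS_SHIFT		25
    #define CLASS_1			(0x01 << CLASS_SHIFT)
    #define KEY_IMM			0x00800000
    #define KEY_DEST_SHIFT		16
    #define KEY_DEST_CLASS_REG	(0x00 << KEY_DEST_SHIFT)
    #define KEY_LENGTH_MASK		0x000003ff

    /* KEY command: immediate 'keylen'-byte key -> class 1 key register. */
    static uint32_t key_cmd_immediate_class1(unsigned int keylen)
    {
    	return CMD_KEY | CLASS_1 | KEY_DEST_CLASS_REG | KEY_IMM |
    	       (keylen & KEY_LENGTH_MASK);
    }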
170 | /* | ||
171 | * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs | ||
172 | */ | ||
173 | |||
174 | /* | ||
175 | * Load/Store Destination: 0 = class independent CCB, | ||
176 | * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO | ||
177 | */ | ||
178 | #define LDST_CLASS_SHIFT 25 | ||
179 | #define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT) | ||
180 | #define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT) | ||
181 | #define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT) | ||
182 | #define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT) | ||
183 | #define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT) | ||
184 | |||
185 | /* Scatter-Gather Table/Variable Length Field */ | ||
186 | #define LDST_SGF 0x01000000 | ||
187 | #define LDST_VLF LDST_SGF | ||
188 | |||
189 | /* Immediate - Key follows this command in descriptor */ | ||
190 | #define LDST_IMM_MASK 1 | ||
191 | #define LDST_IMM_SHIFT 23 | ||
192 | #define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT) | ||
193 | |||
194 | /* SRC/DST - Destination for LOAD, Source for STORE */ | ||
195 | #define LDST_SRCDST_SHIFT 16 | ||
196 | #define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT) | ||
197 | |||
198 | #define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT) | ||
199 | #define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT) | ||
200 | #define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT) | ||
201 | #define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT) | ||
202 | |||
203 | #define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT) | ||
204 | #define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT) | ||
205 | #define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT) | ||
206 | #define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT) | ||
207 | #define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT) | ||
208 | #define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT) | ||
209 | #define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT) | ||
210 | #define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT) | ||
211 | #define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT) | ||
212 | #define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT) | ||
213 | #define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT) | ||
214 | #define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT) | ||
215 | #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT) | ||
216 | #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT) | ||
217 | #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT) | ||
218 | #define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT) | ||
219 | #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT) | ||
220 | #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT) | ||
221 | #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT) | ||
222 | #define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT) | ||
223 | #define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT) | ||
224 | #define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT) | ||
225 | #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT) | ||
226 | |||
227 | /* Offset in source/destination */ | ||
228 | #define LDST_OFFSET_SHIFT 8 | ||
229 | #define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT) | ||
230 | |||
231 | /* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */ | ||
232 | /* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */ | ||
233 | #define LDOFF_CHG_SHARE_SHIFT 0 | ||
234 | #define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT) | ||
235 | #define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT) | ||
236 | #define LDOFF_CHG_SHARE_OK_NO_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT) | ||
237 | #define LDOFF_CHG_SHARE_OK_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT) | ||
238 | |||
239 | #define LDOFF_ENABLE_AUTO_NFIFO (1 << 2) | ||
240 | #define LDOFF_DISABLE_AUTO_NFIFO (1 << 3) | ||
241 | |||
242 | #define LDOFF_CHG_NONSEQLIODN_SHIFT 4 | ||
243 | #define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT) | ||
244 | #define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT) | ||
245 | #define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT) | ||
246 | #define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT) | ||
247 | |||
248 | #define LDOFF_CHG_SEQLIODN_SHIFT 6 | ||
249 | #define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT) | ||
250 | #define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT) | ||
251 | #define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT) | ||
252 | #define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT) | ||
253 | |||
254 | /* Data length in bytes */ | ||
255 | #define LDST_LEN_SHIFT 0 | ||
256 | #define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT) | ||
257 | |||
258 | /* Special Length definitions when dst=deco-ctrl */ | ||
259 | #define LDLEN_ENABLE_OSL_COUNT (1 << 7) | ||
260 | #define LDLEN_RST_CHA_OFIFO_PTR (1 << 6) | ||
261 | #define LDLEN_RST_OFIFO (1 << 5) | ||
262 | #define LDLEN_SET_OFIFO_OFF_VALID (1 << 4) | ||
263 | #define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3) | ||
264 | #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0 | ||
265 | #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT) | ||
266 | |||
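As a worked example of the LOAD encoding, here is a hedged sketch of the command word that writes 'len' bytes into the class 1 context register at byte offset 'off' (constants copied from above) - the shape of command used to seed, e.g., a CBC IV:

    #include <stdint.h>

    #define CMD_SHIFT			27
    #define CMD_LOAD			(0x02u << CMD_SHIFT)
    #define LDST_CLASS_SHIFT		25
    #define LDST_CLASS_1_CCB		(0x01 << LDST_CLASS_SHIFT)
    #define LDST_SRCDST_SHIFT		16
    #define LDST_SRCDST_BYTE_CONTEXT	(0x20 << LDST_SRCDST_SHIFT)
    #define LDST_OFFSET_SHIFT		8

    /* LOAD: 'len' bytes into class 1 context at byte offset 'off';
     * the source data pointer occupies the next descriptor word(s). */
    static uint32_t load_class1_context(unsigned int off, unsigned int len)
    {
    	return CMD_LOAD | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
    	       (off << LDST_OFFSET_SHIFT) | len;
    }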
267 | /* | ||
268 | * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE | ||
269 | * Command Constructs | ||
270 | */ | ||
271 | |||
272 | /* | ||
273 | * Load Destination: 0 = skip (SEQ_FIFO_LOAD only), | ||
274 | * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both | ||
275 | * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key | ||
276 | */ | ||
277 | #define FIFOLD_CLASS_SHIFT 25 | ||
278 | #define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT) | ||
279 | #define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT) | ||
280 | #define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT) | ||
281 | #define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT) | ||
282 | #define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT) | ||
283 | |||
284 | #define FIFOST_CLASS_SHIFT 25 | ||
285 | #define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT) | ||
286 | #define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT) | ||
287 | #define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT) | ||
288 | #define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT) | ||
289 | |||
290 | /* | ||
291 | * Scatter-Gather Table/Variable Length Field | ||
292 | * If set for FIFO_LOAD, refers to an SG table. Within | ||
293 | * SEQ_FIFO_LOAD, indicates a variable-length input sequence | ||
294 | */ | ||
295 | #define FIFOLDST_SGF_SHIFT 24 | ||
296 | #define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT) | ||
297 | #define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT) | ||
298 | #define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT) | ||
299 | #define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT) | ||
300 | |||
301 | /* Immediate - Data follows command in descriptor */ | ||
302 | #define FIFOLD_IMM_SHIFT 23 | ||
303 | #define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT) | ||
304 | #define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT) | ||
305 | |||
306 | /* Continue - Not the last FIFO store to come */ | ||
307 | #define FIFOST_CONT_SHIFT 23 | ||
308 | #define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT) | ||
309 | #define FIFOST_CONT (1 << FIFOST_CONT_SHIFT) | ||
310 | |||
311 | /* | ||
312 | * Extended Length - use 32-bit extended length that | ||
313 | * follows the pointer field. Illegal with IMM set | ||
314 | */ | ||
315 | #define FIFOLDST_EXT_SHIFT 22 | ||
316 | #define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT) | ||
317 | #define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT) | ||
318 | |||
320 | /* Input data type */ | ||
320 | #define FIFOLD_TYPE_SHIFT 16 | ||
321 | #define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */ | ||
322 | #define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT) | ||
323 | |||
324 | /* PK types */ | ||
325 | #define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT) | ||
326 | #define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT) | ||
327 | #define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT) | ||
328 | #define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT) | ||
329 | #define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT) | ||
330 | #define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT) | ||
331 | #define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT) | ||
332 | #define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT) | ||
333 | #define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT) | ||
334 | #define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT) | ||
335 | #define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT) | ||
336 | #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT) | ||
337 | #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT) | ||
338 | #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT) | ||
339 | |||
340 | /* Other types. Need to OR in last/flush bits as desired */ | ||
341 | #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT) | ||
342 | #define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT) | ||
343 | #define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT) | ||
344 | #define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT) | ||
345 | #define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT) | ||
346 | #define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT) | ||
347 | #define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT) | ||
348 | |||
349 | /* Last/Flush bits for use with "other" types above */ | ||
350 | #define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT) | ||
351 | #define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT) | ||
352 | #define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT) | ||
353 | #define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT) | ||
354 | #define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT) | ||
355 | #define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT) | ||
356 | #define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT) | ||
357 | #define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT) | ||
358 | #define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT) | ||
359 | |||
360 | #define FIFOLDST_LEN_MASK 0xffff | ||
361 | #define FIFOLDST_EXT_LEN_MASK 0xffffffff | ||
362 | |||
363 | /* Output data types */ | ||
364 | #define FIFOST_TYPE_SHIFT 16 | ||
365 | #define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT) | ||
366 | |||
367 | #define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT) | ||
368 | #define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT) | ||
369 | #define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT) | ||
370 | #define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT) | ||
371 | #define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT) | ||
372 | #define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT) | ||
373 | #define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT) | ||
374 | #define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT) | ||
375 | #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT) | ||
376 | #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT) | ||
377 | #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT) | ||
378 | #define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT) | ||
379 | #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT) | ||
380 | #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT) | ||
381 | #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT) | ||
382 | #define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT) | ||
383 | #define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT) | ||
384 | #define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT) | ||
385 | #define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT) | ||
386 | #define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT) | ||
387 | #define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT) | ||
388 | #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT) | ||
389 | #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT) | ||
390 | #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT) | ||
391 | #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT) | ||
392 | |||
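Tying the FIFO_LOAD pieces together, a hedged sketch (constants copied from above) of the word that feeds 'len' bytes of message data to the class 1 CHA and marks it as the final class 1 input, per the "Need to OR in last/flush bits as desired" note:

    #include <stdint.h>

    #define CMD_SHIFT		27
    #define CMD_FIFO_LOAD		(0x04u << CMD_SHIFT)
    #define FIFOLD_CLASS_SHIFT	25
    #define FIFOLD_CLASS_CLASS1	(0x01 << FIFOLD_CLASS_SHIFT)
    #define FIFOLD_TYPE_SHIFT	16
    #define FIFOLD_TYPE_MSG		(0x10 << FIFOLD_TYPE_SHIFT)
    #define FIFOLD_TYPE_LAST1	(0x02 << FIFOLD_TYPE_SHIFT)
    #define FIFOLDST_LEN_MASK	0xffff

    /* FIFO LOAD: 'len' bytes of message data, last class 1 input. */
    static uint32_t fifo_load_msg_last1(unsigned int len)
    {
    	return CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
    	       FIFOLD_TYPE_LAST1 | (len & FIFOLDST_LEN_MASK);
    }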
393 | /* | ||
394 | * OPERATION Command Constructs | ||
395 | */ | ||
396 | |||
397 | /* Operation type selectors - OP TYPE */ | ||
398 | #define OP_TYPE_SHIFT 24 | ||
399 | #define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT) | ||
400 | |||
401 | #define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT) | ||
402 | #define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT) | ||
403 | #define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT) | ||
404 | #define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT) | ||
405 | #define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT) | ||
406 | #define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT) | ||
407 | |||
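These OP_TYPE_* bits are exactly what caam_cra_init() above folds into the per-tfm header templates (ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type); an OPERATION word is then that value OR-ed onto CMD_OPERATION. A hedged sketch (constants copied from above; the OP_ALG_* selector values, e.g. OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC from the template table, are defined further below in desc.h):

    #include <stdint.h>

    #define CMD_SHIFT		27
    #define CMD_OPERATION		(0x10u << CMD_SHIFT)
    #define OP_TYPE_SHIFT		24
    #define OP_TYPE_CLASS1_ALG	(0x02 << OP_TYPE_SHIFT)

    /* OPERATION word for a class 1 algorithm, given a template's
     * class 1 selector bits. */
    static uint32_t class1_operation(uint32_t template_class1_alg_type)
    {
    	return CMD_OPERATION | OP_TYPE_CLASS1_ALG |
    	       template_class1_alg_type;
    }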
408 | /* ProtocolID selectors - PROTID */ | ||
409 | #define OP_PCLID_SHIFT 16 | ||
410 | #define OP_PCLID_MASK (0xff << OP_PCLID_SHIFT) | ||
411 | |||
412 | /* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */ | ||
413 | #define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT) | ||
414 | #define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT) | ||
415 | #define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT) | ||
416 | #define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT) | ||
417 | #define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT) | ||
418 | #define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT) | ||
419 | #define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT) | ||
420 | #define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT) | ||
421 | #define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT) | ||
422 | #define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT) | ||
423 | #define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT) | ||
424 | #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) | ||
425 | |||
426 | /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ | ||
427 | #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) | ||
428 | #define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT) | ||
429 | #define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT) | ||
430 | #define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT) | ||
431 | #define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT) | ||
432 | #define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT) | ||
433 | #define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT) | ||
434 | #define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT) | ||
435 | #define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT) | ||
436 | #define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT) | ||
437 | |||
438 | /* | ||
439 | * ProtocolInfo selectors | ||
440 | */ | ||
441 | #define OP_PCLINFO_MASK 0xffff | ||
442 | |||
443 | /* for OP_PCLID_IPSEC */ | ||
444 | #define OP_PCL_IPSEC_CIPHER_MASK 0xff00 | ||
445 | #define OP_PCL_IPSEC_AUTH_MASK 0x00ff | ||
446 | |||
447 | #define OP_PCL_IPSEC_DES_IV64 0x0100 | ||
448 | #define OP_PCL_IPSEC_DES 0x0200 | ||
449 | #define OP_PCL_IPSEC_3DES 0x0300 | ||
450 | #define OP_PCL_IPSEC_AES_CBC 0x0c00 | ||
451 | #define OP_PCL_IPSEC_AES_CTR 0x0d00 | ||
452 | #define OP_PCL_IPSEC_AES_XTS 0x1600 | ||
453 | #define OP_PCL_IPSEC_AES_CCM8 0x0e00 | ||
454 | #define OP_PCL_IPSEC_AES_CCM12 0x0f00 | ||
455 | #define OP_PCL_IPSEC_AES_CCM16 0x1000 | ||
456 | #define OP_PCL_IPSEC_AES_GCM8 0x1200 | ||
457 | #define OP_PCL_IPSEC_AES_GCM12 0x1300 | ||
458 | #define OP_PCL_IPSEC_AES_GCM16 0x1400 | ||
459 | |||
460 | #define OP_PCL_IPSEC_HMAC_NULL 0x0000 | ||
461 | #define OP_PCL_IPSEC_HMAC_MD5_96 0x0001 | ||
462 | #define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002 | ||
463 | #define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005 | ||
464 | #define OP_PCL_IPSEC_HMAC_MD5_128 0x0006 | ||
465 | #define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007 | ||
466 | #define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c | ||
467 | #define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d | ||
468 | #define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e | ||
469 | |||
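The cipher and auth protinfo halves occupy the low 16 bits of a protocol OPERATION word (per OP_PCL_IPSEC_CIPHER_MASK / OP_PCL_IPSEC_AUTH_MASK above). A hedged sketch (constants copied from above) of a single word asking for ESP encapsulation with AES-CBC + HMAC-SHA1-96:

    #include <stdint.h>

    #define CMD_SHIFT			27
    #define CMD_OPERATION			(0x10u << CMD_SHIFT)
    #define OP_TYPE_SHIFT			24
    #define OP_TYPE_ENCAP_PROTOCOL		(0x07 << OP_TYPE_SHIFT)
    #define OP_PCLID_SHIFT			16
    #define OP_PCLID_IPSEC			(0x01 << OP_PCLID_SHIFT)
    #define OP_PCL_IPSEC_AES_CBC		0x0c00
    #define OP_PCL_IPSEC_HMAC_SHA1_96	0x0002

    /* Protocol OPERATION: IPsec encapsulation, AES-CBC + HMAC-SHA1-96. */
    static uint32_t ipsec_encap_aes_cbc_hmac_sha1(void)
    {
    	return CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_IPSEC |
    	       OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96;
    }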
470 | /* For SRTP - OP_PCLID_SRTP */ | ||
471 | #define OP_PCL_SRTP_CIPHER_MASK 0xff00 | ||
472 | #define OP_PCL_SRTP_AUTH_MASK 0x00ff | ||
473 | |||
474 | #define OP_PCL_SRTP_AES_CTR 0x0d00 | ||
475 | |||
476 | #define OP_PCL_SRTP_HMAC_SHA1_160 0x0007 | ||
477 | |||
478 | /* For SSL 3.0 - OP_PCLID_SSL30 */ | ||
479 | #define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f | ||
480 | #define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030 | ||
481 | #define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031 | ||
482 | #define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032 | ||
483 | #define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033 | ||
484 | #define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034 | ||
485 | #define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c | ||
486 | #define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090 | ||
487 | #define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094 | ||
488 | #define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004 | ||
489 | #define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009 | ||
490 | #define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e | ||
491 | #define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013 | ||
492 | #define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018 | ||
493 | #define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d | ||
494 | #define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e | ||
495 | #define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f | ||
496 | |||
497 | #define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035 | ||
498 | #define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036 | ||
499 | #define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037 | ||
500 | #define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038 | ||
501 | #define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039 | ||
502 | #define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a | ||
503 | #define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d | ||
504 | #define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091 | ||
505 | #define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095 | ||
506 | #define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005 | ||
507 | #define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a | ||
508 | #define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f | ||
509 | #define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014 | ||
510 | #define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019 | ||
511 | #define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020 | ||
512 | #define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021 | ||
513 | #define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022 | ||
514 | |||
515 | #define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023 | ||
516 | |||
517 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f | ||
518 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b | ||
519 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f | ||
520 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093 | ||
521 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a | ||
522 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d | ||
523 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010 | ||
524 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013 | ||
525 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016 | ||
526 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b | ||
527 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003 | ||
528 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008 | ||
529 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d | ||
530 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012 | ||
531 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017 | ||
532 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a | ||
533 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b | ||
534 | #define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c | ||
535 | |||
536 | #define OP_PCL_SSL30_DES40_CBC_MD5 0x0029 | ||
537 | |||
538 | #define OP_PCL_SSL30_DES_CBC_MD5 0x0022 | ||
539 | |||
540 | #define OP_PCL_SSL30_DES40_CBC_SHA 0x0008 | ||
541 | #define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b | ||
542 | #define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e | ||
543 | #define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011 | ||
544 | #define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014 | ||
545 | #define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019 | ||
546 | #define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026 | ||
547 | |||
548 | #define OP_PCL_SSL30_DES_CBC_SHA 0x001e | ||
549 | #define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009 | ||
550 | #define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c | ||
551 | #define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f | ||
552 | #define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012 | ||
553 | #define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015 | ||
554 | #define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a | ||
555 | |||
556 | #define OP_PCL_SSL30_RC4_128_MD5 0x0024 | ||
557 | #define OP_PCL_SSL30_RC4_128_MD5_2 0x0004 | ||
558 | #define OP_PCL_SSL30_RC4_128_MD5_3 0x0018 | ||
559 | |||
560 | #define OP_PCL_SSL30_RC4_40_MD5 0x002b | ||
561 | #define OP_PCL_SSL30_RC4_40_MD5_2 0x0003 | ||
562 | #define OP_PCL_SSL30_RC4_40_MD5_3 0x0017 | ||
563 | |||
564 | #define OP_PCL_SSL30_RC4_128_SHA 0x0020 | ||
565 | #define OP_PCL_SSL30_RC4_128_SHA_2 0x008a | ||
566 | #define OP_PCL_SSL30_RC4_128_SHA_3 0x008e | ||
567 | #define OP_PCL_SSL30_RC4_128_SHA_4 0x0092 | ||
568 | #define OP_PCL_SSL30_RC4_128_SHA_5 0x0005 | ||
569 | #define OP_PCL_SSL30_RC4_128_SHA_6 0xc002 | ||
570 | #define OP_PCL_SSL30_RC4_128_SHA_7 0xc007 | ||
571 | #define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c | ||
572 | #define OP_PCL_SSL30_RC4_128_SHA_9 0xc011 | ||
573 | #define OP_PCL_SSL30_RC4_128_SHA_10 0xc016 | ||
574 | |||
575 | #define OP_PCL_SSL30_RC4_40_SHA 0x0028 | ||
576 | |||
577 | |||
578 | /* For TLS 1.0 - OP_PCLID_TLS10 */ | ||
579 | #define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f | ||
580 | #define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030 | ||
581 | #define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031 | ||
582 | #define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032 | ||
583 | #define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033 | ||
584 | #define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034 | ||
585 | #define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c | ||
586 | #define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090 | ||
587 | #define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094 | ||
588 | #define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004 | ||
589 | #define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009 | ||
590 | #define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e | ||
591 | #define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013 | ||
592 | #define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018 | ||
593 | #define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d | ||
594 | #define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e | ||
595 | #define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f | ||
596 | |||
597 | #define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035 | ||
598 | #define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036 | ||
599 | #define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037 | ||
600 | #define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038 | ||
601 | #define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039 | ||
602 | #define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a | ||
603 | #define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d | ||
604 | #define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091 | ||
605 | #define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095 | ||
606 | #define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005 | ||
607 | #define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a | ||
608 | #define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f | ||
609 | #define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014 | ||
610 | #define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019 | ||
611 | #define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020 | ||
612 | #define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021 | ||
613 | #define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022 | ||
614 | |||
615 | /* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */ | ||
616 | |||
617 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f | ||
618 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b | ||
619 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f | ||
620 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093 | ||
621 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a | ||
622 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d | ||
623 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010 | ||
624 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013 | ||
625 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016 | ||
626 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b | ||
627 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003 | ||
628 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008 | ||
629 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d | ||
630 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012 | ||
631 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017 | ||
632 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a | ||
633 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b | ||
634 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c | ||
635 | |||
636 | #define OP_PCL_TLS10_DES40_CBC_MD5 0x0029 | ||
637 | |||
638 | #define OP_PCL_TLS10_DES_CBC_MD5 0x0022 | ||
639 | |||
640 | #define OP_PCL_TLS10_DES40_CBC_SHA 0x0008 | ||
641 | #define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b | ||
642 | #define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e | ||
643 | #define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011 | ||
644 | #define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014 | ||
645 | #define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019 | ||
646 | #define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026 | ||
647 | |||
648 | |||
649 | #define OP_PCL_TLS10_DES_CBC_SHA 0x001e | ||
650 | #define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009 | ||
651 | #define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c | ||
652 | #define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f | ||
653 | #define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012 | ||
654 | #define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015 | ||
655 | #define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a | ||
656 | |||
657 | #define OP_PCL_TLS10_RC4_128_MD5 0x0024 | ||
658 | #define OP_PCL_TLS10_RC4_128_MD5_2 0x0004 | ||
659 | #define OP_PCL_TLS10_RC4_128_MD5_3 0x0018 | ||
660 | |||
661 | #define OP_PCL_TLS10_RC4_40_MD5 0x002b | ||
662 | #define OP_PCL_TLS10_RC4_40_MD5_2 0x0003 | ||
663 | #define OP_PCL_TLS10_RC4_40_MD5_3 0x0017 | ||
664 | |||
665 | #define OP_PCL_TLS10_RC4_128_SHA 0x0020 | ||
666 | #define OP_PCL_TLS10_RC4_128_SHA_2 0x008a | ||
667 | #define OP_PCL_TLS10_RC4_128_SHA_3 0x008e | ||
668 | #define OP_PCL_TLS10_RC4_128_SHA_4 0x0092 | ||
669 | #define OP_PCL_TLS10_RC4_128_SHA_5 0x0005 | ||
670 | #define OP_PCL_TLS10_RC4_128_SHA_6 0xc002 | ||
671 | #define OP_PCL_TLS10_RC4_128_SHA_7 0xc007 | ||
672 | #define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c | ||
673 | #define OP_PCL_TLS10_RC4_128_SHA_9 0xc011 | ||
674 | #define OP_PCL_TLS10_RC4_128_SHA_10 0xc016 | ||
675 | |||
676 | #define OP_PCL_TLS10_RC4_40_SHA 0x0028 | ||
677 | |||
678 | #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23 | ||
679 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30 | ||
680 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34 | ||
681 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36 | ||
682 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33 | ||
683 | #define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35 | ||
684 | #define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80 | ||
685 | #define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84 | ||
686 | #define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86 | ||
687 | #define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83 | ||
688 | #define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85 | ||
689 | #define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20 | ||
690 | #define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24 | ||
691 | #define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26 | ||
692 | #define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23 | ||
693 | #define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25 | ||
694 | #define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60 | ||
695 | #define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64 | ||
696 | #define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66 | ||
697 | #define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63 | ||
698 | #define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65 | ||
699 | |||
700 | |||
701 | |||
702 | /* For TLS 1.1 - OP_PCLID_TLS11 */ | ||
703 | #define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f | ||
704 | #define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030 | ||
705 | #define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031 | ||
706 | #define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032 | ||
707 | #define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033 | ||
708 | #define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034 | ||
709 | #define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c | ||
710 | #define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090 | ||
711 | #define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094 | ||
712 | #define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004 | ||
713 | #define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009 | ||
714 | #define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e | ||
715 | #define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013 | ||
716 | #define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018 | ||
717 | #define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d | ||
718 | #define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e | ||
719 | #define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f | ||
720 | |||
721 | #define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035 | ||
722 | #define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036 | ||
723 | #define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037 | ||
724 | #define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038 | ||
725 | #define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039 | ||
726 | #define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a | ||
727 | #define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d | ||
728 | #define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091 | ||
729 | #define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095 | ||
730 | #define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005 | ||
731 | #define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a | ||
732 | #define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f | ||
733 | #define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014 | ||
734 | #define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019 | ||
735 | #define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020 | ||
736 | #define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021 | ||
737 | #define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022 | ||
738 | |||
739 | /* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */ | ||
740 | |||
741 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f | ||
742 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b | ||
743 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f | ||
744 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093 | ||
745 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a | ||
746 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d | ||
747 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010 | ||
748 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013 | ||
749 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016 | ||
750 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b | ||
751 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003 | ||
752 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008 | ||
753 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d | ||
754 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012 | ||
755 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017 | ||
756 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a | ||
757 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b | ||
758 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c | ||
759 | |||
760 | #define OP_PCL_TLS11_DES40_CBC_MD5 0x0029 | ||
761 | |||
762 | #define OP_PCL_TLS11_DES_CBC_MD5 0x0022 | ||
763 | |||
764 | #define OP_PCL_TLS11_DES40_CBC_SHA 0x0008 | ||
765 | #define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b | ||
766 | #define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e | ||
767 | #define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011 | ||
768 | #define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014 | ||
769 | #define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019 | ||
770 | #define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026 | ||
771 | |||
772 | #define OP_PCL_TLS11_DES_CBC_SHA 0x001e | ||
773 | #define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009 | ||
774 | #define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c | ||
775 | #define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f | ||
776 | #define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012 | ||
777 | #define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015 | ||
778 | #define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a | ||
779 | |||
780 | #define OP_PCL_TLS11_RC4_128_MD5 0x0024 | ||
781 | #define OP_PCL_TLS11_RC4_128_MD5_2 0x0004 | ||
782 | #define OP_PCL_TLS11_RC4_128_MD5_3 0x0018 | ||
783 | |||
784 | #define OP_PCL_TLS11_RC4_40_MD5 0x002b | ||
785 | #define OP_PCL_TLS11_RC4_40_MD5_2 0x0003 | ||
786 | #define OP_PCL_TLS11_RC4_40_MD5_3 0x0017 | ||
787 | |||
788 | #define OP_PCL_TLS11_RC4_128_SHA 0x0020 | ||
789 | #define OP_PCL_TLS11_RC4_128_SHA_2 0x008a | ||
790 | #define OP_PCL_TLS11_RC4_128_SHA_3 0x008e | ||
791 | #define OP_PCL_TLS11_RC4_128_SHA_4 0x0092 | ||
792 | #define OP_PCL_TLS11_RC4_128_SHA_5 0x0005 | ||
793 | #define OP_PCL_TLS11_RC4_128_SHA_6 0xc002 | ||
794 | #define OP_PCL_TLS11_RC4_128_SHA_7 0xc007 | ||
795 | #define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c | ||
796 | #define OP_PCL_TLS11_RC4_128_SHA_9 0xc011 | ||
797 | #define OP_PCL_TLS11_RC4_128_SHA_10 0xc016 | ||
798 | |||
799 | #define OP_PCL_TLS11_RC4_40_SHA 0x0028 | ||
800 | |||
801 | #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23 | ||
802 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30 | ||
803 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34 | ||
804 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36 | ||
805 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33 | ||
806 | #define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35 | ||
807 | #define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80 | ||
808 | #define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84 | ||
809 | #define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86 | ||
810 | #define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83 | ||
811 | #define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85 | ||
812 | #define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20 | ||
813 | #define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24 | ||
814 | #define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26 | ||
815 | #define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23 | ||
816 | #define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25 | ||
817 | #define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60 | ||
818 | #define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64 | ||
819 | #define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66 | ||
820 | #define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63 | ||
821 | #define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65 | ||
822 | |||
823 | |||
824 | /* For TLS 1.2 - OP_PCLID_TLS12 */ | ||
825 | #define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f | ||
826 | #define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030 | ||
827 | #define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031 | ||
828 | #define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032 | ||
829 | #define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033 | ||
830 | #define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034 | ||
831 | #define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c | ||
832 | #define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090 | ||
833 | #define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094 | ||
834 | #define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004 | ||
835 | #define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009 | ||
836 | #define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e | ||
837 | #define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013 | ||
838 | #define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018 | ||
839 | #define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d | ||
840 | #define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e | ||
841 | #define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f | ||
842 | |||
843 | #define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035 | ||
844 | #define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036 | ||
845 | #define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037 | ||
846 | #define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038 | ||
847 | #define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039 | ||
848 | #define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a | ||
849 | #define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d | ||
850 | #define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091 | ||
851 | #define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095 | ||
852 | #define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005 | ||
853 | #define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a | ||
854 | #define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f | ||
855 | #define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014 | ||
856 | #define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019 | ||
857 | #define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020 | ||
858 | #define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021 | ||
859 | #define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022 | ||
860 | |||
861 | /* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */ | ||
862 | |||
863 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f | ||
864 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b | ||
865 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f | ||
866 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093 | ||
867 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a | ||
868 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d | ||
869 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010 | ||
870 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013 | ||
871 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016 | ||
872 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b | ||
873 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003 | ||
874 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008 | ||
875 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d | ||
876 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012 | ||
877 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017 | ||
878 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a | ||
879 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b | ||
880 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c | ||
881 | |||
882 | #define OP_PCL_TLS12_DES40_CBC_MD5 0x0029 | ||
883 | |||
884 | #define OP_PCL_TLS12_DES_CBC_MD5 0x0022 | ||
885 | |||
886 | #define OP_PCL_TLS12_DES40_CBC_SHA 0x0008 | ||
887 | #define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b | ||
888 | #define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e | ||
889 | #define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011 | ||
890 | #define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014 | ||
891 | #define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019 | ||
892 | #define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026 | ||
893 | |||
894 | #define OP_PCL_TLS12_DES_CBC_SHA 0x001e | ||
895 | #define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009 | ||
896 | #define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c | ||
897 | #define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f | ||
898 | #define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012 | ||
899 | #define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015 | ||
900 | #define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a | ||
901 | |||
902 | #define OP_PCL_TLS12_RC4_128_MD5 0x0024 | ||
903 | #define OP_PCL_TLS12_RC4_128_MD5_2 0x0004 | ||
904 | #define OP_PCL_TLS12_RC4_128_MD5_3 0x0018 | ||
905 | |||
906 | #define OP_PCL_TLS12_RC4_40_MD5 0x002b | ||
907 | #define OP_PCL_TLS12_RC4_40_MD5_2 0x0003 | ||
908 | #define OP_PCL_TLS12_RC4_40_MD5_3 0x0017 | ||
909 | |||
910 | #define OP_PCL_TLS12_RC4_128_SHA 0x0020 | ||
911 | #define OP_PCL_TLS12_RC4_128_SHA_2 0x008a | ||
912 | #define OP_PCL_TLS12_RC4_128_SHA_3 0x008e | ||
913 | #define OP_PCL_TLS12_RC4_128_SHA_4 0x0092 | ||
914 | #define OP_PCL_TLS12_RC4_128_SHA_5 0x0005 | ||
915 | #define OP_PCL_TLS12_RC4_128_SHA_6 0xc002 | ||
916 | #define OP_PCL_TLS12_RC4_128_SHA_7 0xc007 | ||
917 | #define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c | ||
918 | #define OP_PCL_TLS12_RC4_128_SHA_9 0xc011 | ||
919 | #define OP_PCL_TLS12_RC4_128_SHA_10 0xc016 | ||
920 | |||
921 | #define OP_PCL_TLS12_RC4_40_SHA 0x0028 | ||
922 | |||
923 | /* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */ | ||
924 | #define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e | ||
925 | #define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f | ||
926 | #define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040 | ||
927 | #define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067 | ||
928 | #define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c | ||
929 | |||
930 | /* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */ | ||
931 | #define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068 | ||
932 | #define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069 | ||
933 | #define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a | ||
934 | #define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b | ||
935 | #define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d | ||
936 | |||
937 | /* AEAD_AES_xxx_CCM/GCM remain to be defined... */ | ||
938 | |||
939 | #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23 | ||
940 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30 | ||
941 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34 | ||
942 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36 | ||
943 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33 | ||
944 | #define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35 | ||
945 | #define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80 | ||
946 | #define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84 | ||
947 | #define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86 | ||
948 | #define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83 | ||
949 | #define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85 | ||
950 | #define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20 | ||
951 | #define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24 | ||
952 | #define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26 | ||
953 | #define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23 | ||
954 | #define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25 | ||
955 | #define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60 | ||
956 | #define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64 | ||
957 | #define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66 | ||
958 | #define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63 | ||
959 | #define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65 | ||
960 | |||
961 | /* For DTLS - OP_PCLID_DTLS */ | ||
962 | |||
963 | #define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f | ||
964 | #define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030 | ||
965 | #define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031 | ||
966 | #define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032 | ||
967 | #define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033 | ||
968 | #define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034 | ||
969 | #define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c | ||
970 | #define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090 | ||
971 | #define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094 | ||
972 | #define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004 | ||
973 | #define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009 | ||
974 | #define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e | ||
975 | #define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013 | ||
976 | #define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018 | ||
977 | #define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d | ||
978 | #define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e | ||
979 | #define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f | ||
980 | |||
981 | #define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035 | ||
982 | #define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036 | ||
983 | #define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037 | ||
984 | #define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038 | ||
985 | #define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039 | ||
986 | #define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a | ||
987 | #define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d | ||
988 | #define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091 | ||
989 | #define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095 | ||
990 | #define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005 | ||
991 | #define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a | ||
992 | #define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f | ||
993 | #define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014 | ||
994 | #define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019 | ||
995 | #define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020 | ||
996 | #define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021 | ||
997 | #define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022 | ||
998 | |||
999 | /* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */ | ||
1000 | |||
1001 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f | ||
1002 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b | ||
1003 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f | ||
1004 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093 | ||
1005 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a | ||
1006 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d | ||
1007 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010 | ||
1008 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013 | ||
1009 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016 | ||
1010 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b | ||
1011 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003 | ||
1012 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008 | ||
1013 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d | ||
1014 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012 | ||
1015 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017 | ||
1016 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a | ||
1017 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b | ||
1018 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c | ||
1019 | |||
1020 | #define OP_PCL_DTLS_DES40_CBC_MD5 0x0029 | ||
1021 | |||
1022 | #define OP_PCL_DTLS_DES_CBC_MD5 0x0022 | ||
1023 | |||
1024 | #define OP_PCL_DTLS_DES40_CBC_SHA 0x0008 | ||
1025 | #define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b | ||
1026 | #define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e | ||
1027 | #define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011 | ||
1028 | #define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014 | ||
1029 | #define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019 | ||
1030 | #define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026 | ||
1031 | |||
1032 | |||
1033 | #define OP_PCL_DTLS_DES_CBC_SHA 0x001e | ||
1034 | #define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009 | ||
1035 | #define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c | ||
1036 | #define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f | ||
1037 | #define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012 | ||
1038 | #define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015 | ||
1039 | #define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a | ||
1040 | |||
1041 | |||
1042 | #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23 | ||
1043 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30 | ||
1044 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34 | ||
1045 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36 | ||
1046 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33 | ||
1047 | #define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35 | ||
1048 | #define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80 | ||
1049 | #define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84 | ||
1050 | #define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86 | ||
1051 | #define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83 | ||
1052 | #define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85 | ||
1053 | #define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20 | ||
1054 | #define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24 | ||
1055 | #define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26 | ||
1056 | #define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23 | ||
1057 | #define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25 | ||
1058 | #define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60 | ||
1059 | #define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64 | ||
1060 | #define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66 | ||
1061 | #define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63 | ||
1062 | #define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65 | ||
1063 | |||
1064 | /* 802.16 WiMAX protinfos */ | ||
1065 | #define OP_PCL_WIMAX_OFDM 0x0201 | ||
1066 | #define OP_PCL_WIMAX_OFDMA 0x0231 | ||
1067 | |||
1068 | /* 802.11 WiFi protinfos */ | ||
1069 | #define OP_PCL_WIFI 0xac04 | ||
1070 | |||
1071 | /* MacSec protinfos */ | ||
1072 | #define OP_PCL_MACSEC 0x0001 | ||
1073 | |||
1074 | /* PKI unidirectional protocol protinfo bits */ | ||
1075 | #define OP_PCL_PKPROT_TEST 0x0008 | ||
1076 | #define OP_PCL_PKPROT_DECRYPT 0x0004 | ||
1077 | #define OP_PCL_PKPROT_ECC 0x0002 | ||
1078 | #define OP_PCL_PKPROT_F2M 0x0001 | ||
1079 | |||
1080 | /* For non-protocol/alg-only op commands */ | ||
1081 | #define OP_ALG_TYPE_SHIFT 24 | ||
1082 | #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT) | ||
1083 | #define OP_ALG_TYPE_CLASS1 2 | ||
1084 | #define OP_ALG_TYPE_CLASS2 4 | ||
1085 | |||
1086 | #define OP_ALG_ALGSEL_SHIFT 16 | ||
1087 | #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) | ||
1088 | #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT) | ||
1089 | #define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT) | ||
1090 | #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) | ||
1091 | #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) | ||
1092 | #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) | ||
1093 | #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) | ||
1094 | #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) | ||
1095 | #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) | ||
1096 | #define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT) | ||
1097 | #define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT) | ||
1098 | #define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT) | ||
1099 | #define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT) | ||
1100 | #define OP_ALG_ALGSEL_SNOW (0x60 << OP_ALG_ALGSEL_SHIFT) | ||
1101 | #define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT) | ||
1102 | #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT) | ||
1103 | #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT) | ||
1104 | #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT) | ||
1105 | |||
1106 | #define OP_ALG_AAI_SHIFT 4 | ||
1107 | #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT) | ||
1108 | |||
1109 | /* blockcipher AAI set */ | ||
1110 | #define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT) | ||
1111 | #define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT) | ||
1112 | #define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT) | ||
1113 | #define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT) | ||
1114 | #define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT) | ||
1115 | #define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT) | ||
1116 | #define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT) | ||
1117 | #define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT) | ||
1118 | #define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT) | ||
1119 | #define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT) | ||
1120 | #define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT) | ||
1121 | #define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT) | ||
1122 | #define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT) | ||
1123 | #define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT) | ||
1124 | #define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT) | ||
1125 | #define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT) | ||
1126 | #define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT) | ||
1127 | #define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT) | ||
1128 | #define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT) | ||
1129 | #define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT) | ||
1130 | #define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT) | ||
1131 | #define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT) | ||
1132 | #define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT) | ||
1133 | #define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT) | ||
1134 | #define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT) | ||
1135 | #define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT) | ||
1136 | #define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT) | ||
1137 | #define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT) | ||
1138 | #define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT) | ||
1139 | |||
1140 | /* randomizer AAI set */ | ||
1141 | #define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) | ||
1142 | #define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT) | ||
1143 | #define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT) | ||
1144 | |||
1145 | /* hmac/smac AAI set */ | ||
1146 | #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) | ||
1147 | #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT) | ||
1148 | #define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT) | ||
1149 | #define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT) | ||
1150 | |||
1151 | /* CRC AAI set */ | ||
1152 | #define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT) | ||
1153 | #define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT) | ||
1154 | #define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT) | ||
1155 | #define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT) | ||
1156 | #define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT) | ||
1157 | #define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT) | ||
1158 | |||
1159 | /* Kasumi/SNOW AAI set */ | ||
1160 | #define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT) | ||
1161 | #define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT) | ||
1162 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) | ||
1163 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) | ||
1164 | |||
1165 | |||
1166 | #define OP_ALG_AS_SHIFT 2 | ||
1167 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) | ||
1168 | #define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) | ||
1169 | #define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT) | ||
1170 | #define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT) | ||
1171 | #define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT) | ||
1172 | |||
1173 | #define OP_ALG_ICV_SHIFT 1 | ||
1174 | #define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT) | ||
1175 | #define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT) | ||
1176 | #define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT) | ||
1177 | |||
1178 | #define OP_ALG_DIR_SHIFT 0 | ||
1179 | #define OP_ALG_DIR_MASK 1 | ||
1180 | #define OP_ALG_DECRYPT 0 | ||
1181 | #define OP_ALG_ENCRYPT 1 | ||
1182 | |||
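These OP_ALG fields are OR'd together to form the algorithm half of an OPERATION command: an algorithm selector, the additional algorithm information (AAI), the algorithm state, ICV checking, and the cipher direction. A minimal sketch, assuming desc.h is included and that the OP_TYPE bits selecting a class 1 algorithm operation (defined earlier in this header) are OR'd in by the caller:

    /* Sketch: OP_ALG fields for one-shot AES-CBC encryption, no ICV check */
    u32 op_alg = OP_ALG_ALGSEL_AES |    /* algorithm selector: AES */
                 OP_ALG_AAI_CBC |       /* AAI: CBC mode */
                 OP_ALG_AS_INITFINAL |  /* init and finalize in one pass */
                 OP_ALG_ICV_OFF |       /* no ICV comparison */
                 OP_ALG_ENCRYPT;        /* direction: encrypt */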
1183 | /* PKHA algorithm type set */ | ||
1184 | #define OP_ALG_PK 0x00800000 | ||
1185 | #define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */ | ||
1186 | |||
1187 | /* PKHA mode clear memory functions */ | ||
1188 | #define OP_ALG_PKMODE_A_RAM 0x80000 | ||
1189 | #define OP_ALG_PKMODE_B_RAM 0x40000 | ||
1190 | #define OP_ALG_PKMODE_E_RAM 0x20000 | ||
1191 | #define OP_ALG_PKMODE_N_RAM 0x10000 | ||
1192 | #define OP_ALG_PKMODE_CLEARMEM 0x00001 | ||
1193 | |||
1194 | /* PKHA mode modular-arithmetic functions */ | ||
1195 | #define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000 | ||
1196 | #define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000 | ||
1197 | #define OP_ALG_PKMODE_MOD_F2M 0x20000 | ||
1198 | #define OP_ALG_PKMODE_MOD_R2_IN 0x10000 | ||
1199 | #define OP_ALG_PKMODE_PRJECTV 0x00800 | ||
1200 | #define OP_ALG_PKMODE_TIME_EQ 0x400 | ||
1201 | #define OP_ALG_PKMODE_OUT_B 0x000 | ||
1202 | #define OP_ALG_PKMODE_OUT_A 0x100 | ||
1203 | #define OP_ALG_PKMODE_MOD_ADD 0x002 | ||
1204 | #define OP_ALG_PKMODE_MOD_SUB_AB 0x003 | ||
1205 | #define OP_ALG_PKMODE_MOD_SUB_BA 0x004 | ||
1206 | #define OP_ALG_PKMODE_MOD_MULT 0x005 | ||
1207 | #define OP_ALG_PKMODE_MOD_EXPO 0x006 | ||
1208 | #define OP_ALG_PKMODE_MOD_REDUCT 0x007 | ||
1209 | #define OP_ALG_PKMODE_MOD_INV 0x008 | ||
1210 | #define OP_ALG_PKMODE_MOD_ECC_ADD 0x009 | ||
1211 | #define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a | ||
1212 | #define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b | ||
1213 | #define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c | ||
1214 | #define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d | ||
1215 | #define OP_ALG_PKMODE_MOD_GCD 0x00e | ||
1216 | #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f | ||
1217 | |||
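A PKHA modular-arithmetic operation combines OP_ALG_PK with one function code and any mode flags from the set above. A sketch, assuming the operands were already loaded into the PKHA A, E, and N registers:

    /* Sketch: modular exponentiation, result deposited in PKHA register B.
     * OP_ALG_PKMODE_OUT_B is the default (value 0) and is shown only for
     * clarity.
     */
    u32 pk_op = OP_ALG_PK |               /* PKHA algorithm type */
                OP_ALG_PKMODE_MOD_EXPO |  /* function: A^E mod N */
                OP_ALG_PKMODE_OUT_B;      /* result register: B */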
1218 | /* PKHA mode copy-memory functions */ | ||
1219 | #define OP_ALG_PKMODE_SRC_REG_SHIFT 13 | ||
1220 | #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT) | ||
1221 | #define OP_ALG_PKMODE_DST_REG_SHIFT 10 | ||
1222 | #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT) | ||
1223 | #define OP_ALG_PKMODE_SRC_SEG_SHIFT 8 | ||
1224 | #define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT) | ||
1225 | #define OP_ALG_PKMODE_DST_SEG_SHIFT 6 | ||
1226 | #define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT) | ||
1227 | |||
1228 | #define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT) | ||
1229 | #define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT) | ||
1230 | #define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT) | ||
1231 | #define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT) | ||
1232 | #define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT) | ||
1233 | #define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT) | ||
1234 | #define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT) | ||
1235 | #define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT) | ||
1236 | #define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT) | ||
1237 | #define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT) | ||
1238 | #define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT) | ||
1239 | #define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT) | ||
1240 | #define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT) | ||
1241 | #define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT) | ||
1242 | #define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT) | ||
1243 | #define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80 | ||
1244 | #define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81 | ||
1245 | |||
1246 | /* | ||
1247 | * SEQ_IN_PTR Command Constructs | ||
1248 | */ | ||
1249 | |||
1250 | /* Release Buffers */ | ||
1251 | #define SQIN_RBS 0x04000000 | ||
1252 | |||
1253 | /* Sequence pointer is really a descriptor */ | ||
1254 | #define SQIN_INL 0x02000000 | ||
1255 | |||
1256 | /* Sequence pointer is a scatter-gather table */ | ||
1257 | #define SQIN_SGF 0x01000000 | ||
1258 | |||
1259 | /* Appends to a previous pointer */ | ||
1260 | #define SQIN_PRE 0x00800000 | ||
1261 | |||
1262 | /* Use extended length following pointer */ | ||
1263 | #define SQIN_EXT 0x00400000 | ||
1264 | |||
1265 | /* Restore sequence with pointer/length */ | ||
1266 | #define SQIN_RTO 0x00200000 | ||
1267 | |||
1268 | /* Replace job descriptor */ | ||
1269 | #define SQIN_RJD 0x00100000 | ||
1270 | |||
1271 | #define SQIN_LEN_SHIFT 0 | ||
1272 | #define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT) | ||
1273 | |||
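The SQIN flags occupy the upper bits of the SEQ_IN_PTR command word and the 16-bit byte count sits in the low bits; the pointer itself follows in the next word(s). A sketch, assuming CMD_SEQ_IN_PTR from the command-type section earlier in this header:

    /* Sketch: SEQ_IN_PTR command word for a 1500-byte input sequence
     * described by a scatter-gather table.
     */
    u32 seq_in_cmd = CMD_SEQ_IN_PTR |
                     SQIN_SGF |   /* pointer is a scatter-gather table */
                     ((1500 << SQIN_LEN_SHIFT) & SQIN_LEN_MASK);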
1274 | /* | ||
1275 | * SEQ_OUT_PTR Command Constructs | ||
1276 | */ | ||
1277 | |||
1278 | /* Sequence pointer is a scatter-gather table */ | ||
1279 | #define SQOUT_SGF 0x01000000 | ||
1280 | |||
1281 | /* Appends to a previous pointer */ | ||
1282 | #define SQOUT_PRE 0x00800000 | ||
1283 | |||
1284 | /* Restore sequence with pointer/length */ | ||
1285 | #define SQOUT_RTO 0x00200000 | ||
1286 | |||
1287 | /* Use extended length following pointer */ | ||
1288 | #define SQOUT_EXT 0x00400000 | ||
1289 | |||
1290 | #define SQOUT_LEN_SHIFT 0 | ||
1291 | #define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT) | ||
1292 | |||
1293 | |||
1294 | /* | ||
1295 | * SIGNATURE Command Constructs | ||
1296 | */ | ||
1297 | |||
1298 | /* TYPE field is all that's relevant */ | ||
1299 | #define SIGN_TYPE_SHIFT 16 | ||
1300 | #define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT) | ||
1301 | |||
1302 | #define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT) | ||
1303 | #define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT) | ||
1304 | #define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT) | ||
1305 | #define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT) | ||
1306 | #define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT) | ||
1307 | #define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT) | ||
1308 | |||
1309 | /* | ||
1310 | * MOVE Command Constructs | ||
1311 | */ | ||
1312 | |||
1313 | #define MOVE_AUX_SHIFT 25 | ||
1314 | #define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT) | ||
1315 | #define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT) | ||
1316 | #define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT) | ||
1317 | |||
1318 | #define MOVE_WAITCOMP_SHIFT 24 | ||
1319 | #define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT) | ||
1320 | #define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT) | ||
1321 | |||
1322 | #define MOVE_SRC_SHIFT 20 | ||
1323 | #define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT) | ||
1324 | #define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT) | ||
1325 | #define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT) | ||
1326 | #define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT) | ||
1327 | #define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT) | ||
1328 | #define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT) | ||
1329 | #define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT) | ||
1330 | #define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT) | ||
1331 | #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT) | ||
1332 | #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT) | ||
1333 | #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT) | ||
1334 | |||
1335 | #define MOVE_DEST_SHIFT 16 | ||
1336 | #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT) | ||
1337 | #define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT) | ||
1338 | #define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT) | ||
1339 | #define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT) | ||
1340 | #define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT) | ||
1341 | #define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT) | ||
1342 | #define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT) | ||
1343 | #define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT) | ||
1344 | #define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT) | ||
1345 | #define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT) | ||
1346 | #define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT) | ||
1347 | #define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT) | ||
1348 | #define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT) | ||
1349 | #define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT) | ||
1350 | |||
1351 | #define MOVE_OFFSET_SHIFT 8 | ||
1352 | #define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT) | ||
1353 | |||
1354 | #define MOVE_LEN_SHIFT 0 | ||
1355 | #define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT) | ||
1356 | |||
1357 | #define MOVELEN_MRSEL_SHIFT 0 | ||
1358 | #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT) | ||
1359 | |||
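A MOVE command word packs a source, a destination, a byte offset, and a byte length into the fields above. A sketch of the option bits (the CMD_MOVE type bits are added by append_move() in desc_constr.h, introduced later in this patch):

    /* Sketch: move 16 bytes from MATH register 0 into the descriptor
     * buffer at byte offset 4, waiting for completion first.
     */
    u32 move_opts = MOVE_WAITCOMP |
                    MOVE_SRC_MATH0 |
                    MOVE_DEST_DESCBUF |
                    (4 << MOVE_OFFSET_SHIFT) |
                    (16 << MOVE_LEN_SHIFT);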
1360 | /* | ||
1361 | * MATH Command Constructs | ||
1362 | */ | ||
1363 | |||
1364 | #define MATH_IFB_SHIFT 26 | ||
1365 | #define MATH_IFB_MASK (1 << MATH_IFB_SHIFT) | ||
1366 | #define MATH_IFB (1 << MATH_IFB_SHIFT) | ||
1367 | |||
1368 | #define MATH_NFU_SHIFT 25 | ||
1369 | #define MATH_NFU_MASK (1 << MATH_NFU_SHIFT) | ||
1370 | #define MATH_NFU (1 << MATH_NFU_SHIFT) | ||
1371 | |||
1372 | #define MATH_STL_SHIFT 24 | ||
1373 | #define MATH_STL_MASK (1 << MATH_STL_SHIFT) | ||
1374 | #define MATH_STL (1 << MATH_STL_SHIFT) | ||
1375 | |||
1376 | /* Function selectors */ | ||
1377 | #define MATH_FUN_SHIFT 20 | ||
1378 | #define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT) | ||
1379 | #define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT) | ||
1380 | #define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT) | ||
1381 | #define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT) | ||
1382 | #define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT) | ||
1383 | #define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT) | ||
1384 | #define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT) | ||
1385 | #define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT) | ||
1386 | #define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT) | ||
1387 | #define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT) | ||
1388 | #define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT) | ||
1389 | #define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT) | ||
1390 | |||
1391 | /* Source 0 selectors */ | ||
1392 | #define MATH_SRC0_SHIFT 16 | ||
1393 | #define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT) | ||
1394 | #define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT) | ||
1395 | #define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT) | ||
1396 | #define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT) | ||
1397 | #define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT) | ||
1398 | #define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT) | ||
1399 | #define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT) | ||
1400 | #define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT) | ||
1401 | #define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT) | ||
1402 | #define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT) | ||
1403 | #define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT) | ||
1404 | |||
1405 | /* Source 1 selectors */ | ||
1406 | #define MATH_SRC1_SHIFT 12 | ||
1407 | #define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT) | ||
1408 | #define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT) | ||
1409 | #define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT) | ||
1410 | #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) | ||
1411 | #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) | ||
1412 | #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) | ||
1413 | #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT) | ||
1414 | #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT) | ||
1415 | #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT) | ||
1416 | |||
1417 | /* Destination selectors */ | ||
1418 | #define MATH_DEST_SHIFT 8 | ||
1419 | #define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT) | ||
1420 | #define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT) | ||
1421 | #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) | ||
1422 | #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) | ||
1423 | #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) | ||
1424 | #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) | ||
1425 | #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) | ||
1426 | #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) | ||
1427 | #define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT) | ||
1428 | #define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT) | ||
1429 | |||
1430 | /* Length selectors */ | ||
1431 | #define MATH_LEN_SHIFT 0 | ||
1432 | #define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT) | ||
1433 | #define MATH_LEN_1BYTE 0x01 | ||
1434 | #define MATH_LEN_2BYTE 0x02 | ||
1435 | #define MATH_LEN_4BYTE 0x04 | ||
1436 | #define MATH_LEN_8BYTE 0x08 | ||
1437 | |||
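A MATH command word names a function, two sources, a destination, and an operand length. A sketch of a common idiom, deriving a variable sequence length from the remaining input length (with MATH_SRC1_IMM, the 4-byte immediate follows the command word):

    /* Sketch: VARSEQINLEN = SEQINLEN - imm, on 4-byte operands */
    u32 math_opts = MATH_FUN_SUB |
                    MATH_SRC0_SEQINLEN |
                    MATH_SRC1_IMM |
                    MATH_DEST_VARSEQINLEN |
                    MATH_LEN_4BYTE;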
1438 | /* | ||
1439 | * JUMP Command Constructs | ||
1440 | */ | ||
1441 | |||
1442 | #define JUMP_CLASS_SHIFT 25 | ||
1443 | #define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT) | ||
1444 | #define JUMP_CLASS_NONE 0 | ||
1445 | #define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT) | ||
1446 | #define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT) | ||
1447 | #define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT) | ||
1448 | |||
1449 | #define JUMP_JSL_SHIFT 24 | ||
1450 | #define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT) | ||
1451 | #define JUMP_JSL (1 << JUMP_JSL_SHIFT) | ||
1452 | |||
1453 | #define JUMP_TYPE_SHIFT 22 | ||
1454 | #define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT) | ||
1455 | #define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT) | ||
1456 | #define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT) | ||
1457 | #define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT) | ||
1458 | #define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT) | ||
1459 | |||
1460 | #define JUMP_TEST_SHIFT 16 | ||
1461 | #define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT) | ||
1462 | #define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT) | ||
1463 | #define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT) | ||
1464 | #define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT) | ||
1465 | #define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT) | ||
1466 | |||
1467 | /* Condition codes. JSL bit is factored in */ | ||
1468 | #define JUMP_COND_SHIFT 8 | ||
1469 | #define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT) | ||
1470 | #define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT) | ||
1471 | #define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT) | ||
1472 | #define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT) | ||
1473 | #define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT) | ||
1474 | #define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT) | ||
1475 | #define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT) | ||
1476 | #define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT) | ||
1477 | |||
1478 | #define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL) | ||
1479 | #define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL) | ||
1480 | #define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL) | ||
1481 | #define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL) | ||
1482 | #define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL) | ||
1483 | #define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL) | ||
1484 | #define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL) | ||
1485 | #define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL) | ||
1486 | |||
1487 | #define JUMP_OFFSET_SHIFT 0 | ||
1488 | #define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT) | ||
1489 | |||
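A JUMP condition combines a test type (all/any, plain or inverted) with one or more condition codes; the 8-bit offset is relative to the JUMP command itself and is typically patched in afterwards (see set_jump_tgt_here() in desc_constr.h below). A sketch of a local jump taken while the shared descriptor is still being loaded; JUMP_TYPE_LOCAL and JUMP_TEST_ALL are both zero and appear only for readability:

    /* Sketch: jump while the shared descriptor has not finished loading.
     * JUMP_COND_SHRD already factors in the JSL bit, per the note above.
     */
    u32 jump_opts = JUMP_TYPE_LOCAL | JUMP_TEST_ALL | JUMP_COND_SHRD;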
1490 | /* | ||
1491 | * NFIFO ENTRY | ||
1492 | * Data Constructs | ||
1493 | * | ||
1494 | */ | ||
1495 | #define NFIFOENTRY_DEST_SHIFT 30 | ||
1496 | #define NFIFOENTRY_DEST_MASK (3 << NFIFOENTRY_DEST_SHIFT) | ||
1497 | #define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT) | ||
1498 | #define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT) | ||
1499 | #define NFIFOENTRY_DEST_CLASS2 (2 << NFIFOENTRY_DEST_SHIFT) | ||
1500 | #define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT) | ||
1501 | |||
1502 | #define NFIFOENTRY_LC2_SHIFT 29 | ||
1503 | #define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT) | ||
1504 | #define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT) | ||
1505 | |||
1506 | #define NFIFOENTRY_LC1_SHIFT 28 | ||
1507 | #define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT) | ||
1508 | #define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT) | ||
1509 | |||
1510 | #define NFIFOENTRY_FC2_SHIFT 27 | ||
1511 | #define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT) | ||
1512 | #define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT) | ||
1513 | |||
1514 | #define NFIFOENTRY_FC1_SHIFT 26 | ||
1515 | #define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT) | ||
1516 | #define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT) | ||
1517 | |||
1518 | #define NFIFOENTRY_STYPE_SHIFT 24 | ||
1519 | #define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT) | ||
1520 | #define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT) | ||
1521 | #define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT) | ||
1522 | #define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT) | ||
1523 | #define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT) | ||
1524 | |||
1525 | #define NFIFOENTRY_DTYPE_SHIFT 20 | ||
1526 | #define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT) | ||
1527 | |||
1528 | #define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT) | ||
1529 | #define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT) | ||
1530 | #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT) | ||
1531 | #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT) | ||
1532 | #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT) | ||
1533 | #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT) | ||
1534 | #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT) | ||
1535 | |||
1536 | #define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT) | ||
1537 | #define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT) | ||
1538 | #define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT) | ||
1539 | #define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT) | ||
1540 | #define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT) | ||
1541 | #define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT) | ||
1542 | #define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT) | ||
1543 | #define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT) | ||
1544 | #define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT) | ||
1545 | #define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT) | ||
1546 | #define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT) | ||
1547 | #define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT) | ||
1548 | |||
1549 | |||
1550 | #define NFIFOENTRY_BND_SHIFT 19 | ||
1551 | #define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT) | ||
1552 | #define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT) | ||
1553 | |||
1554 | #define NFIFOENTRY_PTYPE_SHIFT 16 | ||
1555 | #define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT) | ||
1556 | |||
1557 | #define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT) | ||
1558 | #define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT) | ||
1559 | #define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT) | ||
1560 | #define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT) | ||
1561 | #define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT) | ||
1562 | #define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT) | ||
1563 | #define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT) | ||
1564 | #define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT) | ||
1565 | |||
1566 | #define NFIFOENTRY_OC_SHIFT 15 | ||
1567 | #define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT) | ||
1568 | #define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT) | ||
1569 | |||
1570 | #define NFIFOENTRY_AST_SHIFT 14 | ||
1571 | #define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT) | ||
1572 | #define NFIFOENTRY_AST (1 << NFIFOENTRY_AST_SHIFT) | ||
1573 | |||
1574 | #define NFIFOENTRY_BM_SHIFT 11 | ||
1575 | #define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT) | ||
1576 | #define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT) | ||
1577 | |||
1578 | #define NFIFOENTRY_PS_SHIFT 10 | ||
1579 | #define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT) | ||
1580 | #define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT) | ||
1581 | |||
1582 | |||
1583 | #define NFIFOENTRY_DLEN_SHIFT 0 | ||
1584 | #define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT) | ||
1585 | |||
1586 | #define NFIFOENTRY_PLEN_SHIFT 0 | ||
1587 | #define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT) | ||
1588 | |||
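An info-FIFO entry routes data to a destination, tags its source and data type, and can ask the pad block to synthesize the data itself. A sketch of an entry injecting zero padding into the class 1 CHA and marking it as the last class 1 data (the choice of DTYPE here is illustrative):

    /* Sketch: 12 bytes of zero pad, fed to class 1, flagged as last */
    u32 nfifo = NFIFOENTRY_DEST_CLASS1 |
                NFIFOENTRY_LC1 |          /* last class 1 data */
                NFIFOENTRY_STYPE_PAD |    /* source: pad generator */
                NFIFOENTRY_DTYPE_MSG |    /* counts as message data */
                NFIFOENTRY_PTYPE_ZEROS |  /* pad contents: zeros */
                (12 << NFIFOENTRY_PLEN_SHIFT);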
1589 | /* | ||
1590 | * PDB internal definitions | ||
1591 | */ | ||
1592 | |||
1593 | /* IPSec ESP CBC Encap/Decap Options */ | ||
1594 | #define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */ | ||
1595 | #define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */ | ||
1596 | #define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */ | ||
1597 | #define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */ | ||
1598 | #define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */ | ||
1599 | #define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */ | ||
1600 | #define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */ | ||
1601 | #define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */ | ||
1602 | #define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */ | ||
1603 | #define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */ | ||
1604 | |||
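The ESP CBC PDB option byte is a bitwise OR of the flags above; which flags make sense depends on direction (encap vs. decap). A sketch of an encapsulation options byte:

    /* Sketch: tunnel-mode encap, hardware-generated IV, extended
     * sequence numbers.
     */
    u8 esp_opts = PDBOPTS_ESPCBC_IVSRC |
                  PDBOPTS_ESPCBC_ESN |
                  PDBOPTS_ESPCBC_TUNNEL;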
1605 | #endif /* DESC_H */ | ||
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h new file mode 100644 index 000000000000..46915800c26f --- /dev/null +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * caam descriptor construction helper functions | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | */ | ||
6 | |||
7 | #include "desc.h" | ||
8 | |||
9 | #define IMMEDIATE (1 << 23) | ||
10 | #define CAAM_CMD_SZ sizeof(u32) | ||
11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) | ||
12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64) | ||
13 | |||
14 | #ifdef DEBUG | ||
15 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ | ||
16 | &__func__[sizeof("append")]); } while (0) | ||
17 | #else | ||
18 | #define PRINT_POS | ||
19 | #endif | ||
20 | |||
21 | #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ | ||
22 | LDST_SRCDST_WORD_DECOCTRL | \ | ||
23 | (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) | ||
24 | #define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ | ||
25 | LDST_SRCDST_WORD_DECOCTRL | \ | ||
26 | (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) | ||
27 | |||
28 | static inline int desc_len(u32 *desc) | ||
29 | { | ||
30 | return *desc & HDR_DESCLEN_MASK; | ||
31 | } | ||
32 | |||
33 | static inline int desc_bytes(void *desc) | ||
34 | { | ||
35 | return desc_len(desc) * CAAM_CMD_SZ; | ||
36 | } | ||
37 | |||
38 | static inline u32 *desc_end(u32 *desc) | ||
39 | { | ||
40 | return desc + desc_len(desc); | ||
41 | } | ||
42 | |||
43 | static inline void *sh_desc_pdb(u32 *desc) | ||
44 | { | ||
45 | return desc + 1; | ||
46 | } | ||
47 | |||
48 | static inline void init_desc(u32 *desc, u32 options) | ||
49 | { | ||
50 | *desc = options | HDR_ONE | 1; | ||
51 | } | ||
52 | |||
53 | static inline void init_sh_desc(u32 *desc, u32 options) | ||
54 | { | ||
55 | PRINT_POS; | ||
56 | init_desc(desc, CMD_SHARED_DESC_HDR | options); | ||
57 | } | ||
58 | |||
59 | static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) | ||
60 | { | ||
61 | u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1; | ||
62 | |||
63 | init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) | | ||
64 | options); | ||
65 | } | ||
66 | |||
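init_sh_desc_pdb() sizes the shared-descriptor header so that execution begins just past the PDB: pdb_bytes / CAAM_CMD_SZ converts the PDB to words, and the + 1 makes room for the header word at index 0, so the start index lands on the first word after the PDB. A worked sketch, assuming pdb_bytes is word-aligned (the 16-byte size is illustrative):

    /* Sketch: 16-byte PDB -> pdb_len = 16/4 + 1 = 5. The header occupies
     * word 0, the PDB words 1..4, and the start index (5) points at the
     * first command word to be appended.
     */
    u32 desc[CAAM_DESC_BYTES_MAX / CAAM_CMD_SZ];
    init_sh_desc_pdb(desc, 0, 16);
    /* desc_len(desc) == 5 here; the next append lands at word 5 */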
67 | static inline void init_job_desc(u32 *desc, u32 options) | ||
68 | { | ||
69 | init_desc(desc, CMD_DESC_HDR | options); | ||
70 | } | ||
71 | |||
72 | static inline void append_ptr(u32 *desc, dma_addr_t ptr) | ||
73 | { | ||
74 | dma_addr_t *offset = (dma_addr_t *)desc_end(desc); | ||
75 | |||
76 | *offset = ptr; | ||
77 | |||
78 | (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ; | ||
79 | } | ||
80 | |||
81 | static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, | ||
82 | u32 options) | ||
83 | { | ||
84 | PRINT_POS; | ||
85 | init_job_desc(desc, HDR_SHARED | options | | ||
86 | (len << HDR_START_IDX_SHIFT)); | ||
87 | append_ptr(desc, ptr); | ||
88 | } | ||
89 | |||
90 | static inline void append_data(u32 *desc, void *data, int len) | ||
91 | { | ||
92 | u32 *offset = desc_end(desc); | ||
93 | |||
94 | if (len) /* avoid sparse warning: memcpy with byte count of 0 */ | ||
95 | memcpy(offset, data, len); | ||
96 | |||
97 | (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; | ||
98 | } | ||
99 | |||
100 | static inline void append_cmd(u32 *desc, u32 command) | ||
101 | { | ||
102 | u32 *cmd = desc_end(desc); | ||
103 | |||
104 | *cmd = command; | ||
105 | |||
106 | (*desc)++; | ||
107 | } | ||
108 | |||
109 | static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, | ||
110 | u32 command) | ||
111 | { | ||
112 | append_cmd(desc, command | len); | ||
113 | append_ptr(desc, ptr); | ||
114 | } | ||
115 | |||
116 | static inline void append_cmd_data(u32 *desc, void *data, int len, | ||
117 | u32 command) | ||
118 | { | ||
119 | append_cmd(desc, command | IMMEDIATE | len); | ||
120 | append_data(desc, data, len); | ||
121 | } | ||
122 | |||
123 | static inline u32 *append_jump(u32 *desc, u32 options) | ||
124 | { | ||
125 | u32 *cmd = desc_end(desc); | ||
126 | |||
127 | PRINT_POS; | ||
128 | append_cmd(desc, CMD_JUMP | options); | ||
129 | |||
130 | return cmd; | ||
131 | } | ||
132 | |||
133 | static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd) | ||
134 | { | ||
135 | *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc)); | ||
136 | } | ||
137 | |||
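append_jump() returns the address of the jump word so its offset can be patched once the target is known; set_jump_tgt_here() fills in the distance from the jump to the current end of the descriptor. A sketch of the usual pattern:

    /* Sketch: emit a conditional jump, append the commands to be
     * skipped, then patch the jump to land just past them.
     */
    u32 *jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
    /* ... commands skipped whenever the condition holds ... */
    set_jump_tgt_here(desc, jump_cmd);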
138 | #define APPEND_CMD(cmd, op) \ | ||
139 | static inline void append_##cmd(u32 *desc, u32 options) \ | ||
140 | { \ | ||
141 | PRINT_POS; \ | ||
142 | append_cmd(desc, CMD_##op | options); \ | ||
143 | } | ||
144 | APPEND_CMD(operation, OPERATION) | ||
145 | APPEND_CMD(move, MOVE) | ||
146 | |||
147 | #define APPEND_CMD_LEN(cmd, op) \ | ||
148 | static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \ | ||
149 | { \ | ||
150 | PRINT_POS; \ | ||
151 | append_cmd(desc, CMD_##op | len | options); \ | ||
152 | } | ||
153 | APPEND_CMD_LEN(seq_store, SEQ_STORE) | ||
154 | APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD) | ||
155 | APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE) | ||
156 | |||
157 | #define APPEND_CMD_PTR(cmd, op) \ | ||
158 | static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \ | ||
159 | u32 options) \ | ||
160 | { \ | ||
161 | PRINT_POS; \ | ||
162 | append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ | ||
163 | } | ||
164 | APPEND_CMD_PTR(key, KEY) | ||
165 | APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR) | ||
166 | APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR) | ||
167 | APPEND_CMD_PTR(load, LOAD) | ||
168 | APPEND_CMD_PTR(store, STORE) | ||
169 | APPEND_CMD_PTR(fifo_load, FIFO_LOAD) | ||
170 | APPEND_CMD_PTR(fifo_store, FIFO_STORE) | ||
171 | |||
172 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ | ||
173 | static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | ||
174 | unsigned int len, u32 options) \ | ||
175 | { \ | ||
176 | PRINT_POS; \ | ||
177 | append_cmd_data(desc, data, len, CMD_##op | options); \ | ||
178 | } | ||
179 | APPEND_CMD_PTR_TO_IMM(load, LOAD); | ||
180 | APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); | ||
181 | |||
182 | /* | ||
183 | * 2nd variant for commands whose specified immediate length differs | ||
184 | * from length of immediate data provided, e.g., split keys | ||
185 | */ | ||
186 | #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ | ||
187 | static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | ||
188 | unsigned int data_len, \ | ||
189 | unsigned int len, u32 options) \ | ||
190 | { \ | ||
191 | PRINT_POS; \ | ||
192 | append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \ | ||
193 | append_data(desc, data, data_len); \ | ||
194 | } | ||
195 | APPEND_CMD_PTR_TO_IMM2(key, KEY); | ||
196 | |||
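The expansion of APPEND_CMD_PTR_TO_IMM2(key, KEY) yields append_key_as_imm(), where data_len is the number of immediate bytes actually copied into the descriptor and len is the length the KEY command declares; for split keys the copied data is padded, so the two differ. A sketch with illustrative names (split_key, pad_len, key_len, and options are not defined in this patch):

    /* Sketch: load a split key as immediate data. pad_len bytes are
     * copied into the descriptor; the KEY command's length field
     * carries key_len.
     */
    append_key_as_imm(desc, split_key, pad_len, key_len, options);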
197 | #define APPEND_CMD_RAW_IMM(cmd, op, type) \ | ||
198 | static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ | ||
199 | u32 options) \ | ||
200 | { \ | ||
201 | PRINT_POS; \ | ||
202 | append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \ | ||
203 | append_cmd(desc, immediate); \ | ||
204 | } | ||
205 | APPEND_CMD_RAW_IMM(load, LOAD, u32); | ||
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c new file mode 100644 index 000000000000..7e2d54bffad6 --- /dev/null +++ b/drivers/crypto/caam/error.c | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | * CAAM Error Reporting | ||
3 | * | ||
4 | * Copyright 2009-2011 Freescale Semiconductor, Inc. | ||
5 | */ | ||
6 | |||
7 | #include "compat.h" | ||
8 | #include "regs.h" | ||
9 | #include "intern.h" | ||
10 | #include "desc.h" | ||
11 | #include "jr.h" | ||
12 | #include "error.h" | ||
13 | |||
14 | #define SPRINTFCAT(str, format, param, max_alloc) \ | ||
15 | { \ | ||
16 | char *tmp; \ | ||
17 | \ | ||
18 | tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ | ||
19 | sprintf(tmp, format, param); \ | ||
20 | strcat(str, tmp); \ | ||
21 | kfree(tmp); \ | ||
22 | } | ||
23 | |||
24 | static void report_jump_idx(u32 status, char *outstr) | ||
25 | { | ||
26 | u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> | ||
27 | JRSTA_DECOERR_INDEX_SHIFT; | ||
28 | |||
29 | if (status & JRSTA_DECOERR_JUMP) | ||
30 | strcat(outstr, "jump tgt desc idx "); | ||
31 | else | ||
32 | strcat(outstr, "desc idx "); | ||
33 | |||
34 | SPRINTFCAT(outstr, "%d: ", idx, sizeof("255")); | ||
35 | } | ||
36 | |||
37 | static void report_ccb_status(u32 status, char *outstr) | ||
38 | { | ||
39 | char *cha_id_list[] = { | ||
40 | "", | ||
41 | "AES", | ||
42 | "DES, 3DES", | ||
43 | "ARC4", | ||
44 | "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512", | ||
45 | "RNG", | ||
46 | "SNOW f8", | ||
47 | "Kasumi f8, f9", | ||
48 | "All Public Key Algorithms", | ||
49 | "CRC", | ||
50 | "SNOW f9", | ||
51 | }; | ||
52 | char *err_id_list[] = { | ||
53 | "None. No error.", | ||
54 | "Mode error.", | ||
55 | "Data size error.", | ||
56 | "Key size error.", | ||
57 | "PKHA A memory size error.", | ||
58 | "PKHA B memory size error.", | ||
59 | "Data arrived out of sequence error.", | ||
60 | "PKHA divide-by-zero error.", | ||
61 | "PKHA modulus even error.", | ||
62 | "DES key parity error.", | ||
63 | "ICV check failed.", | ||
64 | "Hardware error.", | ||
65 | "Unsupported CCM AAD size.", | ||
66 | "Class 1 CHA is not reset", | ||
67 | "Invalid CHA combination was selected", | ||
68 | "Invalid CHA selected.", | ||
69 | }; | ||
70 | u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> | ||
71 | JRSTA_CCBERR_CHAID_SHIFT; | ||
72 | u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; | ||
73 | |||
74 | report_jump_idx(status, outstr); | ||
75 | |||
76 | if (cha_id < ARRAY_SIZE(cha_id_list)) { | ||
77 | SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id], | ||
78 | strlen(cha_id_list[cha_id])); | ||
79 | } else { | ||
80 | SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ", | ||
81 | cha_id, sizeof("ff")); | ||
82 | } | ||
83 | |||
84 | if (err_id < ARRAY_SIZE(err_id_list)) { | ||
85 | SPRINTFCAT(outstr, "%s", err_id_list[err_id], | ||
86 | strlen(err_id_list[err_id])); | ||
87 | } else { | ||
88 | SPRINTFCAT(outstr, "unidentified err_id value 0x%02x", | ||
89 | err_id, sizeof("ff")); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | static void report_jump_status(u32 status, char *outstr) | ||
94 | { | ||
95 | SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); | ||
96 | } | ||
97 | |||
98 | static void report_deco_status(u32 status, char *outstr) | ||
99 | { | ||
100 | const struct { | ||
101 | u8 value; | ||
102 | char *error_text; | ||
103 | } desc_error_list[] = { | ||
104 | { 0x00, "None. No error." }, | ||
105 | { 0x01, "SGT Length Error. The descriptor is trying to read " | ||
106 | "more data than is contained in the SGT table." }, | ||
107 | { 0x02, "Reserved." }, | ||
108 | { 0x03, "Job Ring Control Error. There is a bad value in the " | ||
109 | "Job Ring Control register." }, | ||
110 | { 0x04, "Invalid Descriptor Command. The Descriptor Command " | ||
111 | "field is invalid." }, | ||
112 | { 0x05, "Reserved." }, | ||
113 | { 0x06, "Invalid KEY Command" }, | ||
114 | { 0x07, "Invalid LOAD Command" }, | ||
115 | { 0x08, "Invalid STORE Command" }, | ||
116 | { 0x09, "Invalid OPERATION Command" }, | ||
117 | { 0x0A, "Invalid FIFO LOAD Command" }, | ||
118 | { 0x0B, "Invalid FIFO STORE Command" }, | ||
119 | { 0x0C, "Invalid MOVE Command" }, | ||
120 | { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is " | ||
121 | "invalid because the target is not a Job Header " | ||
122 | "Command, or the jump is from a Trusted Descriptor to " | ||
123 | "a Job Descriptor, or because the target Descriptor " | ||
124 | "contains a Shared Descriptor." }, | ||
125 | { 0x0E, "Invalid MATH Command" }, | ||
126 | { 0x0F, "Invalid SIGNATURE Command" }, | ||
127 | { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR " | ||
128 | "Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO " | ||
129 | "LOAD, or SEQ FIFO STORE decremented the input or " | ||
130 | "output sequence length below 0. This error may result " | ||
131 | "if a built-in PROTOCOL Command has encountered a " | ||
132 | "malformed PDU." }, | ||
133 | { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."}, | ||
134 | { 0x12, "Shared Descriptor Header Error" }, | ||
135 | { 0x13, "Header Error. Invalid length or parity, or certain " | ||
136 | "other problems." }, | ||
137 | { 0x14, "Burster Error. Burster has gotten to an illegal " | ||
138 | "state" }, | ||
139 | { 0x15, "Context Register Length Error. The descriptor is " | ||
140 | "trying to read or write past the end of the Context " | ||
141 | "Register. A SEQ LOAD or SEQ STORE with the VLF bit " | ||
142 | "set was executed with too large a length in the " | ||
143 | "variable length register (VSOL for SEQ STORE or VSIL " | ||
144 | "for SEQ LOAD)." }, | ||
145 | { 0x16, "DMA Error" }, | ||
146 | { 0x17, "Reserved." }, | ||
147 | { 0x1A, "Job failed due to JR reset" }, | ||
148 | { 0x1B, "Job failed due to Fail Mode" }, | ||
149 | { 0x1C, "DECO Watchdog timer timeout error" }, | ||
150 | { 0x1D, "DECO tried to copy a key from another DECO but the " | ||
151 | "other DECO's Key Registers were locked" }, | ||
152 | { 0x1E, "DECO attempted to copy data from a DECO that had an " | ||
153 | "unmasked Descriptor error" }, | ||
154 | { 0x1F, "LIODN error. DECO was trying to share from itself or " | ||
155 | "from another DECO but the two Non-SEQ LIODN values " | ||
156 | "didn't match or the 'shared from' DECO's Descriptor " | ||
157 | "required that the SEQ LIODNs be the same and they " | ||
158 | "aren't." }, | ||
159 | { 0x20, "DECO has completed a reset initiated via the DRR " | ||
160 | "register" }, | ||
161 | { 0x21, "Nonce error. When using EKT (CCM) key encryption " | ||
162 | "option in the FIFO STORE Command, the Nonce counter " | ||
163 | "reached its maximum value and this encryption mode " | ||
164 | "can no longer be used." }, | ||
165 | { 0x22, "Meta data is too large (> 511 bytes) for TLS decap " | ||
166 | "(input frame; block ciphers) and IPsec decap (output " | ||
167 | "frame, when doing the next header byte update) and " | ||
168 | "DCRC (output frame)." }, | ||
169 | { 0x80, "DNR (do not run) error" }, | ||
170 | { 0x81, "undefined protocol command" }, | ||
171 | { 0x82, "invalid setting in PDB" }, | ||
172 | { 0x83, "Anti-replay LATE error" }, | ||
173 | { 0x84, "Anti-replay REPLAY error" }, | ||
174 | { 0x85, "Sequence number overflow" }, | ||
175 | { 0x86, "Sigver invalid signature" }, | ||
176 | { 0x87, "DSA Sign Illegal test descriptor" }, | ||
177 | { 0x88, "Protocol Format Error - A protocol has seen an error " | ||
178 | "in the format of data received. When running RSA, " | ||
179 | "this means that formatting with random padding was " | ||
180 | "used, and did not follow the form: 0x00, 0x02, 8-to-N " | ||
181 | "bytes of non-zero pad, 0x00, F data." }, | ||
182 | { 0x89, "Protocol Size Error - A protocol has seen an error in " | ||
183 | "size. When running RSA, pdb size N < (size of F) when " | ||
184 | "no formatting is used; or pdb size N < (F + 11) when " | ||
185 | "formatting is used." }, | ||
186 | { 0xC1, "Blob Command error: Undefined mode" }, | ||
187 | { 0xC2, "Blob Command error: Secure Memory Blob mode error" }, | ||
188 | { 0xC4, "Blob Command error: Black Blob key or input size " | ||
189 | "error" }, | ||
190 | { 0xC5, "Blob Command error: Invalid key destination" }, | ||
191 | { 0xC8, "Blob Command error: Trusted/Secure mode error" }, | ||
192 | { 0xF0, "IPsec TTL or hop limit field either came in as 0, " | ||
193 | "or was decremented to 0" }, | ||
194 | { 0xF1, "3GPP HFN matches or exceeds the Threshold" }, | ||
195 | }; | ||
196 | u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK; | ||
197 | int i; | ||
198 | |||
199 | report_jump_idx(status, outstr); | ||
200 | |||
201 | for (i = 0; i < ARRAY_SIZE(desc_error_list); i++) | ||
202 | if (desc_error_list[i].value == desc_error) | ||
203 | break; | ||
204 | |||
205 | if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) { | ||
206 | SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text, | ||
207 | strlen(desc_error_list[i].error_text)); | ||
208 | } else { | ||
209 | SPRINTFCAT(outstr, "unidentified error value 0x%02x", | ||
210 | desc_error, sizeof("ff")); | ||
211 | } | ||
212 | } | ||
213 | |||
214 | static void report_jr_status(u32 status, char *outstr) | ||
215 | { | ||
216 | SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); | ||
217 | } | ||
218 | |||
219 | static void report_cond_code_status(u32 status, char *outstr) | ||
220 | { | ||
221 | SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); | ||
222 | } | ||
223 | |||
224 | char *caam_jr_strstatus(char *outstr, u32 status) | ||
225 | { | ||
226 | struct stat_src { | ||
227 | void (*report_ssed)(u32 status, char *outstr); | ||
228 | char *error; | ||
229 | } status_src[] = { | ||
230 | { NULL, "No error" }, | ||
231 | { NULL, NULL }, | ||
232 | { report_ccb_status, "CCB" }, | ||
233 | { report_jump_status, "Jump" }, | ||
234 | { report_deco_status, "DECO" }, | ||
235 | { NULL, NULL }, | ||
236 | { report_jr_status, "Job Ring" }, | ||
237 | { report_cond_code_status, "Condition Code" }, | ||
238 | }; | ||
239 | u32 ssrc = status >> JRSTA_SSRC_SHIFT; | ||
240 | |||
241 | sprintf(outstr, "%s: ", status_src[ssrc].error); | ||
242 | |||
243 | if (status_src[ssrc].report_ssed) | ||
244 | status_src[ssrc].report_ssed(status, outstr); | ||
245 | |||
246 | return outstr; | ||
247 | } | ||
248 | EXPORT_SYMBOL(caam_jr_strstatus); | ||
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h new file mode 100644 index 000000000000..02c7baa1748e --- /dev/null +++ b/drivers/crypto/caam/error.h | |||
@@ -0,0 +1,11 @@ | |||
1 | /* | ||
2 | * CAAM Error Reporting code header | ||
3 | * | ||
4 | * Copyright 2009-2011 Freescale Semiconductor, Inc. | ||
5 | */ | ||
6 | |||
7 | #ifndef CAAM_ERROR_H | ||
8 | #define CAAM_ERROR_H | ||
9 | #define CAAM_ERROR_STR_MAX 302 | ||
10 | extern char *caam_jr_strstatus(char *outstr, u32 status); | ||
11 | #endif /* CAAM_ERROR_H */ | ||
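A usage sketch (illustrative, not part of this patch): a completion callback of the shape expected by caam_jr_enqueue() can turn the raw JRSTA status word into readable text with caam_jr_strstatus(), given a buffer of at least CAAM_ERROR_STR_MAX bytes. The function name example_done is hypothetical.

static void example_done(struct device *dev, u32 *desc, u32 status,
			 void *areq)
{
	char outstr[CAAM_ERROR_STR_MAX];

	if (status)
		dev_err(dev, "job failed: %s\n",
			caam_jr_strstatus(outstr, status));
}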
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h new file mode 100644 index 000000000000..a34be01b0b29 --- /dev/null +++ b/drivers/crypto/caam/intern.h | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * CAAM/SEC 4.x driver backend | ||
3 | * Private/internal definitions between modules | ||
4 | * | ||
5 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #ifndef INTERN_H | ||
10 | #define INTERN_H | ||
11 | |||
12 | #define JOBR_UNASSIGNED 0 | ||
13 | #define JOBR_ASSIGNED 1 | ||
14 | |||
15 | /* Currently comes from Kconfig param as a ^2 (driver-required) */ | ||
16 | #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) | ||
17 | |||
18 | /* Kconfig params for interrupt coalescing if selected (else zero) */ | ||
19 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC | ||
20 | #define JOBR_INTC JRCFG_ICEN | ||
21 | #define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD | ||
22 | #define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD | ||
23 | #else | ||
24 | #define JOBR_INTC 0 | ||
25 | #define JOBR_INTC_TIME_THLD 0 | ||
26 | #define JOBR_INTC_COUNT_THLD 0 | ||
27 | #endif | ||
28 | |||
29 | /* | ||
30 | * Storage for tracking each in-process entry moving across a ring | ||
31 | * Each entry on an output ring needs one of these | ||
32 | */ | ||
33 | struct caam_jrentry_info { | ||
34 | void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg); | ||
35 | void *cbkarg; /* Argument per ring entry */ | ||
36 | u32 *desc_addr_virt; /* Stored virt addr for postprocessing */ | ||
37 | dma_addr_t desc_addr_dma; /* Stored bus addr for done matching */ | ||
38 | u32 desc_size; /* Stored size for postprocessing, header derived */ | ||
39 | }; | ||
40 | |||
41 | /* Private sub-storage for a single JobR */ | ||
42 | struct caam_drv_private_jr { | ||
43 | struct device *parentdev; /* points back to controller dev */ | ||
44 | int ridx; | ||
45 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | ||
46 | struct tasklet_struct irqtask[NR_CPUS]; | ||
47 | int irq; /* One per queue */ | ||
48 | int assign; /* busy/free */ | ||
49 | |||
50 | /* Job ring info */ | ||
51 | int ringsize; /* Size of rings (assume input = output) */ | ||
52 | struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ | ||
53 | spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */ | ||
54 | int inp_ring_write_index; /* Input index "tail" */ | ||
55 | int head; /* entinfo (s/w ring) head index */ | ||
56 | dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */ | ||
57 | spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */ | ||
58 | int out_ring_read_index; /* Output index "tail" */ | ||
59 | int tail; /* entinfo (s/w ring) tail index */ | ||
60 | struct jr_outentry *outring; /* Base of output ring, DMA-safe */ | ||
61 | }; | ||
62 | |||
63 | /* | ||
64 | * Driver-private storage for a single CAAM block instance | ||
65 | */ | ||
66 | struct caam_drv_private { | ||
67 | |||
68 | struct device *dev; | ||
69 | struct device **jrdev; /* Alloc'ed array per sub-device */ | ||
70 | spinlock_t jr_alloc_lock; | ||
71 | struct platform_device *pdev; | ||
72 | |||
73 | /* Physical-presence section */ | ||
74 | struct caam_ctrl *ctrl; /* controller region */ | ||
75 | struct caam_deco **deco; /* DECO/CCB views */ | ||
76 | struct caam_assurance *ac; | ||
77 | struct caam_queue_if *qi; /* QI control region */ | ||
78 | |||
79 | /* | ||
80 | * Detected geometry block. Filled in from device tree if powerpc, | ||
81 | * or from register-based version detection code | ||
82 | */ | ||
83 | u8 total_jobrs; /* Total Job Rings in device */ | ||
84 | u8 qi_present; /* Nonzero if QI present in device */ | ||
85 | int secvio_irq; /* Security violation interrupt number */ | ||
86 | |||
87 | /* which jr allocated to scatterlist crypto */ | ||
88 | atomic_t tfm_count ____cacheline_aligned; | ||
89 | int num_jrs_for_algapi; | ||
90 | struct device **algapi_jr; | ||
91 | /* list of registered crypto algorithms (mk generic context handle?) */ | ||
92 | struct list_head alg_list; | ||
93 | |||
94 | /* | ||
95 | * debugfs entries for developer view into driver/device | ||
96 | * variables at runtime. | ||
97 | */ | ||
98 | #ifdef CONFIG_DEBUG_FS | ||
99 | struct dentry *dfs_root; | ||
100 | struct dentry *ctl; /* controller dir */ | ||
101 | struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req; | ||
102 | struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes; | ||
103 | struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes; | ||
104 | struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus; | ||
105 | |||
106 | struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; | ||
107 | struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk; | ||
108 | #endif | ||
109 | }; | ||
110 | |||
111 | void caam_jr_algapi_init(struct device *dev); | ||
112 | void caam_jr_algapi_remove(struct device *dev); | ||
113 | #endif /* INTERN_H */ | ||
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c new file mode 100644 index 000000000000..340fa322c0f0 --- /dev/null +++ b/drivers/crypto/caam/jr.c | |||
@@ -0,0 +1,517 @@ | |||
1 | /* | ||
2 | * CAAM/SEC 4.x transport/backend driver | ||
3 | * JobR backend functionality | ||
4 | * | ||
5 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
6 | */ | ||
7 | |||
8 | #include "compat.h" | ||
9 | #include "regs.h" | ||
10 | #include "jr.h" | ||
11 | #include "desc.h" | ||
12 | #include "intern.h" | ||
13 | |||
14 | /* Main per-ring interrupt handler */ | ||
15 | static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | ||
16 | { | ||
17 | struct device *dev = st_dev; | ||
18 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
19 | u32 irqstate; | ||
20 | |||
21 | /* | ||
22 | * Check the output ring for ready responses, kick | ||
23 | * tasklet if jobs done. | ||
24 | */ | ||
25 | irqstate = rd_reg32(&jrp->rregs->jrintstatus); | ||
26 | if (!irqstate) | ||
27 | return IRQ_NONE; | ||
28 | |||
29 | /* | ||
30 | * If we see a JobR error, we have more development work to do. | ||
31 | * Flag a bug now, but we really need to shut down and | ||
32 | * restart the queue (and fix code). | ||
33 | */ | ||
34 | if (irqstate & JRINT_JR_ERROR) { | ||
35 | dev_err(dev, "job ring error: irqstate: %08x\n", irqstate); | ||
36 | BUG(); | ||
37 | } | ||
38 | |||
39 | /* mask valid interrupts */ | ||
40 | setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
41 | |||
42 | /* Have valid interrupt at this point, just ACK and trigger */ | ||
43 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); | ||
44 | |||
45 | preempt_disable(); | ||
46 | tasklet_schedule(&jrp->irqtask[smp_processor_id()]); | ||
47 | preempt_enable(); | ||
48 | |||
49 | return IRQ_HANDLED; | ||
50 | } | ||
51 | |||
52 | /* Deferred service handler, run as interrupt-fired tasklet */ | ||
53 | static void caam_jr_dequeue(unsigned long devarg) | ||
54 | { | ||
55 | int hw_idx, sw_idx, i, head, tail; | ||
56 | struct device *dev = (struct device *)devarg; | ||
57 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
58 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); | ||
59 | u32 *userdesc, userstatus; | ||
60 | void *userarg; | ||
61 | unsigned long flags; | ||
62 | |||
63 | spin_lock_irqsave(&jrp->outlock, flags); | ||
64 | |||
65 | head = ACCESS_ONCE(jrp->head); | ||
66 | sw_idx = tail = jrp->tail; | ||
67 | |||
68 | while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && | ||
69 | rd_reg32(&jrp->rregs->outring_used)) { | ||
70 | |||
71 | hw_idx = jrp->out_ring_read_index; | ||
72 | for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { | ||
73 | sw_idx = (tail + i) & (JOBR_DEPTH - 1); | ||
74 | |||
75 | smp_read_barrier_depends(); | ||
76 | |||
77 | if (jrp->outring[hw_idx].desc == | ||
78 | jrp->entinfo[sw_idx].desc_addr_dma) | ||
79 | break; /* found */ | ||
80 | } | ||
81 | /* we should never fail to find a matching descriptor */ | ||
82 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); | ||
83 | |||
84 | /* Unmap just-run descriptor so we can post-process */ | ||
85 | dma_unmap_single(dev, jrp->outring[hw_idx].desc, | ||
86 | jrp->entinfo[sw_idx].desc_size, | ||
87 | DMA_TO_DEVICE); | ||
88 | |||
89 | /* mark completed, avoid matching on a recycled desc addr */ | ||
90 | jrp->entinfo[sw_idx].desc_addr_dma = 0; | ||
91 | |||
92 | /* Stash callback params for use outside of lock */ | ||
93 | usercall = jrp->entinfo[sw_idx].callbk; | ||
94 | userarg = jrp->entinfo[sw_idx].cbkarg; | ||
95 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; | ||
96 | userstatus = jrp->outring[hw_idx].jrstatus; | ||
97 | |||
98 | smp_mb(); | ||
99 | |||
100 | jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & | ||
101 | (JOBR_DEPTH - 1); | ||
102 | |||
103 | /* | ||
104 | * if this job completed out-of-order, do not increment | ||
105 | * the tail. Otherwise, increment tail by 1 plus the | ||
106 | * number of subsequent jobs already completed out-of-order | ||
107 | */ | ||
108 | if (sw_idx == tail) { | ||
109 | do { | ||
110 | tail = (tail + 1) & (JOBR_DEPTH - 1); | ||
111 | smp_read_barrier_depends(); | ||
112 | } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && | ||
113 | jrp->entinfo[tail].desc_addr_dma == 0); | ||
114 | |||
115 | jrp->tail = tail; | ||
116 | } | ||
117 | |||
118 | /* set done */ | ||
119 | wr_reg32(&jrp->rregs->outring_rmvd, 1); | ||
120 | |||
121 | spin_unlock_irqrestore(&jrp->outlock, flags); | ||
122 | |||
123 | /* Finally, execute user's callback */ | ||
124 | usercall(dev, userdesc, userstatus, userarg); | ||
125 | |||
126 | spin_lock_irqsave(&jrp->outlock, flags); | ||
127 | |||
128 | head = ACCESS_ONCE(jrp->head); | ||
129 | sw_idx = tail = jrp->tail; | ||
130 | } | ||
131 | |||
132 | spin_unlock_irqrestore(&jrp->outlock, flags); | ||
133 | |||
134 | /* reenable / unmask IRQs */ | ||
135 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
136 | } | ||
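A minimal sketch of the tail-advance rule implemented above (illustrative only, helper name hypothetical): a slot whose desc_addr_dma has been zeroed completed out of order, so the tail skips past every contiguous done slot. CIRC_CNT() comes from <linux/circ_buf.h> and JOBR_DEPTH is a power of two.

static void example_advance_tail(struct caam_drv_private_jr *jrp, int head)
{
	int tail = jrp->tail;

	/* step past every contiguous slot already marked complete */
	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       jrp->entinfo[tail].desc_addr_dma == 0)
		tail = (tail + 1) & (JOBR_DEPTH - 1);

	jrp->tail = tail;
}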
137 | |||
138 | /** | ||
139 | * caam_jr_register() - Alloc a ring for someone to use as needed. Returns | ||
140 | * the ordinal of the ring allocated, else returns -ENODEV if no rings | ||
141 | * are available. | ||
142 | * @ctrldev: points to the controller level dev (parent) that | ||
143 | * owns rings available for use. | ||
144 | * @rdev: points to where a pointer to the newly allocated queue's | ||
145 | * dev can be written to if successful. | ||
146 | **/ | ||
147 | int caam_jr_register(struct device *ctrldev, struct device **rdev) | ||
148 | { | ||
149 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | ||
150 | struct caam_drv_private_jr *jrpriv = NULL; | ||
151 | unsigned long flags; | ||
152 | int ring; | ||
153 | |||
154 | /* Lock, if free ring - assign, unlock */ | ||
155 | spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags); | ||
156 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | ||
157 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); | ||
158 | if (jrpriv->assign == JOBR_UNASSIGNED) { | ||
159 | jrpriv->assign = JOBR_ASSIGNED; | ||
160 | *rdev = ctrlpriv->jrdev[ring]; | ||
161 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | ||
162 | return ring; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | /* No unassigned rings were available; tell the caller */ | ||
167 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | ||
168 | *rdev = NULL; | ||
169 | |||
170 | return -ENODEV; | ||
171 | } | ||
172 | EXPORT_SYMBOL(caam_jr_register); | ||
173 | |||
174 | /** | ||
175 | * caam_jr_deregister() - Deregister an API and release the queue. | ||
176 | * Returns 0 if OK, -EBUSY if queue still contains pending entries | ||
177 | * or unprocessed results at the time of the call | ||
178 | * @rdev: points to the dev that identifies the queue to | ||
179 | * be released. | ||
180 | **/ | ||
181 | int caam_jr_deregister(struct device *rdev) | ||
182 | { | ||
183 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); | ||
184 | struct caam_drv_private *ctrlpriv; | ||
185 | unsigned long flags; | ||
186 | |||
187 | /* Get the owning controller's private space */ | ||
188 | ctrlpriv = dev_get_drvdata(jrpriv->parentdev); | ||
189 | |||
190 | /* | ||
191 | * Make sure ring empty before release | ||
192 | */ | ||
193 | if (rd_reg32(&jrpriv->rregs->outring_used) || | ||
194 | (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH)) | ||
195 | return -EBUSY; | ||
196 | |||
197 | /* Release ring */ | ||
198 | spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags); | ||
199 | jrpriv->assign = JOBR_UNASSIGNED; | ||
200 | spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags); | ||
201 | |||
202 | return 0; | ||
203 | } | ||
204 | EXPORT_SYMBOL(caam_jr_deregister); | ||
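An illustrative lifecycle sketch, assuming ctrldev is the CAAM controller's struct device (function name hypothetical): caam_jr_register() grabs a free ring, caam_jr_deregister() hands it back once the ring has drained.

static int example_use_ring(struct device *ctrldev)
{
	struct device *jrdev;
	int ring = caam_jr_register(ctrldev, &jrdev);

	if (ring < 0)
		return ring;			/* -ENODEV: no free rings */

	/* ... submit work with caam_jr_enqueue(jrdev, ...) ... */

	return caam_jr_deregister(jrdev);	/* -EBUSY if jobs pending */
}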
205 | |||
206 | /** | ||
207 | * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, | ||
208 | * -EBUSY if the queue is full, -EIO if it cannot map the caller's | ||
209 | * descriptor. | ||
210 | * @dev: device of the job ring to be used. This device should have | ||
211 | * been assigned prior by caam_jr_register(). | ||
212 | * @desc: points to a job descriptor that executes our request. All | ||
213 | * descriptors (and all referenced data) must be in a DMAable | ||
214 | * region, and all data references must be physical addresses | ||
215 | * accessible to CAAM (i.e. within a PAMU window granted | ||
216 | * to it). | ||
217 | * @cbk: pointer to a callback function to be invoked upon completion | ||
218 | * of this request. This has the form: | ||
219 | * callback(struct device *dev, u32 *desc, u32 stat, void *arg) | ||
220 | * where: | ||
221 | * @dev: contains the job ring device that processed this | ||
222 | * response. | ||
223 | * @desc: descriptor that initiated the request, same as | ||
224 | * "desc" being argued to caam_jr_enqueue(). | ||
225 | * @status: untranslated status received from CAAM. See the | ||
226 | * reference manual for a detailed description of | ||
227 | * error meaning, or see the JRSTA definitions in the | ||
228 | * register header file | ||
229 | @areq: the argument originally supplied with the | ||
230 | request to caam_jr_enqueue(). | ||
231 | * @areq: optional pointer to a user argument for use at callback | ||
232 | * time. | ||
233 | **/ | ||
234 | int caam_jr_enqueue(struct device *dev, u32 *desc, | ||
235 | void (*cbk)(struct device *dev, u32 *desc, | ||
236 | u32 status, void *areq), | ||
237 | void *areq) | ||
238 | { | ||
239 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
240 | struct caam_jrentry_info *head_entry; | ||
241 | unsigned long flags; | ||
242 | int head, tail, desc_size; | ||
243 | dma_addr_t desc_dma; | ||
244 | |||
245 | desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32); | ||
246 | desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE); | ||
247 | if (dma_mapping_error(dev, desc_dma)) { | ||
248 | dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n"); | ||
249 | return -EIO; | ||
250 | } | ||
251 | |||
252 | spin_lock_irqsave(&jrp->inplock, flags); | ||
253 | |||
254 | head = jrp->head; | ||
255 | tail = ACCESS_ONCE(jrp->tail); | ||
256 | |||
257 | if (!rd_reg32(&jrp->rregs->inpring_avail) || | ||
258 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { | ||
259 | spin_unlock_irqrestore(&jrp->inplock, flags); | ||
260 | dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); | ||
261 | return -EBUSY; | ||
262 | } | ||
263 | |||
264 | head_entry = &jrp->entinfo[head]; | ||
265 | head_entry->desc_addr_virt = desc; | ||
266 | head_entry->desc_size = desc_size; | ||
267 | head_entry->callbk = (void *)cbk; | ||
268 | head_entry->cbkarg = areq; | ||
269 | head_entry->desc_addr_dma = desc_dma; | ||
270 | |||
271 | jrp->inpring[jrp->inp_ring_write_index] = desc_dma; | ||
272 | |||
273 | smp_wmb(); | ||
274 | |||
275 | jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) & | ||
276 | (JOBR_DEPTH - 1); | ||
277 | jrp->head = (head + 1) & (JOBR_DEPTH - 1); | ||
278 | |||
279 | wmb(); | ||
280 | |||
281 | wr_reg32(&jrp->rregs->inpring_jobadd, 1); | ||
282 | |||
283 | spin_unlock_irqrestore(&jrp->inplock, flags); | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | EXPORT_SYMBOL(caam_jr_enqueue); | ||
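A sketch of the enqueue/callback contract described above, using a completion for a synchronous wait. The example_* names and the result struct are hypothetical glue; the descriptor must remain DMA-able until the callback runs.

struct example_result {
	struct completion done;
	u32 status;
};

static void example_cbk(struct device *dev, u32 *desc, u32 status,
			void *areq)
{
	struct example_result *res = areq;

	res->status = status;
	complete(&res->done);
}

static int example_submit(struct device *jrdev, u32 *desc)
{
	struct example_result res;
	int ret;

	init_completion(&res.done);
	ret = caam_jr_enqueue(jrdev, desc, example_cbk, &res);
	if (ret)
		return ret;	/* -EBUSY (ring full) or -EIO (map failed) */

	wait_for_completion(&res.done);
	return res.status ? -EIO : 0;
}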
288 | |||
289 | static int caam_reset_hw_jr(struct device *dev) | ||
290 | { | ||
291 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
292 | unsigned int timeout = 100000; | ||
293 | |||
294 | /* | ||
295 | * mask interrupts since we are going to poll | ||
296 | * for reset completion status | ||
297 | */ | ||
298 | setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
299 | |||
300 | /* initiate flush (required prior to reset) */ | ||
301 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
302 | while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == | ||
303 | JRINT_ERR_HALT_INPROGRESS) && --timeout) | ||
304 | cpu_relax(); | ||
305 | |||
306 | if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != | ||
307 | JRINT_ERR_HALT_COMPLETE || timeout == 0) { | ||
308 | dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); | ||
309 | return -EIO; | ||
310 | } | ||
311 | |||
312 | /* initiate reset */ | ||
313 | timeout = 100000; | ||
314 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
315 | while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) | ||
316 | cpu_relax(); | ||
317 | |||
318 | if (timeout == 0) { | ||
319 | dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); | ||
320 | return -EIO; | ||
321 | } | ||
322 | |||
323 | /* unmask interrupts */ | ||
324 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * Init JobR independent of platform property detection | ||
331 | */ | ||
332 | static int caam_jr_init(struct device *dev) | ||
333 | { | ||
334 | struct caam_drv_private_jr *jrp; | ||
335 | dma_addr_t inpbusaddr, outbusaddr; | ||
336 | int i, error; | ||
337 | |||
338 | jrp = dev_get_drvdata(dev); | ||
339 | |||
340 | /* Connect job ring interrupt handler. */ | ||
341 | for_each_possible_cpu(i) | ||
342 | tasklet_init(&jrp->irqtask[i], caam_jr_dequeue, | ||
343 | (unsigned long)dev); | ||
344 | |||
345 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, | ||
346 | "caam-jobr", dev); | ||
347 | if (error) { | ||
348 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", | ||
349 | jrp->ridx, jrp->irq); | ||
350 | irq_dispose_mapping(jrp->irq); | ||
351 | jrp->irq = 0; | ||
352 | return -EINVAL; | ||
353 | } | ||
354 | |||
355 | error = caam_reset_hw_jr(dev); | ||
356 | if (error) | ||
357 | return error; | ||
358 | |||
359 | jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH, | ||
360 | GFP_KERNEL | GFP_DMA); | ||
361 | jrp->outring = kzalloc(sizeof(struct jr_outentry) * | ||
362 | JOBR_DEPTH, GFP_KERNEL | GFP_DMA); | ||
363 | |||
364 | jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, | ||
365 | GFP_KERNEL); | ||
366 | |||
367 | if ((jrp->inpring == NULL) || (jrp->outring == NULL) || | ||
368 | (jrp->entinfo == NULL)) { | ||
369 | dev_err(dev, "can't allocate job rings for %d\n", | ||
370 | jrp->ridx); | ||
371 | return -ENOMEM; | ||
372 | } | ||
373 | |||
374 | for (i = 0; i < JOBR_DEPTH; i++) | ||
375 | jrp->entinfo[i].desc_addr_dma = !0; | ||
376 | |||
377 | /* Setup rings */ | ||
378 | inpbusaddr = dma_map_single(dev, jrp->inpring, | ||
379 | sizeof(u32 *) * JOBR_DEPTH, | ||
380 | DMA_BIDIRECTIONAL); | ||
381 | if (dma_mapping_error(dev, inpbusaddr)) { | ||
382 | dev_err(dev, "caam_jr_init(): can't map input ring\n"); | ||
383 | kfree(jrp->inpring); | ||
384 | kfree(jrp->outring); | ||
385 | kfree(jrp->entinfo); | ||
386 | return -EIO; | ||
387 | } | ||
388 | |||
389 | outbusaddr = dma_map_single(dev, jrp->outring, | ||
390 | sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
391 | DMA_BIDIRECTIONAL); | ||
392 | if (dma_mapping_error(dev, outbusaddr)) { | ||
393 | dev_err(dev, "caam_jr_init(): can't map output ring\n"); | ||
394 | dma_unmap_single(dev, inpbusaddr, | ||
395 | sizeof(u32 *) * JOBR_DEPTH, | ||
396 | DMA_BIDIRECTIONAL); | ||
397 | kfree(jrp->inpring); | ||
398 | kfree(jrp->outring); | ||
399 | kfree(jrp->entinfo); | ||
400 | return -EIO; | ||
401 | } | ||
402 | |||
403 | jrp->inp_ring_write_index = 0; | ||
404 | jrp->out_ring_read_index = 0; | ||
405 | jrp->head = 0; | ||
406 | jrp->tail = 0; | ||
407 | |||
408 | wr_reg64(&jrp->rregs->inpring_base, inpbusaddr); | ||
409 | wr_reg64(&jrp->rregs->outring_base, outbusaddr); | ||
410 | wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); | ||
411 | wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH); | ||
412 | |||
413 | jrp->ringsize = JOBR_DEPTH; | ||
414 | |||
415 | spin_lock_init(&jrp->inplock); | ||
416 | spin_lock_init(&jrp->outlock); | ||
417 | |||
418 | /* Select interrupt coalescing parameters */ | ||
419 | setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC | | ||
420 | (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | | ||
421 | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); | ||
422 | |||
423 | jrp->assign = JOBR_UNASSIGNED; | ||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | /* | ||
428 | * Shutdown JobR independent of platform property code | ||
429 | */ | ||
430 | int caam_jr_shutdown(struct device *dev) | ||
431 | { | ||
432 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
433 | dma_addr_t inpbusaddr, outbusaddr; | ||
434 | int ret, i; | ||
435 | |||
436 | ret = caam_reset_hw_jr(dev); | ||
437 | |||
438 | for_each_possible_cpu(i) | ||
439 | tasklet_kill(&jrp->irqtask[i]); | ||
440 | |||
441 | /* Release interrupt */ | ||
442 | free_irq(jrp->irq, dev); | ||
443 | |||
444 | /* Free rings */ | ||
445 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); | ||
446 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); | ||
447 | dma_unmap_single(dev, outbusaddr, | ||
448 | sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
449 | DMA_BIDIRECTIONAL); | ||
450 | dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH, | ||
451 | DMA_BIDIRECTIONAL); | ||
452 | kfree(jrp->outring); | ||
453 | kfree(jrp->inpring); | ||
454 | kfree(jrp->entinfo); | ||
455 | |||
456 | return ret; | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * Probe routine for each detected JobR subsystem. It assumes that | ||
461 | * property detection was picked up externally. | ||
462 | */ | ||
463 | int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | ||
464 | int ring) | ||
465 | { | ||
466 | struct device *ctrldev, *jrdev; | ||
467 | struct platform_device *jr_pdev; | ||
468 | struct caam_drv_private *ctrlpriv; | ||
469 | struct caam_drv_private_jr *jrpriv; | ||
470 | u32 *jroffset; | ||
471 | int error; | ||
472 | |||
473 | ctrldev = &pdev->dev; | ||
474 | ctrlpriv = dev_get_drvdata(ctrldev); | ||
475 | |||
476 | jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), | ||
477 | GFP_KERNEL); | ||
478 | if (jrpriv == NULL) { | ||
479 | dev_err(ctrldev, "can't alloc private mem for job ring %d\n", | ||
480 | ring); | ||
481 | return -ENOMEM; | ||
482 | } | ||
483 | jrpriv->parentdev = ctrldev; /* point back to parent */ | ||
484 | jrpriv->ridx = ring; /* save ring identity relative to detection */ | ||
485 | |||
486 | /* | ||
487 | * Derive a pointer to the detected JobR's regs | ||
488 | * Driver has already iomapped the entire space, we just | ||
489 | * need to add in the offset to this JobR. Don't know if I | ||
490 | * like this long-term, but it'll run | ||
491 | */ | ||
492 | jroffset = (u32 *)of_get_property(np, "reg", NULL); | ||
493 | jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl | ||
494 | + *jroffset); | ||
495 | |||
496 | /* Build a local dev for each detected queue */ | ||
497 | jr_pdev = of_platform_device_create(np, NULL, ctrldev); | ||
498 | if (jr_pdev == NULL) { | ||
499 | kfree(jrpriv); | ||
500 | return -EINVAL; | ||
501 | } | ||
502 | jrdev = &jr_pdev->dev; | ||
503 | dev_set_drvdata(jrdev, jrpriv); | ||
504 | ctrlpriv->jrdev[ring] = jrdev; | ||
505 | |||
506 | /* Identify the interrupt */ | ||
507 | jrpriv->irq = of_irq_to_resource(np, 0, NULL); | ||
508 | |||
509 | /* Now do the platform independent part */ | ||
510 | error = caam_jr_init(jrdev); /* now turn on hardware */ | ||
511 | if (error) { | ||
512 | kfree(jrpriv); | ||
513 | return error; | ||
514 | } | ||
515 | |||
516 | return error; | ||
517 | } | ||
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h new file mode 100644 index 000000000000..c23df395b622 --- /dev/null +++ b/drivers/crypto/caam/jr.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * CAAM public-level include definitions for the JobR backend | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | */ | ||
6 | |||
7 | #ifndef JR_H | ||
8 | #define JR_H | ||
9 | |||
10 | /* Prototypes for backend-level services exposed to APIs */ | ||
11 | int caam_jr_register(struct device *ctrldev, struct device **rdev); | ||
12 | int caam_jr_deregister(struct device *rdev); | ||
13 | int caam_jr_enqueue(struct device *dev, u32 *desc, | ||
14 | void (*cbk)(struct device *dev, u32 *desc, u32 status, | ||
15 | void *areq), | ||
16 | void *areq); | ||
17 | |||
18 | extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | ||
19 | int ring); | ||
20 | extern int caam_jr_shutdown(struct device *dev); | ||
21 | #endif /* JR_H */ | ||
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h new file mode 100644 index 000000000000..aee394e39056 --- /dev/null +++ b/drivers/crypto/caam/regs.h | |||
@@ -0,0 +1,663 @@ | |||
1 | /* | ||
2 | * CAAM hardware register-level view | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | */ | ||
6 | |||
7 | #ifndef REGS_H | ||
8 | #define REGS_H | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | #include <linux/io.h> | ||
12 | |||
13 | /* | ||
14 | * Architecture-specific register access methods | ||
15 | * | ||
16 | * CAAM's bus-addressable registers are 64 bits internally. | ||
17 | * They have been wired to be safely accessible on 32-bit | ||
18 | * architectures, however. Registers were organized such | ||
19 | * that (a) they can be contained in 32 bits, (b) if not, then they | ||
20 | * can be treated as two 32-bit entities, or finally (c) if they | ||
21 | * must be treated as a single 64-bit value, then this can safely | ||
22 | * be done with two 32-bit cycles. | ||
23 | * | ||
24 | * For 32-bit operations on 64-bit values, CAAM follows the same | ||
25 | * 64-bit register access conventions as its predecessors, in that | ||
26 | * writes are "triggered" by a write to the register at the numerically | ||
27 | * higher address, thus, a full 64-bit write cycle requires a write | ||
28 | * to the lower address, followed by a write to the higher address, | ||
29 | * which will latch/execute the write cycle. | ||
30 | * | ||
31 | * For example, let's assume a SW reset of CAAM through the master | ||
32 | * configuration register. | ||
33 | * - SWRST is in bit 31 of MCFG. | ||
34 | * - MCFG begins at base+0x0000. | ||
35 | * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower) | ||
36 | * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher) | ||
37 | * | ||
38 | * (and on Power, the convention is 0-31, 32-63, I know...) | ||
39 | * | ||
40 | * Assuming a 64-bit write to this MCFG to perform a software reset | ||
41 | * would then require a write of 0 to base+0x0000, followed by a | ||
42 | * write of 0x80000000 to base+0x0004, which would "execute" the | ||
43 | * reset. | ||
44 | * | ||
45 | * Of course, since MCFG 63-32 is all zero, we could cheat and simply | ||
46 | * write 0x80000000 to base+0x0004, and the reset would work fine. | ||
47 | * However, since CAAM does contain some write-and-read-intended | ||
48 | * 64-bit registers, this code defines 64-bit access methods for | ||
49 | * the sake of internal consistency and simplicity, and so that a | ||
50 | * clean transition to 64-bit is possible when it becomes necessary. | ||
51 | * | ||
52 | * There are limitations to this that the developer must recognize. | ||
53 | * 32-bit architectures cannot enforce an atomic 64-bit operation. | ||
54 | * Therefore: | ||
55 | * | ||
56 | * - On writes, since the HW is assumed to latch the cycle on the | ||
57 | * write of the higher-numeric-address word, then ordered | ||
58 | * writes work OK. | ||
59 | * | ||
60 | * - For reads, where a register contains a relevant value of more | ||
61 | * than 32 bits, the hardware employs logic to latch the other | ||
62 | * "half" of the data until read, ensuring an accurate value. | ||
63 | * This is of particular relevance when dealing with CAAM's | ||
64 | * performance counters. | ||
65 | * | ||
66 | */ | ||
67 | |||
68 | #ifdef __BIG_ENDIAN | ||
69 | #define wr_reg32(reg, data) out_be32(reg, data) | ||
70 | #define rd_reg32(reg) in_be32(reg) | ||
71 | #ifdef CONFIG_64BIT | ||
72 | #define wr_reg64(reg, data) out_be64(reg, data) | ||
73 | #define rd_reg64(reg) in_be64(reg) | ||
74 | #endif | ||
75 | #else | ||
76 | #ifdef __LITTLE_ENDIAN | ||
77 | #define wr_reg32(reg, data) __raw_writel(data, reg) | ||
78 | #define rd_reg32(reg) __raw_readl(reg) | ||
79 | #ifdef CONFIG_64BIT | ||
80 | #define wr_reg64(reg, data) __raw_writeq(data, reg) | ||
81 | #define rd_reg64(reg) __raw_readq(reg) | ||
82 | #endif | ||
83 | #endif | ||
84 | #endif | ||
85 | |||
86 | #ifndef CONFIG_64BIT | ||
87 | static inline void wr_reg64(u64 __iomem *reg, u64 data) | ||
88 | { | ||
89 | wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); | ||
90 | wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull); | ||
91 | } | ||
92 | |||
93 | static inline u64 rd_reg64(u64 __iomem *reg) | ||
94 | { | ||
95 | return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | | ||
96 | ((u64)rd_reg32((u32 __iomem *)reg + 1)); | ||
97 | } | ||
98 | #endif | ||
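To make the walkthrough above concrete, here is a sketch of the MCFG software reset as two explicit 32-bit stores, which is exactly what wr_reg64() performs on a 32-bit build. The function name is hypothetical; base is the assumed iomapped register base, and MCFGR_SWRESET is defined later in this file.

static inline void example_mcfg_sw_reset(void __iomem *base)
{
	wr_reg32((u32 __iomem *)base, 0);		  /* MCFG bits 63-32 */
	wr_reg32((u32 __iomem *)base + 1, MCFGR_SWRESET); /* latches cycle */
}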
99 | |||
100 | /* | ||
101 | * jr_outentry | ||
102 | * Represents each entry in a JobR output ring | ||
103 | */ | ||
104 | struct jr_outentry { | ||
105 | dma_addr_t desc;/* Pointer to completed descriptor */ | ||
106 | u32 jrstatus; /* Status for completed descriptor */ | ||
107 | } __packed; | ||
108 | |||
109 | /* | ||
110 | * caam_perfmon - Performance Monitor/Secure Memory Status/ | ||
111 | * CAAM Global Status/Component Version IDs | ||
112 | * | ||
113 | * Spans f00-fff wherever instantiated | ||
114 | */ | ||
115 | |||
116 | /* Number of DECOs */ | ||
117 | #define CHA_NUM_DECONUM_SHIFT 56 | ||
118 | #define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT) | ||
119 | |||
120 | struct caam_perfmon { | ||
121 | /* Performance Monitor Registers f00-f9f */ | ||
122 | u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */ | ||
123 | u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */ | ||
124 | u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests */ | ||
125 | u64 ob_enc_bytes; /* PC_OB_ENCRYPT - Outbound Bytes Encrypted */ | ||
126 | u64 ob_prot_bytes; /* PC_OB_PROTECT - Outbound Bytes Protected */ | ||
127 | u64 ib_dec_bytes; /* PC_IB_DECRYPT - Inbound Bytes Decrypted */ | ||
128 | u64 ib_valid_bytes; /* PC_IB_VALIDATED Inbound Bytes Validated */ | ||
129 | u64 rsvd[13]; | ||
130 | |||
131 | /* CAAM Hardware Instantiation Parameters fa0-fbf */ | ||
132 | u64 cha_rev; /* CRNR - CHA Revision Number */ | ||
133 | #define CTPR_QI_SHIFT 57 | ||
134 | #define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT) | ||
135 | u64 comp_parms; /* CTPR - Compile Parameters Register */ | ||
136 | u64 rsvd1[2]; | ||
137 | |||
138 | /* CAAM Global Status fc0-fdf */ | ||
139 | u64 faultaddr; /* FAR - Fault Address */ | ||
140 | u32 faultliodn; /* FALR - Fault Address LIODN */ | ||
141 | u32 faultdetail; /* FADR - Fault Addr Detail */ | ||
142 | u32 rsvd2; | ||
143 | u32 status; /* CSTA - CAAM Status */ | ||
144 | u64 rsvd3; | ||
145 | |||
146 | /* Component Instantiation Parameters fe0-fff */ | ||
147 | u32 rtic_id; /* RVID - RTIC Version ID */ | ||
148 | u32 ccb_id; /* CCBVID - CCB Version ID */ | ||
149 | u64 cha_id; /* CHAVID - CHA Version ID */ | ||
150 | u64 cha_num; /* CHANUM - CHA Number */ | ||
151 | u64 caam_id; /* CAAMVID - CAAM Version ID */ | ||
152 | }; | ||
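The perfmon counters are the 64-bit read-intended registers the access-method comment calls out; a sketch (hypothetical helper name) of reading one coherently via rd_reg64(), which is safe even on 32-bit builds thanks to the hardware latch:

static inline u64 example_ob_enc_bytes(struct caam_perfmon __iomem *pm)
{
	return rd_reg64(&pm->ob_enc_bytes);
}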
153 | |||
154 | /* LIODN programming for DMA configuration */ | ||
155 | #define MSTRID_LOCK_LIODN 0x80000000 | ||
156 | #define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */ | ||
157 | |||
158 | #define MSTRID_LIODN_MASK 0x0fff | ||
159 | struct masterid { | ||
160 | u32 liodn_ms; /* lock and make-trusted control bits */ | ||
161 | u32 liodn_ls; /* LIODN for non-sequence and seq access */ | ||
162 | }; | ||
163 | |||
164 | /* Partition ID for DMA configuration */ | ||
165 | struct partid { | ||
166 | u32 rsvd1; | ||
167 | u32 pidr; /* partition ID, DECO */ | ||
168 | }; | ||
169 | |||
170 | /* RNG test mode (replicated twice in some configurations) */ | ||
171 | /* Padded out to 0x100 */ | ||
172 | struct rngtst { | ||
173 | u32 mode; /* RTSTMODEx - Test mode */ | ||
174 | u32 rsvd1[3]; | ||
175 | u32 reset; /* RTSTRESETx - Test reset control */ | ||
176 | u32 rsvd2[3]; | ||
177 | u32 status; /* RTSTSSTATUSx - Test status */ | ||
178 | u32 rsvd3; | ||
179 | u32 errstat; /* RTSTERRSTATx - Test error status */ | ||
180 | u32 rsvd4; | ||
181 | u32 errctl; /* RTSTERRCTLx - Test error control */ | ||
182 | u32 rsvd5; | ||
183 | u32 entropy; /* RTSTENTROPYx - Test entropy */ | ||
184 | u32 rsvd6[15]; | ||
185 | u32 verifctl; /* RTSTVERIFCTLx - Test verification control */ | ||
186 | u32 rsvd7; | ||
187 | u32 verifstat; /* RTSTVERIFSTATx - Test verification status */ | ||
188 | u32 rsvd8; | ||
189 | u32 verifdata; /* RTSTVERIFDx - Test verification data */ | ||
190 | u32 rsvd9; | ||
191 | u32 xkey; /* RTSTXKEYx - Test XKEY */ | ||
192 | u32 rsvd10; | ||
193 | u32 oscctctl; /* RTSTOSCCTCTLx - Test osc. counter control */ | ||
194 | u32 rsvd11; | ||
195 | u32 oscct; /* RTSTOSCCTx - Test oscillator counter */ | ||
196 | u32 rsvd12; | ||
197 | u32 oscctstat; /* RTSTODCCTSTATx - Test osc counter status */ | ||
198 | u32 rsvd13[2]; | ||
199 | u32 ofifo[4]; /* RTSTOFIFOx - Test output FIFO */ | ||
200 | u32 rsvd14[15]; | ||
201 | }; | ||
202 | |||
203 | /* | ||
204 | * caam_ctrl - basic core configuration | ||
205 | * starts base + 0x0000 padded out to 0x1000 | ||
206 | */ | ||
207 | |||
208 | #define KEK_KEY_SIZE 8 | ||
209 | #define TKEK_KEY_SIZE 8 | ||
210 | #define TDSK_KEY_SIZE 8 | ||
211 | |||
212 | #define DECO_RESET 1 /* Use with DECO reset/availability regs */ | ||
213 | #define DECO_RESET_0 (DECO_RESET << 0) | ||
214 | #define DECO_RESET_1 (DECO_RESET << 1) | ||
215 | #define DECO_RESET_2 (DECO_RESET << 2) | ||
216 | #define DECO_RESET_3 (DECO_RESET << 3) | ||
217 | #define DECO_RESET_4 (DECO_RESET << 4) | ||
218 | |||
219 | struct caam_ctrl { | ||
220 | /* Basic Configuration Section 000-01f */ | ||
221 | /* Read/Writable */ | ||
222 | u32 rsvd1; | ||
223 | u32 mcr; /* MCFG Master Config Register */ | ||
224 | u32 rsvd2[2]; | ||
225 | |||
226 | /* Bus Access Configuration Section 010-11f */ | ||
227 | /* Read/Writable */ | ||
228 | struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */ | ||
229 | u32 rsvd3[12]; | ||
230 | struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */ | ||
231 | u32 rsvd4[7]; | ||
232 | u32 deco_rq; /* DECORR - DECO Request */ | ||
233 | struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ | ||
234 | u32 rsvd5[22]; | ||
235 | |||
236 | /* DECO Availability/Reset Section 120-3ff */ | ||
237 | u32 deco_avail; /* DAR - DECO availability */ | ||
238 | u32 deco_reset; /* DRR - DECO reset */ | ||
239 | u32 rsvd6[182]; | ||
240 | |||
241 | /* Key Encryption/Decryption Configuration 400-5ff */ | ||
242 | /* Read/Writable only while in Non-secure mode */ | ||
243 | u32 kek[KEK_KEY_SIZE]; /* JDKEKR - Key Encryption Key */ | ||
244 | u32 tkek[TKEK_KEY_SIZE]; /* TDKEKR - Trusted Desc KEK */ | ||
245 | u32 tdsk[TDSK_KEY_SIZE]; /* TDSKR - Trusted Desc Signing Key */ | ||
246 | u32 rsvd7[32]; | ||
247 | u64 sknonce; /* SKNR - Secure Key Nonce */ | ||
248 | u32 rsvd8[70]; | ||
249 | |||
250 | /* RNG Test/Verification/Debug Access 600-7ff */ | ||
251 | /* (Useful in Test/Debug modes only...) */ | ||
252 | struct rngtst rtst[2]; | ||
253 | |||
254 | u32 rsvd9[448]; | ||
255 | |||
256 | /* Performance Monitor f00-fff */ | ||
257 | struct caam_perfmon perfmon; | ||
258 | }; | ||
259 | |||
260 | /* | ||
261 | * Controller master config register defs | ||
262 | */ | ||
263 | #define MCFGR_SWRESET 0x80000000 /* software reset */ | ||
264 | #define MCFGR_WDENABLE 0x40000000 /* DECO watchdog enable */ | ||
265 | #define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */ | ||
266 | #define MCFGR_DMA_RESET 0x10000000 | ||
267 | #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ | ||
268 | |||
269 | /* AXI read cache control */ | ||
270 | #define MCFGR_ARCACHE_SHIFT 12 | ||
271 | #define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT) | ||
272 | |||
273 | /* AXI write cache control */ | ||
274 | #define MCFGR_AWCACHE_SHIFT 8 | ||
275 | #define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT) | ||
276 | |||
277 | /* AXI pipeline depth */ | ||
278 | #define MCFGR_AXIPIPE_SHIFT 4 | ||
279 | #define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT) | ||
280 | |||
281 | #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ | ||
282 | #define MCFGR_BURST_64 0x00000001 /* Max burst size */ | ||
283 | |||
284 | /* | ||
285 | * caam_job_ring - direct job ring setup | ||
286 | * 1-4 possible per instantiation, base + 1000/2000/3000/4000 | ||
287 | * Padded out to 0x1000 | ||
288 | */ | ||
289 | struct caam_job_ring { | ||
290 | /* Input ring */ | ||
291 | u64 inpring_base; /* IRBAx - Input desc ring baseaddr */ | ||
292 | u32 rsvd1; | ||
293 | u32 inpring_size; /* IRSx - Input ring size */ | ||
294 | u32 rsvd2; | ||
295 | u32 inpring_avail; /* IRSAx - Input ring room remaining */ | ||
296 | u32 rsvd3; | ||
297 | u32 inpring_jobadd; /* IRJAx - Input ring jobs added */ | ||
298 | |||
299 | /* Output Ring */ | ||
300 | u64 outring_base; /* ORBAx - Output status ring base addr */ | ||
301 | u32 rsvd4; | ||
302 | u32 outring_size; /* ORSx - Output ring size */ | ||
303 | u32 rsvd5; | ||
304 | u32 outring_rmvd; /* ORJRx - Output ring jobs removed */ | ||
305 | u32 rsvd6; | ||
306 | u32 outring_used; /* ORSFx - Output ring slots full */ | ||
307 | |||
308 | /* Status/Configuration */ | ||
309 | u32 rsvd7; | ||
310 | u32 jroutstatus; /* JRSTAx - JobR output status */ | ||
311 | u32 rsvd8; | ||
312 | u32 jrintstatus; /* JRINTx - JobR interrupt status */ | ||
313 | u32 rconfig_hi; /* JRxCFG - Ring configuration */ | ||
314 | u32 rconfig_lo; | ||
315 | |||
316 | /* Indices. CAAM maintains these as "heads" of each queue */ | ||
317 | u32 rsvd9; | ||
318 | u32 inp_rdidx; /* IRRIx - Input ring read index */ | ||
319 | u32 rsvd10; | ||
320 | u32 out_wtidx; /* ORWIx - Output ring write index */ | ||
321 | |||
322 | /* Command/control */ | ||
323 | u32 rsvd11; | ||
324 | u32 jrcommand; /* JRCRx - JobR command */ | ||
325 | |||
326 | u32 rsvd12[932]; | ||
327 | |||
328 | /* Performance Monitor f00-fff */ | ||
329 | struct caam_perfmon perfmon; | ||
330 | }; | ||
331 | |||
332 | #define JR_RINGSIZE_MASK 0x03ff | ||
333 | /* | ||
334 | * jrstatus - Job Ring Output Status | ||
335 | * All values in lo word | ||
336 | * Also note, same values written out as status through QI | ||
337 | * in the command/status field of a frame descriptor | ||
338 | */ | ||
339 | #define JRSTA_SSRC_SHIFT 28 | ||
340 | #define JRSTA_SSRC_MASK 0xf0000000 | ||
341 | |||
342 | #define JRSTA_SSRC_NONE 0x00000000 | ||
343 | #define JRSTA_SSRC_CCB_ERROR 0x20000000 | ||
344 | #define JRSTA_SSRC_JUMP_HALT_USER 0x30000000 | ||
345 | #define JRSTA_SSRC_DECO 0x40000000 | ||
346 | #define JRSTA_SSRC_JRERROR 0x60000000 | ||
347 | #define JRSTA_SSRC_JUMP_HALT_CC 0x70000000 | ||
348 | |||
349 | #define JRSTA_DECOERR_JUMP 0x08000000 | ||
350 | #define JRSTA_DECOERR_INDEX_SHIFT 8 | ||
351 | #define JRSTA_DECOERR_INDEX_MASK 0xff00 | ||
352 | #define JRSTA_DECOERR_ERROR_MASK 0x00ff | ||
353 | |||
354 | #define JRSTA_DECOERR_NONE 0x00 | ||
355 | #define JRSTA_DECOERR_LINKLEN 0x01 | ||
356 | #define JRSTA_DECOERR_LINKPTR 0x02 | ||
357 | #define JRSTA_DECOERR_JRCTRL 0x03 | ||
358 | #define JRSTA_DECOERR_DESCCMD 0x04 | ||
359 | #define JRSTA_DECOERR_ORDER 0x05 | ||
360 | #define JRSTA_DECOERR_KEYCMD 0x06 | ||
361 | #define JRSTA_DECOERR_LOADCMD 0x07 | ||
362 | #define JRSTA_DECOERR_STORECMD 0x08 | ||
363 | #define JRSTA_DECOERR_OPCMD 0x09 | ||
364 | #define JRSTA_DECOERR_FIFOLDCMD 0x0a | ||
365 | #define JRSTA_DECOERR_FIFOSTCMD 0x0b | ||
366 | #define JRSTA_DECOERR_MOVECMD 0x0c | ||
367 | #define JRSTA_DECOERR_JUMPCMD 0x0d | ||
368 | #define JRSTA_DECOERR_MATHCMD 0x0e | ||
369 | #define JRSTA_DECOERR_SHASHCMD 0x0f | ||
370 | #define JRSTA_DECOERR_SEQCMD 0x10 | ||
371 | #define JRSTA_DECOERR_DECOINTERNAL 0x11 | ||
372 | #define JRSTA_DECOERR_SHDESCHDR 0x12 | ||
373 | #define JRSTA_DECOERR_HDRLEN 0x13 | ||
374 | #define JRSTA_DECOERR_BURSTER 0x14 | ||
375 | #define JRSTA_DECOERR_DESCSIGNATURE 0x15 | ||
376 | #define JRSTA_DECOERR_DMA 0x16 | ||
377 | #define JRSTA_DECOERR_BURSTFIFO 0x17 | ||
378 | #define JRSTA_DECOERR_JRRESET 0x1a | ||
379 | #define JRSTA_DECOERR_JOBFAIL 0x1b | ||
380 | #define JRSTA_DECOERR_DNRERR 0x80 | ||
381 | #define JRSTA_DECOERR_UNDEFPCL 0x81 | ||
382 | #define JRSTA_DECOERR_PDBERR 0x82 | ||
383 | #define JRSTA_DECOERR_ANRPLY_LATE 0x83 | ||
384 | #define JRSTA_DECOERR_ANRPLY_REPLAY 0x84 | ||
385 | #define JRSTA_DECOERR_SEQOVF 0x85 | ||
386 | #define JRSTA_DECOERR_INVSIGN 0x86 | ||
387 | #define JRSTA_DECOERR_DSASIGN 0x87 | ||
388 | |||
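A sketch of pulling the DECO error fields out of a JRSTA word with the masks above, mirroring what report_deco_status() does in error.c (helper name hypothetical):

static inline void example_decode_deco(u32 status)
{
	u32 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
		  JRSTA_DECOERR_INDEX_SHIFT;
	u8 err = status & JRSTA_DECOERR_ERROR_MASK;

	pr_info("deco err 0x%02x at %s index %u\n", err,
		(status & JRSTA_DECOERR_JUMP) ? "jump target" : "descriptor",
		idx);
}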
389 | #define JRSTA_CCBERR_JUMP 0x08000000 | ||
390 | #define JRSTA_CCBERR_INDEX_MASK 0xff00 | ||
391 | #define JRSTA_CCBERR_INDEX_SHIFT 8 | ||
392 | #define JRSTA_CCBERR_CHAID_MASK 0x00f0 | ||
393 | #define JRSTA_CCBERR_CHAID_SHIFT 4 | ||
394 | #define JRSTA_CCBERR_ERRID_MASK 0x000f | ||
395 | |||
396 | #define JRSTA_CCBERR_CHAID_AES (0x01 << JRSTA_CCBERR_CHAID_SHIFT) | ||
397 | #define JRSTA_CCBERR_CHAID_DES (0x02 << JRSTA_CCBERR_CHAID_SHIFT) | ||
398 | #define JRSTA_CCBERR_CHAID_ARC4 (0x03 << JRSTA_CCBERR_CHAID_SHIFT) | ||
399 | #define JRSTA_CCBERR_CHAID_MD (0x04 << JRSTA_CCBERR_CHAID_SHIFT) | ||
400 | #define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT) | ||
401 | #define JRSTA_CCBERR_CHAID_SNOW (0x06 << JRSTA_CCBERR_CHAID_SHIFT) | ||
402 | #define JRSTA_CCBERR_CHAID_KASUMI (0x07 << JRSTA_CCBERR_CHAID_SHIFT) | ||
403 | #define JRSTA_CCBERR_CHAID_PK (0x08 << JRSTA_CCBERR_CHAID_SHIFT) | ||
404 | #define JRSTA_CCBERR_CHAID_CRC (0x09 << JRSTA_CCBERR_CHAID_SHIFT) | ||
405 | |||
406 | #define JRSTA_CCBERR_ERRID_NONE 0x00 | ||
407 | #define JRSTA_CCBERR_ERRID_MODE 0x01 | ||
408 | #define JRSTA_CCBERR_ERRID_DATASIZ 0x02 | ||
409 | #define JRSTA_CCBERR_ERRID_KEYSIZ 0x03 | ||
410 | #define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04 | ||
411 | #define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05 | ||
412 | #define JRSTA_CCBERR_ERRID_SEQUENCE 0x06 | ||
413 | #define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07 | ||
414 | #define JRSTA_CCBERR_ERRID_PKMODEVN 0x08 | ||
415 | #define JRSTA_CCBERR_ERRID_KEYPARIT 0x09 | ||
416 | #define JRSTA_CCBERR_ERRID_ICVCHK 0x0a | ||
417 | #define JRSTA_CCBERR_ERRID_HARDWARE 0x0b | ||
418 | #define JRSTA_CCBERR_ERRID_CCMAAD 0x0c | ||
419 | #define JRSTA_CCBERR_ERRID_INVCHA 0x0f | ||
420 | |||
421 | #define JRINT_ERR_INDEX_MASK 0x3fff0000 | ||
422 | #define JRINT_ERR_INDEX_SHIFT 16 | ||
423 | #define JRINT_ERR_TYPE_MASK 0xf00 | ||
424 | #define JRINT_ERR_TYPE_SHIFT 8 | ||
425 | #define JRINT_ERR_HALT_MASK 0xc | ||
426 | #define JRINT_ERR_HALT_SHIFT 2 | ||
427 | #define JRINT_ERR_HALT_INPROGRESS 0x4 | ||
428 | #define JRINT_ERR_HALT_COMPLETE 0x8 | ||
429 | #define JRINT_JR_ERROR 0x02 | ||
430 | #define JRINT_JR_INT 0x01 | ||
431 | |||
432 | #define JRINT_ERR_TYPE_WRITE 1 | ||
433 | #define JRINT_ERR_TYPE_BAD_INPADDR 3 | ||
434 | #define JRINT_ERR_TYPE_BAD_OUTADDR 4 | ||
435 | #define JRINT_ERR_TYPE_INV_INPWRT 5 | ||
436 | #define JRINT_ERR_TYPE_INV_OUTWRT 6 | ||
437 | #define JRINT_ERR_TYPE_RESET 7 | ||
438 | #define JRINT_ERR_TYPE_REMOVE_OFL 8 | ||
439 | #define JRINT_ERR_TYPE_ADD_OFL 9 | ||
440 | |||
441 | #define JRCFG_SOE 0x04 | ||
442 | #define JRCFG_ICEN 0x02 | ||
443 | #define JRCFG_IMSK 0x01 | ||
444 | #define JRCFG_ICDCT_SHIFT 8 | ||
445 | #define JRCFG_ICTT_SHIFT 16 | ||
446 | |||
447 | #define JRCR_RESET 0x01 | ||
448 | |||
449 | /* | ||
450 | * caam_assurance - Assurance Controller View | ||
451 | * base + 0x6000 padded out to 0x1000 | ||
452 | */ | ||
453 | |||
454 | struct rtic_element { | ||
455 | u64 address; | ||
456 | u32 rsvd; | ||
457 | u32 length; | ||
458 | }; | ||
459 | |||
460 | struct rtic_block { | ||
461 | struct rtic_element element[2]; | ||
462 | }; | ||
463 | |||
464 | struct rtic_memhash { | ||
465 | u32 memhash_be[32]; | ||
466 | u32 memhash_le[32]; | ||
467 | }; | ||
468 | |||
469 | struct caam_assurance { | ||
470 | /* Status/Command/Watchdog */ | ||
471 | u32 rsvd1; | ||
472 | u32 status; /* RSTA - Status */ | ||
473 | u32 rsvd2; | ||
474 | u32 cmd; /* RCMD - Command */ | ||
475 | u32 rsvd3; | ||
476 | u32 ctrl; /* RCTL - Control */ | ||
477 | u32 rsvd4; | ||
478 | u32 throttle; /* RTHR - Throttle */ | ||
479 | u32 rsvd5[2]; | ||
480 | u64 watchdog; /* RWDOG - Watchdog Timer */ | ||
481 | u32 rsvd6; | ||
482 | u32 rend; /* REND - Endian corrections */ | ||
483 | u32 rsvd7[50]; | ||
484 | |||
485 | /* Block access/configuration @ 100/110/120/130 */ | ||
486 | struct rtic_block memblk[4]; /* Memory Blocks A-D */ | ||
487 | u32 rsvd8[32]; | ||
488 | |||
489 | /* Block hashes @ 200/300/400/500 */ | ||
490 | struct rtic_memhash hash[4]; /* Block hash values A-D */ | ||
491 | u32 rsvd_3[640]; | ||
492 | }; | ||
493 | |||
494 | /* | ||
495 | * caam_queue_if - QI configuration and control | ||
496 | * starts base + 0x7000, padded out to 0x1000 long | ||
497 | */ | ||
498 | |||
499 | struct caam_queue_if { | ||
500 | u32 qi_control_hi; /* QICTL - QI Control */ | ||
501 | u32 qi_control_lo; | ||
502 | u32 rsvd1; | ||
503 | u32 qi_status; /* QISTA - QI Status */ | ||
504 | u32 qi_deq_cfg_hi; /* QIDQC - QI Dequeue Configuration */ | ||
505 | u32 qi_deq_cfg_lo; | ||
506 | u32 qi_enq_cfg_hi; /* QISEQC - QI Enqueue Command */ | ||
507 | u32 qi_enq_cfg_lo; | ||
508 | u32 rsvd2[1016]; | ||
509 | }; | ||
510 | |||
511 | /* QI control bits - low word */ | ||
512 | #define QICTL_DQEN 0x01 /* Enable frame pop */ | ||
513 | #define QICTL_STOP 0x02 /* Stop dequeue/enqueue */ | ||
514 | #define QICTL_SOE 0x04 /* Stop on error */ | ||
515 | |||
516 | /* QI control bits - high word */ | ||
517 | #define QICTL_MBSI 0x01 | ||
518 | #define QICTL_MHWSI 0x02 | ||
519 | #define QICTL_MWSI 0x04 | ||
520 | #define QICTL_MDWSI 0x08 | ||
521 | #define QICTL_CBSI 0x10 /* CtrlDataByteSwapInput */ | ||
522 | #define QICTL_CHWSI 0x20 /* CtrlDataHalfSwapInput */ | ||
523 | #define QICTL_CWSI 0x40 /* CtrlDataWordSwapInput */ | ||
524 | #define QICTL_CDWSI 0x80 /* CtrlDataDWordSwapInput */ | ||
525 | #define QICTL_MBSO 0x0100 | ||
526 | #define QICTL_MHWSO 0x0200 | ||
527 | #define QICTL_MWSO 0x0400 | ||
528 | #define QICTL_MDWSO 0x0800 | ||
529 | #define QICTL_CBSO 0x1000 /* CtrlDataByteSwapOutput */ | ||
530 | #define QICTL_CHWSO 0x2000 /* CtrlDataHalfSwapOutput */ | ||
531 | #define QICTL_CWSO 0x4000 /* CtrlDataWordSwapOutput */ | ||
532 | #define QICTL_CDWSO 0x8000 /* CtrlDataDWordSwapOutput */ | ||
533 | #define QICTL_DMBS 0x010000 | ||
534 | #define QICTL_EPO 0x020000 | ||
535 | |||
536 | /* QI status bits */ | ||
537 | #define QISTA_PHRDERR 0x01 /* PreHeader Read Error */ | ||
538 | #define QISTA_CFRDERR 0x02 /* Compound Frame Read Error */ | ||
539 | #define QISTA_OFWRERR 0x04 /* Output Frame Write Error */ | ||
540 | #define QISTA_BPDERR 0x08 /* Buffer Pool Depleted */ | ||
541 | #define QISTA_BTSERR 0x10 /* Buffer Undersize */ | ||
542 | #define QISTA_CFWRERR 0x20 /* Compound Frame Write Err */ | ||
543 | #define QISTA_STOPD 0x80000000 /* QI Stopped (see QICTL) */ | ||
544 | |||
545 | /* deco_sg_table - DECO view of scatter/gather table */ | ||
546 | struct deco_sg_table { | ||
547 | u64 addr; /* Segment Address */ | ||
548 | u32 elen; /* E, F bits + 30-bit length */ | ||
549 | u32 bpid_offset; /* Buffer Pool ID + 16-bit offset */ | ||
550 | }; | ||
551 | |||
552 | /* | ||
553 | * caam_deco - descriptor controller - CHA cluster block | ||
554 | * | ||
555 | * Only accessible when direct DECO access is turned on | ||
556 | * (done in DECORR, via MID programmed in DECOxMID) | ||
557 | * | ||
558 | * 5 typical, base + 0x8000/9000/a000/b000/c000 | ||
559 | * Padded out to 0x1000 long | ||
560 | */ | ||
561 | struct caam_deco { | ||
562 | u32 rsvd1; | ||
563 | u32 cls1_mode; /* CxC1MR - Class 1 Mode */ | ||
564 | u32 rsvd2; | ||
565 | u32 cls1_keysize; /* CxC1KSR - Class 1 Key Size */ | ||
566 | u32 cls1_datasize_hi; /* CxC1DSR - Class 1 Data Size */ | ||
567 | u32 cls1_datasize_lo; | ||
568 | u32 rsvd3; | ||
569 | u32 cls1_icvsize; /* CxC1ICVSR - Class 1 ICV size */ | ||
570 | u32 rsvd4[5]; | ||
571 | u32 cha_ctrl; /* CCTLR - CHA control */ | ||
572 | u32 rsvd5; | ||
573 | u32 irq_crtl; /* CxCIRQ - CCB interrupt done/error/clear */ | ||
574 | u32 rsvd6; | ||
575 | u32 clr_written; /* CxCWR - Clear-Written */ | ||
576 | u32 ccb_status_hi; /* CxCSTA - CCB Status/Error */ | ||
577 | u32 ccb_status_lo; | ||
578 | u32 rsvd7[3]; | ||
579 | u32 aad_size; /* CxAADSZR - Current AAD Size */ | ||
580 | u32 rsvd8; | ||
581 | u32 cls1_iv_size; /* CxC1IVSZR - Current Class 1 IV Size */ | ||
582 | u32 rsvd9[7]; | ||
583 | u32 pkha_a_size; /* PKASZRx - Size of PKHA A */ | ||
584 | u32 rsvd10; | ||
585 | u32 pkha_b_size; /* PKBSZRx - Size of PKHA B */ | ||
586 | u32 rsvd11; | ||
587 | u32 pkha_n_size; /* PKNSZRx - Size of PKHA N */ | ||
588 | u32 rsvd12; | ||
589 | u32 pkha_e_size; /* PKESZRx - Size of PKHA E */ | ||
590 | u32 rsvd13[24]; | ||
591 | u32 cls1_ctx[16]; /* CxC1CTXR - Class 1 Context @100 */ | ||
592 | u32 rsvd14[48]; | ||
593 | u32 cls1_key[8]; /* CxC1KEYR - Class 1 Key @200 */ | ||
594 | u32 rsvd15[121]; | ||
595 | u32 cls2_mode; /* CxC2MR - Class 2 Mode */ | ||
596 | u32 rsvd16; | ||
597 | u32 cls2_keysize; /* CxX2KSR - Class 2 Key Size */ | ||
598 | u32 cls2_datasize_hi; /* CxC2DSR - Class 2 Data Size */ | ||
599 | u32 cls2_datasize_lo; | ||
600 | u32 rsvd17; | ||
601 | u32 cls2_icvsize; /* CxC2ICVSZR - Class 2 ICV Size */ | ||
602 | u32 rsvd18[56]; | ||
603 | u32 cls2_ctx[18]; /* CxC2CTXR - Class 2 Context @500 */ | ||
604 | u32 rsvd19[46]; | ||
605 | u32 cls2_key[32]; /* CxC2KEYR - Class2 Key @600 */ | ||
606 | u32 rsvd20[84]; | ||
607 | u32 inp_infofifo_hi; /* CxIFIFO - Input Info FIFO @7d0 */ | ||
608 | u32 inp_infofifo_lo; | ||
609 | u32 rsvd21[2]; | ||
610 | u64 inp_datafifo; /* CxDFIFO - Input Data FIFO */ | ||
611 | u32 rsvd22[2]; | ||
612 | u64 out_datafifo; /* CxOFIFO - Output Data FIFO */ | ||
613 | u32 rsvd23[2]; | ||
614 | u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ | ||
615 | u32 jr_ctl_lo; | ||
616 | u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ | ||
617 | u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ | ||
618 | u32 op_status_lo; | ||
619 | u32 rsvd24[2]; | ||
620 | u32 liodn; /* DxLSR - DECO LIODN Status - non-seq */ | ||
621 | u32 td_liodn; /* DxLSR - DECO LIODN Status - trustdesc */ | ||
622 | u32 rsvd26[6]; | ||
623 | u64 math[4]; /* DxMTH - Math register */ | ||
624 | u32 rsvd27[8]; | ||
625 | struct deco_sg_table gthr_tbl[4]; /* DxGTR - Gather Tables */ | ||
626 | u32 rsvd28[16]; | ||
627 | struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */ | ||
628 | u32 rsvd29[48]; | ||
629 | u32 descbuf[64]; /* DxDESB - Descriptor buffer */ | ||
630 | u32 rsvd30[320]; | ||
631 | }; | ||
632 | |||
633 | /* | ||
634 | * Current top-level view of memory map is: | ||
635 | * | ||
636 | * 0x0000 - 0x0fff - CAAM Top-Level Control | ||
637 | * 0x1000 - 0x1fff - Job Ring 0 | ||
638 | * 0x2000 - 0x2fff - Job Ring 1 | ||
639 | * 0x3000 - 0x3fff - Job Ring 2 | ||
640 | * 0x4000 - 0x4fff - Job Ring 3 | ||
641 | * 0x5000 - 0x5fff - (unused) | ||
642 | * 0x6000 - 0x6fff - Assurance Controller | ||
643 | * 0x7000 - 0x7fff - Queue Interface | ||
644 | * 0x8000 - 0x8fff - DECO-CCB 0 | ||
645 | * 0x9000 - 0x9fff - DECO-CCB 1 | ||
646 | * 0xa000 - 0xafff - DECO-CCB 2 | ||
647 | * 0xb000 - 0xbfff - DECO-CCB 3 | ||
648 | * 0xc000 - 0xcfff - DECO-CCB 4 | ||
649 | * | ||
650 | * caam_full describes the full register view of CAAM, although many | ||
651 | * configurations may choose to implement parts of the register map | ||
652 | * separately, in differing privilege regions. | ||
653 | */ | ||
654 | struct caam_full { | ||
655 | struct caam_ctrl __iomem ctrl; | ||
656 | struct caam_job_ring jr[4]; | ||
657 | u64 rsvd[512]; | ||
658 | struct caam_assurance assure; | ||
659 | struct caam_queue_if qi; | ||
660 | struct caam_deco *deco; | ||
661 | }; | ||
662 | |||
663 | #endif /* REGS_H */ | ||
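The inline offsets in struct caam_deco (@100, @200, @500, @600, @800) and the "padded out to 0x1000" note are load-bearing, since five such blocks tile the 0x8000-0xcfff DECO-CCB window above. A host-side C11 sketch, not driver code, that would let a compiler check a few of them (it assumes the struct definitions from this header, including its deco_sg_table):

    #include <stddef.h>
    /* struct caam_deco as defined above */

    _Static_assert(offsetof(struct caam_deco, cls1_ctx)  == 0x100, "C1 ctx @100");
    _Static_assert(offsetof(struct caam_deco, cls1_key)  == 0x200, "C1 key @200");
    _Static_assert(offsetof(struct caam_deco, cls2_ctx)  == 0x500, "C2 ctx @500");
    _Static_assert(offsetof(struct caam_deco, cls2_key)  == 0x600, "C2 key @600");
    _Static_assert(offsetof(struct caam_deco, jr_ctl_hi) == 0x800, "JR ctl @800");
    _Static_assert(sizeof(struct caam_deco)  == 0x1000, "one DECO page");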
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index e449ac5627a5..a84250a5dd51 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c | |||
@@ -1467,7 +1467,7 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, | |||
1467 | return -EINVAL; | 1467 | return -EINVAL; |
1468 | 1468 | ||
1469 | while (size) { | 1469 | while (size) { |
1470 | copy = min(drest, min(size, dst->length)); | 1470 | copy = min3(drest, size, dst->length); |
1471 | 1471 | ||
1472 | size -= copy; | 1472 | size -= copy; |
1473 | drest -= copy; | 1473 | drest -= copy; |
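min3() from <linux/kernel.h> collapses the nested min() calls without changing the result; conceptually it is just the three-way minimum (the kernel macro additionally type-checks its arguments, so this stand-in is only illustrative):

    #define min3(x, y, z) ((x) < (y) ? ((x) < (z) ? (x) : (z)) \
                                     : ((y) < (z) ? (y) : (z)))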
@@ -1729,7 +1729,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset | |||
1729 | return -EINVAL; | 1729 | return -EINVAL; |
1730 | 1730 | ||
1731 | while (size) { | 1731 | while (size) { |
1732 | copy = min(srest, min(dst->length, size)); | 1732 | copy = min3(srest, dst->length, size); |
1733 | 1733 | ||
1734 | daddr = kmap_atomic(sg_page(dst), KM_IRQ0); | 1734 | daddr = kmap_atomic(sg_page(dst), KM_IRQ0); |
1735 | memcpy(daddr + dst->offset + offset, saddr, copy); | 1735 | memcpy(daddr + dst->offset + offset, saddr, copy); |
@@ -2700,8 +2700,7 @@ static void __devexit hifn_remove(struct pci_dev *pdev) | |||
2700 | dev = pci_get_drvdata(pdev); | 2700 | dev = pci_get_drvdata(pdev); |
2701 | 2701 | ||
2702 | if (dev) { | 2702 | if (dev) { |
2703 | cancel_delayed_work(&dev->work); | 2703 | cancel_delayed_work_sync(&dev->work); |
2704 | flush_scheduled_work(); | ||
2705 | 2704 | ||
2706 | hifn_unregister_rng(dev); | 2705 | hifn_unregister_rng(dev); |
2707 | hifn_unregister_alg(dev); | 2706 | hifn_unregister_alg(dev); |
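cancel_delayed_work_sync() cancels a pending work item and also waits for a callback that is already executing, so the driver no longer needs the heavyweight flush_scheduled_work(), which flushed the entire global workqueue. The resulting teardown shape, with a hypothetical device type:

    static void example_remove(struct example_dev *dev)  /* names illustrative */
    {
            /* after this returns, dev->work is neither pending nor running */
            cancel_delayed_work_sync(&dev->work);

            /* so state the work item touches can now be torn down safely */
            example_unregister(dev);
    }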
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 0d662213c066..4c20c5bf6058 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -1044,7 +1044,7 @@ static int aead_perform(struct aead_request *req, int encrypt, | |||
1044 | memcpy(crypt->iv, req->iv, ivsize); | 1044 | memcpy(crypt->iv, req->iv, ivsize); |
1045 | 1045 | ||
1046 | if (req->src != req->dst) { | 1046 | if (req->src != req->dst) { |
1047 | BUG(); /* -ENOTSUP because of my lazyness */ | 1047 | BUG(); /* -ENOTSUP because of my laziness */ |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | /* ASSOC data */ | 1050 | /* ASSOC data */ |
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 7d279e578df5..3cf303ee3fe3 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
@@ -133,7 +133,6 @@ struct mv_req_hash_ctx { | |||
133 | int extra_bytes; /* unprocessed bytes in buffer */ | 133 | int extra_bytes; /* unprocessed bytes in buffer */ |
134 | enum hash_op op; | 134 | enum hash_op op; |
135 | int count_add; | 135 | int count_add; |
136 | struct scatterlist dummysg; | ||
137 | }; | 136 | }; |
138 | 137 | ||
139 | static void compute_aes_dec_key(struct mv_ctx *ctx) | 138 | static void compute_aes_dec_key(struct mv_ctx *ctx) |
@@ -187,9 +186,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) | |||
187 | { | 186 | { |
188 | int ret; | 187 | int ret; |
189 | void *sbuf; | 188 | void *sbuf; |
190 | int copied = 0; | 189 | int copy_len; |
191 | 190 | ||
192 | while (1) { | 191 | while (len) { |
193 | if (!p->sg_src_left) { | 192 | if (!p->sg_src_left) { |
194 | ret = sg_miter_next(&p->src_sg_it); | 193 | ret = sg_miter_next(&p->src_sg_it); |
195 | BUG_ON(!ret); | 194 | BUG_ON(!ret); |
@@ -199,19 +198,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) | |||
199 | 198 | ||
200 | sbuf = p->src_sg_it.addr + p->src_start; | 199 | sbuf = p->src_sg_it.addr + p->src_start; |
201 | 200 | ||
202 | if (p->sg_src_left <= len - copied) { | 201 | copy_len = min(p->sg_src_left, len); |
203 | memcpy(dbuf + copied, sbuf, p->sg_src_left); | 202 | memcpy(dbuf, sbuf, copy_len); |
204 | copied += p->sg_src_left; | 203 | |
205 | p->sg_src_left = 0; | 204 | p->src_start += copy_len; |
206 | if (copied >= len) | 205 | p->sg_src_left -= copy_len; |
207 | break; | 206 | |
208 | } else { | 207 | len -= copy_len; |
209 | int copy_len = len - copied; | 208 | dbuf += copy_len; |
210 | memcpy(dbuf + copied, sbuf, copy_len); | ||
211 | p->src_start += copy_len; | ||
212 | p->sg_src_left -= copy_len; | ||
213 | break; | ||
214 | } | ||
215 | } | 209 | } |
216 | } | 210 | } |
217 | 211 | ||
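The rewritten copy_src_to_buf() replaces the accumulate-and-break branches with one min() per pass; the core pattern, reduced to a sketch with hypothetical helpers:

    while (len) {
            if (!chunk_left)
                    next_chunk(&src, &chunk_left);  /* hypothetical refill */

            n = min(chunk_left, len);
            memcpy(dbuf, src, n);

            src  += n;  chunk_left -= n;
            dbuf += n;  len        -= n;
    }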
@@ -275,7 +269,6 @@ static void mv_process_current_q(int first_block) | |||
275 | memcpy(cpg->sram + SRAM_CONFIG, &op, | 269 | memcpy(cpg->sram + SRAM_CONFIG, &op, |
276 | sizeof(struct sec_accel_config)); | 270 | sizeof(struct sec_accel_config)); |
277 | 271 | ||
278 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | ||
279 | /* GO */ | 272 | /* GO */ |
280 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 273 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); |
281 | 274 | ||
@@ -302,6 +295,7 @@ static void mv_crypto_algo_completion(void) | |||
302 | static void mv_process_hash_current(int first_block) | 295 | static void mv_process_hash_current(int first_block) |
303 | { | 296 | { |
304 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | 297 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); |
298 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
305 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); | 299 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); |
306 | struct req_progress *p = &cpg->p; | 300 | struct req_progress *p = &cpg->p; |
307 | struct sec_accel_config op = { 0 }; | 301 | struct sec_accel_config op = { 0 }; |
@@ -314,6 +308,8 @@ static void mv_process_hash_current(int first_block) | |||
314 | break; | 308 | break; |
315 | case COP_HMAC_SHA1: | 309 | case COP_HMAC_SHA1: |
316 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; | 310 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; |
311 | memcpy(cpg->sram + SRAM_HMAC_IV_IN, | ||
312 | tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); | ||
317 | break; | 313 | break; |
318 | } | 314 | } |
319 | 315 | ||
@@ -345,11 +341,16 @@ static void mv_process_hash_current(int first_block) | |||
345 | op.config |= CFG_LAST_FRAG; | 341 | op.config |= CFG_LAST_FRAG; |
346 | else | 342 | else |
347 | op.config |= CFG_MID_FRAG; | 343 | op.config |= CFG_MID_FRAG; |
344 | |||
345 | writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); | ||
346 | writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); | ||
347 | writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); | ||
348 | writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); | ||
349 | writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | ||
348 | } | 350 | } |
349 | 351 | ||
350 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | 352 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); |
351 | 353 | ||
352 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | ||
353 | /* GO */ | 354 | /* GO */ |
354 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 355 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); |
355 | 356 | ||
@@ -409,12 +410,6 @@ static void mv_hash_algo_completion(void) | |||
409 | copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); | 410 | copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); |
410 | sg_miter_stop(&cpg->p.src_sg_it); | 411 | sg_miter_stop(&cpg->p.src_sg_it); |
411 | 412 | ||
412 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | ||
413 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | ||
414 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | ||
415 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | ||
416 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | ||
417 | |||
418 | if (likely(ctx->last_chunk)) { | 413 | if (likely(ctx->last_chunk)) { |
419 | if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { | 414 | if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { |
420 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, | 415 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, |
@@ -422,6 +417,12 @@ static void mv_hash_algo_completion(void) | |||
422 | (req))); | 417 | (req))); |
423 | } else | 418 | } else |
424 | mv_hash_final_fallback(req); | 419 | mv_hash_final_fallback(req); |
420 | } else { | ||
421 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | ||
422 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | ||
423 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | ||
424 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | ||
425 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | ||
425 | } | 426 | } |
426 | } | 427 | } |
427 | 428 | ||
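This hunk makes the completion path asymmetric on purpose: the engine's chaining variables only need to be captured when the hash will continue. Condensed, with a hypothetical save_digest_state() standing in for the five explicit readl() calls above:

    if (ctx->last_chunk) {
            /* final fragment: result is complete in SRAM, or computed
             * by the software fallback for oversized requests */
            memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, digest_size);
    } else {
            /* mid fragment: save state to reload via DIGEST_INITIAL_VAL_x
             * before the next fragment (see mv_process_hash_current) */
            save_digest_state(ctx->state);  /* the five readl()s above */
    }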
@@ -480,7 +481,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) | |||
480 | int i = 0; | 481 | int i = 0; |
481 | size_t cur_len; | 482 | size_t cur_len; |
482 | 483 | ||
483 | while (1) { | 484 | while (sl) { |
484 | cur_len = sl[i].length; | 485 | cur_len = sl[i].length; |
485 | ++i; | 486 | ++i; |
486 | if (total_bytes > cur_len) | 487 | if (total_bytes > cur_len) |
@@ -517,29 +518,12 @@ static void mv_start_new_hash_req(struct ahash_request *req) | |||
517 | { | 518 | { |
518 | struct req_progress *p = &cpg->p; | 519 | struct req_progress *p = &cpg->p; |
519 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | 520 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); |
520 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
521 | int num_sgs, hw_bytes, old_extra_bytes, rc; | 521 | int num_sgs, hw_bytes, old_extra_bytes, rc; |
522 | cpg->cur_req = &req->base; | 522 | cpg->cur_req = &req->base; |
523 | memset(p, 0, sizeof(struct req_progress)); | 523 | memset(p, 0, sizeof(struct req_progress)); |
524 | hw_bytes = req->nbytes + ctx->extra_bytes; | 524 | hw_bytes = req->nbytes + ctx->extra_bytes; |
525 | old_extra_bytes = ctx->extra_bytes; | 525 | old_extra_bytes = ctx->extra_bytes; |
526 | 526 | ||
527 | if (unlikely(ctx->extra_bytes)) { | ||
528 | memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, | ||
529 | ctx->extra_bytes); | ||
530 | p->crypt_len = ctx->extra_bytes; | ||
531 | } | ||
532 | |||
533 | memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); | ||
534 | |||
535 | if (unlikely(!ctx->first_hash)) { | ||
536 | writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); | ||
537 | writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); | ||
538 | writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); | ||
539 | writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); | ||
540 | writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | ||
541 | } | ||
542 | |||
543 | ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; | 527 | ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; |
544 | if (ctx->extra_bytes != 0 | 528 | if (ctx->extra_bytes != 0 |
545 | && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) | 529 | && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) |
@@ -555,6 +539,12 @@ static void mv_start_new_hash_req(struct ahash_request *req) | |||
555 | p->complete = mv_hash_algo_completion; | 539 | p->complete = mv_hash_algo_completion; |
556 | p->process = mv_process_hash_current; | 540 | p->process = mv_process_hash_current; |
557 | 541 | ||
542 | if (unlikely(old_extra_bytes)) { | ||
543 | memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, | ||
544 | old_extra_bytes); | ||
545 | p->crypt_len = old_extra_bytes; | ||
546 | } | ||
547 | |||
558 | mv_process_hash_current(1); | 548 | mv_process_hash_current(1); |
559 | } else { | 549 | } else { |
560 | copy_src_to_buf(p, ctx->buffer + old_extra_bytes, | 550 | copy_src_to_buf(p, ctx->buffer + old_extra_bytes, |
@@ -603,9 +593,7 @@ static int queue_manag(void *data) | |||
603 | if (async_req->tfm->__crt_alg->cra_type != | 593 | if (async_req->tfm->__crt_alg->cra_type != |
604 | &crypto_ahash_type) { | 594 | &crypto_ahash_type) { |
605 | struct ablkcipher_request *req = | 595 | struct ablkcipher_request *req = |
606 | container_of(async_req, | 596 | ablkcipher_request_cast(async_req); |
607 | struct ablkcipher_request, | ||
608 | base); | ||
609 | mv_start_new_crypt_req(req); | 597 | mv_start_new_crypt_req(req); |
610 | } else { | 598 | } else { |
611 | struct ahash_request *req = | 599 | struct ahash_request *req = |
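ablkcipher_request_cast() is the crypto API's own container_of() wrapper, so the three-line open-coded version above becomes one call. Its definition is essentially:

    static inline struct ablkcipher_request *ablkcipher_request_cast(
            struct crypto_async_request *req)
    {
            return container_of(req, struct ablkcipher_request, base);
    }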
@@ -722,19 +710,13 @@ static int mv_hash_update(struct ahash_request *req) | |||
722 | static int mv_hash_final(struct ahash_request *req) | 710 | static int mv_hash_final(struct ahash_request *req) |
723 | { | 711 | { |
724 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | 712 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); |
725 | /* dummy buffer of 4 bytes */ | 713 | |
726 | sg_init_one(&ctx->dummysg, ctx->buffer, 4); | ||
727 | /* I think I'm allowed to do that... */ | ||
728 | ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0); | ||
729 | mv_update_hash_req_ctx(ctx, 1, 0); | 714 | mv_update_hash_req_ctx(ctx, 1, 0); |
730 | return mv_handle_req(&req->base); | 715 | return mv_handle_req(&req->base); |
731 | } | 716 | } |
732 | 717 | ||
733 | static int mv_hash_finup(struct ahash_request *req) | 718 | static int mv_hash_finup(struct ahash_request *req) |
734 | { | 719 | { |
735 | if (!req->nbytes) | ||
736 | return mv_hash_final(req); | ||
737 | |||
738 | mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); | 720 | mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); |
739 | return mv_handle_req(&req->base); | 721 | return mv_handle_req(&req->base); |
740 | } | 722 | } |
@@ -857,7 +839,7 @@ static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, | |||
857 | printk(KERN_WARNING MV_CESA | 839 | printk(KERN_WARNING MV_CESA |
858 | "Base driver '%s' could not be loaded!\n", | 840 | "Base driver '%s' could not be loaded!\n", |
859 | base_hash_name); | 841 | base_hash_name); |
860 | err = PTR_ERR(fallback_tfm); | 842 | err = PTR_ERR(base_hash); |
861 | goto err_bad_base; | 843 | goto err_bad_base; |
862 | } | 844 | } |
863 | } | 845 | } |
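The one-token fix above repairs a classic copy/paste bug: the error code was taken from fallback_tfm while the pointer actually being tested was base_hash. The intended shape (allocation call illustrative):

    base_hash = crypto_alloc_shash(base_hash_name, 0, 0);  /* illustrative */
    if (IS_ERR(base_hash)) {
            err = PTR_ERR(base_hash);  /* must match the pointer just tested */
            goto err_bad_base;
    }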
@@ -1065,14 +1047,21 @@ static int mv_probe(struct platform_device *pdev) | |||
1065 | 1047 | ||
1066 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | 1048 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); |
1067 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | 1049 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); |
1050 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | ||
1068 | 1051 | ||
1069 | ret = crypto_register_alg(&mv_aes_alg_ecb); | 1052 | ret = crypto_register_alg(&mv_aes_alg_ecb); |
1070 | if (ret) | 1053 | if (ret) { |
1054 | printk(KERN_WARNING MV_CESA | ||
1055 | "Could not register aes-ecb driver\n"); | ||
1071 | goto err_irq; | 1056 | goto err_irq; |
1057 | } | ||
1072 | 1058 | ||
1073 | ret = crypto_register_alg(&mv_aes_alg_cbc); | 1059 | ret = crypto_register_alg(&mv_aes_alg_cbc); |
1074 | if (ret) | 1060 | if (ret) { |
1061 | printk(KERN_WARNING MV_CESA | ||
1062 | "Could not register aes-cbc driver\n"); | ||
1075 | goto err_unreg_ecb; | 1063 | goto err_unreg_ecb; |
1064 | } | ||
1076 | 1065 | ||
1077 | ret = crypto_register_ahash(&mv_sha1_alg); | 1066 | ret = crypto_register_ahash(&mv_sha1_alg); |
1078 | if (ret == 0) | 1067 | if (ret == 0) |
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 88ee01510ec0..2e5b2044c96f 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -1542,7 +1542,7 @@ out: | |||
1542 | return err; | 1542 | return err; |
1543 | } | 1543 | } |
1544 | 1544 | ||
1545 | static void __exit n2_unregister_algs(void) | 1545 | static void __devexit n2_unregister_algs(void) |
1546 | { | 1546 | { |
1547 | mutex_lock(&spu_lock); | 1547 | mutex_lock(&spu_lock); |
1548 | if (!--algs_registered) | 1548 | if (!--algs_registered) |
@@ -1832,7 +1832,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, | |||
1832 | return -ENODEV; | 1832 | return -ENODEV; |
1833 | 1833 | ||
1834 | ino = mdesc_get_property(mdesc, node, "ino", &ino_len); | 1834 | ino = mdesc_get_property(mdesc, node, "ino", &ino_len); |
1835 | if (!intr) | 1835 | if (!ino) |
1836 | return -ENODEV; | 1836 | return -ENODEV; |
1837 | 1837 | ||
1838 | if (intr_len != ino_len) | 1838 | if (intr_len != ino_len) |
@@ -2004,8 +2004,7 @@ static void __devinit n2_spu_driver_version(void) | |||
2004 | pr_info("%s", version); | 2004 | pr_info("%s", version); |
2005 | } | 2005 | } |
2006 | 2006 | ||
2007 | static int __devinit n2_crypto_probe(struct platform_device *dev, | 2007 | static int __devinit n2_crypto_probe(struct platform_device *dev) |
2008 | const struct of_device_id *match) | ||
2009 | { | 2008 | { |
2010 | struct mdesc_handle *mdesc; | 2009 | struct mdesc_handle *mdesc; |
2011 | const char *full_name; | 2010 | const char *full_name; |
@@ -2116,8 +2115,7 @@ static void free_ncp(struct n2_mau *mp) | |||
2116 | kfree(mp); | 2115 | kfree(mp); |
2117 | } | 2116 | } |
2118 | 2117 | ||
2119 | static int __devinit n2_mau_probe(struct platform_device *dev, | 2118 | static int __devinit n2_mau_probe(struct platform_device *dev) |
2120 | const struct of_device_id *match) | ||
2121 | { | 2119 | { |
2122 | struct mdesc_handle *mdesc; | 2120 | struct mdesc_handle *mdesc; |
2123 | const char *full_name; | 2121 | const char *full_name; |
@@ -2211,7 +2209,7 @@ static struct of_device_id n2_crypto_match[] = { | |||
2211 | 2209 | ||
2212 | MODULE_DEVICE_TABLE(of, n2_crypto_match); | 2210 | MODULE_DEVICE_TABLE(of, n2_crypto_match); |
2213 | 2211 | ||
2214 | static struct of_platform_driver n2_crypto_driver = { | 2212 | static struct platform_driver n2_crypto_driver = { |
2215 | .driver = { | 2213 | .driver = { |
2216 | .name = "n2cp", | 2214 | .name = "n2cp", |
2217 | .owner = THIS_MODULE, | 2215 | .owner = THIS_MODULE, |
@@ -2235,7 +2233,7 @@ static struct of_device_id n2_mau_match[] = { | |||
2235 | 2233 | ||
2236 | MODULE_DEVICE_TABLE(of, n2_mau_match); | 2234 | MODULE_DEVICE_TABLE(of, n2_mau_match); |
2237 | 2235 | ||
2238 | static struct of_platform_driver n2_mau_driver = { | 2236 | static struct platform_driver n2_mau_driver = { |
2239 | .driver = { | 2237 | .driver = { |
2240 | .name = "ncp", | 2238 | .name = "ncp", |
2241 | .owner = THIS_MODULE, | 2239 | .owner = THIS_MODULE, |
@@ -2247,20 +2245,20 @@ static struct of_platform_driver n2_mau_driver = { | |||
2247 | 2245 | ||
2248 | static int __init n2_init(void) | 2246 | static int __init n2_init(void) |
2249 | { | 2247 | { |
2250 | int err = of_register_platform_driver(&n2_crypto_driver); | 2248 | int err = platform_driver_register(&n2_crypto_driver); |
2251 | 2249 | ||
2252 | if (!err) { | 2250 | if (!err) { |
2253 | err = of_register_platform_driver(&n2_mau_driver); | 2251 | err = platform_driver_register(&n2_mau_driver); |
2254 | if (err) | 2252 | if (err) |
2255 | of_unregister_platform_driver(&n2_crypto_driver); | 2253 | platform_driver_unregister(&n2_crypto_driver); |
2256 | } | 2254 | } |
2257 | return err; | 2255 | return err; |
2258 | } | 2256 | } |
2259 | 2257 | ||
2260 | static void __exit n2_exit(void) | 2258 | static void __exit n2_exit(void) |
2261 | { | 2259 | { |
2262 | of_unregister_platform_driver(&n2_mau_driver); | 2260 | platform_driver_unregister(&n2_mau_driver); |
2263 | of_unregister_platform_driver(&n2_crypto_driver); | 2261 | platform_driver_unregister(&n2_crypto_driver); |
2264 | } | 2262 | } |
2265 | 2263 | ||
2266 | module_init(n2_init); | 2264 | module_init(n2_init); |
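The n2 hunks are part of the tree-wide retirement of of_platform_driver: probe() loses its of_device_id argument, and registration goes through the plain platform_driver API. The conversion reduced to its shape, with hypothetical names (the OF match table typically moves under .driver.of_match_table):

    static int __devinit example_probe(struct platform_device *pdev)
    {
            /* was: example_probe(pdev, const struct of_device_id *match) */
            return 0;
    }

    static struct platform_driver example_driver = {
            .driver = {
                    .name           = "example",
                    .owner          = THIS_MODULE,
                    .of_match_table = example_match,  /* hypothetical table */
            },
            .probe  = example_probe,
    };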
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c new file mode 100644 index 000000000000..5b970d9e9956 --- /dev/null +++ b/drivers/crypto/omap-aes.c | |||
@@ -0,0 +1,960 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for OMAP AES HW acceleration. | ||
5 | * | ||
6 | * Copyright (c) 2010 Nokia Corporation | ||
7 | * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/clk.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/scatterlist.h> | ||
25 | #include <linux/dma-mapping.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/crypto.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <crypto/scatterwalk.h> | ||
30 | #include <crypto/aes.h> | ||
31 | |||
32 | #include <plat/cpu.h> | ||
33 | #include <plat/dma.h> | ||
34 | |||
35 | /* OMAP TRM gives bitfields as start:end, where start is the higher bit | ||
36 | number. For example 7:0 */ | ||
37 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) | ||
38 | #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) | ||
39 | |||
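A worked instance of these macros, for orientation (values checked by hand): the key-size field of AES_REG_CTRL occupies bits 4:3, so

    FLD_MASK(4, 3)   == ((1 << 2) - 1) << 3 == 0x18
    FLD_VAL(2, 4, 3) == (2 << 3) & 0x18     == 0x10  /* 192-bit key: (24 >> 3) - 1 == 2 */

which is exactly how omap_aes_write_ctrl() below encodes the key size.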
40 | #define AES_REG_KEY(x) (0x1C - ((x ^ 0x01) * 0x04)) | ||
41 | #define AES_REG_IV(x) (0x20 + ((x) * 0x04)) | ||
42 | |||
43 | #define AES_REG_CTRL 0x30 | ||
44 | #define AES_REG_CTRL_CTR_WIDTH (1 << 7) | ||
45 | #define AES_REG_CTRL_CTR (1 << 6) | ||
46 | #define AES_REG_CTRL_CBC (1 << 5) | ||
47 | #define AES_REG_CTRL_KEY_SIZE (3 << 3) | ||
48 | #define AES_REG_CTRL_DIRECTION (1 << 2) | ||
49 | #define AES_REG_CTRL_INPUT_READY (1 << 1) | ||
50 | #define AES_REG_CTRL_OUTPUT_READY (1 << 0) | ||
51 | |||
52 | #define AES_REG_DATA 0x34 | ||
53 | #define AES_REG_DATA_N(x) (0x34 + ((x) * 0x04)) | ||
54 | |||
55 | #define AES_REG_REV 0x44 | ||
56 | #define AES_REG_REV_MAJOR 0xF0 | ||
57 | #define AES_REG_REV_MINOR 0x0F | ||
58 | |||
59 | #define AES_REG_MASK 0x48 | ||
60 | #define AES_REG_MASK_SIDLE (1 << 6) | ||
61 | #define AES_REG_MASK_START (1 << 5) | ||
62 | #define AES_REG_MASK_DMA_OUT_EN (1 << 3) | ||
63 | #define AES_REG_MASK_DMA_IN_EN (1 << 2) | ||
64 | #define AES_REG_MASK_SOFTRESET (1 << 1) | ||
65 | #define AES_REG_AUTOIDLE (1 << 0) | ||
66 | |||
67 | #define AES_REG_SYSSTATUS 0x4C | ||
68 | #define AES_REG_SYSSTATUS_RESETDONE (1 << 0) | ||
69 | |||
70 | #define DEFAULT_TIMEOUT (5*HZ) | ||
71 | |||
72 | #define FLAGS_MODE_MASK 0x000f | ||
73 | #define FLAGS_ENCRYPT BIT(0) | ||
74 | #define FLAGS_CBC BIT(1) | ||
75 | #define FLAGS_GIV BIT(2) | ||
76 | |||
77 | #define FLAGS_INIT BIT(4) | ||
78 | #define FLAGS_FAST BIT(5) | ||
79 | #define FLAGS_BUSY BIT(6) | ||
80 | |||
81 | struct omap_aes_ctx { | ||
82 | struct omap_aes_dev *dd; | ||
83 | |||
84 | int keylen; | ||
85 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | ||
86 | unsigned long flags; | ||
87 | }; | ||
88 | |||
89 | struct omap_aes_reqctx { | ||
90 | unsigned long mode; | ||
91 | }; | ||
92 | |||
93 | #define OMAP_AES_QUEUE_LENGTH 1 | ||
94 | #define OMAP_AES_CACHE_SIZE 0 | ||
95 | |||
96 | struct omap_aes_dev { | ||
97 | struct list_head list; | ||
98 | unsigned long phys_base; | ||
99 | void __iomem *io_base; | ||
100 | struct clk *iclk; | ||
101 | struct omap_aes_ctx *ctx; | ||
102 | struct device *dev; | ||
103 | unsigned long flags; | ||
104 | int err; | ||
105 | |||
106 | spinlock_t lock; | ||
107 | struct crypto_queue queue; | ||
108 | |||
109 | struct tasklet_struct done_task; | ||
110 | struct tasklet_struct queue_task; | ||
111 | |||
112 | struct ablkcipher_request *req; | ||
113 | size_t total; | ||
114 | struct scatterlist *in_sg; | ||
115 | size_t in_offset; | ||
116 | struct scatterlist *out_sg; | ||
117 | size_t out_offset; | ||
118 | |||
119 | size_t buflen; | ||
120 | void *buf_in; | ||
121 | size_t dma_size; | ||
122 | int dma_in; | ||
123 | int dma_lch_in; | ||
124 | dma_addr_t dma_addr_in; | ||
125 | void *buf_out; | ||
126 | int dma_out; | ||
127 | int dma_lch_out; | ||
128 | dma_addr_t dma_addr_out; | ||
129 | }; | ||
130 | |||
131 | /* keep registered devices data here */ | ||
132 | static LIST_HEAD(dev_list); | ||
133 | static DEFINE_SPINLOCK(list_lock); | ||
134 | |||
135 | static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) | ||
136 | { | ||
137 | return __raw_readl(dd->io_base + offset); | ||
138 | } | ||
139 | |||
140 | static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, | ||
141 | u32 value) | ||
142 | { | ||
143 | __raw_writel(value, dd->io_base + offset); | ||
144 | } | ||
145 | |||
146 | static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset, | ||
147 | u32 value, u32 mask) | ||
148 | { | ||
149 | u32 val; | ||
150 | |||
151 | val = omap_aes_read(dd, offset); | ||
152 | val &= ~mask; | ||
153 | val |= value; | ||
154 | omap_aes_write(dd, offset, val); | ||
155 | } | ||
156 | |||
157 | static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset, | ||
158 | u32 *value, int count) | ||
159 | { | ||
160 | for (; count--; value++, offset += 4) | ||
161 | omap_aes_write(dd, offset, *value); | ||
162 | } | ||
163 | |||
164 | static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit) | ||
165 | { | ||
166 | unsigned long timeout = jiffies + DEFAULT_TIMEOUT; | ||
167 | |||
168 | while (!(omap_aes_read(dd, offset) & bit)) { | ||
169 | if (time_is_before_jiffies(timeout)) { | ||
170 | dev_err(dd->dev, "omap-aes timeout\n"); | ||
171 | return -ETIMEDOUT; | ||
172 | } | ||
173 | } | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static int omap_aes_hw_init(struct omap_aes_dev *dd) | ||
178 | { | ||
179 | /* | ||
180 | * Clocks are enabled when a request starts and disabled when it finishes. | ||
181 | * There may be long delays between requests, during which the | ||
182 | * device might go to off mode to save power. | ||
183 | */ | ||
184 | clk_enable(dd->iclk); | ||
185 | |||
186 | if (!(dd->flags & FLAGS_INIT)) { | ||
187 | /* is it necessary to reset before every operation? */ | ||
188 | omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET, | ||
189 | AES_REG_MASK_SOFTRESET); | ||
190 | /* | ||
191 | * prevent OCP bus error (SRESP) in case an access to the module | ||
192 | * is performed while the module is coming out of soft reset | ||
193 | */ | ||
194 | __asm__ __volatile__("nop"); | ||
195 | __asm__ __volatile__("nop"); | ||
196 | |||
197 | if (omap_aes_wait(dd, AES_REG_SYSSTATUS, | ||
198 | AES_REG_SYSSTATUS_RESETDONE)) | ||
199 | return -ETIMEDOUT; | ||
200 | |||
201 | dd->flags |= FLAGS_INIT; | ||
202 | dd->err = 0; | ||
203 | } | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | ||
209 | { | ||
210 | unsigned int key32; | ||
211 | int i, err; | ||
212 | u32 val, mask; | ||
213 | |||
214 | err = omap_aes_hw_init(dd); | ||
215 | if (err) | ||
216 | return err; | ||
217 | |||
218 | val = 0; | ||
219 | if (dd->dma_lch_out >= 0) | ||
220 | val |= AES_REG_MASK_DMA_OUT_EN; | ||
221 | if (dd->dma_lch_in >= 0) | ||
222 | val |= AES_REG_MASK_DMA_IN_EN; | ||
223 | |||
224 | mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN; | ||
225 | |||
226 | omap_aes_write_mask(dd, AES_REG_MASK, val, mask); | ||
227 | |||
228 | key32 = dd->ctx->keylen / sizeof(u32); | ||
229 | |||
230 | /* it seems a key should always be set even if it has not changed */ | ||
231 | for (i = 0; i < key32; i++) { | ||
232 | omap_aes_write(dd, AES_REG_KEY(i), | ||
233 | __le32_to_cpu(dd->ctx->key[i])); | ||
234 | } | ||
235 | |||
236 | if ((dd->flags & FLAGS_CBC) && dd->req->info) | ||
237 | omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4); | ||
238 | |||
239 | val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); | ||
240 | if (dd->flags & FLAGS_CBC) | ||
241 | val |= AES_REG_CTRL_CBC; | ||
242 | if (dd->flags & FLAGS_ENCRYPT) | ||
243 | val |= AES_REG_CTRL_DIRECTION; | ||
244 | |||
245 | mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | | ||
246 | AES_REG_CTRL_KEY_SIZE; | ||
247 | |||
248 | omap_aes_write_mask(dd, AES_REG_CTRL, val, mask); | ||
249 | |||
250 | /* IN */ | ||
251 | omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, | ||
252 | dd->phys_base + AES_REG_DATA, 0, 4); | ||
253 | |||
254 | omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); | ||
255 | omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); | ||
256 | |||
257 | /* OUT */ | ||
258 | omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, | ||
259 | dd->phys_base + AES_REG_DATA, 0, 4); | ||
260 | |||
261 | omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); | ||
262 | omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) | ||
268 | { | ||
269 | struct omap_aes_dev *dd = NULL, *tmp; | ||
270 | |||
271 | spin_lock_bh(&list_lock); | ||
272 | if (!ctx->dd) { | ||
273 | list_for_each_entry(tmp, &dev_list, list) { | ||
274 | /* FIXME: take first available AES core */ | ||
275 | dd = tmp; | ||
276 | break; | ||
277 | } | ||
278 | ctx->dd = dd; | ||
279 | } else { | ||
280 | /* already found before */ | ||
281 | dd = ctx->dd; | ||
282 | } | ||
283 | spin_unlock_bh(&list_lock); | ||
284 | |||
285 | return dd; | ||
286 | } | ||
287 | |||
288 | static void omap_aes_dma_callback(int lch, u16 ch_status, void *data) | ||
289 | { | ||
290 | struct omap_aes_dev *dd = data; | ||
291 | |||
292 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { | ||
293 | pr_err("omap-aes DMA error status: 0x%hx\n", ch_status); | ||
294 | dd->err = -EIO; | ||
295 | dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ | ||
296 | } else if (lch == dd->dma_lch_in) { | ||
297 | return; | ||
298 | } | ||
299 | |||
300 | /* dma_lch_out - completed */ | ||
301 | tasklet_schedule(&dd->done_task); | ||
302 | } | ||
303 | |||
304 | static int omap_aes_dma_init(struct omap_aes_dev *dd) | ||
305 | { | ||
306 | int err = -ENOMEM; | ||
307 | |||
308 | dd->dma_lch_out = -1; | ||
309 | dd->dma_lch_in = -1; | ||
310 | |||
311 | dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); | ||
312 | dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE); | ||
313 | dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE; | ||
314 | dd->buflen &= ~(AES_BLOCK_SIZE - 1); | ||
315 | |||
316 | if (!dd->buf_in || !dd->buf_out) { | ||
317 | dev_err(dd->dev, "unable to alloc pages.\n"); | ||
318 | goto err_alloc; | ||
319 | } | ||
320 | |||
321 | /* MAP here */ | ||
322 | dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen, | ||
323 | DMA_TO_DEVICE); | ||
324 | if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { | ||
325 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
326 | err = -EINVAL; | ||
327 | goto err_map_in; | ||
328 | } | ||
329 | |||
330 | dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen, | ||
331 | DMA_FROM_DEVICE); | ||
332 | if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { | ||
333 | dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); | ||
334 | err = -EINVAL; | ||
335 | goto err_map_out; | ||
336 | } | ||
337 | |||
338 | err = omap_request_dma(dd->dma_in, "omap-aes-rx", | ||
339 | omap_aes_dma_callback, dd, &dd->dma_lch_in); | ||
340 | if (err) { | ||
341 | dev_err(dd->dev, "Unable to request DMA channel\n"); | ||
342 | goto err_dma_in; | ||
343 | } | ||
344 | err = omap_request_dma(dd->dma_out, "omap-aes-tx", | ||
345 | omap_aes_dma_callback, dd, &dd->dma_lch_out); | ||
346 | if (err) { | ||
347 | dev_err(dd->dev, "Unable to request DMA channel\n"); | ||
348 | goto err_dma_out; | ||
349 | } | ||
350 | |||
351 | return 0; | ||
352 | |||
353 | err_dma_out: | ||
354 | omap_free_dma(dd->dma_lch_in); | ||
355 | err_dma_in: | ||
356 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | ||
357 | DMA_FROM_DEVICE); | ||
358 | err_map_out: | ||
359 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); | ||
360 | err_map_in: | ||
361 | free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); | ||
362 | free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); | ||
363 | err_alloc: | ||
364 | if (err) | ||
365 | pr_err("error: %d\n", err); | ||
366 | return err; | ||
367 | } | ||
368 | |||
369 | static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) | ||
370 | { | ||
371 | omap_free_dma(dd->dma_lch_out); | ||
372 | omap_free_dma(dd->dma_lch_in); | ||
373 | dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, | ||
374 | DMA_FROM_DEVICE); | ||
375 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE); | ||
376 | free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE); | ||
377 | free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE); | ||
378 | } | ||
379 | |||
380 | static void sg_copy_buf(void *buf, struct scatterlist *sg, | ||
381 | unsigned int start, unsigned int nbytes, int out) | ||
382 | { | ||
383 | struct scatter_walk walk; | ||
384 | |||
385 | if (!nbytes) | ||
386 | return; | ||
387 | |||
388 | scatterwalk_start(&walk, sg); | ||
389 | scatterwalk_advance(&walk, start); | ||
390 | scatterwalk_copychunks(buf, &walk, nbytes, out); | ||
391 | scatterwalk_done(&walk, out, 0); | ||
392 | } | ||
393 | |||
394 | static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, | ||
395 | size_t buflen, size_t total, int out) | ||
396 | { | ||
397 | unsigned int count, off = 0; | ||
398 | |||
399 | while (buflen && total) { | ||
400 | count = min((*sg)->length - *offset, total); | ||
401 | count = min(count, buflen); | ||
402 | |||
403 | if (!count) | ||
404 | return off; | ||
405 | |||
406 | /* | ||
407 | * buflen and total are AES_BLOCK_SIZE aligned, | ||
408 | * so count should also be aligned | ||
409 | */ | ||
410 | |||
411 | sg_copy_buf(buf + off, *sg, *offset, count, out); | ||
412 | |||
413 | off += count; | ||
414 | buflen -= count; | ||
415 | *offset += count; | ||
416 | total -= count; | ||
417 | |||
418 | if (*offset == (*sg)->length) { | ||
419 | *sg = sg_next(*sg); | ||
420 | if (*sg) | ||
421 | *offset = 0; | ||
422 | else | ||
423 | total = 0; | ||
424 | } | ||
425 | } | ||
426 | |||
427 | return off; | ||
428 | } | ||
429 | |||
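sg_copy() advances both the scatterlist cursor and the caller's offset as side effects, which is why it takes them by pointer; this file calls it in both directions:

    /* gather: request source -> DMA bounce buffer (out = 0),
     * as in omap_aes_crypt_dma_start() below */
    count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
                    dd->buflen, dd->total, 0);

    /* scatter: DMA bounce buffer -> request destination (out = 1),
     * as in omap_aes_crypt_dma_stop() below */
    count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
                    dd->buflen, dd->dma_size, 1);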
430 | static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, | ||
431 | dma_addr_t dma_addr_out, int length) | ||
432 | { | ||
433 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
434 | struct omap_aes_dev *dd = ctx->dd; | ||
435 | int len32; | ||
436 | |||
437 | pr_debug("len: %d\n", length); | ||
438 | |||
439 | dd->dma_size = length; | ||
440 | |||
441 | if (!(dd->flags & FLAGS_FAST)) | ||
442 | dma_sync_single_for_device(dd->dev, dma_addr_in, length, | ||
443 | DMA_TO_DEVICE); | ||
444 | |||
445 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
446 | |||
447 | /* IN */ | ||
448 | omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32, | ||
449 | len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in, | ||
450 | OMAP_DMA_DST_SYNC); | ||
451 | |||
452 | omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC, | ||
453 | dma_addr_in, 0, 0); | ||
454 | |||
455 | /* OUT */ | ||
456 | omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32, | ||
457 | len32, 1, OMAP_DMA_SYNC_PACKET, | ||
458 | dd->dma_out, OMAP_DMA_SRC_SYNC); | ||
459 | |||
460 | omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC, | ||
461 | dma_addr_out, 0, 0); | ||
462 | |||
463 | omap_start_dma(dd->dma_lch_in); | ||
464 | omap_start_dma(dd->dma_lch_out); | ||
465 | |||
466 | /* start DMA or disable idle mode */ | ||
467 | omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, | ||
468 | AES_REG_MASK_START); | ||
469 | |||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) | ||
474 | { | ||
475 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( | ||
476 | crypto_ablkcipher_reqtfm(dd->req)); | ||
477 | int err, fast = 0, in, out; | ||
478 | size_t count; | ||
479 | dma_addr_t addr_in, addr_out; | ||
480 | |||
481 | pr_debug("total: %zu\n", dd->total); | ||
482 | |||
483 | if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { | ||
484 | /* check for alignment */ | ||
485 | in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); | ||
486 | out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)); | ||
487 | |||
488 | fast = in && out; | ||
489 | } | ||
490 | |||
491 | if (fast) { | ||
492 | count = min(dd->total, sg_dma_len(dd->in_sg)); | ||
493 | count = min(count, sg_dma_len(dd->out_sg)); | ||
494 | |||
495 | if (count != dd->total) { | ||
496 | pr_err("request length != buffer length\n"); | ||
497 | return -EINVAL; | ||
498 | } | ||
499 | |||
500 | pr_debug("fast\n"); | ||
501 | |||
502 | err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
503 | if (!err) { | ||
504 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
505 | return -EINVAL; | ||
506 | } | ||
507 | |||
508 | err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
509 | if (!err) { | ||
510 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
511 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
512 | return -EINVAL; | ||
513 | } | ||
514 | |||
515 | addr_in = sg_dma_address(dd->in_sg); | ||
516 | addr_out = sg_dma_address(dd->out_sg); | ||
517 | |||
518 | dd->flags |= FLAGS_FAST; | ||
519 | |||
520 | } else { | ||
521 | /* use cache buffers */ | ||
522 | count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in, | ||
523 | dd->buflen, dd->total, 0); | ||
524 | |||
525 | addr_in = dd->dma_addr_in; | ||
526 | addr_out = dd->dma_addr_out; | ||
527 | |||
528 | dd->flags &= ~FLAGS_FAST; | ||
529 | |||
530 | } | ||
531 | |||
532 | dd->total -= count; | ||
533 | |||
534 | err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count); | ||
535 | if (err) { | ||
536 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
537 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
538 | } | ||
539 | |||
540 | return err; | ||
541 | } | ||
542 | |||
543 | static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) | ||
544 | { | ||
545 | struct ablkcipher_request *req = dd->req; | ||
546 | |||
547 | pr_debug("err: %d\n", err); | ||
548 | |||
549 | clk_disable(dd->iclk); | ||
550 | dd->flags &= ~FLAGS_BUSY; | ||
551 | |||
552 | req->base.complete(&req->base, err); | ||
553 | } | ||
554 | |||
555 | static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | ||
556 | { | ||
557 | int err = 0; | ||
558 | size_t count; | ||
559 | |||
560 | pr_debug("total: %zu\n", dd->total); | ||
561 | |||
562 | omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START); | ||
563 | |||
564 | omap_stop_dma(dd->dma_lch_in); | ||
565 | omap_stop_dma(dd->dma_lch_out); | ||
566 | |||
567 | if (dd->flags & FLAGS_FAST) { | ||
568 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); | ||
569 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); | ||
570 | } else { | ||
571 | dma_sync_single_for_device(dd->dev, dd->dma_addr_out, | ||
572 | dd->dma_size, DMA_FROM_DEVICE); | ||
573 | |||
574 | /* copy data */ | ||
575 | count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out, | ||
576 | dd->buflen, dd->dma_size, 1); | ||
577 | if (count != dd->dma_size) { | ||
578 | err = -EINVAL; | ||
579 | pr_err("not all data converted: %zu\n", count); | ||
580 | } | ||
581 | } | ||
582 | |||
583 | return err; | ||
584 | } | ||
585 | |||
586 | static int omap_aes_handle_queue(struct omap_aes_dev *dd, | ||
587 | struct ablkcipher_request *req) | ||
588 | { | ||
589 | struct crypto_async_request *async_req, *backlog; | ||
590 | struct omap_aes_ctx *ctx; | ||
591 | struct omap_aes_reqctx *rctx; | ||
592 | unsigned long flags; | ||
593 | int err, ret = 0; | ||
594 | |||
595 | spin_lock_irqsave(&dd->lock, flags); | ||
596 | if (req) | ||
597 | ret = ablkcipher_enqueue_request(&dd->queue, req); | ||
598 | if (dd->flags & FLAGS_BUSY) { | ||
599 | spin_unlock_irqrestore(&dd->lock, flags); | ||
600 | return ret; | ||
601 | } | ||
602 | backlog = crypto_get_backlog(&dd->queue); | ||
603 | async_req = crypto_dequeue_request(&dd->queue); | ||
604 | if (async_req) | ||
605 | dd->flags |= FLAGS_BUSY; | ||
606 | spin_unlock_irqrestore(&dd->lock, flags); | ||
607 | |||
608 | if (!async_req) | ||
609 | return ret; | ||
610 | |||
611 | if (backlog) | ||
612 | backlog->complete(backlog, -EINPROGRESS); | ||
613 | |||
614 | req = ablkcipher_request_cast(async_req); | ||
615 | |||
616 | /* assign new request to device */ | ||
617 | dd->req = req; | ||
618 | dd->total = req->nbytes; | ||
619 | dd->in_offset = 0; | ||
620 | dd->in_sg = req->src; | ||
621 | dd->out_offset = 0; | ||
622 | dd->out_sg = req->dst; | ||
623 | |||
624 | rctx = ablkcipher_request_ctx(req); | ||
625 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
626 | rctx->mode &= FLAGS_MODE_MASK; | ||
627 | dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; | ||
628 | |||
629 | dd->ctx = ctx; | ||
630 | ctx->dd = dd; | ||
631 | |||
632 | err = omap_aes_write_ctrl(dd); | ||
633 | if (!err) | ||
634 | err = omap_aes_crypt_dma_start(dd); | ||
635 | if (err) { | ||
636 | /* aes_task will not finish it, so do it here */ | ||
637 | omap_aes_finish_req(dd, err); | ||
638 | tasklet_schedule(&dd->queue_task); | ||
639 | } | ||
640 | |||
641 | return ret; /* return ret, which is enqueue return value */ | ||
642 | } | ||
643 | |||
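Note the calling convention established here: omap_aes_handle_queue(dd, NULL) only pumps the queue. That is how the tasklets below restart processing once the engine frees up:

    /* e.g. at the end of omap_aes_done_task(): */
    omap_aes_finish_req(dd, err);     /* completes req, clears FLAGS_BUSY */
    omap_aes_handle_queue(dd, NULL);  /* NULL: just dequeue the next request */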
644 | static void omap_aes_done_task(unsigned long data) | ||
645 | { | ||
646 | struct omap_aes_dev *dd = (struct omap_aes_dev *)data; | ||
647 | int err; | ||
648 | |||
649 | pr_debug("enter\n"); | ||
650 | |||
651 | err = omap_aes_crypt_dma_stop(dd); | ||
652 | |||
653 | err = dd->err ? : err; | ||
654 | |||
655 | if (dd->total && !err) { | ||
656 | err = omap_aes_crypt_dma_start(dd); | ||
657 | if (!err) | ||
658 | return; /* DMA started. Not finishing. */ | ||
659 | } | ||
660 | |||
661 | omap_aes_finish_req(dd, err); | ||
662 | omap_aes_handle_queue(dd, NULL); | ||
663 | |||
664 | pr_debug("exit\n"); | ||
665 | } | ||
666 | |||
667 | static void omap_aes_queue_task(unsigned long data) | ||
668 | { | ||
669 | struct omap_aes_dev *dd = (struct omap_aes_dev *)data; | ||
670 | |||
671 | omap_aes_handle_queue(dd, NULL); | ||
672 | } | ||
673 | |||
674 | static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
675 | { | ||
676 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( | ||
677 | crypto_ablkcipher_reqtfm(req)); | ||
678 | struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
679 | struct omap_aes_dev *dd; | ||
680 | |||
681 | pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, | ||
682 | !!(mode & FLAGS_ENCRYPT), | ||
683 | !!(mode & FLAGS_CBC)); | ||
684 | |||
685 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { | ||
686 | pr_err("request size is not an exact number of AES blocks\n"); | ||
687 | return -EINVAL; | ||
688 | } | ||
689 | |||
690 | dd = omap_aes_find_dev(ctx); | ||
691 | if (!dd) | ||
692 | return -ENODEV; | ||
693 | |||
694 | rctx->mode = mode; | ||
695 | |||
696 | return omap_aes_handle_queue(dd, req); | ||
697 | } | ||
698 | |||
699 | /* ********************** ALG API ************************************ */ | ||
700 | |||
701 | static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
702 | unsigned int keylen) | ||
703 | { | ||
704 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
705 | |||
706 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | ||
707 | keylen != AES_KEYSIZE_256) | ||
708 | return -EINVAL; | ||
709 | |||
710 | pr_debug("enter, keylen: %d\n", keylen); | ||
711 | |||
712 | memcpy(ctx->key, key, keylen); | ||
713 | ctx->keylen = keylen; | ||
714 | |||
715 | return 0; | ||
716 | } | ||
717 | |||
718 | static int omap_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
719 | { | ||
720 | return omap_aes_crypt(req, FLAGS_ENCRYPT); | ||
721 | } | ||
722 | |||
723 | static int omap_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
724 | { | ||
725 | return omap_aes_crypt(req, 0); | ||
726 | } | ||
727 | |||
728 | static int omap_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
729 | { | ||
730 | return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); | ||
731 | } | ||
732 | |||
733 | static int omap_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
734 | { | ||
735 | return omap_aes_crypt(req, FLAGS_CBC); | ||
736 | } | ||
737 | |||
738 | static int omap_aes_cra_init(struct crypto_tfm *tfm) | ||
739 | { | ||
740 | pr_debug("enter\n"); | ||
741 | |||
742 | tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); | ||
743 | |||
744 | return 0; | ||
745 | } | ||
746 | |||
747 | static void omap_aes_cra_exit(struct crypto_tfm *tfm) | ||
748 | { | ||
749 | pr_debug("enter\n"); | ||
750 | } | ||
751 | |||
752 | /* ********************** ALGS ************************************ */ | ||
753 | |||
754 | static struct crypto_alg algs[] = { | ||
755 | { | ||
756 | .cra_name = "ecb(aes)", | ||
757 | .cra_driver_name = "ecb-aes-omap", | ||
758 | .cra_priority = 100, | ||
759 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
760 | .cra_blocksize = AES_BLOCK_SIZE, | ||
761 | .cra_ctxsize = sizeof(struct omap_aes_ctx), | ||
762 | .cra_alignmask = 0, | ||
763 | .cra_type = &crypto_ablkcipher_type, | ||
764 | .cra_module = THIS_MODULE, | ||
765 | .cra_init = omap_aes_cra_init, | ||
766 | .cra_exit = omap_aes_cra_exit, | ||
767 | .cra_u.ablkcipher = { | ||
768 | .min_keysize = AES_MIN_KEY_SIZE, | ||
769 | .max_keysize = AES_MAX_KEY_SIZE, | ||
770 | .setkey = omap_aes_setkey, | ||
771 | .encrypt = omap_aes_ecb_encrypt, | ||
772 | .decrypt = omap_aes_ecb_decrypt, | ||
773 | } | ||
774 | }, | ||
775 | { | ||
776 | .cra_name = "cbc(aes)", | ||
777 | .cra_driver_name = "cbc-aes-omap", | ||
778 | .cra_priority = 100, | ||
779 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
780 | .cra_blocksize = AES_BLOCK_SIZE, | ||
781 | .cra_ctxsize = sizeof(struct omap_aes_ctx), | ||
782 | .cra_alignmask = 0, | ||
783 | .cra_type = &crypto_ablkcipher_type, | ||
784 | .cra_module = THIS_MODULE, | ||
785 | .cra_init = omap_aes_cra_init, | ||
786 | .cra_exit = omap_aes_cra_exit, | ||
787 | .cra_u.ablkcipher = { | ||
788 | .min_keysize = AES_MIN_KEY_SIZE, | ||
789 | .max_keysize = AES_MAX_KEY_SIZE, | ||
790 | .ivsize = AES_BLOCK_SIZE, | ||
791 | .setkey = omap_aes_setkey, | ||
792 | .encrypt = omap_aes_cbc_encrypt, | ||
793 | .decrypt = omap_aes_cbc_decrypt, | ||
794 | } | ||
795 | } | ||
796 | }; | ||
797 | |||
798 | static int omap_aes_probe(struct platform_device *pdev) | ||
799 | { | ||
800 | struct device *dev = &pdev->dev; | ||
801 | struct omap_aes_dev *dd; | ||
802 | struct resource *res; | ||
803 | int err = -ENOMEM, i, j; | ||
804 | u32 reg; | ||
805 | |||
806 | dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL); | ||
807 | if (dd == NULL) { | ||
808 | dev_err(dev, "unable to alloc data struct.\n"); | ||
809 | goto err_data; | ||
810 | } | ||
811 | dd->dev = dev; | ||
812 | platform_set_drvdata(pdev, dd); | ||
813 | |||
814 | spin_lock_init(&dd->lock); | ||
815 | crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH); | ||
816 | |||
817 | /* Get the base address */ | ||
818 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
819 | if (!res) { | ||
820 | dev_err(dev, "invalid resource type\n"); | ||
821 | err = -ENODEV; | ||
822 | goto err_res; | ||
823 | } | ||
824 | dd->phys_base = res->start; | ||
825 | |||
826 | /* Get the DMA */ | ||
827 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
828 | if (!res) | ||
829 | dev_info(dev, "no DMA info\n"); | ||
830 | else | ||
831 | dd->dma_out = res->start; | ||
832 | |||
833 | /* Get the DMA */ | ||
834 | res = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
835 | if (!res) | ||
836 | dev_info(dev, "no DMA info\n"); | ||
837 | else | ||
838 | dd->dma_in = res->start; | ||
839 | |||
840 | /* Initializing the clock */ | ||
841 | dd->iclk = clk_get(dev, "ick"); | ||
842 | if (IS_ERR(dd->iclk)) { | ||
843 | dev_err(dev, "clock initialization failed.\n"); | ||
844 | err = PTR_ERR(dd->iclk); | ||
845 | goto err_res; | ||
846 | } | ||
847 | |||
848 | dd->io_base = ioremap(dd->phys_base, SZ_4K); | ||
849 | if (!dd->io_base) { | ||
850 | dev_err(dev, "can't ioremap\n"); | ||
851 | err = -ENOMEM; | ||
852 | goto err_io; | ||
853 | } | ||
854 | |||
855 | clk_enable(dd->iclk); | ||
856 | reg = omap_aes_read(dd, AES_REG_REV); | ||
857 | dev_info(dev, "OMAP AES hw accel rev: %u.%u\n", | ||
858 | (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR); | ||
859 | clk_disable(dd->iclk); | ||
860 | |||
861 | tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); | ||
862 | tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); | ||
863 | |||
864 | err = omap_aes_dma_init(dd); | ||
865 | if (err) | ||
866 | goto err_dma; | ||
867 | |||
868 | INIT_LIST_HEAD(&dd->list); | ||
869 | spin_lock(&list_lock); | ||
870 | list_add_tail(&dd->list, &dev_list); | ||
871 | spin_unlock(&list_lock); | ||
872 | |||
873 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | ||
874 | pr_debug("i: %d\n", i); | ||
875 | INIT_LIST_HEAD(&algs[i].cra_list); | ||
876 | err = crypto_register_alg(&algs[i]); | ||
877 | if (err) | ||
878 | goto err_algs; | ||
879 | } | ||
880 | |||
881 | pr_info("probe() done\n"); | ||
882 | |||
883 | return 0; | ||
884 | err_algs: | ||
885 | for (j = 0; j < i; j++) | ||
886 | crypto_unregister_alg(&algs[j]); | ||
887 | omap_aes_dma_cleanup(dd); | ||
888 | err_dma: | ||
889 | tasklet_kill(&dd->done_task); | ||
890 | tasklet_kill(&dd->queue_task); | ||
891 | iounmap(dd->io_base); | ||
892 | err_io: | ||
893 | clk_put(dd->iclk); | ||
894 | err_res: | ||
895 | kfree(dd); | ||
896 | dd = NULL; | ||
897 | err_data: | ||
898 | dev_err(dev, "initialization failed.\n"); | ||
899 | return err; | ||
900 | } | ||
901 | |||
902 | static int omap_aes_remove(struct platform_device *pdev) | ||
903 | { | ||
904 | struct omap_aes_dev *dd = platform_get_drvdata(pdev); | ||
905 | int i; | ||
906 | |||
907 | if (!dd) | ||
908 | return -ENODEV; | ||
909 | |||
910 | spin_lock(&list_lock); | ||
911 | list_del(&dd->list); | ||
912 | spin_unlock(&list_lock); | ||
913 | |||
914 | for (i = 0; i < ARRAY_SIZE(algs); i++) | ||
915 | crypto_unregister_alg(&algs[i]); | ||
916 | |||
917 | tasklet_kill(&dd->done_task); | ||
918 | tasklet_kill(&dd->queue_task); | ||
919 | omap_aes_dma_cleanup(dd); | ||
920 | iounmap(dd->io_base); | ||
921 | clk_put(dd->iclk); | ||
922 | kfree(dd); | ||
923 | dd = NULL; | ||
924 | |||
925 | return 0; | ||
926 | } | ||
927 | |||
928 | static struct platform_driver omap_aes_driver = { | ||
929 | .probe = omap_aes_probe, | ||
930 | .remove = omap_aes_remove, | ||
931 | .driver = { | ||
932 | .name = "omap-aes", | ||
933 | .owner = THIS_MODULE, | ||
934 | }, | ||
935 | }; | ||
936 | |||
937 | static int __init omap_aes_mod_init(void) | ||
938 | { | ||
939 | pr_info("loading %s driver\n", "omap-aes"); | ||
940 | |||
941 | if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) { | ||
942 | pr_err("Unsupported cpu\n"); | ||
943 | return -ENODEV; | ||
944 | } | ||
945 | |||
946 | return platform_driver_register(&omap_aes_driver); | ||
947 | } | ||
948 | |||
949 | static void __exit omap_aes_mod_exit(void) | ||
950 | { | ||
951 | platform_driver_unregister(&omap_aes_driver); | ||
952 | } | ||
953 | |||
954 | module_init(omap_aes_mod_init); | ||
955 | module_exit(omap_aes_mod_exit); | ||
956 | |||
957 | MODULE_DESCRIPTION("OMAP AES hw acceleration support."); | ||
958 | MODULE_LICENSE("GPL v2"); | ||
959 | MODULE_AUTHOR("Dmitry Kasatkin"); | ||
960 | |||
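A consumer never names this driver directly; it asks the crypto API for "cbc(aes)" and, when this module is loaded and wins on priority, is handed "cbc-aes-omap". A minimal kernel-side sketch using the ablkcipher API of this era:

    struct crypto_ablkcipher *tfm;

    tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    crypto_ablkcipher_setkey(tfm, key, 16);  /* AES-128; key is the caller's */
    /* ... allocate an ablkcipher_request, set src/dst/iv, call encrypt ... */
    crypto_free_ablkcipher(tfm);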
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 7d1485676886..ba8f1ea84c5e 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -72,24 +72,26 @@ | |||
72 | 72 | ||
73 | #define DEFAULT_TIMEOUT_INTERVAL HZ | 73 | #define DEFAULT_TIMEOUT_INTERVAL HZ |
74 | 74 | ||
75 | #define FLAGS_FIRST 0x0001 | ||
76 | #define FLAGS_FINUP 0x0002 | 75 | #define FLAGS_FINUP 0x0002 |
77 | #define FLAGS_FINAL 0x0004 | 76 | #define FLAGS_FINAL 0x0004 |
78 | #define FLAGS_FAST 0x0008 | 77 | #define FLAGS_SG 0x0008 |
79 | #define FLAGS_SHA1 0x0010 | 78 | #define FLAGS_SHA1 0x0010 |
80 | #define FLAGS_DMA_ACTIVE 0x0020 | 79 | #define FLAGS_DMA_ACTIVE 0x0020 |
81 | #define FLAGS_OUTPUT_READY 0x0040 | 80 | #define FLAGS_OUTPUT_READY 0x0040 |
82 | #define FLAGS_CLEAN 0x0080 | ||
83 | #define FLAGS_INIT 0x0100 | 81 | #define FLAGS_INIT 0x0100 |
84 | #define FLAGS_CPU 0x0200 | 82 | #define FLAGS_CPU 0x0200 |
85 | #define FLAGS_HMAC 0x0400 | 83 | #define FLAGS_HMAC 0x0400 |
86 | 84 | #define FLAGS_ERROR 0x0800 | |
87 | /* 3rd byte */ | 85 | #define FLAGS_BUSY 0x1000 |
88 | #define FLAGS_BUSY 16 | ||
89 | 86 | ||
90 | #define OP_UPDATE 1 | 87 | #define OP_UPDATE 1 |
91 | #define OP_FINAL 2 | 88 | #define OP_FINAL 2 |
92 | 89 | ||
90 | #define OMAP_ALIGN_MASK (sizeof(u32)-1) | ||
91 | #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) | ||
92 | |||
93 | #define BUFLEN PAGE_SIZE | ||
94 | |||
93 | struct omap_sham_dev; | 95 | struct omap_sham_dev; |
94 | 96 | ||
95 | struct omap_sham_reqctx { | 97 | struct omap_sham_reqctx { |
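The flag cleanup above also turns FLAGS_BUSY from a bit number (16, for the atomic bit ops the old code presumably used) into a plain mask like the others, so every flag is now tested and updated uniformly:

    if (dd->flags & FLAGS_BUSY)   /* test */
            ...
    dd->flags |= FLAGS_BUSY;      /* set */
    dd->flags &= ~FLAGS_BUSY;     /* clear */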
@@ -97,8 +99,8 @@ struct omap_sham_reqctx { | |||
97 | unsigned long flags; | 99 | unsigned long flags; |
98 | unsigned long op; | 100 | unsigned long op; |
99 | 101 | ||
102 | u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED; | ||
100 | size_t digcnt; | 103 | size_t digcnt; |
101 | u8 *buffer; | ||
102 | size_t bufcnt; | 104 | size_t bufcnt; |
103 | size_t buflen; | 105 | size_t buflen; |
104 | dma_addr_t dma_addr; | 106 | dma_addr_t dma_addr; |
@@ -107,6 +109,8 @@ struct omap_sham_reqctx { | |||
107 | struct scatterlist *sg; | 109 | struct scatterlist *sg; |
108 | unsigned int offset; /* offset in current sg */ | 110 | unsigned int offset; /* offset in current sg */ |
109 | unsigned int total; /* total request */ | 111 | unsigned int total; /* total request */ |
112 | |||
113 | u8 buffer[0] OMAP_ALIGNED; | ||
110 | }; | 114 | }; |
111 | 115 | ||
112 | struct omap_sham_hmac_ctx { | 116 | struct omap_sham_hmac_ctx { |
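The zero-length buffer[0] (a pre-C99 flexible array) folds the data buffer into the request-context allocation itself, so the context must be declared to the ahash core as sizeof(struct omap_sham_reqctx) + BUFLEN bytes; the registration call that does so lies outside this hunk. The resulting layout:

    /* one allocation per request context, BUFLEN == PAGE_SIZE per the
     * #define above:
     *
     *   [ fixed omap_sham_reqctx fields | buffer: BUFLEN bytes ]
     *                                     ^ ctx->buffer starts here
     */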
@@ -136,6 +140,7 @@ struct omap_sham_dev { | |||
136 | int irq; | 140 | int irq; |
137 | struct clk *iclk; | 141 | struct clk *iclk; |
138 | spinlock_t lock; | 142 | spinlock_t lock; |
143 | int err; | ||
139 | int dma; | 144 | int dma; |
140 | int dma_lch; | 145 | int dma_lch; |
141 | struct tasklet_struct done_task; | 146 | struct tasklet_struct done_task; |
@@ -194,53 +199,68 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) | |||
194 | static void omap_sham_copy_hash(struct ahash_request *req, int out) | 199 | static void omap_sham_copy_hash(struct ahash_request *req, int out) |
195 | { | 200 | { |
196 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 201 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
202 | u32 *hash = (u32 *)ctx->digest; | ||
203 | int i; | ||
204 | |||
205 | /* MD5 is almost unused, so copy the SHA1-sized digest to reduce code */ | ||
206 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { | ||
207 | if (out) | ||
208 | hash[i] = omap_sham_read(ctx->dd, | ||
209 | SHA_REG_DIGEST(i)); | ||
210 | else | ||
211 | omap_sham_write(ctx->dd, | ||
212 | SHA_REG_DIGEST(i), hash[i]); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static void omap_sham_copy_ready_hash(struct ahash_request *req) | ||
217 | { | ||
218 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
219 | u32 *in = (u32 *)ctx->digest; | ||
197 | u32 *hash = (u32 *)req->result; | 220 | u32 *hash = (u32 *)req->result; |
198 | int i; | 221 | int i; |
199 | 222 | ||
223 | if (!hash) | ||
224 | return; | ||
225 | |||
200 | if (likely(ctx->flags & FLAGS_SHA1)) { | 226 | if (likely(ctx->flags & FLAGS_SHA1)) { |
201 | /* SHA1 results are in big endian */ | 227 | /* SHA1 results are in big endian */ |
202 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | 228 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) |
203 | if (out) | 229 | hash[i] = be32_to_cpu(in[i]); |
204 | hash[i] = be32_to_cpu(omap_sham_read(ctx->dd, | ||
205 | SHA_REG_DIGEST(i))); | ||
206 | else | ||
207 | omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), | ||
208 | cpu_to_be32(hash[i])); | ||
209 | } else { | 230 | } else { |
210 | /* MD5 results are in little endian */ | 231 | /* MD5 results are in little endian */ |
211 | for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) | 232 | for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) |
212 | if (out) | 233 | hash[i] = le32_to_cpu(in[i]); |
213 | hash[i] = le32_to_cpu(omap_sham_read(ctx->dd, | ||
214 | SHA_REG_DIGEST(i))); | ||
215 | else | ||
216 | omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), | ||
217 | cpu_to_le32(hash[i])); | ||
218 | } | 234 | } |
219 | } | 235 | } |
220 | 236 | ||
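The split above separates raw register save/restore (copy_hash) from presentation to the caller (copy_ready_hash), and only the latter byte-swaps: SHA1 digests leave the accelerator big-endian, MD5 little-endian. A hedged userspace sketch of the SHA1 case, assuming the state was captured as five 32-bit words (names are illustrative, not the driver's):

    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohl(): big-endian to host order */

    #define SHA1_WORDS 5

    /* Convert a big-endian SHA1 register snapshot into host-order words,
     * the userspace analogue of the be32_to_cpu() loop above. */
    static void sha1_regs_to_result(const uint32_t in[SHA1_WORDS],
                                    uint32_t out[SHA1_WORDS])
    {
        for (int i = 0; i < SHA1_WORDS; i++)
            out[i] = ntohl(in[i]);
    }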
221 | static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, | 237 | static int omap_sham_hw_init(struct omap_sham_dev *dd) |
222 | int final, int dma) | ||
223 | { | 238 | { |
224 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 239 | clk_enable(dd->iclk); |
225 | u32 val = length << 5, mask; | ||
226 | 240 | ||
227 | if (unlikely(!ctx->digcnt)) { | 241 | if (!(dd->flags & FLAGS_INIT)) { |
242 | omap_sham_write_mask(dd, SHA_REG_MASK, | ||
243 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); | ||
228 | 244 | ||
229 | clk_enable(dd->iclk); | 245 | if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, |
246 | SHA_REG_SYSSTATUS_RESETDONE)) | ||
247 | return -ETIMEDOUT; | ||
248 | |||
249 | dd->flags |= FLAGS_INIT; | ||
250 | dd->err = 0; | ||
251 | } | ||
230 | 252 | ||
231 | if (!(dd->flags & FLAGS_INIT)) { | 253 | return 0; |
232 | omap_sham_write_mask(dd, SHA_REG_MASK, | 254 | } |
233 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); | ||
234 | 255 | ||
235 | if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, | 256 | static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, |
236 | SHA_REG_SYSSTATUS_RESETDONE)) | 257 | int final, int dma) |
237 | return -ETIMEDOUT; | 258 | { |
259 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
260 | u32 val = length << 5, mask; | ||
238 | 261 | ||
239 | dd->flags |= FLAGS_INIT; | 262 | if (likely(ctx->digcnt)) |
240 | } | ||
241 | } else { | ||
242 | omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); | 263 | omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); |
243 | } | ||
244 | 264 | ||
245 | omap_sham_write_mask(dd, SHA_REG_MASK, | 265 | omap_sham_write_mask(dd, SHA_REG_MASK, |
246 | SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), | 266 | SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), |
@@ -260,29 +280,26 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, | |||
260 | SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; | 280 | SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; |
261 | 281 | ||
262 | omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); | 282 | omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); |
263 | |||
264 | return 0; | ||
265 | } | 283 | } |
266 | 284 | ||
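omap_sham_hw_init() now owns the one-time soft reset, pulsing SHA_REG_MASK_SOFTRESET and then polling SYSSTATUS via omap_sham_wait(). The poll itself is the standard bounded-spin idiom; a hedged sketch of it (the loop bound and the accessor are stand-ins, not the driver's actual values):

    #include <errno.h>
    #include <stdint.h>

    #define POLL_LIMIT 1000000   /* illustrative bound only */

    /* Spin until (reg & bit) is set, or report a timeout. */
    static int wait_for_bit(volatile const uint32_t *reg, uint32_t bit)
    {
        for (long i = 0; i < POLL_LIMIT; i++) {
            if (*reg & bit)
                return 0;
        }
        return -ETIMEDOUT;
    }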
267 | static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | 285 | static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, |
268 | size_t length, int final) | 286 | size_t length, int final) |
269 | { | 287 | { |
270 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 288 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
271 | int err, count, len32; | 289 | int count, len32; |
272 | const u32 *buffer = (const u32 *)buf; | 290 | const u32 *buffer = (const u32 *)buf; |
273 | 291 | ||
274 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", | 292 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", |
275 | ctx->digcnt, length, final); | 293 | ctx->digcnt, length, final); |
276 | 294 | ||
277 | err = omap_sham_write_ctrl(dd, length, final, 0); | 295 | omap_sham_write_ctrl(dd, length, final, 0); |
278 | if (err) | 296 | |
279 | return err; | 297 | /* should be non-zero before next lines to disable clocks later */ |
298 | ctx->digcnt += length; | ||
280 | 299 | ||
281 | if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) | 300 | if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) |
282 | return -ETIMEDOUT; | 301 | return -ETIMEDOUT; |
283 | 302 | ||
284 | ctx->digcnt += length; | ||
285 | |||
286 | if (final) | 303 | if (final) |
287 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | 304 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ |
288 | 305 | ||
@@ -298,27 +315,21 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
298 | size_t length, int final) | 315 | size_t length, int final) |
299 | { | 316 | { |
300 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 317 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
301 | int err, len32; | 318 | int len32; |
302 | 319 | ||
303 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", | 320 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", |
304 | ctx->digcnt, length, final); | 321 | ctx->digcnt, length, final); |
305 | 322 | ||
306 | /* flush cache entries related to our page */ | ||
307 | if (dma_addr == ctx->dma_addr) | ||
308 | dma_sync_single_for_device(dd->dev, dma_addr, length, | ||
309 | DMA_TO_DEVICE); | ||
310 | |||
311 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | 323 | len32 = DIV_ROUND_UP(length, sizeof(u32)); |
312 | 324 | ||
313 | omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, | 325 | omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, |
314 | 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC); | 326 | 1, OMAP_DMA_SYNC_PACKET, dd->dma, |
327 | OMAP_DMA_DST_SYNC_PREFETCH); | ||
315 | 328 | ||
316 | omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, | 329 | omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, |
317 | dma_addr, 0, 0); | 330 | dma_addr, 0, 0); |
318 | 331 | ||
319 | err = omap_sham_write_ctrl(dd, length, final, 1); | 332 | omap_sham_write_ctrl(dd, length, final, 1); |
320 | if (err) | ||
321 | return err; | ||
322 | 333 | ||
323 | ctx->digcnt += length; | 334 | ctx->digcnt += length; |
324 | 335 | ||
@@ -370,15 +381,29 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) | |||
370 | return 0; | 381 | return 0; |
371 | } | 382 | } |
372 | 383 | ||
384 | static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, | ||
385 | struct omap_sham_reqctx *ctx, | ||
386 | size_t length, int final) | ||
387 | { | ||
388 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, | ||
389 | DMA_TO_DEVICE); | ||
390 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | ||
391 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); | ||
392 | return -EINVAL; | ||
393 | } | ||
394 | |||
395 | ctx->flags &= ~FLAGS_SG; | ||
396 | |||
397 | /* the next call cannot fail, so no unmap is needed on error */ | ||
398 | return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); | ||
399 | } | ||
400 | |||
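omap_sham_xmit_dma_map() folds the buffer mapping that init() used to do into the transmit path, and the error check matters: a failed dma_map_single() must be detected with dma_mapping_error(), never by comparing the handle against zero. A hedged kernel-style sketch of just that pattern (the function and its parameters are illustrative, not driver symbols):

    #include <linux/dma-mapping.h>

    /* Map a driver-owned bounce buffer for device reads; on failure the
     * handle is unusable and must not be unmapped. */
    static int map_bounce_buffer(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *handle)
    {
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
            return -EINVAL;
        return 0;
    }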
373 | static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | 401 | static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) |
374 | { | 402 | { |
375 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 403 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
376 | unsigned int final; | 404 | unsigned int final; |
377 | size_t count; | 405 | size_t count; |
378 | 406 | ||
379 | if (!ctx->total) | ||
380 | return 0; | ||
381 | |||
382 | omap_sham_append_sg(ctx); | 407 | omap_sham_append_sg(ctx); |
383 | 408 | ||
384 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; | 409 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; |
@@ -389,30 +414,68 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | |||
389 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { | 414 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { |
390 | count = ctx->bufcnt; | 415 | count = ctx->bufcnt; |
391 | ctx->bufcnt = 0; | 416 | ctx->bufcnt = 0; |
392 | return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final); | 417 | return omap_sham_xmit_dma_map(dd, ctx, count, final); |
393 | } | 418 | } |
394 | 419 | ||
395 | return 0; | 420 | return 0; |
396 | } | 421 | } |
397 | 422 | ||
398 | static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) | 423 | /* Start address alignment */ |
424 | #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) | ||
425 | /* SHA1 block size alignment */ | ||
426 | #define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) | ||
427 | |||
428 | static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | ||
399 | { | 429 | { |
400 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 430 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
401 | unsigned int length; | 431 | unsigned int length, final, tail; |
432 | struct scatterlist *sg; | ||
433 | |||
434 | if (!ctx->total) | ||
435 | return 0; | ||
436 | |||
437 | if (ctx->bufcnt || ctx->offset) | ||
438 | return omap_sham_update_dma_slow(dd); | ||
439 | |||
440 | dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", | ||
441 | ctx->digcnt, ctx->bufcnt, ctx->total); | ||
402 | 442 | ||
403 | ctx->flags |= FLAGS_FAST; | 443 | sg = ctx->sg; |
404 | 444 | ||
405 | length = min(ctx->total, sg_dma_len(ctx->sg)); | 445 | if (!SG_AA(sg)) |
406 | ctx->total = length; | 446 | return omap_sham_update_dma_slow(dd); |
447 | |||
448 | if (!sg_is_last(sg) && !SG_SA(sg)) | ||
449 | /* size is not SHA1_MD5_BLOCK_SIZE aligned */ | ||
450 | return omap_sham_update_dma_slow(dd); | ||
451 | |||
452 | length = min(ctx->total, sg->length); | ||
453 | |||
454 | if (sg_is_last(sg)) { | ||
455 | if (!(ctx->flags & FLAGS_FINUP)) { | ||
456 | /* a non-final sg entry must be SHA1_MD5_BLOCK_SIZE aligned */ | ||
457 | tail = length & (SHA1_MD5_BLOCK_SIZE - 1); | ||
458 | /* without finup() we need one block to close hash */ | ||
459 | if (!tail) | ||
460 | tail = SHA1_MD5_BLOCK_SIZE; | ||
461 | length -= tail; | ||
462 | } | ||
463 | } | ||
407 | 464 | ||
408 | if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { | 465 | if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { |
409 | dev_err(dd->dev, "dma_map_sg error\n"); | 466 | dev_err(dd->dev, "dma_map_sg error\n"); |
410 | return -EINVAL; | 467 | return -EINVAL; |
411 | } | 468 | } |
412 | 469 | ||
470 | ctx->flags |= FLAGS_SG; | ||
471 | |||
413 | ctx->total -= length; | 472 | ctx->total -= length; |
473 | ctx->offset = length; /* offset where to start slow */ | ||
474 | |||
475 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; | ||
414 | 476 | ||
415 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); | 477 | /* the next call cannot fail, so no unmap is needed on error */ |
478 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); | ||
416 | } | 479 | } |
417 | 480 | ||
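The fast path's tail logic deserves a second look: when the last sg entry arrives without FLAGS_FINUP, a tail is held back so that at least one block remains to close the hash later, and an exactly block-aligned length holds back a whole block. That arithmetic in isolation, as a hedged sketch (BLOCK stands in for SHA1_MD5_BLOCK_SIZE):

    #include <stddef.h>

    #define BLOCK 64   /* SHA1/MD5 block size */

    /* Bytes to withhold from the last sg entry when not finalizing yet. */
    static size_t tail_to_hold_back(size_t length)
    {
        size_t tail = length & (BLOCK - 1);

        if (tail == 0)           /* block-aligned: keep one full block back */
            tail = BLOCK;
        return tail;
    }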
418 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) | 481 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) |
@@ -432,37 +495,19 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | |||
432 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 495 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
433 | 496 | ||
434 | omap_stop_dma(dd->dma_lch); | 497 | omap_stop_dma(dd->dma_lch); |
435 | if (ctx->flags & FLAGS_FAST) | 498 | if (ctx->flags & FLAGS_SG) { |
436 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | 499 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); |
437 | 500 | if (ctx->sg->length == ctx->offset) { | |
438 | return 0; | 501 | ctx->sg = sg_next(ctx->sg); |
439 | } | 502 | if (ctx->sg) |
440 | 503 | ctx->offset = 0; | |
441 | static void omap_sham_cleanup(struct ahash_request *req) | 504 | } |
442 | { | 505 | } else { |
443 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
444 | struct omap_sham_dev *dd = ctx->dd; | ||
445 | unsigned long flags; | ||
446 | |||
447 | spin_lock_irqsave(&dd->lock, flags); | ||
448 | if (ctx->flags & FLAGS_CLEAN) { | ||
449 | spin_unlock_irqrestore(&dd->lock, flags); | ||
450 | return; | ||
451 | } | ||
452 | ctx->flags |= FLAGS_CLEAN; | ||
453 | spin_unlock_irqrestore(&dd->lock, flags); | ||
454 | |||
455 | if (ctx->digcnt) | ||
456 | clk_disable(dd->iclk); | ||
457 | |||
458 | if (ctx->dma_addr) | ||
459 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, | 506 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, |
460 | DMA_TO_DEVICE); | 507 | DMA_TO_DEVICE); |
508 | } | ||
461 | 509 | ||
462 | if (ctx->buffer) | 510 | return 0; |
463 | free_page((unsigned long)ctx->buffer); | ||
464 | |||
465 | dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); | ||
466 | } | 511 | } |
467 | 512 | ||
468 | static int omap_sham_init(struct ahash_request *req) | 513 | static int omap_sham_init(struct ahash_request *req) |
@@ -488,8 +533,6 @@ static int omap_sham_init(struct ahash_request *req) | |||
488 | 533 | ||
489 | ctx->flags = 0; | 534 | ctx->flags = 0; |
490 | 535 | ||
491 | ctx->flags |= FLAGS_FIRST; | ||
492 | |||
493 | dev_dbg(dd->dev, "init: digest size: %d\n", | 536 | dev_dbg(dd->dev, "init: digest size: %d\n", |
494 | crypto_ahash_digestsize(tfm)); | 537 | crypto_ahash_digestsize(tfm)); |
495 | 538 | ||
@@ -498,21 +541,7 @@ static int omap_sham_init(struct ahash_request *req) | |||
498 | 541 | ||
499 | ctx->bufcnt = 0; | 542 | ctx->bufcnt = 0; |
500 | ctx->digcnt = 0; | 543 | ctx->digcnt = 0; |
501 | 544 | ctx->buflen = BUFLEN; | |
502 | ctx->buflen = PAGE_SIZE; | ||
503 | ctx->buffer = (void *)__get_free_page( | ||
504 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
505 | GFP_KERNEL : GFP_ATOMIC); | ||
506 | if (!ctx->buffer) | ||
507 | return -ENOMEM; | ||
508 | |||
509 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, | ||
510 | DMA_TO_DEVICE); | ||
511 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | ||
512 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); | ||
513 | free_page((unsigned long)ctx->buffer); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | 545 | ||
517 | if (tctx->flags & FLAGS_HMAC) { | 546 | if (tctx->flags & FLAGS_HMAC) { |
518 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 547 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
@@ -537,10 +566,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) | |||
537 | 566 | ||
538 | if (ctx->flags & FLAGS_CPU) | 567 | if (ctx->flags & FLAGS_CPU) |
539 | err = omap_sham_update_cpu(dd); | 568 | err = omap_sham_update_cpu(dd); |
540 | else if (ctx->flags & FLAGS_FAST) | ||
541 | err = omap_sham_update_dma_fast(dd); | ||
542 | else | 569 | else |
543 | err = omap_sham_update_dma_slow(dd); | 570 | err = omap_sham_update_dma_start(dd); |
544 | 571 | ||
545 | /* wait for dma completion before can take more data */ | 572 | /* wait for dma completion before can take more data */ |
546 | dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); | 573 | dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); |
@@ -559,21 +586,18 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) | |||
559 | use_dma = 0; | 586 | use_dma = 0; |
560 | 587 | ||
561 | if (use_dma) | 588 | if (use_dma) |
562 | err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1); | 589 | err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); |
563 | else | 590 | else |
564 | err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); | 591 | err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); |
565 | 592 | ||
566 | ctx->bufcnt = 0; | 593 | ctx->bufcnt = 0; |
567 | 594 | ||
568 | if (err != -EINPROGRESS) | ||
569 | omap_sham_cleanup(req); | ||
570 | |||
571 | dev_dbg(dd->dev, "final_req: err: %d\n", err); | 595 | dev_dbg(dd->dev, "final_req: err: %d\n", err); |
572 | 596 | ||
573 | return err; | 597 | return err; |
574 | } | 598 | } |
575 | 599 | ||
576 | static int omap_sham_finish_req_hmac(struct ahash_request *req) | 600 | static int omap_sham_finish_hmac(struct ahash_request *req) |
577 | { | 601 | { |
578 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | 602 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); |
579 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 603 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
@@ -592,45 +616,67 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req) | |||
592 | crypto_shash_finup(&desc.shash, req->result, ds, req->result); | 616 | crypto_shash_finup(&desc.shash, req->result, ds, req->result); |
593 | } | 617 | } |
594 | 618 | ||
619 | static int omap_sham_finish(struct ahash_request *req) | ||
620 | { | ||
621 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
622 | struct omap_sham_dev *dd = ctx->dd; | ||
623 | int err = 0; | ||
624 | |||
625 | if (ctx->digcnt) { | ||
626 | omap_sham_copy_ready_hash(req); | ||
627 | if (ctx->flags & FLAGS_HMAC) | ||
628 | err = omap_sham_finish_hmac(req); | ||
629 | } | ||
630 | |||
631 | dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); | ||
632 | |||
633 | return err; | ||
634 | } | ||
635 | |||
595 | static void omap_sham_finish_req(struct ahash_request *req, int err) | 636 | static void omap_sham_finish_req(struct ahash_request *req, int err) |
596 | { | 637 | { |
597 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 638 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
639 | struct omap_sham_dev *dd = ctx->dd; | ||
598 | 640 | ||
599 | if (!err) { | 641 | if (!err) { |
600 | omap_sham_copy_hash(ctx->dd->req, 1); | 642 | omap_sham_copy_hash(ctx->dd->req, 1); |
601 | if (ctx->flags & FLAGS_HMAC) | 643 | if (ctx->flags & FLAGS_FINAL) |
602 | err = omap_sham_finish_req_hmac(req); | 644 | err = omap_sham_finish(req); |
645 | } else { | ||
646 | ctx->flags |= FLAGS_ERROR; | ||
603 | } | 647 | } |
604 | 648 | ||
605 | if (ctx->flags & FLAGS_FINAL) | 649 | clk_disable(dd->iclk); |
606 | omap_sham_cleanup(req); | 650 | dd->flags &= ~FLAGS_BUSY; |
607 | |||
608 | clear_bit(FLAGS_BUSY, &ctx->dd->flags); | ||
609 | 651 | ||
610 | if (req->base.complete) | 652 | if (req->base.complete) |
611 | req->base.complete(&req->base, err); | 653 | req->base.complete(&req->base, err); |
612 | } | 654 | } |
613 | 655 | ||
614 | static int omap_sham_handle_queue(struct omap_sham_dev *dd) | 656 | static int omap_sham_handle_queue(struct omap_sham_dev *dd, |
657 | struct ahash_request *req) | ||
615 | { | 658 | { |
616 | struct crypto_async_request *async_req, *backlog; | 659 | struct crypto_async_request *async_req, *backlog; |
617 | struct omap_sham_reqctx *ctx; | 660 | struct omap_sham_reqctx *ctx; |
618 | struct ahash_request *req, *prev_req; | 661 | struct ahash_request *prev_req; |
619 | unsigned long flags; | 662 | unsigned long flags; |
620 | int err = 0; | 663 | int err = 0, ret = 0; |
621 | |||
622 | if (test_and_set_bit(FLAGS_BUSY, &dd->flags)) | ||
623 | return 0; | ||
624 | 664 | ||
625 | spin_lock_irqsave(&dd->lock, flags); | 665 | spin_lock_irqsave(&dd->lock, flags); |
666 | if (req) | ||
667 | ret = ahash_enqueue_request(&dd->queue, req); | ||
668 | if (dd->flags & FLAGS_BUSY) { | ||
669 | spin_unlock_irqrestore(&dd->lock, flags); | ||
670 | return ret; | ||
671 | } | ||
626 | backlog = crypto_get_backlog(&dd->queue); | 672 | backlog = crypto_get_backlog(&dd->queue); |
627 | async_req = crypto_dequeue_request(&dd->queue); | 673 | async_req = crypto_dequeue_request(&dd->queue); |
628 | if (!async_req) | 674 | if (async_req) |
629 | clear_bit(FLAGS_BUSY, &dd->flags); | 675 | dd->flags |= FLAGS_BUSY; |
630 | spin_unlock_irqrestore(&dd->lock, flags); | 676 | spin_unlock_irqrestore(&dd->lock, flags); |
631 | 677 | ||
632 | if (!async_req) | 678 | if (!async_req) |
633 | return 0; | 679 | return ret; |
634 | 680 | ||
635 | if (backlog) | 681 | if (backlog) |
636 | backlog->complete(backlog, -EINPROGRESS); | 682 | backlog->complete(backlog, -EINPROGRESS); |
@@ -645,7 +691,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd) | |||
645 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | 691 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", |
646 | ctx->op, req->nbytes); | 692 | ctx->op, req->nbytes); |
647 | 693 | ||
648 | if (req != prev_req && ctx->digcnt) | 694 | |
695 | err = omap_sham_hw_init(dd); | ||
696 | if (err) | ||
697 | goto err1; | ||
698 | |||
699 | omap_set_dma_dest_params(dd->dma_lch, 0, | ||
700 | OMAP_DMA_AMODE_CONSTANT, | ||
701 | dd->phys_base + SHA_REG_DIN(0), 0, 16); | ||
702 | |||
703 | omap_set_dma_dest_burst_mode(dd->dma_lch, | ||
704 | OMAP_DMA_DATA_BURST_16); | ||
705 | |||
706 | omap_set_dma_src_burst_mode(dd->dma_lch, | ||
707 | OMAP_DMA_DATA_BURST_4); | ||
708 | |||
709 | if (ctx->digcnt) | ||
649 | /* request has changed - restore hash */ | 710 | /* request has changed - restore hash */ |
650 | omap_sham_copy_hash(req, 0); | 711 | omap_sham_copy_hash(req, 0); |
651 | 712 | ||
@@ -657,7 +718,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd) | |||
657 | } else if (ctx->op == OP_FINAL) { | 718 | } else if (ctx->op == OP_FINAL) { |
658 | err = omap_sham_final_req(dd); | 719 | err = omap_sham_final_req(dd); |
659 | } | 720 | } |
660 | 721 | err1: | |
661 | if (err != -EINPROGRESS) { | 722 | if (err != -EINPROGRESS) { |
662 | /* done_task will not finish it, so do it here */ | 723 | /* done_task will not finish it, so do it here */ |
663 | omap_sham_finish_req(req, err); | 724 | omap_sham_finish_req(req, err); |
@@ -666,7 +727,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd) | |||
666 | 727 | ||
667 | dev_dbg(dd->dev, "exit, err: %d\n", err); | 728 | dev_dbg(dd->dev, "exit, err: %d\n", err); |
668 | 729 | ||
669 | return err; | 730 | return ret; |
670 | } | 731 | } |
671 | 732 | ||
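The reworked handle_queue() is the heart of this patch: enqueueing the new request and testing/setting BUSY now happen under the same spinlock, so two submitters can no longer both observe an idle engine, and the function returns the enqueue status rather than the processing error. The locking shape, reduced to a hedged pthread sketch (the driver uses spin_lock_irqsave() and the crypto queue helpers instead):

    #include <pthread.h>
    #include <stdbool.h>

    struct engine {
        pthread_mutex_t lock;
        bool busy;
        /* ... the request queue lives here too ... */
    };

    /* Enqueue-and-claim: returns true if the caller now owns the engine
     * and must start dispatching queued requests. */
    static bool enqueue_and_claim(struct engine *dd /* , request */)
    {
        bool claimed = false;

        pthread_mutex_lock(&dd->lock);
        /* the request would be appended to the queue here, under the lock */
        if (!dd->busy) {
            dd->busy = true;
            claimed = true;
        }
        pthread_mutex_unlock(&dd->lock);
        return claimed;
    }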
672 | static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) | 733 | static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) |
@@ -674,18 +735,10 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) | |||
674 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 735 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
675 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | 736 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); |
676 | struct omap_sham_dev *dd = tctx->dd; | 737 | struct omap_sham_dev *dd = tctx->dd; |
677 | unsigned long flags; | ||
678 | int err; | ||
679 | 738 | ||
680 | ctx->op = op; | 739 | ctx->op = op; |
681 | 740 | ||
682 | spin_lock_irqsave(&dd->lock, flags); | 741 | return omap_sham_handle_queue(dd, req); |
683 | err = ahash_enqueue_request(&dd->queue, req); | ||
684 | spin_unlock_irqrestore(&dd->lock, flags); | ||
685 | |||
686 | omap_sham_handle_queue(dd); | ||
687 | |||
688 | return err; | ||
689 | } | 742 | } |
690 | 743 | ||
691 | static int omap_sham_update(struct ahash_request *req) | 744 | static int omap_sham_update(struct ahash_request *req) |
@@ -708,21 +761,13 @@ static int omap_sham_update(struct ahash_request *req) | |||
708 | */ | 761 | */ |
709 | omap_sham_append_sg(ctx); | 762 | omap_sham_append_sg(ctx); |
710 | return 0; | 763 | return 0; |
711 | } else if (ctx->bufcnt + ctx->total <= 64) { | 764 | } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { |
765 | /* | ||
766 | * faster to use CPU for short transfers | ||
767 | */ | ||
712 | ctx->flags |= FLAGS_CPU; | 768 | ctx->flags |= FLAGS_CPU; |
713 | } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) { | ||
714 | /* may be can use faster functions */ | ||
715 | int aligned = IS_ALIGNED((u32)ctx->sg->offset, | ||
716 | sizeof(u32)); | ||
717 | |||
718 | if (aligned && (ctx->flags & FLAGS_FIRST)) | ||
719 | /* digest: first and final */ | ||
720 | ctx->flags |= FLAGS_FAST; | ||
721 | |||
722 | ctx->flags &= ~FLAGS_FIRST; | ||
723 | } | 769 | } |
724 | } else if (ctx->bufcnt + ctx->total <= ctx->buflen) { | 770 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { |
725 | /* if not finup -> not fast */ | ||
726 | omap_sham_append_sg(ctx); | 771 | omap_sham_append_sg(ctx); |
727 | return 0; | 772 | return 0; |
728 | } | 773 | } |
@@ -756,20 +801,21 @@ static int omap_sham_final_shash(struct ahash_request *req) | |||
756 | static int omap_sham_final(struct ahash_request *req) | 801 | static int omap_sham_final(struct ahash_request *req) |
757 | { | 802 | { |
758 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 803 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
759 | int err = 0; | ||
760 | 804 | ||
761 | ctx->flags |= FLAGS_FINUP; | 805 | ctx->flags |= FLAGS_FINUP; |
762 | 806 | ||
807 | if (ctx->flags & FLAGS_ERROR) | ||
808 | return 0; /* uncompleted hash is not needed */ | ||
809 | |||
763 | /* OMAP HW accel works only with buffers >= 9 bytes */ | 810 | /* OMAP HW accel works only with buffers >= 9 bytes */ |
764 | /* HMAC is always >= 9 because of ipad */ | 811 | /* HMAC is always >= 9 because ipad == block size */ |
765 | if ((ctx->digcnt + ctx->bufcnt) < 9) | 812 | if ((ctx->digcnt + ctx->bufcnt) < 9) |
766 | err = omap_sham_final_shash(req); | 813 | return omap_sham_final_shash(req); |
767 | else if (ctx->bufcnt) | 814 | else if (ctx->bufcnt) |
768 | return omap_sham_enqueue(req, OP_FINAL); | 815 | return omap_sham_enqueue(req, OP_FINAL); |
769 | 816 | ||
770 | omap_sham_cleanup(req); | 817 | /* copy ready hash (+ finalize hmac) */ |
771 | 818 | return omap_sham_finish(req); | |
772 | return err; | ||
773 | } | 819 | } |
774 | 820 | ||
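omap_sham_final() now reads as a four-way decision: nothing to do after an error, software fallback for totals under 9 bytes (the accelerator cannot finalize shorter inputs; HMAC never hits this since the ipad alone is a full block), the hardware queue if bytes are still buffered, and otherwise just copying out the ready digest. A hedged sketch of that decision table:

    enum final_path {
        PATH_ERROR_DONE,    /* uncompleted hash is not needed */
        PATH_SW_FALLBACK,   /* too short for the accelerator  */
        PATH_HW_FINAL,      /* flush the buffered tail via HW */
        PATH_FINISH,        /* digest already computed        */
    };

    static enum final_path pick_final_path(int had_error,
                                           unsigned long digcnt,
                                           unsigned long bufcnt)
    {
        if (had_error)
            return PATH_ERROR_DONE;
        if (digcnt + bufcnt < 9)
            return PATH_SW_FALLBACK;
        if (bufcnt)
            return PATH_HW_FINAL;
        return PATH_FINISH;
    }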
775 | static int omap_sham_finup(struct ahash_request *req) | 821 | static int omap_sham_finup(struct ahash_request *req) |
@@ -780,7 +826,7 @@ static int omap_sham_finup(struct ahash_request *req) | |||
780 | ctx->flags |= FLAGS_FINUP; | 826 | ctx->flags |= FLAGS_FINUP; |
781 | 827 | ||
782 | err1 = omap_sham_update(req); | 828 | err1 = omap_sham_update(req); |
783 | if (err1 == -EINPROGRESS) | 829 | if (err1 == -EINPROGRESS || err1 == -EBUSY) |
784 | return err1; | 830 | return err1; |
785 | /* | 831 | /* |
786 | * final() has to be always called to cleanup resources | 832 | * final() has to be always called to cleanup resources |
@@ -845,7 +891,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | |||
845 | } | 891 | } |
846 | 892 | ||
847 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 893 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
848 | sizeof(struct omap_sham_reqctx)); | 894 | sizeof(struct omap_sham_reqctx) + BUFLEN); |
849 | 895 | ||
850 | if (alg_base) { | 896 | if (alg_base) { |
851 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 897 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
@@ -931,7 +977,7 @@ static struct ahash_alg algs[] = { | |||
931 | CRYPTO_ALG_NEED_FALLBACK, | 977 | CRYPTO_ALG_NEED_FALLBACK, |
932 | .cra_blocksize = SHA1_BLOCK_SIZE, | 978 | .cra_blocksize = SHA1_BLOCK_SIZE, |
933 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | 979 | .cra_ctxsize = sizeof(struct omap_sham_ctx), |
934 | .cra_alignmask = 0, | 980 | .cra_alignmask = OMAP_ALIGN_MASK, |
935 | .cra_module = THIS_MODULE, | 981 | .cra_module = THIS_MODULE, |
936 | .cra_init = omap_sham_cra_init, | 982 | .cra_init = omap_sham_cra_init, |
937 | .cra_exit = omap_sham_cra_exit, | 983 | .cra_exit = omap_sham_cra_exit, |
@@ -955,7 +1001,7 @@ static struct ahash_alg algs[] = { | |||
955 | .cra_blocksize = SHA1_BLOCK_SIZE, | 1001 | .cra_blocksize = SHA1_BLOCK_SIZE, |
956 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | 1002 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + |
957 | sizeof(struct omap_sham_hmac_ctx), | 1003 | sizeof(struct omap_sham_hmac_ctx), |
958 | .cra_alignmask = 0, | 1004 | .cra_alignmask = OMAP_ALIGN_MASK, |
959 | .cra_module = THIS_MODULE, | 1005 | .cra_module = THIS_MODULE, |
960 | .cra_init = omap_sham_cra_sha1_init, | 1006 | .cra_init = omap_sham_cra_sha1_init, |
961 | .cra_exit = omap_sham_cra_exit, | 1007 | .cra_exit = omap_sham_cra_exit, |
@@ -979,7 +1025,7 @@ static struct ahash_alg algs[] = { | |||
979 | .cra_blocksize = SHA1_BLOCK_SIZE, | 1025 | .cra_blocksize = SHA1_BLOCK_SIZE, |
980 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | 1026 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + |
981 | sizeof(struct omap_sham_hmac_ctx), | 1027 | sizeof(struct omap_sham_hmac_ctx), |
982 | .cra_alignmask = 0, | 1028 | .cra_alignmask = OMAP_ALIGN_MASK, |
983 | .cra_module = THIS_MODULE, | 1029 | .cra_module = THIS_MODULE, |
984 | .cra_init = omap_sham_cra_md5_init, | 1030 | .cra_init = omap_sham_cra_md5_init, |
985 | .cra_exit = omap_sham_cra_exit, | 1031 | .cra_exit = omap_sham_cra_exit, |
@@ -992,7 +1038,7 @@ static void omap_sham_done_task(unsigned long data) | |||
992 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | 1038 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; |
993 | struct ahash_request *req = dd->req; | 1039 | struct ahash_request *req = dd->req; |
994 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 1040 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
995 | int ready = 1; | 1041 | int ready = 0, err = 0; |
996 | 1042 | ||
997 | if (ctx->flags & FLAGS_OUTPUT_READY) { | 1043 | if (ctx->flags & FLAGS_OUTPUT_READY) { |
998 | ctx->flags &= ~FLAGS_OUTPUT_READY; | 1044 | ctx->flags &= ~FLAGS_OUTPUT_READY; |
@@ -1002,15 +1048,18 @@ static void omap_sham_done_task(unsigned long data) | |||
1002 | if (dd->flags & FLAGS_DMA_ACTIVE) { | 1048 | if (dd->flags & FLAGS_DMA_ACTIVE) { |
1003 | dd->flags &= ~FLAGS_DMA_ACTIVE; | 1049 | dd->flags &= ~FLAGS_DMA_ACTIVE; |
1004 | omap_sham_update_dma_stop(dd); | 1050 | omap_sham_update_dma_stop(dd); |
1005 | omap_sham_update_dma_slow(dd); | 1051 | if (!dd->err) |
1052 | err = omap_sham_update_dma_start(dd); | ||
1006 | } | 1053 | } |
1007 | 1054 | ||
1008 | if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) { | 1055 | err = dd->err ? : err; |
1009 | dev_dbg(dd->dev, "update done\n"); | 1056 | |
1057 | if (err != -EINPROGRESS && (ready || err)) { | ||
1058 | dev_dbg(dd->dev, "update done: err: %d\n", err); | ||
1010 | /* finish current request */ | 1059 | /* finish current request */ |
1011 | omap_sham_finish_req(req, 0); | 1060 | omap_sham_finish_req(req, err); |
1012 | /* start new request */ | 1061 | /* start new request */ |
1013 | omap_sham_handle_queue(dd); | 1062 | omap_sham_handle_queue(dd, NULL); |
1014 | } | 1063 | } |
1015 | } | 1064 | } |
1016 | 1065 | ||
@@ -1018,7 +1067,7 @@ static void omap_sham_queue_task(unsigned long data) | |||
1018 | { | 1067 | { |
1019 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | 1068 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; |
1020 | 1069 | ||
1021 | omap_sham_handle_queue(dd); | 1070 | omap_sham_handle_queue(dd, NULL); |
1022 | } | 1071 | } |
1023 | 1072 | ||
1024 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) | 1073 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) |
@@ -1040,6 +1089,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id) | |||
1040 | omap_sham_read(dd, SHA_REG_CTRL); | 1089 | omap_sham_read(dd, SHA_REG_CTRL); |
1041 | 1090 | ||
1042 | ctx->flags |= FLAGS_OUTPUT_READY; | 1091 | ctx->flags |= FLAGS_OUTPUT_READY; |
1092 | dd->err = 0; | ||
1043 | tasklet_schedule(&dd->done_task); | 1093 | tasklet_schedule(&dd->done_task); |
1044 | 1094 | ||
1045 | return IRQ_HANDLED; | 1095 | return IRQ_HANDLED; |
@@ -1049,8 +1099,13 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) | |||
1049 | { | 1099 | { |
1050 | struct omap_sham_dev *dd = data; | 1100 | struct omap_sham_dev *dd = data; |
1051 | 1101 | ||
1052 | if (likely(lch == dd->dma_lch)) | 1102 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { |
1053 | tasklet_schedule(&dd->done_task); | 1103 | pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); |
1104 | dd->err = -EIO; | ||
1105 | dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ | ||
1106 | } | ||
1107 | |||
1108 | tasklet_schedule(&dd->done_task); | ||
1054 | } | 1109 | } |
1055 | 1110 | ||
1056 | static int omap_sham_dma_init(struct omap_sham_dev *dd) | 1111 | static int omap_sham_dma_init(struct omap_sham_dev *dd) |
@@ -1065,12 +1120,6 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd) | |||
1065 | dev_err(dd->dev, "Unable to request DMA channel\n"); | 1120 | dev_err(dd->dev, "Unable to request DMA channel\n"); |
1066 | return err; | 1121 | return err; |
1067 | } | 1122 | } |
1068 | omap_set_dma_dest_params(dd->dma_lch, 0, | ||
1069 | OMAP_DMA_AMODE_CONSTANT, | ||
1070 | dd->phys_base + SHA_REG_DIN(0), 0, 16); | ||
1071 | |||
1072 | omap_set_dma_dest_burst_mode(dd->dma_lch, | ||
1073 | OMAP_DMA_DATA_BURST_16); | ||
1074 | 1123 | ||
1075 | return 0; | 1124 | return 0; |
1076 | } | 1125 | } |
@@ -1146,9 +1195,9 @@ static int __devinit omap_sham_probe(struct platform_device *pdev) | |||
1146 | 1195 | ||
1147 | /* Initializing the clock */ | 1196 | /* Initializing the clock */ |
1148 | dd->iclk = clk_get(dev, "ick"); | 1197 | dd->iclk = clk_get(dev, "ick"); |
1149 | if (!dd->iclk) { | 1198 | if (IS_ERR(dd->iclk)) { |
1150 | dev_err(dev, "clock initialization failed.\n"); | 1199 | dev_err(dev, "clock initialization failed.\n"); |
1151 | err = -ENODEV; | 1200 | err = PTR_ERR(dd->iclk); |
1152 | goto clk_err; | 1201 | goto clk_err; |
1153 | } | 1202 | } |
1154 | 1203 | ||
@@ -1237,7 +1286,8 @@ static int __init omap_sham_mod_init(void) | |||
1237 | pr_info("loading %s driver\n", "omap-sham"); | 1286 | pr_info("loading %s driver\n", "omap-sham"); |
1238 | 1287 | ||
1239 | if (!cpu_class_is_omap2() || | 1288 | if (!cpu_class_is_omap2() || |
1240 | omap_type() != OMAP2_DEVICE_TYPE_SEC) { | 1289 | (omap_type() != OMAP2_DEVICE_TYPE_SEC && |
1290 | omap_type() != OMAP2_DEVICE_TYPE_EMU)) { | ||
1241 | pr_err("Unsupported cpu\n"); | 1291 | pr_err("Unsupported cpu\n"); |
1242 | return -ENODEV; | 1292 | return -ENODEV; |
1243 | } | 1293 | } |
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 2e992bc8015b..db33d300aa23 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <crypto/algapi.h> | 10 | #include <crypto/algapi.h> |
11 | #include <crypto/aes.h> | 11 | #include <crypto/aes.h> |
12 | #include <crypto/padlock.h> | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/types.h> | 15 | #include <linux/types.h> |
@@ -21,7 +22,6 @@ | |||
21 | #include <asm/byteorder.h> | 22 | #include <asm/byteorder.h> |
22 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
23 | #include <asm/i387.h> | 24 | #include <asm/i387.h> |
24 | #include "padlock.h" | ||
25 | 25 | ||
26 | /* | 26 | /* |
27 | * Number of data blocks actually fetched for each xcrypt insn. | 27 | * Number of data blocks actually fetched for each xcrypt insn. |
@@ -286,7 +286,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, | |||
286 | if (initial) | 286 | if (initial) |
287 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ | 287 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ |
288 | : "+S" (input), "+D" (output), "+a" (iv) | 288 | : "+S" (input), "+D" (output), "+a" (iv) |
289 | : "d" (control_word), "b" (key), "c" (count)); | 289 | : "d" (control_word), "b" (key), "c" (initial)); |
290 | 290 | ||
291 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ | 291 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ |
292 | : "+S" (input), "+D" (output), "+a" (iv) | 292 | : "+S" (input), "+D" (output), "+a" (iv) |
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index d3a27e0119bc..06bdb4b2c6a6 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/internal/hash.h> | 15 | #include <crypto/internal/hash.h> |
16 | #include <crypto/padlock.h> | ||
16 | #include <crypto/sha.h> | 17 | #include <crypto/sha.h> |
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
@@ -22,13 +23,6 @@ | |||
22 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
23 | #include <linux/scatterlist.h> | 24 | #include <linux/scatterlist.h> |
24 | #include <asm/i387.h> | 25 | #include <asm/i387.h> |
25 | #include "padlock.h" | ||
26 | |||
27 | #ifdef CONFIG_64BIT | ||
28 | #define STACK_ALIGN 16 | ||
29 | #else | ||
30 | #define STACK_ALIGN 4 | ||
31 | #endif | ||
32 | 26 | ||
33 | struct padlock_sha_desc { | 27 | struct padlock_sha_desc { |
34 | struct shash_desc fallback; | 28 | struct shash_desc fallback; |
@@ -294,9 +288,250 @@ static struct shash_alg sha256_alg = { | |||
294 | } | 288 | } |
295 | }; | 289 | }; |
296 | 290 | ||
291 | /* Add two shash_alg instances for the hardware-implemented | ||
292 | * multi-part hash supported by the VIA Nano processor. */ | ||
293 | static int padlock_sha1_init_nano(struct shash_desc *desc) | ||
294 | { | ||
295 | struct sha1_state *sctx = shash_desc_ctx(desc); | ||
296 | |||
297 | *sctx = (struct sha1_state){ | ||
298 | .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, | ||
299 | }; | ||
300 | |||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | static int padlock_sha1_update_nano(struct shash_desc *desc, | ||
305 | const u8 *data, unsigned int len) | ||
306 | { | ||
307 | struct sha1_state *sctx = shash_desc_ctx(desc); | ||
308 | unsigned int partial, done; | ||
309 | const u8 *src; | ||
310 | /* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */ | ||
311 | u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ | ||
312 | ((aligned(STACK_ALIGN))); | ||
313 | u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); | ||
314 | int ts_state; | ||
315 | |||
316 | partial = sctx->count & 0x3f; | ||
317 | sctx->count += len; | ||
318 | done = 0; | ||
319 | src = data; | ||
320 | memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE); | ||
321 | |||
322 | if ((partial + len) >= SHA1_BLOCK_SIZE) { | ||
323 | |||
324 | /* Complete the partially buffered block and process it */ | ||
325 | if (partial) { | ||
326 | done = -partial; | ||
327 | memcpy(sctx->buffer + partial, data, | ||
328 | done + SHA1_BLOCK_SIZE); | ||
329 | src = sctx->buffer; | ||
330 | ts_state = irq_ts_save(); | ||
331 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" | ||
332 | : "+S"(src), "+D"(dst) \ | ||
333 | : "a"((long)-1), "c"((unsigned long)1)); | ||
334 | irq_ts_restore(ts_state); | ||
335 | done += SHA1_BLOCK_SIZE; | ||
336 | src = data + done; | ||
337 | } | ||
338 | |||
339 | /* Process the remaining full blocks from the input data */ | ||
340 | if (len - done >= SHA1_BLOCK_SIZE) { | ||
341 | ts_state = irq_ts_save(); | ||
342 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" | ||
343 | : "+S"(src), "+D"(dst) | ||
344 | : "a"((long)-1), | ||
345 | "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE))); | ||
346 | irq_ts_restore(ts_state); | ||
347 | done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE); | ||
348 | src = data + done; | ||
349 | } | ||
350 | partial = 0; | ||
351 | } | ||
352 | memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE); | ||
353 | memcpy(sctx->buffer + partial, src, len - done); | ||
354 | |||
355 | return 0; | ||
356 | } | ||
357 | |||
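The nano update routines (SHA1 here, SHA256 below) implement the classic three-phase partial-block pattern: top up any previously buffered bytes to a full block, push whole blocks straight from the caller's data, and stash the remainder. Stripped of the PadLock asm and TS-flag handling, the skeleton looks like this (a hedged sketch; process() stands in for the hardware instruction):

    #include <stddef.h>
    #include <string.h>

    #define BLK 64

    struct hstate {
        unsigned char buf[BLK];
        size_t count;                        /* total bytes seen so far */
    };

    static void hash_update(struct hstate *s, const unsigned char *data,
                            size_t len,
                            void (*process)(const unsigned char *, size_t nblk))
    {
        size_t partial = s->count & (BLK - 1);

        s->count += len;
        if (partial + len >= BLK) {
            if (partial) {                   /* top up the buffered block */
                size_t fill = BLK - partial;

                memcpy(s->buf + partial, data, fill);
                process(s->buf, 1);
                data += fill;
                len -= fill;
            }
            if (len >= BLK) {                /* whole blocks straight through */
                process(data, len / BLK);
                data += (len / BLK) * BLK;
                len &= (BLK - 1);
            }
            partial = 0;
        }
        memcpy(s->buf + partial, data, len); /* stash the tail for next time */
    }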
358 | static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out) | ||
359 | { | ||
360 | struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); | ||
361 | unsigned int partial, padlen; | ||
362 | __be64 bits; | ||
363 | static const u8 padding[64] = { 0x80, }; | ||
364 | |||
365 | bits = cpu_to_be64(state->count << 3); | ||
366 | |||
367 | /* Pad out to 56 mod 64 */ | ||
368 | partial = state->count & 0x3f; | ||
369 | padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); | ||
370 | padlock_sha1_update_nano(desc, padding, padlen); | ||
371 | |||
372 | /* Append length field bytes */ | ||
373 | padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits)); | ||
374 | |||
375 | /* Swap to output */ | ||
376 | padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5); | ||
377 | |||
378 | return 0; | ||
379 | } | ||
380 | |||
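The final() step does Merkle-Damgard padding by hand: append 0x80, zero-fill so the length field lands at offset 56 mod 64, then append the 64-bit bit count. The padlen formula with two worked examples, restated as a hedged sketch:

    #include <stdint.h>

    /* Padding bytes (0x80 plus zeros) so that, after the 8-byte length
     * field, the message is a whole number of 64-byte blocks. */
    static unsigned int md_padlen(uint64_t count)
    {
        unsigned int partial = count & 0x3f;

        return (partial < 56) ? (56 - partial) : (64 + 56 - partial);
    }

    /* count = 3  -> padlen = 53:  3 + 53 + 8 = 64  (one block)
     * count = 60 -> padlen = 60: 60 + 60 + 8 = 128 (two blocks) */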
381 | static int padlock_sha256_init_nano(struct shash_desc *desc) | ||
382 | { | ||
383 | struct sha256_state *sctx = shash_desc_ctx(desc); | ||
384 | |||
385 | *sctx = (struct sha256_state){ | ||
386 | .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \ | ||
387 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7}, | ||
388 | }; | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data, | ||
394 | unsigned int len) | ||
395 | { | ||
396 | struct sha256_state *sctx = shash_desc_ctx(desc); | ||
397 | unsigned int partial, done; | ||
398 | const u8 *src; | ||
399 | /* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */ | ||
400 | u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ | ||
401 | ((aligned(STACK_ALIGN))); | ||
402 | u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); | ||
403 | int ts_state; | ||
404 | |||
405 | partial = sctx->count & 0x3f; | ||
406 | sctx->count += len; | ||
407 | done = 0; | ||
408 | src = data; | ||
409 | memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE); | ||
410 | |||
411 | if ((partial + len) >= SHA256_BLOCK_SIZE) { | ||
412 | |||
413 | /* Append the bytes in state's buffer to a block to handle */ | ||
414 | if (partial) { | ||
415 | done = -partial; | ||
416 | memcpy(sctx->buf + partial, data, | ||
417 | done + SHA256_BLOCK_SIZE); | ||
418 | src = sctx->buf; | ||
419 | ts_state = irq_ts_save(); | ||
420 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" | ||
421 | : "+S"(src), "+D"(dst) | ||
422 | : "a"((long)-1), "c"((unsigned long)1)); | ||
423 | irq_ts_restore(ts_state); | ||
424 | done += SHA256_BLOCK_SIZE; | ||
425 | src = data + done; | ||
426 | } | ||
427 | |||
428 | /* Process the remaining full blocks from the input data */ | ||
429 | if (len - done >= SHA256_BLOCK_SIZE) { | ||
430 | ts_state = irq_ts_save(); | ||
431 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" | ||
432 | : "+S"(src), "+D"(dst) | ||
433 | : "a"((long)-1), | ||
434 | "c"((unsigned long)((len - done) / 64))); | ||
435 | irq_ts_restore(ts_state); | ||
436 | done += ((len - done) - (len - done) % 64); | ||
437 | src = data + done; | ||
438 | } | ||
439 | partial = 0; | ||
440 | } | ||
441 | memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE); | ||
442 | memcpy(sctx->buf + partial, src, len - done); | ||
443 | |||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out) | ||
448 | { | ||
449 | struct sha256_state *state = | ||
450 | (struct sha256_state *)shash_desc_ctx(desc); | ||
451 | unsigned int partial, padlen; | ||
452 | __be64 bits; | ||
453 | static const u8 padding[64] = { 0x80, }; | ||
454 | |||
455 | bits = cpu_to_be64(state->count << 3); | ||
456 | |||
457 | /* Pad out to 56 mod 64 */ | ||
458 | partial = state->count & 0x3f; | ||
459 | padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); | ||
460 | padlock_sha256_update_nano(desc, padding, padlen); | ||
461 | |||
462 | /* Append length field bytes */ | ||
463 | padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits)); | ||
464 | |||
465 | /* Swap to output */ | ||
466 | padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static int padlock_sha_export_nano(struct shash_desc *desc, | ||
472 | void *out) | ||
473 | { | ||
474 | int statesize = crypto_shash_statesize(desc->tfm); | ||
475 | void *sctx = shash_desc_ctx(desc); | ||
476 | |||
477 | memcpy(out, sctx, statesize); | ||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | static int padlock_sha_import_nano(struct shash_desc *desc, | ||
482 | const void *in) | ||
483 | { | ||
484 | int statesize = crypto_shash_statesize(desc->tfm); | ||
485 | void *sctx = shash_desc_ctx(desc); | ||
486 | |||
487 | memcpy(sctx, in, statesize); | ||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | static struct shash_alg sha1_alg_nano = { | ||
492 | .digestsize = SHA1_DIGEST_SIZE, | ||
493 | .init = padlock_sha1_init_nano, | ||
494 | .update = padlock_sha1_update_nano, | ||
495 | .final = padlock_sha1_final_nano, | ||
496 | .export = padlock_sha_export_nano, | ||
497 | .import = padlock_sha_import_nano, | ||
498 | .descsize = sizeof(struct sha1_state), | ||
499 | .statesize = sizeof(struct sha1_state), | ||
500 | .base = { | ||
501 | .cra_name = "sha1", | ||
502 | .cra_driver_name = "sha1-padlock-nano", | ||
503 | .cra_priority = PADLOCK_CRA_PRIORITY, | ||
504 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
505 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
506 | .cra_module = THIS_MODULE, | ||
507 | } | ||
508 | }; | ||
509 | |||
510 | static struct shash_alg sha256_alg_nano = { | ||
511 | .digestsize = SHA256_DIGEST_SIZE, | ||
512 | .init = padlock_sha256_init_nano, | ||
513 | .update = padlock_sha256_update_nano, | ||
514 | .final = padlock_sha256_final_nano, | ||
515 | .export = padlock_sha_export_nano, | ||
516 | .import = padlock_sha_import_nano, | ||
517 | .descsize = sizeof(struct sha256_state), | ||
518 | .statesize = sizeof(struct sha256_state), | ||
519 | .base = { | ||
520 | .cra_name = "sha256", | ||
521 | .cra_driver_name = "sha256-padlock-nano", | ||
522 | .cra_priority = PADLOCK_CRA_PRIORITY, | ||
523 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
524 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
525 | .cra_module = THIS_MODULE, | ||
526 | } | ||
527 | }; | ||
528 | |||
297 | static int __init padlock_init(void) | 529 | static int __init padlock_init(void) |
298 | { | 530 | { |
299 | int rc = -ENODEV; | 531 | int rc = -ENODEV; |
532 | struct cpuinfo_x86 *c = &cpu_data(0); | ||
533 | struct shash_alg *sha1; | ||
534 | struct shash_alg *sha256; | ||
300 | 535 | ||
301 | if (!cpu_has_phe) { | 536 | if (!cpu_has_phe) { |
302 | printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n"); | 537 | printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n"); |
@@ -308,11 +543,21 @@ static int __init padlock_init(void) | |||
308 | return -ENODEV; | 543 | return -ENODEV; |
309 | } | 544 | } |
310 | 545 | ||
311 | rc = crypto_register_shash(&sha1_alg); | 546 | /* Register the multi-part algorithms when on a |
547 | * VIA Nano processor; otherwise register the originals */ | ||
548 | if (c->x86_model < 0x0f) { | ||
549 | sha1 = &sha1_alg; | ||
550 | sha256 = &sha256_alg; | ||
551 | } else { | ||
552 | sha1 = &sha1_alg_nano; | ||
553 | sha256 = &sha256_alg_nano; | ||
554 | } | ||
555 | |||
556 | rc = crypto_register_shash(sha1); | ||
312 | if (rc) | 557 | if (rc) |
313 | goto out; | 558 | goto out; |
314 | 559 | ||
315 | rc = crypto_register_shash(&sha256_alg); | 560 | rc = crypto_register_shash(sha256); |
316 | if (rc) | 561 | if (rc) |
317 | goto out_unreg1; | 562 | goto out_unreg1; |
318 | 563 | ||
@@ -321,7 +566,8 @@ static int __init padlock_init(void) | |||
321 | return 0; | 566 | return 0; |
322 | 567 | ||
323 | out_unreg1: | 568 | out_unreg1: |
324 | crypto_unregister_shash(&sha1_alg); | 569 | crypto_unregister_shash(sha1); |
570 | |||
325 | out: | 571 | out: |
326 | printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); | 572 | printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); |
327 | return rc; | 573 | return rc; |
@@ -329,8 +575,15 @@ out: | |||
329 | 575 | ||
330 | static void __exit padlock_fini(void) | 576 | static void __exit padlock_fini(void) |
331 | { | 577 | { |
332 | crypto_unregister_shash(&sha1_alg); | 578 | struct cpuinfo_x86 *c = &cpu_data(0); |
333 | crypto_unregister_shash(&sha256_alg); | 579 | |
580 | if (c->x86_model >= 0x0f) { | ||
581 | crypto_unregister_shash(&sha1_alg_nano); | ||
582 | crypto_unregister_shash(&sha256_alg_nano); | ||
583 | } else { | ||
584 | crypto_unregister_shash(&sha1_alg); | ||
585 | crypto_unregister_shash(&sha256_alg); | ||
586 | } | ||
334 | } | 587 | } |
335 | 588 | ||
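Model 0x0f is the dividing line used in both init and fini: older VIA cores keep the original single-shot PHE algorithms, Nano-class cores (x86_model >= 0x0f) get the new multi-part ones, and the two functions must agree or the exit path would unregister the wrong structs. A hedged sketch of the selection (the driver-name strings are illustrative):

    struct alg_pair {
        const char *sha1;
        const char *sha256;
    };

    /* x86_model >= 0x0f identifies a VIA Nano-class core. */
    static struct alg_pair pick_algs(unsigned int x86_model)
    {
        if (x86_model < 0x0f)
            return (struct alg_pair){ "sha1-padlock", "sha256-padlock" };
        return (struct alg_pair){ "sha1-padlock-nano", "sha256-padlock-nano" };
    }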
336 | module_init(padlock_init); | 589 | module_init(padlock_init); |
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h deleted file mode 100644 index b728e4518bd1..000000000000 --- a/drivers/crypto/padlock.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | /* | ||
2 | * Driver for VIA PadLock | ||
3 | * | ||
4 | * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the Free | ||
8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #ifndef _CRYPTO_PADLOCK_H | ||
14 | #define _CRYPTO_PADLOCK_H | ||
15 | |||
16 | #define PADLOCK_ALIGNMENT 16 | ||
17 | |||
18 | #define PFX "padlock: " | ||
19 | |||
20 | #define PADLOCK_CRA_PRIORITY 300 | ||
21 | #define PADLOCK_COMPOSITE_PRIORITY 400 | ||
22 | |||
23 | #endif /* _CRYPTO_PADLOCK_H */ | ||
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c new file mode 100644 index 000000000000..230b5b8cda1f --- /dev/null +++ b/drivers/crypto/picoxcell_crypto.c | |||
@@ -0,0 +1,1873 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | #include <crypto/aead.h> | ||
19 | #include <crypto/aes.h> | ||
20 | #include <crypto/algapi.h> | ||
21 | #include <crypto/authenc.h> | ||
22 | #include <crypto/des.h> | ||
23 | #include <crypto/md5.h> | ||
24 | #include <crypto/sha.h> | ||
25 | #include <crypto/internal/skcipher.h> | ||
26 | #include <linux/clk.h> | ||
27 | #include <linux/crypto.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | #include <linux/dmapool.h> | ||
31 | #include <linux/err.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/list.h> | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/platform_device.h> | ||
38 | #include <linux/pm.h> | ||
39 | #include <linux/rtnetlink.h> | ||
40 | #include <linux/scatterlist.h> | ||
41 | #include <linux/sched.h> | ||
42 | #include <linux/slab.h> | ||
43 | #include <linux/timer.h> | ||
44 | |||
45 | #include "picoxcell_crypto_regs.h" | ||
46 | |||
47 | /* | ||
48 | * The threshold for the number of entries in the CMD FIFO available before | ||
49 | * the CMD0_CNT interrupt is raised. Increasing this value will reduce the | ||
50 | * number of interrupts raised to the CPU. | ||
51 | */ | ||
52 | #define CMD0_IRQ_THRESHOLD 1 | ||
53 | |||
54 | /* | ||
55 | * The timeout period (in jiffies) for a PDU. When the number of PDUs in | ||
56 | * flight is greater than STAT_IRQ_THRESHOLD, or is zero, the timer is disabled. | ||
57 | * When there are packets in flight but lower than the threshold, we enable | ||
58 | * the timer and at expiry, attempt to remove any processed packets from the | ||
59 | * queue and if there are still packets left, schedule the timer again. | ||
60 | */ | ||
61 | #define PACKET_TIMEOUT 1 | ||
62 | |||
63 | /* The priority to register each algorithm with. */ | ||
64 | #define SPACC_CRYPTO_ALG_PRIORITY 10000 | ||
65 | |||
66 | #define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16 | ||
67 | #define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64 | ||
68 | #define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64 | ||
69 | #define SPACC_CRYPTO_IPSEC_MAX_CTXS 32 | ||
70 | #define SPACC_CRYPTO_IPSEC_FIFO_SZ 32 | ||
71 | #define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64 | ||
72 | #define SPACC_CRYPTO_L2_HASH_PG_SZ 64 | ||
73 | #define SPACC_CRYPTO_L2_MAX_CTXS 128 | ||
74 | #define SPACC_CRYPTO_L2_FIFO_SZ 128 | ||
75 | |||
76 | #define MAX_DDT_LEN 16 | ||
77 | |||
78 | /* DDT format. This must match the hardware DDT format exactly. */ | ||
79 | struct spacc_ddt { | ||
80 | dma_addr_t p; | ||
81 | u32 len; | ||
82 | }; | ||
83 | |||
84 | /* | ||
85 | * Asynchronous crypto request structure. | ||
86 | * | ||
87 | * This structure defines a request that is either queued for processing or | ||
88 | * being processed. | ||
89 | */ | ||
90 | struct spacc_req { | ||
91 | struct list_head list; | ||
92 | struct spacc_engine *engine; | ||
93 | struct crypto_async_request *req; | ||
94 | int result; | ||
95 | bool is_encrypt; | ||
96 | unsigned ctx_id; | ||
97 | dma_addr_t src_addr, dst_addr; | ||
98 | struct spacc_ddt *src_ddt, *dst_ddt; | ||
99 | void (*complete)(struct spacc_req *req); | ||
100 | |||
101 | /* AEAD specific bits. */ | ||
102 | u8 *giv; | ||
103 | size_t giv_len; | ||
104 | dma_addr_t giv_pa; | ||
105 | }; | ||
106 | |||
107 | struct spacc_engine { | ||
108 | void __iomem *regs; | ||
109 | struct list_head pending; | ||
110 | int next_ctx; | ||
111 | spinlock_t hw_lock; | ||
112 | int in_flight; | ||
113 | struct list_head completed; | ||
114 | struct list_head in_progress; | ||
115 | struct tasklet_struct complete; | ||
116 | unsigned long fifo_sz; | ||
117 | void __iomem *cipher_ctx_base; | ||
118 | void __iomem *hash_key_base; | ||
119 | struct spacc_alg *algs; | ||
120 | unsigned num_algs; | ||
121 | struct list_head registered_algs; | ||
122 | size_t cipher_pg_sz; | ||
123 | size_t hash_pg_sz; | ||
124 | const char *name; | ||
125 | struct clk *clk; | ||
126 | struct device *dev; | ||
127 | unsigned max_ctxs; | ||
128 | struct timer_list packet_timeout; | ||
129 | unsigned stat_irq_thresh; | ||
130 | struct dma_pool *req_pool; | ||
131 | }; | ||
132 | |||
133 | /* Algorithm type mask. */ | ||
134 | #define SPACC_CRYPTO_ALG_MASK 0x7 | ||
135 | |||
136 | /* SPACC definition of a crypto algorithm. */ | ||
137 | struct spacc_alg { | ||
138 | unsigned long ctrl_default; | ||
139 | unsigned long type; | ||
140 | struct crypto_alg alg; | ||
141 | struct spacc_engine *engine; | ||
142 | struct list_head entry; | ||
143 | int key_offs; | ||
144 | int iv_offs; | ||
145 | }; | ||
146 | |||
147 | /* Generic context structure for any algorithm type. */ | ||
148 | struct spacc_generic_ctx { | ||
149 | struct spacc_engine *engine; | ||
150 | int flags; | ||
151 | int key_offs; | ||
152 | int iv_offs; | ||
153 | }; | ||
154 | |||
155 | /* Block cipher context. */ | ||
156 | struct spacc_ablk_ctx { | ||
157 | struct spacc_generic_ctx generic; | ||
158 | u8 key[AES_MAX_KEY_SIZE]; | ||
159 | u8 key_len; | ||
160 | /* | ||
161 | * The fallback cipher. If the operation can't be done in hardware, | ||
162 | * fallback to a software version. | ||
163 | */ | ||
164 | struct crypto_ablkcipher *sw_cipher; | ||
165 | }; | ||
166 | |||
167 | /* AEAD cipher context. */ | ||
168 | struct spacc_aead_ctx { | ||
169 | struct spacc_generic_ctx generic; | ||
170 | u8 cipher_key[AES_MAX_KEY_SIZE]; | ||
171 | u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ]; | ||
172 | u8 cipher_key_len; | ||
173 | u8 hash_key_len; | ||
174 | struct crypto_aead *sw_cipher; | ||
175 | size_t auth_size; | ||
176 | u8 salt[AES_BLOCK_SIZE]; | ||
177 | }; | ||
178 | |||
179 | static int spacc_ablk_submit(struct spacc_req *req); | ||
180 | |||
181 | static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg) | ||
182 | { | ||
183 | return alg ? container_of(alg, struct spacc_alg, alg) : NULL; | ||
184 | } | ||
185 | |||
186 | static inline int spacc_fifo_cmd_full(struct spacc_engine *engine) | ||
187 | { | ||
188 | u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET); | ||
189 | |||
190 | return fifo_stat & SPA_FIFO_CMD_FULL; | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * Given a cipher context, and a context number, get the base address of the | ||
195 | * context page. | ||
196 | * | ||
197 | * Returns the address of the context page where the key/context may | ||
198 | * be written. | ||
199 | */ | ||
200 | static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx, | ||
201 | unsigned indx, | ||
202 | bool is_cipher_ctx) | ||
203 | { | ||
204 | return is_cipher_ctx ? ctx->engine->cipher_ctx_base + | ||
205 | (indx * ctx->engine->cipher_pg_sz) : | ||
206 | ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz); | ||
207 | } | ||
208 | |||
209 | /* The context pages can only be written with 32-bit accesses. */ | ||
210 | static inline void memcpy_toio32(u32 __iomem *dst, const void *src, | ||
211 | unsigned count) | ||
212 | { | ||
213 | const u32 *src32 = (const u32 *) src; | ||
214 | |||
215 | while (count--) | ||
216 | writel(*src32++, dst++); | ||
217 | } | ||
218 | |||
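/*
 * Illustrative helper (an assumption, not part of the original driver):
 * the count argument of memcpy_toio32() above is in 32-bit words, not
 * bytes, which is why the callers below divide byte lengths by four.
 * A wrapper taking a byte count makes that explicit.
 */
static inline void memcpy_toio32_bytes(u32 __iomem *dst, const void *src,
				       size_t bytes)
{
	/* Assumes bytes is a multiple of 4, as for all keys/IVs here. */
	memcpy_toio32(dst, src, bytes / 4);
}
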
219 | static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx, | ||
220 | void __iomem *page_addr, const u8 *key, | ||
221 | size_t key_len, const u8 *iv, size_t iv_len) | ||
222 | { | ||
223 | void __iomem *key_ptr = page_addr + ctx->key_offs; | ||
224 | void __iomem *iv_ptr = page_addr + ctx->iv_offs; | ||
225 | |||
226 | memcpy_toio32(key_ptr, key, key_len / 4); | ||
227 | memcpy_toio32(iv_ptr, iv, iv_len / 4); | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * Load a context into the engine's context memory. | ||
232 | * | ||
233 | * Returns the index of the context page where the context was loaded. | ||
234 | */ | ||
235 | static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, | ||
236 | const u8 *ciph_key, size_t ciph_len, | ||
237 | const u8 *iv, size_t ivlen, const u8 *hash_key, | ||
238 | size_t hash_len) | ||
239 | { | ||
240 | unsigned indx = ctx->engine->next_ctx++; | ||
241 | void __iomem *ciph_page_addr, *hash_page_addr; | ||
242 | |||
243 | ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1); | ||
244 | hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0); | ||
245 | |||
246 | ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1; | ||
247 | spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv, | ||
248 | ivlen); | ||
249 | writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) | | ||
250 | (1 << SPA_KEY_SZ_CIPHER_OFFSET), | ||
251 | ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET); | ||
252 | |||
253 | if (hash_key) { | ||
254 | memcpy_toio32(hash_page_addr, hash_key, hash_len / 4); | ||
255 | writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET), | ||
256 | ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET); | ||
257 | } | ||
258 | |||
259 | return indx; | ||
260 | } | ||
261 | |||
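/*
 * Note: the wrap of next_ctx above masks with fifo_sz - 1, which only
 * behaves as a modulo when fifo_sz is a power of two. A hypothetical
 * probe-time sanity check (not in the original driver; requires
 * <linux/log2.h>) could make that assumption explicit.
 */
static inline void spacc_check_fifo_sz(struct spacc_engine *engine)
{
	if (!is_power_of_2(engine->fifo_sz))
		dev_warn(engine->dev, "fifo_sz %lu is not a power of two\n",
			 engine->fifo_sz);
}
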
262 | /* Count the number of scatterlist entries needed to cover nbytes. */ | ||
263 | static int sg_count(struct scatterlist *sg_list, int nbytes) | ||
264 | { | ||
265 | struct scatterlist *sg = sg_list; | ||
266 | int sg_nents = 0; | ||
267 | |||
268 | while (nbytes > 0 && sg) { | ||
269 | ++sg_nents; | ||
270 | nbytes -= sg->length; | ||
271 | sg = sg_next(sg); | ||
272 | } | ||
273 | |||
274 | return sg_nents; | ||
275 | } | ||
276 | |||
277 | static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) | ||
278 | { | ||
279 | ddt->p = phys; | ||
280 | ddt->len = len; | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * Take a crypto request and scatterlists for the data and turn them into DDTs | ||
285 | * for passing to the crypto engines. This also DMA maps the data so that the | ||
286 | * crypto engines can DMA to/from them. | ||
287 | */ | ||
288 | static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine, | ||
289 | struct scatterlist *payload, | ||
290 | unsigned nbytes, | ||
291 | enum dma_data_direction dir, | ||
292 | dma_addr_t *ddt_phys) | ||
293 | { | ||
294 | unsigned nents, mapped_ents; | ||
295 | struct scatterlist *cur; | ||
296 | struct spacc_ddt *ddt; | ||
297 | int i; | ||
298 | |||
299 | nents = sg_count(payload, nbytes); | ||
300 | mapped_ents = dma_map_sg(engine->dev, payload, nents, dir); | ||
301 | |||
302 | if (mapped_ents + 1 > MAX_DDT_LEN) | ||
303 | goto out; | ||
304 | |||
305 | ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys); | ||
306 | if (!ddt) | ||
307 | goto out; | ||
308 | |||
309 | for_each_sg(payload, cur, mapped_ents, i) | ||
310 | ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur)); | ||
311 | ddt_set(&ddt[mapped_ents], 0, 0); | ||
312 | |||
313 | return ddt; | ||
314 | |||
315 | out: | ||
316 | dma_unmap_sg(engine->dev, payload, nents, dir); | ||
317 | return NULL; | ||
318 | } | ||
319 | |||
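/*
 * Debug sketch (an assumed helper, not part of the original driver): a
 * table produced by spacc_sg_to_ddt() is an array of { phys, len }
 * descriptors terminated by a zero entry, which is the layout the
 * engine's DMA unit walks.
 */
static void spacc_dump_ddt(struct device *dev, const struct spacc_ddt *ddt)
{
	unsigned i;

	for (i = 0; ddt[i].p || ddt[i].len; ++i)
		dev_dbg(dev, "ddt[%u]: phys=%llx len=%lu\n", i,
			(unsigned long long)ddt[i].p,
			(unsigned long)ddt[i].len);
}
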
320 | static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) | ||
321 | { | ||
322 | struct aead_request *areq = container_of(req->req, struct aead_request, | ||
323 | base); | ||
324 | struct spacc_engine *engine = req->engine; | ||
325 | struct spacc_ddt *src_ddt, *dst_ddt; | ||
326 | unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); | ||
327 | unsigned nents = sg_count(areq->src, areq->cryptlen); | ||
328 | dma_addr_t iv_addr; | ||
329 | struct scatterlist *cur; | ||
330 | int i, dst_ents, src_ents, assoc_ents; | ||
331 | u8 *iv = giv ? giv : areq->iv; | ||
332 | |||
333 | src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); | ||
334 | if (!src_ddt) | ||
335 | return -ENOMEM; | ||
336 | |||
337 | dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); | ||
338 | if (!dst_ddt) { | ||
339 | dma_pool_free(engine->req_pool, src_ddt, req->src_addr); | ||
340 | return -ENOMEM; | ||
341 | } | ||
342 | |||
343 | req->src_ddt = src_ddt; | ||
344 | req->dst_ddt = dst_ddt; | ||
345 | |||
346 | assoc_ents = dma_map_sg(engine->dev, areq->assoc, | ||
347 | sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); | ||
348 | if (areq->src != areq->dst) { | ||
349 | src_ents = dma_map_sg(engine->dev, areq->src, nents, | ||
350 | DMA_TO_DEVICE); | ||
351 | dst_ents = dma_map_sg(engine->dev, areq->dst, nents, | ||
352 | DMA_FROM_DEVICE); | ||
353 | } else { | ||
354 | src_ents = dma_map_sg(engine->dev, areq->src, nents, | ||
355 | DMA_BIDIRECTIONAL); | ||
356 | dst_ents = 0; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Map the IV/GIV. For the GIV it needs to be bidirectional as it is | ||
361 | * formed by the crypto block and sent as the ESP IV for IPSEC. | ||
362 | */ | ||
363 | iv_addr = dma_map_single(engine->dev, iv, ivsize, | ||
364 | giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); | ||
365 | req->giv_pa = iv_addr; | ||
366 | |||
367 | /* | ||
368 | * Add DDT entries for the associated data. For decryption we don't | ||
369 | * copy the associated data into the destination. | ||
370 | */ | ||
371 | for_each_sg(areq->assoc, cur, assoc_ents, i) { | ||
372 | ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); | ||
373 | if (req->is_encrypt) | ||
374 | ddt_set(dst_ddt++, sg_dma_address(cur), | ||
375 | sg_dma_len(cur)); | ||
376 | } | ||
377 | ddt_set(src_ddt++, iv_addr, ivsize); | ||
378 | |||
379 | if (giv || req->is_encrypt) | ||
380 | ddt_set(dst_ddt++, iv_addr, ivsize); | ||
381 | |||
382 | /* | ||
383 | * Now map in the payload for the source and destination and terminate | ||
384 | * with the NULL pointers. | ||
385 | */ | ||
386 | for_each_sg(areq->src, cur, src_ents, i) { | ||
387 | ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); | ||
388 | if (areq->src == areq->dst) | ||
389 | ddt_set(dst_ddt++, sg_dma_address(cur), | ||
390 | sg_dma_len(cur)); | ||
391 | } | ||
392 | |||
393 | for_each_sg(areq->dst, cur, dst_ents, i) | ||
394 | ddt_set(dst_ddt++, sg_dma_address(cur), | ||
395 | sg_dma_len(cur)); | ||
396 | |||
397 | ddt_set(src_ddt, 0, 0); | ||
398 | ddt_set(dst_ddt, 0, 0); | ||
399 | |||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | static void spacc_aead_free_ddts(struct spacc_req *req) | ||
404 | { | ||
405 | struct aead_request *areq = container_of(req->req, struct aead_request, | ||
406 | base); | ||
407 | struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg); | ||
408 | struct spacc_aead_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm); | ||
409 | struct spacc_engine *engine = aead_ctx->generic.engine; | ||
410 | unsigned ivsize = alg->alg.cra_aead.ivsize; | ||
411 | unsigned nents = sg_count(areq->src, areq->cryptlen); | ||
412 | |||
413 | if (areq->src != areq->dst) { | ||
414 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); | ||
415 | dma_unmap_sg(engine->dev, areq->dst, | ||
416 | sg_count(areq->dst, areq->cryptlen), | ||
417 | DMA_FROM_DEVICE); | ||
418 | } else | ||
419 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); | ||
420 | |||
421 | dma_unmap_sg(engine->dev, areq->assoc, | ||
422 | sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); | ||
423 | |||
424 | dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL); | ||
425 | |||
426 | dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr); | ||
427 | dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr); | ||
428 | } | ||
429 | |||
430 | static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, | ||
431 | dma_addr_t ddt_addr, struct scatterlist *payload, | ||
432 | unsigned nbytes, enum dma_data_direction dir) | ||
433 | { | ||
434 | unsigned nents = sg_count(payload, nbytes); | ||
435 | |||
436 | dma_unmap_sg(req->engine->dev, payload, nents, dir); | ||
437 | dma_pool_free(req->engine->req_pool, ddt, ddt_addr); | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * Set key for a DES operation in an AEAD cipher. This also performs weak key | ||
442 | * checking if required. | ||
443 | */ | ||
444 | static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key, | ||
445 | unsigned int len) | ||
446 | { | ||
447 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | ||
448 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
449 | u32 tmp[DES_EXPKEY_WORDS]; | ||
450 | |||
451 | if (unlikely(!des_ekey(tmp, key)) && | ||
452 | (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) { | ||
453 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
454 | return -EINVAL; | ||
455 | } | ||
456 | |||
457 | memcpy(ctx->cipher_key, key, len); | ||
458 | ctx->cipher_key_len = len; | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | /* Set the key for the AES block cipher component of the AEAD transform. */ | ||
464 | static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key, | ||
465 | unsigned int len) | ||
466 | { | ||
467 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | ||
468 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
469 | |||
470 | /* | ||
471 | * IPSec engine only supports 128 and 256 bit AES keys. If we get a | ||
472 | * request for any other size (192 bits) then we need to do a software | ||
473 | * fallback. | ||
474 | */ | ||
475 | if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) { | ||
476 | /* | ||
477 | * Set the fallback transform to use the same request flags as | ||
478 | * the hardware transform. | ||
479 | */ | ||
480 | ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | ||
481 | ctx->sw_cipher->base.crt_flags |= | ||
482 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; | ||
483 | return crypto_aead_setkey(ctx->sw_cipher, key, len); | ||
484 | } | ||
485 | |||
486 | memcpy(ctx->cipher_key, key, len); | ||
487 | ctx->cipher_key_len = len; | ||
488 | |||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | ||
493 | unsigned int keylen) | ||
494 | { | ||
495 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); | ||
496 | struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); | ||
497 | struct rtattr *rta = (void *)key; | ||
498 | struct crypto_authenc_key_param *param; | ||
499 | unsigned int authkeylen, enckeylen; | ||
500 | int err = -EINVAL; | ||
501 | |||
502 | if (!RTA_OK(rta, keylen)) | ||
503 | goto badkey; | ||
504 | |||
505 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | ||
506 | goto badkey; | ||
507 | |||
508 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
509 | goto badkey; | ||
510 | |||
511 | param = RTA_DATA(rta); | ||
512 | enckeylen = be32_to_cpu(param->enckeylen); | ||
513 | |||
514 | key += RTA_ALIGN(rta->rta_len); | ||
515 | keylen -= RTA_ALIGN(rta->rta_len); | ||
516 | |||
517 | if (keylen < enckeylen) | ||
518 | goto badkey; | ||
519 | |||
520 | authkeylen = keylen - enckeylen; | ||
521 | |||
522 | if (enckeylen > AES_MAX_KEY_SIZE) | ||
523 | goto badkey; | ||
524 | |||
525 | if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == | ||
526 | SPA_CTRL_CIPH_ALG_AES) | ||
527 | err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen); | ||
528 | else | ||
529 | err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen); | ||
530 | |||
531 | if (err) | ||
532 | goto badkey; | ||
533 | |||
534 | memcpy(ctx->hash_ctx, key, authkeylen); | ||
535 | ctx->hash_key_len = authkeylen; | ||
536 | |||
537 | return 0; | ||
538 | |||
539 | badkey: | ||
540 | crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
541 | return -EINVAL; | ||
542 | } | ||
543 | |||
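/*
 * Caller-side sketch (an assumption, not driver code) of the key blob
 * format parsed above: an rtattr header carrying a
 * crypto_authenc_key_param with a big-endian enckeylen, followed by the
 * authentication key and then the cipher key.
 */
static int pack_authenc_key_blob(u8 *buf, unsigned int buflen,
				 const u8 *authkey, unsigned int authkeylen,
				 const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	if (buflen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
		return -EINVAL;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);

	return 0;
}
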
544 | static int spacc_aead_setauthsize(struct crypto_aead *tfm, | ||
545 | unsigned int authsize) | ||
546 | { | ||
547 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); | ||
548 | |||
549 | ctx->auth_size = authsize; | ||
550 | |||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | /* | ||
555 | * Check if an AEAD request requires a fallback operation. Some requests can't | ||
556 | * be completed in hardware because the hardware may not support certain key | ||
557 | * sizes. In these cases we need to complete the request in software. | ||
558 | */ | ||
559 | static int spacc_aead_need_fallback(struct spacc_req *req) | ||
560 | { | ||
561 | struct aead_request *aead_req; | ||
562 | struct crypto_tfm *tfm = req->req->tfm; | ||
563 | struct crypto_alg *alg = req->req->tfm->__crt_alg; | ||
564 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | ||
565 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
566 | |||
567 | aead_req = container_of(req->req, struct aead_request, base); | ||
568 | /* | ||
569 | * If we have a non-supported key-length, then we need to do a | ||
570 | * software fallback. | ||
571 | */ | ||
572 | if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == | ||
573 | SPA_CTRL_CIPH_ALG_AES && | ||
574 | ctx->cipher_key_len != AES_KEYSIZE_128 && | ||
575 | ctx->cipher_key_len != AES_KEYSIZE_256) | ||
576 | return 1; | ||
577 | |||
578 | return 0; | ||
579 | } | ||
580 | |||
581 | static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type, | ||
582 | bool is_encrypt) | ||
583 | { | ||
584 | struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req)); | ||
585 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm); | ||
586 | int err; | ||
587 | |||
588 | if (ctx->sw_cipher) { | ||
589 | /* | ||
590 | * Change the request to use the software fallback transform, | ||
591 | * and once the ciphering has completed, put the old transform | ||
592 | * back into the request. | ||
593 | */ | ||
594 | aead_request_set_tfm(req, ctx->sw_cipher); | ||
595 | err = is_encrypt ? crypto_aead_encrypt(req) : | ||
596 | crypto_aead_decrypt(req); | ||
597 | aead_request_set_tfm(req, __crypto_aead_cast(old_tfm)); | ||
598 | } else | ||
599 | err = -EINVAL; | ||
600 | |||
601 | return err; | ||
602 | } | ||
603 | |||
604 | static void spacc_aead_complete(struct spacc_req *req) | ||
605 | { | ||
606 | spacc_aead_free_ddts(req); | ||
607 | req->req->complete(req->req, req->result); | ||
608 | } | ||
609 | |||
610 | static int spacc_aead_submit(struct spacc_req *req) | ||
611 | { | ||
612 | struct crypto_tfm *tfm = req->req->tfm; | ||
613 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
614 | struct crypto_alg *alg = req->req->tfm->__crt_alg; | ||
615 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | ||
616 | struct spacc_engine *engine = ctx->generic.engine; | ||
617 | u32 ctrl, proc_len, assoc_len; | ||
618 | struct aead_request *aead_req = | ||
619 | container_of(req->req, struct aead_request, base); | ||
620 | |||
621 | req->result = -EINPROGRESS; | ||
622 | req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key, | ||
623 | ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize, | ||
624 | ctx->hash_ctx, ctx->hash_key_len); | ||
625 | |||
626 | /* Set the source and destination DDT pointers. */ | ||
627 | writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET); | ||
628 | writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET); | ||
629 | writel(0, engine->regs + SPA_OFFSET_REG_OFFSET); | ||
630 | |||
631 | assoc_len = aead_req->assoclen; | ||
632 | proc_len = aead_req->cryptlen + assoc_len; | ||
633 | |||
634 | /* | ||
635 | * If we aren't generating an IV, then we need to include the IV in the | ||
636 | * associated data so that it is included in the hash. | ||
637 | */ | ||
638 | if (!req->giv) { | ||
639 | assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); | ||
640 | proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); | ||
641 | } else | ||
642 | proc_len += req->giv_len; | ||
643 | |||
644 | /* | ||
645 | * If we are decrypting, we need to take the length of the ICV out of | ||
646 | * the processing length. | ||
647 | */ | ||
648 | if (!req->is_encrypt) | ||
649 | proc_len -= ctx->auth_size; | ||
650 | |||
651 | writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET); | ||
652 | writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET); | ||
653 | writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET); | ||
654 | writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET); | ||
655 | writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET); | ||
656 | |||
657 | ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) | | ||
658 | (1 << SPA_CTRL_ICV_APPEND); | ||
659 | if (req->is_encrypt) | ||
660 | ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY); | ||
661 | else | ||
662 | ctrl |= (1 << SPA_CTRL_KEY_EXP); | ||
663 | |||
664 | mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT); | ||
665 | |||
666 | writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET); | ||
667 | |||
668 | return -EINPROGRESS; | ||
669 | } | ||
670 | |||
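/*
 * Worked example of the length bookkeeping above, under assumed values
 * (a sketch, not taken from the driver): AES-CBC/HMAC-SHA1 encryption
 * with cryptlen = 64, assoclen = 8, ivsize = 16 and no generated IV
 * (req->giv == NULL):
 *
 *	assoc_len = 8 + 16 = 24    (the IV is folded into the AAD so it
 *	                            is covered by the hash)
 *	proc_len  = 64 + 24 = 88
 *
 * ctx->auth_size is only subtracted on decryption, so the ICV at the
 * end of the buffer is verified rather than deciphered.
 */
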
671 | static int spacc_req_submit(struct spacc_req *req); | ||
672 | |||
673 | static void spacc_push(struct spacc_engine *engine) | ||
674 | { | ||
675 | struct spacc_req *req; | ||
676 | |||
677 | while (!list_empty(&engine->pending) && | ||
678 | engine->in_flight + 1 <= engine->fifo_sz) { | ||
679 | |||
680 | ++engine->in_flight; | ||
681 | req = list_first_entry(&engine->pending, struct spacc_req, | ||
682 | list); | ||
683 | list_move_tail(&req->list, &engine->in_progress); | ||
684 | |||
685 | req->result = spacc_req_submit(req); | ||
686 | } | ||
687 | } | ||
688 | |||
689 | /* | ||
690 | * Set up an AEAD request for processing. This will configure the engine, load | ||
691 | * the context and then start the packet processing. | ||
692 | * | ||
693 | * @giv Pointer to destination address for a generated IV. If the | ||
694 | * request does not need to generate an IV then this should be set to NULL. | ||
695 | */ | ||
696 | static int spacc_aead_setup(struct aead_request *req, u8 *giv, | ||
697 | unsigned alg_type, bool is_encrypt) | ||
698 | { | ||
699 | struct crypto_alg *alg = req->base.tfm->__crt_alg; | ||
700 | struct spacc_engine *engine = to_spacc_alg(alg)->engine; | ||
701 | struct spacc_req *dev_req = aead_request_ctx(req); | ||
702 | int err = -EINPROGRESS; | ||
703 | unsigned long flags; | ||
704 | unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); | ||
705 | |||
706 | dev_req->giv = giv; | ||
707 | dev_req->giv_len = ivsize; | ||
708 | dev_req->req = &req->base; | ||
709 | dev_req->is_encrypt = is_encrypt; | ||
710 | dev_req->result = -EBUSY; | ||
711 | dev_req->engine = engine; | ||
712 | dev_req->complete = spacc_aead_complete; | ||
713 | |||
714 | if (unlikely(spacc_aead_need_fallback(dev_req))) | ||
715 | return spacc_aead_do_fallback(req, alg_type, is_encrypt); | ||
716 | |||
717 | err = spacc_aead_make_ddts(dev_req, dev_req->giv); | ||
718 | if (err) | ||
719 | goto out; | ||
720 | | ||
721 | err = -EINPROGRESS; | ||
720 | spin_lock_irqsave(&engine->hw_lock, flags); | ||
721 | if (unlikely(spacc_fifo_cmd_full(engine)) || | ||
722 | engine->in_flight + 1 > engine->fifo_sz) { | ||
723 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { | ||
724 | err = -EBUSY; | ||
725 | spin_unlock_irqrestore(&engine->hw_lock, flags); | ||
726 | goto out_free_ddts; | ||
727 | } | ||
728 | list_add_tail(&dev_req->list, &engine->pending); | ||
729 | } else { | ||
730 | list_add_tail(&dev_req->list, &engine->pending); | ||
731 | spacc_push(engine); | ||
732 | } | ||
733 | spin_unlock_irqrestore(&engine->hw_lock, flags); | ||
734 | |||
735 | goto out; | ||
736 | |||
737 | out_free_ddts: | ||
738 | spacc_aead_free_ddts(dev_req); | ||
739 | out: | ||
740 | return err; | ||
741 | } | ||
742 | |||
743 | static int spacc_aead_encrypt(struct aead_request *req) | ||
744 | { | ||
745 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
746 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | ||
747 | struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); | ||
748 | |||
749 | return spacc_aead_setup(req, NULL, alg->type, 1); | ||
750 | } | ||
751 | |||
752 | static int spacc_aead_givencrypt(struct aead_givcrypt_request *req) | ||
753 | { | ||
754 | struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); | ||
755 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); | ||
756 | size_t ivsize = crypto_aead_ivsize(tfm); | ||
757 | struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); | ||
758 | unsigned len; | ||
759 | __be64 seq; | ||
760 | |||
761 | memcpy(req->areq.iv, ctx->salt, ivsize); | ||
762 | len = ivsize; | ||
763 | if (ivsize > sizeof(u64)) { | ||
764 | memset(req->giv, 0, ivsize - sizeof(u64)); | ||
765 | len = sizeof(u64); | ||
766 | } | ||
767 | seq = cpu_to_be64(req->seq); | ||
768 | memcpy(req->giv + ivsize - len, &seq, len); | ||
769 | |||
770 | return spacc_aead_setup(&req->areq, req->giv, alg->type, 1); | ||
771 | } | ||
772 | |||
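/*
 * Illustrative layout (assumed values) for the IV generation above with
 * ivsize = 16 and req->seq = 5: areq.iv receives the random
 * per-transform salt, while the generated IV is zero padding followed
 * by the big-endian sequence number:
 *
 *	req->giv: 00 00 00 00 00 00 00 00  00 00 00 00 00 00 00 05
 */
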
773 | static int spacc_aead_decrypt(struct aead_request *req) | ||
774 | { | ||
775 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
776 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | ||
777 | struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); | ||
778 | |||
779 | return spacc_aead_setup(req, NULL, alg->type, 0); | ||
780 | } | ||
781 | |||
782 | /* | ||
783 | * Initialise a new AEAD context. This allocates the software fallback | ||
784 | * cipher and sets up the generic context fields. | ||
785 | */ | ||
786 | static int spacc_aead_cra_init(struct crypto_tfm *tfm) | ||
787 | { | ||
788 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
789 | struct crypto_alg *alg = tfm->__crt_alg; | ||
790 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | ||
791 | struct spacc_engine *engine = spacc_alg->engine; | ||
792 | |||
793 | ctx->generic.flags = spacc_alg->type; | ||
794 | ctx->generic.engine = engine; | ||
795 | ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0, | ||
796 | CRYPTO_ALG_ASYNC | | ||
797 | CRYPTO_ALG_NEED_FALLBACK); | ||
798 | if (IS_ERR(ctx->sw_cipher)) { | ||
799 | dev_warn(engine->dev, "failed to allocate fallback for %s\n", | ||
800 | alg->cra_name); | ||
801 | ctx->sw_cipher = NULL; | ||
802 | } | ||
803 | ctx->generic.key_offs = spacc_alg->key_offs; | ||
804 | ctx->generic.iv_offs = spacc_alg->iv_offs; | ||
805 | |||
806 | get_random_bytes(ctx->salt, sizeof(ctx->salt)); | ||
807 | |||
808 | tfm->crt_aead.reqsize = sizeof(struct spacc_req); | ||
809 | |||
810 | return 0; | ||
811 | } | ||
812 | |||
813 | /* | ||
814 | * Destructor for an AEAD context. This is called when the transform is freed | ||
815 | * and must free the fallback cipher. | ||
816 | */ | ||
817 | static void spacc_aead_cra_exit(struct crypto_tfm *tfm) | ||
818 | { | ||
819 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
820 | |||
821 | if (ctx->sw_cipher) | ||
822 | crypto_free_aead(ctx->sw_cipher); | ||
823 | ctx->sw_cipher = NULL; | ||
824 | } | ||
825 | |||
826 | /* | ||
827 | * Set the DES key for a block cipher transform. This also performs weak key | ||
828 | * checking if the transform has requested it. | ||
829 | */ | ||
830 | static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
831 | unsigned int len) | ||
832 | { | ||
833 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
834 | struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); | ||
835 | u32 tmp[DES_EXPKEY_WORDS]; | ||
836 | |||
837 | if (len > DES3_EDE_KEY_SIZE) { | ||
838 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
839 | return -EINVAL; | ||
840 | } | ||
841 | |||
842 | if (unlikely(!des_ekey(tmp, key)) && | ||
843 | (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) { | ||
844 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
845 | return -EINVAL; | ||
846 | } | ||
847 | |||
848 | memcpy(ctx->key, key, len); | ||
849 | ctx->key_len = len; | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | /* | ||
855 | * Set the key for an AES block cipher. Some key lengths are not supported in | ||
856 | * hardware so this must also check whether a fallback is needed. | ||
857 | */ | ||
858 | static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
859 | unsigned int len) | ||
860 | { | ||
861 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
862 | struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); | ||
863 | int err = 0; | ||
864 | |||
865 | if (len > AES_MAX_KEY_SIZE) { | ||
866 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
867 | return -EINVAL; | ||
868 | } | ||
869 | |||
870 | /* | ||
871 | * IPSec engine only supports 128 and 256 bit AES keys. If we get a | ||
872 | * request for any other size (192 bits) then we need to do a software | ||
873 | * fallback. | ||
874 | */ | ||
875 | if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && | ||
876 | ctx->sw_cipher) { | ||
877 | /* | ||
878 | * Set the fallback transform to use the same request flags as | ||
879 | * the hardware transform. | ||
880 | */ | ||
881 | ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | ||
882 | ctx->sw_cipher->base.crt_flags |= | ||
883 | cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK; | ||
884 | |||
885 | err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len); | ||
886 | if (err) | ||
887 | goto sw_setkey_failed; | ||
888 | } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && | ||
889 | !ctx->sw_cipher) | ||
890 | err = -EINVAL; | ||
891 | |||
892 | memcpy(ctx->key, key, len); | ||
893 | ctx->key_len = len; | ||
894 | |||
895 | sw_setkey_failed: | ||
896 | if (err && ctx->sw_cipher) { | ||
897 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | ||
898 | tfm->crt_flags |= | ||
899 | ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK; | ||
900 | } | ||
901 | |||
902 | return err; | ||
903 | } | ||
904 | |||
905 | static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher, | ||
906 | const u8 *key, unsigned int len) | ||
907 | { | ||
908 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
909 | struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); | ||
910 | int err = 0; | ||
911 | |||
912 | if (len > AES_MAX_KEY_SIZE) { | ||
913 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
914 | err = -EINVAL; | ||
915 | goto out; | ||
916 | } | ||
917 | |||
918 | memcpy(ctx->key, key, len); | ||
919 | ctx->key_len = len; | ||
920 | |||
921 | out: | ||
922 | return err; | ||
923 | } | ||
924 | |||
925 | static int spacc_ablk_need_fallback(struct spacc_req *req) | ||
926 | { | ||
927 | struct spacc_ablk_ctx *ctx; | ||
928 | struct crypto_tfm *tfm = req->req->tfm; | ||
929 | struct crypto_alg *alg = req->req->tfm->__crt_alg; | ||
930 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | ||
931 | |||
932 | ctx = crypto_tfm_ctx(tfm); | ||
933 | |||
934 | return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == | ||
935 | SPA_CTRL_CIPH_ALG_AES && | ||
936 | ctx->key_len != AES_KEYSIZE_128 && | ||
937 | ctx->key_len != AES_KEYSIZE_256; | ||
938 | } | ||
939 | |||
940 | static void spacc_ablk_complete(struct spacc_req *req) | ||
941 | { | ||
942 | struct ablkcipher_request *ablk_req = | ||
943 | container_of(req->req, struct ablkcipher_request, base); | ||
944 | |||
945 | if (ablk_req->src != ablk_req->dst) { | ||
946 | spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src, | ||
947 | ablk_req->nbytes, DMA_TO_DEVICE); | ||
948 | spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst, | ||
949 | ablk_req->nbytes, DMA_FROM_DEVICE); | ||
950 | } else | ||
951 | spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst, | ||
952 | ablk_req->nbytes, DMA_BIDIRECTIONAL); | ||
953 | |||
954 | req->req->complete(req->req, req->result); | ||
955 | } | ||
956 | |||
957 | static int spacc_ablk_submit(struct spacc_req *req) | ||
958 | { | ||
959 | struct crypto_tfm *tfm = req->req->tfm; | ||
960 | struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); | ||
961 | struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req); | ||
962 | struct crypto_alg *alg = req->req->tfm->__crt_alg; | ||
963 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | ||
964 | struct spacc_engine *engine = ctx->generic.engine; | ||
965 | u32 ctrl; | ||
966 | |||
967 | req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key, | ||
968 | ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize, | ||
969 | NULL, 0); | ||
970 | |||
971 | writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET); | ||
972 | writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET); | ||
973 | writel(0, engine->regs + SPA_OFFSET_REG_OFFSET); | ||
974 | |||
975 | writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET); | ||
976 | writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET); | ||
977 | writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET); | ||
978 | writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET); | ||
979 | |||
980 | ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) | | ||
981 | (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) : | ||
982 | (1 << SPA_CTRL_KEY_EXP)); | ||
983 | |||
984 | mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT); | ||
985 | |||
986 | writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET); | ||
987 | |||
988 | return -EINPROGRESS; | ||
989 | } | ||
990 | |||
991 | static int spacc_ablk_do_fallback(struct ablkcipher_request *req, | ||
992 | unsigned alg_type, bool is_encrypt) | ||
993 | { | ||
994 | struct crypto_tfm *old_tfm = | ||
995 | crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); | ||
996 | struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); | ||
997 | int err; | ||
998 | |||
999 | if (!ctx->sw_cipher) | ||
1000 | return -EINVAL; | ||
1001 | |||
1002 | /* | ||
1003 | * Change the request to use the software fallback transform, and once | ||
1004 | * the ciphering has completed, put the old transform back into the | ||
1005 | * request. | ||
1006 | */ | ||
1007 | ablkcipher_request_set_tfm(req, ctx->sw_cipher); | ||
1008 | err = is_encrypt ? crypto_ablkcipher_encrypt(req) : | ||
1009 | crypto_ablkcipher_decrypt(req); | ||
1010 | ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm)); | ||
1011 | |||
1012 | return err; | ||
1013 | } | ||
1014 | |||
1015 | static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type, | ||
1016 | bool is_encrypt) | ||
1017 | { | ||
1018 | struct crypto_alg *alg = req->base.tfm->__crt_alg; | ||
1019 | struct spacc_engine *engine = to_spacc_alg(alg)->engine; | ||
1020 | struct spacc_req *dev_req = ablkcipher_request_ctx(req); | ||
1021 | unsigned long flags; | ||
1022 | int err = -ENOMEM; | ||
1023 | |||
1024 | dev_req->req = &req->base; | ||
1025 | dev_req->is_encrypt = is_encrypt; | ||
1026 | dev_req->engine = engine; | ||
1027 | dev_req->complete = spacc_ablk_complete; | ||
1028 | dev_req->result = -EINPROGRESS; | ||
1029 | |||
1030 | if (unlikely(spacc_ablk_need_fallback(dev_req))) | ||
1031 | return spacc_ablk_do_fallback(req, alg_type, is_encrypt); | ||
1032 | |||
1033 | /* | ||
1034 | * Create the DDTs for the engine. If we share the same source and | ||
1035 | * destination then we can optimize by reusing the DDTs. | ||
1036 | */ | ||
1037 | if (req->src != req->dst) { | ||
1038 | dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src, | ||
1039 | req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr); | ||
1040 | if (!dev_req->src_ddt) | ||
1041 | goto out; | ||
1042 | |||
1043 | dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst, | ||
1044 | req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr); | ||
1045 | if (!dev_req->dst_ddt) | ||
1046 | goto out_free_src; | ||
1047 | } else { | ||
1048 | dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst, | ||
1049 | req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr); | ||
1050 | if (!dev_req->dst_ddt) | ||
1051 | goto out; | ||
1052 | |||
1053 | dev_req->src_ddt = NULL; | ||
1054 | dev_req->src_addr = dev_req->dst_addr; | ||
1055 | } | ||
1056 | |||
1057 | err = -EINPROGRESS; | ||
1058 | spin_lock_irqsave(&engine->hw_lock, flags); | ||
1059 | /* | ||
1060 | * Check if the engine will accept the operation now. If it won't then | ||
1061 | * we either stick it on the end of the pending list if we can backlog, | ||
1062 | * or bail out with an error if not. | ||
1063 | */ | ||
1064 | if (unlikely(spacc_fifo_cmd_full(engine)) || | ||
1065 | engine->in_flight + 1 > engine->fifo_sz) { | ||
1066 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { | ||
1067 | err = -EBUSY; | ||
1068 | spin_unlock_irqrestore(&engine->hw_lock, flags); | ||
1069 | goto out_free_ddts; | ||
1070 | } | ||
1071 | list_add_tail(&dev_req->list, &engine->pending); | ||
1072 | } else { | ||
1073 | list_add_tail(&dev_req->list, &engine->pending); | ||
1074 | spacc_push(engine); | ||
1075 | } | ||
1076 | spin_unlock_irqrestore(&engine->hw_lock, flags); | ||
1077 | |||
1078 | goto out; | ||
1079 | |||
1080 | out_free_ddts: | ||
1081 | spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst, | ||
1082 | req->nbytes, req->src == req->dst ? | ||
1083 | DMA_BIDIRECTIONAL : DMA_FROM_DEVICE); | ||
1084 | out_free_src: | ||
1085 | if (req->src != req->dst) | ||
1086 | spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr, | ||
1087 | req->src, req->nbytes, DMA_TO_DEVICE); | ||
1088 | out: | ||
1089 | return err; | ||
1090 | } | ||
1091 | |||
1092 | static int spacc_ablk_cra_init(struct crypto_tfm *tfm) | ||
1093 | { | ||
1094 | struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1095 | struct crypto_alg *alg = tfm->__crt_alg; | ||
1096 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | ||
1097 | struct spacc_engine *engine = spacc_alg->engine; | ||
1098 | |||
1099 | ctx->generic.flags = spacc_alg->type; | ||
1100 | ctx->generic.engine = engine; | ||
1101 | if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { | ||
1102 | ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0, | ||
1103 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | ||
1104 | if (IS_ERR(ctx->sw_cipher)) { | ||
1105 | dev_warn(engine->dev, "failed to allocate fallback for %s\n", | ||
1106 | alg->cra_name); | ||
1107 | ctx->sw_cipher = NULL; | ||
1108 | } | ||
1109 | } | ||
1110 | ctx->generic.key_offs = spacc_alg->key_offs; | ||
1111 | ctx->generic.iv_offs = spacc_alg->iv_offs; | ||
1112 | |||
1113 | tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req); | ||
1114 | |||
1115 | return 0; | ||
1116 | } | ||
1117 | |||
1118 | static void spacc_ablk_cra_exit(struct crypto_tfm *tfm) | ||
1119 | { | ||
1120 | struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1121 | |||
1122 | if (ctx->sw_cipher) | ||
1123 | crypto_free_ablkcipher(ctx->sw_cipher); | ||
1124 | ctx->sw_cipher = NULL; | ||
1125 | } | ||
1126 | |||
1127 | static int spacc_ablk_encrypt(struct ablkcipher_request *req) | ||
1128 | { | ||
1129 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); | ||
1130 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
1131 | struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); | ||
1132 | |||
1133 | return spacc_ablk_setup(req, alg->type, 1); | ||
1134 | } | ||
1135 | |||
1136 | static int spacc_ablk_decrypt(struct ablkcipher_request *req) | ||
1137 | { | ||
1138 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); | ||
1139 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
1140 | struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); | ||
1141 | |||
1142 | return spacc_ablk_setup(req, alg->type, 0); | ||
1143 | } | ||
1144 | |||
1145 | static inline int spacc_fifo_stat_empty(struct spacc_engine *engine) | ||
1146 | { | ||
1147 | return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) & | ||
1148 | SPA_FIFO_STAT_EMPTY; | ||
1149 | } | ||
1150 | |||
1151 | static void spacc_process_done(struct spacc_engine *engine) | ||
1152 | { | ||
1153 | struct spacc_req *req; | ||
1154 | unsigned long flags; | ||
1155 | |||
1156 | spin_lock_irqsave(&engine->hw_lock, flags); | ||
1157 | |||
1158 | while (!spacc_fifo_stat_empty(engine)) { | ||
1159 | req = list_first_entry(&engine->in_progress, struct spacc_req, | ||
1160 | list); | ||
1161 | list_move_tail(&req->list, &engine->completed); | ||
1162 | --engine->in_flight; | ||
1163 | |||
1164 | /* POP the status register. */ | ||
1165 | writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET); | ||
1166 | req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) & | ||
1167 | SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET; | ||
1168 | |||
1169 | /* | ||
1170 | * Convert the SPAcc error status into the standard POSIX error | ||
1171 | * codes. | ||
1172 | */ | ||
1173 | if (unlikely(req->result)) { | ||
1174 | switch (req->result) { | ||
1175 | case SPA_STATUS_ICV_FAIL: | ||
1176 | req->result = -EBADMSG; | ||
1177 | break; | ||
1178 | |||
1179 | case SPA_STATUS_MEMORY_ERROR: | ||
1180 | dev_warn(engine->dev, | ||
1181 | "memory error triggered\n"); | ||
1182 | req->result = -EFAULT; | ||
1183 | break; | ||
1184 | |||
1185 | case SPA_STATUS_BLOCK_ERROR: | ||
1186 | dev_warn(engine->dev, | ||
1187 | "block error triggered\n"); | ||
1188 | req->result = -EIO; | ||
1189 | break; | ||
1190 | } | ||
1191 | } | ||
1192 | } | ||
1193 | |||
1194 | tasklet_schedule(&engine->complete); | ||
1195 | |||
1196 | spin_unlock_irqrestore(&engine->hw_lock, flags); | ||
1197 | } | ||
1198 | |||
1199 | static irqreturn_t spacc_spacc_irq(int irq, void *dev) | ||
1200 | { | ||
1201 | struct spacc_engine *engine = (struct spacc_engine *)dev; | ||
1202 | u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET); | ||
1203 | |||
1204 | writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET); | ||
1205 | spacc_process_done(engine); | ||
1206 | |||
1207 | return IRQ_HANDLED; | ||
1208 | } | ||
1209 | |||
1210 | static void spacc_packet_timeout(unsigned long data) | ||
1211 | { | ||
1212 | struct spacc_engine *engine = (struct spacc_engine *)data; | ||
1213 | |||
1214 | spacc_process_done(engine); | ||
1215 | } | ||
1216 | |||
1217 | static int spacc_req_submit(struct spacc_req *req) | ||
1218 | { | ||
1219 | struct crypto_alg *alg = req->req->tfm->__crt_alg; | ||
1220 | |||
1221 | if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags)) | ||
1222 | return spacc_aead_submit(req); | ||
1223 | else | ||
1224 | return spacc_ablk_submit(req); | ||
1225 | } | ||
1226 | |||
1227 | static void spacc_spacc_complete(unsigned long data) | ||
1228 | { | ||
1229 | struct spacc_engine *engine = (struct spacc_engine *)data; | ||
1230 | struct spacc_req *req, *tmp; | ||
1231 | unsigned long flags; | ||
1232 | LIST_HEAD(completed); | ||
1233 | |||
1234 | spin_lock_irqsave(&engine->hw_lock, flags); | ||
1235 | |||
1236 | list_splice_init(&engine->completed, &completed); | ||
1237 | spacc_push(engine); | ||
1238 | if (engine->in_flight) | ||
1239 | mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT); | ||
1240 | |||
1241 | spin_unlock_irqrestore(&engine->hw_lock, flags); | ||
1242 | |||
1243 | list_for_each_entry_safe(req, tmp, &completed, list) { | ||
1244 | req->complete(req); | ||
1245 | list_del(&req->list); | ||
1246 | } | ||
1247 | } | ||
1248 | |||
1249 | #ifdef CONFIG_PM | ||
1250 | static int spacc_suspend(struct device *dev) | ||
1251 | { | ||
1252 | struct platform_device *pdev = to_platform_device(dev); | ||
1253 | struct spacc_engine *engine = platform_get_drvdata(pdev); | ||
1254 | |||
1255 | /* | ||
1256 | * We only support standby mode. All we have to do is gate the clock to | ||
1257 | * the spacc. The hardware will preserve state until we turn it back | ||
1258 | * on again. | ||
1259 | */ | ||
1260 | clk_disable(engine->clk); | ||
1261 | |||
1262 | return 0; | ||
1263 | } | ||
1264 | |||
1265 | static int spacc_resume(struct device *dev) | ||
1266 | { | ||
1267 | struct platform_device *pdev = to_platform_device(dev); | ||
1268 | struct spacc_engine *engine = platform_get_drvdata(pdev); | ||
1269 | |||
1270 | return clk_enable(engine->clk); | ||
1271 | } | ||
1272 | |||
1273 | static const struct dev_pm_ops spacc_pm_ops = { | ||
1274 | .suspend = spacc_suspend, | ||
1275 | .resume = spacc_resume, | ||
1276 | }; | ||
1277 | #endif /* CONFIG_PM */ | ||
1278 | |||
1279 | static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev) | ||
1280 | { | ||
1281 | return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL; | ||
1282 | } | ||
1283 | |||
1284 | static ssize_t spacc_stat_irq_thresh_show(struct device *dev, | ||
1285 | struct device_attribute *attr, | ||
1286 | char *buf) | ||
1287 | { | ||
1288 | struct spacc_engine *engine = spacc_dev_to_engine(dev); | ||
1289 | |||
1290 | return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh); | ||
1291 | } | ||
1292 | |||
1293 | static ssize_t spacc_stat_irq_thresh_store(struct device *dev, | ||
1294 | struct device_attribute *attr, | ||
1295 | const char *buf, size_t len) | ||
1296 | { | ||
1297 | struct spacc_engine *engine = spacc_dev_to_engine(dev); | ||
1298 | unsigned long thresh; | ||
1299 | |||
1300 | if (strict_strtoul(buf, 0, &thresh)) | ||
1301 | return -EINVAL; | ||
1302 | |||
1303 | thresh = clamp(thresh, 1UL, engine->fifo_sz - 1); | ||
1304 | |||
1305 | engine->stat_irq_thresh = thresh; | ||
1306 | writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, | ||
1307 | engine->regs + SPA_IRQ_CTRL_REG_OFFSET); | ||
1308 | |||
1309 | return len; | ||
1310 | } | ||
1311 | static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show, | ||
1312 | spacc_stat_irq_thresh_store); | ||
1313 | |||
1314 | static struct spacc_alg ipsec_engine_algs[] = { | ||
1315 | { | ||
1316 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC, | ||
1317 | .key_offs = 0, | ||
1318 | .iv_offs = AES_MAX_KEY_SIZE, | ||
1319 | .alg = { | ||
1320 | .cra_name = "cbc(aes)", | ||
1321 | .cra_driver_name = "cbc-aes-picoxcell", | ||
1322 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1323 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1324 | CRYPTO_ALG_ASYNC | | ||
1325 | CRYPTO_ALG_NEED_FALLBACK, | ||
1326 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1327 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | ||
1328 | .cra_type = &crypto_ablkcipher_type, | ||
1329 | .cra_module = THIS_MODULE, | ||
1330 | .cra_ablkcipher = { | ||
1331 | .setkey = spacc_aes_setkey, | ||
1332 | .encrypt = spacc_ablk_encrypt, | ||
1333 | .decrypt = spacc_ablk_decrypt, | ||
1334 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1335 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1336 | .ivsize = AES_BLOCK_SIZE, | ||
1337 | }, | ||
1338 | .cra_init = spacc_ablk_cra_init, | ||
1339 | .cra_exit = spacc_ablk_cra_exit, | ||
1340 | }, | ||
1341 | }, | ||
1342 | { | ||
1343 | .key_offs = 0, | ||
1344 | .iv_offs = AES_MAX_KEY_SIZE, | ||
1345 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB, | ||
1346 | .alg = { | ||
1347 | .cra_name = "ecb(aes)", | ||
1348 | .cra_driver_name = "ecb-aes-picoxcell", | ||
1349 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1350 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1351 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, | ||
1352 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1353 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | ||
1354 | .cra_type = &crypto_ablkcipher_type, | ||
1355 | .cra_module = THIS_MODULE, | ||
1356 | .cra_ablkcipher = { | ||
1357 | .setkey = spacc_aes_setkey, | ||
1358 | .encrypt = spacc_ablk_encrypt, | ||
1359 | .decrypt = spacc_ablk_decrypt, | ||
1360 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1361 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1362 | }, | ||
1363 | .cra_init = spacc_ablk_cra_init, | ||
1364 | .cra_exit = spacc_ablk_cra_exit, | ||
1365 | }, | ||
1366 | }, | ||
1367 | { | ||
1368 | .key_offs = DES_BLOCK_SIZE, | ||
1369 | .iv_offs = 0, | ||
1370 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC, | ||
1371 | .alg = { | ||
1372 | .cra_name = "cbc(des)", | ||
1373 | .cra_driver_name = "cbc-des-picoxcell", | ||
1374 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1375 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1376 | .cra_blocksize = DES_BLOCK_SIZE, | ||
1377 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | ||
1378 | .cra_type = &crypto_ablkcipher_type, | ||
1379 | .cra_module = THIS_MODULE, | ||
1380 | .cra_ablkcipher = { | ||
1381 | .setkey = spacc_des_setkey, | ||
1382 | .encrypt = spacc_ablk_encrypt, | ||
1383 | .decrypt = spacc_ablk_decrypt, | ||
1384 | .min_keysize = DES_KEY_SIZE, | ||
1385 | .max_keysize = DES_KEY_SIZE, | ||
1386 | .ivsize = DES_BLOCK_SIZE, | ||
1387 | }, | ||
1388 | .cra_init = spacc_ablk_cra_init, | ||
1389 | .cra_exit = spacc_ablk_cra_exit, | ||
1390 | }, | ||
1391 | }, | ||
1392 | { | ||
1393 | .key_offs = DES_BLOCK_SIZE, | ||
1394 | .iv_offs = 0, | ||
1395 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, | ||
1396 | .alg = { | ||
1397 | .cra_name = "ecb(des)", | ||
1398 | .cra_driver_name = "ecb-des-picoxcell", | ||
1399 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1400 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1401 | .cra_blocksize = DES_BLOCK_SIZE, | ||
1402 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | ||
1403 | .cra_type = &crypto_ablkcipher_type, | ||
1404 | .cra_module = THIS_MODULE, | ||
1405 | .cra_ablkcipher = { | ||
1406 | .setkey = spacc_des_setkey, | ||
1407 | .encrypt = spacc_ablk_encrypt, | ||
1408 | .decrypt = spacc_ablk_decrypt, | ||
1409 | .min_keysize = DES_KEY_SIZE, | ||
1410 | .max_keysize = DES_KEY_SIZE, | ||
1411 | }, | ||
1412 | .cra_init = spacc_ablk_cra_init, | ||
1413 | .cra_exit = spacc_ablk_cra_exit, | ||
1414 | }, | ||
1415 | }, | ||
1416 | { | ||
1417 | .key_offs = DES_BLOCK_SIZE, | ||
1418 | .iv_offs = 0, | ||
1419 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC, | ||
1420 | .alg = { | ||
1421 | .cra_name = "cbc(des3_ede)", | ||
1422 | .cra_driver_name = "cbc-des3-ede-picoxcell", | ||
1423 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1424 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1425 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1426 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | ||
1427 | .cra_type = &crypto_ablkcipher_type, | ||
1428 | .cra_module = THIS_MODULE, | ||
1429 | .cra_ablkcipher = { | ||
1430 | .setkey = spacc_des_setkey, | ||
1431 | .encrypt = spacc_ablk_encrypt, | ||
1432 | .decrypt = spacc_ablk_decrypt, | ||
1433 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
1434 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
1435 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1436 | }, | ||
1437 | .cra_init = spacc_ablk_cra_init, | ||
1438 | .cra_exit = spacc_ablk_cra_exit, | ||
1439 | }, | ||
1440 | }, | ||
1441 | { | ||
1442 | .key_offs = DES_BLOCK_SIZE, | ||
1443 | .iv_offs = 0, | ||
1444 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, | ||
1445 | .alg = { | ||
1446 | .cra_name = "ecb(des3_ede)", | ||
1447 | .cra_driver_name = "ecb-des3-ede-picoxcell", | ||
1448 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1449 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1450 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1451 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | ||
1452 | .cra_type = &crypto_ablkcipher_type, | ||
1453 | .cra_module = THIS_MODULE, | ||
1454 | .cra_ablkcipher = { | ||
1455 | .setkey = spacc_des_setkey, | ||
1456 | .encrypt = spacc_ablk_encrypt, | ||
1457 | .decrypt = spacc_ablk_decrypt, | ||
1458 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
1459 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
1460 | }, | ||
1461 | .cra_init = spacc_ablk_cra_init, | ||
1462 | .cra_exit = spacc_ablk_cra_exit, | ||
1463 | }, | ||
1464 | }, | ||
1465 | { | ||
1466 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | ||
1467 | SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, | ||
1468 | .key_offs = 0, | ||
1469 | .iv_offs = AES_MAX_KEY_SIZE, | ||
1470 | .alg = { | ||
1471 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | ||
1472 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell", | ||
1473 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1474 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1475 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1476 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | ||
1477 | .cra_type = &crypto_aead_type, | ||
1478 | .cra_module = THIS_MODULE, | ||
1479 | .cra_aead = { | ||
1480 | .setkey = spacc_aead_setkey, | ||
1481 | .setauthsize = spacc_aead_setauthsize, | ||
1482 | .encrypt = spacc_aead_encrypt, | ||
1483 | .decrypt = spacc_aead_decrypt, | ||
1484 | .givencrypt = spacc_aead_givencrypt, | ||
1485 | .ivsize = AES_BLOCK_SIZE, | ||
1486 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1487 | }, | ||
1488 | .cra_init = spacc_aead_cra_init, | ||
1489 | .cra_exit = spacc_aead_cra_exit, | ||
1490 | }, | ||
1491 | }, | ||
1492 | { | ||
1493 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | ||
1494 | SPA_CTRL_HASH_ALG_SHA256 | | ||
1495 | SPA_CTRL_HASH_MODE_HMAC, | ||
1496 | .key_offs = 0, | ||
1497 | .iv_offs = AES_MAX_KEY_SIZE, | ||
1498 | .alg = { | ||
1499 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | ||
1500 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell", | ||
1501 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1502 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1503 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1504 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | ||
1505 | .cra_type = &crypto_aead_type, | ||
1506 | .cra_module = THIS_MODULE, | ||
1507 | .cra_aead = { | ||
1508 | .setkey = spacc_aead_setkey, | ||
1509 | .setauthsize = spacc_aead_setauthsize, | ||
1510 | .encrypt = spacc_aead_encrypt, | ||
1511 | .decrypt = spacc_aead_decrypt, | ||
1512 | .givencrypt = spacc_aead_givencrypt, | ||
1513 | .ivsize = AES_BLOCK_SIZE, | ||
1514 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
1515 | }, | ||
1516 | .cra_init = spacc_aead_cra_init, | ||
1517 | .cra_exit = spacc_aead_cra_exit, | ||
1518 | }, | ||
1519 | }, | ||
1520 | { | ||
1521 | .key_offs = 0, | ||
1522 | .iv_offs = AES_MAX_KEY_SIZE, | ||
1523 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | ||
1524 | SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, | ||
1525 | .alg = { | ||
1526 | .cra_name = "authenc(hmac(md5),cbc(aes))", | ||
1527 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell", | ||
1528 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1529 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1530 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1531 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | ||
1532 | .cra_type = &crypto_aead_type, | ||
1533 | .cra_module = THIS_MODULE, | ||
1534 | .cra_aead = { | ||
1535 | .setkey = spacc_aead_setkey, | ||
1536 | .setauthsize = spacc_aead_setauthsize, | ||
1537 | .encrypt = spacc_aead_encrypt, | ||
1538 | .decrypt = spacc_aead_decrypt, | ||
1539 | .givencrypt = spacc_aead_givencrypt, | ||
1540 | .ivsize = AES_BLOCK_SIZE, | ||
1541 | .maxauthsize = MD5_DIGEST_SIZE, | ||
1542 | }, | ||
1543 | .cra_init = spacc_aead_cra_init, | ||
1544 | .cra_exit = spacc_aead_cra_exit, | ||
1545 | }, | ||
1546 | }, | ||
1547 | { | ||
1548 | .key_offs = DES_BLOCK_SIZE, | ||
1549 | .iv_offs = 0, | ||
1550 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | ||
1551 | SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, | ||
1552 | .alg = { | ||
1553 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | ||
1554 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell", | ||
1555 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1556 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1557 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1558 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | ||
1559 | .cra_type = &crypto_aead_type, | ||
1560 | .cra_module = THIS_MODULE, | ||
1561 | .cra_aead = { | ||
1562 | .setkey = spacc_aead_setkey, | ||
1563 | .setauthsize = spacc_aead_setauthsize, | ||
1564 | .encrypt = spacc_aead_encrypt, | ||
1565 | .decrypt = spacc_aead_decrypt, | ||
1566 | .givencrypt = spacc_aead_givencrypt, | ||
1567 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1568 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1569 | }, | ||
1570 | .cra_init = spacc_aead_cra_init, | ||
1571 | .cra_exit = spacc_aead_cra_exit, | ||
1572 | }, | ||
1573 | }, | ||
1574 | { | ||
1575 | .key_offs = DES_BLOCK_SIZE, | ||
1576 | .iv_offs = 0, | ||
1577 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | ||
1578 | SPA_CTRL_HASH_ALG_SHA256 | | ||
1579 | SPA_CTRL_HASH_MODE_HMAC, | ||
1580 | .alg = { | ||
1581 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", | ||
1582 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell", | ||
1583 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1584 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1585 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1586 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | ||
1587 | .cra_type = &crypto_aead_type, | ||
1588 | .cra_module = THIS_MODULE, | ||
1589 | .cra_aead = { | ||
1590 | .setkey = spacc_aead_setkey, | ||
1591 | .setauthsize = spacc_aead_setauthsize, | ||
1592 | .encrypt = spacc_aead_encrypt, | ||
1593 | .decrypt = spacc_aead_decrypt, | ||
1594 | .givencrypt = spacc_aead_givencrypt, | ||
1595 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1596 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
1597 | }, | ||
1598 | .cra_init = spacc_aead_cra_init, | ||
1599 | .cra_exit = spacc_aead_cra_exit, | ||
1600 | }, | ||
1601 | }, | ||
1602 | { | ||
1603 | .key_offs = DES_BLOCK_SIZE, | ||
1604 | .iv_offs = 0, | ||
1605 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | ||
1606 | SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, | ||
1607 | .alg = { | ||
1608 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", | ||
1609 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell", | ||
1610 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1611 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1612 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1613 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | ||
1614 | .cra_type = &crypto_aead_type, | ||
1615 | .cra_module = THIS_MODULE, | ||
1616 | .cra_aead = { | ||
1617 | .setkey = spacc_aead_setkey, | ||
1618 | .setauthsize = spacc_aead_setauthsize, | ||
1619 | .encrypt = spacc_aead_encrypt, | ||
1620 | .decrypt = spacc_aead_decrypt, | ||
1621 | .givencrypt = spacc_aead_givencrypt, | ||
1622 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1623 | .maxauthsize = MD5_DIGEST_SIZE, | ||
1624 | }, | ||
1625 | .cra_init = spacc_aead_cra_init, | ||
1626 | .cra_exit = spacc_aead_cra_exit, | ||
1627 | }, | ||
1628 | }, | ||
1629 | }; | ||
1630 | |||
1631 | static struct spacc_alg l2_engine_algs[] = { | ||
1632 | { | ||
1633 | .key_offs = 0, | ||
1634 | .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, | ||
1635 | .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI | | ||
1636 | SPA_CTRL_CIPH_MODE_F8, | ||
1637 | .alg = { | ||
1638 | .cra_name = "f8(kasumi)", | ||
1639 | .cra_driver_name = "f8-kasumi-picoxcell", | ||
1640 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | ||
1641 | .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC, | ||
1642 | .cra_blocksize = 8, | ||
1643 | .cra_ctxsize = sizeof(struct spacc_ablk_ctx), | ||
1644 | .cra_type = &crypto_ablkcipher_type, | ||
1645 | .cra_module = THIS_MODULE, | ||
1646 | .cra_ablkcipher = { | ||
1647 | .setkey = spacc_kasumi_f8_setkey, | ||
1648 | .encrypt = spacc_ablk_encrypt, | ||
1649 | .decrypt = spacc_ablk_decrypt, | ||
1650 | .min_keysize = 16, | ||
1651 | .max_keysize = 16, | ||
1652 | .ivsize = 8, | ||
1653 | }, | ||
1654 | .cra_init = spacc_ablk_cra_init, | ||
1655 | .cra_exit = spacc_ablk_cra_exit, | ||
1656 | }, | ||
1657 | }, | ||
1658 | }; | ||
1659 | |||
1660 | static int __devinit spacc_probe(struct platform_device *pdev, | ||
1661 | unsigned max_ctxs, size_t cipher_pg_sz, | ||
1662 | size_t hash_pg_sz, size_t fifo_sz, | ||
1663 | struct spacc_alg *algs, size_t num_algs) | ||
1664 | { | ||
1665 | int i, err, ret = -EINVAL; | ||
1666 | struct resource *mem, *irq; | ||
1667 | struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), | ||
1668 | GFP_KERNEL); | ||
1669 | if (!engine) | ||
1670 | return -ENOMEM; | ||
1671 | |||
1672 | engine->max_ctxs = max_ctxs; | ||
1673 | engine->cipher_pg_sz = cipher_pg_sz; | ||
1674 | engine->hash_pg_sz = hash_pg_sz; | ||
1675 | engine->fifo_sz = fifo_sz; | ||
1676 | engine->algs = algs; | ||
1677 | engine->num_algs = num_algs; | ||
1678 | engine->name = dev_name(&pdev->dev); | ||
1679 | |||
1680 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1681 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
1682 | if (!mem || !irq) { | ||
1683 | dev_err(&pdev->dev, "no memory/irq resource for engine\n"); | ||
1684 | return -ENXIO; | ||
1685 | } | ||
1686 | |||
1687 | if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), | ||
1688 | engine->name)) | ||
1689 | return -ENOMEM; | ||
1690 | |||
1691 | engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); | ||
1692 | if (!engine->regs) { | ||
1693 | dev_err(&pdev->dev, "memory map failed\n"); | ||
1694 | return -ENOMEM; | ||
1695 | } | ||
1696 | |||
1697 | if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, | ||
1698 | engine->name, engine)) { | ||
1699 | dev_err(&pdev->dev, "failed to request IRQ\n"); | ||
1700 | return -EBUSY; | ||
1701 | } | ||
1702 | |||
1703 | engine->dev = &pdev->dev; | ||
1704 | engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET; | ||
1705 | engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET; | ||
1706 | |||
1707 | engine->req_pool = dmam_pool_create(engine->name, engine->dev, | ||
1708 | MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K); | ||
1709 | if (!engine->req_pool) | ||
1710 | return -ENOMEM; | ||
1711 | |||
1712 | spin_lock_init(&engine->hw_lock); | ||
1713 | |||
1714 | engine->clk = clk_get(&pdev->dev, NULL); | ||
1715 | if (IS_ERR(engine->clk)) { | ||
1716 | dev_info(&pdev->dev, "clk unavailable\n"); | ||
1717 | /* nothing to undo yet: the sysfs attribute is created later */ | ||
1718 | return PTR_ERR(engine->clk); | ||
1719 | } | ||
1720 | |||
1721 | if (clk_enable(engine->clk)) { | ||
1722 | dev_info(&pdev->dev, "unable to enable clk\n"); | ||
1723 | clk_put(engine->clk); | ||
1724 | return -EIO; | ||
1725 | } | ||
1726 | |||
1727 | err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); | ||
1728 | if (err) { | ||
1729 | clk_disable(engine->clk); | ||
1730 | clk_put(engine->clk); | ||
1731 | return err; | ||
1732 | } | ||
1733 | |||
1734 | |||
1735 | /* | ||
1736 | * Use an IRQ threshold of 50% as a default. This seems to be a | ||
1737 | * reasonable trade off of latency against throughput but can be | ||
1738 | * changed at runtime. | ||
1739 | */ | ||
1740 | engine->stat_irq_thresh = (engine->fifo_sz / 2); | ||
1741 | |||
1742 | /* | ||
1743 | * Configure the interrupts. We only use the STAT_CNT interrupt as we | ||
1744 | * only submit a new packet for processing when we complete another in | ||
1745 | * the queue. This minimizes time spent in the interrupt handler. | ||
1746 | */ | ||
1747 | writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, | ||
1748 | engine->regs + SPA_IRQ_CTRL_REG_OFFSET); | ||
1749 | writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN, | ||
1750 | engine->regs + SPA_IRQ_EN_REG_OFFSET); | ||
1751 | |||
1752 | setup_timer(&engine->packet_timeout, spacc_packet_timeout, | ||
1753 | (unsigned long)engine); | ||
1754 | |||
1755 | INIT_LIST_HEAD(&engine->pending); | ||
1756 | INIT_LIST_HEAD(&engine->completed); | ||
1757 | INIT_LIST_HEAD(&engine->in_progress); | ||
1758 | engine->in_flight = 0; | ||
1759 | tasklet_init(&engine->complete, spacc_spacc_complete, | ||
1760 | (unsigned long)engine); | ||
1761 | |||
1762 | platform_set_drvdata(pdev, engine); | ||
1763 | |||
1764 | INIT_LIST_HEAD(&engine->registered_algs); | ||
1765 | for (i = 0; i < engine->num_algs; ++i) { | ||
1766 | engine->algs[i].engine = engine; | ||
1767 | err = crypto_register_alg(&engine->algs[i].alg); | ||
1768 | if (!err) { | ||
1769 | list_add_tail(&engine->algs[i].entry, | ||
1770 | &engine->registered_algs); | ||
1771 | ret = 0; | ||
1772 | } | ||
1773 | if (err) | ||
1774 | dev_err(engine->dev, "failed to register alg \"%s\"\n", | ||
1775 | engine->algs[i].alg.cra_name); | ||
1776 | else | ||
1777 | dev_dbg(engine->dev, "registered alg \"%s\"\n", | ||
1778 | engine->algs[i].alg.cra_name); | ||
1779 | } | ||
1780 | |||
1781 | return ret; | ||
1782 | } | ||
1783 | |||
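spacc_probe() above creates dev_attr_stat_irq_thresh so the 50% default threshold can be retuned at runtime; the attribute's definition sits outside this hunk. A store handler for it would plausibly look like the sketch below; the drvdata lookup and the validation policy are assumptions, not quotes from the driver:

	static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
						   struct device_attribute *attr,
						   const char *buf, size_t len)
	{
		struct spacc_engine *engine = dev_get_drvdata(dev); /* assumed lookup */
		unsigned long thresh;

		if (strict_strtoul(buf, 0, &thresh))
			return -EINVAL;
		if (!thresh || thresh > engine->fifo_sz)
			return -EINVAL;

		engine->stat_irq_thresh = thresh;
		writel(thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
		       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
		return len;
	}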
1784 | static int __devexit spacc_remove(struct platform_device *pdev) | ||
1785 | { | ||
1786 | struct spacc_alg *alg, *next; | ||
1787 | struct spacc_engine *engine = platform_get_drvdata(pdev); | ||
1788 | |||
1789 | del_timer_sync(&engine->packet_timeout); | ||
1790 | device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); | ||
1791 | |||
1792 | list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { | ||
1793 | list_del(&alg->entry); | ||
1794 | crypto_unregister_alg(&alg->alg); | ||
1795 | } | ||
1796 | |||
1797 | clk_disable(engine->clk); | ||
1798 | clk_put(engine->clk); | ||
1799 | |||
1800 | return 0; | ||
1801 | } | ||
1802 | |||
1803 | static int __devinit ipsec_probe(struct platform_device *pdev) | ||
1804 | { | ||
1805 | return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS, | ||
1806 | SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ, | ||
1807 | SPACC_CRYPTO_IPSEC_HASH_PG_SZ, | ||
1808 | SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs, | ||
1809 | ARRAY_SIZE(ipsec_engine_algs)); | ||
1810 | } | ||
1811 | |||
1812 | static struct platform_driver ipsec_driver = { | ||
1813 | .probe = ipsec_probe, | ||
1814 | .remove = __devexit_p(spacc_remove), | ||
1815 | .driver = { | ||
1816 | .name = "picoxcell-ipsec", | ||
1817 | #ifdef CONFIG_PM | ||
1818 | .pm = &spacc_pm_ops, | ||
1819 | #endif /* CONFIG_PM */ | ||
1820 | }, | ||
1821 | }; | ||
1822 | |||
1823 | static int __devinit l2_probe(struct platform_device *pdev) | ||
1824 | { | ||
1825 | return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS, | ||
1826 | SPACC_CRYPTO_L2_CIPHER_PG_SZ, | ||
1827 | SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ, | ||
1828 | l2_engine_algs, ARRAY_SIZE(l2_engine_algs)); | ||
1829 | } | ||
1830 | |||
1831 | static struct platform_driver l2_driver = { | ||
1832 | .probe = l2_probe, | ||
1833 | .remove = __devexit_p(spacc_remove), | ||
1834 | .driver = { | ||
1835 | .name = "picoxcell-l2", | ||
1836 | #ifdef CONFIG_PM | ||
1837 | .pm = &spacc_pm_ops, | ||
1838 | #endif /* CONFIG_PM */ | ||
1839 | }, | ||
1840 | }; | ||
1841 | |||
1842 | static int __init spacc_init(void) | ||
1843 | { | ||
1844 | int ret = platform_driver_register(&ipsec_driver); | ||
1845 | if (ret) { | ||
1846 | pr_err("failed to register ipsec spacc driver"); | ||
1847 | goto out; | ||
1848 | } | ||
1849 | |||
1850 | ret = platform_driver_register(&l2_driver); | ||
1851 | if (ret) { | ||
1852 | pr_err("failed to register l2 spacc driver"); | ||
1853 | goto l2_failed; | ||
1854 | } | ||
1855 | |||
1856 | return 0; | ||
1857 | |||
1858 | l2_failed: | ||
1859 | platform_driver_unregister(&ipsec_driver); | ||
1860 | out: | ||
1861 | return ret; | ||
1862 | } | ||
1863 | module_init(spacc_init); | ||
1864 | |||
1865 | static void __exit spacc_exit(void) | ||
1866 | { | ||
1867 | platform_driver_unregister(&ipsec_driver); | ||
1868 | platform_driver_unregister(&l2_driver); | ||
1869 | } | ||
1870 | module_exit(spacc_exit); | ||
1871 | |||
1872 | MODULE_LICENSE("GPL"); | ||
1873 | MODULE_AUTHOR("Jamie Iles"); | ||
diff --git a/drivers/crypto/picoxcell_crypto_regs.h b/drivers/crypto/picoxcell_crypto_regs.h new file mode 100644 index 000000000000..af93442564c9 --- /dev/null +++ b/drivers/crypto/picoxcell_crypto_regs.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010 Picochip Ltd., Jamie Iles | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | #ifndef __PICOXCELL_CRYPTO_REGS_H__ | ||
19 | #define __PICOXCELL_CRYPTO_REGS_H__ | ||
20 | |||
21 | #define SPA_STATUS_OK 0 | ||
22 | #define SPA_STATUS_ICV_FAIL 1 | ||
23 | #define SPA_STATUS_MEMORY_ERROR 2 | ||
24 | #define SPA_STATUS_BLOCK_ERROR 3 | ||
25 | |||
26 | #define SPA_IRQ_CTRL_STAT_CNT_OFFSET 16 | ||
27 | #define SPA_IRQ_STAT_STAT_MASK (1 << 4) | ||
28 | #define SPA_FIFO_STAT_STAT_OFFSET 16 | ||
29 | #define SPA_FIFO_STAT_STAT_CNT_MASK (0x3F << SPA_FIFO_STAT_STAT_OFFSET) | ||
30 | #define SPA_STATUS_RES_CODE_OFFSET 24 | ||
31 | #define SPA_STATUS_RES_CODE_MASK (0x3 << SPA_STATUS_RES_CODE_OFFSET) | ||
32 | #define SPA_KEY_SZ_CTX_INDEX_OFFSET 8 | ||
33 | #define SPA_KEY_SZ_CIPHER_OFFSET 31 | ||
34 | |||
35 | #define SPA_IRQ_EN_REG_OFFSET 0x00000000 | ||
36 | #define SPA_IRQ_STAT_REG_OFFSET 0x00000004 | ||
37 | #define SPA_IRQ_CTRL_REG_OFFSET 0x00000008 | ||
38 | #define SPA_FIFO_STAT_REG_OFFSET 0x0000000C | ||
39 | #define SPA_SDMA_BRST_SZ_REG_OFFSET 0x00000010 | ||
40 | #define SPA_SRC_PTR_REG_OFFSET 0x00000020 | ||
41 | #define SPA_DST_PTR_REG_OFFSET 0x00000024 | ||
42 | #define SPA_OFFSET_REG_OFFSET 0x00000028 | ||
43 | #define SPA_AAD_LEN_REG_OFFSET 0x0000002C | ||
44 | #define SPA_PROC_LEN_REG_OFFSET 0x00000030 | ||
45 | #define SPA_ICV_LEN_REG_OFFSET 0x00000034 | ||
46 | #define SPA_ICV_OFFSET_REG_OFFSET 0x00000038 | ||
47 | #define SPA_SW_CTRL_REG_OFFSET 0x0000003C | ||
48 | #define SPA_CTRL_REG_OFFSET 0x00000040 | ||
49 | #define SPA_AUX_INFO_REG_OFFSET 0x0000004C | ||
50 | #define SPA_STAT_POP_REG_OFFSET 0x00000050 | ||
51 | #define SPA_STATUS_REG_OFFSET 0x00000054 | ||
52 | #define SPA_KEY_SZ_REG_OFFSET 0x00000100 | ||
53 | #define SPA_CIPH_KEY_BASE_REG_OFFSET 0x00004000 | ||
54 | #define SPA_HASH_KEY_BASE_REG_OFFSET 0x00008000 | ||
55 | #define SPA_RC4_CTX_BASE_REG_OFFSET 0x00020000 | ||
56 | |||
57 | #define SPA_IRQ_EN_REG_RESET 0x00000000 | ||
58 | #define SPA_IRQ_CTRL_REG_RESET 0x00000000 | ||
59 | #define SPA_FIFO_STAT_REG_RESET 0x00000000 | ||
60 | #define SPA_SDMA_BRST_SZ_REG_RESET 0x00000000 | ||
61 | #define SPA_SRC_PTR_REG_RESET 0x00000000 | ||
62 | #define SPA_DST_PTR_REG_RESET 0x00000000 | ||
63 | #define SPA_OFFSET_REG_RESET 0x00000000 | ||
64 | #define SPA_AAD_LEN_REG_RESET 0x00000000 | ||
65 | #define SPA_PROC_LEN_REG_RESET 0x00000000 | ||
66 | #define SPA_ICV_LEN_REG_RESET 0x00000000 | ||
67 | #define SPA_ICV_OFFSET_REG_RESET 0x00000000 | ||
68 | #define SPA_SW_CTRL_REG_RESET 0x00000000 | ||
69 | #define SPA_CTRL_REG_RESET 0x00000000 | ||
70 | #define SPA_AUX_INFO_REG_RESET 0x00000000 | ||
71 | #define SPA_STAT_POP_REG_RESET 0x00000000 | ||
72 | #define SPA_STATUS_REG_RESET 0x00000000 | ||
73 | #define SPA_KEY_SZ_REG_RESET 0x00000000 | ||
74 | |||
75 | #define SPA_CTRL_HASH_ALG_IDX 4 | ||
76 | #define SPA_CTRL_CIPH_MODE_IDX 8 | ||
77 | #define SPA_CTRL_HASH_MODE_IDX 12 | ||
78 | #define SPA_CTRL_CTX_IDX 16 | ||
79 | #define SPA_CTRL_ENCRYPT_IDX 24 | ||
80 | #define SPA_CTRL_AAD_COPY 25 | ||
81 | #define SPA_CTRL_ICV_PT 26 | ||
82 | #define SPA_CTRL_ICV_ENC 27 | ||
83 | #define SPA_CTRL_ICV_APPEND 28 | ||
84 | #define SPA_CTRL_KEY_EXP 29 | ||
85 | |||
86 | #define SPA_KEY_SZ_CXT_IDX 8 | ||
87 | #define SPA_KEY_SZ_CIPHER_IDX 31 | ||
88 | |||
89 | #define SPA_IRQ_EN_CMD0_EN (1 << 0) | ||
90 | #define SPA_IRQ_EN_STAT_EN (1 << 4) | ||
91 | #define SPA_IRQ_EN_GLBL_EN (1 << 31) | ||
92 | |||
93 | #define SPA_CTRL_CIPH_ALG_NULL 0x00 | ||
94 | #define SPA_CTRL_CIPH_ALG_DES 0x01 | ||
95 | #define SPA_CTRL_CIPH_ALG_AES 0x02 | ||
96 | #define SPA_CTRL_CIPH_ALG_RC4 0x03 | ||
97 | #define SPA_CTRL_CIPH_ALG_MULTI2 0x04 | ||
98 | #define SPA_CTRL_CIPH_ALG_KASUMI 0x05 | ||
99 | |||
100 | #define SPA_CTRL_HASH_ALG_NULL (0x00 << SPA_CTRL_HASH_ALG_IDX) | ||
101 | #define SPA_CTRL_HASH_ALG_MD5 (0x01 << SPA_CTRL_HASH_ALG_IDX) | ||
102 | #define SPA_CTRL_HASH_ALG_SHA (0x02 << SPA_CTRL_HASH_ALG_IDX) | ||
103 | #define SPA_CTRL_HASH_ALG_SHA224 (0x03 << SPA_CTRL_HASH_ALG_IDX) | ||
104 | #define SPA_CTRL_HASH_ALG_SHA256 (0x04 << SPA_CTRL_HASH_ALG_IDX) | ||
105 | #define SPA_CTRL_HASH_ALG_SHA384 (0x05 << SPA_CTRL_HASH_ALG_IDX) | ||
106 | #define SPA_CTRL_HASH_ALG_SHA512 (0x06 << SPA_CTRL_HASH_ALG_IDX) | ||
107 | #define SPA_CTRL_HASH_ALG_AESMAC (0x07 << SPA_CTRL_HASH_ALG_IDX) | ||
108 | #define SPA_CTRL_HASH_ALG_AESCMAC (0x08 << SPA_CTRL_HASH_ALG_IDX) | ||
109 | #define SPA_CTRL_HASH_ALG_KASF9 (0x09 << SPA_CTRL_HASH_ALG_IDX) | ||
110 | |||
111 | #define SPA_CTRL_CIPH_MODE_NULL (0x00 << SPA_CTRL_CIPH_MODE_IDX) | ||
112 | #define SPA_CTRL_CIPH_MODE_ECB (0x00 << SPA_CTRL_CIPH_MODE_IDX) | ||
113 | #define SPA_CTRL_CIPH_MODE_CBC (0x01 << SPA_CTRL_CIPH_MODE_IDX) | ||
114 | #define SPA_CTRL_CIPH_MODE_CTR (0x02 << SPA_CTRL_CIPH_MODE_IDX) | ||
115 | #define SPA_CTRL_CIPH_MODE_CCM (0x03 << SPA_CTRL_CIPH_MODE_IDX) | ||
116 | #define SPA_CTRL_CIPH_MODE_GCM (0x05 << SPA_CTRL_CIPH_MODE_IDX) | ||
117 | #define SPA_CTRL_CIPH_MODE_OFB (0x07 << SPA_CTRL_CIPH_MODE_IDX) | ||
118 | #define SPA_CTRL_CIPH_MODE_CFB (0x08 << SPA_CTRL_CIPH_MODE_IDX) | ||
119 | #define SPA_CTRL_CIPH_MODE_F8 (0x09 << SPA_CTRL_CIPH_MODE_IDX) | ||
120 | |||
121 | #define SPA_CTRL_HASH_MODE_RAW (0x00 << SPA_CTRL_HASH_MODE_IDX) | ||
122 | #define SPA_CTRL_HASH_MODE_SSLMAC (0x01 << SPA_CTRL_HASH_MODE_IDX) | ||
123 | #define SPA_CTRL_HASH_MODE_HMAC (0x02 << SPA_CTRL_HASH_MODE_IDX) | ||
124 | |||
125 | #define SPA_FIFO_STAT_EMPTY (1 << 31) | ||
126 | #define SPA_FIFO_CMD_FULL (1 << 7) | ||
127 | |||
128 | #endif /* __PICOXCELL_CRYPTO_REGS_H__ */ | ||
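Note that the *_IDX constants are shift amounts for multi-bit fields of the control register, so a complete control word is assembled by OR-ing one value from each group, just as the ctrl_default initialisers earlier in this patch do. An illustrative composition (not lifted from the driver):

	/* AES-CBC encrypt with HMAC-SHA1, context slot 3, HW key expansion */
	u32 ctrl = SPA_CTRL_CIPH_ALG_AES |
		   SPA_CTRL_CIPH_MODE_CBC |
		   SPA_CTRL_HASH_ALG_SHA |
		   SPA_CTRL_HASH_MODE_HMAC |
		   (3 << SPA_CTRL_CTX_IDX) |
		   (1 << SPA_CTRL_ENCRYPT_IDX) |
		   (1 << SPA_CTRL_KEY_EXP);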
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c new file mode 100644 index 000000000000..8115417a1c93 --- /dev/null +++ b/drivers/crypto/s5p-sss.c | |||
@@ -0,0 +1,701 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for Samsung S5PV210 HW acceleration. | ||
5 | * | ||
6 | * Copyright (C) 2011 NetUP Inc. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as published | ||
10 | * by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/delay.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/scatterlist.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/crypto.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | |||
28 | #include <crypto/algapi.h> | ||
29 | #include <crypto/aes.h> | ||
30 | #include <crypto/ctr.h> | ||
31 | |||
32 | #include <plat/cpu.h> | ||
33 | #include <plat/dma.h> | ||
34 | |||
35 | #define _SBF(s, v) ((v) << (s)) | ||
36 | #define _BIT(b) _SBF(b, 1) | ||
37 | |||
38 | /* Feed control registers */ | ||
39 | #define SSS_REG_FCINTSTAT 0x0000 | ||
40 | #define SSS_FCINTSTAT_BRDMAINT _BIT(3) | ||
41 | #define SSS_FCINTSTAT_BTDMAINT _BIT(2) | ||
42 | #define SSS_FCINTSTAT_HRDMAINT _BIT(1) | ||
43 | #define SSS_FCINTSTAT_PKDMAINT _BIT(0) | ||
44 | |||
45 | #define SSS_REG_FCINTENSET 0x0004 | ||
46 | #define SSS_FCINTENSET_BRDMAINTENSET _BIT(3) | ||
47 | #define SSS_FCINTENSET_BTDMAINTENSET _BIT(2) | ||
48 | #define SSS_FCINTENSET_HRDMAINTENSET _BIT(1) | ||
49 | #define SSS_FCINTENSET_PKDMAINTENSET _BIT(0) | ||
50 | |||
51 | #define SSS_REG_FCINTENCLR 0x0008 | ||
52 | #define SSS_FCINTENCLR_BRDMAINTENCLR _BIT(3) | ||
53 | #define SSS_FCINTENCLR_BTDMAINTENCLR _BIT(2) | ||
54 | #define SSS_FCINTENCLR_HRDMAINTENCLR _BIT(1) | ||
55 | #define SSS_FCINTENCLR_PKDMAINTENCLR _BIT(0) | ||
56 | |||
57 | #define SSS_REG_FCINTPEND 0x000C | ||
58 | #define SSS_FCINTPEND_BRDMAINTP _BIT(3) | ||
59 | #define SSS_FCINTPEND_BTDMAINTP _BIT(2) | ||
60 | #define SSS_FCINTPEND_HRDMAINTP _BIT(1) | ||
61 | #define SSS_FCINTPEND_PKDMAINTP _BIT(0) | ||
62 | |||
63 | #define SSS_REG_FCFIFOSTAT 0x0010 | ||
64 | #define SSS_FCFIFOSTAT_BRFIFOFUL _BIT(7) | ||
65 | #define SSS_FCFIFOSTAT_BRFIFOEMP _BIT(6) | ||
66 | #define SSS_FCFIFOSTAT_BTFIFOFUL _BIT(5) | ||
67 | #define SSS_FCFIFOSTAT_BTFIFOEMP _BIT(4) | ||
68 | #define SSS_FCFIFOSTAT_HRFIFOFUL _BIT(3) | ||
69 | #define SSS_FCFIFOSTAT_HRFIFOEMP _BIT(2) | ||
70 | #define SSS_FCFIFOSTAT_PKFIFOFUL _BIT(1) | ||
71 | #define SSS_FCFIFOSTAT_PKFIFOEMP _BIT(0) | ||
72 | |||
73 | #define SSS_REG_FCFIFOCTRL 0x0014 | ||
74 | #define SSS_FCFIFOCTRL_DESSEL _BIT(2) | ||
75 | #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00) | ||
76 | #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01) | ||
77 | #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02) | ||
78 | |||
79 | #define SSS_REG_FCBRDMAS 0x0020 | ||
80 | #define SSS_REG_FCBRDMAL 0x0024 | ||
81 | #define SSS_REG_FCBRDMAC 0x0028 | ||
82 | #define SSS_FCBRDMAC_BYTESWAP _BIT(1) | ||
83 | #define SSS_FCBRDMAC_FLUSH _BIT(0) | ||
84 | |||
85 | #define SSS_REG_FCBTDMAS 0x0030 | ||
86 | #define SSS_REG_FCBTDMAL 0x0034 | ||
87 | #define SSS_REG_FCBTDMAC 0x0038 | ||
88 | #define SSS_FCBTDMAC_BYTESWAP _BIT(1) | ||
89 | #define SSS_FCBTDMAC_FLUSH _BIT(0) | ||
90 | |||
91 | #define SSS_REG_FCHRDMAS 0x0040 | ||
92 | #define SSS_REG_FCHRDMAL 0x0044 | ||
93 | #define SSS_REG_FCHRDMAC 0x0048 | ||
94 | #define SSS_FCHRDMAC_BYTESWAP _BIT(1) | ||
95 | #define SSS_FCHRDMAC_FLUSH _BIT(0) | ||
96 | |||
97 | #define SSS_REG_FCPKDMAS 0x0050 | ||
98 | #define SSS_REG_FCPKDMAL 0x0054 | ||
99 | #define SSS_REG_FCPKDMAC 0x0058 | ||
100 | #define SSS_FCPKDMAC_BYTESWAP _BIT(3) | ||
101 | #define SSS_FCPKDMAC_DESCEND _BIT(2) | ||
102 | #define SSS_FCPKDMAC_TRANSMIT _BIT(1) | ||
103 | #define SSS_FCPKDMAC_FLUSH _BIT(0) | ||
104 | |||
105 | #define SSS_REG_FCPKDMAO 0x005C | ||
106 | |||
107 | /* AES registers */ | ||
108 | #define SSS_REG_AES_CONTROL 0x4000 | ||
109 | #define SSS_AES_BYTESWAP_DI _BIT(11) | ||
110 | #define SSS_AES_BYTESWAP_DO _BIT(10) | ||
111 | #define SSS_AES_BYTESWAP_IV _BIT(9) | ||
112 | #define SSS_AES_BYTESWAP_CNT _BIT(8) | ||
113 | #define SSS_AES_BYTESWAP_KEY _BIT(7) | ||
114 | #define SSS_AES_KEY_CHANGE_MODE _BIT(6) | ||
115 | #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00) | ||
116 | #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01) | ||
117 | #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02) | ||
118 | #define SSS_AES_FIFO_MODE _BIT(3) | ||
119 | #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00) | ||
120 | #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01) | ||
121 | #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02) | ||
122 | #define SSS_AES_MODE_DECRYPT _BIT(0) | ||
123 | |||
124 | #define SSS_REG_AES_STATUS 0x4004 | ||
125 | #define SSS_AES_BUSY _BIT(2) | ||
126 | #define SSS_AES_INPUT_READY _BIT(1) | ||
127 | #define SSS_AES_OUTPUT_READY _BIT(0) | ||
128 | |||
129 | #define SSS_REG_AES_IN_DATA(s) (0x4010 + (s << 2)) | ||
130 | #define SSS_REG_AES_OUT_DATA(s) (0x4020 + (s << 2)) | ||
131 | #define SSS_REG_AES_IV_DATA(s) (0x4030 + (s << 2)) | ||
132 | #define SSS_REG_AES_CNT_DATA(s) (0x4040 + (s << 2)) | ||
133 | #define SSS_REG_AES_KEY_DATA(s) (0x4080 + (s << 2)) | ||
134 | |||
135 | #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg)) | ||
136 | #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg)) | ||
137 | #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg)) | ||
138 | |||
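The three accessors above take a bare register name and paste on the SSS_REG_ prefix, so the rest of the driver reads and writes registers by name. Illustrative usage only (dev being a struct s5p_aes_dev *):

	/* wait while the PKI FIFO is full, then acknowledge pending IRQs */
	while (SSS_READ(dev, FCFIFOSTAT) & SSS_FCFIFOSTAT_PKFIFOFUL)
		cpu_relax();
	SSS_WRITE(dev, FCINTPEND, SSS_READ(dev, FCINTSTAT));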
139 | /* HW engine modes */ | ||
140 | #define FLAGS_AES_DECRYPT _BIT(0) | ||
141 | #define FLAGS_AES_MODE_MASK _SBF(1, 0x03) | ||
142 | #define FLAGS_AES_CBC _SBF(1, 0x01) | ||
143 | #define FLAGS_AES_CTR _SBF(1, 0x02) | ||
144 | |||
145 | #define AES_KEY_LEN 16 | ||
146 | #define CRYPTO_QUEUE_LEN 1 | ||
147 | |||
148 | struct s5p_aes_reqctx { | ||
149 | unsigned long mode; | ||
150 | }; | ||
151 | |||
152 | struct s5p_aes_ctx { | ||
153 | struct s5p_aes_dev *dev; | ||
154 | |||
155 | uint8_t aes_key[AES_MAX_KEY_SIZE]; | ||
156 | uint8_t nonce[CTR_RFC3686_NONCE_SIZE]; | ||
157 | int keylen; | ||
158 | }; | ||
159 | |||
160 | struct s5p_aes_dev { | ||
161 | struct device *dev; | ||
162 | struct clk *clk; | ||
163 | void __iomem *ioaddr; | ||
164 | int irq_hash; | ||
165 | int irq_fc; | ||
166 | |||
167 | struct ablkcipher_request *req; | ||
168 | struct s5p_aes_ctx *ctx; | ||
169 | struct scatterlist *sg_src; | ||
170 | struct scatterlist *sg_dst; | ||
171 | |||
172 | struct tasklet_struct tasklet; | ||
173 | struct crypto_queue queue; | ||
174 | bool busy; | ||
175 | spinlock_t lock; | ||
176 | }; | ||
177 | |||
178 | static struct s5p_aes_dev *s5p_dev; | ||
179 | |||
180 | static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) | ||
181 | { | ||
182 | SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg)); | ||
183 | SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg)); | ||
184 | } | ||
185 | |||
186 | static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg) | ||
187 | { | ||
188 | SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg)); | ||
189 | SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg)); | ||
190 | } | ||
191 | |||
192 | static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) | ||
193 | { | ||
194 | /* the caller must hold dev->lock */ | ||
195 | dev->req->base.complete(&dev->req->base, err); | ||
196 | dev->busy = false; | ||
197 | } | ||
198 | |||
199 | static void s5p_unset_outdata(struct s5p_aes_dev *dev) | ||
200 | { | ||
201 | dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE); | ||
202 | } | ||
203 | |||
204 | static void s5p_unset_indata(struct s5p_aes_dev *dev) | ||
205 | { | ||
206 | dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE); | ||
207 | } | ||
208 | |||
209 | static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg) | ||
210 | { | ||
211 | int err; | ||
212 | |||
213 | if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) { | ||
214 | err = -EINVAL; | ||
215 | goto exit; | ||
216 | } | ||
217 | if (!sg_dma_len(sg)) { | ||
218 | err = -EINVAL; | ||
219 | goto exit; | ||
220 | } | ||
221 | |||
222 | err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE); | ||
223 | if (!err) { | ||
224 | err = -ENOMEM; | ||
225 | goto exit; | ||
226 | } | ||
227 | |||
228 | dev->sg_dst = sg; | ||
229 | err = 0; | ||
230 | |||
231 | exit: | ||
232 | return err; | ||
233 | } | ||
234 | |||
235 | static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) | ||
236 | { | ||
237 | int err; | ||
238 | |||
239 | if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) { | ||
240 | err = -EINVAL; | ||
241 | goto exit; | ||
242 | } | ||
243 | if (!sg_dma_len(sg)) { | ||
244 | err = -EINVAL; | ||
245 | goto exit; | ||
246 | } | ||
247 | |||
248 | err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE); | ||
249 | if (!err) { | ||
250 | err = -ENOMEM; | ||
251 | goto exit; | ||
252 | } | ||
253 | |||
254 | dev->sg_src = sg; | ||
255 | err = 0; | ||
256 | |||
257 | exit: | ||
258 | return err; | ||
259 | } | ||
260 | |||
261 | static void s5p_aes_tx(struct s5p_aes_dev *dev) | ||
262 | { | ||
263 | int err = 0; | ||
264 | |||
265 | s5p_unset_outdata(dev); | ||
266 | |||
267 | if (!sg_is_last(dev->sg_dst)) { | ||
268 | err = s5p_set_outdata(dev, sg_next(dev->sg_dst)); | ||
269 | if (err) { | ||
270 | s5p_aes_complete(dev, err); | ||
271 | return; | ||
272 | } | ||
273 | |||
274 | s5p_set_dma_outdata(dev, dev->sg_dst); | ||
275 | } else | ||
276 | s5p_aes_complete(dev, err); | ||
277 | } | ||
278 | |||
279 | static void s5p_aes_rx(struct s5p_aes_dev *dev) | ||
280 | { | ||
281 | int err; | ||
282 | |||
283 | s5p_unset_indata(dev); | ||
284 | |||
285 | if (!sg_is_last(dev->sg_src)) { | ||
286 | err = s5p_set_indata(dev, sg_next(dev->sg_src)); | ||
287 | if (err) { | ||
288 | s5p_aes_complete(dev, err); | ||
289 | return; | ||
290 | } | ||
291 | |||
292 | s5p_set_dma_indata(dev, dev->sg_src); | ||
293 | } | ||
294 | } | ||
295 | |||
296 | static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) | ||
297 | { | ||
298 | struct platform_device *pdev = dev_id; | ||
299 | struct s5p_aes_dev *dev = platform_get_drvdata(pdev); | ||
300 | uint32_t status; | ||
301 | unsigned long flags; | ||
302 | |||
303 | spin_lock_irqsave(&dev->lock, flags); | ||
304 | |||
305 | if (irq == dev->irq_fc) { | ||
306 | status = SSS_READ(dev, FCINTSTAT); | ||
307 | if (status & SSS_FCINTSTAT_BRDMAINT) | ||
308 | s5p_aes_rx(dev); | ||
309 | if (status & SSS_FCINTSTAT_BTDMAINT) | ||
310 | s5p_aes_tx(dev); | ||
311 | |||
312 | SSS_WRITE(dev, FCINTPEND, status); | ||
313 | } | ||
314 | |||
315 | spin_unlock_irqrestore(&dev->lock, flags); | ||
316 | |||
317 | return IRQ_HANDLED; | ||
318 | } | ||
319 | |||
320 | static void s5p_set_aes(struct s5p_aes_dev *dev, | ||
321 | uint8_t *key, uint8_t *iv, unsigned int keylen) | ||
322 | { | ||
323 | void __iomem *keystart; | ||
324 | |||
325 | if (iv) memcpy_toio(dev->ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10); /* ECB has no IV */ | ||
326 | |||
327 | if (keylen == AES_KEYSIZE_256) | ||
328 | keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(0); | ||
329 | else if (keylen == AES_KEYSIZE_192) | ||
330 | keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(2); | ||
331 | else | ||
332 | keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(4); | ||
333 | |||
334 | memcpy_toio(keystart, key, keylen); | ||
335 | } | ||
336 | |||
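s5p_set_aes() loads the key right-aligned into the engine's eight-word key window: AES-256 fills words 0-7, AES-192 words 2-7 and AES-128 words 4-7. The three-way branch is therefore equivalent to this one-liner (a sketch, not driver code):

	/* keylen is in bytes; each key register word holds 4 of them */
	keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(8 - (keylen >> 2));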
337 | static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) | ||
338 | { | ||
339 | struct ablkcipher_request *req = dev->req; | ||
340 | |||
341 | uint32_t aes_control; | ||
342 | int err; | ||
343 | unsigned long flags; | ||
344 | |||
345 | aes_control = SSS_AES_KEY_CHANGE_MODE; | ||
346 | if (mode & FLAGS_AES_DECRYPT) | ||
347 | aes_control |= SSS_AES_MODE_DECRYPT; | ||
348 | |||
349 | if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) | ||
350 | aes_control |= SSS_AES_CHAIN_MODE_CBC; | ||
351 | else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) | ||
352 | aes_control |= SSS_AES_CHAIN_MODE_CTR; | ||
353 | |||
354 | if (dev->ctx->keylen == AES_KEYSIZE_192) | ||
355 | aes_control |= SSS_AES_KEY_SIZE_192; | ||
356 | else if (dev->ctx->keylen == AES_KEYSIZE_256) | ||
357 | aes_control |= SSS_AES_KEY_SIZE_256; | ||
358 | |||
359 | aes_control |= SSS_AES_FIFO_MODE; | ||
360 | |||
361 | /* alternatively, byte swapping could be done on the DMA side */ | ||
362 | aes_control |= SSS_AES_BYTESWAP_DI | ||
363 | | SSS_AES_BYTESWAP_DO | ||
364 | | SSS_AES_BYTESWAP_IV | ||
365 | | SSS_AES_BYTESWAP_KEY | ||
366 | | SSS_AES_BYTESWAP_CNT; | ||
367 | |||
368 | spin_lock_irqsave(&dev->lock, flags); | ||
369 | |||
370 | SSS_WRITE(dev, FCINTENCLR, | ||
371 | SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR); | ||
372 | SSS_WRITE(dev, FCFIFOCTRL, 0x00); | ||
373 | |||
374 | err = s5p_set_indata(dev, req->src); | ||
375 | if (err) | ||
376 | goto indata_error; | ||
377 | |||
378 | err = s5p_set_outdata(dev, req->dst); | ||
379 | if (err) | ||
380 | goto outdata_error; | ||
381 | |||
382 | SSS_WRITE(dev, AES_CONTROL, aes_control); | ||
383 | s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen); | ||
384 | |||
385 | s5p_set_dma_indata(dev, req->src); | ||
386 | s5p_set_dma_outdata(dev, req->dst); | ||
387 | |||
388 | SSS_WRITE(dev, FCINTENSET, | ||
389 | SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET); | ||
390 | |||
391 | spin_unlock_irqrestore(&dev->lock, flags); | ||
392 | |||
393 | return; | ||
394 | |||
395 | outdata_error: | ||
396 | s5p_unset_indata(dev); | ||
397 | |||
398 | indata_error: | ||
399 | s5p_aes_complete(dev, err); | ||
400 | spin_unlock_irqrestore(&dev->lock, flags); | ||
401 | } | ||
402 | |||
403 | static void s5p_tasklet_cb(unsigned long data) | ||
404 | { | ||
405 | struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data; | ||
406 | struct crypto_async_request *async_req, *backlog; | ||
407 | struct s5p_aes_reqctx *reqctx; | ||
408 | unsigned long flags; | ||
409 | |||
410 | spin_lock_irqsave(&dev->lock, flags); | ||
411 | backlog = crypto_get_backlog(&dev->queue); | ||
412 | async_req = crypto_dequeue_request(&dev->queue); | ||
413 | spin_unlock_irqrestore(&dev->lock, flags); | ||
414 | |||
415 | if (!async_req) | ||
416 | return; | ||
417 | |||
418 | if (backlog) | ||
419 | backlog->complete(backlog, -EINPROGRESS); | ||
420 | |||
421 | dev->req = ablkcipher_request_cast(async_req); | ||
422 | dev->ctx = crypto_tfm_ctx(dev->req->base.tfm); | ||
423 | reqctx = ablkcipher_request_ctx(dev->req); | ||
424 | |||
425 | s5p_aes_crypt_start(dev, reqctx->mode); | ||
426 | } | ||
427 | |||
428 | static int s5p_aes_handle_req(struct s5p_aes_dev *dev, | ||
429 | struct ablkcipher_request *req) | ||
430 | { | ||
431 | unsigned long flags; | ||
432 | int err; | ||
433 | |||
434 | spin_lock_irqsave(&dev->lock, flags); | ||
435 | if (dev->busy) { | ||
436 | err = -EAGAIN; | ||
437 | spin_unlock_irqrestore(&dev->lock, flags); | ||
438 | goto exit; | ||
439 | } | ||
440 | dev->busy = true; | ||
441 | |||
442 | err = ablkcipher_enqueue_request(&dev->queue, req); | ||
443 | spin_unlock_irqrestore(&dev->lock, flags); | ||
444 | |||
445 | tasklet_schedule(&dev->tasklet); | ||
446 | |||
447 | exit: | ||
448 | return err; | ||
449 | } | ||
450 | |||
451 | static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
452 | { | ||
453 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
454 | struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
455 | struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); | ||
456 | struct s5p_aes_dev *dev = ctx->dev; | ||
457 | |||
458 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { | ||
459 | pr_err("request size is not exact amount of AES blocks\n"); | ||
460 | return -EINVAL; | ||
461 | } | ||
462 | |||
463 | reqctx->mode = mode; | ||
464 | |||
465 | return s5p_aes_handle_req(dev, req); | ||
466 | } | ||
467 | |||
468 | static int s5p_aes_setkey(struct crypto_ablkcipher *cipher, | ||
469 | const uint8_t *key, unsigned int keylen) | ||
470 | { | ||
471 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
472 | struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
473 | |||
474 | if (keylen != AES_KEYSIZE_128 && | ||
475 | keylen != AES_KEYSIZE_192 && | ||
476 | keylen != AES_KEYSIZE_256) | ||
477 | return -EINVAL; | ||
478 | |||
479 | memcpy(ctx->aes_key, key, keylen); | ||
480 | ctx->keylen = keylen; | ||
481 | |||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
486 | { | ||
487 | return s5p_aes_crypt(req, 0); | ||
488 | } | ||
489 | |||
490 | static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
491 | { | ||
492 | return s5p_aes_crypt(req, FLAGS_AES_DECRYPT); | ||
493 | } | ||
494 | |||
495 | static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
496 | { | ||
497 | return s5p_aes_crypt(req, FLAGS_AES_CBC); | ||
498 | } | ||
499 | |||
500 | static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
501 | { | ||
502 | return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC); | ||
503 | } | ||
504 | |||
505 | static int s5p_aes_cra_init(struct crypto_tfm *tfm) | ||
506 | { | ||
507 | struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
508 | |||
509 | ctx->dev = s5p_dev; | ||
510 | tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx); | ||
511 | |||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | static struct crypto_alg algs[] = { | ||
516 | { | ||
517 | .cra_name = "ecb(aes)", | ||
518 | .cra_driver_name = "ecb-aes-s5p", | ||
519 | .cra_priority = 100, | ||
520 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
521 | CRYPTO_ALG_ASYNC, | ||
522 | .cra_blocksize = AES_BLOCK_SIZE, | ||
523 | .cra_ctxsize = sizeof(struct s5p_aes_ctx), | ||
524 | .cra_alignmask = 0x0f, | ||
525 | .cra_type = &crypto_ablkcipher_type, | ||
526 | .cra_module = THIS_MODULE, | ||
527 | .cra_init = s5p_aes_cra_init, | ||
528 | .cra_u.ablkcipher = { | ||
529 | .min_keysize = AES_MIN_KEY_SIZE, | ||
530 | .max_keysize = AES_MAX_KEY_SIZE, | ||
531 | .setkey = s5p_aes_setkey, | ||
532 | .encrypt = s5p_aes_ecb_encrypt, | ||
533 | .decrypt = s5p_aes_ecb_decrypt, | ||
534 | } | ||
535 | }, | ||
536 | { | ||
537 | .cra_name = "cbc(aes)", | ||
538 | .cra_driver_name = "cbc-aes-s5p", | ||
539 | .cra_priority = 100, | ||
540 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
541 | CRYPTO_ALG_ASYNC, | ||
542 | .cra_blocksize = AES_BLOCK_SIZE, | ||
543 | .cra_ctxsize = sizeof(struct s5p_aes_ctx), | ||
544 | .cra_alignmask = 0x0f, | ||
545 | .cra_type = &crypto_ablkcipher_type, | ||
546 | .cra_module = THIS_MODULE, | ||
547 | .cra_init = s5p_aes_cra_init, | ||
548 | .cra_u.ablkcipher = { | ||
549 | .min_keysize = AES_MIN_KEY_SIZE, | ||
550 | .max_keysize = AES_MAX_KEY_SIZE, | ||
551 | .ivsize = AES_BLOCK_SIZE, | ||
552 | .setkey = s5p_aes_setkey, | ||
553 | .encrypt = s5p_aes_cbc_encrypt, | ||
554 | .decrypt = s5p_aes_cbc_decrypt, | ||
555 | } | ||
556 | }, | ||
557 | }; | ||
558 | |||
559 | static int s5p_aes_probe(struct platform_device *pdev) | ||
560 | { | ||
561 | int i, j, err = -ENODEV; | ||
562 | struct s5p_aes_dev *pdata; | ||
563 | struct device *dev = &pdev->dev; | ||
564 | struct resource *res; | ||
565 | |||
566 | if (s5p_dev) | ||
567 | return -EEXIST; | ||
568 | |||
569 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
570 | if (!res) | ||
571 | return -ENODEV; | ||
572 | |||
573 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | ||
574 | if (!pdata) | ||
575 | return -ENOMEM; | ||
576 | |||
577 | if (!devm_request_mem_region(dev, res->start, | ||
578 | resource_size(res), pdev->name)) | ||
579 | return -EBUSY; | ||
580 | |||
581 | pdata->clk = clk_get(dev, "secss"); | ||
582 | if (IS_ERR(pdata->clk)) { | ||
583 | dev_err(dev, "failed to find secss clock source\n"); | ||
584 | return -ENOENT; | ||
585 | } | ||
586 | |||
587 | clk_enable(pdata->clk); | ||
588 | |||
589 | spin_lock_init(&pdata->lock); | ||
590 | pdata->ioaddr = devm_ioremap(dev, res->start, resource_size(res)); | ||
591 | if (!pdata->ioaddr) { err = -ENOMEM; goto err_irq; } | ||
592 | |||
593 | pdata->irq_hash = platform_get_irq_byname(pdev, "hash"); | ||
594 | if (pdata->irq_hash < 0) { | ||
595 | err = pdata->irq_hash; | ||
596 | dev_warn(dev, "hash interrupt is not available.\n"); | ||
597 | goto err_irq; | ||
598 | } | ||
599 | err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt, | ||
600 | IRQF_SHARED, pdev->name, pdev); | ||
601 | if (err < 0) { | ||
602 | dev_warn(dev, "hash interrupt is not available.\n"); | ||
603 | goto err_irq; | ||
604 | } | ||
605 | |||
606 | pdata->irq_fc = platform_get_irq_byname(pdev, "feed control"); | ||
607 | if (pdata->irq_fc < 0) { | ||
608 | err = pdata->irq_fc; | ||
609 | dev_warn(dev, "feed control interrupt is not available.\n"); | ||
610 | goto err_irq; | ||
611 | } | ||
612 | err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt, | ||
613 | IRQF_SHARED, pdev->name, pdev); | ||
614 | if (err < 0) { | ||
615 | dev_warn(dev, "feed control interrupt is not available.\n"); | ||
616 | goto err_irq; | ||
617 | } | ||
618 | |||
619 | pdata->dev = dev; | ||
620 | platform_set_drvdata(pdev, pdata); | ||
621 | s5p_dev = pdata; | ||
622 | |||
623 | tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata); | ||
624 | crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN); | ||
625 | |||
626 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | ||
627 | INIT_LIST_HEAD(&algs[i].cra_list); | ||
628 | err = crypto_register_alg(&algs[i]); | ||
629 | if (err) | ||
630 | goto err_algs; | ||
631 | } | ||
632 | |||
633 | pr_info("s5p-sss driver registered\n"); | ||
634 | |||
635 | return 0; | ||
636 | |||
637 | err_algs: | ||
638 | dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err); | ||
639 | |||
640 | for (j = 0; j < i; j++) | ||
641 | crypto_unregister_alg(&algs[j]); | ||
642 | |||
643 | tasklet_kill(&pdata->tasklet); | ||
644 | |||
645 | err_irq: | ||
646 | clk_disable(pdata->clk); | ||
647 | clk_put(pdata->clk); | ||
648 | |||
649 | s5p_dev = NULL; | ||
650 | platform_set_drvdata(pdev, NULL); | ||
651 | |||
652 | return err; | ||
653 | } | ||
654 | |||
655 | static int s5p_aes_remove(struct platform_device *pdev) | ||
656 | { | ||
657 | struct s5p_aes_dev *pdata = platform_get_drvdata(pdev); | ||
658 | int i; | ||
659 | |||
660 | if (!pdata) | ||
661 | return -ENODEV; | ||
662 | |||
663 | for (i = 0; i < ARRAY_SIZE(algs); i++) | ||
664 | crypto_unregister_alg(&algs[i]); | ||
665 | |||
666 | tasklet_kill(&pdata->tasklet); | ||
667 | |||
668 | clk_disable(pdata->clk); | ||
669 | clk_put(pdata->clk); | ||
670 | |||
671 | s5p_dev = NULL; | ||
672 | platform_set_drvdata(pdev, NULL); | ||
673 | |||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | static struct platform_driver s5p_aes_crypto = { | ||
678 | .probe = s5p_aes_probe, | ||
679 | .remove = s5p_aes_remove, | ||
680 | .driver = { | ||
681 | .owner = THIS_MODULE, | ||
682 | .name = "s5p-secss", | ||
683 | }, | ||
684 | }; | ||
685 | |||
686 | static int __init s5p_aes_mod_init(void) | ||
687 | { | ||
688 | return platform_driver_register(&s5p_aes_crypto); | ||
689 | } | ||
690 | |||
691 | static void __exit s5p_aes_mod_exit(void) | ||
692 | { | ||
693 | platform_driver_unregister(&s5p_aes_crypto); | ||
694 | } | ||
695 | |||
696 | module_init(s5p_aes_mod_init); | ||
697 | module_exit(s5p_aes_mod_exit); | ||
698 | |||
699 | MODULE_DESCRIPTION("S5PV210 AES hw acceleration support."); | ||
700 | MODULE_LICENSE("GPL v2"); | ||
701 | MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>"); | ||
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 4bcd825b5739..854e2632f9a6 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -161,7 +161,7 @@ struct talitos_private { | |||
161 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) | 161 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) |
162 | { | 162 | { |
163 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); | 163 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); |
164 | talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr)); | 164 | talitos_ptr->eptr = upper_32_bits(dma_addr); |
165 | } | 165 | } |
166 | 166 | ||
167 | /* | 167 | /* |
@@ -332,10 +332,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc, | |||
332 | 332 | ||
333 | /* GO! */ | 333 | /* GO! */ |
334 | wmb(); | 334 | wmb(); |
335 | out_be32(priv->reg + TALITOS_FF(ch), | 335 | out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc)); |
336 | cpu_to_be32(upper_32_bits(request->dma_desc))); | ||
337 | out_be32(priv->reg + TALITOS_FF_LO(ch), | 336 | out_be32(priv->reg + TALITOS_FF_LO(ch), |
338 | cpu_to_be32(lower_32_bits(request->dma_desc))); | 337 | lower_32_bits(request->dma_desc)); |
339 | 338 | ||
340 | spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); | 339 | spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); |
341 | 340 | ||
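These two hunks, like the SHA-224 initial-value hunk that follows, drop byte swapping that sparse flags as redundant: out_be32() already stores its argument in big-endian order, and eptr is a single byte. The extra cpu_to_be32() was a no-op on the big-endian parts this driver runs on and would mangle the value on a little-endian build. Schematically:

	out_be32(reg, upper_32_bits(dma));			/* correct           */
	out_be32(reg, cpu_to_be32(upper_32_bits(dma)));		/* double-swap on LE */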
@@ -1751,14 +1750,14 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq) | |||
1751 | ahash_init(areq); | 1750 | ahash_init(areq); |
1752 | req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/ | 1751 | req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/ |
1753 | 1752 | ||
1754 | req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0); | 1753 | req_ctx->hw_context[0] = SHA224_H0; |
1755 | req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1); | 1754 | req_ctx->hw_context[1] = SHA224_H1; |
1756 | req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2); | 1755 | req_ctx->hw_context[2] = SHA224_H2; |
1757 | req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3); | 1756 | req_ctx->hw_context[3] = SHA224_H3; |
1758 | req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4); | 1757 | req_ctx->hw_context[4] = SHA224_H4; |
1759 | req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5); | 1758 | req_ctx->hw_context[5] = SHA224_H5; |
1760 | req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6); | 1759 | req_ctx->hw_context[6] = SHA224_H6; |
1761 | req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7); | 1760 | req_ctx->hw_context[7] = SHA224_H7; |
1762 | 1761 | ||
1763 | /* init 64-bit count */ | 1762 | /* init 64-bit count */ |
1764 | req_ctx->hw_context[8] = 0; | 1763 | req_ctx->hw_context[8] = 0; |
@@ -2333,8 +2332,7 @@ static int talitos_remove(struct platform_device *ofdev) | |||
2333 | talitos_unregister_rng(dev); | 2332 | talitos_unregister_rng(dev); |
2334 | 2333 | ||
2335 | for (i = 0; i < priv->num_channels; i++) | 2334 | for (i = 0; i < priv->num_channels; i++) |
2336 | if (priv->chan[i].fifo) | 2335 | kfree(priv->chan[i].fifo); |
2337 | kfree(priv->chan[i].fifo); | ||
2338 | 2336 | ||
2339 | kfree(priv->chan); | 2337 | kfree(priv->chan); |
2340 | 2338 | ||
@@ -2389,6 +2387,9 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
2389 | DESC_HDR_MODE0_MDEU_SHA256; | 2387 | DESC_HDR_MODE0_MDEU_SHA256; |
2390 | } | 2388 | } |
2391 | break; | 2389 | break; |
2390 | default: | ||
2391 | dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); | ||
2392 | return ERR_PTR(-EINVAL); | ||
2392 | } | 2393 | } |
2393 | 2394 | ||
2394 | alg->cra_module = THIS_MODULE; | 2395 | alg->cra_module = THIS_MODULE; |
@@ -2401,8 +2402,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
2401 | return t_alg; | 2402 | return t_alg; |
2402 | } | 2403 | } |
2403 | 2404 | ||
2404 | static int talitos_probe(struct platform_device *ofdev, | 2405 | static int talitos_probe(struct platform_device *ofdev) |
2405 | const struct of_device_id *match) | ||
2406 | { | 2406 | { |
2407 | struct device *dev = &ofdev->dev; | 2407 | struct device *dev = &ofdev->dev; |
2408 | struct device_node *np = ofdev->dev.of_node; | 2408 | struct device_node *np = ofdev->dev.of_node; |
@@ -2579,7 +2579,7 @@ static const struct of_device_id talitos_match[] = { | |||
2579 | }; | 2579 | }; |
2580 | MODULE_DEVICE_TABLE(of, talitos_match); | 2580 | MODULE_DEVICE_TABLE(of, talitos_match); |
2581 | 2581 | ||
2582 | static struct of_platform_driver talitos_driver = { | 2582 | static struct platform_driver talitos_driver = { |
2583 | .driver = { | 2583 | .driver = { |
2584 | .name = "talitos", | 2584 | .name = "talitos", |
2585 | .owner = THIS_MODULE, | 2585 | .owner = THIS_MODULE, |
@@ -2591,13 +2591,13 @@ static struct of_platform_driver talitos_driver = { | |||
2591 | 2591 | ||
2592 | static int __init talitos_init(void) | 2592 | static int __init talitos_init(void) |
2593 | { | 2593 | { |
2594 | return of_register_platform_driver(&talitos_driver); | 2594 | return platform_driver_register(&talitos_driver); |
2595 | } | 2595 | } |
2596 | module_init(talitos_init); | 2596 | module_init(talitos_init); |
2597 | 2597 | ||
2598 | static void __exit talitos_exit(void) | 2598 | static void __exit talitos_exit(void) |
2599 | { | 2599 | { |
2600 | of_unregister_platform_driver(&talitos_driver); | 2600 | platform_driver_unregister(&talitos_driver); |
2601 | } | 2601 | } |
2602 | module_exit(talitos_exit); | 2602 | module_exit(talitos_exit); |
2603 | 2603 | ||
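The remaining talitos hunks are mechanical fallout from the kernel-wide retirement of of_platform_driver: OF-matched devices now probe through the ordinary platform bus, so talitos_probe() loses its of_device_id argument (the device node is reached through ofdev->dev.of_node instead) and registration moves to platform_driver_register()/platform_driver_unregister(), with talitos_match hooked up via the driver's .of_match_table in context not shown here.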