author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
commit		bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree		216fdc1cbef450ca688135c5b8969169482d9a48 /drivers/crypto
parent		3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent		657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
crypto/async_tx/async_xor.c
drivers/dma/ioat/dma_v2.h
drivers/dma/ioat/pci.c
drivers/md/raid5.c
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/Kconfig		8
-rw-r--r--	drivers/crypto/hifn_795x.c	8
-rw-r--r--	drivers/crypto/padlock-aes.c	133
-rw-r--r--	drivers/crypto/talitos.c	713
4 files changed, 628 insertions, 234 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 01afd758072f..5b27692372bf 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -12,7 +12,7 @@ if CRYPTO_HW
12 | 12 | ||
13 | config CRYPTO_DEV_PADLOCK | 13 | config CRYPTO_DEV_PADLOCK |
14 | tristate "Support for VIA PadLock ACE" | 14 | tristate "Support for VIA PadLock ACE" |
15 | depends on X86_32 && !UML | 15 | depends on X86 && !UML |
16 | select CRYPTO_ALGAPI | 16 | select CRYPTO_ALGAPI |
17 | help | 17 | help |
18 | Some VIA processors come with an integrated crypto engine | 18 | Some VIA processors come with an integrated crypto engine |
@@ -34,7 +34,7 @@ config CRYPTO_DEV_PADLOCK_AES
34 | Available in VIA C3 and newer CPUs. | 34 | Available in VIA C3 and newer CPUs. |
35 | 35 | ||
36 | If unsure say M. The compiled module will be | 36 | If unsure say M. The compiled module will be |
37 | called padlock-aes.ko | 37 | called padlock-aes. |
38 | 38 | ||
39 | config CRYPTO_DEV_PADLOCK_SHA | 39 | config CRYPTO_DEV_PADLOCK_SHA |
40 | tristate "PadLock driver for SHA1 and SHA256 algorithms" | 40 | tristate "PadLock driver for SHA1 and SHA256 algorithms" |
@@ -47,7 +47,7 @@ config CRYPTO_DEV_PADLOCK_SHA
47 | Available in VIA C7 and newer processors. | 47 | Available in VIA C7 and newer processors. |
48 | 48 | ||
49 | If unsure say M. The compiled module will be | 49 | If unsure say M. The compiled module will be |
50 | called padlock-sha.ko | 50 | called padlock-sha. |
51 | 51 | ||
52 | config CRYPTO_DEV_GEODE | 52 | config CRYPTO_DEV_GEODE |
53 | tristate "Support for the Geode LX AES engine" | 53 | tristate "Support for the Geode LX AES engine" |
@@ -79,7 +79,7 @@ config ZCRYPT_MONOLITHIC
79 | bool "Monolithic zcrypt module" | 79 | bool "Monolithic zcrypt module" |
80 | depends on ZCRYPT="m" | 80 | depends on ZCRYPT="m" |
81 | help | 81 | help |
82 | Select this option if you want to have a single module z90crypt.ko | 82 | Select this option if you want to have a single module z90crypt, |
83 | that contains all parts of the crypto device driver (ap bus, | 83 | that contains all parts of the crypto device driver (ap bus, |
84 | request router and all the card drivers). | 84 | request router and all the card drivers). |
85 | 85 | ||
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 2bef086fb342..5f753fc08730 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data)
2564 | hifn_process_queue(dev); | 2564 | hifn_process_queue(dev); |
2565 | } | 2565 | } |
2566 | 2566 | ||
2567 | static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 2567 | static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
2568 | { | 2568 | { |
2569 | int err, i; | 2569 | int err, i; |
2570 | struct hifn_device *dev; | 2570 | struct hifn_device *dev; |
@@ -2696,7 +2696,7 @@ err_out_disable_pci_device:
2696 | return err; | 2696 | return err; |
2697 | } | 2697 | } |
2698 | 2698 | ||
2699 | static void hifn_remove(struct pci_dev *pdev) | 2699 | static void __devexit hifn_remove(struct pci_dev *pdev) |
2700 | { | 2700 | { |
2701 | int i; | 2701 | int i; |
2702 | struct hifn_device *dev; | 2702 | struct hifn_device *dev; |
@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = {
2744 | .remove = __devexit_p(hifn_remove), | 2744 | .remove = __devexit_p(hifn_remove), |
2745 | }; | 2745 | }; |
2746 | 2746 | ||
2747 | static int __devinit hifn_init(void) | 2747 | static int __init hifn_init(void) |
2748 | { | 2748 | { |
2749 | unsigned int freq; | 2749 | unsigned int freq; |
2750 | int err; | 2750 | int err; |
@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void)
2789 | return 0; | 2789 | return 0; |
2790 | } | 2790 | } |
2791 | 2791 | ||
2792 | static void __devexit hifn_fini(void) | 2792 | static void __exit hifn_fini(void) |
2793 | { | 2793 | { |
2794 | pci_unregister_driver(&hifn_pci_driver); | 2794 | pci_unregister_driver(&hifn_pci_driver); |
2795 | 2795 | ||
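The hifn hunks above restore the conventional pairing of hotplug and module-lifetime annotations: probe/remove become __devinit/__devexit (matching the __devexit_p() wrapper already used in the pci_driver table), while the module init/exit routines keep plain __init/__exit. A minimal sketch of that pairing for a hypothetical PCI driver (all foo_* names are illustrative):

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

static int __devinit foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	return 0;			/* device setup elided */
}

static void __devexit foo_remove(struct pci_dev *pdev)
{
	/* device teardown elided */
}

static struct pci_driver foo_driver = {
	.name	= "foo",
	.probe	= foo_probe,
	/* __devexit_p() becomes NULL when hotplug removal is compiled out */
	.remove	= __devexit_p(foo_remove),
};

static int __init foo_init(void)
{
	return pci_register_driver(&foo_driver);
}

static void __exit foo_fini(void)
{
	pci_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_fini);
```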
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 856b3cc25583..a9952b1236b0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,22 @@
18 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <asm/byteorder.h> | 20 | #include <asm/byteorder.h> |
21 | #include <asm/processor.h> | ||
21 | #include <asm/i387.h> | 22 | #include <asm/i387.h> |
22 | #include "padlock.h" | 23 | #include "padlock.h" |
23 | 24 | ||
25 | /* | ||
26 | * Number of data blocks actually fetched for each xcrypt insn. | ||
27 | * Processors with prefetch errata will fetch extra blocks. | ||
28 | */ | ||
29 | static unsigned int ecb_fetch_blocks = 2; | ||
30 | #define MAX_ECB_FETCH_BLOCKS (8) | ||
31 | #define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) | ||
32 | |||
33 | static unsigned int cbc_fetch_blocks = 1; | ||
34 | #define MAX_CBC_FETCH_BLOCKS (4) | ||
35 | #define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) | ||
36 | |||
24 | /* Control word. */ | 37 | /* Control word. */ |
25 | struct cword { | 38 | struct cword { |
26 | unsigned int __attribute__ ((__packed__)) | 39 | unsigned int __attribute__ ((__packed__)) |
@@ -154,7 +167,11 @@ static inline void padlock_reset_key(struct cword *cword)
154 | int cpu = raw_smp_processor_id(); | 167 | int cpu = raw_smp_processor_id(); |
155 | 168 | ||
156 | if (cword != per_cpu(last_cword, cpu)) | 169 | if (cword != per_cpu(last_cword, cpu)) |
170 | #ifndef CONFIG_X86_64 | ||
157 | asm volatile ("pushfl; popfl"); | 171 | asm volatile ("pushfl; popfl"); |
172 | #else | ||
173 | asm volatile ("pushfq; popfq"); | ||
174 | #endif | ||
158 | } | 175 | } |
159 | 176 | ||
160 | static inline void padlock_store_cword(struct cword *cword) | 177 | static inline void padlock_store_cword(struct cword *cword) |
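The pushf/popf pair above is how the driver forces PadLock to re-read the key before the next xcrypt instruction: the engine tracks key validity through the flags register, and writing it back invalidates the cached key schedule. The hunk's only functional change is using the q-suffixed forms on x86_64, where pushfl/popfl do not assemble. The same trick as a standalone sketch (padlock_force_key_reload is a hypothetical helper name):

```c
/* Make the PadLock engine reload key material before the next xcrypt
 * by writing the flags register back to itself. */
static inline void padlock_force_key_reload(void)
{
#ifdef CONFIG_X86_64
	asm volatile("pushfq; popfq");
#else
	asm volatile("pushfl; popfl");
#endif
}
```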
@@ -168,64 +185,111 @@ static inline void padlock_store_cword(struct cword *cword)
168 | * should be used only inside the irq_ts_save/restore() context | 185 | * should be used only inside the irq_ts_save/restore() context |
169 | */ | 186 | */ |
170 | 187 | ||
171 | static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, | 188 | static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, |
172 | struct cword *control_word) | 189 | struct cword *control_word, int count) |
173 | { | 190 | { |
174 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ | 191 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
175 | : "+S"(input), "+D"(output) | 192 | : "+S"(input), "+D"(output) |
176 | : "d"(control_word), "b"(key), "c"(1)); | 193 | : "d"(control_word), "b"(key), "c"(count)); |
177 | } | 194 | } |
178 | 195 | ||
179 | static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword) | 196 | static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, |
197 | u8 *iv, struct cword *control_word, int count) | ||
180 | { | 198 | { |
181 | u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1]; | 199 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ |
200 | : "+S" (input), "+D" (output), "+a" (iv) | ||
201 | : "d" (control_word), "b" (key), "c" (count)); | ||
202 | return iv; | ||
203 | } | ||
204 | |||
205 | static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, | ||
206 | struct cword *cword, int count) | ||
207 | { | ||
208 | /* | ||
209 | * Padlock prefetches extra data so we must provide mapped input buffers. | ||
210 | * Assume there are at least 16 bytes of stack already in use. | ||
211 | */ | ||
212 | u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; | ||
182 | u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); | 213 | u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); |
183 | 214 | ||
184 | memcpy(tmp, in, AES_BLOCK_SIZE); | 215 | memcpy(tmp, in, count * AES_BLOCK_SIZE); |
185 | padlock_xcrypt(tmp, out, key, cword); | 216 | rep_xcrypt_ecb(tmp, out, key, cword, count); |
186 | } | 217 | } |
187 | 218 | ||
188 | static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, | 219 | static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, |
189 | struct cword *cword) | 220 | u8 *iv, struct cword *cword, int count) |
190 | { | 221 | { |
191 | /* padlock_xcrypt requires at least two blocks of data. */ | 222 | /* |
192 | if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & | 223 | * Padlock prefetches extra data so we must provide mapped input buffers. |
193 | (PAGE_SIZE - 1)))) { | 224 | * Assume there are at least 16 bytes of stack already in use. |
194 | aes_crypt_copy(in, out, key, cword); | 225 | */ |
226 | u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; | ||
227 | u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); | ||
228 | |||
229 | memcpy(tmp, in, count * AES_BLOCK_SIZE); | ||
230 | return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); | ||
231 | } | ||
232 | |||
233 | static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, | ||
234 | struct cword *cword, int count) | ||
235 | { | ||
236 | /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. | ||
237 | * We could avoid some copying here but it's probably not worth it. | ||
238 | */ | ||
239 | if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) { | ||
240 | ecb_crypt_copy(in, out, key, cword, count); | ||
195 | return; | 241 | return; |
196 | } | 242 | } |
197 | 243 | ||
198 | padlock_xcrypt(in, out, key, cword); | 244 | rep_xcrypt_ecb(in, out, key, cword, count); |
245 | } | ||
246 | |||
247 | static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, | ||
248 | u8 *iv, struct cword *cword, int count) | ||
249 | { | ||
250 | /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ | ||
251 | if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE)) | ||
252 | return cbc_crypt_copy(in, out, key, iv, cword, count); | ||
253 | |||
254 | return rep_xcrypt_cbc(in, out, key, iv, cword, count); | ||
199 | } | 255 | } |
200 | 256 | ||
201 | static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, | 257 | static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, |
202 | void *control_word, u32 count) | 258 | void *control_word, u32 count) |
203 | { | 259 | { |
204 | if (count == 1) { | 260 | u32 initial = count & (ecb_fetch_blocks - 1); |
205 | aes_crypt(input, output, key, control_word); | 261 | |
262 | if (count < ecb_fetch_blocks) { | ||
263 | ecb_crypt(input, output, key, control_word, count); | ||
206 | return; | 264 | return; |
207 | } | 265 | } |
208 | 266 | ||
209 | asm volatile ("test $1, %%cl;" | 267 | if (initial) |
210 | "je 1f;" | 268 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
211 | "lea -1(%%ecx), %%eax;" | 269 | : "+S"(input), "+D"(output) |
212 | "mov $1, %%ecx;" | 270 | : "d"(control_word), "b"(key), "c"(initial)); |
213 | ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ | 271 | |
214 | "mov %%eax, %%ecx;" | 272 | asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
215 | "1:" | ||
216 | ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ | ||
217 | : "+S"(input), "+D"(output) | 273 | : "+S"(input), "+D"(output) |
218 | : "d"(control_word), "b"(key), "c"(count) | 274 | : "d"(control_word), "b"(key), "c"(count - initial)); |
219 | : "ax"); | ||
220 | } | 275 | } |
221 | 276 | ||
222 | static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, | 277 | static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, |
223 | u8 *iv, void *control_word, u32 count) | 278 | u8 *iv, void *control_word, u32 count) |
224 | { | 279 | { |
225 | /* rep xcryptcbc */ | 280 | u32 initial = count & (cbc_fetch_blocks - 1); |
226 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" | 281 | |
282 | if (count < cbc_fetch_blocks) | ||
283 | return cbc_crypt(input, output, key, iv, control_word, count); | ||
284 | |||
285 | if (initial) | ||
286 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ | ||
287 | : "+S" (input), "+D" (output), "+a" (iv) | ||
288 | : "d" (control_word), "b" (key), "c" (count)); | ||
289 | |||
290 | asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ | ||
227 | : "+S" (input), "+D" (output), "+a" (iv) | 291 | : "+S" (input), "+D" (output), "+a" (iv) |
228 | : "d" (control_word), "b" (key), "c" (count)); | 292 | : "d" (control_word), "b" (key), "c" (count-initial)); |
229 | return iv; | 293 | return iv; |
230 | } | 294 | } |
231 | 295 | ||
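The rewritten padlock_xcrypt_ecb()/padlock_xcrypt_cbc() replace the old assembly loop with a split into an odd-sized leading chunk plus a bulk pass that is a whole multiple of the fetch size; because rep xcrypt advances its source and destination registers as it executes, the second asm statement continues exactly where the first stopped. A sketch of the same chunking with an explicit callback in place of the inline assembly (xcrypt_chunked and do_blocks are hypothetical; fetch_blocks is assumed to be a power of two, which the mask trick requires):

```c
#include <linux/types.h>
#include <crypto/aes.h>		/* AES_BLOCK_SIZE */

static void xcrypt_chunked(const u8 *in, u8 *out, u32 count,
			   u32 fetch_blocks,
			   void (*do_blocks)(const u8 *in, u8 *out, u32 n))
{
	u32 initial = count & (fetch_blocks - 1);  /* count % fetch_blocks */

	if (initial)
		do_blocks(in, out, initial);       /* odd leading blocks */

	/* unlike rep xcrypt, a C callback does not auto-advance */
	do_blocks(in + initial * AES_BLOCK_SIZE,
		  out + initial * AES_BLOCK_SIZE,
		  count - initial);                /* aligned bulk pass */
}
```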
@@ -236,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
236 | 300 | ||
237 | padlock_reset_key(&ctx->cword.encrypt); | 301 | padlock_reset_key(&ctx->cword.encrypt); |
238 | ts_state = irq_ts_save(); | 302 | ts_state = irq_ts_save(); |
239 | aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); | 303 | ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); |
240 | irq_ts_restore(ts_state); | 304 | irq_ts_restore(ts_state); |
241 | padlock_store_cword(&ctx->cword.encrypt); | 305 | padlock_store_cword(&ctx->cword.encrypt); |
242 | } | 306 | } |
@@ -248,7 +312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
248 | 312 | ||
249 | padlock_reset_key(&ctx->cword.encrypt); | 313 | padlock_reset_key(&ctx->cword.encrypt); |
250 | ts_state = irq_ts_save(); | 314 | ts_state = irq_ts_save(); |
251 | aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); | 315 | ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); |
252 | irq_ts_restore(ts_state); | 316 | irq_ts_restore(ts_state); |
253 | padlock_store_cword(&ctx->cword.encrypt); | 317 | padlock_store_cword(&ctx->cword.encrypt); |
254 | } | 318 | } |
@@ -441,6 +505,7 @@ static struct crypto_alg cbc_aes_alg = {
441 | static int __init padlock_init(void) | 505 | static int __init padlock_init(void) |
442 | { | 506 | { |
443 | int ret; | 507 | int ret; |
508 | struct cpuinfo_x86 *c = &cpu_data(0); | ||
444 | 509 | ||
445 | if (!cpu_has_xcrypt) { | 510 | if (!cpu_has_xcrypt) { |
446 | printk(KERN_NOTICE PFX "VIA PadLock not detected.\n"); | 511 | printk(KERN_NOTICE PFX "VIA PadLock not detected.\n"); |
@@ -463,6 +528,12 @@ static int __init padlock_init(void)
463 | 528 | ||
464 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); | 529 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); |
465 | 530 | ||
531 | if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { | ||
532 | ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; | ||
533 | cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; | ||
534 | printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); | ||
535 | } | ||
536 | |||
466 | out: | 537 | out: |
467 | return ret; | 538 | return ret; |
468 | 539 | ||
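The Nano workaround has two parts: padlock_init() detects the affected part (family 6, model 15, stepping 2) and raises the fetch sizes to the maximum the erratum can touch, while the *_crypt_copy() helpers bounce any input whose prefetch window could cross a page boundary through an aligned stack buffer. A hedged restatement of the intended trigger condition, expressed with the kernel's offset_in_page() helper (the patch spells its boundary test differently, so treat this as the intent rather than the literal expression):

```c
#include <linux/mm.h>	/* offset_in_page(), PAGE_SIZE */

/* True if the engine's prefetch window, starting at `in`, could run
 * past the end of the page containing `in` into a possibly unmapped
 * page; only then is the bounce-buffer copy path needed. */
static inline bool prefetch_crosses_page(const void *in,
					 unsigned int fetch_bytes)
{
	return offset_in_page(in) + fetch_bytes > PAGE_SIZE;
}
```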
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a3918c16b3db..c70775fd3ce2 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -44,6 +44,8 @@
44 | #include <crypto/sha.h> | 44 | #include <crypto/sha.h> |
45 | #include <crypto/aead.h> | 45 | #include <crypto/aead.h> |
46 | #include <crypto/authenc.h> | 46 | #include <crypto/authenc.h> |
47 | #include <crypto/skcipher.h> | ||
48 | #include <crypto/scatterwalk.h> | ||
47 | 49 | ||
48 | #include "talitos.h" | 50 | #include "talitos.h" |
49 | 51 | ||
@@ -339,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
339 | status = error; | 341 | status = error; |
340 | 342 | ||
341 | dma_unmap_single(dev, request->dma_desc, | 343 | dma_unmap_single(dev, request->dma_desc, |
342 | sizeof(struct talitos_desc), DMA_BIDIRECTIONAL); | 344 | sizeof(struct talitos_desc), |
345 | DMA_BIDIRECTIONAL); | ||
343 | 346 | ||
344 | /* copy entries so we can call callback outside lock */ | 347 | /* copy entries so we can call callback outside lock */ |
345 | saved_req.desc = request->desc; | 348 | saved_req.desc = request->desc; |
@@ -413,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
413 | /* | 416 | /* |
414 | * user diagnostics; report root cause of error based on execution unit status | 417 | * user diagnostics; report root cause of error based on execution unit status |
415 | */ | 418 | */ |
416 | static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc) | 419 | static void report_eu_error(struct device *dev, int ch, |
420 | struct talitos_desc *desc) | ||
417 | { | 421 | { |
418 | struct talitos_private *priv = dev_get_drvdata(dev); | 422 | struct talitos_private *priv = dev_get_drvdata(dev); |
419 | int i; | 423 | int i; |
@@ -684,8 +688,8 @@ struct talitos_ctx {
684 | unsigned int authsize; | 688 | unsigned int authsize; |
685 | }; | 689 | }; |
686 | 690 | ||
687 | static int aead_authenc_setauthsize(struct crypto_aead *authenc, | 691 | static int aead_setauthsize(struct crypto_aead *authenc, |
688 | unsigned int authsize) | 692 | unsigned int authsize) |
689 | { | 693 | { |
690 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 694 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
691 | 695 | ||
@@ -694,8 +698,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc,
694 | return 0; | 698 | return 0; |
695 | } | 699 | } |
696 | 700 | ||
697 | static int aead_authenc_setkey(struct crypto_aead *authenc, | 701 | static int aead_setkey(struct crypto_aead *authenc, |
698 | const u8 *key, unsigned int keylen) | 702 | const u8 *key, unsigned int keylen) |
699 | { | 703 | { |
700 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 704 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
701 | struct rtattr *rta = (void *)key; | 705 | struct rtattr *rta = (void *)key; |
@@ -740,7 +744,7 @@ badkey:
740 | } | 744 | } |
741 | 745 | ||
742 | /* | 746 | /* |
743 | * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor | 747 | * talitos_edesc - s/w-extended descriptor |
744 | * @src_nents: number of segments in input scatterlist | 748 | * @src_nents: number of segments in input scatterlist |
745 | * @dst_nents: number of segments in output scatterlist | 749 | * @dst_nents: number of segments in output scatterlist |
746 | * @dma_len: length of dma mapped link_tbl space | 750 | * @dma_len: length of dma mapped link_tbl space |
@@ -752,17 +756,67 @@ badkey:
752 | * is greater than 1, an integrity check value is concatenated to the end | 756 | * is greater than 1, an integrity check value is concatenated to the end |
753 | * of link_tbl data | 757 | * of link_tbl data |
754 | */ | 758 | */ |
755 | struct ipsec_esp_edesc { | 759 | struct talitos_edesc { |
756 | int src_nents; | 760 | int src_nents; |
757 | int dst_nents; | 761 | int dst_nents; |
762 | int src_is_chained; | ||
763 | int dst_is_chained; | ||
758 | int dma_len; | 764 | int dma_len; |
759 | dma_addr_t dma_link_tbl; | 765 | dma_addr_t dma_link_tbl; |
760 | struct talitos_desc desc; | 766 | struct talitos_desc desc; |
761 | struct talitos_ptr link_tbl[0]; | 767 | struct talitos_ptr link_tbl[0]; |
762 | }; | 768 | }; |
763 | 769 | ||
770 | static int talitos_map_sg(struct device *dev, struct scatterlist *sg, | ||
771 | unsigned int nents, enum dma_data_direction dir, | ||
772 | int chained) | ||
773 | { | ||
774 | if (unlikely(chained)) | ||
775 | while (sg) { | ||
776 | dma_map_sg(dev, sg, 1, dir); | ||
777 | sg = scatterwalk_sg_next(sg); | ||
778 | } | ||
779 | else | ||
780 | dma_map_sg(dev, sg, nents, dir); | ||
781 | return nents; | ||
782 | } | ||
783 | |||
784 | static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg, | ||
785 | enum dma_data_direction dir) | ||
786 | { | ||
787 | while (sg) { | ||
788 | dma_unmap_sg(dev, sg, 1, dir); | ||
789 | sg = scatterwalk_sg_next(sg); | ||
790 | } | ||
791 | } | ||
792 | |||
793 | static void talitos_sg_unmap(struct device *dev, | ||
794 | struct talitos_edesc *edesc, | ||
795 | struct scatterlist *src, | ||
796 | struct scatterlist *dst) | ||
797 | { | ||
798 | unsigned int src_nents = edesc->src_nents ? : 1; | ||
799 | unsigned int dst_nents = edesc->dst_nents ? : 1; | ||
800 | |||
801 | if (src != dst) { | ||
802 | if (edesc->src_is_chained) | ||
803 | talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE); | ||
804 | else | ||
805 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); | ||
806 | |||
807 | if (edesc->dst_is_chained) | ||
808 | talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE); | ||
809 | else | ||
810 | dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); | ||
811 | } else | ||
812 | if (edesc->src_is_chained) | ||
813 | talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); | ||
814 | else | ||
815 | dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); | ||
816 | } | ||
817 | |||
764 | static void ipsec_esp_unmap(struct device *dev, | 818 | static void ipsec_esp_unmap(struct device *dev, |
765 | struct ipsec_esp_edesc *edesc, | 819 | struct talitos_edesc *edesc, |
766 | struct aead_request *areq) | 820 | struct aead_request *areq) |
767 | { | 821 | { |
768 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); | 822 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); |
@@ -772,15 +826,7 @@ static void ipsec_esp_unmap(struct device *dev,
772 | 826 | ||
773 | dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE); | 827 | dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE); |
774 | 828 | ||
775 | if (areq->src != areq->dst) { | 829 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); |
776 | dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1, | ||
777 | DMA_TO_DEVICE); | ||
778 | dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1, | ||
779 | DMA_FROM_DEVICE); | ||
780 | } else { | ||
781 | dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1, | ||
782 | DMA_BIDIRECTIONAL); | ||
783 | } | ||
784 | 830 | ||
785 | if (edesc->dma_len) | 831 | if (edesc->dma_len) |
786 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | 832 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, |
@@ -795,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
795 | int err) | 841 | int err) |
796 | { | 842 | { |
797 | struct aead_request *areq = context; | 843 | struct aead_request *areq = context; |
798 | struct ipsec_esp_edesc *edesc = | ||
799 | container_of(desc, struct ipsec_esp_edesc, desc); | ||
800 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 844 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
801 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 845 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
846 | struct talitos_edesc *edesc; | ||
802 | struct scatterlist *sg; | 847 | struct scatterlist *sg; |
803 | void *icvdata; | 848 | void *icvdata; |
804 | 849 | ||
850 | edesc = container_of(desc, struct talitos_edesc, desc); | ||
851 | |||
805 | ipsec_esp_unmap(dev, edesc, areq); | 852 | ipsec_esp_unmap(dev, edesc, areq); |
806 | 853 | ||
807 | /* copy the generated ICV to dst */ | 854 | /* copy the generated ICV to dst */ |
@@ -819,17 +866,18 @@ static void ipsec_esp_encrypt_done(struct device *dev,
819 | } | 866 | } |
820 | 867 | ||
821 | static void ipsec_esp_decrypt_swauth_done(struct device *dev, | 868 | static void ipsec_esp_decrypt_swauth_done(struct device *dev, |
822 | struct talitos_desc *desc, void *context, | 869 | struct talitos_desc *desc, |
823 | int err) | 870 | void *context, int err) |
824 | { | 871 | { |
825 | struct aead_request *req = context; | 872 | struct aead_request *req = context; |
826 | struct ipsec_esp_edesc *edesc = | ||
827 | container_of(desc, struct ipsec_esp_edesc, desc); | ||
828 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 873 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
829 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 874 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
875 | struct talitos_edesc *edesc; | ||
830 | struct scatterlist *sg; | 876 | struct scatterlist *sg; |
831 | void *icvdata; | 877 | void *icvdata; |
832 | 878 | ||
879 | edesc = container_of(desc, struct talitos_edesc, desc); | ||
880 | |||
833 | ipsec_esp_unmap(dev, edesc, req); | 881 | ipsec_esp_unmap(dev, edesc, req); |
834 | 882 | ||
835 | if (!err) { | 883 | if (!err) { |
@@ -851,20 +899,20 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
851 | } | 899 | } |
852 | 900 | ||
853 | static void ipsec_esp_decrypt_hwauth_done(struct device *dev, | 901 | static void ipsec_esp_decrypt_hwauth_done(struct device *dev, |
854 | struct talitos_desc *desc, void *context, | 902 | struct talitos_desc *desc, |
855 | int err) | 903 | void *context, int err) |
856 | { | 904 | { |
857 | struct aead_request *req = context; | 905 | struct aead_request *req = context; |
858 | struct ipsec_esp_edesc *edesc = | 906 | struct talitos_edesc *edesc; |
859 | container_of(desc, struct ipsec_esp_edesc, desc); | 907 | |
908 | edesc = container_of(desc, struct talitos_edesc, desc); | ||
860 | 909 | ||
861 | ipsec_esp_unmap(dev, edesc, req); | 910 | ipsec_esp_unmap(dev, edesc, req); |
862 | 911 | ||
863 | /* check ICV auth status */ | 912 | /* check ICV auth status */ |
864 | if (!err) | 913 | if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != |
865 | if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != | 914 | DESC_HDR_LO_ICCR1_PASS)) |
866 | DESC_HDR_LO_ICCR1_PASS) | 915 | err = -EBADMSG; |
867 | err = -EBADMSG; | ||
868 | 916 | ||
869 | kfree(edesc); | 917 | kfree(edesc); |
870 | 918 | ||
@@ -886,7 +934,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
886 | link_tbl_ptr->j_extent = 0; | 934 | link_tbl_ptr->j_extent = 0; |
887 | link_tbl_ptr++; | 935 | link_tbl_ptr++; |
888 | cryptlen -= sg_dma_len(sg); | 936 | cryptlen -= sg_dma_len(sg); |
889 | sg = sg_next(sg); | 937 | sg = scatterwalk_sg_next(sg); |
890 | } | 938 | } |
891 | 939 | ||
892 | /* adjust (decrease) last one (or two) entry's len to cryptlen */ | 940 | /* adjust (decrease) last one (or two) entry's len to cryptlen */ |
@@ -910,7 +958,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
910 | /* | 958 | /* |
911 | * fill in and submit ipsec_esp descriptor | 959 | * fill in and submit ipsec_esp descriptor |
912 | */ | 960 | */ |
913 | static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | 961 | static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, |
914 | u8 *giv, u64 seq, | 962 | u8 *giv, u64 seq, |
915 | void (*callback) (struct device *dev, | 963 | void (*callback) (struct device *dev, |
916 | struct talitos_desc *desc, | 964 | struct talitos_desc *desc, |
@@ -952,32 +1000,31 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
952 | desc->ptr[4].len = cpu_to_be16(cryptlen); | 1000 | desc->ptr[4].len = cpu_to_be16(cryptlen); |
953 | desc->ptr[4].j_extent = authsize; | 1001 | desc->ptr[4].j_extent = authsize; |
954 | 1002 | ||
955 | if (areq->src == areq->dst) | 1003 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, |
956 | sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1, | 1004 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL |
957 | DMA_BIDIRECTIONAL); | 1005 | : DMA_TO_DEVICE, |
958 | else | 1006 | edesc->src_is_chained); |
959 | sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1, | ||
960 | DMA_TO_DEVICE); | ||
961 | 1007 | ||
962 | if (sg_count == 1) { | 1008 | if (sg_count == 1) { |
963 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 1009 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); |
964 | } else { | 1010 | } else { |
965 | sg_link_tbl_len = cryptlen; | 1011 | sg_link_tbl_len = cryptlen; |
966 | 1012 | ||
967 | if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) && | 1013 | if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) |
968 | (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) { | ||
969 | sg_link_tbl_len = cryptlen + authsize; | 1014 | sg_link_tbl_len = cryptlen + authsize; |
970 | } | 1015 | |
971 | sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, | 1016 | sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, |
972 | &edesc->link_tbl[0]); | 1017 | &edesc->link_tbl[0]); |
973 | if (sg_count > 1) { | 1018 | if (sg_count > 1) { |
974 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1019 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
975 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); | 1020 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); |
976 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1021 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
977 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1022 | edesc->dma_len, |
1023 | DMA_BIDIRECTIONAL); | ||
978 | } else { | 1024 | } else { |
979 | /* Only one segment now, so no link tbl needed */ | 1025 | /* Only one segment now, so no link tbl needed */ |
980 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 1026 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> |
1027 | src)); | ||
981 | } | 1028 | } |
982 | } | 1029 | } |
983 | 1030 | ||
@@ -985,10 +1032,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
985 | desc->ptr[5].len = cpu_to_be16(cryptlen); | 1032 | desc->ptr[5].len = cpu_to_be16(cryptlen); |
986 | desc->ptr[5].j_extent = authsize; | 1033 | desc->ptr[5].j_extent = authsize; |
987 | 1034 | ||
988 | if (areq->src != areq->dst) { | 1035 | if (areq->src != areq->dst) |
989 | sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1, | 1036 | sg_count = talitos_map_sg(dev, areq->dst, |
990 | DMA_FROM_DEVICE); | 1037 | edesc->dst_nents ? : 1, |
991 | } | 1038 | DMA_FROM_DEVICE, |
1039 | edesc->dst_is_chained); | ||
992 | 1040 | ||
993 | if (sg_count == 1) { | 1041 | if (sg_count == 1) { |
994 | desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | 1042 | desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); |
@@ -1033,49 +1081,55 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1033 | return ret; | 1081 | return ret; |
1034 | } | 1082 | } |
1035 | 1083 | ||
1036 | |||
1037 | /* | 1084 | /* |
1038 | * derive number of elements in scatterlist | 1085 | * derive number of elements in scatterlist |
1039 | */ | 1086 | */ |
1040 | static int sg_count(struct scatterlist *sg_list, int nbytes) | 1087 | static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) |
1041 | { | 1088 | { |
1042 | struct scatterlist *sg = sg_list; | 1089 | struct scatterlist *sg = sg_list; |
1043 | int sg_nents = 0; | 1090 | int sg_nents = 0; |
1044 | 1091 | ||
1045 | while (nbytes) { | 1092 | *chained = 0; |
1093 | while (nbytes > 0) { | ||
1046 | sg_nents++; | 1094 | sg_nents++; |
1047 | nbytes -= sg->length; | 1095 | nbytes -= sg->length; |
1048 | sg = sg_next(sg); | 1096 | if (!sg_is_last(sg) && (sg + 1)->length == 0) |
1097 | *chained = 1; | ||
1098 | sg = scatterwalk_sg_next(sg); | ||
1049 | } | 1099 | } |
1050 | 1100 | ||
1051 | return sg_nents; | 1101 | return sg_nents; |
1052 | } | 1102 | } |
1053 | 1103 | ||
1054 | /* | 1104 | /* |
1055 | * allocate and map the ipsec_esp extended descriptor | 1105 | * allocate and map the extended descriptor |
1056 | */ | 1106 | */ |
1057 | static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | 1107 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1058 | int icv_stashing) | 1108 | struct scatterlist *src, |
1109 | struct scatterlist *dst, | ||
1110 | unsigned int cryptlen, | ||
1111 | unsigned int authsize, | ||
1112 | int icv_stashing, | ||
1113 | u32 cryptoflags) | ||
1059 | { | 1114 | { |
1060 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1115 | struct talitos_edesc *edesc; |
1061 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | ||
1062 | struct ipsec_esp_edesc *edesc; | ||
1063 | int src_nents, dst_nents, alloc_len, dma_len; | 1116 | int src_nents, dst_nents, alloc_len, dma_len; |
1064 | gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1117 | int src_chained, dst_chained = 0; |
1118 | gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
1065 | GFP_ATOMIC; | 1119 | GFP_ATOMIC; |
1066 | 1120 | ||
1067 | if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) { | 1121 | if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { |
1068 | dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n"); | 1122 | dev_err(dev, "length exceeds h/w max limit\n"); |
1069 | return ERR_PTR(-EINVAL); | 1123 | return ERR_PTR(-EINVAL); |
1070 | } | 1124 | } |
1071 | 1125 | ||
1072 | src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize); | 1126 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); |
1073 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1127 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1074 | 1128 | ||
1075 | if (areq->dst == areq->src) { | 1129 | if (dst == src) { |
1076 | dst_nents = src_nents; | 1130 | dst_nents = src_nents; |
1077 | } else { | 1131 | } else { |
1078 | dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize); | 1132 | dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained); |
1079 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1133 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
1080 | } | 1134 | } |
1081 | 1135 | ||
@@ -1084,39 +1138,52 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1084 | * allowing for two separate entries for ICV and generated ICV (+ 2), | 1138 | * allowing for two separate entries for ICV and generated ICV (+ 2), |
1085 | * and the ICV data itself | 1139 | * and the ICV data itself |
1086 | */ | 1140 | */ |
1087 | alloc_len = sizeof(struct ipsec_esp_edesc); | 1141 | alloc_len = sizeof(struct talitos_edesc); |
1088 | if (src_nents || dst_nents) { | 1142 | if (src_nents || dst_nents) { |
1089 | dma_len = (src_nents + dst_nents + 2) * | 1143 | dma_len = (src_nents + dst_nents + 2) * |
1090 | sizeof(struct talitos_ptr) + ctx->authsize; | 1144 | sizeof(struct talitos_ptr) + authsize; |
1091 | alloc_len += dma_len; | 1145 | alloc_len += dma_len; |
1092 | } else { | 1146 | } else { |
1093 | dma_len = 0; | 1147 | dma_len = 0; |
1094 | alloc_len += icv_stashing ? ctx->authsize : 0; | 1148 | alloc_len += icv_stashing ? authsize : 0; |
1095 | } | 1149 | } |
1096 | 1150 | ||
1097 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1151 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
1098 | if (!edesc) { | 1152 | if (!edesc) { |
1099 | dev_err(ctx->dev, "could not allocate edescriptor\n"); | 1153 | dev_err(dev, "could not allocate edescriptor\n"); |
1100 | return ERR_PTR(-ENOMEM); | 1154 | return ERR_PTR(-ENOMEM); |
1101 | } | 1155 | } |
1102 | 1156 | ||
1103 | edesc->src_nents = src_nents; | 1157 | edesc->src_nents = src_nents; |
1104 | edesc->dst_nents = dst_nents; | 1158 | edesc->dst_nents = dst_nents; |
1159 | edesc->src_is_chained = src_chained; | ||
1160 | edesc->dst_is_chained = dst_chained; | ||
1105 | edesc->dma_len = dma_len; | 1161 | edesc->dma_len = dma_len; |
1106 | edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0], | 1162 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], |
1107 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1163 | edesc->dma_len, DMA_BIDIRECTIONAL); |
1108 | 1164 | ||
1109 | return edesc; | 1165 | return edesc; |
1110 | } | 1166 | } |
1111 | 1167 | ||
1112 | static int aead_authenc_encrypt(struct aead_request *req) | 1168 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, |
1169 | int icv_stashing) | ||
1170 | { | ||
1171 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | ||
1172 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | ||
1173 | |||
1174 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, | ||
1175 | areq->cryptlen, ctx->authsize, icv_stashing, | ||
1176 | areq->base.flags); | ||
1177 | } | ||
1178 | |||
1179 | static int aead_encrypt(struct aead_request *req) | ||
1113 | { | 1180 | { |
1114 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 1181 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
1115 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1182 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1116 | struct ipsec_esp_edesc *edesc; | 1183 | struct talitos_edesc *edesc; |
1117 | 1184 | ||
1118 | /* allocate extended descriptor */ | 1185 | /* allocate extended descriptor */ |
1119 | edesc = ipsec_esp_edesc_alloc(req, 0); | 1186 | edesc = aead_edesc_alloc(req, 0); |
1120 | if (IS_ERR(edesc)) | 1187 | if (IS_ERR(edesc)) |
1121 | return PTR_ERR(edesc); | 1188 | return PTR_ERR(edesc); |
1122 | 1189 | ||
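talitos_edesc_alloc() sizes a single allocation for everything the request needs after submission, so the completion callbacks can release it all with one kfree(). A sketch of the layout the alloc_len arithmetic above implies (proportions illustrative):

```c
/*
 * kmalloc(alloc_len, GFP_DMA | flags) lays out:
 *
 *   +---------------------------------------+
 *   | struct talitos_edesc (fixed part)     |
 *   +---------------------------------------+ <- edesc->link_tbl[0]
 *   | (src_nents + dst_nents + 2) entries   |    DMA-mapped as
 *   | of struct talitos_ptr                 |    edesc->dma_link_tbl
 *   +---------------------------------------+
 *   | authsize bytes of ICV stash           |    used by the s/w
 *   +---------------------------------------+    ICV-check path
 *
 * When no link table is needed (src_nents == dst_nents == 0), only
 * the ICV stash is appended, and only if icv_stashing is set.
 */
```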
@@ -1126,70 +1193,67 @@ static int aead_authenc_encrypt(struct aead_request *req)
1126 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); | 1193 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); |
1127 | } | 1194 | } |
1128 | 1195 | ||
1129 | 1196 | static int aead_decrypt(struct aead_request *req) | |
1130 | |||
1131 | static int aead_authenc_decrypt(struct aead_request *req) | ||
1132 | { | 1197 | { |
1133 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 1198 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
1134 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1199 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1135 | unsigned int authsize = ctx->authsize; | 1200 | unsigned int authsize = ctx->authsize; |
1136 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); | 1201 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); |
1137 | struct ipsec_esp_edesc *edesc; | 1202 | struct talitos_edesc *edesc; |
1138 | struct scatterlist *sg; | 1203 | struct scatterlist *sg; |
1139 | void *icvdata; | 1204 | void *icvdata; |
1140 | 1205 | ||
1141 | req->cryptlen -= authsize; | 1206 | req->cryptlen -= authsize; |
1142 | 1207 | ||
1143 | /* allocate extended descriptor */ | 1208 | /* allocate extended descriptor */ |
1144 | edesc = ipsec_esp_edesc_alloc(req, 1); | 1209 | edesc = aead_edesc_alloc(req, 1); |
1145 | if (IS_ERR(edesc)) | 1210 | if (IS_ERR(edesc)) |
1146 | return PTR_ERR(edesc); | 1211 | return PTR_ERR(edesc); |
1147 | 1212 | ||
1148 | if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && | 1213 | if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && |
1149 | (((!edesc->src_nents && !edesc->dst_nents) || | 1214 | ((!edesc->src_nents && !edesc->dst_nents) || |
1150 | priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) { | 1215 | priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { |
1151 | 1216 | ||
1152 | /* decrypt and check the ICV */ | 1217 | /* decrypt and check the ICV */ |
1153 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND | | 1218 | edesc->desc.hdr = ctx->desc_hdr_template | |
1219 | DESC_HDR_DIR_INBOUND | | ||
1154 | DESC_HDR_MODE1_MDEU_CICV; | 1220 | DESC_HDR_MODE1_MDEU_CICV; |
1155 | 1221 | ||
1156 | /* reset integrity check result bits */ | 1222 | /* reset integrity check result bits */ |
1157 | edesc->desc.hdr_lo = 0; | 1223 | edesc->desc.hdr_lo = 0; |
1158 | 1224 | ||
1159 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done); | 1225 | return ipsec_esp(edesc, req, NULL, 0, |
1226 | ipsec_esp_decrypt_hwauth_done); | ||
1160 | 1227 | ||
1161 | } else { | 1228 | } |
1162 | |||
1163 | /* Have to check the ICV with software */ | ||
1164 | 1229 | ||
1165 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | 1230 | /* Have to check the ICV with software */ |
1231 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | ||
1166 | 1232 | ||
1167 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ | 1233 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ |
1168 | if (edesc->dma_len) | 1234 | if (edesc->dma_len) |
1169 | icvdata = &edesc->link_tbl[edesc->src_nents + | 1235 | icvdata = &edesc->link_tbl[edesc->src_nents + |
1170 | edesc->dst_nents + 2]; | 1236 | edesc->dst_nents + 2]; |
1171 | else | 1237 | else |
1172 | icvdata = &edesc->link_tbl[0]; | 1238 | icvdata = &edesc->link_tbl[0]; |
1173 | 1239 | ||
1174 | sg = sg_last(req->src, edesc->src_nents ? : 1); | 1240 | sg = sg_last(req->src, edesc->src_nents ? : 1); |
1175 | 1241 | ||
1176 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, | 1242 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, |
1177 | ctx->authsize); | 1243 | ctx->authsize); |
1178 | 1244 | ||
1179 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); | 1245 | return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); |
1180 | } | ||
1181 | } | 1246 | } |
1182 | 1247 | ||
1183 | static int aead_authenc_givencrypt( | 1248 | static int aead_givencrypt(struct aead_givcrypt_request *req) |
1184 | struct aead_givcrypt_request *req) | ||
1185 | { | 1249 | { |
1186 | struct aead_request *areq = &req->areq; | 1250 | struct aead_request *areq = &req->areq; |
1187 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1251 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
1188 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1252 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1189 | struct ipsec_esp_edesc *edesc; | 1253 | struct talitos_edesc *edesc; |
1190 | 1254 | ||
1191 | /* allocate extended descriptor */ | 1255 | /* allocate extended descriptor */ |
1192 | edesc = ipsec_esp_edesc_alloc(areq, 0); | 1256 | edesc = aead_edesc_alloc(areq, 0); |
1193 | if (IS_ERR(edesc)) | 1257 | if (IS_ERR(edesc)) |
1194 | return PTR_ERR(edesc); | 1258 | return PTR_ERR(edesc); |
1195 | 1259 | ||
@@ -1204,31 +1268,228 @@ static int aead_authenc_givencrypt(
1204 | ipsec_esp_encrypt_done); | 1268 | ipsec_esp_encrypt_done); |
1205 | } | 1269 | } |
1206 | 1270 | ||
1271 | static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | ||
1272 | const u8 *key, unsigned int keylen) | ||
1273 | { | ||
1274 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1275 | struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher); | ||
1276 | |||
1277 | if (keylen > TALITOS_MAX_KEY_SIZE) | ||
1278 | goto badkey; | ||
1279 | |||
1280 | if (keylen < alg->min_keysize || keylen > alg->max_keysize) | ||
1281 | goto badkey; | ||
1282 | |||
1283 | memcpy(&ctx->key, key, keylen); | ||
1284 | ctx->keylen = keylen; | ||
1285 | |||
1286 | return 0; | ||
1287 | |||
1288 | badkey: | ||
1289 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1290 | return -EINVAL; | ||
1291 | } | ||
1292 | |||
1293 | static void common_nonsnoop_unmap(struct device *dev, | ||
1294 | struct talitos_edesc *edesc, | ||
1295 | struct ablkcipher_request *areq) | ||
1296 | { | ||
1297 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | ||
1298 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); | ||
1299 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); | ||
1300 | |||
1301 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); | ||
1302 | |||
1303 | if (edesc->dma_len) | ||
1304 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | ||
1305 | DMA_BIDIRECTIONAL); | ||
1306 | } | ||
1307 | |||
1308 | static void ablkcipher_done(struct device *dev, | ||
1309 | struct talitos_desc *desc, void *context, | ||
1310 | int err) | ||
1311 | { | ||
1312 | struct ablkcipher_request *areq = context; | ||
1313 | struct talitos_edesc *edesc; | ||
1314 | |||
1315 | edesc = container_of(desc, struct talitos_edesc, desc); | ||
1316 | |||
1317 | common_nonsnoop_unmap(dev, edesc, areq); | ||
1318 | |||
1319 | kfree(edesc); | ||
1320 | |||
1321 | areq->base.complete(&areq->base, err); | ||
1322 | } | ||
1323 | |||
1324 | static int common_nonsnoop(struct talitos_edesc *edesc, | ||
1325 | struct ablkcipher_request *areq, | ||
1326 | u8 *giv, | ||
1327 | void (*callback) (struct device *dev, | ||
1328 | struct talitos_desc *desc, | ||
1329 | void *context, int error)) | ||
1330 | { | ||
1331 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1332 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1333 | struct device *dev = ctx->dev; | ||
1334 | struct talitos_desc *desc = &edesc->desc; | ||
1335 | unsigned int cryptlen = areq->nbytes; | ||
1336 | unsigned int ivsize; | ||
1337 | int sg_count, ret; | ||
1338 | |||
1339 | /* first DWORD empty */ | ||
1340 | desc->ptr[0].len = 0; | ||
1341 | desc->ptr[0].ptr = 0; | ||
1342 | desc->ptr[0].j_extent = 0; | ||
1343 | |||
1344 | /* cipher iv */ | ||
1345 | ivsize = crypto_ablkcipher_ivsize(cipher); | ||
1346 | map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0, | ||
1347 | DMA_TO_DEVICE); | ||
1348 | |||
1349 | /* cipher key */ | ||
1350 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | ||
1351 | (char *)&ctx->key, 0, DMA_TO_DEVICE); | ||
1352 | |||
1353 | /* | ||
1354 | * cipher in | ||
1355 | */ | ||
1356 | desc->ptr[3].len = cpu_to_be16(cryptlen); | ||
1357 | desc->ptr[3].j_extent = 0; | ||
1358 | |||
1359 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, | ||
1360 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL | ||
1361 | : DMA_TO_DEVICE, | ||
1362 | edesc->src_is_chained); | ||
1363 | |||
1364 | if (sg_count == 1) { | ||
1365 | desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); | ||
1366 | } else { | ||
1367 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, | ||
1368 | &edesc->link_tbl[0]); | ||
1369 | if (sg_count > 1) { | ||
1370 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1371 | desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl); | ||
1372 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | ||
1373 | edesc->dma_len, | ||
1374 | DMA_BIDIRECTIONAL); | ||
1375 | } else { | ||
1376 | /* Only one segment now, so no link tbl needed */ | ||
1377 | desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> | ||
1378 | src)); | ||
1379 | } | ||
1380 | } | ||
1381 | |||
1382 | /* cipher out */ | ||
1383 | desc->ptr[4].len = cpu_to_be16(cryptlen); | ||
1384 | desc->ptr[4].j_extent = 0; | ||
1385 | |||
1386 | if (areq->src != areq->dst) | ||
1387 | sg_count = talitos_map_sg(dev, areq->dst, | ||
1388 | edesc->dst_nents ? : 1, | ||
1389 | DMA_FROM_DEVICE, | ||
1390 | edesc->dst_is_chained); | ||
1391 | |||
1392 | if (sg_count == 1) { | ||
1393 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | ||
1394 | } else { | ||
1395 | struct talitos_ptr *link_tbl_ptr = | ||
1396 | &edesc->link_tbl[edesc->src_nents + 1]; | ||
1397 | |||
1398 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1399 | desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *) | ||
1400 | edesc->dma_link_tbl + | ||
1401 | edesc->src_nents + 1); | ||
1402 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | ||
1403 | link_tbl_ptr); | ||
1404 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | ||
1405 | edesc->dma_len, DMA_BIDIRECTIONAL); | ||
1406 | } | ||
1407 | |||
1408 | /* iv out */ | ||
1409 | map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, | ||
1410 | DMA_FROM_DEVICE); | ||
1411 | |||
1412 | /* last DWORD empty */ | ||
1413 | desc->ptr[6].len = 0; | ||
1414 | desc->ptr[6].ptr = 0; | ||
1415 | desc->ptr[6].j_extent = 0; | ||
1416 | |||
1417 | ret = talitos_submit(dev, desc, callback, areq); | ||
1418 | if (ret != -EINPROGRESS) { | ||
1419 | common_nonsnoop_unmap(dev, edesc, areq); | ||
1420 | kfree(edesc); | ||
1421 | } | ||
1422 | return ret; | ||
1423 | } | ||
1424 | |||
1425 | static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * | ||
1426 | areq) | ||
1427 | { | ||
1428 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1429 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1430 | |||
1431 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes, | ||
1432 | 0, 0, areq->base.flags); | ||
1433 | } | ||
1434 | |||
1435 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) | ||
1436 | { | ||
1437 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1438 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1439 | struct talitos_edesc *edesc; | ||
1440 | |||
1441 | /* allocate extended descriptor */ | ||
1442 | edesc = ablkcipher_edesc_alloc(areq); | ||
1443 | if (IS_ERR(edesc)) | ||
1444 | return PTR_ERR(edesc); | ||
1445 | |||
1446 | /* set encrypt */ | ||
1447 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; | ||
1448 | |||
1449 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | ||
1450 | } | ||
1451 | |||
1452 | static int ablkcipher_decrypt(struct ablkcipher_request *areq) | ||
1453 | { | ||
1454 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1455 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1456 | struct talitos_edesc *edesc; | ||
1457 | |||
1458 | /* allocate extended descriptor */ | ||
1459 | edesc = ablkcipher_edesc_alloc(areq); | ||
1460 | if (IS_ERR(edesc)) | ||
1461 | return PTR_ERR(edesc); | ||
1462 | |||
1463 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | ||
1464 | |||
1465 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | ||
1466 | } | ||
1467 | |||
1207 | struct talitos_alg_template { | 1468 | struct talitos_alg_template { |
1208 | char name[CRYPTO_MAX_ALG_NAME]; | 1469 | struct crypto_alg alg; |
1209 | char driver_name[CRYPTO_MAX_ALG_NAME]; | ||
1210 | unsigned int blocksize; | ||
1211 | struct aead_alg aead; | ||
1212 | struct device *dev; | ||
1213 | __be32 desc_hdr_template; | 1470 | __be32 desc_hdr_template; |
1214 | }; | 1471 | }; |
1215 | 1472 | ||
1216 | static struct talitos_alg_template driver_algs[] = { | 1473 | static struct talitos_alg_template driver_algs[] = { |
1217 | /* single-pass ipsec_esp descriptor */ | 1474 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ |
1218 | { | 1475 | { |
1219 | .name = "authenc(hmac(sha1),cbc(aes))", | 1476 | .alg = { |
1220 | .driver_name = "authenc-hmac-sha1-cbc-aes-talitos", | 1477 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
1221 | .blocksize = AES_BLOCK_SIZE, | 1478 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", |
1222 | .aead = { | 1479 | .cra_blocksize = AES_BLOCK_SIZE, |
1223 | .setkey = aead_authenc_setkey, | 1480 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1224 | .setauthsize = aead_authenc_setauthsize, | 1481 | .cra_type = &crypto_aead_type, |
1225 | .encrypt = aead_authenc_encrypt, | 1482 | .cra_aead = { |
1226 | .decrypt = aead_authenc_decrypt, | 1483 | .setkey = aead_setkey, |
1227 | .givencrypt = aead_authenc_givencrypt, | 1484 | .setauthsize = aead_setauthsize, |
1228 | .geniv = "<built-in>", | 1485 | .encrypt = aead_encrypt, |
1229 | .ivsize = AES_BLOCK_SIZE, | 1486 | .decrypt = aead_decrypt, |
1230 | .maxauthsize = SHA1_DIGEST_SIZE, | 1487 | .givencrypt = aead_givencrypt, |
1231 | }, | 1488 | .geniv = "<built-in>", |
1489 | .ivsize = AES_BLOCK_SIZE, | ||
1490 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1491 | } | ||
1492 | }, | ||
1232 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1493 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1233 | DESC_HDR_SEL0_AESU | | 1494 | DESC_HDR_SEL0_AESU | |
1234 | DESC_HDR_MODE0_AESU_CBC | | 1495 | DESC_HDR_MODE0_AESU_CBC | |
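With the ablkcipher entry points in place (the corresponding template registrations land elsewhere in this patch), the new path is exercised through the standard kernel crypto API of this era. A hedged usage sketch (demo_encrypt is hypothetical; request-allocation failure and completion synchronisation are elided):

```c
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_encrypt(struct scatterlist *src, struct scatterlist *dst,
			unsigned int nbytes, u8 *iv,
			const u8 *key, unsigned int keylen,
			crypto_completion_t done, void *done_ctx)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_ablkcipher_setkey(tfm, key, keylen); /* -> ablkcipher_setkey() */

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					done, done_ctx);
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	/* typically returns -EINPROGRESS; `done` fires from ablkcipher_done().
	 * Real code frees req and tfm only after completion (elided). */
	return crypto_ablkcipher_encrypt(req);
}
```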
@@ -1238,19 +1499,23 @@ static struct talitos_alg_template driver_algs[] = {
1238 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1499 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
1239 | }, | 1500 | }, |
1240 | { | 1501 | { |
1241 | .name = "authenc(hmac(sha1),cbc(des3_ede))", | 1502 | .alg = { |
1242 | .driver_name = "authenc-hmac-sha1-cbc-3des-talitos", | 1503 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", |
1243 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1504 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", |
1244 | .aead = { | 1505 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1245 | .setkey = aead_authenc_setkey, | 1506 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1246 | .setauthsize = aead_authenc_setauthsize, | 1507 | .cra_type = &crypto_aead_type, |
1247 | .encrypt = aead_authenc_encrypt, | 1508 | .cra_aead = { |
1248 | .decrypt = aead_authenc_decrypt, | 1509 | .setkey = aead_setkey, |
1249 | .givencrypt = aead_authenc_givencrypt, | 1510 | .setauthsize = aead_setauthsize, |
1250 | .geniv = "<built-in>", | 1511 | .encrypt = aead_encrypt, |
1251 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1512 | .decrypt = aead_decrypt, |
1252 | .maxauthsize = SHA1_DIGEST_SIZE, | 1513 | .givencrypt = aead_givencrypt, |
1253 | }, | 1514 | .geniv = "<built-in>", |
1515 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1516 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
1517 | } | ||
1518 | }, | ||
1254 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1519 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1255 | DESC_HDR_SEL0_DEU | | 1520 | DESC_HDR_SEL0_DEU | |
1256 | DESC_HDR_MODE0_DEU_CBC | | 1521 | DESC_HDR_MODE0_DEU_CBC | |
@@ -1261,19 +1526,23 @@ static struct talitos_alg_template driver_algs[] = {
1261 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1526 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
1262 | }, | 1527 | }, |
1263 | { | 1528 | { |
1264 | .name = "authenc(hmac(sha256),cbc(aes))", | 1529 | .alg = { |
1265 | .driver_name = "authenc-hmac-sha256-cbc-aes-talitos", | 1530 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
1266 | .blocksize = AES_BLOCK_SIZE, | 1531 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", |
1267 | .aead = { | 1532 | .cra_blocksize = AES_BLOCK_SIZE, |
1268 | .setkey = aead_authenc_setkey, | 1533 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1269 | .setauthsize = aead_authenc_setauthsize, | 1534 | .cra_type = &crypto_aead_type, |
1270 | .encrypt = aead_authenc_encrypt, | 1535 | .cra_aead = { |
1271 | .decrypt = aead_authenc_decrypt, | 1536 | .setkey = aead_setkey, |
1272 | .givencrypt = aead_authenc_givencrypt, | 1537 | .setauthsize = aead_setauthsize, |
1273 | .geniv = "<built-in>", | 1538 | .encrypt = aead_encrypt, |
1274 | .ivsize = AES_BLOCK_SIZE, | 1539 | .decrypt = aead_decrypt, |
1275 | .maxauthsize = SHA256_DIGEST_SIZE, | 1540 | .givencrypt = aead_givencrypt, |
1276 | }, | 1541 | .geniv = "<built-in>", |
1542 | .ivsize = AES_BLOCK_SIZE, | ||
1543 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
1544 | } | ||
1545 | }, | ||
1277 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1546 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1278 | DESC_HDR_SEL0_AESU | | 1547 | DESC_HDR_SEL0_AESU | |
1279 | DESC_HDR_MODE0_AESU_CBC | | 1548 | DESC_HDR_MODE0_AESU_CBC | |
@@ -1283,19 +1552,23 @@ static struct talitos_alg_template driver_algs[] = {
1283 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 1552 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
1284 | }, | 1553 | }, |
1285 | { | 1554 | { |
1286 | .name = "authenc(hmac(sha256),cbc(des3_ede))", | 1555 | .alg = { |
1287 | .driver_name = "authenc-hmac-sha256-cbc-3des-talitos", | 1556 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", |
1288 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1557 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", |
1289 | .aead = { | 1558 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1290 | .setkey = aead_authenc_setkey, | 1559 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1291 | .setauthsize = aead_authenc_setauthsize, | 1560 | .cra_type = &crypto_aead_type, |
1292 | .encrypt = aead_authenc_encrypt, | 1561 | .cra_aead = { |
1293 | .decrypt = aead_authenc_decrypt, | 1562 | .setkey = aead_setkey, |
1294 | .givencrypt = aead_authenc_givencrypt, | 1563 | .setauthsize = aead_setauthsize, |
1295 | .geniv = "<built-in>", | 1564 | .encrypt = aead_encrypt, |
1296 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1565 | .decrypt = aead_decrypt, |
1297 | .maxauthsize = SHA256_DIGEST_SIZE, | 1566 | .givencrypt = aead_givencrypt, |
1298 | }, | 1567 | .geniv = "<built-in>", |
1568 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1569 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
1570 | } | ||
1571 | }, | ||
1299 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1572 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1300 | DESC_HDR_SEL0_DEU | | 1573 | DESC_HDR_SEL0_DEU | |
1301 | DESC_HDR_MODE0_DEU_CBC | | 1574 | DESC_HDR_MODE0_DEU_CBC | |
@@ -1306,19 +1579,23 @@ static struct talitos_alg_template driver_algs[] = { | |||
1306 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 1579 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
1307 | }, | 1580 | }, |
1308 | { | 1581 | { |
1309 | .name = "authenc(hmac(md5),cbc(aes))", | 1582 | .alg = { |
1310 | .driver_name = "authenc-hmac-md5-cbc-aes-talitos", | 1583 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
1311 | .blocksize = AES_BLOCK_SIZE, | 1584 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", |
1312 | .aead = { | 1585 | .cra_blocksize = AES_BLOCK_SIZE, |
1313 | .setkey = aead_authenc_setkey, | 1586 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1314 | .setauthsize = aead_authenc_setauthsize, | 1587 | .cra_type = &crypto_aead_type, |
1315 | .encrypt = aead_authenc_encrypt, | 1588 | .cra_aead = { |
1316 | .decrypt = aead_authenc_decrypt, | 1589 | .setkey = aead_setkey, |
1317 | .givencrypt = aead_authenc_givencrypt, | 1590 | .setauthsize = aead_setauthsize, |
1318 | .geniv = "<built-in>", | 1591 | .encrypt = aead_encrypt, |
1319 | .ivsize = AES_BLOCK_SIZE, | 1592 | .decrypt = aead_decrypt, |
1320 | .maxauthsize = MD5_DIGEST_SIZE, | 1593 | .givencrypt = aead_givencrypt, |
1321 | }, | 1594 | .geniv = "<built-in>", |
1595 | .ivsize = AES_BLOCK_SIZE, | ||
1596 | .maxauthsize = MD5_DIGEST_SIZE, | ||
1597 | } | ||
1598 | }, | ||
1322 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1599 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1323 | DESC_HDR_SEL0_AESU | | 1600 | DESC_HDR_SEL0_AESU | |
1324 | DESC_HDR_MODE0_AESU_CBC | | 1601 | DESC_HDR_MODE0_AESU_CBC | |
@@ -1328,19 +1605,23 @@ static struct talitos_alg_template driver_algs[] = { | |||
1328 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 1605 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
1329 | }, | 1606 | }, |
1330 | { | 1607 | { |
1331 | .name = "authenc(hmac(md5),cbc(des3_ede))", | 1608 | .alg = { |
1332 | .driver_name = "authenc-hmac-md5-cbc-3des-talitos", | 1609 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
1333 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1610 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", |
1334 | .aead = { | 1611 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1335 | .setkey = aead_authenc_setkey, | 1612 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
1336 | .setauthsize = aead_authenc_setauthsize, | 1613 | .cra_type = &crypto_aead_type, |
1337 | .encrypt = aead_authenc_encrypt, | 1614 | .cra_aead = { |
1338 | .decrypt = aead_authenc_decrypt, | 1615 | .setkey = aead_setkey, |
1339 | .givencrypt = aead_authenc_givencrypt, | 1616 | .setauthsize = aead_setauthsize, |
1340 | .geniv = "<built-in>", | 1617 | .encrypt = aead_encrypt, |
1341 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1618 | .decrypt = aead_decrypt, |
1342 | .maxauthsize = MD5_DIGEST_SIZE, | 1619 | .givencrypt = aead_givencrypt, |
1343 | }, | 1620 | .geniv = "<built-in>", |
1621 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1622 | .maxauthsize = MD5_DIGEST_SIZE, | ||
1623 | } | ||
1624 | }, | ||
1344 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 1625 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
1345 | DESC_HDR_SEL0_DEU | | 1626 | DESC_HDR_SEL0_DEU | |
1346 | DESC_HDR_MODE0_DEU_CBC | | 1627 | DESC_HDR_MODE0_DEU_CBC | |
@@ -1349,6 +1630,52 @@ static struct talitos_alg_template driver_algs[] = { | |||
1349 | DESC_HDR_MODE1_MDEU_INIT | | 1630 | DESC_HDR_MODE1_MDEU_INIT | |
1350 | DESC_HDR_MODE1_MDEU_PAD | | 1631 | DESC_HDR_MODE1_MDEU_PAD | |
1351 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 1632 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
1633 | }, | ||
1634 | /* ABLKCIPHER algorithms. */ | ||
1635 | { | ||
1636 | .alg = { | ||
1637 | .cra_name = "cbc(aes)", | ||
1638 | .cra_driver_name = "cbc-aes-talitos", | ||
1639 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1640 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1641 | CRYPTO_ALG_ASYNC, | ||
1642 | .cra_type = &crypto_ablkcipher_type, | ||
1643 | .cra_ablkcipher = { | ||
1644 | .setkey = ablkcipher_setkey, | ||
1645 | .encrypt = ablkcipher_encrypt, | ||
1646 | .decrypt = ablkcipher_decrypt, | ||
1647 | .geniv = "eseqiv", | ||
1648 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1649 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1650 | .ivsize = AES_BLOCK_SIZE, | ||
1651 | } | ||
1652 | }, | ||
1653 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
1654 | DESC_HDR_SEL0_AESU | | ||
1655 | DESC_HDR_MODE0_AESU_CBC, | ||
1656 | }, | ||
1657 | { | ||
1658 | .alg = { | ||
1659 | .cra_name = "cbc(des3_ede)", | ||
1660 | .cra_driver_name = "cbc-3des-talitos", | ||
1661 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
1662 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1663 | CRYPTO_ALG_ASYNC, | ||
1664 | .cra_type = &crypto_ablkcipher_type, | ||
1665 | .cra_ablkcipher = { | ||
1666 | .setkey = ablkcipher_setkey, | ||
1667 | .encrypt = ablkcipher_encrypt, | ||
1668 | .decrypt = ablkcipher_decrypt, | ||
1669 | .geniv = "eseqiv", | ||
1670 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
1671 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
1672 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
1673 | } | ||
1674 | }, | ||
1675 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
1676 | DESC_HDR_SEL0_DEU | | ||
1677 | DESC_HDR_MODE0_DEU_CBC | | ||
1678 | DESC_HDR_MODE0_DEU_3DES, | ||
1352 | } | 1679 | } |
1353 | }; | 1680 | }; |
1354 | 1681 | ||
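The two added entries expose the SEC's AES and DES execution units as asynchronous block ciphers under the generic names "cbc(aes)" and "cbc(des3_ede)", with "eseqiv" as the default IV generator. A caller-side sketch of how such an ablkcipher was driven in this kernel generation — not part of the patch, and the demo_* names are hypothetical:

	#include <linux/crypto.h>
	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	struct demo_result {
		struct completion done;
		int err;
	};

	/* Completion callback: async drivers such as talitos report the
	 * final status here rather than from the encrypt call itself. */
	static void demo_done(struct crypto_async_request *areq, int err)
	{
		struct demo_result *res = areq->data;

		if (err == -EINPROGRESS)  /* backlogged request dequeued; keep waiting */
			return;
		res->err = err;
		complete(&res->done);
	}

	static int demo_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
					struct scatterlist *sg,
					unsigned int nbytes, u8 *iv)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct demo_result res;
		int err;

		/* Resolves to cbc-aes-talitos when the SEC is present and
		 * TALITOS_CRA_PRIORITY beats the software candidates. */
		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_ablkcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		init_completion(&res.done);
		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						demo_done, &res);
		/* In-place encrypt: src == dst scatterlist. */
		ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);

		err = crypto_ablkcipher_encrypt(req);
		if (err == -EINPROGRESS || err == -EBUSY) {
			wait_for_completion(&res.done);
			err = res.err;
		}

		ablkcipher_request_free(req);
	out_tfm:
		crypto_free_ablkcipher(tfm);
		return err;
	}

Because the entries set CRYPTO_ALG_ASYNC, crypto_ablkcipher_encrypt() normally returns -EINPROGRESS and the result arrives through the callback; -EBUSY occurs only when CRYPTO_TFM_REQ_MAY_BACKLOG parks the request behind a full channel.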
@@ -1362,12 +1689,14 @@ struct talitos_crypto_alg { | |||
1362 | static int talitos_cra_init(struct crypto_tfm *tfm) | 1689 | static int talitos_cra_init(struct crypto_tfm *tfm) |
1363 | { | 1690 | { |
1364 | struct crypto_alg *alg = tfm->__crt_alg; | 1691 | struct crypto_alg *alg = tfm->__crt_alg; |
1365 | struct talitos_crypto_alg *talitos_alg = | 1692 | struct talitos_crypto_alg *talitos_alg; |
1366 | container_of(alg, struct talitos_crypto_alg, crypto_alg); | ||
1367 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | 1693 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); |
1368 | 1694 | ||
1695 | talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg); | ||
1696 | |||
1369 | /* update context with ptr to dev */ | 1697 | /* update context with ptr to dev */ |
1370 | ctx->dev = talitos_alg->dev; | 1698 | ctx->dev = talitos_alg->dev; |
1699 | |||
1371 | /* copy descriptor header template value */ | 1700 | /* copy descriptor header template value */ |
1372 | ctx->desc_hdr_template = talitos_alg->desc_hdr_template; | 1701 | ctx->desc_hdr_template = talitos_alg->desc_hdr_template; |
1373 | 1702 | ||
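The talitos_cra_init() change is purely stylistic — the container_of() call moves out of the declarator into a statement — but it highlights the pattern the registration scheme rests on: the driver hands the embedded crypto_alg to the API, then recovers the wrapping talitos_crypto_alg from that member when a transform is initialized. A compact illustration (member names match those used in this hunk; their ordering is an assumption):

	struct talitos_crypto_alg {
		struct device *dev;            /* back-pointer copied into each ctx */
		__be32 desc_hdr_template;      /* copied into each ctx as well */
		struct crypto_alg crypto_alg;  /* the part registered with the API */
	};

	/* Given only the pointer the crypto core hands back ... */
	struct crypto_alg *alg = tfm->__crt_alg;
	/* ... container_of() rewinds to the enclosing driver object: */
	struct talitos_crypto_alg *talitos_alg =
		container_of(alg, struct talitos_crypto_alg, crypto_alg);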
@@ -1453,19 +1782,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
1453 | return ERR_PTR(-ENOMEM); | 1782 | return ERR_PTR(-ENOMEM); |
1454 | 1783 | ||
1455 | alg = &t_alg->crypto_alg; | 1784 | alg = &t_alg->crypto_alg; |
1785 | *alg = template->alg; | ||
1456 | 1786 | ||
1457 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); | ||
1458 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
1459 | template->driver_name); | ||
1460 | alg->cra_module = THIS_MODULE; | 1787 | alg->cra_module = THIS_MODULE; |
1461 | alg->cra_init = talitos_cra_init; | 1788 | alg->cra_init = talitos_cra_init; |
1462 | alg->cra_priority = TALITOS_CRA_PRIORITY; | 1789 | alg->cra_priority = TALITOS_CRA_PRIORITY; |
1463 | alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
1464 | alg->cra_blocksize = template->blocksize; | ||
1465 | alg->cra_alignmask = 0; | 1790 | alg->cra_alignmask = 0; |
1466 | alg->cra_type = &crypto_aead_type; | ||
1467 | alg->cra_ctxsize = sizeof(struct talitos_ctx); | 1791 | alg->cra_ctxsize = sizeof(struct talitos_ctx); |
1468 | alg->cra_u.aead = template->aead; | ||
1469 | 1792 | ||
1470 | t_alg->desc_hdr_template = template->desc_hdr_template; | 1793 | t_alg->desc_hdr_template = template->desc_hdr_template; |
1471 | t_alg->dev = dev; | 1794 | t_alg->dev = dev; |
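With the template now carrying a complete crypto_alg, talitos_alg_alloc() collapses into a struct copy plus the handful of fields that are driver-global rather than per-algorithm. A reconstruction of the function as it reads after this hunk — the allocation lines above the excerpt are filled in as plausible context, not quoted source:

	static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
			struct talitos_alg_template *template)
	{
		struct talitos_crypto_alg *t_alg;
		struct crypto_alg *alg;

		t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
		if (!t_alg)
			return ERR_PTR(-ENOMEM);

		alg = &t_alg->crypto_alg;
		*alg = template->alg;	/* one struct copy replaces six field copies */

		/* Only properties common to every talitos algorithm remain: */
		alg->cra_module = THIS_MODULE;
		alg->cra_init = talitos_cra_init;
		alg->cra_priority = TALITOS_CRA_PRIORITY;
		alg->cra_alignmask = 0;
		alg->cra_ctxsize = sizeof(struct talitos_ctx);

		t_alg->desc_hdr_template = template->desc_hdr_template;
		t_alg->dev = dev;

		return t_alg;
	}

Besides shedding the two snprintf() calls, the copy removes the AEAD-only assumptions the old code baked in — cra_flags forced to CRYPTO_ALG_TYPE_AEAD, cra_type forced to &crypto_aead_type, and cra_u.aead copied unconditionally — none of which would have been correct for the new ablkcipher entries.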