-rw-r--r--   crypto/digest.c                     2
-rw-r--r--   crypto/tcrypt.c                    28
-rw-r--r--   drivers/char/hw_random/via-rng.c    8
-rw-r--r--   drivers/crypto/padlock-aes.c       28
-rw-r--r--   drivers/crypto/padlock-sha.c        9
-rw-r--r--   drivers/crypto/talitos.c           54
-rw-r--r--   include/asm-x86/i387.h             32
-rw-r--r--   include/crypto/hash.h              18
8 files changed, 153 insertions, 26 deletions
diff --git a/crypto/digest.c b/crypto/digest.c
index ac0919460d14..5d3f1303da98 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -225,7 +225,7 @@ int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
 	struct ahash_tfm *crt = &tfm->crt_ahash;
 	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
 
-	if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
+	if (dalg->dia_digestsize > PAGE_SIZE / 8)
 		return -EINVAL;
 
 	crt->init = digest_async_init;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 59821a22d752..66368022e0bf 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -481,21 +481,31 @@ next_one:
 
 		for (k = 0, temp = 0; k < template[i].np; k++) {
 			printk(KERN_INFO "page %u\n", k);
-			q = &axbuf[IDX[k]];
-			hexdump(q, template[i].tap[k]);
+			q = &xbuf[IDX[k]];
+
+			n = template[i].tap[k];
+			if (k == template[i].np - 1)
+				n += enc ? authsize : -authsize;
+			hexdump(q, n);
 			printk(KERN_INFO "%s\n",
-			       memcmp(q, template[i].result + temp,
-				      template[i].tap[k] -
-				      (k < template[i].np - 1 || enc ?
-				       0 : authsize)) ?
+			       memcmp(q, template[i].result + temp, n) ?
 			       "fail" : "pass");
 
-			for (n = 0; q[template[i].tap[k] + n]; n++)
-				;
+			q += n;
+			if (k == template[i].np - 1 && !enc) {
+				if (memcmp(q, template[i].input +
+					   temp + n, authsize))
+					n = authsize;
+				else
+					n = 0;
+			} else {
+				for (n = 0; q[n]; n++)
+					;
+			}
 			if (n) {
 				printk("Result buffer corruption %u "
 				       "bytes:\n", n);
-				hexdump(&q[template[i].tap[k]], n);
+				hexdump(q, n);
 			}
 
 			temp += template[i].tap[k];
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index f7feae4ebb5e..128202e18fc9 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -31,6 +31,7 @@
 #include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/cpufeature.h>
+#include <asm/i387.h>
 
 
 #define PFX KBUILD_MODNAME ": "
@@ -67,16 +68,23 @@ enum {
  * Another possible performance boost may come from simply buffering
  * until we have 4 bytes, thus returning a u32 at a time,
  * instead of the current u8-at-a-time.
+ *
+ * Padlock instructions can generate a spurious DNA fault, so
+ * we have to call them in the context of irq_ts_save/restore()
  */
 
 static inline u32 xstore(u32 *addr, u32 edx_in)
 {
 	u32 eax_out;
+	int ts_state;
+
+	ts_state = irq_ts_save();
 
 	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
 		:"=m"(*addr), "=a"(eax_out)
 		:"D"(addr), "d"(edx_in));
 
+	irq_ts_restore(ts_state);
 	return eax_out;
 }
 
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 54a2a166e566..bf2917d197a0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
+#include <asm/i387.h>
 #include "padlock.h"
 
 /* Control word. */
@@ -141,6 +142,12 @@ static inline void padlock_reset_key(void)
 	asm volatile ("pushfl; popfl");
 }
 
+/*
+ * While the padlock instructions don't use FP/SSE registers, they
+ * generate a spurious DNA fault when cr0.ts is '1'. These instructions
+ * should be used only inside the irq_ts_save/restore() context
+ */
+
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
 				  void *control_word)
 {
@@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
+	int ts_state;
 	padlock_reset_key();
+
+	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+	irq_ts_restore(ts_state);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
+	int ts_state;
 	padlock_reset_key();
+
+	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+	irq_ts_restore(ts_state);
 }
 
 static struct crypto_alg aes_alg = {
@@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
+	int ts_state;
 
 	padlock_reset_key();
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
+	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->E, &ctx->cword.encrypt,
@@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
+	irq_ts_restore(ts_state);
 
 	return err;
 }
@@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
+	int ts_state;
 
 	padlock_reset_key();
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
+	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, &ctx->cword.decrypt,
@@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-
+	irq_ts_restore(ts_state);
 	return err;
 }
 
@@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
+	int ts_state;
 
 	padlock_reset_key();
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
+	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
 					    walk.dst.virt.addr, ctx->E,
@@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
+	irq_ts_restore(ts_state);
 
 	return err;
 }
@@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
+	int ts_state;
 
 	padlock_reset_key();
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
+	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
+	irq_ts_restore(ts_state);
 	return err;
 }
 
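All four blkcipher paths in this file follow the same shape: take the TS state once, run the whole blkcipher walk under it, then restore. A condensed sketch of that pattern (illustrative only; the real REP XCRYPT call, key schedule and error handling are elided, and padlock_walk_sketch is not a function in the driver):

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <asm/i387.h>

static int padlock_walk_sketch(struct blkcipher_desc *desc,
			       struct scatterlist *dst,
			       struct scatterlist *src,
			       unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();	/* one save/restore around the whole walk */
	while ((nbytes = walk.nbytes)) {
		/* ... hardware crypt of walk.src.virt.addr -> walk.dst.virt.addr ... */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	return err;
}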
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 40d5680fa013..a7fbadebf623 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
+#include <asm/i387.h>
 #include "padlock.h"
 
 #define SHA1_DEFAULT_FALLBACK "sha1-generic"
@@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count)
 	 * PadLock microcode needs it that big. */
 	char buf[128+16];
 	char *result = NEAREST_ALIGNED(buf);
+	int ts_state;
 
 	((uint32_t *)result)[0] = SHA1_H0;
 	((uint32_t *)result)[1] = SHA1_H1;
@@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count)
 	((uint32_t *)result)[3] = SHA1_H3;
 	((uint32_t *)result)[4] = SHA1_H4;
 
+	/* prevent taking the spurious DNA fault with padlock. */
+	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
 		      : "+S"(in), "+D"(result)
 		      : "c"(count), "a"(0));
+	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
 }
@@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count)
 	 * PadLock microcode needs it that big. */
 	char buf[128+16];
 	char *result = NEAREST_ALIGNED(buf);
+	int ts_state;
 
 	((uint32_t *)result)[0] = SHA256_H0;
 	((uint32_t *)result)[1] = SHA256_H1;
@@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count)
 	((uint32_t *)result)[6] = SHA256_H6;
 	((uint32_t *)result)[7] = SHA256_H7;
 
+	/* prevent taking the spurious DNA fault with padlock. */
+	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
 		      : "+S"(in), "+D"(result)
 		      : "c"(count), "a"(0));
+	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
 }
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 681c15f42083..ee827a7f7c6a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -96,6 +96,9 @@ struct talitos_private {
 	unsigned int exec_units;
 	unsigned int desc_types;
 
+	/* SEC Compatibility info */
+	unsigned long features;
+
 	/* next channel to be assigned next incoming descriptor */
 	atomic_t last_chan;
 
@@ -133,6 +136,9 @@ struct talitos_private {
 	struct hwrng rng;
 };
 
+/* .features flag */
+#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+
 /*
  * map virtual single (contiguous) pointer to h/w descriptor pointer
  */
@@ -785,7 +791,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
 	/* copy the generated ICV to dst */
 	if (edesc->dma_len) {
 		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 1];
+					   edesc->dst_nents + 2];
 		sg = sg_last(areq->dst, edesc->dst_nents);
 		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
 		       icvdata, ctx->authsize);
@@ -814,7 +820,7 @@ static void ipsec_esp_decrypt_done(struct device *dev,
 	/* auth check */
 	if (edesc->dma_len)
 		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 1];
+					   edesc->dst_nents + 2];
 	else
 		icvdata = &edesc->link_tbl[0];
 
@@ -921,10 +927,30 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
+			struct talitos_ptr *link_tbl_ptr =
+				&edesc->link_tbl[sg_count-1];
+			struct scatterlist *sg;
+			struct talitos_private *priv = dev_get_drvdata(dev);
+
 			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 			desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
 			dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
 						   edesc->dma_len, DMA_BIDIRECTIONAL);
+			/* If necessary for this SEC revision,
+			 * add a link table entry for ICV.
+			 */
+			if ((priv->features &
+			     TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) &&
+			    (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+				link_tbl_ptr->j_extent = 0;
+				link_tbl_ptr++;
+				link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+				link_tbl_ptr->len = cpu_to_be16(authsize);
+				sg = sg_last(areq->src, edesc->src_nents ? : 1);
+				link_tbl_ptr->ptr = cpu_to_be32(
+						(char *)sg_dma_address(sg)
+						+ sg->length - authsize);
+			}
 		} else {
 			/* Only one segment now, so no link tbl needed */
 			desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
@@ -944,12 +970,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 		desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
 	} else {
 		struct talitos_ptr *link_tbl_ptr =
-			&edesc->link_tbl[edesc->src_nents];
-		struct scatterlist *sg;
+			&edesc->link_tbl[edesc->src_nents + 1];
 
 		desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
 					       edesc->dma_link_tbl +
-					       edesc->src_nents);
+					       edesc->src_nents + 1);
 		if (areq->src == areq->dst) {
 			memcpy(link_tbl_ptr, &edesc->link_tbl[0],
 			       edesc->src_nents * sizeof(struct talitos_ptr));
@@ -957,14 +982,10 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 			sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
 						  link_tbl_ptr);
 		}
+		/* Add an entry to the link table for ICV data */
 		link_tbl_ptr += sg_count - 1;
-
-		/* handle case where sg_last contains the ICV exclusively */
-		sg = sg_last(areq->dst, edesc->dst_nents);
-		if (sg->length == ctx->authsize)
-			link_tbl_ptr--;
-
 		link_tbl_ptr->j_extent = 0;
+		sg_count++;
 		link_tbl_ptr++;
 		link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
 		link_tbl_ptr->len = cpu_to_be16(authsize);
@@ -973,7 +994,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 		link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
 						edesc->dma_link_tbl +
 						edesc->src_nents +
-						edesc->dst_nents + 1);
+						edesc->dst_nents + 2);
 
 		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1040,12 +1061,12 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
 
 	/*
 	 * allocate space for base edesc plus the link tables,
-	 * allowing for a separate entry for the generated ICV (+ 1),
+	 * allowing for two separate entries for ICV and generated ICV (+ 2),
 	 * and the ICV data itself
 	 */
 	alloc_len = sizeof(struct ipsec_esp_edesc);
 	if (src_nents || dst_nents) {
-		dma_len = (src_nents + dst_nents + 1) *
+		dma_len = (src_nents + dst_nents + 2) *
 			  sizeof(struct talitos_ptr) + ctx->authsize;
 		alloc_len += dma_len;
 	} else {
@@ -1104,7 +1125,7 @@ static int aead_authenc_decrypt(struct aead_request *req)
 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
 	if (edesc->dma_len)
 		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 1];
+					   edesc->dst_nents + 2];
 	else
 		icvdata = &edesc->link_tbl[0];
 
@@ -1480,6 +1501,9 @@ static int talitos_probe(struct of_device *ofdev,
 		goto err_out;
 	}
 
+	if (of_device_is_compatible(np, "fsl,sec3.0"))
+		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
+
 	priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
 				  GFP_KERNEL);
 	priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
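The edesc sizing above can be read as follows: per the patch's own comment, each request's DMA region now reserves two extra talitos_ptr entries, for the ICV and the generated ICV, plus authsize bytes in which the ICV is stashed; that stash sits right after the link-table entries, at &edesc->link_tbl[src_nents + dst_nents + 2], which is why the icvdata call sites move from "+ 1" to "+ 2". A restatement of the arithmetic (a sketch only, not a helper present in the driver):

/* Sketch: restates the dma_len computation used by ipsec_esp_edesc_alloc(). */
static size_t edesc_link_tbl_bytes(int src_nents, int dst_nents,
				   unsigned int authsize)
{
	/* two extra entries: one for the ICV, one for the generated ICV */
	return (src_nents + dst_nents + 2) * sizeof(struct talitos_ptr)
		+ authsize;	/* ICV data stashed after the entries */
}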
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 96fa8449ff11..6d3b21063419 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -13,6 +13,7 @@
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
+#include <linux/hardirq.h>
 #include <asm/asm.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
@@ -236,6 +237,37 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
+/*
+ * Some instructions like VIA's padlock instructions generate a spurious
+ * DNA fault but don't modify SSE registers. And these instructions
+ * get used from interrupt context aswell. To prevent these kernel instructions
+ * in interrupt context interact wrongly with other user/kernel fpu usage, we
+ * should use them only in the context of irq_ts_save/restore()
+ */
+static inline int irq_ts_save(void)
+{
+	/*
+	 * If we are in process context, we are ok to take a spurious DNA fault.
+	 * Otherwise, doing clts() in process context require pre-emption to
+	 * be disabled or some heavy lifting like kernel_fpu_begin()
+	 */
+	if (!in_interrupt())
+		return 0;
+
+	if (read_cr0() & X86_CR0_TS) {
+		clts();
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline void irq_ts_restore(int TS_state)
+{
+	if (TS_state)
+		stts();
+}
+
 #ifdef CONFIG_X86_64
 
 static inline void save_init_fpu(struct task_struct *tsk)
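A minimal usage sketch for the new pair, mirroring how the PadLock callers elsewhere in this series use it (the wrapper name and asm body below are illustrative, modelled on the padlock driver, and are not part of this header):

#include <linux/types.h>
#include <asm/i387.h>

/* Wrap an instruction that may raise a spurious DNA fault.  In process
 * context irq_ts_save() returns 0 and the fault is simply taken; in
 * interrupt context it clears CR0.TS (returning 1 if it did so), and
 * irq_ts_restore() sets TS again afterwards. */
static inline void xcrypt_one_block(const u8 *in, u8 *out, void *key,
				    void *control_word)
{
	int ts_state = irq_ts_save();

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S" (in), "+D" (out)
		      : "d" (control_word), "b" (key), "c" (1));

	irq_ts_restore(ts_state);
}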
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d12498ec8a4e..ee48ef8fb2ea 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -101,6 +101,24 @@ static inline int crypto_ahash_digest(struct ahash_request *req)
 	return crt->digest(req);
 }
 
+static inline int crypto_ahash_init(struct ahash_request *req)
+{
+	struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+	return crt->init(req);
+}
+
+static inline int crypto_ahash_update(struct ahash_request *req)
+{
+	struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+	return crt->update(req);
+}
+
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+	struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
+	return crt->final(req);
+}
+
 static inline void ahash_request_set_tfm(struct ahash_request *req,
 					 struct crypto_ahash *tfm)
 {
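A caller-side sketch of the new init/update/final helpers, assuming for simplicity a transform that completes synchronously (a real asynchronous user must handle -EINPROGRESS/-EBUSY through the request callback); the function name ahash_sha1_example is illustrative and not part of the API:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int ahash_sha1_example(struct scatterlist *sg, unsigned int nbytes,
			      u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, sg, out, nbytes);

	err = crypto_ahash_init(req);
	if (!err)
		err = crypto_ahash_update(req);
	if (!err)
		err = crypto_ahash_final(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}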