author     Eric Biggers <ebiggers@google.com>          2018-10-18 00:37:58 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>    2018-11-09 04:36:48 -0500
commit     0a6a40c2a8c184a2fb467efacfb1cd338d719e0b (patch)
tree       83c30f4e2775779925bd03c9d3f5f5f4f06887ae /crypto/aes_ti.c
parent     9f4debe38415583086ce814798eeb864aeb39551 (diff)
crypto: aes_ti - disable interrupts while accessing S-box
In the "aes-fixed-time" AES implementation, disable interrupts while accessing the S-box, in order to make cache-timing attacks more difficult. Previously it was possible for the CPU to be interrupted while the S-box was loaded into L1 cache, potentially evicting the cachelines and causing later table lookups to be time-variant. In tests I did on x86 and ARM, this doesn't affect performance significantly. Responsiveness is potentially a concern, but interrupts are only disabled for a single AES block. Note that even after this change, the implementation still isn't necessarily guaranteed to be constant-time; see https://cr.yp.to/antiforgery/cachetiming-20050414.pdf for a discussion of the many difficulties involved in writing truly constant-time AES software. But it's valuable to make such attacks more difficult. Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Eric Biggers <ebiggers@google.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto/aes_ti.c')
-rw-r--r--  crypto/aes_ti.c  18
1 file changed, 18 insertions, 0 deletions
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 03023b2290e8..1ff9785b30f5 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -269,6 +269,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	const u32 *rkp = ctx->key_enc + 4;
 	int rounds = 6 + ctx->key_length / 4;
 	u32 st0[4], st1[4];
+	unsigned long flags;
 	int round;
 
 	st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
@@ -276,6 +277,12 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
 	st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
 
+	/*
+	 * Temporarily disable interrupts to avoid races where cachelines are
+	 * evicted when the CPU is interrupted to do something else.
+	 */
+	local_irq_save(flags);
+
 	st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
 	st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
 	st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
@@ -300,6 +307,8 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
 	put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
 	put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
+
+	local_irq_restore(flags);
 }
 
 static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -308,6 +317,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	const u32 *rkp = ctx->key_dec + 4;
 	int rounds = 6 + ctx->key_length / 4;
 	u32 st0[4], st1[4];
+	unsigned long flags;
 	int round;
 
 	st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
@@ -315,6 +325,12 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
 	st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
 
+	/*
+	 * Temporarily disable interrupts to avoid races where cachelines are
+	 * evicted when the CPU is interrupted to do something else.
+	 */
+	local_irq_save(flags);
+
 	st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
 	st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
 	st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
@@ -339,6 +355,8 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
 	put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
 	put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
+
+	local_irq_restore(flags);
 }
 
 static struct crypto_alg aes_alg = {
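
For context, and not part of this patch: a minimal sketch of driving this cipher through the kernel's single-block cipher API. Each crypto_cipher_encrypt_one() call processes exactly one 16-byte block, which is also the span over which interrupts are now disabled. The function name and error handling here are illustrative, and requesting "aes-fixed-time" assumes that driver name (the one the commit message uses) is registered.

#include <linux/crypto.h>
#include <linux/err.h>

/* Sketch: encrypt one 16-byte block with the fixed-time AES driver. */
static int example_encrypt_block(const u8 *key, unsigned int keylen,
				 const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes-fixed-time", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);	/* one block per call */

	crypto_free_cipher(tfm);
	return err;
}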