author	Herbert Xu <herbert@gondor.apana.org.au>	2005-10-30 05:25:15 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-01-09 17:15:34 -0500
commit	06ace7a9bafeb9047352707eb79e8eaa0dfdf5f2 (patch)
tree	fa22bbc2e8ea5bee00b6aec353783144b6f8735a /arch/i386/crypto
parent	2df15fffc612b53b2c8e4ff3c981a82441bc00ae (diff)
[CRYPTO] Use standard byte order macros wherever possible
A lot of crypto code needs to read or write 32-bit or 64-bit words in a specific byte order. Many of these sites open-code the conversion by reading or writing one byte at a time. This patch converts all the applicable usages over to the standard byte order macros. It is based on a previous patch by Denis Vlasenko.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
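For illustration only (this is not part of the patch, and the function names below are made up for the example): the minimal standalone userspace sketch that follows contrasts the open-coded byte-at-a-time little-endian load that this patch removes with an equivalent whole-word load, roughly what the kernel's le32_to_cpu() on a __le32 pointer boils down to. The raw word load assumes a little-endian host, as i386 is; in the kernel, le32_to_cpu() also handles the byte swap needed on big-endian machines.

/* Minimal userspace sketch; hypothetical names, not code from the patch. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Open-coded little-endian load, one byte at a time -- the style being removed. */
static uint32_t load_le32_bytewise(const uint8_t *p)
{
	return ((uint32_t)p[3] << 24) | ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[1] << 8)  |  (uint32_t)p[0];
}

/*
 * Whole-word load, roughly what le32_to_cpu(*(const __le32 *)p) does on a
 * little-endian CPU such as i386.  On a big-endian host this raw load would
 * need an explicit byte swap; in the kernel le32_to_cpu() takes care of that.
 */
static uint32_t load_le32_word(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	const uint8_t key[4] = { 0x01, 0x02, 0x03, 0x04 };

	/* Both print 0x04030201 on a little-endian host. */
	printf("byte-at-a-time: 0x%08" PRIx32 "\n", load_le32_bytewise(key));
	printf("word load:      0x%08" PRIx32 "\n", load_le32_word(key));
	return 0;
}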
Diffstat (limited to 'arch/i386/crypto')
-rw-r--r--	arch/i386/crypto/aes.c | 44
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c
index 88ee85c3b43b..1deb9ff564be 100644
--- a/arch/i386/crypto/aes.c
+++ b/arch/i386/crypto/aes.c
@@ -36,6 +36,8 @@
  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
  *
  */
+
+#include <asm/byteorder.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -59,7 +61,6 @@ struct aes_ctx {
 };
 
 #define WPOLY 0x011b
-#define u32_in(x) le32_to_cpup((const __le32 *)(x))
 #define bytes2word(b0, b1, b2, b3) \
 	(((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
 
@@ -393,13 +394,14 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
 	int i;
 	u32 ss[8];
 	struct aes_ctx *ctx = ctx_arg;
+	const __le32 *key = (const __le32 *)in_key;
 
 	/* encryption schedule */
 
-	ctx->ekey[0] = ss[0] = u32_in(in_key);
-	ctx->ekey[1] = ss[1] = u32_in(in_key + 4);
-	ctx->ekey[2] = ss[2] = u32_in(in_key + 8);
-	ctx->ekey[3] = ss[3] = u32_in(in_key + 12);
+	ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]);
+	ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]);
+	ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]);
+	ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]);
 
 	switch(key_len) {
 	case 16:
@@ -410,8 +412,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
 		break;
 
 	case 24:
-		ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
-		ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
+		ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
+		ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
 		for (i = 0; i < 7; i++)
 			ke6(ctx->ekey, i);
 		kel6(ctx->ekey, 7);
@@ -419,10 +421,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
 		break;
 
 	case 32:
-		ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
-		ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
-		ctx->ekey[6] = ss[6] = u32_in(in_key + 24);
-		ctx->ekey[7] = ss[7] = u32_in(in_key + 28);
+		ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
+		ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
+		ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]);
+		ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]);
 		for (i = 0; i < 6; i++)
 			ke8(ctx->ekey, i);
 		kel8(ctx->ekey, 6);
@@ -436,10 +438,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
 
 	/* decryption schedule */
 
-	ctx->dkey[0] = ss[0] = u32_in(in_key);
-	ctx->dkey[1] = ss[1] = u32_in(in_key + 4);
-	ctx->dkey[2] = ss[2] = u32_in(in_key + 8);
-	ctx->dkey[3] = ss[3] = u32_in(in_key + 12);
+	ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]);
+	ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]);
+	ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]);
+	ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]);
 
 	switch (key_len) {
 	case 16:
@@ -450,8 +452,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
 		break;
 
 	case 24:
-		ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
-		ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
+		ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
+		ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
 		kdf6(ctx->dkey, 0);
 		for (i = 1; i < 7; i++)
 			kd6(ctx->dkey, i);
@@ -459,10 +461,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
 		break;
 
 	case 32:
-		ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
-		ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
-		ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24));
-		ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28));
+		ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
+		ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
+		ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6]));
+		ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7]));
 		kdf8(ctx->dkey, 0);
 		for (i = 1; i < 6; i++)
 			kd8(ctx->dkey, i);