author    | Herbert Xu <herbert@gondor.apana.org.au>     | 2005-10-30 05:25:15 -0500
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-01-09 17:15:34 -0500
commit    | 06ace7a9bafeb9047352707eb79e8eaa0dfdf5f2 (patch)
tree      | fa22bbc2e8ea5bee00b6aec353783144b6f8735a /crypto
parent    | 2df15fffc612b53b2c8e4ff3c981a82441bc00ae (diff)
[CRYPTO] Use standard byte order macros wherever possible
A lot of crypto code needs to read/write 32-bit or 64-bit words in a
specific byte order. Many implementations open-code this by reading or
writing one byte at a time. This patch converts all the applicable
usages over to the standard byte order macros.
This is based on a previous patch by Denis Vlasenko.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
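For illustration only, the conversion applied throughout this patch can be sketched as a minimal before/after pair. The helper names below are hypothetical and not part of the patch; the usual kernel headers are assumed:

```c
#include <asm/byteorder.h>
#include <linux/types.h>

/* Before: open-coded little-endian load, one byte at a time. */
static inline u32 load_le32_open_coded(const u8 *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
}

/* After: cast the buffer to an endian-annotated pointer and use the
 * standard macros; sparse can then check the conversions. */
static inline u32 load_le32_macro(const u8 *p)
{
	const __le32 *q = (const __le32 *)p;

	return le32_to_cpu(*q);
}

/* The store direction works the same way, shown here for big endian. */
static inline void store_be32_macro(u8 *p, u32 v)
{
	__be32 *q = (__be32 *)p;

	*q = cpu_to_be32(v);
}
```

The cast form assumes the buffers can be accessed as whole words, an assumption that was already implicit in the old u32_in()/u32_out() macros removed from aes.c and tea.c below.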
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/aes.c         | 60
-rw-r--r-- | crypto/anubis.c      | 38
-rw-r--r-- | crypto/blowfish.c    |  2
-rw-r--r-- | crypto/cast5.c       | 46
-rw-r--r-- | crypto/cast6.c       | 82
-rw-r--r-- | crypto/crc32c.c      |  1
-rw-r--r-- | crypto/des.c         |  2
-rw-r--r-- | crypto/khazad.c      | 45
-rw-r--r-- | crypto/md4.c         |  1
-rw-r--r-- | crypto/md5.c         |  1
-rw-r--r-- | crypto/michael_mic.c | 40
-rw-r--r-- | crypto/serpent.c     |  1
-rw-r--r-- | crypto/sha1.c        | 28
-rw-r--r-- | crypto/sha256.c      | 31
-rw-r--r-- | crypto/sha512.c      | 54
-rw-r--r-- | crypto/tea.c         | 95
-rw-r--r-- | crypto/tgr192.c      | 64
-rw-r--r-- | crypto/twofish.c     | 12
-rw-r--r-- | crypto/wp512.c       | 32
19 files changed, 240 insertions, 395 deletions
diff --git a/crypto/aes.c b/crypto/aes.c
index 5df92888ef5a..35a11deef29b 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -73,9 +73,6 @@ byte(const u32 x, const unsigned n) | |||
73 | return x >> (n << 3); | 73 | return x >> (n << 3); |
74 | } | 74 | } |
75 | 75 | ||
76 | #define u32_in(x) le32_to_cpu(*(const u32 *)(x)) | ||
77 | #define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from)) | ||
78 | |||
79 | struct aes_ctx { | 76 | struct aes_ctx { |
80 | int key_length; | 77 | int key_length; |
81 | u32 E[60]; | 78 | u32 E[60]; |
@@ -256,6 +253,7 @@ static int | |||
256 | aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | 253 | aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) |
257 | { | 254 | { |
258 | struct aes_ctx *ctx = ctx_arg; | 255 | struct aes_ctx *ctx = ctx_arg; |
256 | const __le32 *key = (const __le32 *)in_key; | ||
259 | u32 i, t, u, v, w; | 257 | u32 i, t, u, v, w; |
260 | 258 | ||
261 | if (key_len != 16 && key_len != 24 && key_len != 32) { | 259 | if (key_len != 16 && key_len != 24 && key_len != 32) { |
@@ -265,10 +263,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
265 | 263 | ||
266 | ctx->key_length = key_len; | 264 | ctx->key_length = key_len; |
267 | 265 | ||
268 | E_KEY[0] = u32_in (in_key); | 266 | E_KEY[0] = le32_to_cpu(key[0]); |
269 | E_KEY[1] = u32_in (in_key + 4); | 267 | E_KEY[1] = le32_to_cpu(key[1]); |
270 | E_KEY[2] = u32_in (in_key + 8); | 268 | E_KEY[2] = le32_to_cpu(key[2]); |
271 | E_KEY[3] = u32_in (in_key + 12); | 269 | E_KEY[3] = le32_to_cpu(key[3]); |
272 | 270 | ||
273 | switch (key_len) { | 271 | switch (key_len) { |
274 | case 16: | 272 | case 16: |
@@ -278,17 +276,17 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
278 | break; | 276 | break; |
279 | 277 | ||
280 | case 24: | 278 | case 24: |
281 | E_KEY[4] = u32_in (in_key + 16); | 279 | E_KEY[4] = le32_to_cpu(key[4]); |
282 | t = E_KEY[5] = u32_in (in_key + 20); | 280 | t = E_KEY[5] = le32_to_cpu(key[5]); |
283 | for (i = 0; i < 8; ++i) | 281 | for (i = 0; i < 8; ++i) |
284 | loop6 (i); | 282 | loop6 (i); |
285 | break; | 283 | break; |
286 | 284 | ||
287 | case 32: | 285 | case 32: |
288 | E_KEY[4] = u32_in (in_key + 16); | 286 | E_KEY[4] = le32_to_cpu(key[4]); |
289 | E_KEY[5] = u32_in (in_key + 20); | 287 | E_KEY[5] = le32_to_cpu(key[5]); |
290 | E_KEY[6] = u32_in (in_key + 24); | 288 | E_KEY[6] = le32_to_cpu(key[6]); |
291 | t = E_KEY[7] = u32_in (in_key + 28); | 289 | t = E_KEY[7] = le32_to_cpu(key[7]); |
292 | for (i = 0; i < 7; ++i) | 290 | for (i = 0; i < 7; ++i) |
293 | loop8 (i); | 291 | loop8 (i); |
294 | break; | 292 | break; |
@@ -324,13 +322,15 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
324 | static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) | 322 | static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) |
325 | { | 323 | { |
326 | const struct aes_ctx *ctx = ctx_arg; | 324 | const struct aes_ctx *ctx = ctx_arg; |
325 | const __le32 *src = (const __le32 *)in; | ||
326 | __le32 *dst = (__le32 *)out; | ||
327 | u32 b0[4], b1[4]; | 327 | u32 b0[4], b1[4]; |
328 | const u32 *kp = E_KEY + 4; | 328 | const u32 *kp = E_KEY + 4; |
329 | 329 | ||
330 | b0[0] = u32_in (in) ^ E_KEY[0]; | 330 | b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0]; |
331 | b0[1] = u32_in (in + 4) ^ E_KEY[1]; | 331 | b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1]; |
332 | b0[2] = u32_in (in + 8) ^ E_KEY[2]; | 332 | b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2]; |
333 | b0[3] = u32_in (in + 12) ^ E_KEY[3]; | 333 | b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3]; |
334 | 334 | ||
335 | if (ctx->key_length > 24) { | 335 | if (ctx->key_length > 24) { |
336 | f_nround (b1, b0, kp); | 336 | f_nround (b1, b0, kp); |
@@ -353,10 +353,10 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) | |||
353 | f_nround (b1, b0, kp); | 353 | f_nround (b1, b0, kp); |
354 | f_lround (b0, b1, kp); | 354 | f_lround (b0, b1, kp); |
355 | 355 | ||
356 | u32_out (out, b0[0]); | 356 | dst[0] = cpu_to_le32(b0[0]); |
357 | u32_out (out + 4, b0[1]); | 357 | dst[1] = cpu_to_le32(b0[1]); |
358 | u32_out (out + 8, b0[2]); | 358 | dst[2] = cpu_to_le32(b0[2]); |
359 | u32_out (out + 12, b0[3]); | 359 | dst[3] = cpu_to_le32(b0[3]); |
360 | } | 360 | } |
361 | 361 | ||
362 | /* decrypt a block of text */ | 362 | /* decrypt a block of text */ |
@@ -377,14 +377,16 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) | |||
377 | static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) | 377 | static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) |
378 | { | 378 | { |
379 | const struct aes_ctx *ctx = ctx_arg; | 379 | const struct aes_ctx *ctx = ctx_arg; |
380 | const __le32 *src = (const __le32 *)in; | ||
381 | __le32 *dst = (__le32 *)out; | ||
380 | u32 b0[4], b1[4]; | 382 | u32 b0[4], b1[4]; |
381 | const int key_len = ctx->key_length; | 383 | const int key_len = ctx->key_length; |
382 | const u32 *kp = D_KEY + key_len + 20; | 384 | const u32 *kp = D_KEY + key_len + 20; |
383 | 385 | ||
384 | b0[0] = u32_in (in) ^ E_KEY[key_len + 24]; | 386 | b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24]; |
385 | b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25]; | 387 | b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25]; |
386 | b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26]; | 388 | b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26]; |
387 | b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27]; | 389 | b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27]; |
388 | 390 | ||
389 | if (key_len > 24) { | 391 | if (key_len > 24) { |
390 | i_nround (b1, b0, kp); | 392 | i_nround (b1, b0, kp); |
@@ -407,10 +409,10 @@ static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) | |||
407 | i_nround (b1, b0, kp); | 409 | i_nround (b1, b0, kp); |
408 | i_lround (b0, b1, kp); | 410 | i_lround (b0, b1, kp); |
409 | 411 | ||
410 | u32_out (out, b0[0]); | 412 | dst[0] = cpu_to_le32(b0[0]); |
411 | u32_out (out + 4, b0[1]); | 413 | dst[1] = cpu_to_le32(b0[1]); |
412 | u32_out (out + 8, b0[2]); | 414 | dst[2] = cpu_to_le32(b0[2]); |
413 | u32_out (out + 12, b0[3]); | 415 | dst[3] = cpu_to_le32(b0[3]); |
414 | } | 416 | } |
415 | 417 | ||
416 | 418 | ||
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 3925eb0133cb..94c4b1f3e3a7 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -32,8 +32,10 @@ | |||
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | #include <asm/byteorder.h> | ||
35 | #include <asm/scatterlist.h> | 36 | #include <asm/scatterlist.h> |
36 | #include <linux/crypto.h> | 37 | #include <linux/crypto.h> |
38 | #include <linux/types.h> | ||
37 | 39 | ||
38 | #define ANUBIS_MIN_KEY_SIZE 16 | 40 | #define ANUBIS_MIN_KEY_SIZE 16 |
39 | #define ANUBIS_MAX_KEY_SIZE 40 | 41 | #define ANUBIS_MAX_KEY_SIZE 40 |
@@ -461,8 +463,8 @@ static const u32 rc[] = { | |||
461 | static int anubis_setkey(void *ctx_arg, const u8 *in_key, | 463 | static int anubis_setkey(void *ctx_arg, const u8 *in_key, |
462 | unsigned int key_len, u32 *flags) | 464 | unsigned int key_len, u32 *flags) |
463 | { | 465 | { |
464 | 466 | const __be32 *key = (const __be32 *)in_key; | |
465 | int N, R, i, pos, r; | 467 | int N, R, i, r; |
466 | u32 kappa[ANUBIS_MAX_N]; | 468 | u32 kappa[ANUBIS_MAX_N]; |
467 | u32 inter[ANUBIS_MAX_N]; | 469 | u32 inter[ANUBIS_MAX_N]; |
468 | 470 | ||
@@ -483,13 +485,8 @@ static int anubis_setkey(void *ctx_arg, const u8 *in_key, | |||
483 | ctx->R = R = 8 + N; | 485 | ctx->R = R = 8 + N; |
484 | 486 | ||
485 | /* * map cipher key to initial key state (mu): */ | 487 | /* * map cipher key to initial key state (mu): */ |
486 | for (i = 0, pos = 0; i < N; i++, pos += 4) { | 488 | for (i = 0; i < N; i++) |
487 | kappa[i] = | 489 | kappa[i] = be32_to_cpu(key[i]); |
488 | (in_key[pos ] << 24) ^ | ||
489 | (in_key[pos + 1] << 16) ^ | ||
490 | (in_key[pos + 2] << 8) ^ | ||
491 | (in_key[pos + 3] ); | ||
492 | } | ||
493 | 490 | ||
494 | /* | 491 | /* |
495 | * generate R + 1 round keys: | 492 | * generate R + 1 round keys: |
@@ -578,7 +575,9 @@ static int anubis_setkey(void *ctx_arg, const u8 *in_key, | |||
578 | static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], | 575 | static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], |
579 | u8 *ciphertext, const u8 *plaintext, const int R) | 576 | u8 *ciphertext, const u8 *plaintext, const int R) |
580 | { | 577 | { |
581 | int i, pos, r; | 578 | const __be32 *src = (const __be32 *)plaintext; |
579 | __be32 *dst = (__be32 *)ciphertext; | ||
580 | int i, r; | ||
582 | u32 state[4]; | 581 | u32 state[4]; |
583 | u32 inter[4]; | 582 | u32 inter[4]; |
584 | 583 | ||
@@ -586,14 +585,8 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], | |||
586 | * map plaintext block to cipher state (mu) | 585 | * map plaintext block to cipher state (mu) |
587 | * and add initial round key (sigma[K^0]): | 586 | * and add initial round key (sigma[K^0]): |
588 | */ | 587 | */ |
589 | for (i = 0, pos = 0; i < 4; i++, pos += 4) { | 588 | for (i = 0; i < 4; i++) |
590 | state[i] = | 589 | state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; |
591 | (plaintext[pos ] << 24) ^ | ||
592 | (plaintext[pos + 1] << 16) ^ | ||
593 | (plaintext[pos + 2] << 8) ^ | ||
594 | (plaintext[pos + 3] ) ^ | ||
595 | roundKey[0][i]; | ||
596 | } | ||
597 | 590 | ||
598 | /* | 591 | /* |
599 | * R - 1 full rounds: | 592 | * R - 1 full rounds: |
@@ -663,13 +656,8 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], | |||
663 | * map cipher state to ciphertext block (mu^{-1}): | 656 | * map cipher state to ciphertext block (mu^{-1}): |
664 | */ | 657 | */ |
665 | 658 | ||
666 | for (i = 0, pos = 0; i < 4; i++, pos += 4) { | 659 | for (i = 0; i < 4; i++) |
667 | u32 w = inter[i]; | 660 | dst[i] = cpu_to_be32(inter[i]); |
668 | ciphertext[pos ] = (u8)(w >> 24); | ||
669 | ciphertext[pos + 1] = (u8)(w >> 16); | ||
670 | ciphertext[pos + 2] = (u8)(w >> 8); | ||
671 | ciphertext[pos + 3] = (u8)(w ); | ||
672 | } | ||
673 | } | 661 | } |
674 | 662 | ||
675 | static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 663 | static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) |
diff --git a/crypto/blowfish.c b/crypto/blowfish.c
index a8b29d54e7d8..99fc45950d50 100644
--- a/crypto/blowfish.c
+++ b/crypto/blowfish.c
@@ -19,8 +19,10 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <asm/byteorder.h> | ||
22 | #include <asm/scatterlist.h> | 23 | #include <asm/scatterlist.h> |
23 | #include <linux/crypto.h> | 24 | #include <linux/crypto.h> |
25 | #include <linux/types.h> | ||
24 | 26 | ||
25 | #define BF_BLOCK_SIZE 8 | 27 | #define BF_BLOCK_SIZE 8 |
26 | #define BF_MIN_KEY_SIZE 4 | 28 | #define BF_MIN_KEY_SIZE 4 |
diff --git a/crypto/cast5.c b/crypto/cast5.c
index bc42f42b4fe3..282641c974a8 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -21,11 +21,13 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | 23 | ||
24 | #include <asm/byteorder.h> | ||
24 | #include <linux/init.h> | 25 | #include <linux/init.h> |
25 | #include <linux/crypto.h> | 26 | #include <linux/crypto.h> |
26 | #include <linux/module.h> | 27 | #include <linux/module.h> |
27 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
28 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/types.h> | ||
29 | 31 | ||
30 | #define CAST5_BLOCK_SIZE 8 | 32 | #define CAST5_BLOCK_SIZE 8 |
31 | #define CAST5_MIN_KEY_SIZE 5 | 33 | #define CAST5_MIN_KEY_SIZE 5 |
@@ -578,6 +580,8 @@ static const u32 sb8[256] = { | |||
578 | static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | 580 | static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) |
579 | { | 581 | { |
580 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 582 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; |
583 | const __be32 *src = (const __be32 *)inbuf; | ||
584 | __be32 *dst = (__be32 *)outbuf; | ||
581 | u32 l, r, t; | 585 | u32 l, r, t; |
582 | u32 I; /* used by the Fx macros */ | 586 | u32 I; /* used by the Fx macros */ |
583 | u32 *Km; | 587 | u32 *Km; |
@@ -589,8 +593,8 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | |||
589 | /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and | 593 | /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and |
590 | * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) | 594 | * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) |
591 | */ | 595 | */ |
592 | l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; | 596 | l = be32_to_cpu(src[0]); |
593 | r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; | 597 | r = be32_to_cpu(src[1]); |
594 | 598 | ||
595 | /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: | 599 | /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: |
596 | * Li = Ri-1; | 600 | * Li = Ri-1; |
@@ -634,19 +638,15 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | |||
634 | 638 | ||
635 | /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and | 639 | /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and |
636 | * concatenate to form the ciphertext.) */ | 640 | * concatenate to form the ciphertext.) */ |
637 | outbuf[0] = (r >> 24) & 0xff; | 641 | dst[0] = cpu_to_be32(r); |
638 | outbuf[1] = (r >> 16) & 0xff; | 642 | dst[1] = cpu_to_be32(l); |
639 | outbuf[2] = (r >> 8) & 0xff; | ||
640 | outbuf[3] = r & 0xff; | ||
641 | outbuf[4] = (l >> 24) & 0xff; | ||
642 | outbuf[5] = (l >> 16) & 0xff; | ||
643 | outbuf[6] = (l >> 8) & 0xff; | ||
644 | outbuf[7] = l & 0xff; | ||
645 | } | 643 | } |
646 | 644 | ||
647 | static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | 645 | static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) |
648 | { | 646 | { |
649 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 647 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; |
648 | const __be32 *src = (const __be32 *)inbuf; | ||
649 | __be32 *dst = (__be32 *)outbuf; | ||
650 | u32 l, r, t; | 650 | u32 l, r, t; |
651 | u32 I; | 651 | u32 I; |
652 | u32 *Km; | 652 | u32 *Km; |
@@ -655,8 +655,8 @@ static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | |||
655 | Km = c->Km; | 655 | Km = c->Km; |
656 | Kr = c->Kr; | 656 | Kr = c->Kr; |
657 | 657 | ||
658 | l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; | 658 | l = be32_to_cpu(src[0]); |
659 | r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; | 659 | r = be32_to_cpu(src[1]); |
660 | 660 | ||
661 | if (!(c->rr)) { | 661 | if (!(c->rr)) { |
662 | t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); | 662 | t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); |
@@ -690,14 +690,8 @@ static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | |||
690 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | 690 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); |
691 | } | 691 | } |
692 | 692 | ||
693 | outbuf[0] = (r >> 24) & 0xff; | 693 | dst[0] = cpu_to_be32(r); |
694 | outbuf[1] = (r >> 16) & 0xff; | 694 | dst[1] = cpu_to_be32(l); |
695 | outbuf[2] = (r >> 8) & 0xff; | ||
696 | outbuf[3] = r & 0xff; | ||
697 | outbuf[4] = (l >> 24) & 0xff; | ||
698 | outbuf[5] = (l >> 16) & 0xff; | ||
699 | outbuf[6] = (l >> 8) & 0xff; | ||
700 | outbuf[7] = l & 0xff; | ||
701 | } | 695 | } |
702 | 696 | ||
703 | static void key_schedule(u32 * x, u32 * z, u32 * k) | 697 | static void key_schedule(u32 * x, u32 * z, u32 * k) |
@@ -782,7 +776,7 @@ cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags) | |||
782 | u32 x[4]; | 776 | u32 x[4]; |
783 | u32 z[4]; | 777 | u32 z[4]; |
784 | u32 k[16]; | 778 | u32 k[16]; |
785 | u8 p_key[16]; | 779 | __be32 p_key[4]; |
786 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 780 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; |
787 | 781 | ||
788 | if (key_len < 5 || key_len > 16) { | 782 | if (key_len < 5 || key_len > 16) { |
@@ -796,12 +790,10 @@ cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags) | |||
796 | memcpy(p_key, key, key_len); | 790 | memcpy(p_key, key, key_len); |
797 | 791 | ||
798 | 792 | ||
799 | x[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; | 793 | x[0] = be32_to_cpu(p_key[0]); |
800 | x[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; | 794 | x[1] = be32_to_cpu(p_key[1]); |
801 | x[2] = | 795 | x[2] = be32_to_cpu(p_key[2]); |
802 | p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; | 796 | x[3] = be32_to_cpu(p_key[3]); |
803 | x[3] = | ||
804 | p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15]; | ||
805 | 797 | ||
806 | key_schedule(x, z, k); | 798 | key_schedule(x, z, k); |
807 | for (i = 0; i < 16; i++) | 799 | for (i = 0; i < 16; i++) |
diff --git a/crypto/cast6.c b/crypto/cast6.c
index 3eb081073423..d317fff6ea10 100644
--- a/crypto/cast6.c
+++ b/crypto/cast6.c
@@ -18,11 +18,13 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | 20 | ||
21 | #include <asm/byteorder.h> | ||
21 | #include <linux/init.h> | 22 | #include <linux/init.h> |
22 | #include <linux/crypto.h> | 23 | #include <linux/crypto.h> |
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
24 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
25 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/types.h> | ||
26 | 28 | ||
27 | #define CAST6_BLOCK_SIZE 16 | 29 | #define CAST6_BLOCK_SIZE 16 |
28 | #define CAST6_MIN_KEY_SIZE 16 | 30 | #define CAST6_MIN_KEY_SIZE 16 |
@@ -384,7 +386,7 @@ cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags) | |||
384 | { | 386 | { |
385 | int i; | 387 | int i; |
386 | u32 key[8]; | 388 | u32 key[8]; |
387 | u8 p_key[32]; /* padded key */ | 389 | __be32 p_key[8]; /* padded key */ |
388 | struct cast6_ctx *c = (struct cast6_ctx *) ctx; | 390 | struct cast6_ctx *c = (struct cast6_ctx *) ctx; |
389 | 391 | ||
390 | if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { | 392 | if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { |
@@ -395,14 +397,14 @@ cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags) | |||
395 | memset (p_key, 0, 32); | 397 | memset (p_key, 0, 32); |
396 | memcpy (p_key, in_key, key_len); | 398 | memcpy (p_key, in_key, key_len); |
397 | 399 | ||
398 | key[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; /* A */ | 400 | key[0] = be32_to_cpu(p_key[0]); /* A */ |
399 | key[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; /* B */ | 401 | key[1] = be32_to_cpu(p_key[1]); /* B */ |
400 | key[2] = p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; /* C */ | 402 | key[2] = be32_to_cpu(p_key[2]); /* C */ |
401 | key[3] = p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15]; /* D */ | 403 | key[3] = be32_to_cpu(p_key[3]); /* D */ |
402 | key[4] = p_key[16] << 24 | p_key[17] << 16 | p_key[18] << 8 | p_key[19]; /* E */ | 404 | key[4] = be32_to_cpu(p_key[4]); /* E */ |
403 | key[5] = p_key[20] << 24 | p_key[21] << 16 | p_key[22] << 8 | p_key[23]; /* F */ | 405 | key[5] = be32_to_cpu(p_key[5]); /* F */ |
404 | key[6] = p_key[24] << 24 | p_key[25] << 16 | p_key[26] << 8 | p_key[27]; /* G */ | 406 | key[6] = be32_to_cpu(p_key[6]); /* G */ |
405 | key[7] = p_key[28] << 24 | p_key[29] << 16 | p_key[30] << 8 | p_key[31]; /* H */ | 407 | key[7] = be32_to_cpu(p_key[7]); /* H */ |
406 | 408 | ||
407 | 409 | ||
408 | 410 | ||
@@ -444,14 +446,16 @@ static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) { | |||
444 | 446 | ||
445 | static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | 447 | static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { |
446 | struct cast6_ctx * c = (struct cast6_ctx *)ctx; | 448 | struct cast6_ctx * c = (struct cast6_ctx *)ctx; |
449 | const __be32 *src = (const __be32 *)inbuf; | ||
450 | __be32 *dst = (__be32 *)outbuf; | ||
447 | u32 block[4]; | 451 | u32 block[4]; |
448 | u32 * Km; | 452 | u32 * Km; |
449 | u8 * Kr; | 453 | u8 * Kr; |
450 | 454 | ||
451 | block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; | 455 | block[0] = be32_to_cpu(src[0]); |
452 | block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; | 456 | block[1] = be32_to_cpu(src[1]); |
453 | block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11]; | 457 | block[2] = be32_to_cpu(src[2]); |
454 | block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15]; | 458 | block[3] = be32_to_cpu(src[3]); |
455 | 459 | ||
456 | Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); | 460 | Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); |
457 | Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); | 461 | Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); |
@@ -465,35 +469,25 @@ static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | |||
465 | Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); | 469 | Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); |
466 | Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); | 470 | Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); |
467 | Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); | 471 | Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); |
468 | 472 | ||
469 | outbuf[0] = (block[0] >> 24) & 0xff; | 473 | dst[0] = cpu_to_be32(block[0]); |
470 | outbuf[1] = (block[0] >> 16) & 0xff; | 474 | dst[1] = cpu_to_be32(block[1]); |
471 | outbuf[2] = (block[0] >> 8) & 0xff; | 475 | dst[2] = cpu_to_be32(block[2]); |
472 | outbuf[3] = block[0] & 0xff; | 476 | dst[3] = cpu_to_be32(block[3]); |
473 | outbuf[4] = (block[1] >> 24) & 0xff; | ||
474 | outbuf[5] = (block[1] >> 16) & 0xff; | ||
475 | outbuf[6] = (block[1] >> 8) & 0xff; | ||
476 | outbuf[7] = block[1] & 0xff; | ||
477 | outbuf[8] = (block[2] >> 24) & 0xff; | ||
478 | outbuf[9] = (block[2] >> 16) & 0xff; | ||
479 | outbuf[10] = (block[2] >> 8) & 0xff; | ||
480 | outbuf[11] = block[2] & 0xff; | ||
481 | outbuf[12] = (block[3] >> 24) & 0xff; | ||
482 | outbuf[13] = (block[3] >> 16) & 0xff; | ||
483 | outbuf[14] = (block[3] >> 8) & 0xff; | ||
484 | outbuf[15] = block[3] & 0xff; | ||
485 | } | 477 | } |
486 | 478 | ||
487 | static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | 479 | static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { |
488 | struct cast6_ctx * c = (struct cast6_ctx *)ctx; | 480 | struct cast6_ctx * c = (struct cast6_ctx *)ctx; |
481 | const __be32 *src = (const __be32 *)inbuf; | ||
482 | __be32 *dst = (__be32 *)outbuf; | ||
489 | u32 block[4]; | 483 | u32 block[4]; |
490 | u32 * Km; | 484 | u32 * Km; |
491 | u8 * Kr; | 485 | u8 * Kr; |
492 | 486 | ||
493 | block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; | 487 | block[0] = be32_to_cpu(src[0]); |
494 | block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; | 488 | block[1] = be32_to_cpu(src[1]); |
495 | block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11]; | 489 | block[2] = be32_to_cpu(src[2]); |
496 | block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15]; | 490 | block[3] = be32_to_cpu(src[3]); |
497 | 491 | ||
498 | Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); | 492 | Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); |
499 | Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); | 493 | Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); |
@@ -508,22 +502,10 @@ static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | |||
508 | Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); | 502 | Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); |
509 | Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); | 503 | Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); |
510 | 504 | ||
511 | outbuf[0] = (block[0] >> 24) & 0xff; | 505 | dst[0] = cpu_to_be32(block[0]); |
512 | outbuf[1] = (block[0] >> 16) & 0xff; | 506 | dst[1] = cpu_to_be32(block[1]); |
513 | outbuf[2] = (block[0] >> 8) & 0xff; | 507 | dst[2] = cpu_to_be32(block[2]); |
514 | outbuf[3] = block[0] & 0xff; | 508 | dst[3] = cpu_to_be32(block[3]); |
515 | outbuf[4] = (block[1] >> 24) & 0xff; | ||
516 | outbuf[5] = (block[1] >> 16) & 0xff; | ||
517 | outbuf[6] = (block[1] >> 8) & 0xff; | ||
518 | outbuf[7] = block[1] & 0xff; | ||
519 | outbuf[8] = (block[2] >> 24) & 0xff; | ||
520 | outbuf[9] = (block[2] >> 16) & 0xff; | ||
521 | outbuf[10] = (block[2] >> 8) & 0xff; | ||
522 | outbuf[11] = block[2] & 0xff; | ||
523 | outbuf[12] = (block[3] >> 24) & 0xff; | ||
524 | outbuf[13] = (block[3] >> 16) & 0xff; | ||
525 | outbuf[14] = (block[3] >> 8) & 0xff; | ||
526 | outbuf[15] = block[3] & 0xff; | ||
527 | } | 509 | } |
528 | 510 | ||
529 | static struct crypto_alg alg = { | 511 | static struct crypto_alg alg = { |
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 256956cd9377..953362423a5c 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
18 | #include <linux/crc32c.h> | 18 | #include <linux/crc32c.h> |
19 | #include <linux/types.h> | ||
19 | #include <asm/byteorder.h> | 20 | #include <asm/byteorder.h> |
20 | 21 | ||
21 | #define CHKSUM_BLOCK_SIZE 32 | 22 | #define CHKSUM_BLOCK_SIZE 32 |
diff --git a/crypto/des.c b/crypto/des.c
index a3c863dddded..dae42981012c 100644
--- a/crypto/des.c
+++ b/crypto/des.c
@@ -12,11 +12,13 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <asm/byteorder.h> | ||
15 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
18 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
19 | #include <linux/crypto.h> | 20 | #include <linux/crypto.h> |
21 | #include <linux/types.h> | ||
20 | 22 | ||
21 | #define DES_KEY_SIZE 8 | 23 | #define DES_KEY_SIZE 8 |
22 | #define DES_EXPKEY_WORDS 32 | 24 | #define DES_EXPKEY_WORDS 32 |
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 738cb0dd1e7c..6809210362c1 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -22,8 +22,10 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <asm/byteorder.h> | ||
25 | #include <asm/scatterlist.h> | 26 | #include <asm/scatterlist.h> |
26 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> |
28 | #include <linux/types.h> | ||
27 | 29 | ||
28 | #define KHAZAD_KEY_SIZE 16 | 30 | #define KHAZAD_KEY_SIZE 16 |
29 | #define KHAZAD_BLOCK_SIZE 8 | 31 | #define KHAZAD_BLOCK_SIZE 8 |
@@ -755,8 +757,8 @@ static const u64 c[KHAZAD_ROUNDS + 1] = { | |||
755 | static int khazad_setkey(void *ctx_arg, const u8 *in_key, | 757 | static int khazad_setkey(void *ctx_arg, const u8 *in_key, |
756 | unsigned int key_len, u32 *flags) | 758 | unsigned int key_len, u32 *flags) |
757 | { | 759 | { |
758 | |||
759 | struct khazad_ctx *ctx = ctx_arg; | 760 | struct khazad_ctx *ctx = ctx_arg; |
761 | const __be64 *key = (const __be64 *)in_key; | ||
760 | int r; | 762 | int r; |
761 | const u64 *S = T7; | 763 | const u64 *S = T7; |
762 | u64 K2, K1; | 764 | u64 K2, K1; |
@@ -767,22 +769,8 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key, | |||
767 | return -EINVAL; | 769 | return -EINVAL; |
768 | } | 770 | } |
769 | 771 | ||
770 | K2 = ((u64)in_key[ 0] << 56) ^ | 772 | K2 = be64_to_cpu(key[0]); |
771 | ((u64)in_key[ 1] << 48) ^ | 773 | K1 = be64_to_cpu(key[1]); |
772 | ((u64)in_key[ 2] << 40) ^ | ||
773 | ((u64)in_key[ 3] << 32) ^ | ||
774 | ((u64)in_key[ 4] << 24) ^ | ||
775 | ((u64)in_key[ 5] << 16) ^ | ||
776 | ((u64)in_key[ 6] << 8) ^ | ||
777 | ((u64)in_key[ 7] ); | ||
778 | K1 = ((u64)in_key[ 8] << 56) ^ | ||
779 | ((u64)in_key[ 9] << 48) ^ | ||
780 | ((u64)in_key[10] << 40) ^ | ||
781 | ((u64)in_key[11] << 32) ^ | ||
782 | ((u64)in_key[12] << 24) ^ | ||
783 | ((u64)in_key[13] << 16) ^ | ||
784 | ((u64)in_key[14] << 8) ^ | ||
785 | ((u64)in_key[15] ); | ||
786 | 774 | ||
787 | /* setup the encrypt key */ | 775 | /* setup the encrypt key */ |
788 | for (r = 0; r <= KHAZAD_ROUNDS; r++) { | 776 | for (r = 0; r <= KHAZAD_ROUNDS; r++) { |
@@ -820,19 +808,12 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key, | |||
820 | static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], | 808 | static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], |
821 | u8 *ciphertext, const u8 *plaintext) | 809 | u8 *ciphertext, const u8 *plaintext) |
822 | { | 810 | { |
823 | 811 | const __be64 *src = (const __be64 *)plaintext; | |
812 | __be64 *dst = (__be64 *)ciphertext; | ||
824 | int r; | 813 | int r; |
825 | u64 state; | 814 | u64 state; |
826 | 815 | ||
827 | state = ((u64)plaintext[0] << 56) ^ | 816 | state = be64_to_cpu(*src) ^ roundKey[0]; |
828 | ((u64)plaintext[1] << 48) ^ | ||
829 | ((u64)plaintext[2] << 40) ^ | ||
830 | ((u64)plaintext[3] << 32) ^ | ||
831 | ((u64)plaintext[4] << 24) ^ | ||
832 | ((u64)plaintext[5] << 16) ^ | ||
833 | ((u64)plaintext[6] << 8) ^ | ||
834 | ((u64)plaintext[7] ) ^ | ||
835 | roundKey[0]; | ||
836 | 817 | ||
837 | for (r = 1; r < KHAZAD_ROUNDS; r++) { | 818 | for (r = 1; r < KHAZAD_ROUNDS; r++) { |
838 | state = T0[(int)(state >> 56) ] ^ | 819 | state = T0[(int)(state >> 56) ] ^ |
@@ -856,15 +837,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], | |||
856 | (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ | 837 | (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ |
857 | roundKey[KHAZAD_ROUNDS]; | 838 | roundKey[KHAZAD_ROUNDS]; |
858 | 839 | ||
859 | ciphertext[0] = (u8)(state >> 56); | 840 | *dst = cpu_to_be64(state); |
860 | ciphertext[1] = (u8)(state >> 48); | ||
861 | ciphertext[2] = (u8)(state >> 40); | ||
862 | ciphertext[3] = (u8)(state >> 32); | ||
863 | ciphertext[4] = (u8)(state >> 24); | ||
864 | ciphertext[5] = (u8)(state >> 16); | ||
865 | ciphertext[6] = (u8)(state >> 8); | ||
866 | ciphertext[7] = (u8)(state ); | ||
867 | |||
868 | } | 841 | } |
869 | 842 | ||
870 | static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 843 | static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) |
diff --git a/crypto/md4.c b/crypto/md4.c
index bef6a9e5ac9b..a2d6df5c0f8c 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/crypto.h> | 24 | #include <linux/crypto.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/types.h> | ||
27 | #include <asm/byteorder.h> | 28 | #include <asm/byteorder.h> |
28 | 29 | ||
29 | #define MD4_DIGEST_SIZE 16 | 30 | #define MD4_DIGEST_SIZE 16 |
diff --git a/crypto/md5.c b/crypto/md5.c
index 1ed45f9c263e..7f041aef5da2 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
21 | #include <linux/crypto.h> | 21 | #include <linux/crypto.h> |
22 | #include <linux/types.h> | ||
22 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> |
23 | 24 | ||
24 | #define MD5_DIGEST_SIZE 16 | 25 | #define MD5_DIGEST_SIZE 16 |
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index a470bcb3693e..4f6ab23e14ad 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -10,10 +10,12 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <asm/byteorder.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/string.h> | 16 | #include <linux/string.h> |
16 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
18 | #include <linux/types.h> | ||
17 | 19 | ||
18 | 20 | ||
19 | struct michael_mic_ctx { | 21 | struct michael_mic_ctx { |
@@ -43,21 +45,6 @@ do { \ | |||
43 | } while (0) | 45 | } while (0) |
44 | 46 | ||
45 | 47 | ||
46 | static inline u32 get_le32(const u8 *p) | ||
47 | { | ||
48 | return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); | ||
49 | } | ||
50 | |||
51 | |||
52 | static inline void put_le32(u8 *p, u32 v) | ||
53 | { | ||
54 | p[0] = v; | ||
55 | p[1] = v >> 8; | ||
56 | p[2] = v >> 16; | ||
57 | p[3] = v >> 24; | ||
58 | } | ||
59 | |||
60 | |||
61 | static void michael_init(void *ctx) | 48 | static void michael_init(void *ctx) |
62 | { | 49 | { |
63 | struct michael_mic_ctx *mctx = ctx; | 50 | struct michael_mic_ctx *mctx = ctx; |
@@ -68,6 +55,7 @@ static void michael_init(void *ctx) | |||
68 | static void michael_update(void *ctx, const u8 *data, unsigned int len) | 55 | static void michael_update(void *ctx, const u8 *data, unsigned int len) |
69 | { | 56 | { |
70 | struct michael_mic_ctx *mctx = ctx; | 57 | struct michael_mic_ctx *mctx = ctx; |
58 | const __le32 *src; | ||
71 | 59 | ||
72 | if (mctx->pending_len) { | 60 | if (mctx->pending_len) { |
73 | int flen = 4 - mctx->pending_len; | 61 | int flen = 4 - mctx->pending_len; |
@@ -81,21 +69,23 @@ static void michael_update(void *ctx, const u8 *data, unsigned int len) | |||
81 | if (mctx->pending_len < 4) | 69 | if (mctx->pending_len < 4) |
82 | return; | 70 | return; |
83 | 71 | ||
84 | mctx->l ^= get_le32(mctx->pending); | 72 | src = (const __le32 *)mctx->pending; |
73 | mctx->l ^= le32_to_cpup(src); | ||
85 | michael_block(mctx->l, mctx->r); | 74 | michael_block(mctx->l, mctx->r); |
86 | mctx->pending_len = 0; | 75 | mctx->pending_len = 0; |
87 | } | 76 | } |
88 | 77 | ||
78 | src = (const __le32 *)data; | ||
79 | |||
89 | while (len >= 4) { | 80 | while (len >= 4) { |
90 | mctx->l ^= get_le32(data); | 81 | mctx->l ^= le32_to_cpup(src++); |
91 | michael_block(mctx->l, mctx->r); | 82 | michael_block(mctx->l, mctx->r); |
92 | data += 4; | ||
93 | len -= 4; | 83 | len -= 4; |
94 | } | 84 | } |
95 | 85 | ||
96 | if (len > 0) { | 86 | if (len > 0) { |
97 | mctx->pending_len = len; | 87 | mctx->pending_len = len; |
98 | memcpy(mctx->pending, data, len); | 88 | memcpy(mctx->pending, src, len); |
99 | } | 89 | } |
100 | } | 90 | } |
101 | 91 | ||
@@ -104,6 +94,7 @@ static void michael_final(void *ctx, u8 *out) | |||
104 | { | 94 | { |
105 | struct michael_mic_ctx *mctx = ctx; | 95 | struct michael_mic_ctx *mctx = ctx; |
106 | u8 *data = mctx->pending; | 96 | u8 *data = mctx->pending; |
97 | __le32 *dst = (__le32 *)out; | ||
107 | 98 | ||
108 | /* Last block and padding (0x5a, 4..7 x 0) */ | 99 | /* Last block and padding (0x5a, 4..7 x 0) */ |
109 | switch (mctx->pending_len) { | 100 | switch (mctx->pending_len) { |
@@ -125,8 +116,8 @@ static void michael_final(void *ctx, u8 *out) | |||
125 | /* l ^= 0; */ | 116 | /* l ^= 0; */ |
126 | michael_block(mctx->l, mctx->r); | 117 | michael_block(mctx->l, mctx->r); |
127 | 118 | ||
128 | put_le32(out, mctx->l); | 119 | dst[0] = cpu_to_le32(mctx->l); |
129 | put_le32(out + 4, mctx->r); | 120 | dst[1] = cpu_to_le32(mctx->r); |
130 | } | 121 | } |
131 | 122 | ||
132 | 123 | ||
@@ -134,13 +125,16 @@ static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen, | |||
134 | u32 *flags) | 125 | u32 *flags) |
135 | { | 126 | { |
136 | struct michael_mic_ctx *mctx = ctx; | 127 | struct michael_mic_ctx *mctx = ctx; |
128 | const __le32 *data = (const __le32 *)key; | ||
129 | |||
137 | if (keylen != 8) { | 130 | if (keylen != 8) { |
138 | if (flags) | 131 | if (flags) |
139 | *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; | 132 | *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; |
140 | return -EINVAL; | 133 | return -EINVAL; |
141 | } | 134 | } |
142 | mctx->l = get_le32(key); | 135 | |
143 | mctx->r = get_le32(key + 4); | 136 | mctx->l = le32_to_cpu(data[0]); |
137 | mctx->r = le32_to_cpu(data[1]); | ||
144 | return 0; | 138 | return 0; |
145 | } | 139 | } |
146 | 140 | ||
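As a side note, the michael_mic change above is the streaming variant of the same pattern: the input pointer is cast once and then consumed word by word with le32_to_cpup(). A rough sketch of that access pattern (hypothetical helper, not from the patch):

```c
#include <asm/byteorder.h>
#include <linux/types.h>

/* Hypothetical helper: XOR a run of little-endian 32-bit words into an
 * accumulator, the way michael_update() now walks its input buffer. */
static u32 xor_le32_words(u32 acc, const u8 *data, unsigned int len)
{
	const __le32 *src = (const __le32 *)data;

	while (len >= 4) {
		acc ^= le32_to_cpup(src++);	/* advance one word per pass */
		len -= 4;
	}

	/* A real caller buffers any trailing len < 4 bytes, as
	 * michael_update() does with mctx->pending. */
	return acc;
}
```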
diff --git a/crypto/serpent.c b/crypto/serpent.c
index 3cf2c5067eea..a950ff85f632 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <asm/byteorder.h> | 21 | #include <asm/byteorder.h> |
22 | #include <linux/crypto.h> | 22 | #include <linux/crypto.h> |
23 | #include <linux/types.h> | ||
23 | 24 | ||
24 | /* Key is padded to the maximum of 256 bits before round key generation. | 25 | /* Key is padded to the maximum of 256 bits before round key generation. |
25 | * Any key length <= 256 bits (32 bytes) is allowed by the algorithm. | 26 | * Any key length <= 256 bits (32 bytes) is allowed by the algorithm. |
diff --git a/crypto/sha1.c b/crypto/sha1.c
index 4016f3b8ce9b..c686e7826174 100644
--- a/crypto/sha1.c
+++ b/crypto/sha1.c
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/crypto.h> | 22 | #include <linux/crypto.h> |
23 | #include <linux/cryptohash.h> | 23 | #include <linux/cryptohash.h> |
24 | #include <linux/types.h> | ||
24 | #include <asm/scatterlist.h> | 25 | #include <asm/scatterlist.h> |
25 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
26 | 27 | ||
@@ -72,20 +73,12 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len) | |||
72 | static void sha1_final(void* ctx, u8 *out) | 73 | static void sha1_final(void* ctx, u8 *out) |
73 | { | 74 | { |
74 | struct sha1_ctx *sctx = ctx; | 75 | struct sha1_ctx *sctx = ctx; |
75 | u32 i, j, index, padlen; | 76 | __be32 *dst = (__be32 *)out; |
76 | u64 t; | 77 | u32 i, index, padlen; |
77 | u8 bits[8] = { 0, }; | 78 | __be64 bits; |
78 | static const u8 padding[64] = { 0x80, }; | 79 | static const u8 padding[64] = { 0x80, }; |
79 | 80 | ||
80 | t = sctx->count; | 81 | bits = cpu_to_be64(sctx->count); |
81 | bits[7] = 0xff & t; t>>=8; | ||
82 | bits[6] = 0xff & t; t>>=8; | ||
83 | bits[5] = 0xff & t; t>>=8; | ||
84 | bits[4] = 0xff & t; t>>=8; | ||
85 | bits[3] = 0xff & t; t>>=8; | ||
86 | bits[2] = 0xff & t; t>>=8; | ||
87 | bits[1] = 0xff & t; t>>=8; | ||
88 | bits[0] = 0xff & t; | ||
89 | 82 | ||
90 | /* Pad out to 56 mod 64 */ | 83 | /* Pad out to 56 mod 64 */ |
91 | index = (sctx->count >> 3) & 0x3f; | 84 | index = (sctx->count >> 3) & 0x3f; |
@@ -93,16 +86,11 @@ static void sha1_final(void* ctx, u8 *out) | |||
93 | sha1_update(sctx, padding, padlen); | 86 | sha1_update(sctx, padding, padlen); |
94 | 87 | ||
95 | /* Append length */ | 88 | /* Append length */ |
96 | sha1_update(sctx, bits, sizeof bits); | 89 | sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); |
97 | 90 | ||
98 | /* Store state in digest */ | 91 | /* Store state in digest */ |
99 | for (i = j = 0; i < 5; i++, j += 4) { | 92 | for (i = 0; i < 5; i++) |
100 | u32 t2 = sctx->state[i]; | 93 | dst[i] = cpu_to_be32(sctx->state[i]); |
101 | out[j+3] = t2 & 0xff; t2>>=8; | ||
102 | out[j+2] = t2 & 0xff; t2>>=8; | ||
103 | out[j+1] = t2 & 0xff; t2>>=8; | ||
104 | out[j ] = t2 & 0xff; | ||
105 | } | ||
106 | 94 | ||
107 | /* Wipe context */ | 95 | /* Wipe context */ |
108 | memset(sctx, 0, sizeof *sctx); | 96 | memset(sctx, 0, sizeof *sctx); |
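The sha1_final() hunk above replaces eight hand-written byte stores of the length with a single __be64; the same idiom recurs in sha256 and sha512 below. A minimal sketch of it (the helper name is hypothetical):

```c
#include <asm/byteorder.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical sketch of the finalisation idiom: append a 64-bit
 * message length in big-endian order without per-byte shifting. */
static void append_be64_length(u8 *tail, u64 count_bits)
{
	__be64 bits = cpu_to_be64(count_bits);

	memcpy(tail, &bits, sizeof(bits));	/* 8 bytes, MSB first */
}
```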
diff --git a/crypto/sha256.c b/crypto/sha256.c
index c78da50a9b7a..9d5ef674d6a9 100644
--- a/crypto/sha256.c
+++ b/crypto/sha256.c
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/crypto.h> | 22 | #include <linux/crypto.h> |
23 | #include <linux/types.h> | ||
23 | #include <asm/scatterlist.h> | 24 | #include <asm/scatterlist.h> |
24 | #include <asm/byteorder.h> | 25 | #include <asm/byteorder.h> |
25 | 26 | ||
@@ -279,22 +280,15 @@ static void sha256_update(void *ctx, const u8 *data, unsigned int len) | |||
279 | static void sha256_final(void* ctx, u8 *out) | 280 | static void sha256_final(void* ctx, u8 *out) |
280 | { | 281 | { |
281 | struct sha256_ctx *sctx = ctx; | 282 | struct sha256_ctx *sctx = ctx; |
282 | u8 bits[8]; | 283 | __be32 *dst = (__be32 *)out; |
283 | unsigned int index, pad_len, t; | 284 | __be32 bits[2]; |
284 | int i, j; | 285 | unsigned int index, pad_len; |
286 | int i; | ||
285 | static const u8 padding[64] = { 0x80, }; | 287 | static const u8 padding[64] = { 0x80, }; |
286 | 288 | ||
287 | /* Save number of bits */ | 289 | /* Save number of bits */ |
288 | t = sctx->count[0]; | 290 | bits[1] = cpu_to_be32(sctx->count[0]); |
289 | bits[7] = t; t >>= 8; | 291 | bits[0] = cpu_to_be32(sctx->count[1]); |
290 | bits[6] = t; t >>= 8; | ||
291 | bits[5] = t; t >>= 8; | ||
292 | bits[4] = t; | ||
293 | t = sctx->count[1]; | ||
294 | bits[3] = t; t >>= 8; | ||
295 | bits[2] = t; t >>= 8; | ||
296 | bits[1] = t; t >>= 8; | ||
297 | bits[0] = t; | ||
298 | 292 | ||
299 | /* Pad out to 56 mod 64. */ | 293 | /* Pad out to 56 mod 64. */ |
300 | index = (sctx->count[0] >> 3) & 0x3f; | 294 | index = (sctx->count[0] >> 3) & 0x3f; |
@@ -302,16 +296,11 @@ static void sha256_final(void* ctx, u8 *out) | |||
302 | sha256_update(sctx, padding, pad_len); | 296 | sha256_update(sctx, padding, pad_len); |
303 | 297 | ||
304 | /* Append length (before padding) */ | 298 | /* Append length (before padding) */ |
305 | sha256_update(sctx, bits, 8); | 299 | sha256_update(sctx, (const u8 *)bits, sizeof(bits)); |
306 | 300 | ||
307 | /* Store state in digest */ | 301 | /* Store state in digest */ |
308 | for (i = j = 0; i < 8; i++, j += 4) { | 302 | for (i = 0; i < 8; i++) |
309 | t = sctx->state[i]; | 303 | dst[i] = cpu_to_be32(sctx->state[i]); |
310 | out[j+3] = t; t >>= 8; | ||
311 | out[j+2] = t; t >>= 8; | ||
312 | out[j+1] = t; t >>= 8; | ||
313 | out[j ] = t; | ||
314 | } | ||
315 | 304 | ||
316 | /* Zeroize sensitive information. */ | 305 | /* Zeroize sensitive information. */ |
317 | memset(sctx, 0, sizeof(*sctx)); | 306 | memset(sctx, 0, sizeof(*sctx)); |
diff --git a/crypto/sha512.c b/crypto/sha512.c
index c663438322e9..3e6e9392310c 100644
--- a/crypto/sha512.c
+++ b/crypto/sha512.c
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/crypto.h> | 19 | #include <linux/crypto.h> |
20 | #include <linux/types.h> | ||
20 | 21 | ||
21 | #include <asm/scatterlist.h> | 22 | #include <asm/scatterlist.h> |
22 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> |
@@ -235,39 +236,17 @@ static void | |||
235 | sha512_final(void *ctx, u8 *hash) | 236 | sha512_final(void *ctx, u8 *hash) |
236 | { | 237 | { |
237 | struct sha512_ctx *sctx = ctx; | 238 | struct sha512_ctx *sctx = ctx; |
238 | |||
239 | static u8 padding[128] = { 0x80, }; | 239 | static u8 padding[128] = { 0x80, }; |
240 | 240 | __be64 *dst = (__be64 *)hash; | |
241 | u32 t; | 241 | __be32 bits[4]; |
242 | u64 t2; | ||
243 | u8 bits[128]; | ||
244 | unsigned int index, pad_len; | 242 | unsigned int index, pad_len; |
245 | int i, j; | 243 | int i; |
246 | |||
247 | index = pad_len = t = i = j = 0; | ||
248 | t2 = 0; | ||
249 | 244 | ||
250 | /* Save number of bits */ | 245 | /* Save number of bits */ |
251 | t = sctx->count[0]; | 246 | bits[3] = cpu_to_be32(sctx->count[0]); |
252 | bits[15] = t; t>>=8; | 247 | bits[2] = cpu_to_be32(sctx->count[1]); |
253 | bits[14] = t; t>>=8; | 248 | bits[1] = cpu_to_be32(sctx->count[2]); |
254 | bits[13] = t; t>>=8; | 249 | bits[0] = cpu_to_be32(sctx->count[3]); |
255 | bits[12] = t; | ||
256 | t = sctx->count[1]; | ||
257 | bits[11] = t; t>>=8; | ||
258 | bits[10] = t; t>>=8; | ||
259 | bits[9 ] = t; t>>=8; | ||
260 | bits[8 ] = t; | ||
261 | t = sctx->count[2]; | ||
262 | bits[7 ] = t; t>>=8; | ||
263 | bits[6 ] = t; t>>=8; | ||
264 | bits[5 ] = t; t>>=8; | ||
265 | bits[4 ] = t; | ||
266 | t = sctx->count[3]; | ||
267 | bits[3 ] = t; t>>=8; | ||
268 | bits[2 ] = t; t>>=8; | ||
269 | bits[1 ] = t; t>>=8; | ||
270 | bits[0 ] = t; | ||
271 | 250 | ||
272 | /* Pad out to 112 mod 128. */ | 251 | /* Pad out to 112 mod 128. */ |
273 | index = (sctx->count[0] >> 3) & 0x7f; | 252 | index = (sctx->count[0] >> 3) & 0x7f; |
@@ -275,21 +254,12 @@ sha512_final(void *ctx, u8 *hash) | |||
275 | sha512_update(sctx, padding, pad_len); | 254 | sha512_update(sctx, padding, pad_len); |
276 | 255 | ||
277 | /* Append length (before padding) */ | 256 | /* Append length (before padding) */ |
278 | sha512_update(sctx, bits, 16); | 257 | sha512_update(sctx, (const u8 *)bits, sizeof(bits)); |
279 | 258 | ||
280 | /* Store state in digest */ | 259 | /* Store state in digest */ |
281 | for (i = j = 0; i < 8; i++, j += 8) { | 260 | for (i = 0; i < 8; i++) |
282 | t2 = sctx->state[i]; | 261 | dst[i] = cpu_to_be64(sctx->state[i]); |
283 | hash[j+7] = (char)t2 & 0xff; t2>>=8; | 262 | |
284 | hash[j+6] = (char)t2 & 0xff; t2>>=8; | ||
285 | hash[j+5] = (char)t2 & 0xff; t2>>=8; | ||
286 | hash[j+4] = (char)t2 & 0xff; t2>>=8; | ||
287 | hash[j+3] = (char)t2 & 0xff; t2>>=8; | ||
288 | hash[j+2] = (char)t2 & 0xff; t2>>=8; | ||
289 | hash[j+1] = (char)t2 & 0xff; t2>>=8; | ||
290 | hash[j ] = (char)t2 & 0xff; | ||
291 | } | ||
292 | |||
293 | /* Zeroize sensitive information. */ | 263 | /* Zeroize sensitive information. */ |
294 | memset(sctx, 0, sizeof(struct sha512_ctx)); | 264 | memset(sctx, 0, sizeof(struct sha512_ctx)); |
295 | } | 265 | } |
diff --git a/crypto/tea.c b/crypto/tea.c
index 5924efdd3a16..e0077c72ec2a 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -22,8 +22,10 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <asm/byteorder.h> | ||
25 | #include <asm/scatterlist.h> | 26 | #include <asm/scatterlist.h> |
26 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> |
28 | #include <linux/types.h> | ||
27 | 29 | ||
28 | #define TEA_KEY_SIZE 16 | 30 | #define TEA_KEY_SIZE 16 |
29 | #define TEA_BLOCK_SIZE 8 | 31 | #define TEA_BLOCK_SIZE 8 |
@@ -35,9 +37,6 @@ | |||
35 | #define XTEA_ROUNDS 32 | 37 | #define XTEA_ROUNDS 32 |
36 | #define XTEA_DELTA 0x9e3779b9 | 38 | #define XTEA_DELTA 0x9e3779b9 |
37 | 39 | ||
38 | #define u32_in(x) le32_to_cpu(*(const __le32 *)(x)) | ||
39 | #define u32_out(to, from) (*(__le32 *)(to) = cpu_to_le32(from)) | ||
40 | |||
41 | struct tea_ctx { | 40 | struct tea_ctx { |
42 | u32 KEY[4]; | 41 | u32 KEY[4]; |
43 | }; | 42 | }; |
@@ -49,8 +48,8 @@ struct xtea_ctx { | |||
49 | static int tea_setkey(void *ctx_arg, const u8 *in_key, | 48 | static int tea_setkey(void *ctx_arg, const u8 *in_key, |
50 | unsigned int key_len, u32 *flags) | 49 | unsigned int key_len, u32 *flags) |
51 | { | 50 | { |
52 | |||
53 | struct tea_ctx *ctx = ctx_arg; | 51 | struct tea_ctx *ctx = ctx_arg; |
52 | const __le32 *key = (const __le32 *)in_key; | ||
54 | 53 | ||
55 | if (key_len != 16) | 54 | if (key_len != 16) |
56 | { | 55 | { |
@@ -58,10 +57,10 @@ static int tea_setkey(void *ctx_arg, const u8 *in_key, | |||
58 | return -EINVAL; | 57 | return -EINVAL; |
59 | } | 58 | } |
60 | 59 | ||
61 | ctx->KEY[0] = u32_in (in_key); | 60 | ctx->KEY[0] = le32_to_cpu(key[0]); |
62 | ctx->KEY[1] = u32_in (in_key + 4); | 61 | ctx->KEY[1] = le32_to_cpu(key[1]); |
63 | ctx->KEY[2] = u32_in (in_key + 8); | 62 | ctx->KEY[2] = le32_to_cpu(key[2]); |
64 | ctx->KEY[3] = u32_in (in_key + 12); | 63 | ctx->KEY[3] = le32_to_cpu(key[3]); |
65 | 64 | ||
66 | return 0; | 65 | return 0; |
67 | 66 | ||
@@ -73,9 +72,11 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
73 | u32 k0, k1, k2, k3; | 72 | u32 k0, k1, k2, k3; |
74 | 73 | ||
75 | struct tea_ctx *ctx = ctx_arg; | 74 | struct tea_ctx *ctx = ctx_arg; |
75 | const __le32 *in = (const __le32 *)src; | ||
76 | __le32 *out = (__le32 *)dst; | ||
76 | 77 | ||
77 | y = u32_in (src); | 78 | y = le32_to_cpu(in[0]); |
78 | z = u32_in (src + 4); | 79 | z = le32_to_cpu(in[1]); |
79 | 80 | ||
80 | k0 = ctx->KEY[0]; | 81 | k0 = ctx->KEY[0]; |
81 | k1 = ctx->KEY[1]; | 82 | k1 = ctx->KEY[1]; |
@@ -90,19 +91,20 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
90 | z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); | 91 | z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); |
91 | } | 92 | } |
92 | 93 | ||
93 | u32_out (dst, y); | 94 | out[0] = cpu_to_le32(y); |
94 | u32_out (dst + 4, z); | 95 | out[1] = cpu_to_le32(z); |
95 | } | 96 | } |
96 | 97 | ||
97 | static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 98 | static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) |
98 | { | 99 | { |
99 | u32 y, z, n, sum; | 100 | u32 y, z, n, sum; |
100 | u32 k0, k1, k2, k3; | 101 | u32 k0, k1, k2, k3; |
101 | |||
102 | struct tea_ctx *ctx = ctx_arg; | 102 | struct tea_ctx *ctx = ctx_arg; |
103 | const __le32 *in = (const __le32 *)src; | ||
104 | __le32 *out = (__le32 *)dst; | ||
103 | 105 | ||
104 | y = u32_in (src); | 106 | y = le32_to_cpu(in[0]); |
105 | z = u32_in (src + 4); | 107 | z = le32_to_cpu(in[1]); |
106 | 108 | ||
107 | k0 = ctx->KEY[0]; | 109 | k0 = ctx->KEY[0]; |
108 | k1 = ctx->KEY[1]; | 110 | k1 = ctx->KEY[1]; |
@@ -119,16 +121,15 @@ static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
119 | sum -= TEA_DELTA; | 121 | sum -= TEA_DELTA; |
120 | } | 122 | } |
121 | 123 | ||
122 | u32_out (dst, y); | 124 | out[0] = cpu_to_le32(y); |
123 | u32_out (dst + 4, z); | 125 | out[1] = cpu_to_le32(z); |
124 | |||
125 | } | 126 | } |
126 | 127 | ||
127 | static int xtea_setkey(void *ctx_arg, const u8 *in_key, | 128 | static int xtea_setkey(void *ctx_arg, const u8 *in_key, |
128 | unsigned int key_len, u32 *flags) | 129 | unsigned int key_len, u32 *flags) |
129 | { | 130 | { |
130 | |||
131 | struct xtea_ctx *ctx = ctx_arg; | 131 | struct xtea_ctx *ctx = ctx_arg; |
132 | const __le32 *key = (const __le32 *)in_key; | ||
132 | 133 | ||
133 | if (key_len != 16) | 134 | if (key_len != 16) |
134 | { | 135 | { |
@@ -136,10 +137,10 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key, | |||
136 | return -EINVAL; | 137 | return -EINVAL; |
137 | } | 138 | } |
138 | 139 | ||
139 | ctx->KEY[0] = u32_in (in_key); | 140 | ctx->KEY[0] = le32_to_cpu(key[0]); |
140 | ctx->KEY[1] = u32_in (in_key + 4); | 141 | ctx->KEY[1] = le32_to_cpu(key[1]); |
141 | ctx->KEY[2] = u32_in (in_key + 8); | 142 | ctx->KEY[2] = le32_to_cpu(key[2]); |
142 | ctx->KEY[3] = u32_in (in_key + 12); | 143 | ctx->KEY[3] = le32_to_cpu(key[3]); |
143 | 144 | ||
144 | return 0; | 145 | return 0; |
145 | 146 | ||
@@ -147,14 +148,15 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key, | |||
147 | 148 | ||
148 | static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 149 | static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) |
149 | { | 150 | { |
150 | |||
151 | u32 y, z, sum = 0; | 151 | u32 y, z, sum = 0; |
152 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; | 152 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; |
153 | 153 | ||
154 | struct xtea_ctx *ctx = ctx_arg; | 154 | struct xtea_ctx *ctx = ctx_arg; |
155 | const __le32 *in = (const __le32 *)src; | ||
156 | __le32 *out = (__le32 *)dst; | ||
155 | 157 | ||
156 | y = u32_in (src); | 158 | y = le32_to_cpu(in[0]); |
157 | z = u32_in (src + 4); | 159 | z = le32_to_cpu(in[1]); |
158 | 160 | ||
159 | while (sum != limit) { | 161 | while (sum != limit) { |
160 | y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); | 162 | y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); |
@@ -162,19 +164,19 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
162 | z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); | 164 | z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); |
163 | } | 165 | } |
164 | 166 | ||
165 | u32_out (dst, y); | 167 | out[0] = cpu_to_le32(y); |
166 | u32_out (dst + 4, z); | 168 | out[1] = cpu_to_le32(z); |
167 | |||
168 | } | 169 | } |
169 | 170 | ||
170 | static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 171 | static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) |
171 | { | 172 | { |
172 | |||
173 | u32 y, z, sum; | 173 | u32 y, z, sum; |
174 | struct tea_ctx *ctx = ctx_arg; | 174 | struct tea_ctx *ctx = ctx_arg; |
175 | const __le32 *in = (const __le32 *)src; | ||
176 | __le32 *out = (__le32 *)dst; | ||
175 | 177 | ||
176 | y = u32_in (src); | 178 | y = le32_to_cpu(in[0]); |
177 | z = u32_in (src + 4); | 179 | z = le32_to_cpu(in[1]); |
178 | 180 | ||
179 | sum = XTEA_DELTA * XTEA_ROUNDS; | 181 | sum = XTEA_DELTA * XTEA_ROUNDS; |
180 | 182 | ||
@@ -184,22 +186,22 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
184 | y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); | 186 | y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); |
185 | } | 187 | } |
186 | 188 | ||
187 | u32_out (dst, y); | 189 | out[0] = cpu_to_le32(y); |
188 | u32_out (dst + 4, z); | 190 | out[1] = cpu_to_le32(z); |
189 | |||
190 | } | 191 | } |
191 | 192 | ||
192 | 193 | ||
193 | static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 194 | static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) |
194 | { | 195 | { |
195 | |||
196 | u32 y, z, sum = 0; | 196 | u32 y, z, sum = 0; |
197 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; | 197 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; |
198 | 198 | ||
199 | struct xtea_ctx *ctx = ctx_arg; | 199 | struct xtea_ctx *ctx = ctx_arg; |
200 | const __le32 *in = (const __le32 *)src; | ||
201 | __le32 *out = (__le32 *)dst; | ||
200 | 202 | ||
201 | y = u32_in (src); | 203 | y = le32_to_cpu(in[0]); |
202 | z = u32_in (src + 4); | 204 | z = le32_to_cpu(in[1]); |
203 | 205 | ||
204 | while (sum != limit) { | 206 | while (sum != limit) { |
205 | y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; | 207 | y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; |
@@ -207,19 +209,19 @@ static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
207 | z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; | 209 | z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; |
208 | } | 210 | } |
209 | 211 | ||
210 | u32_out (dst, y); | 212 | out[0] = cpu_to_le32(y); |
211 | u32_out (dst + 4, z); | 213 | out[1] = cpu_to_le32(z); |
212 | |||
213 | } | 214 | } |
214 | 215 | ||
215 | static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 216 | static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) |
216 | { | 217 | { |
217 | |||
218 | u32 y, z, sum; | 218 | u32 y, z, sum; |
219 | struct tea_ctx *ctx = ctx_arg; | 219 | struct tea_ctx *ctx = ctx_arg; |
220 | const __le32 *in = (const __le32 *)src; | ||
221 | __le32 *out = (__le32 *)dst; | ||
220 | 222 | ||
221 | y = u32_in (src); | 223 | y = le32_to_cpu(in[0]); |
222 | z = u32_in (src + 4); | 224 | z = le32_to_cpu(in[1]); |
223 | 225 | ||
224 | sum = XTEA_DELTA * XTEA_ROUNDS; | 226 | sum = XTEA_DELTA * XTEA_ROUNDS; |
225 | 227 | ||
@@ -229,9 +231,8 @@ static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
229 | y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; | 231 | y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; |
230 | } | 232 | } |
231 | 233 | ||
232 | u32_out (dst, y); | 234 | out[0] = cpu_to_le32(y); |
233 | u32_out (dst + 4, z); | 235 | out[1] = cpu_to_le32(z); |
234 | |||
235 | } | 236 | } |
236 | 237 | ||
237 | static struct crypto_alg tea_alg = { | 238 | static struct crypto_alg tea_alg = { |
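All of the tea.c hunks above follow one pattern: the old u32_in()/u32_out() helpers, which dereferenced the buffer through a plain (u32 *) cast, are replaced by casting the source and destination to __le32 pointers and converting through le32_to_cpu()/cpu_to_le32(). A minimal sketch of that pattern, where toy_block_op is a hypothetical stand-in and not part of the patch:

#include <asm/byteorder.h>
#include <linux/types.h>

/* Hypothetical stand-in for the TEA/XTEA/XETA block functions, showing
 * only the endian-handling pattern introduced by this patch. */
static void toy_block_op(u8 *dst, const u8 *src)
{
	const __le32 *in = (const __le32 *)src;
	__le32 *out = (__le32 *)dst;
	u32 y, z;

	y = le32_to_cpu(in[0]);		/* was: y = u32_in(src);      */
	z = le32_to_cpu(in[1]);		/* was: z = u32_in(src + 4);  */

	y += z;				/* stand-in for the real rounds */

	out[0] = cpu_to_le32(y);	/* was: u32_out(dst, y);      */
	out[1] = cpu_to_le32(z);	/* was: u32_out(dst + 4, z);  */
}

Beyond brevity, the __le32 annotation lets a sparse build warn whenever such a value is mixed with native u32 data without going through the conversion macros.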
diff --git a/crypto/tgr192.c b/crypto/tgr192.c index f0a45cf716d0..2d8e44f6fbe9 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c | |||
@@ -24,8 +24,10 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
27 | #include <asm/byteorder.h> | ||
27 | #include <asm/scatterlist.h> | 28 | #include <asm/scatterlist.h> |
28 | #include <linux/crypto.h> | 29 | #include <linux/crypto.h> |
30 | #include <linux/types.h> | ||
29 | 31 | ||
30 | #define TGR192_DIGEST_SIZE 24 | 32 | #define TGR192_DIGEST_SIZE 24 |
31 | #define TGR160_DIGEST_SIZE 20 | 33 | #define TGR160_DIGEST_SIZE 20 |
@@ -467,18 +469,10 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data) | |||
467 | u64 a, b, c, aa, bb, cc; | 469 | u64 a, b, c, aa, bb, cc; |
468 | u64 x[8]; | 470 | u64 x[8]; |
469 | int i; | 471 | int i; |
470 | const u8 *ptr = data; | 472 | const __le64 *ptr = (const __le64 *)data; |
471 | 473 | ||
472 | for (i = 0; i < 8; i++, ptr += 8) { | 474 | for (i = 0; i < 8; i++) |
473 | x[i] = (((u64)ptr[7] ) << 56) ^ | 475 | x[i] = le64_to_cpu(ptr[i]); |
474 | (((u64)ptr[6] & 0xffL) << 48) ^ | ||
475 | (((u64)ptr[5] & 0xffL) << 40) ^ | ||
476 | (((u64)ptr[4] & 0xffL) << 32) ^ | ||
477 | (((u64)ptr[3] & 0xffL) << 24) ^ | ||
478 | (((u64)ptr[2] & 0xffL) << 16) ^ | ||
479 | (((u64)ptr[1] & 0xffL) << 8) ^ | ||
480 | (((u64)ptr[0] & 0xffL) ); | ||
481 | } | ||
482 | 476 | ||
483 | /* save */ | 477 | /* save */ |
484 | a = aa = tctx->a; | 478 | a = aa = tctx->a; |
@@ -558,9 +552,10 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len) | |||
558 | static void tgr192_final(void *ctx, u8 * out) | 552 | static void tgr192_final(void *ctx, u8 * out) |
559 | { | 553 | { |
560 | struct tgr192_ctx *tctx = ctx; | 554 | struct tgr192_ctx *tctx = ctx; |
555 | __be64 *dst = (__be64 *)out; | ||
556 | __be64 *be64p; | ||
557 | __le32 *le32p; | ||
561 | u32 t, msb, lsb; | 558 | u32 t, msb, lsb; |
562 | u8 *p; | ||
563 | int i, j; | ||
564 | 559 | ||
565 | tgr192_update(tctx, NULL, 0); /* flush */ ; | 560 | tgr192_update(tctx, NULL, 0); /* flush */ ; |
566 | 561 | ||
@@ -594,41 +589,16 @@ static void tgr192_final(void *ctx, u8 * out) | |||
594 | memset(tctx->hash, 0, 56); /* fill next block with zeroes */ | 589 | memset(tctx->hash, 0, 56); /* fill next block with zeroes */ |
595 | } | 590 | } |
596 | /* append the 64 bit count */ | 591 | /* append the 64 bit count */ |
597 | tctx->hash[56] = lsb; | 592 | le32p = (__le32 *)&tctx->hash[56]; |
598 | tctx->hash[57] = lsb >> 8; | 593 | le32p[0] = cpu_to_le32(lsb); |
599 | tctx->hash[58] = lsb >> 16; | 594 | le32p[1] = cpu_to_le32(msb); |
600 | tctx->hash[59] = lsb >> 24; | 595 | |
601 | tctx->hash[60] = msb; | ||
602 | tctx->hash[61] = msb >> 8; | ||
603 | tctx->hash[62] = msb >> 16; | ||
604 | tctx->hash[63] = msb >> 24; | ||
605 | tgr192_transform(tctx, tctx->hash); | 596 | tgr192_transform(tctx, tctx->hash); |
606 | 597 | ||
607 | p = tctx->hash; | 598 | be64p = (__be64 *)tctx->hash; |
608 | *p++ = tctx->a >> 56; *p++ = tctx->a >> 48; *p++ = tctx->a >> 40; | 599 | dst[0] = be64p[0] = cpu_to_be64(tctx->a); |
609 | *p++ = tctx->a >> 32; *p++ = tctx->a >> 24; *p++ = tctx->a >> 16; | 600 | dst[1] = be64p[1] = cpu_to_be64(tctx->b); |
610 | *p++ = tctx->a >> 8; *p++ = tctx->a;\ | 601 | dst[2] = be64p[2] = cpu_to_be64(tctx->c); |
611 | *p++ = tctx->b >> 56; *p++ = tctx->b >> 48; *p++ = tctx->b >> 40; | ||
612 | *p++ = tctx->b >> 32; *p++ = tctx->b >> 24; *p++ = tctx->b >> 16; | ||
613 | *p++ = tctx->b >> 8; *p++ = tctx->b; | ||
614 | *p++ = tctx->c >> 56; *p++ = tctx->c >> 48; *p++ = tctx->c >> 40; | ||
615 | *p++ = tctx->c >> 32; *p++ = tctx->c >> 24; *p++ = tctx->c >> 16; | ||
616 | *p++ = tctx->c >> 8; *p++ = tctx->c; | ||
617 | |||
618 | |||
619 | /* unpack the hash */ | ||
620 | j = 7; | ||
621 | for (i = 0; i < 8; i++) { | ||
622 | out[j--] = (tctx->a >> 8 * i) & 0xff; | ||
623 | } | ||
624 | j = 15; | ||
625 | for (i = 0; i < 8; i++) { | ||
626 | out[j--] = (tctx->b >> 8 * i) & 0xff; | ||
627 | } | ||
628 | j = 23; | ||
629 | for (i = 0; i < 8; i++) { | ||
630 | out[j--] = (tctx->c >> 8 * i) & 0xff; | ||
631 | } | ||
632 | } | 602 | } |
633 | 603 | ||
634 | static void tgr160_final(void *ctx, u8 * out) | 604 | static void tgr160_final(void *ctx, u8 * out) |
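In tgr192.c the same idea is applied to 64-bit quantities: tgr192_transform loads the message block as eight little-endian 64-bit words, and tgr192_final stores the state as three big-endian words, matching the byte order of the removed per-byte loops. A simplified sketch of just those two conversions, with illustrative helper names that are not in the patch:

#include <asm/byteorder.h>
#include <linux/types.h>

/* The 64-byte message block is read as little-endian 64-bit words. */
static void tgr192_load_block(u64 x[8], const u8 *data)
{
	const __le64 *ptr = (const __le64 *)data;
	int i;

	for (i = 0; i < 8; i++)
		x[i] = le64_to_cpu(ptr[i]);
}

/* The 24-byte digest is written as three big-endian 64-bit words. */
static void tgr192_store_digest(u8 *out, u64 a, u64 b, u64 c)
{
	__be64 *dst = (__be64 *)out;

	dst[0] = cpu_to_be64(a);
	dst[1] = cpu_to_be64(b);
	dst[2] = cpu_to_be64(c);
}

The 64-bit message length appended in tgr192_final is handled the same way, but as two little-endian 32-bit stores (lsb then msb) into hash[56..63].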
diff --git a/crypto/twofish.c b/crypto/twofish.c index 4efff8cf9958..b501d5ab9c45 100644 --- a/crypto/twofish.c +++ b/crypto/twofish.c | |||
@@ -37,6 +37,8 @@ | |||
37 | * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the | 37 | * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the |
38 | * Third Edition. | 38 | * Third Edition. |
39 | */ | 39 | */ |
40 | |||
41 | #include <asm/byteorder.h> | ||
40 | #include <linux/module.h> | 42 | #include <linux/module.h> |
41 | #include <linux/init.h> | 43 | #include <linux/init.h> |
42 | #include <linux/types.h> | 44 | #include <linux/types.h> |
@@ -621,13 +623,11 @@ static const u8 calc_sb_tbl[512] = { | |||
621 | * whitening subkey number m. */ | 623 | * whitening subkey number m. */ |
622 | 624 | ||
623 | #define INPACK(n, x, m) \ | 625 | #define INPACK(n, x, m) \ |
624 | x = in[4 * (n)] ^ (in[4 * (n) + 1] << 8) \ | 626 | x = le32_to_cpu(src[n]) ^ ctx->w[m] |
625 | ^ (in[4 * (n) + 2] << 16) ^ (in[4 * (n) + 3] << 24) ^ ctx->w[m] | ||
626 | 627 | ||
627 | #define OUTUNPACK(n, x, m) \ | 628 | #define OUTUNPACK(n, x, m) \ |
628 | x ^= ctx->w[m]; \ | 629 | x ^= ctx->w[m]; \ |
629 | out[4 * (n)] = x; out[4 * (n) + 1] = x >> 8; \ | 630 | dst[n] = cpu_to_le32(x) |
630 | out[4 * (n) + 2] = x >> 16; out[4 * (n) + 3] = x >> 24 | ||
631 | 631 | ||
632 | #define TF_MIN_KEY_SIZE 16 | 632 | #define TF_MIN_KEY_SIZE 16 |
633 | #define TF_MAX_KEY_SIZE 32 | 633 | #define TF_MAX_KEY_SIZE 32 |
@@ -804,6 +804,8 @@ static int twofish_setkey(void *cx, const u8 *key, | |||
804 | static void twofish_encrypt(void *cx, u8 *out, const u8 *in) | 804 | static void twofish_encrypt(void *cx, u8 *out, const u8 *in) |
805 | { | 805 | { |
806 | struct twofish_ctx *ctx = cx; | 806 | struct twofish_ctx *ctx = cx; |
807 | const __le32 *src = (const __le32 *)in; | ||
808 | __le32 *dst = (__le32 *)out; | ||
807 | 809 | ||
808 | /* The four 32-bit chunks of the text. */ | 810 | /* The four 32-bit chunks of the text. */ |
809 | u32 a, b, c, d; | 811 | u32 a, b, c, d; |
@@ -839,6 +841,8 @@ static void twofish_encrypt(void *cx, u8 *out, const u8 *in) | |||
839 | static void twofish_decrypt(void *cx, u8 *out, const u8 *in) | 841 | static void twofish_decrypt(void *cx, u8 *out, const u8 *in) |
840 | { | 842 | { |
841 | struct twofish_ctx *ctx = cx; | 843 | struct twofish_ctx *ctx = cx; |
844 | const __le32 *src = (const __le32 *)in; | ||
845 | __le32 *dst = (__le32 *)out; | ||
842 | 846 | ||
843 | /* The four 32-bit chunks of the text. */ | 847 | /* The four 32-bit chunks of the text. */ |
844 | u32 a, b, c, d; | 848 | u32 a, b, c, d; |
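The twofish.c change moves the byte packing out of the INPACK/OUTUNPACK macros and into typed src/dst pointers declared by the callers. A hedged sketch of how the rewritten macros are used; struct toy_ctx and toy_whiten are hypothetical, and the sixteen Feistel rounds between input and output whitening are omitted:

#include <asm/byteorder.h>
#include <linux/types.h>

struct toy_ctx {
	u32 w[8];		/* whitening subkeys, as in twofish_ctx */
};

/* The macros now assume the enclosing function provides __le32 views of
 * the input and output blocks, named src and dst. */
#define INPACK(n, x, m)		x = le32_to_cpu(src[n]) ^ ctx->w[m]
#define OUTUNPACK(n, x, m)	x ^= ctx->w[m]; dst[n] = cpu_to_le32(x)

static void toy_whiten(struct toy_ctx *ctx, u8 *out, const u8 *in)
{
	const __le32 *src = (const __le32 *)in;
	__le32 *dst = (__le32 *)out;
	u32 a, b, c, d;

	INPACK(0, a, 0); INPACK(1, b, 1);
	INPACK(2, c, 2); INPACK(3, d, 3);

	/* ...the cipher rounds of the real driver would go here... */

	OUTUNPACK(0, c, 4); OUTUNPACK(1, d, 5);
	OUTUNPACK(2, a, 6); OUTUNPACK(3, b, 7);
}

Keeping the casts in twofish_encrypt()/twofish_decrypt() rather than in the macros means each block is cast once per call instead of once per word.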
diff --git a/crypto/wp512.c b/crypto/wp512.c index fd6e20e1f291..b226a126cfae 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c | |||
@@ -22,8 +22,10 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <asm/byteorder.h> | ||
25 | #include <asm/scatterlist.h> | 26 | #include <asm/scatterlist.h> |
26 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> |
28 | #include <linux/types.h> | ||
27 | 29 | ||
28 | #define WP512_DIGEST_SIZE 64 | 30 | #define WP512_DIGEST_SIZE 64 |
29 | #define WP384_DIGEST_SIZE 48 | 31 | #define WP384_DIGEST_SIZE 48 |
@@ -778,19 +780,10 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) { | |||
778 | u64 block[8]; /* mu(buffer) */ | 780 | u64 block[8]; /* mu(buffer) */ |
779 | u64 state[8]; /* the cipher state */ | 781 | u64 state[8]; /* the cipher state */ |
780 | u64 L[8]; | 782 | u64 L[8]; |
781 | u8 *buffer = wctx->buffer; | 783 | const __be64 *buffer = (const __be64 *)wctx->buffer; |
782 | 784 | ||
783 | for (i = 0; i < 8; i++, buffer += 8) { | 785 | for (i = 0; i < 8; i++) |
784 | block[i] = | 786 | block[i] = be64_to_cpu(buffer[i]); |
785 | (((u64)buffer[0] ) << 56) ^ | ||
786 | (((u64)buffer[1] & 0xffL) << 48) ^ | ||
787 | (((u64)buffer[2] & 0xffL) << 40) ^ | ||
788 | (((u64)buffer[3] & 0xffL) << 32) ^ | ||
789 | (((u64)buffer[4] & 0xffL) << 24) ^ | ||
790 | (((u64)buffer[5] & 0xffL) << 16) ^ | ||
791 | (((u64)buffer[6] & 0xffL) << 8) ^ | ||
792 | (((u64)buffer[7] & 0xffL) ); | ||
793 | } | ||
794 | 787 | ||
795 | state[0] = block[0] ^ (K[0] = wctx->hash[0]); | 788 | state[0] = block[0] ^ (K[0] = wctx->hash[0]); |
796 | state[1] = block[1] ^ (K[1] = wctx->hash[1]); | 789 | state[1] = block[1] ^ (K[1] = wctx->hash[1]); |
@@ -1069,7 +1062,7 @@ static void wp512_final(void *ctx, u8 *out) | |||
1069 | u8 *bitLength = wctx->bitLength; | 1062 | u8 *bitLength = wctx->bitLength; |
1070 | int bufferBits = wctx->bufferBits; | 1063 | int bufferBits = wctx->bufferBits; |
1071 | int bufferPos = wctx->bufferPos; | 1064 | int bufferPos = wctx->bufferPos; |
1072 | u8 *digest = out; | 1065 | __be64 *digest = (__be64 *)out; |
1073 | 1066 | ||
1074 | buffer[bufferPos] |= 0x80U >> (bufferBits & 7); | 1067 | buffer[bufferPos] |= 0x80U >> (bufferBits & 7); |
1075 | bufferPos++; | 1068 | bufferPos++; |
@@ -1088,17 +1081,8 @@ static void wp512_final(void *ctx, u8 *out) | |||
1088 | memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], | 1081 | memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], |
1089 | bitLength, WP512_LENGTHBYTES); | 1082 | bitLength, WP512_LENGTHBYTES); |
1090 | wp512_process_buffer(wctx); | 1083 | wp512_process_buffer(wctx); |
1091 | for (i = 0; i < WP512_DIGEST_SIZE/8; i++) { | 1084 | for (i = 0; i < WP512_DIGEST_SIZE/8; i++) |
1092 | digest[0] = (u8)(wctx->hash[i] >> 56); | 1085 | digest[i] = cpu_to_be64(wctx->hash[i]); |
1093 | digest[1] = (u8)(wctx->hash[i] >> 48); | ||
1094 | digest[2] = (u8)(wctx->hash[i] >> 40); | ||
1095 | digest[3] = (u8)(wctx->hash[i] >> 32); | ||
1096 | digest[4] = (u8)(wctx->hash[i] >> 24); | ||
1097 | digest[5] = (u8)(wctx->hash[i] >> 16); | ||
1098 | digest[6] = (u8)(wctx->hash[i] >> 8); | ||
1099 | digest[7] = (u8)(wctx->hash[i] ); | ||
1100 | digest += 8; | ||
1101 | } | ||
1102 | wctx->bufferBits = bufferBits; | 1086 | wctx->bufferBits = bufferBits; |
1103 | wctx->bufferPos = bufferPos; | 1087 | wctx->bufferPos = bufferPos; |
1104 | } | 1088 | } |
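wp512.c is the big-endian counterpart of the tgr192.c change: the block load in wp512_process_buffer and the digest store in wp512_final become be64_to_cpu()/cpu_to_be64() over __be64 pointers. The sketch below uses illustrative helper names and shows the two loops on their own; on a big-endian host the macros are no-ops, and on a little-endian host they byte-swap, which is exactly what the removed shift-and-XOR code computed by hand.

#include <asm/byteorder.h>
#include <linux/types.h>

/* Load the 64-byte buffer as eight big-endian 64-bit words (mu(buffer)). */
static void wp512_load_block(u64 block[8], const u8 *buffer)
{
	const __be64 *src = (const __be64 *)buffer;
	int i;

	for (i = 0; i < 8; i++)
		block[i] = be64_to_cpu(src[i]);
}

/* Store the 512-bit hash state as the big-endian digest. */
static void wp512_store_digest(u8 *out, const u64 hash[8])
{
	__be64 *digest = (__be64 *)out;
	int i;

	for (i = 0; i < 8; i++)		/* WP512_DIGEST_SIZE / 8 */
		digest[i] = cpu_to_be64(hash[i]);
}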