diff options
| author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-09 18:12:52 -0500 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-09 18:12:52 -0500 | 
| commit | 1fd5a46dd6bbca3a1275465120caf4748872c2a7 (patch) | |
| tree | c24862a43f57974394ebb58a1d9005e4093e3bf7 | |
| parent | 2cc6055060d975e8c7601f4a1c68ef2d3050b4e9 (diff) | |
| parent | dff2c03534f525813342ab8dec90c5bb1ee07471 (diff) | |
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
68 files changed, 1022 insertions, 747 deletions
| diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S index 7b73c67cb4e8..911b15377f2e 100644 --- a/arch/i386/crypto/aes-i586-asm.S +++ b/arch/i386/crypto/aes-i586-asm.S | |||
| @@ -255,18 +255,17 @@ aes_enc_blk: | |||
| 255 | xor 8(%ebp),%r4 | 255 | xor 8(%ebp),%r4 | 
| 256 | xor 12(%ebp),%r5 | 256 | xor 12(%ebp),%r5 | 
| 257 | 257 | ||
| 258 | sub $8,%esp // space for register saves on stack | 258 | sub $8,%esp // space for register saves on stack | 
| 259 | add $16,%ebp // increment to next round key | 259 | add $16,%ebp // increment to next round key | 
| 260 | sub $10,%r3 | 260 | cmp $12,%r3 | 
| 261 | je 4f // 10 rounds for 128-bit key | 261 | jb 4f // 10 rounds for 128-bit key | 
| 262 | add $32,%ebp | 262 | lea 32(%ebp),%ebp | 
| 263 | sub $2,%r3 | 263 | je 3f // 12 rounds for 192-bit key | 
| 264 | je 3f // 12 rounds for 128-bit key | 264 | lea 32(%ebp),%ebp | 
| 265 | add $32,%ebp | 265 | |
| 266 | 266 | 2: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 256-bit key | |
| 267 | 2: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 128-bit key | ||
| 268 | fwd_rnd2( -48(%ebp) ,ft_tab) | 267 | fwd_rnd2( -48(%ebp) ,ft_tab) | 
| 269 | 3: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 128-bit key | 268 | 3: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 192-bit key | 
| 270 | fwd_rnd2( -16(%ebp) ,ft_tab) | 269 | fwd_rnd2( -16(%ebp) ,ft_tab) | 
| 271 | 4: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key | 270 | 4: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key | 
| 272 | fwd_rnd2( +16(%ebp) ,ft_tab) | 271 | fwd_rnd2( +16(%ebp) ,ft_tab) | 
| @@ -334,18 +333,17 @@ aes_dec_blk: | |||
| 334 | xor 8(%ebp),%r4 | 333 | xor 8(%ebp),%r4 | 
| 335 | xor 12(%ebp),%r5 | 334 | xor 12(%ebp),%r5 | 
| 336 | 335 | ||
| 337 | sub $8,%esp // space for register saves on stack | 336 | sub $8,%esp // space for register saves on stack | 
| 338 | sub $16,%ebp // increment to next round key | 337 | sub $16,%ebp // increment to next round key | 
| 339 | sub $10,%r3 | 338 | cmp $12,%r3 | 
| 340 | je 4f // 10 rounds for 128-bit key | 339 | jb 4f // 10 rounds for 128-bit key | 
| 341 | sub $32,%ebp | 340 | lea -32(%ebp),%ebp | 
| 342 | sub $2,%r3 | 341 | je 3f // 12 rounds for 192-bit key | 
| 343 | je 3f // 12 rounds for 128-bit key | 342 | lea -32(%ebp),%ebp | 
| 344 | sub $32,%ebp | ||
| 345 | 343 | ||
| 346 | 2: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 128-bit key | 344 | 2: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 256-bit key | 
| 347 | inv_rnd2( +48(%ebp), it_tab) | 345 | inv_rnd2( +48(%ebp), it_tab) | 
| 348 | 3: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 128-bit key | 346 | 3: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 192-bit key | 
| 349 | inv_rnd2( +16(%ebp), it_tab) | 347 | inv_rnd2( +16(%ebp), it_tab) | 
| 350 | 4: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key | 348 | 4: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key | 
| 351 | inv_rnd2( -16(%ebp), it_tab) | 349 | inv_rnd2( -16(%ebp), it_tab) | 
| diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c index 88ee85c3b43b..a50397b1d5c7 100644 --- a/arch/i386/crypto/aes.c +++ b/arch/i386/crypto/aes.c | |||
| @@ -36,6 +36,8 @@ | |||
| 36 | * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> | 36 | * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> | 
| 37 | * | 37 | * | 
| 38 | */ | 38 | */ | 
| 39 | |||
| 40 | #include <asm/byteorder.h> | ||
| 39 | #include <linux/kernel.h> | 41 | #include <linux/kernel.h> | 
| 40 | #include <linux/module.h> | 42 | #include <linux/module.h> | 
| 41 | #include <linux/init.h> | 43 | #include <linux/init.h> | 
| @@ -59,7 +61,6 @@ struct aes_ctx { | |||
| 59 | }; | 61 | }; | 
| 60 | 62 | ||
| 61 | #define WPOLY 0x011b | 63 | #define WPOLY 0x011b | 
| 62 | #define u32_in(x) le32_to_cpup((const __le32 *)(x)) | ||
| 63 | #define bytes2word(b0, b1, b2, b3) \ | 64 | #define bytes2word(b0, b1, b2, b3) \ | 
| 64 | (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0)) | 65 | (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0)) | 
| 65 | 66 | ||
| @@ -93,7 +94,6 @@ static u32 rcon_tab[RC_LENGTH]; | |||
| 93 | 94 | ||
| 94 | u32 ft_tab[4][256]; | 95 | u32 ft_tab[4][256]; | 
| 95 | u32 fl_tab[4][256]; | 96 | u32 fl_tab[4][256]; | 
| 96 | static u32 ls_tab[4][256]; | ||
| 97 | static u32 im_tab[4][256]; | 97 | static u32 im_tab[4][256]; | 
| 98 | u32 il_tab[4][256]; | 98 | u32 il_tab[4][256]; | 
| 99 | u32 it_tab[4][256]; | 99 | u32 it_tab[4][256]; | 
| @@ -144,15 +144,6 @@ static void gen_tabs(void) | |||
| 144 | fl_tab[2][i] = upr(w, 2); | 144 | fl_tab[2][i] = upr(w, 2); | 
| 145 | fl_tab[3][i] = upr(w, 3); | 145 | fl_tab[3][i] = upr(w, 3); | 
| 146 | 146 | ||
| 147 | /* | ||
| 148 | * table for key schedule if fl_tab above is | ||
| 149 | * not of the required form | ||
| 150 | */ | ||
| 151 | ls_tab[0][i] = w; | ||
| 152 | ls_tab[1][i] = upr(w, 1); | ||
| 153 | ls_tab[2][i] = upr(w, 2); | ||
| 154 | ls_tab[3][i] = upr(w, 3); | ||
| 155 | |||
| 156 | b = fi(inv_affine((u8)i)); | 147 | b = fi(inv_affine((u8)i)); | 
| 157 | w = bytes2word(fe(b), f9(b), fd(b), fb(b)); | 148 | w = bytes2word(fe(b), f9(b), fd(b), fb(b)); | 
| 158 | 149 | ||
| @@ -393,13 +384,14 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 393 | int i; | 384 | int i; | 
| 394 | u32 ss[8]; | 385 | u32 ss[8]; | 
| 395 | struct aes_ctx *ctx = ctx_arg; | 386 | struct aes_ctx *ctx = ctx_arg; | 
| 387 | const __le32 *key = (const __le32 *)in_key; | ||
| 396 | 388 | ||
| 397 | /* encryption schedule */ | 389 | /* encryption schedule */ | 
| 398 | 390 | ||
| 399 | ctx->ekey[0] = ss[0] = u32_in(in_key); | 391 | ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]); | 
| 400 | ctx->ekey[1] = ss[1] = u32_in(in_key + 4); | 392 | ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]); | 
| 401 | ctx->ekey[2] = ss[2] = u32_in(in_key + 8); | 393 | ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]); | 
| 402 | ctx->ekey[3] = ss[3] = u32_in(in_key + 12); | 394 | ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]); | 
| 403 | 395 | ||
| 404 | switch(key_len) { | 396 | switch(key_len) { | 
| 405 | case 16: | 397 | case 16: | 
| @@ -410,8 +402,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 410 | break; | 402 | break; | 
| 411 | 403 | ||
| 412 | case 24: | 404 | case 24: | 
| 413 | ctx->ekey[4] = ss[4] = u32_in(in_key + 16); | 405 | ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]); | 
| 414 | ctx->ekey[5] = ss[5] = u32_in(in_key + 20); | 406 | ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]); | 
| 415 | for (i = 0; i < 7; i++) | 407 | for (i = 0; i < 7; i++) | 
| 416 | ke6(ctx->ekey, i); | 408 | ke6(ctx->ekey, i); | 
| 417 | kel6(ctx->ekey, 7); | 409 | kel6(ctx->ekey, 7); | 
| @@ -419,10 +411,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 419 | break; | 411 | break; | 
| 420 | 412 | ||
| 421 | case 32: | 413 | case 32: | 
| 422 | ctx->ekey[4] = ss[4] = u32_in(in_key + 16); | 414 | ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]); | 
| 423 | ctx->ekey[5] = ss[5] = u32_in(in_key + 20); | 415 | ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]); | 
| 424 | ctx->ekey[6] = ss[6] = u32_in(in_key + 24); | 416 | ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]); | 
| 425 | ctx->ekey[7] = ss[7] = u32_in(in_key + 28); | 417 | ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]); | 
| 426 | for (i = 0; i < 6; i++) | 418 | for (i = 0; i < 6; i++) | 
| 427 | ke8(ctx->ekey, i); | 419 | ke8(ctx->ekey, i); | 
| 428 | kel8(ctx->ekey, 6); | 420 | kel8(ctx->ekey, 6); | 
| @@ -436,10 +428,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 436 | 428 | ||
| 437 | /* decryption schedule */ | 429 | /* decryption schedule */ | 
| 438 | 430 | ||
| 439 | ctx->dkey[0] = ss[0] = u32_in(in_key); | 431 | ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]); | 
| 440 | ctx->dkey[1] = ss[1] = u32_in(in_key + 4); | 432 | ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]); | 
| 441 | ctx->dkey[2] = ss[2] = u32_in(in_key + 8); | 433 | ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]); | 
| 442 | ctx->dkey[3] = ss[3] = u32_in(in_key + 12); | 434 | ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]); | 
| 443 | 435 | ||
| 444 | switch (key_len) { | 436 | switch (key_len) { | 
| 445 | case 16: | 437 | case 16: | 
| @@ -450,8 +442,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 450 | break; | 442 | break; | 
| 451 | 443 | ||
| 452 | case 24: | 444 | case 24: | 
| 453 | ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); | 445 | ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4])); | 
| 454 | ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); | 446 | ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5])); | 
| 455 | kdf6(ctx->dkey, 0); | 447 | kdf6(ctx->dkey, 0); | 
| 456 | for (i = 1; i < 7; i++) | 448 | for (i = 1; i < 7; i++) | 
| 457 | kd6(ctx->dkey, i); | 449 | kd6(ctx->dkey, i); | 
| @@ -459,10 +451,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 459 | break; | 451 | break; | 
| 460 | 452 | ||
| 461 | case 32: | 453 | case 32: | 
| 462 | ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); | 454 | ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4])); | 
| 463 | ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); | 455 | ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5])); | 
| 464 | ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24)); | 456 | ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6])); | 
| 465 | ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28)); | 457 | ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7])); | 
| 466 | kdf8(ctx->dkey, 0); | 458 | kdf8(ctx->dkey, 0); | 
| 467 | for (i = 1; i < 6; i++) | 459 | for (i = 1; i < 6; i++) | 
| 468 | kd8(ctx->dkey, i); | 460 | kd8(ctx->dkey, i); | 
| @@ -484,6 +476,8 @@ static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src) | |||
| 484 | 476 | ||
| 485 | static struct crypto_alg aes_alg = { | 477 | static struct crypto_alg aes_alg = { | 
| 486 | .cra_name = "aes", | 478 | .cra_name = "aes", | 
| 479 | .cra_driver_name = "aes-i586", | ||
| 480 | .cra_priority = 200, | ||
| 487 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 481 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 488 | .cra_blocksize = AES_BLOCK_SIZE, | 482 | .cra_blocksize = AES_BLOCK_SIZE, | 
| 489 | .cra_ctxsize = sizeof(struct aes_ctx), | 483 | .cra_ctxsize = sizeof(struct aes_ctx), | 
| diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c index acfdaa28791e..fb1b961a2e2f 100644 --- a/arch/x86_64/crypto/aes.c +++ b/arch/x86_64/crypto/aes.c | |||
| @@ -74,8 +74,6 @@ static inline u8 byte(const u32 x, const unsigned n) | |||
| 74 | return x >> (n << 3); | 74 | return x >> (n << 3); | 
| 75 | } | 75 | } | 
| 76 | 76 | ||
| 77 | #define u32_in(x) le32_to_cpu(*(const __le32 *)(x)) | ||
| 78 | |||
| 79 | struct aes_ctx | 77 | struct aes_ctx | 
| 80 | { | 78 | { | 
| 81 | u32 key_length; | 79 | u32 key_length; | 
| @@ -234,6 +232,7 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, | |||
| 234 | u32 *flags) | 232 | u32 *flags) | 
| 235 | { | 233 | { | 
| 236 | struct aes_ctx *ctx = ctx_arg; | 234 | struct aes_ctx *ctx = ctx_arg; | 
| 235 | const __le32 *key = (const __le32 *)in_key; | ||
| 237 | u32 i, j, t, u, v, w; | 236 | u32 i, j, t, u, v, w; | 
| 238 | 237 | ||
| 239 | if (key_len != 16 && key_len != 24 && key_len != 32) { | 238 | if (key_len != 16 && key_len != 24 && key_len != 32) { | 
| @@ -243,10 +242,10 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, | |||
| 243 | 242 | ||
| 244 | ctx->key_length = key_len; | 243 | ctx->key_length = key_len; | 
| 245 | 244 | ||
| 246 | D_KEY[key_len + 24] = E_KEY[0] = u32_in(in_key); | 245 | D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]); | 
| 247 | D_KEY[key_len + 25] = E_KEY[1] = u32_in(in_key + 4); | 246 | D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]); | 
| 248 | D_KEY[key_len + 26] = E_KEY[2] = u32_in(in_key + 8); | 247 | D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]); | 
| 249 | D_KEY[key_len + 27] = E_KEY[3] = u32_in(in_key + 12); | 248 | D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]); | 
| 250 | 249 | ||
| 251 | switch (key_len) { | 250 | switch (key_len) { | 
| 252 | case 16: | 251 | case 16: | 
| @@ -256,17 +255,17 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, | |||
| 256 | break; | 255 | break; | 
| 257 | 256 | ||
| 258 | case 24: | 257 | case 24: | 
| 259 | E_KEY[4] = u32_in(in_key + 16); | 258 | E_KEY[4] = le32_to_cpu(key[4]); | 
| 260 | t = E_KEY[5] = u32_in(in_key + 20); | 259 | t = E_KEY[5] = le32_to_cpu(key[5]); | 
| 261 | for (i = 0; i < 8; ++i) | 260 | for (i = 0; i < 8; ++i) | 
| 262 | loop6 (i); | 261 | loop6 (i); | 
| 263 | break; | 262 | break; | 
| 264 | 263 | ||
| 265 | case 32: | 264 | case 32: | 
| 266 | E_KEY[4] = u32_in(in_key + 16); | 265 | E_KEY[4] = le32_to_cpu(key[4]); | 
| 267 | E_KEY[5] = u32_in(in_key + 20); | 266 | E_KEY[5] = le32_to_cpu(key[5]); | 
| 268 | E_KEY[6] = u32_in(in_key + 24); | 267 | E_KEY[6] = le32_to_cpu(key[6]); | 
| 269 | t = E_KEY[7] = u32_in(in_key + 28); | 268 | t = E_KEY[7] = le32_to_cpu(key[7]); | 
| 270 | for (i = 0; i < 7; ++i) | 269 | for (i = 0; i < 7; ++i) | 
| 271 | loop8(i); | 270 | loop8(i); | 
| 272 | break; | 271 | break; | 
| @@ -290,6 +289,8 @@ extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in); | |||
| 290 | 289 | ||
| 291 | static struct crypto_alg aes_alg = { | 290 | static struct crypto_alg aes_alg = { | 
| 292 | .cra_name = "aes", | 291 | .cra_name = "aes", | 
| 292 | .cra_driver_name = "aes-x86_64", | ||
| 293 | .cra_priority = 200, | ||
| 293 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 294 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 294 | .cra_blocksize = AES_BLOCK_SIZE, | 295 | .cra_blocksize = AES_BLOCK_SIZE, | 
| 295 | .cra_ctxsize = sizeof(struct aes_ctx), | 296 | .cra_ctxsize = sizeof(struct aes_ctx), | 
| diff --git a/crypto/Kconfig b/crypto/Kconfig index 52e1d4108a99..c442f2e7ce46 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -157,7 +157,7 @@ config CRYPTO_SERPENT | |||
| 157 | 157 | ||
| 158 | config CRYPTO_AES | 158 | config CRYPTO_AES | 
| 159 | tristate "AES cipher algorithms" | 159 | tristate "AES cipher algorithms" | 
| 160 | depends on CRYPTO && !(X86 || UML_X86) | 160 | depends on CRYPTO | 
| 161 | help | 161 | help | 
| 162 | AES cipher algorithms (FIPS-197). AES uses the Rijndael | 162 | AES cipher algorithms (FIPS-197). AES uses the Rijndael | 
| 163 | algorithm. | 163 | algorithm. | 
| diff --git a/crypto/aes.c b/crypto/aes.c index 5df92888ef5a..0a6a5c143686 100644 --- a/crypto/aes.c +++ b/crypto/aes.c | |||
| @@ -73,9 +73,6 @@ byte(const u32 x, const unsigned n) | |||
| 73 | return x >> (n << 3); | 73 | return x >> (n << 3); | 
| 74 | } | 74 | } | 
| 75 | 75 | ||
| 76 | #define u32_in(x) le32_to_cpu(*(const u32 *)(x)) | ||
| 77 | #define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from)) | ||
| 78 | |||
| 79 | struct aes_ctx { | 76 | struct aes_ctx { | 
| 80 | int key_length; | 77 | int key_length; | 
| 81 | u32 E[60]; | 78 | u32 E[60]; | 
| @@ -256,6 +253,7 @@ static int | |||
| 256 | aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | 253 | aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | 
| 257 | { | 254 | { | 
| 258 | struct aes_ctx *ctx = ctx_arg; | 255 | struct aes_ctx *ctx = ctx_arg; | 
| 256 | const __le32 *key = (const __le32 *)in_key; | ||
| 259 | u32 i, t, u, v, w; | 257 | u32 i, t, u, v, w; | 
| 260 | 258 | ||
| 261 | if (key_len != 16 && key_len != 24 && key_len != 32) { | 259 | if (key_len != 16 && key_len != 24 && key_len != 32) { | 
| @@ -265,10 +263,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 265 | 263 | ||
| 266 | ctx->key_length = key_len; | 264 | ctx->key_length = key_len; | 
| 267 | 265 | ||
| 268 | E_KEY[0] = u32_in (in_key); | 266 | E_KEY[0] = le32_to_cpu(key[0]); | 
| 269 | E_KEY[1] = u32_in (in_key + 4); | 267 | E_KEY[1] = le32_to_cpu(key[1]); | 
| 270 | E_KEY[2] = u32_in (in_key + 8); | 268 | E_KEY[2] = le32_to_cpu(key[2]); | 
| 271 | E_KEY[3] = u32_in (in_key + 12); | 269 | E_KEY[3] = le32_to_cpu(key[3]); | 
| 272 | 270 | ||
| 273 | switch (key_len) { | 271 | switch (key_len) { | 
| 274 | case 16: | 272 | case 16: | 
| @@ -278,17 +276,17 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 278 | break; | 276 | break; | 
| 279 | 277 | ||
| 280 | case 24: | 278 | case 24: | 
| 281 | E_KEY[4] = u32_in (in_key + 16); | 279 | E_KEY[4] = le32_to_cpu(key[4]); | 
| 282 | t = E_KEY[5] = u32_in (in_key + 20); | 280 | t = E_KEY[5] = le32_to_cpu(key[5]); | 
| 283 | for (i = 0; i < 8; ++i) | 281 | for (i = 0; i < 8; ++i) | 
| 284 | loop6 (i); | 282 | loop6 (i); | 
| 285 | break; | 283 | break; | 
| 286 | 284 | ||
| 287 | case 32: | 285 | case 32: | 
| 288 | E_KEY[4] = u32_in (in_key + 16); | 286 | E_KEY[4] = le32_to_cpu(key[4]); | 
| 289 | E_KEY[5] = u32_in (in_key + 20); | 287 | E_KEY[5] = le32_to_cpu(key[5]); | 
| 290 | E_KEY[6] = u32_in (in_key + 24); | 288 | E_KEY[6] = le32_to_cpu(key[6]); | 
| 291 | t = E_KEY[7] = u32_in (in_key + 28); | 289 | t = E_KEY[7] = le32_to_cpu(key[7]); | 
| 292 | for (i = 0; i < 7; ++i) | 290 | for (i = 0; i < 7; ++i) | 
| 293 | loop8 (i); | 291 | loop8 (i); | 
| 294 | break; | 292 | break; | 
| @@ -324,13 +322,15 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) | |||
| 324 | static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) | 322 | static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) | 
| 325 | { | 323 | { | 
| 326 | const struct aes_ctx *ctx = ctx_arg; | 324 | const struct aes_ctx *ctx = ctx_arg; | 
| 325 | const __le32 *src = (const __le32 *)in; | ||
| 326 | __le32 *dst = (__le32 *)out; | ||
| 327 | u32 b0[4], b1[4]; | 327 | u32 b0[4], b1[4]; | 
| 328 | const u32 *kp = E_KEY + 4; | 328 | const u32 *kp = E_KEY + 4; | 
| 329 | 329 | ||
| 330 | b0[0] = u32_in (in) ^ E_KEY[0]; | 330 | b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0]; | 
| 331 | b0[1] = u32_in (in + 4) ^ E_KEY[1]; | 331 | b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1]; | 
| 332 | b0[2] = u32_in (in + 8) ^ E_KEY[2]; | 332 | b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2]; | 
| 333 | b0[3] = u32_in (in + 12) ^ E_KEY[3]; | 333 | b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3]; | 
| 334 | 334 | ||
| 335 | if (ctx->key_length > 24) { | 335 | if (ctx->key_length > 24) { | 
| 336 | f_nround (b1, b0, kp); | 336 | f_nround (b1, b0, kp); | 
| @@ -353,10 +353,10 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) | |||
| 353 | f_nround (b1, b0, kp); | 353 | f_nround (b1, b0, kp); | 
| 354 | f_lround (b0, b1, kp); | 354 | f_lround (b0, b1, kp); | 
| 355 | 355 | ||
| 356 | u32_out (out, b0[0]); | 356 | dst[0] = cpu_to_le32(b0[0]); | 
| 357 | u32_out (out + 4, b0[1]); | 357 | dst[1] = cpu_to_le32(b0[1]); | 
| 358 | u32_out (out + 8, b0[2]); | 358 | dst[2] = cpu_to_le32(b0[2]); | 
| 359 | u32_out (out + 12, b0[3]); | 359 | dst[3] = cpu_to_le32(b0[3]); | 
| 360 | } | 360 | } | 
| 361 | 361 | ||
| 362 | /* decrypt a block of text */ | 362 | /* decrypt a block of text */ | 
| @@ -377,14 +377,16 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) | |||
| 377 | static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) | 377 | static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) | 
| 378 | { | 378 | { | 
| 379 | const struct aes_ctx *ctx = ctx_arg; | 379 | const struct aes_ctx *ctx = ctx_arg; | 
| 380 | const __le32 *src = (const __le32 *)in; | ||
| 381 | __le32 *dst = (__le32 *)out; | ||
| 380 | u32 b0[4], b1[4]; | 382 | u32 b0[4], b1[4]; | 
| 381 | const int key_len = ctx->key_length; | 383 | const int key_len = ctx->key_length; | 
| 382 | const u32 *kp = D_KEY + key_len + 20; | 384 | const u32 *kp = D_KEY + key_len + 20; | 
| 383 | 385 | ||
| 384 | b0[0] = u32_in (in) ^ E_KEY[key_len + 24]; | 386 | b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24]; | 
| 385 | b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25]; | 387 | b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25]; | 
| 386 | b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26]; | 388 | b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26]; | 
| 387 | b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27]; | 389 | b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27]; | 
| 388 | 390 | ||
| 389 | if (key_len > 24) { | 391 | if (key_len > 24) { | 
| 390 | i_nround (b1, b0, kp); | 392 | i_nround (b1, b0, kp); | 
| @@ -407,18 +409,21 @@ static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) | |||
| 407 | i_nround (b1, b0, kp); | 409 | i_nround (b1, b0, kp); | 
| 408 | i_lround (b0, b1, kp); | 410 | i_lround (b0, b1, kp); | 
| 409 | 411 | ||
| 410 | u32_out (out, b0[0]); | 412 | dst[0] = cpu_to_le32(b0[0]); | 
| 411 | u32_out (out + 4, b0[1]); | 413 | dst[1] = cpu_to_le32(b0[1]); | 
| 412 | u32_out (out + 8, b0[2]); | 414 | dst[2] = cpu_to_le32(b0[2]); | 
| 413 | u32_out (out + 12, b0[3]); | 415 | dst[3] = cpu_to_le32(b0[3]); | 
| 414 | } | 416 | } | 
| 415 | 417 | ||
| 416 | 418 | ||
| 417 | static struct crypto_alg aes_alg = { | 419 | static struct crypto_alg aes_alg = { | 
| 418 | .cra_name = "aes", | 420 | .cra_name = "aes", | 
| 421 | .cra_driver_name = "aes-generic", | ||
| 422 | .cra_priority = 100, | ||
| 419 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 423 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 420 | .cra_blocksize = AES_BLOCK_SIZE, | 424 | .cra_blocksize = AES_BLOCK_SIZE, | 
| 421 | .cra_ctxsize = sizeof(struct aes_ctx), | 425 | .cra_ctxsize = sizeof(struct aes_ctx), | 
| 426 | .cra_alignmask = 3, | ||
| 422 | .cra_module = THIS_MODULE, | 427 | .cra_module = THIS_MODULE, | 
| 423 | .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), | 428 | .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), | 
| 424 | .cra_u = { | 429 | .cra_u = { | 
| diff --git a/crypto/anubis.c b/crypto/anubis.c index 3925eb0133cb..2c796bdb91a6 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c | |||
| @@ -32,8 +32,10 @@ | |||
| 32 | #include <linux/init.h> | 32 | #include <linux/init.h> | 
| 33 | #include <linux/module.h> | 33 | #include <linux/module.h> | 
| 34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> | 
| 35 | #include <asm/byteorder.h> | ||
| 35 | #include <asm/scatterlist.h> | 36 | #include <asm/scatterlist.h> | 
| 36 | #include <linux/crypto.h> | 37 | #include <linux/crypto.h> | 
| 38 | #include <linux/types.h> | ||
| 37 | 39 | ||
| 38 | #define ANUBIS_MIN_KEY_SIZE 16 | 40 | #define ANUBIS_MIN_KEY_SIZE 16 | 
| 39 | #define ANUBIS_MAX_KEY_SIZE 40 | 41 | #define ANUBIS_MAX_KEY_SIZE 40 | 
| @@ -461,8 +463,8 @@ static const u32 rc[] = { | |||
| 461 | static int anubis_setkey(void *ctx_arg, const u8 *in_key, | 463 | static int anubis_setkey(void *ctx_arg, const u8 *in_key, | 
| 462 | unsigned int key_len, u32 *flags) | 464 | unsigned int key_len, u32 *flags) | 
| 463 | { | 465 | { | 
| 464 | 466 | const __be32 *key = (const __be32 *)in_key; | |
| 465 | int N, R, i, pos, r; | 467 | int N, R, i, r; | 
| 466 | u32 kappa[ANUBIS_MAX_N]; | 468 | u32 kappa[ANUBIS_MAX_N]; | 
| 467 | u32 inter[ANUBIS_MAX_N]; | 469 | u32 inter[ANUBIS_MAX_N]; | 
| 468 | 470 | ||
| @@ -483,13 +485,8 @@ static int anubis_setkey(void *ctx_arg, const u8 *in_key, | |||
| 483 | ctx->R = R = 8 + N; | 485 | ctx->R = R = 8 + N; | 
| 484 | 486 | ||
| 485 | /* * map cipher key to initial key state (mu): */ | 487 | /* * map cipher key to initial key state (mu): */ | 
| 486 | for (i = 0, pos = 0; i < N; i++, pos += 4) { | 488 | for (i = 0; i < N; i++) | 
| 487 | kappa[i] = | 489 | kappa[i] = be32_to_cpu(key[i]); | 
| 488 | (in_key[pos ] << 24) ^ | ||
| 489 | (in_key[pos + 1] << 16) ^ | ||
| 490 | (in_key[pos + 2] << 8) ^ | ||
| 491 | (in_key[pos + 3] ); | ||
| 492 | } | ||
| 493 | 490 | ||
| 494 | /* | 491 | /* | 
| 495 | * generate R + 1 round keys: | 492 | * generate R + 1 round keys: | 
| @@ -578,7 +575,9 @@ static int anubis_setkey(void *ctx_arg, const u8 *in_key, | |||
| 578 | static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], | 575 | static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], | 
| 579 | u8 *ciphertext, const u8 *plaintext, const int R) | 576 | u8 *ciphertext, const u8 *plaintext, const int R) | 
| 580 | { | 577 | { | 
| 581 | int i, pos, r; | 578 | const __be32 *src = (const __be32 *)plaintext; | 
| 579 | __be32 *dst = (__be32 *)ciphertext; | ||
| 580 | int i, r; | ||
| 582 | u32 state[4]; | 581 | u32 state[4]; | 
| 583 | u32 inter[4]; | 582 | u32 inter[4]; | 
| 584 | 583 | ||
| @@ -586,14 +585,8 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], | |||
| 586 | * map plaintext block to cipher state (mu) | 585 | * map plaintext block to cipher state (mu) | 
| 587 | * and add initial round key (sigma[K^0]): | 586 | * and add initial round key (sigma[K^0]): | 
| 588 | */ | 587 | */ | 
| 589 | for (i = 0, pos = 0; i < 4; i++, pos += 4) { | 588 | for (i = 0; i < 4; i++) | 
| 590 | state[i] = | 589 | state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; | 
| 591 | (plaintext[pos ] << 24) ^ | ||
| 592 | (plaintext[pos + 1] << 16) ^ | ||
| 593 | (plaintext[pos + 2] << 8) ^ | ||
| 594 | (plaintext[pos + 3] ) ^ | ||
| 595 | roundKey[0][i]; | ||
| 596 | } | ||
| 597 | 590 | ||
| 598 | /* | 591 | /* | 
| 599 | * R - 1 full rounds: | 592 | * R - 1 full rounds: | 
| @@ -663,13 +656,8 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], | |||
| 663 | * map cipher state to ciphertext block (mu^{-1}): | 656 | * map cipher state to ciphertext block (mu^{-1}): | 
| 664 | */ | 657 | */ | 
| 665 | 658 | ||
| 666 | for (i = 0, pos = 0; i < 4; i++, pos += 4) { | 659 | for (i = 0; i < 4; i++) | 
| 667 | u32 w = inter[i]; | 660 | dst[i] = cpu_to_be32(inter[i]); | 
| 668 | ciphertext[pos ] = (u8)(w >> 24); | ||
| 669 | ciphertext[pos + 1] = (u8)(w >> 16); | ||
| 670 | ciphertext[pos + 2] = (u8)(w >> 8); | ||
| 671 | ciphertext[pos + 3] = (u8)(w ); | ||
| 672 | } | ||
| 673 | } | 661 | } | 
| 674 | 662 | ||
| 675 | static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 663 | static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 
| @@ -689,6 +677,7 @@ static struct crypto_alg anubis_alg = { | |||
| 689 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 677 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 690 | .cra_blocksize = ANUBIS_BLOCK_SIZE, | 678 | .cra_blocksize = ANUBIS_BLOCK_SIZE, | 
| 691 | .cra_ctxsize = sizeof (struct anubis_ctx), | 679 | .cra_ctxsize = sizeof (struct anubis_ctx), | 
| 680 | .cra_alignmask = 3, | ||
| 692 | .cra_module = THIS_MODULE, | 681 | .cra_module = THIS_MODULE, | 
| 693 | .cra_list = LIST_HEAD_INIT(anubis_alg.cra_list), | 682 | .cra_list = LIST_HEAD_INIT(anubis_alg.cra_list), | 
| 694 | .cra_u = { .cipher = { | 683 | .cra_u = { .cipher = { | 
| diff --git a/crypto/api.c b/crypto/api.c index 40ae42e9b6a6..e26156f71839 100644 --- a/crypto/api.c +++ b/crypto/api.c | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | * | 3 | * | 
| 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 
| 5 | * Copyright (c) 2002 David S. Miller (davem@redhat.com) | 5 | * Copyright (c) 2002 David S. Miller (davem@redhat.com) | 
| 6 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 6 | * | 7 | * | 
| 7 | * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> | 8 | * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> | 
| 8 | * and Nettle, by Niels Möller. | 9 | * and Nettle, by Niels Möller. | 
| @@ -18,9 +19,11 @@ | |||
| 18 | #include <linux/init.h> | 19 | #include <linux/init.h> | 
| 19 | #include <linux/crypto.h> | 20 | #include <linux/crypto.h> | 
| 20 | #include <linux/errno.h> | 21 | #include <linux/errno.h> | 
| 22 | #include <linux/kernel.h> | ||
| 21 | #include <linux/kmod.h> | 23 | #include <linux/kmod.h> | 
| 22 | #include <linux/rwsem.h> | 24 | #include <linux/rwsem.h> | 
| 23 | #include <linux/slab.h> | 25 | #include <linux/slab.h> | 
| 26 | #include <linux/string.h> | ||
| 24 | #include "internal.h" | 27 | #include "internal.h" | 
| 25 | 28 | ||
| 26 | LIST_HEAD(crypto_alg_list); | 29 | LIST_HEAD(crypto_alg_list); | 
| @@ -39,6 +42,7 @@ static inline void crypto_alg_put(struct crypto_alg *alg) | |||
| 39 | static struct crypto_alg *crypto_alg_lookup(const char *name) | 42 | static struct crypto_alg *crypto_alg_lookup(const char *name) | 
| 40 | { | 43 | { | 
| 41 | struct crypto_alg *q, *alg = NULL; | 44 | struct crypto_alg *q, *alg = NULL; | 
| 45 | int best = -1; | ||
| 42 | 46 | ||
| 43 | if (!name) | 47 | if (!name) | 
| 44 | return NULL; | 48 | return NULL; | 
| @@ -46,11 +50,23 @@ static struct crypto_alg *crypto_alg_lookup(const char *name) | |||
| 46 | down_read(&crypto_alg_sem); | 50 | down_read(&crypto_alg_sem); | 
| 47 | 51 | ||
| 48 | list_for_each_entry(q, &crypto_alg_list, cra_list) { | 52 | list_for_each_entry(q, &crypto_alg_list, cra_list) { | 
| 49 | if (!(strcmp(q->cra_name, name))) { | 53 | int exact, fuzzy; | 
| 50 | if (crypto_alg_get(q)) | 54 | |
| 51 | alg = q; | 55 | exact = !strcmp(q->cra_driver_name, name); | 
| 56 | fuzzy = !strcmp(q->cra_name, name); | ||
| 57 | if (!exact && !(fuzzy && q->cra_priority > best)) | ||
| 58 | continue; | ||
| 59 | |||
| 60 | if (unlikely(!crypto_alg_get(q))) | ||
| 61 | continue; | ||
| 62 | |||
| 63 | best = q->cra_priority; | ||
| 64 | if (alg) | ||
| 65 | crypto_alg_put(alg); | ||
| 66 | alg = q; | ||
| 67 | |||
| 68 | if (exact) | ||
| 52 | break; | 69 | break; | 
| 53 | } | ||
| 54 | } | 70 | } | 
| 55 | 71 | ||
| 56 | up_read(&crypto_alg_sem); | 72 | up_read(&crypto_alg_sem); | 
| @@ -207,9 +223,26 @@ void crypto_free_tfm(struct crypto_tfm *tfm) | |||
| 207 | kfree(tfm); | 223 | kfree(tfm); | 
| 208 | } | 224 | } | 
| 209 | 225 | ||
| 226 | static inline int crypto_set_driver_name(struct crypto_alg *alg) | ||
| 227 | { | ||
| 228 | static const char suffix[] = "-generic"; | ||
| 229 | char *driver_name = (char *)alg->cra_driver_name; | ||
| 230 | int len; | ||
| 231 | |||
| 232 | if (*driver_name) | ||
| 233 | return 0; | ||
| 234 | |||
| 235 | len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | ||
| 236 | if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) | ||
| 237 | return -ENAMETOOLONG; | ||
| 238 | |||
| 239 | memcpy(driver_name + len, suffix, sizeof(suffix)); | ||
| 240 | return 0; | ||
| 241 | } | ||
| 242 | |||
| 210 | int crypto_register_alg(struct crypto_alg *alg) | 243 | int crypto_register_alg(struct crypto_alg *alg) | 
| 211 | { | 244 | { | 
| 212 | int ret = 0; | 245 | int ret; | 
| 213 | struct crypto_alg *q; | 246 | struct crypto_alg *q; | 
| 214 | 247 | ||
| 215 | if (alg->cra_alignmask & (alg->cra_alignmask + 1)) | 248 | if (alg->cra_alignmask & (alg->cra_alignmask + 1)) | 
| @@ -218,13 +251,20 @@ int crypto_register_alg(struct crypto_alg *alg) | |||
| 218 | if (alg->cra_alignmask & alg->cra_blocksize) | 251 | if (alg->cra_alignmask & alg->cra_blocksize) | 
| 219 | return -EINVAL; | 252 | return -EINVAL; | 
| 220 | 253 | ||
| 221 | if (alg->cra_blocksize > PAGE_SIZE) | 254 | if (alg->cra_blocksize > PAGE_SIZE / 8) | 
| 255 | return -EINVAL; | ||
| 256 | |||
| 257 | if (alg->cra_priority < 0) | ||
| 222 | return -EINVAL; | 258 | return -EINVAL; | 
| 223 | 259 | ||
| 260 | ret = crypto_set_driver_name(alg); | ||
| 261 | if (unlikely(ret)) | ||
| 262 | return ret; | ||
| 263 | |||
| 224 | down_write(&crypto_alg_sem); | 264 | down_write(&crypto_alg_sem); | 
| 225 | 265 | ||
| 226 | list_for_each_entry(q, &crypto_alg_list, cra_list) { | 266 | list_for_each_entry(q, &crypto_alg_list, cra_list) { | 
| 227 | if (!(strcmp(q->cra_name, alg->cra_name))) { | 267 | if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) { | 
| 228 | ret = -EEXIST; | 268 | ret = -EEXIST; | 
| 229 | goto out; | 269 | goto out; | 
| 230 | } | 270 | } | 
| diff --git a/crypto/blowfish.c b/crypto/blowfish.c index a8b29d54e7d8..7f710b201f20 100644 --- a/crypto/blowfish.c +++ b/crypto/blowfish.c | |||
| @@ -19,8 +19,10 @@ | |||
| 19 | #include <linux/init.h> | 19 | #include <linux/init.h> | 
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> | 
| 21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> | 
| 22 | #include <asm/byteorder.h> | ||
| 22 | #include <asm/scatterlist.h> | 23 | #include <asm/scatterlist.h> | 
| 23 | #include <linux/crypto.h> | 24 | #include <linux/crypto.h> | 
| 25 | #include <linux/types.h> | ||
| 24 | 26 | ||
| 25 | #define BF_BLOCK_SIZE 8 | 27 | #define BF_BLOCK_SIZE 8 | 
| 26 | #define BF_MIN_KEY_SIZE 4 | 28 | #define BF_MIN_KEY_SIZE 4 | 
| @@ -451,6 +453,7 @@ static struct crypto_alg alg = { | |||
| 451 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 453 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 452 | .cra_blocksize = BF_BLOCK_SIZE, | 454 | .cra_blocksize = BF_BLOCK_SIZE, | 
| 453 | .cra_ctxsize = sizeof(struct bf_ctx), | 455 | .cra_ctxsize = sizeof(struct bf_ctx), | 
| 456 | .cra_alignmask = 3, | ||
| 454 | .cra_module = THIS_MODULE, | 457 | .cra_module = THIS_MODULE, | 
| 455 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 458 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 
| 456 | .cra_u = { .cipher = { | 459 | .cra_u = { .cipher = { | 
| diff --git a/crypto/cast5.c b/crypto/cast5.c index bc42f42b4fe3..8834c8580c04 100644 --- a/crypto/cast5.c +++ b/crypto/cast5.c | |||
| @@ -21,11 +21,13 @@ | |||
| 21 | */ | 21 | */ | 
| 22 | 22 | ||
| 23 | 23 | ||
| 24 | #include <asm/byteorder.h> | ||
| 24 | #include <linux/init.h> | 25 | #include <linux/init.h> | 
| 25 | #include <linux/crypto.h> | 26 | #include <linux/crypto.h> | 
| 26 | #include <linux/module.h> | 27 | #include <linux/module.h> | 
| 27 | #include <linux/errno.h> | 28 | #include <linux/errno.h> | 
| 28 | #include <linux/string.h> | 29 | #include <linux/string.h> | 
| 30 | #include <linux/types.h> | ||
| 29 | 31 | ||
| 30 | #define CAST5_BLOCK_SIZE 8 | 32 | #define CAST5_BLOCK_SIZE 8 | 
| 31 | #define CAST5_MIN_KEY_SIZE 5 | 33 | #define CAST5_MIN_KEY_SIZE 5 | 
| @@ -578,6 +580,8 @@ static const u32 sb8[256] = { | |||
| 578 | static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | 580 | static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | 
| 579 | { | 581 | { | 
| 580 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 582 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 
| 583 | const __be32 *src = (const __be32 *)inbuf; | ||
| 584 | __be32 *dst = (__be32 *)outbuf; | ||
| 581 | u32 l, r, t; | 585 | u32 l, r, t; | 
| 582 | u32 I; /* used by the Fx macros */ | 586 | u32 I; /* used by the Fx macros */ | 
| 583 | u32 *Km; | 587 | u32 *Km; | 
| @@ -589,8 +593,8 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | |||
| 589 | /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and | 593 | /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and | 
| 590 | * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) | 594 | * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) | 
| 591 | */ | 595 | */ | 
| 592 | l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; | 596 | l = be32_to_cpu(src[0]); | 
| 593 | r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; | 597 | r = be32_to_cpu(src[1]); | 
| 594 | 598 | ||
| 595 | /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: | 599 | /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: | 
| 596 | * Li = Ri-1; | 600 | * Li = Ri-1; | 
| @@ -634,19 +638,15 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | |||
| 634 | 638 | ||
| 635 | /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and | 639 | /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and | 
| 636 | * concatenate to form the ciphertext.) */ | 640 | * concatenate to form the ciphertext.) */ | 
| 637 | outbuf[0] = (r >> 24) & 0xff; | 641 | dst[0] = cpu_to_be32(r); | 
| 638 | outbuf[1] = (r >> 16) & 0xff; | 642 | dst[1] = cpu_to_be32(l); | 
| 639 | outbuf[2] = (r >> 8) & 0xff; | ||
| 640 | outbuf[3] = r & 0xff; | ||
| 641 | outbuf[4] = (l >> 24) & 0xff; | ||
| 642 | outbuf[5] = (l >> 16) & 0xff; | ||
| 643 | outbuf[6] = (l >> 8) & 0xff; | ||
| 644 | outbuf[7] = l & 0xff; | ||
| 645 | } | 643 | } | 
| 646 | 644 | ||
| 647 | static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | 645 | static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | 
| 648 | { | 646 | { | 
| 649 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 647 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 
| 648 | const __be32 *src = (const __be32 *)inbuf; | ||
| 649 | __be32 *dst = (__be32 *)outbuf; | ||
| 650 | u32 l, r, t; | 650 | u32 l, r, t; | 
| 651 | u32 I; | 651 | u32 I; | 
| 652 | u32 *Km; | 652 | u32 *Km; | 
| @@ -655,8 +655,8 @@ static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | |||
| 655 | Km = c->Km; | 655 | Km = c->Km; | 
| 656 | Kr = c->Kr; | 656 | Kr = c->Kr; | 
| 657 | 657 | ||
| 658 | l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; | 658 | l = be32_to_cpu(src[0]); | 
| 659 | r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; | 659 | r = be32_to_cpu(src[1]); | 
| 660 | 660 | ||
| 661 | if (!(c->rr)) { | 661 | if (!(c->rr)) { | 
| 662 | t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); | 662 | t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); | 
| @@ -690,14 +690,8 @@ static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) | |||
| 690 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | 690 | t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); | 
| 691 | } | 691 | } | 
| 692 | 692 | ||
| 693 | outbuf[0] = (r >> 24) & 0xff; | 693 | dst[0] = cpu_to_be32(r); | 
| 694 | outbuf[1] = (r >> 16) & 0xff; | 694 | dst[1] = cpu_to_be32(l); | 
| 695 | outbuf[2] = (r >> 8) & 0xff; | ||
| 696 | outbuf[3] = r & 0xff; | ||
| 697 | outbuf[4] = (l >> 24) & 0xff; | ||
| 698 | outbuf[5] = (l >> 16) & 0xff; | ||
| 699 | outbuf[6] = (l >> 8) & 0xff; | ||
| 700 | outbuf[7] = l & 0xff; | ||
| 701 | } | 695 | } | 
| 702 | 696 | ||
| 703 | static void key_schedule(u32 * x, u32 * z, u32 * k) | 697 | static void key_schedule(u32 * x, u32 * z, u32 * k) | 
| @@ -782,7 +776,7 @@ cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags) | |||
| 782 | u32 x[4]; | 776 | u32 x[4]; | 
| 783 | u32 z[4]; | 777 | u32 z[4]; | 
| 784 | u32 k[16]; | 778 | u32 k[16]; | 
| 785 | u8 p_key[16]; | 779 | __be32 p_key[4]; | 
| 786 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 780 | struct cast5_ctx *c = (struct cast5_ctx *) ctx; | 
| 787 | 781 | ||
| 788 | if (key_len < 5 || key_len > 16) { | 782 | if (key_len < 5 || key_len > 16) { | 
| @@ -796,12 +790,10 @@ cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags) | |||
| 796 | memcpy(p_key, key, key_len); | 790 | memcpy(p_key, key, key_len); | 
| 797 | 791 | ||
| 798 | 792 | ||
| 799 | x[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; | 793 | x[0] = be32_to_cpu(p_key[0]); | 
| 800 | x[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; | 794 | x[1] = be32_to_cpu(p_key[1]); | 
| 801 | x[2] = | 795 | x[2] = be32_to_cpu(p_key[2]); | 
| 802 | p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; | 796 | x[3] = be32_to_cpu(p_key[3]); | 
| 803 | x[3] = | ||
| 804 | p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15]; | ||
| 805 | 797 | ||
| 806 | key_schedule(x, z, k); | 798 | key_schedule(x, z, k); | 
| 807 | for (i = 0; i < 16; i++) | 799 | for (i = 0; i < 16; i++) | 
| @@ -817,6 +809,7 @@ static struct crypto_alg alg = { | |||
| 817 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 809 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 818 | .cra_blocksize = CAST5_BLOCK_SIZE, | 810 | .cra_blocksize = CAST5_BLOCK_SIZE, | 
| 819 | .cra_ctxsize = sizeof(struct cast5_ctx), | 811 | .cra_ctxsize = sizeof(struct cast5_ctx), | 
| 812 | .cra_alignmask = 3, | ||
| 820 | .cra_module = THIS_MODULE, | 813 | .cra_module = THIS_MODULE, | 
| 821 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 814 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 
| 822 | .cra_u = { | 815 | .cra_u = { | 
| diff --git a/crypto/cast6.c b/crypto/cast6.c index 3eb081073423..9e28740ba775 100644 --- a/crypto/cast6.c +++ b/crypto/cast6.c | |||
| @@ -18,11 +18,13 @@ | |||
| 18 | */ | 18 | */ | 
| 19 | 19 | ||
| 20 | 20 | ||
| 21 | #include <asm/byteorder.h> | ||
| 21 | #include <linux/init.h> | 22 | #include <linux/init.h> | 
| 22 | #include <linux/crypto.h> | 23 | #include <linux/crypto.h> | 
| 23 | #include <linux/module.h> | 24 | #include <linux/module.h> | 
| 24 | #include <linux/errno.h> | 25 | #include <linux/errno.h> | 
| 25 | #include <linux/string.h> | 26 | #include <linux/string.h> | 
| 27 | #include <linux/types.h> | ||
| 26 | 28 | ||
| 27 | #define CAST6_BLOCK_SIZE 16 | 29 | #define CAST6_BLOCK_SIZE 16 | 
| 28 | #define CAST6_MIN_KEY_SIZE 16 | 30 | #define CAST6_MIN_KEY_SIZE 16 | 
| @@ -384,7 +386,7 @@ cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags) | |||
| 384 | { | 386 | { | 
| 385 | int i; | 387 | int i; | 
| 386 | u32 key[8]; | 388 | u32 key[8]; | 
| 387 | u8 p_key[32]; /* padded key */ | 389 | __be32 p_key[8]; /* padded key */ | 
| 388 | struct cast6_ctx *c = (struct cast6_ctx *) ctx; | 390 | struct cast6_ctx *c = (struct cast6_ctx *) ctx; | 
| 389 | 391 | ||
| 390 | if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { | 392 | if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { | 
| @@ -395,14 +397,14 @@ cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags) | |||
| 395 | memset (p_key, 0, 32); | 397 | memset (p_key, 0, 32); | 
| 396 | memcpy (p_key, in_key, key_len); | 398 | memcpy (p_key, in_key, key_len); | 
| 397 | 399 | ||
| 398 | key[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; /* A */ | 400 | key[0] = be32_to_cpu(p_key[0]); /* A */ | 
| 399 | key[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; /* B */ | 401 | key[1] = be32_to_cpu(p_key[1]); /* B */ | 
| 400 | key[2] = p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; /* C */ | 402 | key[2] = be32_to_cpu(p_key[2]); /* C */ | 
| 401 | key[3] = p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15]; /* D */ | 403 | key[3] = be32_to_cpu(p_key[3]); /* D */ | 
| 402 | key[4] = p_key[16] << 24 | p_key[17] << 16 | p_key[18] << 8 | p_key[19]; /* E */ | 404 | key[4] = be32_to_cpu(p_key[4]); /* E */ | 
| 403 | key[5] = p_key[20] << 24 | p_key[21] << 16 | p_key[22] << 8 | p_key[23]; /* F */ | 405 | key[5] = be32_to_cpu(p_key[5]); /* F */ | 
| 404 | key[6] = p_key[24] << 24 | p_key[25] << 16 | p_key[26] << 8 | p_key[27]; /* G */ | 406 | key[6] = be32_to_cpu(p_key[6]); /* G */ | 
| 405 | key[7] = p_key[28] << 24 | p_key[29] << 16 | p_key[30] << 8 | p_key[31]; /* H */ | 407 | key[7] = be32_to_cpu(p_key[7]); /* H */ | 
| 406 | 408 | ||
| 407 | 409 | ||
| 408 | 410 | ||
| @@ -444,14 +446,16 @@ static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) { | |||
| 444 | 446 | ||
| 445 | static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | 447 | static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | 
| 446 | struct cast6_ctx * c = (struct cast6_ctx *)ctx; | 448 | struct cast6_ctx * c = (struct cast6_ctx *)ctx; | 
| 449 | const __be32 *src = (const __be32 *)inbuf; | ||
| 450 | __be32 *dst = (__be32 *)outbuf; | ||
| 447 | u32 block[4]; | 451 | u32 block[4]; | 
| 448 | u32 * Km; | 452 | u32 * Km; | 
| 449 | u8 * Kr; | 453 | u8 * Kr; | 
| 450 | 454 | ||
| 451 | block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; | 455 | block[0] = be32_to_cpu(src[0]); | 
| 452 | block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; | 456 | block[1] = be32_to_cpu(src[1]); | 
| 453 | block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11]; | 457 | block[2] = be32_to_cpu(src[2]); | 
| 454 | block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15]; | 458 | block[3] = be32_to_cpu(src[3]); | 
| 455 | 459 | ||
| 456 | Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); | 460 | Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); | 
| 457 | Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); | 461 | Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); | 
| @@ -465,35 +469,25 @@ static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | |||
| 465 | Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); | 469 | Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); | 
| 466 | Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); | 470 | Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); | 
| 467 | Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); | 471 | Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); | 
| 468 | 472 | ||
| 469 | outbuf[0] = (block[0] >> 24) & 0xff; | 473 | dst[0] = cpu_to_be32(block[0]); | 
| 470 | outbuf[1] = (block[0] >> 16) & 0xff; | 474 | dst[1] = cpu_to_be32(block[1]); | 
| 471 | outbuf[2] = (block[0] >> 8) & 0xff; | 475 | dst[2] = cpu_to_be32(block[2]); | 
| 472 | outbuf[3] = block[0] & 0xff; | 476 | dst[3] = cpu_to_be32(block[3]); | 
| 473 | outbuf[4] = (block[1] >> 24) & 0xff; | ||
| 474 | outbuf[5] = (block[1] >> 16) & 0xff; | ||
| 475 | outbuf[6] = (block[1] >> 8) & 0xff; | ||
| 476 | outbuf[7] = block[1] & 0xff; | ||
| 477 | outbuf[8] = (block[2] >> 24) & 0xff; | ||
| 478 | outbuf[9] = (block[2] >> 16) & 0xff; | ||
| 479 | outbuf[10] = (block[2] >> 8) & 0xff; | ||
| 480 | outbuf[11] = block[2] & 0xff; | ||
| 481 | outbuf[12] = (block[3] >> 24) & 0xff; | ||
| 482 | outbuf[13] = (block[3] >> 16) & 0xff; | ||
| 483 | outbuf[14] = (block[3] >> 8) & 0xff; | ||
| 484 | outbuf[15] = block[3] & 0xff; | ||
| 485 | } | 477 | } | 
| 486 | 478 | ||
| 487 | static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | 479 | static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | 
| 488 | struct cast6_ctx * c = (struct cast6_ctx *)ctx; | 480 | struct cast6_ctx * c = (struct cast6_ctx *)ctx; | 
| 481 | const __be32 *src = (const __be32 *)inbuf; | ||
| 482 | __be32 *dst = (__be32 *)outbuf; | ||
| 489 | u32 block[4]; | 483 | u32 block[4]; | 
| 490 | u32 * Km; | 484 | u32 * Km; | 
| 491 | u8 * Kr; | 485 | u8 * Kr; | 
| 492 | 486 | ||
| 493 | block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; | 487 | block[0] = be32_to_cpu(src[0]); | 
| 494 | block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; | 488 | block[1] = be32_to_cpu(src[1]); | 
| 495 | block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11]; | 489 | block[2] = be32_to_cpu(src[2]); | 
| 496 | block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15]; | 490 | block[3] = be32_to_cpu(src[3]); | 
| 497 | 491 | ||
| 498 | Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); | 492 | Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); | 
| 499 | Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); | 493 | Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); | 
| @@ -508,22 +502,10 @@ static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { | |||
| 508 | Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); | 502 | Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); | 
| 509 | Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); | 503 | Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); | 
| 510 | 504 | ||
| 511 | outbuf[0] = (block[0] >> 24) & 0xff; | 505 | dst[0] = cpu_to_be32(block[0]); | 
| 512 | outbuf[1] = (block[0] >> 16) & 0xff; | 506 | dst[1] = cpu_to_be32(block[1]); | 
| 513 | outbuf[2] = (block[0] >> 8) & 0xff; | 507 | dst[2] = cpu_to_be32(block[2]); | 
| 514 | outbuf[3] = block[0] & 0xff; | 508 | dst[3] = cpu_to_be32(block[3]); | 
| 515 | outbuf[4] = (block[1] >> 24) & 0xff; | ||
| 516 | outbuf[5] = (block[1] >> 16) & 0xff; | ||
| 517 | outbuf[6] = (block[1] >> 8) & 0xff; | ||
| 518 | outbuf[7] = block[1] & 0xff; | ||
| 519 | outbuf[8] = (block[2] >> 24) & 0xff; | ||
| 520 | outbuf[9] = (block[2] >> 16) & 0xff; | ||
| 521 | outbuf[10] = (block[2] >> 8) & 0xff; | ||
| 522 | outbuf[11] = block[2] & 0xff; | ||
| 523 | outbuf[12] = (block[3] >> 24) & 0xff; | ||
| 524 | outbuf[13] = (block[3] >> 16) & 0xff; | ||
| 525 | outbuf[14] = (block[3] >> 8) & 0xff; | ||
| 526 | outbuf[15] = block[3] & 0xff; | ||
| 527 | } | 509 | } | 
| 528 | 510 | ||
| 529 | static struct crypto_alg alg = { | 511 | static struct crypto_alg alg = { | 
| @@ -531,6 +513,7 @@ static struct crypto_alg alg = { | |||
| 531 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 513 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 532 | .cra_blocksize = CAST6_BLOCK_SIZE, | 514 | .cra_blocksize = CAST6_BLOCK_SIZE, | 
| 533 | .cra_ctxsize = sizeof(struct cast6_ctx), | 515 | .cra_ctxsize = sizeof(struct cast6_ctx), | 
| 516 | .cra_alignmask = 3, | ||
| 534 | .cra_module = THIS_MODULE, | 517 | .cra_module = THIS_MODULE, | 
| 535 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 518 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 
| 536 | .cra_u = { | 519 | .cra_u = { | 
| diff --git a/crypto/cipher.c b/crypto/cipher.c index dfd4bcfc5975..65bcea0cd17c 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c | |||
| @@ -212,9 +212,10 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc, | |||
| 212 | struct crypto_tfm *tfm = desc->tfm; | 212 | struct crypto_tfm *tfm = desc->tfm; | 
| 213 | void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; | 213 | void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; | 
| 214 | int bsize = crypto_tfm_alg_blocksize(tfm); | 214 | int bsize = crypto_tfm_alg_blocksize(tfm); | 
| 215 | unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm); | ||
| 215 | 216 | ||
| 216 | u8 stack[src == dst ? bsize : 0]; | 217 | u8 stack[src == dst ? bsize + alignmask : 0]; | 
| 217 | u8 *buf = stack; | 218 | u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); | 
| 218 | u8 **dst_p = src == dst ? &buf : &dst; | 219 | u8 **dst_p = src == dst ? &buf : &dst; | 
| 219 | 220 | ||
| 220 | void (*fn)(void *, u8 *, const u8 *) = desc->crfn; | 221 | void (*fn)(void *, u8 *, const u8 *) = desc->crfn; | 
| diff --git a/crypto/crc32c.c b/crypto/crc32c.c index 256956cd9377..953362423a5c 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/string.h> | 16 | #include <linux/string.h> | 
| 17 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> | 
| 18 | #include <linux/crc32c.h> | 18 | #include <linux/crc32c.h> | 
| 19 | #include <linux/types.h> | ||
| 19 | #include <asm/byteorder.h> | 20 | #include <asm/byteorder.h> | 
| 20 | 21 | ||
| 21 | #define CHKSUM_BLOCK_SIZE 32 | 22 | #define CHKSUM_BLOCK_SIZE 32 | 
| diff --git a/crypto/des.c b/crypto/des.c index a3c863dddded..7bb548653dc6 100644 --- a/crypto/des.c +++ b/crypto/des.c | |||
| @@ -12,11 +12,13 @@ | |||
| 12 | * | 12 | * | 
| 13 | */ | 13 | */ | 
| 14 | 14 | ||
| 15 | #include <asm/byteorder.h> | ||
| 15 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> | 
| 16 | #include <linux/init.h> | 17 | #include <linux/init.h> | 
| 17 | #include <linux/module.h> | 18 | #include <linux/module.h> | 
| 18 | #include <linux/errno.h> | 19 | #include <linux/errno.h> | 
| 19 | #include <linux/crypto.h> | 20 | #include <linux/crypto.h> | 
| 21 | #include <linux/types.h> | ||
| 20 | 22 | ||
| 21 | #define DES_KEY_SIZE 8 | 23 | #define DES_KEY_SIZE 8 | 
| 22 | #define DES_EXPKEY_WORDS 32 | 24 | #define DES_EXPKEY_WORDS 32 | 
| @@ -947,6 +949,7 @@ static struct crypto_alg des_alg = { | |||
| 947 | .cra_blocksize = DES_BLOCK_SIZE, | 949 | .cra_blocksize = DES_BLOCK_SIZE, | 
| 948 | .cra_ctxsize = sizeof(struct des_ctx), | 950 | .cra_ctxsize = sizeof(struct des_ctx), | 
| 949 | .cra_module = THIS_MODULE, | 951 | .cra_module = THIS_MODULE, | 
| 952 | .cra_alignmask = 3, | ||
| 950 | .cra_list = LIST_HEAD_INIT(des_alg.cra_list), | 953 | .cra_list = LIST_HEAD_INIT(des_alg.cra_list), | 
| 951 | .cra_u = { .cipher = { | 954 | .cra_u = { .cipher = { | 
| 952 | .cia_min_keysize = DES_KEY_SIZE, | 955 | .cia_min_keysize = DES_KEY_SIZE, | 
| diff --git a/crypto/internal.h b/crypto/internal.h index 37aa652ce5ce..959e602909a6 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | * Cryptographic API. | 2 | * Cryptographic API. | 
| 3 | * | 3 | * | 
| 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 
| 5 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 5 | * | 6 | * | 
| 6 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it | 
| 7 | * under the terms of the GNU General Public License as published by the Free | 8 | * under the terms of the GNU General Public License as published by the Free | 
| @@ -16,10 +17,15 @@ | |||
| 16 | #include <linux/highmem.h> | 17 | #include <linux/highmem.h> | 
| 17 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> | 
| 18 | #include <linux/init.h> | 19 | #include <linux/init.h> | 
| 20 | #include <linux/list.h> | ||
| 19 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> | 
| 22 | #include <linux/rwsem.h> | ||
| 20 | #include <linux/slab.h> | 23 | #include <linux/slab.h> | 
| 21 | #include <asm/kmap_types.h> | 24 | #include <asm/kmap_types.h> | 
| 22 | 25 | ||
| 26 | extern struct list_head crypto_alg_list; | ||
| 27 | extern struct rw_semaphore crypto_alg_sem; | ||
| 28 | |||
| 23 | extern enum km_type crypto_km_types[]; | 29 | extern enum km_type crypto_km_types[]; | 
| 24 | 30 | ||
| 25 | static inline enum km_type crypto_kmap_type(int out) | 31 | static inline enum km_type crypto_kmap_type(int out) | 
| diff --git a/crypto/khazad.c b/crypto/khazad.c index 738cb0dd1e7c..807f2bf4ea24 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c | |||
| @@ -22,8 +22,10 @@ | |||
| 22 | #include <linux/init.h> | 22 | #include <linux/init.h> | 
| 23 | #include <linux/module.h> | 23 | #include <linux/module.h> | 
| 24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> | 
| 25 | #include <asm/byteorder.h> | ||
| 25 | #include <asm/scatterlist.h> | 26 | #include <asm/scatterlist.h> | 
| 26 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> | 
| 28 | #include <linux/types.h> | ||
| 27 | 29 | ||
| 28 | #define KHAZAD_KEY_SIZE 16 | 30 | #define KHAZAD_KEY_SIZE 16 | 
| 29 | #define KHAZAD_BLOCK_SIZE 8 | 31 | #define KHAZAD_BLOCK_SIZE 8 | 
| @@ -755,8 +757,8 @@ static const u64 c[KHAZAD_ROUNDS + 1] = { | |||
| 755 | static int khazad_setkey(void *ctx_arg, const u8 *in_key, | 757 | static int khazad_setkey(void *ctx_arg, const u8 *in_key, | 
| 756 | unsigned int key_len, u32 *flags) | 758 | unsigned int key_len, u32 *flags) | 
| 757 | { | 759 | { | 
| 758 | |||
| 759 | struct khazad_ctx *ctx = ctx_arg; | 760 | struct khazad_ctx *ctx = ctx_arg; | 
| 761 | const __be64 *key = (const __be64 *)in_key; | ||
| 760 | int r; | 762 | int r; | 
| 761 | const u64 *S = T7; | 763 | const u64 *S = T7; | 
| 762 | u64 K2, K1; | 764 | u64 K2, K1; | 
| @@ -767,22 +769,8 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key, | |||
| 767 | return -EINVAL; | 769 | return -EINVAL; | 
| 768 | } | 770 | } | 
| 769 | 771 | ||
| 770 | K2 = ((u64)in_key[ 0] << 56) ^ | 772 | K2 = be64_to_cpu(key[0]); | 
| 771 | ((u64)in_key[ 1] << 48) ^ | 773 | K1 = be64_to_cpu(key[1]); | 
| 772 | ((u64)in_key[ 2] << 40) ^ | ||
| 773 | ((u64)in_key[ 3] << 32) ^ | ||
| 774 | ((u64)in_key[ 4] << 24) ^ | ||
| 775 | ((u64)in_key[ 5] << 16) ^ | ||
| 776 | ((u64)in_key[ 6] << 8) ^ | ||
| 777 | ((u64)in_key[ 7] ); | ||
| 778 | K1 = ((u64)in_key[ 8] << 56) ^ | ||
| 779 | ((u64)in_key[ 9] << 48) ^ | ||
| 780 | ((u64)in_key[10] << 40) ^ | ||
| 781 | ((u64)in_key[11] << 32) ^ | ||
| 782 | ((u64)in_key[12] << 24) ^ | ||
| 783 | ((u64)in_key[13] << 16) ^ | ||
| 784 | ((u64)in_key[14] << 8) ^ | ||
| 785 | ((u64)in_key[15] ); | ||
| 786 | 774 | ||
| 787 | /* setup the encrypt key */ | 775 | /* setup the encrypt key */ | 
| 788 | for (r = 0; r <= KHAZAD_ROUNDS; r++) { | 776 | for (r = 0; r <= KHAZAD_ROUNDS; r++) { | 
| @@ -820,19 +808,12 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key, | |||
| 820 | static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], | 808 | static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], | 
| 821 | u8 *ciphertext, const u8 *plaintext) | 809 | u8 *ciphertext, const u8 *plaintext) | 
| 822 | { | 810 | { | 
| 823 | 811 | const __be64 *src = (const __be64 *)plaintext; | |
| 812 | __be64 *dst = (__be64 *)ciphertext; | ||
| 824 | int r; | 813 | int r; | 
| 825 | u64 state; | 814 | u64 state; | 
| 826 | 815 | ||
| 827 | state = ((u64)plaintext[0] << 56) ^ | 816 | state = be64_to_cpu(*src) ^ roundKey[0]; | 
| 828 | ((u64)plaintext[1] << 48) ^ | ||
| 829 | ((u64)plaintext[2] << 40) ^ | ||
| 830 | ((u64)plaintext[3] << 32) ^ | ||
| 831 | ((u64)plaintext[4] << 24) ^ | ||
| 832 | ((u64)plaintext[5] << 16) ^ | ||
| 833 | ((u64)plaintext[6] << 8) ^ | ||
| 834 | ((u64)plaintext[7] ) ^ | ||
| 835 | roundKey[0]; | ||
| 836 | 817 | ||
| 837 | for (r = 1; r < KHAZAD_ROUNDS; r++) { | 818 | for (r = 1; r < KHAZAD_ROUNDS; r++) { | 
| 838 | state = T0[(int)(state >> 56) ] ^ | 819 | state = T0[(int)(state >> 56) ] ^ | 
| @@ -856,15 +837,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], | |||
| 856 | (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ | 837 | (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ | 
| 857 | roundKey[KHAZAD_ROUNDS]; | 838 | roundKey[KHAZAD_ROUNDS]; | 
| 858 | 839 | ||
| 859 | ciphertext[0] = (u8)(state >> 56); | 840 | *dst = cpu_to_be64(state); | 
| 860 | ciphertext[1] = (u8)(state >> 48); | ||
| 861 | ciphertext[2] = (u8)(state >> 40); | ||
| 862 | ciphertext[3] = (u8)(state >> 32); | ||
| 863 | ciphertext[4] = (u8)(state >> 24); | ||
| 864 | ciphertext[5] = (u8)(state >> 16); | ||
| 865 | ciphertext[6] = (u8)(state >> 8); | ||
| 866 | ciphertext[7] = (u8)(state ); | ||
| 867 | |||
| 868 | } | 841 | } | 
| 869 | 842 | ||
| 870 | static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 843 | static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 
| @@ -884,6 +857,7 @@ static struct crypto_alg khazad_alg = { | |||
| 884 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 857 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 885 | .cra_blocksize = KHAZAD_BLOCK_SIZE, | 858 | .cra_blocksize = KHAZAD_BLOCK_SIZE, | 
| 886 | .cra_ctxsize = sizeof (struct khazad_ctx), | 859 | .cra_ctxsize = sizeof (struct khazad_ctx), | 
| 860 | .cra_alignmask = 7, | ||
| 887 | .cra_module = THIS_MODULE, | 861 | .cra_module = THIS_MODULE, | 
| 888 | .cra_list = LIST_HEAD_INIT(khazad_alg.cra_list), | 862 | .cra_list = LIST_HEAD_INIT(khazad_alg.cra_list), | 
| 889 | .cra_u = { .cipher = { | 863 | .cra_u = { .cipher = { | 
| diff --git a/crypto/md4.c b/crypto/md4.c index bef6a9e5ac9b..a2d6df5c0f8c 100644 --- a/crypto/md4.c +++ b/crypto/md4.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/crypto.h> | 24 | #include <linux/crypto.h> | 
| 25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> | 
| 26 | #include <linux/string.h> | 26 | #include <linux/string.h> | 
| 27 | #include <linux/types.h> | ||
| 27 | #include <asm/byteorder.h> | 28 | #include <asm/byteorder.h> | 
| 28 | 29 | ||
| 29 | #define MD4_DIGEST_SIZE 16 | 30 | #define MD4_DIGEST_SIZE 16 | 
| diff --git a/crypto/md5.c b/crypto/md5.c index 1ed45f9c263e..7f041aef5da2 100644 --- a/crypto/md5.c +++ b/crypto/md5.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/module.h> | 19 | #include <linux/module.h> | 
| 20 | #include <linux/string.h> | 20 | #include <linux/string.h> | 
| 21 | #include <linux/crypto.h> | 21 | #include <linux/crypto.h> | 
| 22 | #include <linux/types.h> | ||
| 22 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> | 
| 23 | 24 | ||
| 24 | #define MD5_DIGEST_SIZE 16 | 25 | #define MD5_DIGEST_SIZE 16 | 
| diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index a470bcb3693e..4f6ab23e14ad 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c | |||
| @@ -10,10 +10,12 @@ | |||
| 10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. | 
| 11 | */ | 11 | */ | 
| 12 | 12 | ||
| 13 | #include <asm/byteorder.h> | ||
| 13 | #include <linux/init.h> | 14 | #include <linux/init.h> | 
| 14 | #include <linux/module.h> | 15 | #include <linux/module.h> | 
| 15 | #include <linux/string.h> | 16 | #include <linux/string.h> | 
| 16 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> | 
| 18 | #include <linux/types.h> | ||
| 17 | 19 | ||
| 18 | 20 | ||
| 19 | struct michael_mic_ctx { | 21 | struct michael_mic_ctx { | 
| @@ -43,21 +45,6 @@ do { \ | |||
| 43 | } while (0) | 45 | } while (0) | 
| 44 | 46 | ||
| 45 | 47 | ||
| 46 | static inline u32 get_le32(const u8 *p) | ||
| 47 | { | ||
| 48 | return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); | ||
| 49 | } | ||
| 50 | |||
| 51 | |||
| 52 | static inline void put_le32(u8 *p, u32 v) | ||
| 53 | { | ||
| 54 | p[0] = v; | ||
| 55 | p[1] = v >> 8; | ||
| 56 | p[2] = v >> 16; | ||
| 57 | p[3] = v >> 24; | ||
| 58 | } | ||
| 59 | |||
| 60 | |||
| 61 | static void michael_init(void *ctx) | 48 | static void michael_init(void *ctx) | 
| 62 | { | 49 | { | 
| 63 | struct michael_mic_ctx *mctx = ctx; | 50 | struct michael_mic_ctx *mctx = ctx; | 
| @@ -68,6 +55,7 @@ static void michael_init(void *ctx) | |||
| 68 | static void michael_update(void *ctx, const u8 *data, unsigned int len) | 55 | static void michael_update(void *ctx, const u8 *data, unsigned int len) | 
| 69 | { | 56 | { | 
| 70 | struct michael_mic_ctx *mctx = ctx; | 57 | struct michael_mic_ctx *mctx = ctx; | 
| 58 | const __le32 *src; | ||
| 71 | 59 | ||
| 72 | if (mctx->pending_len) { | 60 | if (mctx->pending_len) { | 
| 73 | int flen = 4 - mctx->pending_len; | 61 | int flen = 4 - mctx->pending_len; | 
| @@ -81,21 +69,23 @@ static void michael_update(void *ctx, const u8 *data, unsigned int len) | |||
| 81 | if (mctx->pending_len < 4) | 69 | if (mctx->pending_len < 4) | 
| 82 | return; | 70 | return; | 
| 83 | 71 | ||
| 84 | mctx->l ^= get_le32(mctx->pending); | 72 | src = (const __le32 *)mctx->pending; | 
| 73 | mctx->l ^= le32_to_cpup(src); | ||
| 85 | michael_block(mctx->l, mctx->r); | 74 | michael_block(mctx->l, mctx->r); | 
| 86 | mctx->pending_len = 0; | 75 | mctx->pending_len = 0; | 
| 87 | } | 76 | } | 
| 88 | 77 | ||
| 78 | src = (const __le32 *)data; | ||
| 79 | |||
| 89 | while (len >= 4) { | 80 | while (len >= 4) { | 
| 90 | mctx->l ^= get_le32(data); | 81 | mctx->l ^= le32_to_cpup(src++); | 
| 91 | michael_block(mctx->l, mctx->r); | 82 | michael_block(mctx->l, mctx->r); | 
| 92 | data += 4; | ||
| 93 | len -= 4; | 83 | len -= 4; | 
| 94 | } | 84 | } | 
| 95 | 85 | ||
| 96 | if (len > 0) { | 86 | if (len > 0) { | 
| 97 | mctx->pending_len = len; | 87 | mctx->pending_len = len; | 
| 98 | memcpy(mctx->pending, data, len); | 88 | memcpy(mctx->pending, src, len); | 
| 99 | } | 89 | } | 
| 100 | } | 90 | } | 
| 101 | 91 | ||
| @@ -104,6 +94,7 @@ static void michael_final(void *ctx, u8 *out) | |||
| 104 | { | 94 | { | 
| 105 | struct michael_mic_ctx *mctx = ctx; | 95 | struct michael_mic_ctx *mctx = ctx; | 
| 106 | u8 *data = mctx->pending; | 96 | u8 *data = mctx->pending; | 
| 97 | __le32 *dst = (__le32 *)out; | ||
| 107 | 98 | ||
| 108 | /* Last block and padding (0x5a, 4..7 x 0) */ | 99 | /* Last block and padding (0x5a, 4..7 x 0) */ | 
| 109 | switch (mctx->pending_len) { | 100 | switch (mctx->pending_len) { | 
| @@ -125,8 +116,8 @@ static void michael_final(void *ctx, u8 *out) | |||
| 125 | /* l ^= 0; */ | 116 | /* l ^= 0; */ | 
| 126 | michael_block(mctx->l, mctx->r); | 117 | michael_block(mctx->l, mctx->r); | 
| 127 | 118 | ||
| 128 | put_le32(out, mctx->l); | 119 | dst[0] = cpu_to_le32(mctx->l); | 
| 129 | put_le32(out + 4, mctx->r); | 120 | dst[1] = cpu_to_le32(mctx->r); | 
| 130 | } | 121 | } | 
| 131 | 122 | ||
| 132 | 123 | ||
| @@ -134,13 +125,16 @@ static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen, | |||
| 134 | u32 *flags) | 125 | u32 *flags) | 
| 135 | { | 126 | { | 
| 136 | struct michael_mic_ctx *mctx = ctx; | 127 | struct michael_mic_ctx *mctx = ctx; | 
| 128 | const __le32 *data = (const __le32 *)key; | ||
| 129 | |||
| 137 | if (keylen != 8) { | 130 | if (keylen != 8) { | 
| 138 | if (flags) | 131 | if (flags) | 
| 139 | *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; | 132 | *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; | 
| 140 | return -EINVAL; | 133 | return -EINVAL; | 
| 141 | } | 134 | } | 
| 142 | mctx->l = get_le32(key); | 135 | |
| 143 | mctx->r = get_le32(key + 4); | 136 | mctx->l = le32_to_cpu(data[0]); | 
| 137 | mctx->r = le32_to_cpu(data[1]); | ||
| 144 | return 0; | 138 | return 0; | 
| 145 | } | 139 | } | 
| 146 | 140 | ||
| diff --git a/crypto/proc.c b/crypto/proc.c index 630ba91c08f1..c0a5dd7ce2cc 100644 --- a/crypto/proc.c +++ b/crypto/proc.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Procfs information. | 4 | * Procfs information. | 
| 5 | * | 5 | * | 
| 6 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 6 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 
| 7 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 7 | * | 8 | * | 
| 8 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it | 
| 9 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free | 
| @@ -18,9 +19,6 @@ | |||
| 18 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> | 
| 19 | #include "internal.h" | 20 | #include "internal.h" | 
| 20 | 21 | ||
| 21 | extern struct list_head crypto_alg_list; | ||
| 22 | extern struct rw_semaphore crypto_alg_sem; | ||
| 23 | |||
| 24 | static void *c_start(struct seq_file *m, loff_t *pos) | 22 | static void *c_start(struct seq_file *m, loff_t *pos) | 
| 25 | { | 23 | { | 
| 26 | struct list_head *v; | 24 | struct list_head *v; | 
| @@ -53,7 +51,9 @@ static int c_show(struct seq_file *m, void *p) | |||
| 53 | struct crypto_alg *alg = (struct crypto_alg *)p; | 51 | struct crypto_alg *alg = (struct crypto_alg *)p; | 
| 54 | 52 | ||
| 55 | seq_printf(m, "name : %s\n", alg->cra_name); | 53 | seq_printf(m, "name : %s\n", alg->cra_name); | 
| 54 | seq_printf(m, "driver : %s\n", alg->cra_driver_name); | ||
| 56 | seq_printf(m, "module : %s\n", module_name(alg->cra_module)); | 55 | seq_printf(m, "module : %s\n", module_name(alg->cra_module)); | 
| 56 | seq_printf(m, "priority : %d\n", alg->cra_priority); | ||
| 57 | 57 | ||
| 58 | switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | 58 | switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | 
| 59 | case CRYPTO_ALG_TYPE_CIPHER: | 59 | case CRYPTO_ALG_TYPE_CIPHER: | 
| diff --git a/crypto/serpent.c b/crypto/serpent.c index 3cf2c5067eea..52ad1a492620 100644 --- a/crypto/serpent.c +++ b/crypto/serpent.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> | 
| 21 | #include <asm/byteorder.h> | 21 | #include <asm/byteorder.h> | 
| 22 | #include <linux/crypto.h> | 22 | #include <linux/crypto.h> | 
| 23 | #include <linux/types.h> | ||
| 23 | 24 | ||
| 24 | /* Key is padded to the maximum of 256 bits before round key generation. | 25 | /* Key is padded to the maximum of 256 bits before round key generation. | 
| 25 | * Any key length <= 256 bits (32 bytes) is allowed by the algorithm. | 26 | * Any key length <= 256 bits (32 bytes) is allowed by the algorithm. | 
| @@ -552,6 +553,7 @@ static struct crypto_alg tnepres_alg = { | |||
| 552 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 553 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 553 | .cra_blocksize = SERPENT_BLOCK_SIZE, | 554 | .cra_blocksize = SERPENT_BLOCK_SIZE, | 
| 554 | .cra_ctxsize = sizeof(struct serpent_ctx), | 555 | .cra_ctxsize = sizeof(struct serpent_ctx), | 
| 556 | .cra_alignmask = 3, | ||
| 555 | .cra_module = THIS_MODULE, | 557 | .cra_module = THIS_MODULE, | 
| 556 | .cra_list = LIST_HEAD_INIT(serpent_alg.cra_list), | 558 | .cra_list = LIST_HEAD_INIT(serpent_alg.cra_list), | 
| 557 | .cra_u = { .cipher = { | 559 | .cra_u = { .cipher = { | 
| diff --git a/crypto/sha1.c b/crypto/sha1.c index 4016f3b8ce9b..21571ed35b7e 100644 --- a/crypto/sha1.c +++ b/crypto/sha1.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> | 
| 22 | #include <linux/crypto.h> | 22 | #include <linux/crypto.h> | 
| 23 | #include <linux/cryptohash.h> | 23 | #include <linux/cryptohash.h> | 
| 24 | #include <linux/types.h> | ||
| 24 | #include <asm/scatterlist.h> | 25 | #include <asm/scatterlist.h> | 
| 25 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> | 
| 26 | 27 | ||
| @@ -48,23 +49,33 @@ static void sha1_init(void *ctx) | |||
| 48 | static void sha1_update(void *ctx, const u8 *data, unsigned int len) | 49 | static void sha1_update(void *ctx, const u8 *data, unsigned int len) | 
| 49 | { | 50 | { | 
| 50 | struct sha1_ctx *sctx = ctx; | 51 | struct sha1_ctx *sctx = ctx; | 
| 51 | unsigned int i, j; | 52 | unsigned int partial, done; | 
| 52 | u32 temp[SHA_WORKSPACE_WORDS]; | 53 | const u8 *src; | 
| 53 | 54 | ||
| 54 | j = (sctx->count >> 3) & 0x3f; | 55 | partial = sctx->count & 0x3f; | 
| 55 | sctx->count += len << 3; | 56 | sctx->count += len; | 
| 57 | done = 0; | ||
| 58 | src = data; | ||
| 56 | 59 | ||
| 57 | if ((j + len) > 63) { | 60 | if ((partial + len) > 63) { | 
| 58 | memcpy(&sctx->buffer[j], data, (i = 64-j)); | 61 | u32 temp[SHA_WORKSPACE_WORDS]; | 
| 59 | sha_transform(sctx->state, sctx->buffer, temp); | 62 | |
| 60 | for ( ; i + 63 < len; i += 64) { | 63 | if (partial) { | 
| 61 | sha_transform(sctx->state, &data[i], temp); | 64 | done = -partial; | 
| 65 | memcpy(sctx->buffer + partial, data, done + 64); | ||
| 66 | src = sctx->buffer; | ||
| 62 | } | 67 | } | 
| 63 | j = 0; | 68 | |
| 69 | do { | ||
| 70 | sha_transform(sctx->state, src, temp); | ||
| 71 | done += 64; | ||
| 72 | src = data + done; | ||
| 73 | } while (done + 63 < len); | ||
| 74 | |||
| 75 | memset(temp, 0, sizeof(temp)); | ||
| 76 | partial = 0; | ||
| 64 | } | 77 | } | 
| 65 | else i = 0; | 78 | memcpy(sctx->buffer + partial, src, len - done); | 
| 66 | memset(temp, 0, sizeof(temp)); | ||
| 67 | memcpy(&sctx->buffer[j], &data[i], len - i); | ||
| 68 | } | 79 | } | 
| 69 | 80 | ||
| 70 | 81 | ||
| @@ -72,37 +83,24 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len) | |||
| 72 | static void sha1_final(void* ctx, u8 *out) | 83 | static void sha1_final(void* ctx, u8 *out) | 
| 73 | { | 84 | { | 
| 74 | struct sha1_ctx *sctx = ctx; | 85 | struct sha1_ctx *sctx = ctx; | 
| 75 | u32 i, j, index, padlen; | 86 | __be32 *dst = (__be32 *)out; | 
| 76 | u64 t; | 87 | u32 i, index, padlen; | 
| 77 | u8 bits[8] = { 0, }; | 88 | __be64 bits; | 
| 78 | static const u8 padding[64] = { 0x80, }; | 89 | static const u8 padding[64] = { 0x80, }; | 
| 79 | 90 | ||
| 80 | t = sctx->count; | 91 | bits = cpu_to_be64(sctx->count << 3); | 
| 81 | bits[7] = 0xff & t; t>>=8; | ||
| 82 | bits[6] = 0xff & t; t>>=8; | ||
| 83 | bits[5] = 0xff & t; t>>=8; | ||
| 84 | bits[4] = 0xff & t; t>>=8; | ||
| 85 | bits[3] = 0xff & t; t>>=8; | ||
| 86 | bits[2] = 0xff & t; t>>=8; | ||
| 87 | bits[1] = 0xff & t; t>>=8; | ||
| 88 | bits[0] = 0xff & t; | ||
| 89 | 92 | ||
| 90 | /* Pad out to 56 mod 64 */ | 93 | /* Pad out to 56 mod 64 */ | 
| 91 | index = (sctx->count >> 3) & 0x3f; | 94 | index = sctx->count & 0x3f; | 
| 92 | padlen = (index < 56) ? (56 - index) : ((64+56) - index); | 95 | padlen = (index < 56) ? (56 - index) : ((64+56) - index); | 
| 93 | sha1_update(sctx, padding, padlen); | 96 | sha1_update(sctx, padding, padlen); | 
| 94 | 97 | ||
| 95 | /* Append length */ | 98 | /* Append length */ | 
| 96 | sha1_update(sctx, bits, sizeof bits); | 99 | sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); | 
| 97 | 100 | ||
| 98 | /* Store state in digest */ | 101 | /* Store state in digest */ | 
| 99 | for (i = j = 0; i < 5; i++, j += 4) { | 102 | for (i = 0; i < 5; i++) | 
| 100 | u32 t2 = sctx->state[i]; | 103 | dst[i] = cpu_to_be32(sctx->state[i]); | 
| 101 | out[j+3] = t2 & 0xff; t2>>=8; | ||
| 102 | out[j+2] = t2 & 0xff; t2>>=8; | ||
| 103 | out[j+1] = t2 & 0xff; t2>>=8; | ||
| 104 | out[j ] = t2 & 0xff; | ||
| 105 | } | ||
| 106 | 104 | ||
| 107 | /* Wipe context */ | 105 | /* Wipe context */ | 
| 108 | memset(sctx, 0, sizeof *sctx); | 106 | memset(sctx, 0, sizeof *sctx); | 
| diff --git a/crypto/sha256.c b/crypto/sha256.c index c78da50a9b7a..9d5ef674d6a9 100644 --- a/crypto/sha256.c +++ b/crypto/sha256.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> | 
| 21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> | 
| 22 | #include <linux/crypto.h> | 22 | #include <linux/crypto.h> | 
| 23 | #include <linux/types.h> | ||
| 23 | #include <asm/scatterlist.h> | 24 | #include <asm/scatterlist.h> | 
| 24 | #include <asm/byteorder.h> | 25 | #include <asm/byteorder.h> | 
| 25 | 26 | ||
| @@ -279,22 +280,15 @@ static void sha256_update(void *ctx, const u8 *data, unsigned int len) | |||
| 279 | static void sha256_final(void* ctx, u8 *out) | 280 | static void sha256_final(void* ctx, u8 *out) | 
| 280 | { | 281 | { | 
| 281 | struct sha256_ctx *sctx = ctx; | 282 | struct sha256_ctx *sctx = ctx; | 
| 282 | u8 bits[8]; | 283 | __be32 *dst = (__be32 *)out; | 
| 283 | unsigned int index, pad_len, t; | 284 | __be32 bits[2]; | 
| 284 | int i, j; | 285 | unsigned int index, pad_len; | 
| 286 | int i; | ||
| 285 | static const u8 padding[64] = { 0x80, }; | 287 | static const u8 padding[64] = { 0x80, }; | 
| 286 | 288 | ||
| 287 | /* Save number of bits */ | 289 | /* Save number of bits */ | 
| 288 | t = sctx->count[0]; | 290 | bits[1] = cpu_to_be32(sctx->count[0]); | 
| 289 | bits[7] = t; t >>= 8; | 291 | bits[0] = cpu_to_be32(sctx->count[1]); | 
| 290 | bits[6] = t; t >>= 8; | ||
| 291 | bits[5] = t; t >>= 8; | ||
| 292 | bits[4] = t; | ||
| 293 | t = sctx->count[1]; | ||
| 294 | bits[3] = t; t >>= 8; | ||
| 295 | bits[2] = t; t >>= 8; | ||
| 296 | bits[1] = t; t >>= 8; | ||
| 297 | bits[0] = t; | ||
| 298 | 292 | ||
| 299 | /* Pad out to 56 mod 64. */ | 293 | /* Pad out to 56 mod 64. */ | 
| 300 | index = (sctx->count[0] >> 3) & 0x3f; | 294 | index = (sctx->count[0] >> 3) & 0x3f; | 
| @@ -302,16 +296,11 @@ static void sha256_final(void* ctx, u8 *out) | |||
| 302 | sha256_update(sctx, padding, pad_len); | 296 | sha256_update(sctx, padding, pad_len); | 
| 303 | 297 | ||
| 304 | /* Append length (before padding) */ | 298 | /* Append length (before padding) */ | 
| 305 | sha256_update(sctx, bits, 8); | 299 | sha256_update(sctx, (const u8 *)bits, sizeof(bits)); | 
| 306 | 300 | ||
| 307 | /* Store state in digest */ | 301 | /* Store state in digest */ | 
| 308 | for (i = j = 0; i < 8; i++, j += 4) { | 302 | for (i = 0; i < 8; i++) | 
| 309 | t = sctx->state[i]; | 303 | dst[i] = cpu_to_be32(sctx->state[i]); | 
| 310 | out[j+3] = t; t >>= 8; | ||
| 311 | out[j+2] = t; t >>= 8; | ||
| 312 | out[j+1] = t; t >>= 8; | ||
| 313 | out[j ] = t; | ||
| 314 | } | ||
| 315 | 304 | ||
| 316 | /* Zeroize sensitive information. */ | 305 | /* Zeroize sensitive information. */ | 
| 317 | memset(sctx, 0, sizeof(*sctx)); | 306 | memset(sctx, 0, sizeof(*sctx)); | 
| diff --git a/crypto/sha512.c b/crypto/sha512.c index c663438322e9..3e6e9392310c 100644 --- a/crypto/sha512.c +++ b/crypto/sha512.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> | 
| 18 | #include <linux/init.h> | 18 | #include <linux/init.h> | 
| 19 | #include <linux/crypto.h> | 19 | #include <linux/crypto.h> | 
| 20 | #include <linux/types.h> | ||
| 20 | 21 | ||
| 21 | #include <asm/scatterlist.h> | 22 | #include <asm/scatterlist.h> | 
| 22 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> | 
| @@ -235,39 +236,17 @@ static void | |||
| 235 | sha512_final(void *ctx, u8 *hash) | 236 | sha512_final(void *ctx, u8 *hash) | 
| 236 | { | 237 | { | 
| 237 | struct sha512_ctx *sctx = ctx; | 238 | struct sha512_ctx *sctx = ctx; | 
| 238 | |||
| 239 | static u8 padding[128] = { 0x80, }; | 239 | static u8 padding[128] = { 0x80, }; | 
| 240 | 240 | __be64 *dst = (__be64 *)hash; | |
| 241 | u32 t; | 241 | __be32 bits[4]; | 
| 242 | u64 t2; | ||
| 243 | u8 bits[128]; | ||
| 244 | unsigned int index, pad_len; | 242 | unsigned int index, pad_len; | 
| 245 | int i, j; | 243 | int i; | 
| 246 | |||
| 247 | index = pad_len = t = i = j = 0; | ||
| 248 | t2 = 0; | ||
| 249 | 244 | ||
| 250 | /* Save number of bits */ | 245 | /* Save number of bits */ | 
| 251 | t = sctx->count[0]; | 246 | bits[3] = cpu_to_be32(sctx->count[0]); | 
| 252 | bits[15] = t; t>>=8; | 247 | bits[2] = cpu_to_be32(sctx->count[1]); | 
| 253 | bits[14] = t; t>>=8; | 248 | bits[1] = cpu_to_be32(sctx->count[2]); | 
| 254 | bits[13] = t; t>>=8; | 249 | bits[0] = cpu_to_be32(sctx->count[3]); | 
| 255 | bits[12] = t; | ||
| 256 | t = sctx->count[1]; | ||
| 257 | bits[11] = t; t>>=8; | ||
| 258 | bits[10] = t; t>>=8; | ||
| 259 | bits[9 ] = t; t>>=8; | ||
| 260 | bits[8 ] = t; | ||
| 261 | t = sctx->count[2]; | ||
| 262 | bits[7 ] = t; t>>=8; | ||
| 263 | bits[6 ] = t; t>>=8; | ||
| 264 | bits[5 ] = t; t>>=8; | ||
| 265 | bits[4 ] = t; | ||
| 266 | t = sctx->count[3]; | ||
| 267 | bits[3 ] = t; t>>=8; | ||
| 268 | bits[2 ] = t; t>>=8; | ||
| 269 | bits[1 ] = t; t>>=8; | ||
| 270 | bits[0 ] = t; | ||
| 271 | 250 | ||
| 272 | /* Pad out to 112 mod 128. */ | 251 | /* Pad out to 112 mod 128. */ | 
| 273 | index = (sctx->count[0] >> 3) & 0x7f; | 252 | index = (sctx->count[0] >> 3) & 0x7f; | 
| @@ -275,21 +254,12 @@ sha512_final(void *ctx, u8 *hash) | |||
| 275 | sha512_update(sctx, padding, pad_len); | 254 | sha512_update(sctx, padding, pad_len); | 
| 276 | 255 | ||
| 277 | /* Append length (before padding) */ | 256 | /* Append length (before padding) */ | 
| 278 | sha512_update(sctx, bits, 16); | 257 | sha512_update(sctx, (const u8 *)bits, sizeof(bits)); | 
| 279 | 258 | ||
| 280 | /* Store state in digest */ | 259 | /* Store state in digest */ | 
| 281 | for (i = j = 0; i < 8; i++, j += 8) { | 260 | for (i = 0; i < 8; i++) | 
| 282 | t2 = sctx->state[i]; | 261 | dst[i] = cpu_to_be64(sctx->state[i]); | 
| 283 | hash[j+7] = (char)t2 & 0xff; t2>>=8; | 262 | |
| 284 | hash[j+6] = (char)t2 & 0xff; t2>>=8; | ||
| 285 | hash[j+5] = (char)t2 & 0xff; t2>>=8; | ||
| 286 | hash[j+4] = (char)t2 & 0xff; t2>>=8; | ||
| 287 | hash[j+3] = (char)t2 & 0xff; t2>>=8; | ||
| 288 | hash[j+2] = (char)t2 & 0xff; t2>>=8; | ||
| 289 | hash[j+1] = (char)t2 & 0xff; t2>>=8; | ||
| 290 | hash[j ] = (char)t2 & 0xff; | ||
| 291 | } | ||
| 292 | |||
| 293 | /* Zeroize sensitive information. */ | 263 | /* Zeroize sensitive information. */ | 
| 294 | memset(sctx, 0, sizeof(struct sha512_ctx)); | 264 | memset(sctx, 0, sizeof(struct sha512_ctx)); | 
| 295 | } | 265 | } | 
| diff --git a/crypto/tea.c b/crypto/tea.c index 5924efdd3a16..a6a02b30e470 100644 --- a/crypto/tea.c +++ b/crypto/tea.c | |||
| @@ -22,8 +22,10 @@ | |||
| 22 | #include <linux/init.h> | 22 | #include <linux/init.h> | 
| 23 | #include <linux/module.h> | 23 | #include <linux/module.h> | 
| 24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> | 
| 25 | #include <asm/byteorder.h> | ||
| 25 | #include <asm/scatterlist.h> | 26 | #include <asm/scatterlist.h> | 
| 26 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> | 
| 28 | #include <linux/types.h> | ||
| 27 | 29 | ||
| 28 | #define TEA_KEY_SIZE 16 | 30 | #define TEA_KEY_SIZE 16 | 
| 29 | #define TEA_BLOCK_SIZE 8 | 31 | #define TEA_BLOCK_SIZE 8 | 
| @@ -35,9 +37,6 @@ | |||
| 35 | #define XTEA_ROUNDS 32 | 37 | #define XTEA_ROUNDS 32 | 
| 36 | #define XTEA_DELTA 0x9e3779b9 | 38 | #define XTEA_DELTA 0x9e3779b9 | 
| 37 | 39 | ||
| 38 | #define u32_in(x) le32_to_cpu(*(const __le32 *)(x)) | ||
| 39 | #define u32_out(to, from) (*(__le32 *)(to) = cpu_to_le32(from)) | ||
| 40 | |||
| 41 | struct tea_ctx { | 40 | struct tea_ctx { | 
| 42 | u32 KEY[4]; | 41 | u32 KEY[4]; | 
| 43 | }; | 42 | }; | 
| @@ -49,8 +48,8 @@ struct xtea_ctx { | |||
| 49 | static int tea_setkey(void *ctx_arg, const u8 *in_key, | 48 | static int tea_setkey(void *ctx_arg, const u8 *in_key, | 
| 50 | unsigned int key_len, u32 *flags) | 49 | unsigned int key_len, u32 *flags) | 
| 51 | { | 50 | { | 
| 52 | |||
| 53 | struct tea_ctx *ctx = ctx_arg; | 51 | struct tea_ctx *ctx = ctx_arg; | 
| 52 | const __le32 *key = (const __le32 *)in_key; | ||
| 54 | 53 | ||
| 55 | if (key_len != 16) | 54 | if (key_len != 16) | 
| 56 | { | 55 | { | 
| @@ -58,10 +57,10 @@ static int tea_setkey(void *ctx_arg, const u8 *in_key, | |||
| 58 | return -EINVAL; | 57 | return -EINVAL; | 
| 59 | } | 58 | } | 
| 60 | 59 | ||
| 61 | ctx->KEY[0] = u32_in (in_key); | 60 | ctx->KEY[0] = le32_to_cpu(key[0]); | 
| 62 | ctx->KEY[1] = u32_in (in_key + 4); | 61 | ctx->KEY[1] = le32_to_cpu(key[1]); | 
| 63 | ctx->KEY[2] = u32_in (in_key + 8); | 62 | ctx->KEY[2] = le32_to_cpu(key[2]); | 
| 64 | ctx->KEY[3] = u32_in (in_key + 12); | 63 | ctx->KEY[3] = le32_to_cpu(key[3]); | 
| 65 | 64 | ||
| 66 | return 0; | 65 | return 0; | 
| 67 | 66 | ||
| @@ -73,9 +72,11 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
| 73 | u32 k0, k1, k2, k3; | 72 | u32 k0, k1, k2, k3; | 
| 74 | 73 | ||
| 75 | struct tea_ctx *ctx = ctx_arg; | 74 | struct tea_ctx *ctx = ctx_arg; | 
| 75 | const __le32 *in = (const __le32 *)src; | ||
| 76 | __le32 *out = (__le32 *)dst; | ||
| 76 | 77 | ||
| 77 | y = u32_in (src); | 78 | y = le32_to_cpu(in[0]); | 
| 78 | z = u32_in (src + 4); | 79 | z = le32_to_cpu(in[1]); | 
| 79 | 80 | ||
| 80 | k0 = ctx->KEY[0]; | 81 | k0 = ctx->KEY[0]; | 
| 81 | k1 = ctx->KEY[1]; | 82 | k1 = ctx->KEY[1]; | 
| @@ -90,19 +91,20 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
| 90 | z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); | 91 | z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); | 
| 91 | } | 92 | } | 
| 92 | 93 | ||
| 93 | u32_out (dst, y); | 94 | out[0] = cpu_to_le32(y); | 
| 94 | u32_out (dst + 4, z); | 95 | out[1] = cpu_to_le32(z); | 
| 95 | } | 96 | } | 
| 96 | 97 | ||
| 97 | static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 98 | static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 
| 98 | { | 99 | { | 
| 99 | u32 y, z, n, sum; | 100 | u32 y, z, n, sum; | 
| 100 | u32 k0, k1, k2, k3; | 101 | u32 k0, k1, k2, k3; | 
| 101 | |||
| 102 | struct tea_ctx *ctx = ctx_arg; | 102 | struct tea_ctx *ctx = ctx_arg; | 
| 103 | const __le32 *in = (const __le32 *)src; | ||
| 104 | __le32 *out = (__le32 *)dst; | ||
| 103 | 105 | ||
| 104 | y = u32_in (src); | 106 | y = le32_to_cpu(in[0]); | 
| 105 | z = u32_in (src + 4); | 107 | z = le32_to_cpu(in[1]); | 
| 106 | 108 | ||
| 107 | k0 = ctx->KEY[0]; | 109 | k0 = ctx->KEY[0]; | 
| 108 | k1 = ctx->KEY[1]; | 110 | k1 = ctx->KEY[1]; | 
| @@ -119,16 +121,15 @@ static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
| 119 | sum -= TEA_DELTA; | 121 | sum -= TEA_DELTA; | 
| 120 | } | 122 | } | 
| 121 | 123 | ||
| 122 | u32_out (dst, y); | 124 | out[0] = cpu_to_le32(y); | 
| 123 | u32_out (dst + 4, z); | 125 | out[1] = cpu_to_le32(z); | 
| 124 | |||
| 125 | } | 126 | } | 
| 126 | 127 | ||
| 127 | static int xtea_setkey(void *ctx_arg, const u8 *in_key, | 128 | static int xtea_setkey(void *ctx_arg, const u8 *in_key, | 
| 128 | unsigned int key_len, u32 *flags) | 129 | unsigned int key_len, u32 *flags) | 
| 129 | { | 130 | { | 
| 130 | |||
| 131 | struct xtea_ctx *ctx = ctx_arg; | 131 | struct xtea_ctx *ctx = ctx_arg; | 
| 132 | const __le32 *key = (const __le32 *)in_key; | ||
| 132 | 133 | ||
| 133 | if (key_len != 16) | 134 | if (key_len != 16) | 
| 134 | { | 135 | { | 
| @@ -136,10 +137,10 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key, | |||
| 136 | return -EINVAL; | 137 | return -EINVAL; | 
| 137 | } | 138 | } | 
| 138 | 139 | ||
| 139 | ctx->KEY[0] = u32_in (in_key); | 140 | ctx->KEY[0] = le32_to_cpu(key[0]); | 
| 140 | ctx->KEY[1] = u32_in (in_key + 4); | 141 | ctx->KEY[1] = le32_to_cpu(key[1]); | 
| 141 | ctx->KEY[2] = u32_in (in_key + 8); | 142 | ctx->KEY[2] = le32_to_cpu(key[2]); | 
| 142 | ctx->KEY[3] = u32_in (in_key + 12); | 143 | ctx->KEY[3] = le32_to_cpu(key[3]); | 
| 143 | 144 | ||
| 144 | return 0; | 145 | return 0; | 
| 145 | 146 | ||
| @@ -147,14 +148,15 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key, | |||
| 147 | 148 | ||
| 148 | static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 149 | static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 
| 149 | { | 150 | { | 
| 150 | |||
| 151 | u32 y, z, sum = 0; | 151 | u32 y, z, sum = 0; | 
| 152 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; | 152 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; | 
| 153 | 153 | ||
| 154 | struct xtea_ctx *ctx = ctx_arg; | 154 | struct xtea_ctx *ctx = ctx_arg; | 
| 155 | const __le32 *in = (const __le32 *)src; | ||
| 156 | __le32 *out = (__le32 *)dst; | ||
| 155 | 157 | ||
| 156 | y = u32_in (src); | 158 | y = le32_to_cpu(in[0]); | 
| 157 | z = u32_in (src + 4); | 159 | z = le32_to_cpu(in[1]); | 
| 158 | 160 | ||
| 159 | while (sum != limit) { | 161 | while (sum != limit) { | 
| 160 | y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); | 162 | y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); | 
| @@ -162,19 +164,19 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
| 162 | z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); | 164 | z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); | 
| 163 | } | 165 | } | 
| 164 | 166 | ||
| 165 | u32_out (dst, y); | 167 | out[0] = cpu_to_le32(y); | 
| 166 | u32_out (dst + 4, z); | 168 | out[1] = cpu_to_le32(z); | 
| 167 | |||
| 168 | } | 169 | } | 
| 169 | 170 | ||
| 170 | static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 171 | static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 
| 171 | { | 172 | { | 
| 172 | |||
| 173 | u32 y, z, sum; | 173 | u32 y, z, sum; | 
| 174 | struct tea_ctx *ctx = ctx_arg; | 174 | struct tea_ctx *ctx = ctx_arg; | 
| 175 | const __le32 *in = (const __le32 *)src; | ||
| 176 | __le32 *out = (__le32 *)dst; | ||
| 175 | 177 | ||
| 176 | y = u32_in (src); | 178 | y = le32_to_cpu(in[0]); | 
| 177 | z = u32_in (src + 4); | 179 | z = le32_to_cpu(in[1]); | 
| 178 | 180 | ||
| 179 | sum = XTEA_DELTA * XTEA_ROUNDS; | 181 | sum = XTEA_DELTA * XTEA_ROUNDS; | 
| 180 | 182 | ||
| @@ -184,22 +186,22 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
| 184 | y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); | 186 | y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); | 
| 185 | } | 187 | } | 
| 186 | 188 | ||
| 187 | u32_out (dst, y); | 189 | out[0] = cpu_to_le32(y); | 
| 188 | u32_out (dst + 4, z); | 190 | out[1] = cpu_to_le32(z); | 
| 189 | |||
| 190 | } | 191 | } | 
| 191 | 192 | ||
| 192 | 193 | ||
| 193 | static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 194 | static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | 
| 194 | { | 195 | { | 
| 195 | |||
| 196 | u32 y, z, sum = 0; | 196 | u32 y, z, sum = 0; | 
| 197 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; | 197 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; | 
| 198 | 198 | ||
| 199 | struct xtea_ctx *ctx = ctx_arg; | 199 | struct xtea_ctx *ctx = ctx_arg; | 
| 200 | const __le32 *in = (const __le32 *)src; | ||
| 201 | __le32 *out = (__le32 *)dst; | ||
| 200 | 202 | ||
| 201 | y = u32_in (src); | 203 | y = le32_to_cpu(in[0]); | 
| 202 | z = u32_in (src + 4); | 204 | z = le32_to_cpu(in[1]); | 
| 203 | 205 | ||
| 204 | while (sum != limit) { | 206 | while (sum != limit) { | 
| 205 | y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; | 207 | y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; | 
| @@ -207,19 +209,19 @@ static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
| 207 | z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; | 209 | z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; | 
| 208 | } | 210 | } | 
| 209 | 211 | ||
| 210 | u32_out (dst, y); | 212 | out[0] = cpu_to_le32(y); | 
| 211 | u32_out (dst + 4, z); | 213 | out[1] = cpu_to_le32(z); | 
| 212 | |||
| 213 | } | 214 | } | 
| 214 | 215 | ||
| 215 | static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 216 | static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | 
| 216 | { | 217 | { | 
| 217 | |||
| 218 | u32 y, z, sum; | 218 | u32 y, z, sum; | 
| 219 | struct tea_ctx *ctx = ctx_arg; | 219 | struct tea_ctx *ctx = ctx_arg; | 
| 220 | const __le32 *in = (const __le32 *)src; | ||
| 221 | __le32 *out = (__le32 *)dst; | ||
| 220 | 222 | ||
| 221 | y = u32_in (src); | 223 | y = le32_to_cpu(in[0]); | 
| 222 | z = u32_in (src + 4); | 224 | z = le32_to_cpu(in[1]); | 
| 223 | 225 | ||
| 224 | sum = XTEA_DELTA * XTEA_ROUNDS; | 226 | sum = XTEA_DELTA * XTEA_ROUNDS; | 
| 225 | 227 | ||
| @@ -229,9 +231,8 @@ static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
| 229 | y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; | 231 | y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; | 
| 230 | } | 232 | } | 
| 231 | 233 | ||
| 232 | u32_out (dst, y); | 234 | out[0] = cpu_to_le32(y); | 
| 233 | u32_out (dst + 4, z); | 235 | out[1] = cpu_to_le32(z); | 
| 234 | |||
| 235 | } | 236 | } | 
| 236 | 237 | ||
| 237 | static struct crypto_alg tea_alg = { | 238 | static struct crypto_alg tea_alg = { | 
| @@ -239,6 +240,7 @@ static struct crypto_alg tea_alg = { | |||
| 239 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 240 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 240 | .cra_blocksize = TEA_BLOCK_SIZE, | 241 | .cra_blocksize = TEA_BLOCK_SIZE, | 
| 241 | .cra_ctxsize = sizeof (struct tea_ctx), | 242 | .cra_ctxsize = sizeof (struct tea_ctx), | 
| 243 | .cra_alignmask = 3, | ||
| 242 | .cra_module = THIS_MODULE, | 244 | .cra_module = THIS_MODULE, | 
| 243 | .cra_list = LIST_HEAD_INIT(tea_alg.cra_list), | 245 | .cra_list = LIST_HEAD_INIT(tea_alg.cra_list), | 
| 244 | .cra_u = { .cipher = { | 246 | .cra_u = { .cipher = { | 
| @@ -254,6 +256,7 @@ static struct crypto_alg xtea_alg = { | |||
| 254 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 256 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 255 | .cra_blocksize = XTEA_BLOCK_SIZE, | 257 | .cra_blocksize = XTEA_BLOCK_SIZE, | 
| 256 | .cra_ctxsize = sizeof (struct xtea_ctx), | 258 | .cra_ctxsize = sizeof (struct xtea_ctx), | 
| 259 | .cra_alignmask = 3, | ||
| 257 | .cra_module = THIS_MODULE, | 260 | .cra_module = THIS_MODULE, | 
| 258 | .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), | 261 | .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), | 
| 259 | .cra_u = { .cipher = { | 262 | .cra_u = { .cipher = { | 
| @@ -269,6 +272,7 @@ static struct crypto_alg xeta_alg = { | |||
| 269 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 272 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 270 | .cra_blocksize = XTEA_BLOCK_SIZE, | 273 | .cra_blocksize = XTEA_BLOCK_SIZE, | 
| 271 | .cra_ctxsize = sizeof (struct xtea_ctx), | 274 | .cra_ctxsize = sizeof (struct xtea_ctx), | 
| 275 | .cra_alignmask = 3, | ||
| 272 | .cra_module = THIS_MODULE, | 276 | .cra_module = THIS_MODULE, | 
| 273 | .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), | 277 | .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), | 
| 274 | .cra_u = { .cipher = { | 278 | .cra_u = { .cipher = { | 
| diff --git a/crypto/tgr192.c b/crypto/tgr192.c index f0a45cf716d0..2d8e44f6fbe9 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c | |||
| @@ -24,8 +24,10 @@ | |||
| 24 | #include <linux/init.h> | 24 | #include <linux/init.h> | 
| 25 | #include <linux/module.h> | 25 | #include <linux/module.h> | 
| 26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> | 
| 27 | #include <asm/byteorder.h> | ||
| 27 | #include <asm/scatterlist.h> | 28 | #include <asm/scatterlist.h> | 
| 28 | #include <linux/crypto.h> | 29 | #include <linux/crypto.h> | 
| 30 | #include <linux/types.h> | ||
| 29 | 31 | ||
| 30 | #define TGR192_DIGEST_SIZE 24 | 32 | #define TGR192_DIGEST_SIZE 24 | 
| 31 | #define TGR160_DIGEST_SIZE 20 | 33 | #define TGR160_DIGEST_SIZE 20 | 
| @@ -467,18 +469,10 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data) | |||
| 467 | u64 a, b, c, aa, bb, cc; | 469 | u64 a, b, c, aa, bb, cc; | 
| 468 | u64 x[8]; | 470 | u64 x[8]; | 
| 469 | int i; | 471 | int i; | 
| 470 | const u8 *ptr = data; | 472 | const __le64 *ptr = (const __le64 *)data; | 
| 471 | 473 | ||
| 472 | for (i = 0; i < 8; i++, ptr += 8) { | 474 | for (i = 0; i < 8; i++) | 
| 473 | x[i] = (((u64)ptr[7] ) << 56) ^ | 475 | x[i] = le64_to_cpu(ptr[i]); | 
| 474 | (((u64)ptr[6] & 0xffL) << 48) ^ | ||
| 475 | (((u64)ptr[5] & 0xffL) << 40) ^ | ||
| 476 | (((u64)ptr[4] & 0xffL) << 32) ^ | ||
| 477 | (((u64)ptr[3] & 0xffL) << 24) ^ | ||
| 478 | (((u64)ptr[2] & 0xffL) << 16) ^ | ||
| 479 | (((u64)ptr[1] & 0xffL) << 8) ^ | ||
| 480 | (((u64)ptr[0] & 0xffL) ); | ||
| 481 | } | ||
| 482 | 476 | ||
| 483 | /* save */ | 477 | /* save */ | 
| 484 | a = aa = tctx->a; | 478 | a = aa = tctx->a; | 
| @@ -558,9 +552,10 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len) | |||
| 558 | static void tgr192_final(void *ctx, u8 * out) | 552 | static void tgr192_final(void *ctx, u8 * out) | 
| 559 | { | 553 | { | 
| 560 | struct tgr192_ctx *tctx = ctx; | 554 | struct tgr192_ctx *tctx = ctx; | 
| 555 | __be64 *dst = (__be64 *)out; | ||
| 556 | __be64 *be64p; | ||
| 557 | __le32 *le32p; | ||
| 561 | u32 t, msb, lsb; | 558 | u32 t, msb, lsb; | 
| 562 | u8 *p; | ||
| 563 | int i, j; | ||
| 564 | 559 | ||
| 565 | tgr192_update(tctx, NULL, 0); /* flush */ ; | 560 | tgr192_update(tctx, NULL, 0); /* flush */ ; | 
| 566 | 561 | ||
| @@ -594,41 +589,16 @@ static void tgr192_final(void *ctx, u8 * out) | |||
| 594 | memset(tctx->hash, 0, 56); /* fill next block with zeroes */ | 589 | memset(tctx->hash, 0, 56); /* fill next block with zeroes */ | 
| 595 | } | 590 | } | 
| 596 | /* append the 64 bit count */ | 591 | /* append the 64 bit count */ | 
| 597 | tctx->hash[56] = lsb; | 592 | le32p = (__le32 *)&tctx->hash[56]; | 
| 598 | tctx->hash[57] = lsb >> 8; | 593 | le32p[0] = cpu_to_le32(lsb); | 
| 599 | tctx->hash[58] = lsb >> 16; | 594 | le32p[1] = cpu_to_le32(msb); | 
| 600 | tctx->hash[59] = lsb >> 24; | 595 | |
| 601 | tctx->hash[60] = msb; | ||
| 602 | tctx->hash[61] = msb >> 8; | ||
| 603 | tctx->hash[62] = msb >> 16; | ||
| 604 | tctx->hash[63] = msb >> 24; | ||
| 605 | tgr192_transform(tctx, tctx->hash); | 596 | tgr192_transform(tctx, tctx->hash); | 
| 606 | 597 | ||
| 607 | p = tctx->hash; | 598 | be64p = (__be64 *)tctx->hash; | 
| 608 | *p++ = tctx->a >> 56; *p++ = tctx->a >> 48; *p++ = tctx->a >> 40; | 599 | dst[0] = be64p[0] = cpu_to_be64(tctx->a); | 
| 609 | *p++ = tctx->a >> 32; *p++ = tctx->a >> 24; *p++ = tctx->a >> 16; | 600 | dst[1] = be64p[1] = cpu_to_be64(tctx->b); | 
| 610 | *p++ = tctx->a >> 8; *p++ = tctx->a;\ | 601 | dst[2] = be64p[2] = cpu_to_be64(tctx->c); | 
| 611 | *p++ = tctx->b >> 56; *p++ = tctx->b >> 48; *p++ = tctx->b >> 40; | ||
| 612 | *p++ = tctx->b >> 32; *p++ = tctx->b >> 24; *p++ = tctx->b >> 16; | ||
| 613 | *p++ = tctx->b >> 8; *p++ = tctx->b; | ||
| 614 | *p++ = tctx->c >> 56; *p++ = tctx->c >> 48; *p++ = tctx->c >> 40; | ||
| 615 | *p++ = tctx->c >> 32; *p++ = tctx->c >> 24; *p++ = tctx->c >> 16; | ||
| 616 | *p++ = tctx->c >> 8; *p++ = tctx->c; | ||
| 617 | |||
| 618 | |||
| 619 | /* unpack the hash */ | ||
| 620 | j = 7; | ||
| 621 | for (i = 0; i < 8; i++) { | ||
| 622 | out[j--] = (tctx->a >> 8 * i) & 0xff; | ||
| 623 | } | ||
| 624 | j = 15; | ||
| 625 | for (i = 0; i < 8; i++) { | ||
| 626 | out[j--] = (tctx->b >> 8 * i) & 0xff; | ||
| 627 | } | ||
| 628 | j = 23; | ||
| 629 | for (i = 0; i < 8; i++) { | ||
| 630 | out[j--] = (tctx->c >> 8 * i) & 0xff; | ||
| 631 | } | ||
| 632 | } | 602 | } | 
| 633 | 603 | ||
| 634 | static void tgr160_final(void *ctx, u8 * out) | 604 | static void tgr160_final(void *ctx, u8 * out) | 
| diff --git a/crypto/twofish.c b/crypto/twofish.c index 4efff8cf9958..a26d885486fb 100644 --- a/crypto/twofish.c +++ b/crypto/twofish.c | |||
| @@ -37,6 +37,8 @@ | |||
| 37 | * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the | 37 | * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the | 
| 38 | * Third Edition. | 38 | * Third Edition. | 
| 39 | */ | 39 | */ | 
| 40 | |||
| 41 | #include <asm/byteorder.h> | ||
| 40 | #include <linux/module.h> | 42 | #include <linux/module.h> | 
| 41 | #include <linux/init.h> | 43 | #include <linux/init.h> | 
| 42 | #include <linux/types.h> | 44 | #include <linux/types.h> | 
| @@ -621,13 +623,11 @@ static const u8 calc_sb_tbl[512] = { | |||
| 621 | * whitening subkey number m. */ | 623 | * whitening subkey number m. */ | 
| 622 | 624 | ||
| 623 | #define INPACK(n, x, m) \ | 625 | #define INPACK(n, x, m) \ | 
| 624 | x = in[4 * (n)] ^ (in[4 * (n) + 1] << 8) \ | 626 | x = le32_to_cpu(src[n]) ^ ctx->w[m] | 
| 625 | ^ (in[4 * (n) + 2] << 16) ^ (in[4 * (n) + 3] << 24) ^ ctx->w[m] | ||
| 626 | 627 | ||
| 627 | #define OUTUNPACK(n, x, m) \ | 628 | #define OUTUNPACK(n, x, m) \ | 
| 628 | x ^= ctx->w[m]; \ | 629 | x ^= ctx->w[m]; \ | 
| 629 | out[4 * (n)] = x; out[4 * (n) + 1] = x >> 8; \ | 630 | dst[n] = cpu_to_le32(x) | 
| 630 | out[4 * (n) + 2] = x >> 16; out[4 * (n) + 3] = x >> 24 | ||
| 631 | 631 | ||
| 632 | #define TF_MIN_KEY_SIZE 16 | 632 | #define TF_MIN_KEY_SIZE 16 | 
| 633 | #define TF_MAX_KEY_SIZE 32 | 633 | #define TF_MAX_KEY_SIZE 32 | 
| @@ -804,6 +804,8 @@ static int twofish_setkey(void *cx, const u8 *key, | |||
| 804 | static void twofish_encrypt(void *cx, u8 *out, const u8 *in) | 804 | static void twofish_encrypt(void *cx, u8 *out, const u8 *in) | 
| 805 | { | 805 | { | 
| 806 | struct twofish_ctx *ctx = cx; | 806 | struct twofish_ctx *ctx = cx; | 
| 807 | const __le32 *src = (const __le32 *)in; | ||
| 808 | __le32 *dst = (__le32 *)out; | ||
| 807 | 809 | ||
| 808 | /* The four 32-bit chunks of the text. */ | 810 | /* The four 32-bit chunks of the text. */ | 
| 809 | u32 a, b, c, d; | 811 | u32 a, b, c, d; | 
| @@ -839,6 +841,8 @@ static void twofish_encrypt(void *cx, u8 *out, const u8 *in) | |||
| 839 | static void twofish_decrypt(void *cx, u8 *out, const u8 *in) | 841 | static void twofish_decrypt(void *cx, u8 *out, const u8 *in) | 
| 840 | { | 842 | { | 
| 841 | struct twofish_ctx *ctx = cx; | 843 | struct twofish_ctx *ctx = cx; | 
| 844 | const __le32 *src = (const __le32 *)in; | ||
| 845 | __le32 *dst = (__le32 *)out; | ||
| 842 | 846 | ||
| 843 | /* The four 32-bit chunks of the text. */ | 847 | /* The four 32-bit chunks of the text. */ | 
| 844 | u32 a, b, c, d; | 848 | u32 a, b, c, d; | 
| @@ -875,6 +879,7 @@ static struct crypto_alg alg = { | |||
| 875 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 879 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 876 | .cra_blocksize = TF_BLOCK_SIZE, | 880 | .cra_blocksize = TF_BLOCK_SIZE, | 
| 877 | .cra_ctxsize = sizeof(struct twofish_ctx), | 881 | .cra_ctxsize = sizeof(struct twofish_ctx), | 
| 882 | .cra_alignmask = 3, | ||
| 878 | .cra_module = THIS_MODULE, | 883 | .cra_module = THIS_MODULE, | 
| 879 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 884 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 
| 880 | .cra_u = { .cipher = { | 885 | .cra_u = { .cipher = { | 
| diff --git a/crypto/wp512.c b/crypto/wp512.c index fd6e20e1f291..b226a126cfae 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c | |||
| @@ -22,8 +22,10 @@ | |||
| 22 | #include <linux/init.h> | 22 | #include <linux/init.h> | 
| 23 | #include <linux/module.h> | 23 | #include <linux/module.h> | 
| 24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> | 
| 25 | #include <asm/byteorder.h> | ||
| 25 | #include <asm/scatterlist.h> | 26 | #include <asm/scatterlist.h> | 
| 26 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> | 
| 28 | #include <linux/types.h> | ||
| 27 | 29 | ||
| 28 | #define WP512_DIGEST_SIZE 64 | 30 | #define WP512_DIGEST_SIZE 64 | 
| 29 | #define WP384_DIGEST_SIZE 48 | 31 | #define WP384_DIGEST_SIZE 48 | 
| @@ -778,19 +780,10 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) { | |||
| 778 | u64 block[8]; /* mu(buffer) */ | 780 | u64 block[8]; /* mu(buffer) */ | 
| 779 | u64 state[8]; /* the cipher state */ | 781 | u64 state[8]; /* the cipher state */ | 
| 780 | u64 L[8]; | 782 | u64 L[8]; | 
| 781 | u8 *buffer = wctx->buffer; | 783 | const __be64 *buffer = (const __be64 *)wctx->buffer; | 
| 782 | 784 | ||
| 783 | for (i = 0; i < 8; i++, buffer += 8) { | 785 | for (i = 0; i < 8; i++) | 
| 784 | block[i] = | 786 | block[i] = be64_to_cpu(buffer[i]); | 
| 785 | (((u64)buffer[0] ) << 56) ^ | ||
| 786 | (((u64)buffer[1] & 0xffL) << 48) ^ | ||
| 787 | (((u64)buffer[2] & 0xffL) << 40) ^ | ||
| 788 | (((u64)buffer[3] & 0xffL) << 32) ^ | ||
| 789 | (((u64)buffer[4] & 0xffL) << 24) ^ | ||
| 790 | (((u64)buffer[5] & 0xffL) << 16) ^ | ||
| 791 | (((u64)buffer[6] & 0xffL) << 8) ^ | ||
| 792 | (((u64)buffer[7] & 0xffL) ); | ||
| 793 | } | ||
| 794 | 787 | ||
| 795 | state[0] = block[0] ^ (K[0] = wctx->hash[0]); | 788 | state[0] = block[0] ^ (K[0] = wctx->hash[0]); | 
| 796 | state[1] = block[1] ^ (K[1] = wctx->hash[1]); | 789 | state[1] = block[1] ^ (K[1] = wctx->hash[1]); | 
| @@ -1069,7 +1062,7 @@ static void wp512_final(void *ctx, u8 *out) | |||
| 1069 | u8 *bitLength = wctx->bitLength; | 1062 | u8 *bitLength = wctx->bitLength; | 
| 1070 | int bufferBits = wctx->bufferBits; | 1063 | int bufferBits = wctx->bufferBits; | 
| 1071 | int bufferPos = wctx->bufferPos; | 1064 | int bufferPos = wctx->bufferPos; | 
| 1072 | u8 *digest = out; | 1065 | __be64 *digest = (__be64 *)out; | 
| 1073 | 1066 | ||
| 1074 | buffer[bufferPos] |= 0x80U >> (bufferBits & 7); | 1067 | buffer[bufferPos] |= 0x80U >> (bufferBits & 7); | 
| 1075 | bufferPos++; | 1068 | bufferPos++; | 
| @@ -1088,17 +1081,8 @@ static void wp512_final(void *ctx, u8 *out) | |||
| 1088 | memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], | 1081 | memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], | 
| 1089 | bitLength, WP512_LENGTHBYTES); | 1082 | bitLength, WP512_LENGTHBYTES); | 
| 1090 | wp512_process_buffer(wctx); | 1083 | wp512_process_buffer(wctx); | 
| 1091 | for (i = 0; i < WP512_DIGEST_SIZE/8; i++) { | 1084 | for (i = 0; i < WP512_DIGEST_SIZE/8; i++) | 
| 1092 | digest[0] = (u8)(wctx->hash[i] >> 56); | 1085 | digest[i] = cpu_to_be64(wctx->hash[i]); | 
| 1093 | digest[1] = (u8)(wctx->hash[i] >> 48); | ||
| 1094 | digest[2] = (u8)(wctx->hash[i] >> 40); | ||
| 1095 | digest[3] = (u8)(wctx->hash[i] >> 32); | ||
| 1096 | digest[4] = (u8)(wctx->hash[i] >> 24); | ||
| 1097 | digest[5] = (u8)(wctx->hash[i] >> 16); | ||
| 1098 | digest[6] = (u8)(wctx->hash[i] >> 8); | ||
| 1099 | digest[7] = (u8)(wctx->hash[i] ); | ||
| 1100 | digest += 8; | ||
| 1101 | } | ||
| 1102 | wctx->bufferBits = bufferBits; | 1086 | wctx->bufferBits = bufferBits; | 
| 1103 | wctx->bufferPos = bufferPos; | 1087 | wctx->bufferPos = bufferPos; | 
| 1104 | } | 1088 | } | 
| diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 71407c578afe..64819aa7cac4 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
| @@ -99,9 +99,6 @@ byte(const uint32_t x, const unsigned n) | |||
| 99 | return x >> (n << 3); | 99 | return x >> (n << 3); | 
| 100 | } | 100 | } | 
| 101 | 101 | ||
| 102 | #define uint32_t_in(x) le32_to_cpu(*(const uint32_t *)(x)) | ||
| 103 | #define uint32_t_out(to, from) (*(uint32_t *)(to) = cpu_to_le32(from)) | ||
| 104 | |||
| 105 | #define E_KEY ctx->E | 102 | #define E_KEY ctx->E | 
| 106 | #define D_KEY ctx->D | 103 | #define D_KEY ctx->D | 
| 107 | 104 | ||
| @@ -294,6 +291,7 @@ static int | |||
| 294 | aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) | 291 | aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) | 
| 295 | { | 292 | { | 
| 296 | struct aes_ctx *ctx = aes_ctx(ctx_arg); | 293 | struct aes_ctx *ctx = aes_ctx(ctx_arg); | 
| 294 | const __le32 *key = (const __le32 *)in_key; | ||
| 297 | uint32_t i, t, u, v, w; | 295 | uint32_t i, t, u, v, w; | 
| 298 | uint32_t P[AES_EXTENDED_KEY_SIZE]; | 296 | uint32_t P[AES_EXTENDED_KEY_SIZE]; | 
| 299 | uint32_t rounds; | 297 | uint32_t rounds; | 
| @@ -313,10 +311,10 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t | |||
| 313 | ctx->E = ctx->e_data; | 311 | ctx->E = ctx->e_data; | 
| 314 | ctx->D = ctx->e_data; | 312 | ctx->D = ctx->e_data; | 
| 315 | 313 | ||
| 316 | E_KEY[0] = uint32_t_in (in_key); | 314 | E_KEY[0] = le32_to_cpu(key[0]); | 
| 317 | E_KEY[1] = uint32_t_in (in_key + 4); | 315 | E_KEY[1] = le32_to_cpu(key[1]); | 
| 318 | E_KEY[2] = uint32_t_in (in_key + 8); | 316 | E_KEY[2] = le32_to_cpu(key[2]); | 
| 319 | E_KEY[3] = uint32_t_in (in_key + 12); | 317 | E_KEY[3] = le32_to_cpu(key[3]); | 
| 320 | 318 | ||
| 321 | /* Prepare control words. */ | 319 | /* Prepare control words. */ | 
| 322 | memset(&ctx->cword, 0, sizeof(ctx->cword)); | 320 | memset(&ctx->cword, 0, sizeof(ctx->cword)); | 
| @@ -343,17 +341,17 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t | |||
| 343 | break; | 341 | break; | 
| 344 | 342 | ||
| 345 | case 24: | 343 | case 24: | 
| 346 | E_KEY[4] = uint32_t_in (in_key + 16); | 344 | E_KEY[4] = le32_to_cpu(key[4]); | 
| 347 | t = E_KEY[5] = uint32_t_in (in_key + 20); | 345 | t = E_KEY[5] = le32_to_cpu(key[5]); | 
| 348 | for (i = 0; i < 8; ++i) | 346 | for (i = 0; i < 8; ++i) | 
| 349 | loop6 (i); | 347 | loop6 (i); | 
| 350 | break; | 348 | break; | 
| 351 | 349 | ||
| 352 | case 32: | 350 | case 32: | 
| 353 | E_KEY[4] = uint32_t_in (in_key + 16); | 351 | E_KEY[4] = le32_to_cpu(in_key[4]); | 
| 354 | E_KEY[5] = uint32_t_in (in_key + 20); | 352 | E_KEY[5] = le32_to_cpu(in_key[5]); | 
| 355 | E_KEY[6] = uint32_t_in (in_key + 24); | 353 | E_KEY[6] = le32_to_cpu(in_key[6]); | 
| 356 | t = E_KEY[7] = uint32_t_in (in_key + 28); | 354 | t = E_KEY[7] = le32_to_cpu(in_key[7]); | 
| 357 | for (i = 0; i < 7; ++i) | 355 | for (i = 0; i < 7; ++i) | 
| 358 | loop8 (i); | 356 | loop8 (i); | 
| 359 | break; | 357 | break; | 
| @@ -468,6 +466,8 @@ static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, | |||
| 468 | 466 | ||
| 469 | static struct crypto_alg aes_alg = { | 467 | static struct crypto_alg aes_alg = { | 
| 470 | .cra_name = "aes", | 468 | .cra_name = "aes", | 
| 469 | .cra_driver_name = "aes-padlock", | ||
| 470 | .cra_priority = 300, | ||
| 471 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 471 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 
| 472 | .cra_blocksize = AES_BLOCK_SIZE, | 472 | .cra_blocksize = AES_BLOCK_SIZE, | 
| 473 | .cra_ctxsize = sizeof(struct aes_ctx), | 473 | .cra_ctxsize = sizeof(struct aes_ctx), | 
| diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h index 3cf2b7a12348..b78489bc298a 100644 --- a/drivers/crypto/padlock.h +++ b/drivers/crypto/padlock.h | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | /* Control word. */ | 18 | /* Control word. */ | 
| 19 | struct cword { | 19 | struct cword { | 
| 20 | int __attribute__ ((__packed__)) | 20 | unsigned int __attribute__ ((__packed__)) | 
| 21 | rounds:4, | 21 | rounds:4, | 
| 22 | algo:3, | 22 | algo:3, | 
| 23 | keygen:1, | 23 | keygen:1, | 
| diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 733bc25b2bf9..4959800a18d7 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -27,6 +27,19 @@ config NETDEVICES | |||
| 27 | # that for each of the symbols. | 27 | # that for each of the symbols. | 
| 28 | if NETDEVICES | 28 | if NETDEVICES | 
| 29 | 29 | ||
| 30 | config IFB | ||
| 31 | tristate "Intermediate Functional Block support" | ||
| 32 | depends on NET_CLS_ACT | ||
| 33 | ---help--- | ||
| 34 | This is an intermidiate driver that allows sharing of | ||
| 35 | resources. | ||
| 36 | To compile this driver as a module, choose M here: the module | ||
| 37 | will be called ifb. If you want to use more than one ifb | ||
| 38 | device at a time, you need to compile this driver as a module. | ||
| 39 | Instead of 'ifb', the devices will then be called 'ifb0', | ||
| 40 | 'ifb1' etc. | ||
| 41 | Look at the iproute2 documentation directory for usage etc | ||
| 42 | |||
| 30 | config DUMMY | 43 | config DUMMY | 
| 31 | tristate "Dummy net driver support" | 44 | tristate "Dummy net driver support" | 
| 32 | ---help--- | 45 | ---help--- | 
| diff --git a/drivers/net/Makefile b/drivers/net/Makefile index b74a7cb5bae6..00e72b12fb92 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
| @@ -125,6 +125,7 @@ ifeq ($(CONFIG_SLIP_COMPRESSED),y) | |||
| 125 | endif | 125 | endif | 
| 126 | 126 | ||
| 127 | obj-$(CONFIG_DUMMY) += dummy.o | 127 | obj-$(CONFIG_DUMMY) += dummy.o | 
| 128 | obj-$(CONFIG_IFB) += ifb.o | ||
| 128 | obj-$(CONFIG_DE600) += de600.o | 129 | obj-$(CONFIG_DE600) += de600.o | 
| 129 | obj-$(CONFIG_DE620) += de620.o | 130 | obj-$(CONFIG_DE620) += de620.o | 
| 130 | obj-$(CONFIG_LANCE) += lance.o | 131 | obj-$(CONFIG_LANCE) += lance.o | 
| diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 41b3d83c2ab8..f4424cf886c5 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c | |||
| @@ -515,6 +515,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) | |||
| 515 | count = kiss_esc(p, (unsigned char *)ax->xbuff, len); | 515 | count = kiss_esc(p, (unsigned char *)ax->xbuff, len); | 
| 516 | } | 516 | } | 
| 517 | } | 517 | } | 
| 518 | spin_unlock_bh(&ax->buflock); | ||
| 518 | 519 | ||
| 519 | set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); | 520 | set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); | 
| 520 | actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); | 521 | actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); | 
| @@ -524,7 +525,6 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) | |||
| 524 | ax->dev->trans_start = jiffies; | 525 | ax->dev->trans_start = jiffies; | 
| 525 | ax->xleft = count - actual; | 526 | ax->xleft = count - actual; | 
| 526 | ax->xhead = ax->xbuff + actual; | 527 | ax->xhead = ax->xbuff + actual; | 
| 527 | spin_unlock_bh(&ax->buflock); | ||
| 528 | } | 528 | } | 
| 529 | 529 | ||
| 530 | /* Encapsulate an AX.25 packet and kick it into a TTY queue. */ | 530 | /* Encapsulate an AX.25 packet and kick it into a TTY queue. */ | 
| diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c new file mode 100644 index 000000000000..1b699259b4ec --- /dev/null +++ b/drivers/net/ifb.c | |||
| @@ -0,0 +1,294 @@ | |||
| 1 | /* drivers/net/ifb.c: | ||
| 2 | |||
| 3 | The purpose of this driver is to provide a device that allows | ||
| 4 | for sharing of resources: | ||
| 5 | |||
| 6 | 1) qdiscs/policies that are per device as opposed to system wide. | ||
| 7 | ifb allows for a device which can be redirected to thus providing | ||
| 8 | an impression of sharing. | ||
| 9 | |||
| 10 | 2) Allows for queueing incoming traffic for shaping instead of | ||
| 11 | dropping. | ||
| 12 | |||
| 13 | The original concept is based on what is known as the IMQ | ||
| 14 | driver initially written by Martin Devera, later rewritten | ||
| 15 | by Patrick McHardy and then maintained by Andre Correa. | ||
| 16 | |||
| 17 | You need the tc action mirror or redirect to feed this device | ||
| 18 | packets. | ||
| 19 | |||
| 20 | This program is free software; you can redistribute it and/or | ||
| 21 | modify it under the terms of the GNU General Public License | ||
| 22 | as published by the Free Software Foundation; either version | ||
| 23 | 2 of the License, or (at your option) any later version. | ||
| 24 | |||
| 25 | Authors: Jamal Hadi Salim (2005) | ||
| 26 | |||
| 27 | */ | ||
| 28 | |||
| 29 | |||
| 30 | #include <linux/config.h> | ||
| 31 | #include <linux/module.h> | ||
| 32 | #include <linux/kernel.h> | ||
| 33 | #include <linux/netdevice.h> | ||
| 34 | #include <linux/etherdevice.h> | ||
| 35 | #include <linux/init.h> | ||
| 36 | #include <linux/moduleparam.h> | ||
| 37 | #include <net/pkt_sched.h> | ||
| 38 | |||
| 39 | #define TX_TIMEOUT (2*HZ) | ||
| 40 | |||
| 41 | #define TX_Q_LIMIT 32 | ||
| 42 | struct ifb_private { | ||
| 43 | struct net_device_stats stats; | ||
| 44 | struct tasklet_struct ifb_tasklet; | ||
| 45 | int tasklet_pending; | ||
| 46 | /* mostly debug stats leave in for now */ | ||
| 47 | unsigned long st_task_enter; /* tasklet entered */ | ||
| 48 | unsigned long st_txq_refl_try; /* transmit queue refill attempt */ | ||
| 49 | unsigned long st_rxq_enter; /* receive queue entered */ | ||
| 50 | unsigned long st_rx2tx_tran; /* receive to trasmit transfers */ | ||
| 51 | unsigned long st_rxq_notenter; /*receiveQ not entered, resched */ | ||
| 52 | unsigned long st_rx_frm_egr; /* received from egress path */ | ||
| 53 | unsigned long st_rx_frm_ing; /* received from ingress path */ | ||
| 54 | unsigned long st_rxq_check; | ||
| 55 | unsigned long st_rxq_rsch; | ||
| 56 | struct sk_buff_head rq; | ||
| 57 | struct sk_buff_head tq; | ||
| 58 | }; | ||
| 59 | |||
| 60 | static int numifbs = 1; | ||
| 61 | |||
| 62 | static void ri_tasklet(unsigned long dev); | ||
| 63 | static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); | ||
| 64 | static struct net_device_stats *ifb_get_stats(struct net_device *dev); | ||
| 65 | static int ifb_open(struct net_device *dev); | ||
| 66 | static int ifb_close(struct net_device *dev); | ||
| 67 | |||
| 68 | static void ri_tasklet(unsigned long dev) | ||
| 69 | { | ||
| 70 | |||
| 71 | struct net_device *_dev = (struct net_device *)dev; | ||
| 72 | struct ifb_private *dp = netdev_priv(_dev); | ||
| 73 | struct net_device_stats *stats = &dp->stats; | ||
| 74 | struct sk_buff *skb; | ||
| 75 | |||
| 76 | dp->st_task_enter++; | ||
| 77 | if ((skb = skb_peek(&dp->tq)) == NULL) { | ||
| 78 | dp->st_txq_refl_try++; | ||
| 79 | if (spin_trylock(&_dev->xmit_lock)) { | ||
| 80 | dp->st_rxq_enter++; | ||
| 81 | while ((skb = skb_dequeue(&dp->rq)) != NULL) { | ||
| 82 | skb_queue_tail(&dp->tq, skb); | ||
| 83 | dp->st_rx2tx_tran++; | ||
| 84 | } | ||
| 85 | spin_unlock(&_dev->xmit_lock); | ||
| 86 | } else { | ||
| 87 | /* reschedule */ | ||
| 88 | dp->st_rxq_notenter++; | ||
| 89 | goto resched; | ||
| 90 | } | ||
| 91 | } | ||
| 92 | |||
| 93 | while ((skb = skb_dequeue(&dp->tq)) != NULL) { | ||
| 94 | u32 from = G_TC_FROM(skb->tc_verd); | ||
| 95 | |||
| 96 | skb->tc_verd = 0; | ||
| 97 | skb->tc_verd = SET_TC_NCLS(skb->tc_verd); | ||
| 98 | stats->tx_packets++; | ||
| 99 | stats->tx_bytes +=skb->len; | ||
| 100 | if (from & AT_EGRESS) { | ||
| 101 | dp->st_rx_frm_egr++; | ||
| 102 | dev_queue_xmit(skb); | ||
| 103 | } else if (from & AT_INGRESS) { | ||
| 104 | |||
| 105 | dp->st_rx_frm_ing++; | ||
| 106 | netif_rx(skb); | ||
| 107 | } else { | ||
| 108 | dev_kfree_skb(skb); | ||
| 109 | stats->tx_dropped++; | ||
| 110 | } | ||
| 111 | } | ||
| 112 | |||
| 113 | if (spin_trylock(&_dev->xmit_lock)) { | ||
| 114 | dp->st_rxq_check++; | ||
| 115 | if ((skb = skb_peek(&dp->rq)) == NULL) { | ||
| 116 | dp->tasklet_pending = 0; | ||
| 117 | if (netif_queue_stopped(_dev)) | ||
| 118 | netif_wake_queue(_dev); | ||
| 119 | } else { | ||
| 120 | dp->st_rxq_rsch++; | ||
| 121 | spin_unlock(&_dev->xmit_lock); | ||
| 122 | goto resched; | ||
| 123 | } | ||
| 124 | spin_unlock(&_dev->xmit_lock); | ||
| 125 | } else { | ||
| 126 | resched: | ||
| 127 | dp->tasklet_pending = 1; | ||
| 128 | tasklet_schedule(&dp->ifb_tasklet); | ||
| 129 | } | ||
| 130 | |||
| 131 | } | ||
| 132 | |||
| 133 | static void __init ifb_setup(struct net_device *dev) | ||
| 134 | { | ||
| 135 | /* Initialize the device structure. */ | ||
| 136 | dev->get_stats = ifb_get_stats; | ||
| 137 | dev->hard_start_xmit = ifb_xmit; | ||
| 138 | dev->open = &ifb_open; | ||
| 139 | dev->stop = &ifb_close; | ||
| 140 | |||
| 141 | /* Fill in device structure with ethernet-generic values. */ | ||
| 142 | ether_setup(dev); | ||
| 143 | dev->tx_queue_len = TX_Q_LIMIT; | ||
| 144 | dev->change_mtu = NULL; | ||
| 145 | dev->flags |= IFF_NOARP; | ||
| 146 | dev->flags &= ~IFF_MULTICAST; | ||
| 147 | SET_MODULE_OWNER(dev); | ||
| 148 | random_ether_addr(dev->dev_addr); | ||
| 149 | } | ||
| 150 | |||
| 151 | static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) | ||
| 152 | { | ||
| 153 | struct ifb_private *dp = netdev_priv(dev); | ||
| 154 | struct net_device_stats *stats = &dp->stats; | ||
| 155 | int ret = 0; | ||
| 156 | u32 from = G_TC_FROM(skb->tc_verd); | ||
| 157 | |||
| 158 | stats->tx_packets++; | ||
| 159 | stats->tx_bytes+=skb->len; | ||
| 160 | |||
| 161 | if (!from || !skb->input_dev) { | ||
| 162 | dropped: | ||
| 163 | dev_kfree_skb(skb); | ||
| 164 | stats->rx_dropped++; | ||
| 165 | return ret; | ||
| 166 | } else { | ||
| 167 | /* | ||
| 168 | * note we could be going | ||
| 169 | * ingress -> egress or | ||
| 170 | * egress -> ingress | ||
| 171 | */ | ||
| 172 | skb->dev = skb->input_dev; | ||
| 173 | skb->input_dev = dev; | ||
| 174 | if (from & AT_INGRESS) { | ||
| 175 | skb_pull(skb, skb->dev->hard_header_len); | ||
| 176 | } else { | ||
| 177 | if (!(from & AT_EGRESS)) { | ||
| 178 | goto dropped; | ||
| 179 | } | ||
| 180 | } | ||
| 181 | } | ||
| 182 | |||
| 183 | if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) { | ||
| 184 | netif_stop_queue(dev); | ||
| 185 | } | ||
| 186 | |||
| 187 | dev->trans_start = jiffies; | ||
| 188 | skb_queue_tail(&dp->rq, skb); | ||
| 189 | if (!dp->tasklet_pending) { | ||
| 190 | dp->tasklet_pending = 1; | ||
| 191 | tasklet_schedule(&dp->ifb_tasklet); | ||
| 192 | } | ||
| 193 | |||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | static struct net_device_stats *ifb_get_stats(struct net_device *dev) | ||
| 198 | { | ||
| 199 | struct ifb_private *dp = netdev_priv(dev); | ||
| 200 | struct net_device_stats *stats = &dp->stats; | ||
| 201 | |||
| 202 | pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n", | ||
| 203 | dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter, | ||
| 204 | dp->st_rx2tx_tran dp->st_rxq_notenter, dp->st_rx_frm_egr, | ||
| 205 | dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch ); | ||
| 206 | |||
| 207 | return stats; | ||
| 208 | } | ||
| 209 | |||
| 210 | static struct net_device **ifbs; | ||
| 211 | |||
| 212 | /* Number of ifb devices to be set up by this module. */ | ||
| 213 | module_param(numifbs, int, 0); | ||
| 214 | MODULE_PARM_DESC(numifbs, "Number of ifb devices"); | ||
| 215 | |||
| 216 | static int ifb_close(struct net_device *dev) | ||
| 217 | { | ||
| 218 | struct ifb_private *dp = netdev_priv(dev); | ||
| 219 | |||
| 220 | tasklet_kill(&dp->ifb_tasklet); | ||
| 221 | netif_stop_queue(dev); | ||
| 222 | skb_queue_purge(&dp->rq); | ||
| 223 | skb_queue_purge(&dp->tq); | ||
| 224 | return 0; | ||
| 225 | } | ||
| 226 | |||
| 227 | static int ifb_open(struct net_device *dev) | ||
| 228 | { | ||
| 229 | struct ifb_private *dp = netdev_priv(dev); | ||
| 230 | |||
| 231 | tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); | ||
| 232 | skb_queue_head_init(&dp->rq); | ||
| 233 | skb_queue_head_init(&dp->tq); | ||
| 234 | netif_start_queue(dev); | ||
| 235 | |||
| 236 | return 0; | ||
| 237 | } | ||
| 238 | |||
| 239 | static int __init ifb_init_one(int index) | ||
| 240 | { | ||
| 241 | struct net_device *dev_ifb; | ||
| 242 | int err; | ||
| 243 | |||
| 244 | dev_ifb = alloc_netdev(sizeof(struct ifb_private), | ||
| 245 | "ifb%d", ifb_setup); | ||
| 246 | |||
| 247 | if (!dev_ifb) | ||
| 248 | return -ENOMEM; | ||
| 249 | |||
| 250 | if ((err = register_netdev(dev_ifb))) { | ||
| 251 | free_netdev(dev_ifb); | ||
| 252 | dev_ifb = NULL; | ||
| 253 | } else { | ||
| 254 | ifbs[index] = dev_ifb; | ||
| 255 | } | ||
| 256 | |||
| 257 | return err; | ||
| 258 | } | ||
| 259 | |||
| 260 | static void ifb_free_one(int index) | ||
| 261 | { | ||
| 262 | unregister_netdev(ifbs[index]); | ||
| 263 | free_netdev(ifbs[index]); | ||
| 264 | } | ||
| 265 | |||
| 266 | static int __init ifb_init_module(void) | ||
| 267 | { | ||
| 268 | int i, err = 0; | ||
| 269 | ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL); | ||
| 270 | if (!ifbs) | ||
| 271 | return -ENOMEM; | ||
| 272 | for (i = 0; i < numifbs && !err; i++) | ||
| 273 | err = ifb_init_one(i); | ||
| 274 | if (err) { | ||
| 275 | while (--i >= 0) | ||
| 276 | ifb_free_one(i); | ||
| 277 | } | ||
| 278 | |||
| 279 | return err; | ||
| 280 | } | ||
| 281 | |||
| 282 | static void __exit ifb_cleanup_module(void) | ||
| 283 | { | ||
| 284 | int i; | ||
| 285 | |||
| 286 | for (i = 0; i < numifbs; i++) | ||
| 287 | ifb_free_one(i); | ||
| 288 | kfree(ifbs); | ||
| 289 | } | ||
| 290 | |||
| 291 | module_init(ifb_init_module); | ||
| 292 | module_exit(ifb_cleanup_module); | ||
| 293 | MODULE_LICENSE("GPL"); | ||
| 294 | MODULE_AUTHOR("Jamal Hadi Salim"); | ||
| diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 3c89df6e7768..d88bf8aa8b47 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | * | 3 | * | 
| 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 4 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 
| 5 | * Copyright (c) 2002 David S. Miller (davem@redhat.com) | 5 | * Copyright (c) 2002 David S. Miller (davem@redhat.com) | 
| 6 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 6 | * | 7 | * | 
| 7 | * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> | 8 | * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> | 
| 8 | * and Nettle, by Niels Möller. | 9 | * and Nettle, by Niels Möller. | 
| @@ -126,7 +127,11 @@ struct crypto_alg { | |||
| 126 | unsigned int cra_blocksize; | 127 | unsigned int cra_blocksize; | 
| 127 | unsigned int cra_ctxsize; | 128 | unsigned int cra_ctxsize; | 
| 128 | unsigned int cra_alignmask; | 129 | unsigned int cra_alignmask; | 
| 130 | |||
| 131 | int cra_priority; | ||
| 132 | |||
| 129 | const char cra_name[CRYPTO_MAX_ALG_NAME]; | 133 | const char cra_name[CRYPTO_MAX_ALG_NAME]; | 
| 134 | const char cra_driver_name[CRYPTO_MAX_ALG_NAME]; | ||
| 130 | 135 | ||
| 131 | union { | 136 | union { | 
| 132 | struct cipher_alg cipher; | 137 | struct cipher_alg cipher; | 
| diff --git a/include/net/act_api.h b/include/net/act_api.h index b55eb7c7f033..11e9eaf79f5a 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h | |||
| @@ -63,7 +63,7 @@ struct tc_action_ops | |||
| 63 | __u32 type; /* TBD to match kind */ | 63 | __u32 type; /* TBD to match kind */ | 
| 64 | __u32 capab; /* capabilities includes 4 bit version */ | 64 | __u32 capab; /* capabilities includes 4 bit version */ | 
| 65 | struct module *owner; | 65 | struct module *owner; | 
| 66 | int (*act)(struct sk_buff **, struct tc_action *, struct tcf_result *); | 66 | int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *); | 
| 67 | int (*get_stats)(struct sk_buff *, struct tc_action *); | 67 | int (*get_stats)(struct sk_buff *, struct tc_action *); | 
| 68 | int (*dump)(struct sk_buff *, struct tc_action *,int , int); | 68 | int (*dump)(struct sk_buff *, struct tc_action *,int , int); | 
| 69 | int (*cleanup)(struct tc_action *, int bind); | 69 | int (*cleanup)(struct tc_action *, int bind); | 
| diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 6492e7363d84..b94d1ad92c4d 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef __NET_PKT_SCHED_H | 1 | #ifndef __NET_PKT_SCHED_H | 
| 2 | #define __NET_PKT_SCHED_H | 2 | #define __NET_PKT_SCHED_H | 
| 3 | 3 | ||
| 4 | #include <linux/jiffies.h> | ||
| 4 | #include <net/sch_generic.h> | 5 | #include <net/sch_generic.h> | 
| 5 | 6 | ||
| 6 | struct qdisc_walker | 7 | struct qdisc_walker | 
| @@ -59,8 +60,8 @@ typedef struct timeval psched_time_t; | |||
| 59 | typedef long psched_tdiff_t; | 60 | typedef long psched_tdiff_t; | 
| 60 | 61 | ||
| 61 | #define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp)) | 62 | #define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp)) | 
| 62 | #define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ)) | 63 | #define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs) | 
| 63 | #define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ)) | 64 | #define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay) | 
| 64 | 65 | ||
| 65 | #else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ | 66 | #else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ | 
| 66 | 67 | ||
| @@ -123,9 +124,9 @@ do { \ | |||
| 123 | default: \ | 124 | default: \ | 
| 124 | __delta = 0; \ | 125 | __delta = 0; \ | 
| 125 | case 2: \ | 126 | case 2: \ | 
| 126 | __delta += 1000000; \ | 127 | __delta += USEC_PER_SEC; \ | 
| 127 | case 1: \ | 128 | case 1: \ | 
| 128 | __delta += 1000000; \ | 129 | __delta += USEC_PER_SEC; \ | 
| 129 | } \ | 130 | } \ | 
| 130 | } \ | 131 | } \ | 
| 131 | __delta; \ | 132 | __delta; \ | 
| @@ -136,9 +137,9 @@ psched_tod_diff(int delta_sec, int bound) | |||
| 136 | { | 137 | { | 
| 137 | int delta; | 138 | int delta; | 
| 138 | 139 | ||
| 139 | if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1) | 140 | if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1) | 
| 140 | return bound; | 141 | return bound; | 
| 141 | delta = delta_sec * 1000000; | 142 | delta = delta_sec * USEC_PER_SEC; | 
| 142 | if (delta > bound || delta < 0) | 143 | if (delta > bound || delta < 0) | 
| 143 | delta = bound; | 144 | delta = bound; | 
| 144 | return delta; | 145 | return delta; | 
| @@ -152,9 +153,9 @@ psched_tod_diff(int delta_sec, int bound) | |||
| 152 | default: \ | 153 | default: \ | 
| 153 | __delta = psched_tod_diff(__delta_sec, bound); break; \ | 154 | __delta = psched_tod_diff(__delta_sec, bound); break; \ | 
| 154 | case 2: \ | 155 | case 2: \ | 
| 155 | __delta += 1000000; \ | 156 | __delta += USEC_PER_SEC; \ | 
| 156 | case 1: \ | 157 | case 1: \ | 
| 157 | __delta += 1000000; \ | 158 | __delta += USEC_PER_SEC; \ | 
| 158 | case 0: \ | 159 | case 0: \ | 
| 159 | if (__delta > bound || __delta < 0) \ | 160 | if (__delta > bound || __delta < 0) \ | 
| 160 | __delta = bound; \ | 161 | __delta = bound; \ | 
| @@ -170,15 +171,15 @@ psched_tod_diff(int delta_sec, int bound) | |||
| 170 | ({ \ | 171 | ({ \ | 
| 171 | int __delta = (tv).tv_usec + (delta); \ | 172 | int __delta = (tv).tv_usec + (delta); \ | 
| 172 | (tv_res).tv_sec = (tv).tv_sec; \ | 173 | (tv_res).tv_sec = (tv).tv_sec; \ | 
| 173 | if (__delta > 1000000) { (tv_res).tv_sec++; __delta -= 1000000; } \ | 174 | if (__delta > USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \ | 
| 174 | (tv_res).tv_usec = __delta; \ | 175 | (tv_res).tv_usec = __delta; \ | 
| 175 | }) | 176 | }) | 
| 176 | 177 | ||
| 177 | #define PSCHED_TADD(tv, delta) \ | 178 | #define PSCHED_TADD(tv, delta) \ | 
| 178 | ({ \ | 179 | ({ \ | 
| 179 | (tv).tv_usec += (delta); \ | 180 | (tv).tv_usec += (delta); \ | 
| 180 | if ((tv).tv_usec > 1000000) { (tv).tv_sec++; \ | 181 | if ((tv).tv_usec > USEC_PER_SEC) { (tv).tv_sec++; \ | 
| 181 | (tv).tv_usec -= 1000000; } \ | 182 | (tv).tv_usec -= USEC_PER_SEC; } \ | 
| 182 | }) | 183 | }) | 
| 183 | 184 | ||
| 184 | /* Set/check that time is in the "past perfect"; | 185 | /* Set/check that time is in the "past perfect"; | 
| diff --git a/net/core/dev.c b/net/core/dev.c index 5081287923d5..bf66b114d3c2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -1092,15 +1092,12 @@ int skb_checksum_help(struct sk_buff *skb, int inward) | |||
| 1092 | goto out; | 1092 | goto out; | 
| 1093 | } | 1093 | } | 
| 1094 | 1094 | ||
| 1095 | if (offset > (int)skb->len) | 1095 | BUG_ON(offset > (int)skb->len); | 
| 1096 | BUG(); | ||
| 1097 | csum = skb_checksum(skb, offset, skb->len-offset, 0); | 1096 | csum = skb_checksum(skb, offset, skb->len-offset, 0); | 
| 1098 | 1097 | ||
| 1099 | offset = skb->tail - skb->h.raw; | 1098 | offset = skb->tail - skb->h.raw; | 
| 1100 | if (offset <= 0) | 1099 | BUG_ON(offset <= 0); | 
| 1101 | BUG(); | 1100 | BUG_ON(skb->csum + 2 > offset); | 
| 1102 | if (skb->csum + 2 > offset) | ||
| 1103 | BUG(); | ||
| 1104 | 1101 | ||
| 1105 | *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); | 1102 | *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); | 
| 1106 | skb->ip_summed = CHECKSUM_NONE; | 1103 | skb->ip_summed = CHECKSUM_NONE; | 
| diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 070f91cfde59..d0732e9c8560 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -791,8 +791,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc) | |||
| 791 | int end = offset + skb_shinfo(skb)->frags[i].size; | 791 | int end = offset + skb_shinfo(skb)->frags[i].size; | 
| 792 | if (end > len) { | 792 | if (end > len) { | 
| 793 | if (skb_cloned(skb)) { | 793 | if (skb_cloned(skb)) { | 
| 794 | if (!realloc) | 794 | BUG_ON(!realloc); | 
| 795 | BUG(); | ||
| 796 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | 795 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | 
| 797 | return -ENOMEM; | 796 | return -ENOMEM; | 
| 798 | } | 797 | } | 
| @@ -894,8 +893,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) | |||
| 894 | struct sk_buff *insp = NULL; | 893 | struct sk_buff *insp = NULL; | 
| 895 | 894 | ||
| 896 | do { | 895 | do { | 
| 897 | if (!list) | 896 | BUG_ON(!list); | 
| 898 | BUG(); | ||
| 899 | 897 | ||
| 900 | if (list->len <= eat) { | 898 | if (list->len <= eat) { | 
| 901 | /* Eaten as whole. */ | 899 | /* Eaten as whole. */ | 
| @@ -1199,8 +1197,7 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, | |||
| 1199 | start = end; | 1197 | start = end; | 
| 1200 | } | 1198 | } | 
| 1201 | } | 1199 | } | 
| 1202 | if (len) | 1200 | BUG_ON(len); | 
| 1203 | BUG(); | ||
| 1204 | 1201 | ||
| 1205 | return csum; | 1202 | return csum; | 
| 1206 | } | 1203 | } | 
| @@ -1282,8 +1279,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
| 1282 | start = end; | 1279 | start = end; | 
| 1283 | } | 1280 | } | 
| 1284 | } | 1281 | } | 
| 1285 | if (len) | 1282 | BUG_ON(len); | 
| 1286 | BUG(); | ||
| 1287 | return csum; | 1283 | return csum; | 
| 1288 | } | 1284 | } | 
| 1289 | 1285 | ||
| @@ -1297,8 +1293,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) | |||
| 1297 | else | 1293 | else | 
| 1298 | csstart = skb_headlen(skb); | 1294 | csstart = skb_headlen(skb); | 
| 1299 | 1295 | ||
| 1300 | if (csstart > skb_headlen(skb)) | 1296 | BUG_ON(csstart > skb_headlen(skb)); | 
| 1301 | BUG(); | ||
| 1302 | 1297 | ||
| 1303 | memcpy(to, skb->data, csstart); | 1298 | memcpy(to, skb->data, csstart); | 
| 1304 | 1299 | ||
| diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index be5a519cd2f8..105039eb7629 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
| @@ -899,8 +899,7 @@ static void icmp_address_reply(struct sk_buff *skb) | |||
| 899 | u32 _mask, *mp; | 899 | u32 _mask, *mp; | 
| 900 | 900 | ||
| 901 | mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); | 901 | mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); | 
| 902 | if (mp == NULL) | 902 | BUG_ON(mp == NULL); | 
| 903 | BUG(); | ||
| 904 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { | 903 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { | 
| 905 | if (*mp == ifa->ifa_mask && | 904 | if (*mp == ifa->ifa_mask && | 
| 906 | inet_ifa_match(rt->rt_src, ifa)) | 905 | inet_ifa_match(rt->rt_src, ifa)) | 
| diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index c49908192047..457db99c76df 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
| @@ -50,9 +50,10 @@ static struct sock *idiagnl; | |||
| 50 | #define INET_DIAG_PUT(skb, attrtype, attrlen) \ | 50 | #define INET_DIAG_PUT(skb, attrtype, attrlen) \ | 
| 51 | RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) | 51 | RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) | 
| 52 | 52 | ||
| 53 | static int inet_diag_fill(struct sk_buff *skb, struct sock *sk, | 53 | static int inet_csk_diag_fill(struct sock *sk, | 
| 54 | int ext, u32 pid, u32 seq, u16 nlmsg_flags, | 54 | struct sk_buff *skb, | 
| 55 | const struct nlmsghdr *unlh) | 55 | int ext, u32 pid, u32 seq, u16 nlmsg_flags, | 
| 56 | const struct nlmsghdr *unlh) | ||
| 56 | { | 57 | { | 
| 57 | const struct inet_sock *inet = inet_sk(sk); | 58 | const struct inet_sock *inet = inet_sk(sk); | 
| 58 | const struct inet_connection_sock *icsk = inet_csk(sk); | 59 | const struct inet_connection_sock *icsk = inet_csk(sk); | 
| @@ -70,20 +71,22 @@ static int inet_diag_fill(struct sk_buff *skb, struct sock *sk, | |||
| 70 | nlh->nlmsg_flags = nlmsg_flags; | 71 | nlh->nlmsg_flags = nlmsg_flags; | 
| 71 | 72 | ||
| 72 | r = NLMSG_DATA(nlh); | 73 | r = NLMSG_DATA(nlh); | 
| 73 | if (sk->sk_state != TCP_TIME_WAIT) { | 74 | BUG_ON(sk->sk_state == TCP_TIME_WAIT); | 
| 74 | if (ext & (1 << (INET_DIAG_MEMINFO - 1))) | 75 | |
| 75 | minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, | 76 | if (ext & (1 << (INET_DIAG_MEMINFO - 1))) | 
| 76 | sizeof(*minfo)); | 77 | minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo)); | 
| 77 | if (ext & (1 << (INET_DIAG_INFO - 1))) | 78 | |
| 78 | info = INET_DIAG_PUT(skb, INET_DIAG_INFO, | 79 | if (ext & (1 << (INET_DIAG_INFO - 1))) | 
| 79 | handler->idiag_info_size); | 80 | info = INET_DIAG_PUT(skb, INET_DIAG_INFO, | 
| 80 | 81 | handler->idiag_info_size); | |
| 81 | if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { | 82 | |
| 82 | size_t len = strlen(icsk->icsk_ca_ops->name); | 83 | if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { | 
| 83 | strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), | 84 | const size_t len = strlen(icsk->icsk_ca_ops->name); | 
| 84 | icsk->icsk_ca_ops->name); | 85 | |
| 85 | } | 86 | strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), | 
| 87 | icsk->icsk_ca_ops->name); | ||
| 86 | } | 88 | } | 
| 89 | |||
| 87 | r->idiag_family = sk->sk_family; | 90 | r->idiag_family = sk->sk_family; | 
| 88 | r->idiag_state = sk->sk_state; | 91 | r->idiag_state = sk->sk_state; | 
| 89 | r->idiag_timer = 0; | 92 | r->idiag_timer = 0; | 
| @@ -93,37 +96,6 @@ static int inet_diag_fill(struct sk_buff *skb, struct sock *sk, | |||
| 93 | r->id.idiag_cookie[0] = (u32)(unsigned long)sk; | 96 | r->id.idiag_cookie[0] = (u32)(unsigned long)sk; | 
| 94 | r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); | 97 | r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); | 
| 95 | 98 | ||
| 96 | if (r->idiag_state == TCP_TIME_WAIT) { | ||
| 97 | const struct inet_timewait_sock *tw = inet_twsk(sk); | ||
| 98 | long tmo = tw->tw_ttd - jiffies; | ||
| 99 | if (tmo < 0) | ||
| 100 | tmo = 0; | ||
| 101 | |||
| 102 | r->id.idiag_sport = tw->tw_sport; | ||
| 103 | r->id.idiag_dport = tw->tw_dport; | ||
| 104 | r->id.idiag_src[0] = tw->tw_rcv_saddr; | ||
| 105 | r->id.idiag_dst[0] = tw->tw_daddr; | ||
| 106 | r->idiag_state = tw->tw_substate; | ||
| 107 | r->idiag_timer = 3; | ||
| 108 | r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ; | ||
| 109 | r->idiag_rqueue = 0; | ||
| 110 | r->idiag_wqueue = 0; | ||
| 111 | r->idiag_uid = 0; | ||
| 112 | r->idiag_inode = 0; | ||
| 113 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
| 114 | if (r->idiag_family == AF_INET6) { | ||
| 115 | const struct inet6_timewait_sock *tw6 = inet6_twsk(sk); | ||
| 116 | |||
| 117 | ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, | ||
| 118 | &tw6->tw_v6_rcv_saddr); | ||
| 119 | ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, | ||
| 120 | &tw6->tw_v6_daddr); | ||
| 121 | } | ||
| 122 | #endif | ||
| 123 | nlh->nlmsg_len = skb->tail - b; | ||
| 124 | return skb->len; | ||
| 125 | } | ||
| 126 | |||
| 127 | r->id.idiag_sport = inet->sport; | 99 | r->id.idiag_sport = inet->sport; | 
| 128 | r->id.idiag_dport = inet->dport; | 100 | r->id.idiag_dport = inet->dport; | 
| 129 | r->id.idiag_src[0] = inet->rcv_saddr; | 101 | r->id.idiag_src[0] = inet->rcv_saddr; | 
| @@ -185,7 +157,75 @@ nlmsg_failure: | |||
| 185 | return -1; | 157 | return -1; | 
| 186 | } | 158 | } | 
| 187 | 159 | ||
| 188 | static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh) | 160 | static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, | 
| 161 | struct sk_buff *skb, int ext, u32 pid, | ||
| 162 | u32 seq, u16 nlmsg_flags, | ||
| 163 | const struct nlmsghdr *unlh) | ||
| 164 | { | ||
| 165 | long tmo; | ||
| 166 | struct inet_diag_msg *r; | ||
| 167 | const unsigned char *previous_tail = skb->tail; | ||
| 168 | struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, | ||
| 169 | unlh->nlmsg_type, sizeof(*r)); | ||
| 170 | |||
| 171 | r = NLMSG_DATA(nlh); | ||
| 172 | BUG_ON(tw->tw_state != TCP_TIME_WAIT); | ||
| 173 | |||
| 174 | nlh->nlmsg_flags = nlmsg_flags; | ||
| 175 | |||
| 176 | tmo = tw->tw_ttd - jiffies; | ||
| 177 | if (tmo < 0) | ||
| 178 | tmo = 0; | ||
| 179 | |||
| 180 | r->idiag_family = tw->tw_family; | ||
| 181 | r->idiag_state = tw->tw_state; | ||
| 182 | r->idiag_timer = 0; | ||
| 183 | r->idiag_retrans = 0; | ||
| 184 | r->id.idiag_if = tw->tw_bound_dev_if; | ||
| 185 | r->id.idiag_cookie[0] = (u32)(unsigned long)tw; | ||
| 186 | r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); | ||
| 187 | r->id.idiag_sport = tw->tw_sport; | ||
| 188 | r->id.idiag_dport = tw->tw_dport; | ||
| 189 | r->id.idiag_src[0] = tw->tw_rcv_saddr; | ||
| 190 | r->id.idiag_dst[0] = tw->tw_daddr; | ||
| 191 | r->idiag_state = tw->tw_substate; | ||
| 192 | r->idiag_timer = 3; | ||
| 193 | r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ; | ||
| 194 | r->idiag_rqueue = 0; | ||
| 195 | r->idiag_wqueue = 0; | ||
| 196 | r->idiag_uid = 0; | ||
| 197 | r->idiag_inode = 0; | ||
| 198 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
| 199 | if (tw->tw_family == AF_INET6) { | ||
| 200 | const struct inet6_timewait_sock *tw6 = | ||
| 201 | inet6_twsk((struct sock *)tw); | ||
| 202 | |||
| 203 | ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, | ||
| 204 | &tw6->tw_v6_rcv_saddr); | ||
| 205 | ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, | ||
| 206 | &tw6->tw_v6_daddr); | ||
| 207 | } | ||
| 208 | #endif | ||
| 209 | nlh->nlmsg_len = skb->tail - previous_tail; | ||
| 210 | return skb->len; | ||
| 211 | nlmsg_failure: | ||
| 212 | skb_trim(skb, previous_tail - skb->data); | ||
| 213 | return -1; | ||
| 214 | } | ||
| 215 | |||
| 216 | static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, | ||
| 217 | int ext, u32 pid, u32 seq, u16 nlmsg_flags, | ||
| 218 | const struct nlmsghdr *unlh) | ||
| 219 | { | ||
| 220 | if (sk->sk_state == TCP_TIME_WAIT) | ||
| 221 | return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, | ||
| 222 | skb, ext, pid, seq, nlmsg_flags, | ||
| 223 | unlh); | ||
| 224 | return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh); | ||
| 225 | } | ||
| 226 | |||
| 227 | static int inet_diag_get_exact(struct sk_buff *in_skb, | ||
| 228 | const struct nlmsghdr *nlh) | ||
| 189 | { | 229 | { | 
| 190 | int err; | 230 | int err; | 
| 191 | struct sock *sk; | 231 | struct sock *sk; | 
| @@ -235,7 +275,7 @@ static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nl | |||
| 235 | if (!rep) | 275 | if (!rep) | 
| 236 | goto out; | 276 | goto out; | 
| 237 | 277 | ||
| 238 | if (inet_diag_fill(rep, sk, req->idiag_ext, | 278 | if (sk_diag_fill(sk, rep, req->idiag_ext, | 
| 239 | NETLINK_CB(in_skb).pid, | 279 | NETLINK_CB(in_skb).pid, | 
| 240 | nlh->nlmsg_seq, 0, nlh) <= 0) | 280 | nlh->nlmsg_seq, 0, nlh) <= 0) | 
| 241 | BUG(); | 281 | BUG(); | 
| @@ -283,7 +323,7 @@ static int bitstring_match(const u32 *a1, const u32 *a2, int bits) | |||
| 283 | 323 | ||
| 284 | 324 | ||
| 285 | static int inet_diag_bc_run(const void *bc, int len, | 325 | static int inet_diag_bc_run(const void *bc, int len, | 
| 286 | const struct inet_diag_entry *entry) | 326 | const struct inet_diag_entry *entry) | 
| 287 | { | 327 | { | 
| 288 | while (len > 0) { | 328 | while (len > 0) { | 
| 289 | int yes = 1; | 329 | int yes = 1; | 
| @@ -322,7 +362,7 @@ static int inet_diag_bc_run(const void *bc, int len, | |||
| 322 | yes = 0; | 362 | yes = 0; | 
| 323 | break; | 363 | break; | 
| 324 | } | 364 | } | 
| 325 | 365 | ||
| 326 | if (cond->prefix_len == 0) | 366 | if (cond->prefix_len == 0) | 
| 327 | break; | 367 | break; | 
| 328 | 368 | ||
| @@ -331,7 +371,8 @@ static int inet_diag_bc_run(const void *bc, int len, | |||
| 331 | else | 371 | else | 
| 332 | addr = entry->daddr; | 372 | addr = entry->daddr; | 
| 333 | 373 | ||
| 334 | if (bitstring_match(addr, cond->addr, cond->prefix_len)) | 374 | if (bitstring_match(addr, cond->addr, | 
| 375 | cond->prefix_len)) | ||
| 335 | break; | 376 | break; | 
| 336 | if (entry->family == AF_INET6 && | 377 | if (entry->family == AF_INET6 && | 
| 337 | cond->family == AF_INET) { | 378 | cond->family == AF_INET) { | 
| @@ -346,7 +387,7 @@ static int inet_diag_bc_run(const void *bc, int len, | |||
| 346 | } | 387 | } | 
| 347 | } | 388 | } | 
| 348 | 389 | ||
| 349 | if (yes) { | 390 | if (yes) { | 
| 350 | len -= op->yes; | 391 | len -= op->yes; | 
| 351 | bc += op->yes; | 392 | bc += op->yes; | 
| 352 | } else { | 393 | } else { | 
| @@ -407,14 +448,15 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) | |||
| 407 | default: | 448 | default: | 
| 408 | return -EINVAL; | 449 | return -EINVAL; | 
| 409 | } | 450 | } | 
| 410 | bc += op->yes; | 451 | bc += op->yes; | 
| 411 | len -= op->yes; | 452 | len -= op->yes; | 
| 412 | } | 453 | } | 
| 413 | return len == 0 ? 0 : -EINVAL; | 454 | return len == 0 ? 0 : -EINVAL; | 
| 414 | } | 455 | } | 
| 415 | 456 | ||
| 416 | static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk, | 457 | static int inet_csk_diag_dump(struct sock *sk, | 
| 417 | struct netlink_callback *cb) | 458 | struct sk_buff *skb, | 
| 459 | struct netlink_callback *cb) | ||
| 418 | { | 460 | { | 
| 419 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); | 461 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); | 
| 420 | 462 | ||
| @@ -444,14 +486,50 @@ static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk, | |||
| 444 | return 0; | 486 | return 0; | 
| 445 | } | 487 | } | 
| 446 | 488 | ||
| 447 | return inet_diag_fill(skb, sk, r->idiag_ext, NETLINK_CB(cb->skb).pid, | 489 | return inet_csk_diag_fill(sk, skb, r->idiag_ext, | 
| 448 | cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); | 490 | NETLINK_CB(cb->skb).pid, | 
| 491 | cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); | ||
| 492 | } | ||
| 493 | |||
| 494 | static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, | ||
| 495 | struct sk_buff *skb, | ||
| 496 | struct netlink_callback *cb) | ||
| 497 | { | ||
| 498 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); | ||
| 499 | |||
| 500 | if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { | ||
| 501 | struct inet_diag_entry entry; | ||
| 502 | struct rtattr *bc = (struct rtattr *)(r + 1); | ||
| 503 | |||
| 504 | entry.family = tw->tw_family; | ||
| 505 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
| 506 | if (tw->tw_family == AF_INET6) { | ||
| 507 | struct inet6_timewait_sock *tw6 = | ||
| 508 | inet6_twsk((struct sock *)tw); | ||
| 509 | entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32; | ||
| 510 | entry.daddr = tw6->tw_v6_daddr.s6_addr32; | ||
| 511 | } else | ||
| 512 | #endif | ||
| 513 | { | ||
| 514 | entry.saddr = &tw->tw_rcv_saddr; | ||
| 515 | entry.daddr = &tw->tw_daddr; | ||
| 516 | } | ||
| 517 | entry.sport = tw->tw_num; | ||
| 518 | entry.dport = ntohs(tw->tw_dport); | ||
| 519 | entry.userlocks = 0; | ||
| 520 | |||
| 521 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) | ||
| 522 | return 0; | ||
| 523 | } | ||
| 524 | |||
| 525 | return inet_twsk_diag_fill(tw, skb, r->idiag_ext, | ||
| 526 | NETLINK_CB(cb->skb).pid, | ||
| 527 | cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); | ||
| 449 | } | 528 | } | 
| 450 | 529 | ||
| 451 | static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | 530 | static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | 
| 452 | struct request_sock *req, | 531 | struct request_sock *req, u32 pid, u32 seq, | 
| 453 | u32 pid, u32 seq, | 532 | const struct nlmsghdr *unlh) | 
| 454 | const struct nlmsghdr *unlh) | ||
| 455 | { | 533 | { | 
| 456 | const struct inet_request_sock *ireq = inet_rsk(req); | 534 | const struct inet_request_sock *ireq = inet_rsk(req); | 
| 457 | struct inet_sock *inet = inet_sk(sk); | 535 | struct inet_sock *inet = inet_sk(sk); | 
| @@ -504,7 +582,7 @@ nlmsg_failure: | |||
| 504 | } | 582 | } | 
| 505 | 583 | ||
| 506 | static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | 584 | static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | 
| 507 | struct netlink_callback *cb) | 585 | struct netlink_callback *cb) | 
| 508 | { | 586 | { | 
| 509 | struct inet_diag_entry entry; | 587 | struct inet_diag_entry entry; | 
| 510 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); | 588 | struct inet_diag_req *r = NLMSG_DATA(cb->nlh); | 
| @@ -556,7 +634,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
| 556 | inet6_rsk(req)->loc_addr.s6_addr32 : | 634 | inet6_rsk(req)->loc_addr.s6_addr32 : | 
| 557 | #endif | 635 | #endif | 
| 558 | &ireq->loc_addr; | 636 | &ireq->loc_addr; | 
| 559 | entry.daddr = | 637 | entry.daddr = | 
| 560 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | 638 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | 
| 561 | (entry.family == AF_INET6) ? | 639 | (entry.family == AF_INET6) ? | 
| 562 | inet6_rsk(req)->rmt_addr.s6_addr32 : | 640 | inet6_rsk(req)->rmt_addr.s6_addr32 : | 
| @@ -599,7 +677,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 599 | handler = inet_diag_table[cb->nlh->nlmsg_type]; | 677 | handler = inet_diag_table[cb->nlh->nlmsg_type]; | 
| 600 | BUG_ON(handler == NULL); | 678 | BUG_ON(handler == NULL); | 
| 601 | hashinfo = handler->idiag_hashinfo; | 679 | hashinfo = handler->idiag_hashinfo; | 
| 602 | 680 | ||
| 603 | s_i = cb->args[1]; | 681 | s_i = cb->args[1]; | 
| 604 | s_num = num = cb->args[2]; | 682 | s_num = num = cb->args[2]; | 
| 605 | 683 | ||
| @@ -630,7 +708,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 630 | cb->args[3] > 0) | 708 | cb->args[3] > 0) | 
| 631 | goto syn_recv; | 709 | goto syn_recv; | 
| 632 | 710 | ||
| 633 | if (inet_diag_dump_sock(skb, sk, cb) < 0) { | 711 | if (inet_csk_diag_dump(sk, skb, cb) < 0) { | 
| 634 | inet_listen_unlock(hashinfo); | 712 | inet_listen_unlock(hashinfo); | 
| 635 | goto done; | 713 | goto done; | 
| 636 | } | 714 | } | 
| @@ -672,7 +750,6 @@ skip_listen_ht: | |||
| 672 | s_num = 0; | 750 | s_num = 0; | 
| 673 | 751 | ||
| 674 | read_lock_bh(&head->lock); | 752 | read_lock_bh(&head->lock); | 
| 675 | |||
| 676 | num = 0; | 753 | num = 0; | 
| 677 | sk_for_each(sk, node, &head->chain) { | 754 | sk_for_each(sk, node, &head->chain) { | 
| 678 | struct inet_sock *inet = inet_sk(sk); | 755 | struct inet_sock *inet = inet_sk(sk); | 
| @@ -684,9 +761,10 @@ skip_listen_ht: | |||
| 684 | if (r->id.idiag_sport != inet->sport && | 761 | if (r->id.idiag_sport != inet->sport && | 
| 685 | r->id.idiag_sport) | 762 | r->id.idiag_sport) | 
| 686 | goto next_normal; | 763 | goto next_normal; | 
| 687 | if (r->id.idiag_dport != inet->dport && r->id.idiag_dport) | 764 | if (r->id.idiag_dport != inet->dport && | 
| 765 | r->id.idiag_dport) | ||
| 688 | goto next_normal; | 766 | goto next_normal; | 
| 689 | if (inet_diag_dump_sock(skb, sk, cb) < 0) { | 767 | if (inet_csk_diag_dump(sk, skb, cb) < 0) { | 
| 690 | read_unlock_bh(&head->lock); | 768 | read_unlock_bh(&head->lock); | 
| 691 | goto done; | 769 | goto done; | 
| 692 | } | 770 | } | 
| @@ -695,19 +773,20 @@ next_normal: | |||
| 695 | } | 773 | } | 
| 696 | 774 | ||
| 697 | if (r->idiag_states & TCPF_TIME_WAIT) { | 775 | if (r->idiag_states & TCPF_TIME_WAIT) { | 
| 698 | sk_for_each(sk, node, | 776 | struct inet_timewait_sock *tw; | 
| 777 | |||
| 778 | inet_twsk_for_each(tw, node, | ||
| 699 | &hashinfo->ehash[i + hashinfo->ehash_size].chain) { | 779 | &hashinfo->ehash[i + hashinfo->ehash_size].chain) { | 
| 700 | struct inet_sock *inet = inet_sk(sk); | ||
| 701 | 780 | ||
| 702 | if (num < s_num) | 781 | if (num < s_num) | 
| 703 | goto next_dying; | 782 | goto next_dying; | 
| 704 | if (r->id.idiag_sport != inet->sport && | 783 | if (r->id.idiag_sport != tw->tw_sport && | 
| 705 | r->id.idiag_sport) | 784 | r->id.idiag_sport) | 
| 706 | goto next_dying; | 785 | goto next_dying; | 
| 707 | if (r->id.idiag_dport != inet->dport && | 786 | if (r->id.idiag_dport != tw->tw_dport && | 
| 708 | r->id.idiag_dport) | 787 | r->id.idiag_dport) | 
| 709 | goto next_dying; | 788 | goto next_dying; | 
| 710 | if (inet_diag_dump_sock(skb, sk, cb) < 0) { | 789 | if (inet_twsk_diag_dump(tw, skb, cb) < 0) { | 
| 711 | read_unlock_bh(&head->lock); | 790 | read_unlock_bh(&head->lock); | 
| 712 | goto done; | 791 | goto done; | 
| 713 | } | 792 | } | 
| @@ -724,8 +803,7 @@ done: | |||
| 724 | return skb->len; | 803 | return skb->len; | 
| 725 | } | 804 | } | 
| 726 | 805 | ||
| 727 | static __inline__ int | 806 | static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | 
| 728 | inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | ||
| 729 | { | 807 | { | 
| 730 | if (!(nlh->nlmsg_flags&NLM_F_REQUEST)) | 808 | if (!(nlh->nlmsg_flags&NLM_F_REQUEST)) | 
| 731 | return 0; | 809 | return 0; | 
| @@ -755,9 +833,8 @@ inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 755 | } | 833 | } | 
| 756 | return netlink_dump_start(idiagnl, skb, nlh, | 834 | return netlink_dump_start(idiagnl, skb, nlh, | 
| 757 | inet_diag_dump, NULL); | 835 | inet_diag_dump, NULL); | 
| 758 | } else { | 836 | } else | 
| 759 | return inet_diag_get_exact(skb, nlh); | 837 | return inet_diag_get_exact(skb, nlh); | 
| 760 | } | ||
| 761 | 838 | ||
| 762 | err_inval: | 839 | err_inval: | 
| 763 | return -EINVAL; | 840 | return -EINVAL; | 
| @@ -766,15 +843,15 @@ err_inval: | |||
| 766 | 843 | ||
| 767 | static inline void inet_diag_rcv_skb(struct sk_buff *skb) | 844 | static inline void inet_diag_rcv_skb(struct sk_buff *skb) | 
| 768 | { | 845 | { | 
| 769 | int err; | ||
| 770 | struct nlmsghdr * nlh; | ||
| 771 | |||
| 772 | if (skb->len >= NLMSG_SPACE(0)) { | 846 | if (skb->len >= NLMSG_SPACE(0)) { | 
| 773 | nlh = (struct nlmsghdr *)skb->data; | 847 | int err; | 
| 774 | if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) | 848 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; | 
| 849 | |||
| 850 | if (nlh->nlmsg_len < sizeof(*nlh) || | ||
| 851 | skb->len < nlh->nlmsg_len) | ||
| 775 | return; | 852 | return; | 
| 776 | err = inet_diag_rcv_msg(skb, nlh); | 853 | err = inet_diag_rcv_msg(skb, nlh); | 
| 777 | if (err || nlh->nlmsg_flags & NLM_F_ACK) | 854 | if (err || nlh->nlmsg_flags & NLM_F_ACK) | 
| 778 | netlink_ack(skb, nlh, err); | 855 | netlink_ack(skb, nlh, err); | 
| 779 | } | 856 | } | 
| 780 | } | 857 | } | 
| diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index ce5fe3f74a3d..2160874ce7aa 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
| @@ -304,8 +304,7 @@ static void unlink_from_pool(struct inet_peer *p) | |||
| 304 | /* look for a node to insert instead of p */ | 304 | /* look for a node to insert instead of p */ | 
| 305 | struct inet_peer *t; | 305 | struct inet_peer *t; | 
| 306 | t = lookup_rightempty(p); | 306 | t = lookup_rightempty(p); | 
| 307 | if (*stackptr[-1] != t) | 307 | BUG_ON(*stackptr[-1] != t); | 
| 308 | BUG(); | ||
| 309 | **--stackptr = t->avl_left; | 308 | **--stackptr = t->avl_left; | 
| 310 | /* t is removed, t->v4daddr > x->v4daddr for any | 309 | /* t is removed, t->v4daddr > x->v4daddr for any | 
| 311 | * x in p->avl_left subtree. | 310 | * x in p->avl_left subtree. | 
| @@ -314,8 +313,7 @@ static void unlink_from_pool(struct inet_peer *p) | |||
| 314 | t->avl_left = p->avl_left; | 313 | t->avl_left = p->avl_left; | 
| 315 | t->avl_right = p->avl_right; | 314 | t->avl_right = p->avl_right; | 
| 316 | t->avl_height = p->avl_height; | 315 | t->avl_height = p->avl_height; | 
| 317 | if (delp[1] != &p->avl_left) | 316 | BUG_ON(delp[1] != &p->avl_left); | 
| 318 | BUG(); | ||
| 319 | delp[1] = &t->avl_left; /* was &p->avl_left */ | 317 | delp[1] = &t->avl_left; /* was &p->avl_left */ | 
| 320 | } | 318 | } | 
| 321 | peer_avl_rebalance(stack, stackptr); | 319 | peer_avl_rebalance(stack, stackptr); | 
| diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index de16e944777f..1e93eafa7af1 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -188,7 +188,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key) | |||
| 188 | } | 188 | } | 
| 189 | 189 | ||
| 190 | if (ipgre_fb_tunnel_dev->flags&IFF_UP) | 190 | if (ipgre_fb_tunnel_dev->flags&IFF_UP) | 
| 191 | return ipgre_fb_tunnel_dev->priv; | 191 | return netdev_priv(ipgre_fb_tunnel_dev); | 
| 192 | return NULL; | 192 | return NULL; | 
| 193 | } | 193 | } | 
| 194 | 194 | ||
| @@ -278,7 +278,7 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int | |||
| 278 | return NULL; | 278 | return NULL; | 
| 279 | 279 | ||
| 280 | dev->init = ipgre_tunnel_init; | 280 | dev->init = ipgre_tunnel_init; | 
| 281 | nt = dev->priv; | 281 | nt = netdev_priv(dev); | 
| 282 | nt->parms = *parms; | 282 | nt->parms = *parms; | 
| 283 | 283 | ||
| 284 | if (register_netdevice(dev) < 0) { | 284 | if (register_netdevice(dev) < 0) { | 
| @@ -286,9 +286,6 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int | |||
| 286 | goto failed; | 286 | goto failed; | 
| 287 | } | 287 | } | 
| 288 | 288 | ||
| 289 | nt = dev->priv; | ||
| 290 | nt->parms = *parms; | ||
| 291 | |||
| 292 | dev_hold(dev); | 289 | dev_hold(dev); | 
| 293 | ipgre_tunnel_link(nt); | 290 | ipgre_tunnel_link(nt); | 
| 294 | return nt; | 291 | return nt; | 
| @@ -299,7 +296,7 @@ failed: | |||
| 299 | 296 | ||
| 300 | static void ipgre_tunnel_uninit(struct net_device *dev) | 297 | static void ipgre_tunnel_uninit(struct net_device *dev) | 
| 301 | { | 298 | { | 
| 302 | ipgre_tunnel_unlink((struct ip_tunnel*)dev->priv); | 299 | ipgre_tunnel_unlink(netdev_priv(dev)); | 
| 303 | dev_put(dev); | 300 | dev_put(dev); | 
| 304 | } | 301 | } | 
| 305 | 302 | ||
| @@ -518,7 +515,7 @@ out: | |||
| 518 | skb2->dst->ops->update_pmtu(skb2->dst, rel_info); | 515 | skb2->dst->ops->update_pmtu(skb2->dst, rel_info); | 
| 519 | rel_info = htonl(rel_info); | 516 | rel_info = htonl(rel_info); | 
| 520 | } else if (type == ICMP_TIME_EXCEEDED) { | 517 | } else if (type == ICMP_TIME_EXCEEDED) { | 
| 521 | struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv; | 518 | struct ip_tunnel *t = netdev_priv(skb2->dev); | 
| 522 | if (t->parms.iph.ttl) { | 519 | if (t->parms.iph.ttl) { | 
| 523 | rel_type = ICMP_DEST_UNREACH; | 520 | rel_type = ICMP_DEST_UNREACH; | 
| 524 | rel_code = ICMP_HOST_UNREACH; | 521 | rel_code = ICMP_HOST_UNREACH; | 
| @@ -669,7 +666,7 @@ drop_nolock: | |||
| 669 | 666 | ||
| 670 | static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 667 | static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 
| 671 | { | 668 | { | 
| 672 | struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; | 669 | struct ip_tunnel *tunnel = netdev_priv(dev); | 
| 673 | struct net_device_stats *stats = &tunnel->stat; | 670 | struct net_device_stats *stats = &tunnel->stat; | 
| 674 | struct iphdr *old_iph = skb->nh.iph; | 671 | struct iphdr *old_iph = skb->nh.iph; | 
| 675 | struct iphdr *tiph; | 672 | struct iphdr *tiph; | 
| @@ -915,7 +912,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 915 | t = ipgre_tunnel_locate(&p, 0); | 912 | t = ipgre_tunnel_locate(&p, 0); | 
| 916 | } | 913 | } | 
| 917 | if (t == NULL) | 914 | if (t == NULL) | 
| 918 | t = (struct ip_tunnel*)dev->priv; | 915 | t = netdev_priv(dev); | 
| 919 | memcpy(&p, &t->parms, sizeof(p)); | 916 | memcpy(&p, &t->parms, sizeof(p)); | 
| 920 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 917 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 
| 921 | err = -EFAULT; | 918 | err = -EFAULT; | 
| @@ -955,7 +952,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 955 | } else { | 952 | } else { | 
| 956 | unsigned nflags=0; | 953 | unsigned nflags=0; | 
| 957 | 954 | ||
| 958 | t = (struct ip_tunnel*)dev->priv; | 955 | t = netdev_priv(dev); | 
| 959 | 956 | ||
| 960 | if (MULTICAST(p.iph.daddr)) | 957 | if (MULTICAST(p.iph.daddr)) | 
| 961 | nflags = IFF_BROADCAST; | 958 | nflags = IFF_BROADCAST; | 
| @@ -1004,7 +1001,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 1004 | if ((t = ipgre_tunnel_locate(&p, 0)) == NULL) | 1001 | if ((t = ipgre_tunnel_locate(&p, 0)) == NULL) | 
| 1005 | goto done; | 1002 | goto done; | 
| 1006 | err = -EPERM; | 1003 | err = -EPERM; | 
| 1007 | if (t == ipgre_fb_tunnel_dev->priv) | 1004 | if (t == netdev_priv(ipgre_fb_tunnel_dev)) | 
| 1008 | goto done; | 1005 | goto done; | 
| 1009 | dev = t->dev; | 1006 | dev = t->dev; | 
| 1010 | } | 1007 | } | 
| @@ -1021,12 +1018,12 @@ done: | |||
| 1021 | 1018 | ||
| 1022 | static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev) | 1019 | static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev) | 
| 1023 | { | 1020 | { | 
| 1024 | return &(((struct ip_tunnel*)dev->priv)->stat); | 1021 | return &(((struct ip_tunnel*)netdev_priv(dev))->stat); | 
| 1025 | } | 1022 | } | 
| 1026 | 1023 | ||
| 1027 | static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 1024 | static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 
| 1028 | { | 1025 | { | 
| 1029 | struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; | 1026 | struct ip_tunnel *tunnel = netdev_priv(dev); | 
| 1030 | if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen) | 1027 | if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen) | 
| 1031 | return -EINVAL; | 1028 | return -EINVAL; | 
| 1032 | dev->mtu = new_mtu; | 1029 | dev->mtu = new_mtu; | 
| @@ -1066,7 +1063,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1066 | static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, | 1063 | static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, | 
| 1067 | void *daddr, void *saddr, unsigned len) | 1064 | void *daddr, void *saddr, unsigned len) | 
| 1068 | { | 1065 | { | 
| 1069 | struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; | 1066 | struct ip_tunnel *t = netdev_priv(dev); | 
| 1070 | struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); | 1067 | struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); | 
| 1071 | u16 *p = (u16*)(iph+1); | 1068 | u16 *p = (u16*)(iph+1); | 
| 1072 | 1069 | ||
| @@ -1093,7 +1090,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh | |||
| 1093 | 1090 | ||
| 1094 | static int ipgre_open(struct net_device *dev) | 1091 | static int ipgre_open(struct net_device *dev) | 
| 1095 | { | 1092 | { | 
| 1096 | struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; | 1093 | struct ip_tunnel *t = netdev_priv(dev); | 
| 1097 | 1094 | ||
| 1098 | if (MULTICAST(t->parms.iph.daddr)) { | 1095 | if (MULTICAST(t->parms.iph.daddr)) { | 
| 1099 | struct flowi fl = { .oif = t->parms.link, | 1096 | struct flowi fl = { .oif = t->parms.link, | 
| @@ -1117,7 +1114,7 @@ static int ipgre_open(struct net_device *dev) | |||
| 1117 | 1114 | ||
| 1118 | static int ipgre_close(struct net_device *dev) | 1115 | static int ipgre_close(struct net_device *dev) | 
| 1119 | { | 1116 | { | 
| 1120 | struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; | 1117 | struct ip_tunnel *t = netdev_priv(dev); | 
| 1121 | if (MULTICAST(t->parms.iph.daddr) && t->mlink) { | 1118 | if (MULTICAST(t->parms.iph.daddr) && t->mlink) { | 
| 1122 | struct in_device *in_dev = inetdev_by_index(t->mlink); | 1119 | struct in_device *in_dev = inetdev_by_index(t->mlink); | 
| 1123 | if (in_dev) { | 1120 | if (in_dev) { | 
| @@ -1157,7 +1154,7 @@ static int ipgre_tunnel_init(struct net_device *dev) | |||
| 1157 | int mtu = ETH_DATA_LEN; | 1154 | int mtu = ETH_DATA_LEN; | 
| 1158 | int addend = sizeof(struct iphdr) + 4; | 1155 | int addend = sizeof(struct iphdr) + 4; | 
| 1159 | 1156 | ||
| 1160 | tunnel = (struct ip_tunnel*)dev->priv; | 1157 | tunnel = netdev_priv(dev); | 
| 1161 | iph = &tunnel->parms.iph; | 1158 | iph = &tunnel->parms.iph; | 
| 1162 | 1159 | ||
| 1163 | tunnel->dev = dev; | 1160 | tunnel->dev = dev; | 
| @@ -1221,7 +1218,7 @@ static int ipgre_tunnel_init(struct net_device *dev) | |||
| 1221 | 1218 | ||
| 1222 | static int __init ipgre_fb_tunnel_init(struct net_device *dev) | 1219 | static int __init ipgre_fb_tunnel_init(struct net_device *dev) | 
| 1223 | { | 1220 | { | 
| 1224 | struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; | 1221 | struct ip_tunnel *tunnel = netdev_priv(dev); | 
| 1225 | struct iphdr *iph = &tunnel->parms.iph; | 1222 | struct iphdr *iph = &tunnel->parms.iph; | 
| 1226 | 1223 | ||
| 1227 | tunnel->dev = dev; | 1224 | tunnel->dev = dev; | 
| diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index c2169b47ddfd..3324fbfe528a 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -69,6 +69,7 @@ | |||
| 69 | #include <net/ip.h> | 69 | #include <net/ip.h> | 
| 70 | #include <net/protocol.h> | 70 | #include <net/protocol.h> | 
| 71 | #include <net/route.h> | 71 | #include <net/route.h> | 
| 72 | #include <net/xfrm.h> | ||
| 72 | #include <linux/skbuff.h> | 73 | #include <linux/skbuff.h> | 
| 73 | #include <net/sock.h> | 74 | #include <net/sock.h> | 
| 74 | #include <net/arp.h> | 75 | #include <net/arp.h> | 
| diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index bbd85f5ec985..bc5ca23b2646 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
| @@ -244,7 +244,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c | |||
| 244 | if (dev == NULL) | 244 | if (dev == NULL) | 
| 245 | return NULL; | 245 | return NULL; | 
| 246 | 246 | ||
| 247 | nt = dev->priv; | 247 | nt = netdev_priv(dev); | 
| 248 | SET_MODULE_OWNER(dev); | 248 | SET_MODULE_OWNER(dev); | 
| 249 | dev->init = ipip_tunnel_init; | 249 | dev->init = ipip_tunnel_init; | 
| 250 | nt->parms = *parms; | 250 | nt->parms = *parms; | 
| @@ -269,7 +269,7 @@ static void ipip_tunnel_uninit(struct net_device *dev) | |||
| 269 | tunnels_wc[0] = NULL; | 269 | tunnels_wc[0] = NULL; | 
| 270 | write_unlock_bh(&ipip_lock); | 270 | write_unlock_bh(&ipip_lock); | 
| 271 | } else | 271 | } else | 
| 272 | ipip_tunnel_unlink((struct ip_tunnel*)dev->priv); | 272 | ipip_tunnel_unlink(netdev_priv(dev)); | 
| 273 | dev_put(dev); | 273 | dev_put(dev); | 
| 274 | } | 274 | } | 
| 275 | 275 | ||
| @@ -443,7 +443,7 @@ out: | |||
| 443 | skb2->dst->ops->update_pmtu(skb2->dst, rel_info); | 443 | skb2->dst->ops->update_pmtu(skb2->dst, rel_info); | 
| 444 | rel_info = htonl(rel_info); | 444 | rel_info = htonl(rel_info); | 
| 445 | } else if (type == ICMP_TIME_EXCEEDED) { | 445 | } else if (type == ICMP_TIME_EXCEEDED) { | 
| 446 | struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv; | 446 | struct ip_tunnel *t = netdev_priv(skb2->dev); | 
| 447 | if (t->parms.iph.ttl) { | 447 | if (t->parms.iph.ttl) { | 
| 448 | rel_type = ICMP_DEST_UNREACH; | 448 | rel_type = ICMP_DEST_UNREACH; | 
| 449 | rel_code = ICMP_HOST_UNREACH; | 449 | rel_code = ICMP_HOST_UNREACH; | 
| @@ -514,7 +514,7 @@ out: | |||
| 514 | 514 | ||
| 515 | static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 515 | static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 
| 516 | { | 516 | { | 
| 517 | struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; | 517 | struct ip_tunnel *tunnel = netdev_priv(dev); | 
| 518 | struct net_device_stats *stats = &tunnel->stat; | 518 | struct net_device_stats *stats = &tunnel->stat; | 
| 519 | struct iphdr *tiph = &tunnel->parms.iph; | 519 | struct iphdr *tiph = &tunnel->parms.iph; | 
| 520 | u8 tos = tunnel->parms.iph.tos; | 520 | u8 tos = tunnel->parms.iph.tos; | 
| @@ -674,7 +674,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 674 | t = ipip_tunnel_locate(&p, 0); | 674 | t = ipip_tunnel_locate(&p, 0); | 
| 675 | } | 675 | } | 
| 676 | if (t == NULL) | 676 | if (t == NULL) | 
| 677 | t = (struct ip_tunnel*)dev->priv; | 677 | t = netdev_priv(dev); | 
| 678 | memcpy(&p, &t->parms, sizeof(p)); | 678 | memcpy(&p, &t->parms, sizeof(p)); | 
| 679 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 679 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 
| 680 | err = -EFAULT; | 680 | err = -EFAULT; | 
| @@ -711,7 +711,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 711 | err = -EINVAL; | 711 | err = -EINVAL; | 
| 712 | break; | 712 | break; | 
| 713 | } | 713 | } | 
| 714 | t = (struct ip_tunnel*)dev->priv; | 714 | t = netdev_priv(dev); | 
| 715 | ipip_tunnel_unlink(t); | 715 | ipip_tunnel_unlink(t); | 
| 716 | t->parms.iph.saddr = p.iph.saddr; | 716 | t->parms.iph.saddr = p.iph.saddr; | 
| 717 | t->parms.iph.daddr = p.iph.daddr; | 717 | t->parms.iph.daddr = p.iph.daddr; | 
| @@ -765,7 +765,7 @@ done: | |||
| 765 | 765 | ||
| 766 | static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev) | 766 | static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev) | 
| 767 | { | 767 | { | 
| 768 | return &(((struct ip_tunnel*)dev->priv)->stat); | 768 | return &(((struct ip_tunnel*)netdev_priv(dev))->stat); | 
| 769 | } | 769 | } | 
| 770 | 770 | ||
| 771 | static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 771 | static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 
| @@ -800,7 +800,7 @@ static int ipip_tunnel_init(struct net_device *dev) | |||
| 800 | struct ip_tunnel *tunnel; | 800 | struct ip_tunnel *tunnel; | 
| 801 | struct iphdr *iph; | 801 | struct iphdr *iph; | 
| 802 | 802 | ||
| 803 | tunnel = (struct ip_tunnel*)dev->priv; | 803 | tunnel = netdev_priv(dev); | 
| 804 | iph = &tunnel->parms.iph; | 804 | iph = &tunnel->parms.iph; | 
| 805 | 805 | ||
| 806 | tunnel->dev = dev; | 806 | tunnel->dev = dev; | 
| @@ -838,7 +838,7 @@ static int ipip_tunnel_init(struct net_device *dev) | |||
| 838 | 838 | ||
| 839 | static int __init ipip_fb_tunnel_init(struct net_device *dev) | 839 | static int __init ipip_fb_tunnel_init(struct net_device *dev) | 
| 840 | { | 840 | { | 
| 841 | struct ip_tunnel *tunnel = dev->priv; | 841 | struct ip_tunnel *tunnel = netdev_priv(dev); | 
| 842 | struct iphdr *iph = &tunnel->parms.iph; | 842 | struct iphdr *iph = &tunnel->parms.iph; | 
| 843 | 843 | ||
| 844 | tunnel->dev = dev; | 844 | tunnel->dev = dev; | 
| diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9a5c0ce7ff35..f58ac9854c3f 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
| @@ -178,8 +178,8 @@ static int reg_vif_num = -1; | |||
| 178 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 178 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 
| 179 | { | 179 | { | 
| 180 | read_lock(&mrt_lock); | 180 | read_lock(&mrt_lock); | 
| 181 | ((struct net_device_stats*)dev->priv)->tx_bytes += skb->len; | 181 | ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len; | 
| 182 | ((struct net_device_stats*)dev->priv)->tx_packets++; | 182 | ((struct net_device_stats*)netdev_priv(dev))->tx_packets++; | 
| 183 | ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); | 183 | ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); | 
| 184 | read_unlock(&mrt_lock); | 184 | read_unlock(&mrt_lock); | 
| 185 | kfree_skb(skb); | 185 | kfree_skb(skb); | 
| @@ -188,7 +188,7 @@ static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 188 | 188 | ||
| 189 | static struct net_device_stats *reg_vif_get_stats(struct net_device *dev) | 189 | static struct net_device_stats *reg_vif_get_stats(struct net_device *dev) | 
| 190 | { | 190 | { | 
| 191 | return (struct net_device_stats*)dev->priv; | 191 | return (struct net_device_stats*)netdev_priv(dev); | 
| 192 | } | 192 | } | 
| 193 | 193 | ||
| 194 | static void reg_vif_setup(struct net_device *dev) | 194 | static void reg_vif_setup(struct net_device *dev) | 
| @@ -1149,8 +1149,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
| 1149 | if (vif->flags & VIFF_REGISTER) { | 1149 | if (vif->flags & VIFF_REGISTER) { | 
| 1150 | vif->pkt_out++; | 1150 | vif->pkt_out++; | 
| 1151 | vif->bytes_out+=skb->len; | 1151 | vif->bytes_out+=skb->len; | 
| 1152 | ((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len; | 1152 | ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len; | 
| 1153 | ((struct net_device_stats*)vif->dev->priv)->tx_packets++; | 1153 | ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++; | 
| 1154 | ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); | 1154 | ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); | 
| 1155 | kfree_skb(skb); | 1155 | kfree_skb(skb); | 
| 1156 | return; | 1156 | return; | 
| @@ -1210,8 +1210,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
| 1210 | if (vif->flags & VIFF_TUNNEL) { | 1210 | if (vif->flags & VIFF_TUNNEL) { | 
| 1211 | ip_encap(skb, vif->local, vif->remote); | 1211 | ip_encap(skb, vif->local, vif->remote); | 
| 1212 | /* FIXME: extra output firewall step used to be here. --RR */ | 1212 | /* FIXME: extra output firewall step used to be here. --RR */ | 
| 1213 | ((struct ip_tunnel *)vif->dev->priv)->stat.tx_packets++; | 1213 | ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++; | 
| 1214 | ((struct ip_tunnel *)vif->dev->priv)->stat.tx_bytes+=skb->len; | 1214 | ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len; | 
| 1215 | } | 1215 | } | 
| 1216 | 1216 | ||
| 1217 | IPCB(skb)->flags |= IPSKB_FORWARDED; | 1217 | IPCB(skb)->flags |= IPSKB_FORWARDED; | 
| @@ -1467,8 +1467,8 @@ int pim_rcv_v1(struct sk_buff * skb) | |||
| 1467 | skb->pkt_type = PACKET_HOST; | 1467 | skb->pkt_type = PACKET_HOST; | 
| 1468 | dst_release(skb->dst); | 1468 | dst_release(skb->dst); | 
| 1469 | skb->dst = NULL; | 1469 | skb->dst = NULL; | 
| 1470 | ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len; | 1470 | ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; | 
| 1471 | ((struct net_device_stats*)reg_dev->priv)->rx_packets++; | 1471 | ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; | 
| 1472 | nf_reset(skb); | 1472 | nf_reset(skb); | 
| 1473 | netif_rx(skb); | 1473 | netif_rx(skb); | 
| 1474 | dev_put(reg_dev); | 1474 | dev_put(reg_dev); | 
| @@ -1522,8 +1522,8 @@ static int pim_rcv(struct sk_buff * skb) | |||
| 1522 | skb->ip_summed = 0; | 1522 | skb->ip_summed = 0; | 
| 1523 | skb->pkt_type = PACKET_HOST; | 1523 | skb->pkt_type = PACKET_HOST; | 
| 1524 | dst_release(skb->dst); | 1524 | dst_release(skb->dst); | 
| 1525 | ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len; | 1525 | ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; | 
| 1526 | ((struct net_device_stats*)reg_dev->priv)->rx_packets++; | 1526 | ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; | 
| 1527 | skb->dst = NULL; | 1527 | skb->dst = NULL; | 
| 1528 | nf_reset(skb); | 1528 | nf_reset(skb); | 
| 1529 | netif_rx(skb); | 1529 | netif_rx(skb); | 
| diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0a461232329f..a97ed5416c28 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -3347,7 +3347,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
| 3347 | int offset = start - TCP_SKB_CB(skb)->seq; | 3347 | int offset = start - TCP_SKB_CB(skb)->seq; | 
| 3348 | int size = TCP_SKB_CB(skb)->end_seq - start; | 3348 | int size = TCP_SKB_CB(skb)->end_seq - start; | 
| 3349 | 3349 | ||
| 3350 | if (offset < 0) BUG(); | 3350 | BUG_ON(offset < 0); | 
| 3351 | if (size > 0) { | 3351 | if (size > 0) { | 
| 3352 | size = min(copy, size); | 3352 | size = min(copy, size); | 
| 3353 | if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) | 3353 | if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) | 
| diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index b4c4beba0ede..efa3e72cfcfa 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -226,6 +226,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
| 226 | ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); | 226 | ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); | 
| 227 | ipv6_addr_copy(&hdr->daddr, first_hop); | 227 | ipv6_addr_copy(&hdr->daddr, first_hop); | 
| 228 | 228 | ||
| 229 | skb->priority = sk->sk_priority; | ||
| 230 | |||
| 229 | mtu = dst_mtu(dst); | 231 | mtu = dst_mtu(dst); | 
| 230 | if ((skb->len <= mtu) || ipfragok) { | 232 | if ((skb->len <= mtu) || ipfragok) { | 
| 231 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 233 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 
| @@ -1182,6 +1184,8 @@ int ip6_push_pending_frames(struct sock *sk) | |||
| 1182 | ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); | 1184 | ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); | 
| 1183 | ipv6_addr_copy(&hdr->daddr, final_dst); | 1185 | ipv6_addr_copy(&hdr->daddr, final_dst); | 
| 1184 | 1186 | ||
| 1187 | skb->priority = sk->sk_priority; | ||
| 1188 | |||
| 1185 | skb->dst = dst_clone(&rt->u.dst); | 1189 | skb->dst = dst_clone(&rt->u.dst); | 
| 1186 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 1190 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 
| 1187 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); | 1191 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); | 
| diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index f079621c8b67..c3c2bf699a67 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -243,7 +243,7 @@ ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt) | |||
| 243 | if (dev == NULL) | 243 | if (dev == NULL) | 
| 244 | return -ENOMEM; | 244 | return -ENOMEM; | 
| 245 | 245 | ||
| 246 | t = dev->priv; | 246 | t = netdev_priv(dev); | 
| 247 | dev->init = ip6ip6_tnl_dev_init; | 247 | dev->init = ip6ip6_tnl_dev_init; | 
| 248 | t->parms = *p; | 248 | t->parms = *p; | 
| 249 | 249 | ||
| @@ -308,7 +308,7 @@ ip6ip6_tnl_locate(struct ip6_tnl_parm *p, struct ip6_tnl **pt, int create) | |||
| 308 | static void | 308 | static void | 
| 309 | ip6ip6_tnl_dev_uninit(struct net_device *dev) | 309 | ip6ip6_tnl_dev_uninit(struct net_device *dev) | 
| 310 | { | 310 | { | 
| 311 | struct ip6_tnl *t = dev->priv; | 311 | struct ip6_tnl *t = netdev_priv(dev); | 
| 312 | 312 | ||
| 313 | if (dev == ip6ip6_fb_tnl_dev) { | 313 | if (dev == ip6ip6_fb_tnl_dev) { | 
| 314 | write_lock_bh(&ip6ip6_lock); | 314 | write_lock_bh(&ip6ip6_lock); | 
| @@ -623,7 +623,7 @@ ip6ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr) | |||
| 623 | static int | 623 | static int | 
| 624 | ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | 624 | ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | 
| 625 | { | 625 | { | 
| 626 | struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; | 626 | struct ip6_tnl *t = netdev_priv(dev); | 
| 627 | struct net_device_stats *stats = &t->stat; | 627 | struct net_device_stats *stats = &t->stat; | 
| 628 | struct ipv6hdr *ipv6h = skb->nh.ipv6h; | 628 | struct ipv6hdr *ipv6h = skb->nh.ipv6h; | 
| 629 | struct ipv6_txoptions *opt = NULL; | 629 | struct ipv6_txoptions *opt = NULL; | 
| @@ -933,11 +933,11 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 933 | break; | 933 | break; | 
| 934 | } | 934 | } | 
| 935 | if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV) | 935 | if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV) | 
| 936 | t = (struct ip6_tnl *) dev->priv; | 936 | t = netdev_priv(dev); | 
| 937 | else if (err) | 937 | else if (err) | 
| 938 | break; | 938 | break; | 
| 939 | } else | 939 | } else | 
| 940 | t = (struct ip6_tnl *) dev->priv; | 940 | t = netdev_priv(dev); | 
| 941 | 941 | ||
| 942 | memcpy(&p, &t->parms, sizeof (p)); | 942 | memcpy(&p, &t->parms, sizeof (p)); | 
| 943 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { | 943 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { | 
| @@ -955,7 +955,7 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 955 | break; | 955 | break; | 
| 956 | } | 956 | } | 
| 957 | if (!create && dev != ip6ip6_fb_tnl_dev) { | 957 | if (!create && dev != ip6ip6_fb_tnl_dev) { | 
| 958 | t = (struct ip6_tnl *) dev->priv; | 958 | t = netdev_priv(dev); | 
| 959 | } | 959 | } | 
| 960 | if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) { | 960 | if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) { | 
| 961 | break; | 961 | break; | 
| @@ -991,12 +991,12 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 991 | err = ip6ip6_tnl_locate(&p, &t, 0); | 991 | err = ip6ip6_tnl_locate(&p, &t, 0); | 
| 992 | if (err) | 992 | if (err) | 
| 993 | break; | 993 | break; | 
| 994 | if (t == ip6ip6_fb_tnl_dev->priv) { | 994 | if (t == netdev_priv(ip6ip6_fb_tnl_dev)) { | 
| 995 | err = -EPERM; | 995 | err = -EPERM; | 
| 996 | break; | 996 | break; | 
| 997 | } | 997 | } | 
| 998 | } else { | 998 | } else { | 
| 999 | t = (struct ip6_tnl *) dev->priv; | 999 | t = netdev_priv(dev); | 
| 1000 | } | 1000 | } | 
| 1001 | err = unregister_netdevice(t->dev); | 1001 | err = unregister_netdevice(t->dev); | 
| 1002 | break; | 1002 | break; | 
| @@ -1016,7 +1016,7 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 1016 | static struct net_device_stats * | 1016 | static struct net_device_stats * | 
| 1017 | ip6ip6_tnl_get_stats(struct net_device *dev) | 1017 | ip6ip6_tnl_get_stats(struct net_device *dev) | 
| 1018 | { | 1018 | { | 
| 1019 | return &(((struct ip6_tnl *) dev->priv)->stat); | 1019 | return &(((struct ip6_tnl *)netdev_priv(dev))->stat); | 
| 1020 | } | 1020 | } | 
| 1021 | 1021 | ||
| 1022 | /** | 1022 | /** | 
| @@ -1073,7 +1073,7 @@ static void ip6ip6_tnl_dev_setup(struct net_device *dev) | |||
| 1073 | static inline void | 1073 | static inline void | 
| 1074 | ip6ip6_tnl_dev_init_gen(struct net_device *dev) | 1074 | ip6ip6_tnl_dev_init_gen(struct net_device *dev) | 
| 1075 | { | 1075 | { | 
| 1076 | struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; | 1076 | struct ip6_tnl *t = netdev_priv(dev); | 
| 1077 | t->fl.proto = IPPROTO_IPV6; | 1077 | t->fl.proto = IPPROTO_IPV6; | 
| 1078 | t->dev = dev; | 1078 | t->dev = dev; | 
| 1079 | strcpy(t->parms.name, dev->name); | 1079 | strcpy(t->parms.name, dev->name); | 
| @@ -1087,7 +1087,7 @@ ip6ip6_tnl_dev_init_gen(struct net_device *dev) | |||
| 1087 | static int | 1087 | static int | 
| 1088 | ip6ip6_tnl_dev_init(struct net_device *dev) | 1088 | ip6ip6_tnl_dev_init(struct net_device *dev) | 
| 1089 | { | 1089 | { | 
| 1090 | struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; | 1090 | struct ip6_tnl *t = netdev_priv(dev); | 
| 1091 | ip6ip6_tnl_dev_init_gen(dev); | 1091 | ip6ip6_tnl_dev_init_gen(dev); | 
| 1092 | ip6ip6_tnl_link_config(t); | 1092 | ip6ip6_tnl_link_config(t); | 
| 1093 | return 0; | 1093 | return 0; | 
| @@ -1103,7 +1103,7 @@ ip6ip6_tnl_dev_init(struct net_device *dev) | |||
| 1103 | static int | 1103 | static int | 
| 1104 | ip6ip6_fb_tnl_dev_init(struct net_device *dev) | 1104 | ip6ip6_fb_tnl_dev_init(struct net_device *dev) | 
| 1105 | { | 1105 | { | 
| 1106 | struct ip6_tnl *t = dev->priv; | 1106 | struct ip6_tnl *t = netdev_priv(dev); | 
| 1107 | ip6ip6_tnl_dev_init_gen(dev); | 1107 | ip6ip6_tnl_dev_init_gen(dev); | 
| 1108 | dev_hold(dev); | 1108 | dev_hold(dev); | 
| 1109 | tnls_wc[0] = t; | 1109 | tnls_wc[0] = t; | 
| diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 02872ae8a439..0dae48aa1cec 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
| @@ -184,7 +184,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int | |||
| 184 | if (dev == NULL) | 184 | if (dev == NULL) | 
| 185 | return NULL; | 185 | return NULL; | 
| 186 | 186 | ||
| 187 | nt = dev->priv; | 187 | nt = netdev_priv(dev); | 
| 188 | dev->init = ipip6_tunnel_init; | 188 | dev->init = ipip6_tunnel_init; | 
| 189 | nt->parms = *parms; | 189 | nt->parms = *parms; | 
| 190 | 190 | ||
| @@ -210,7 +210,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev) | |||
| 210 | write_unlock_bh(&ipip6_lock); | 210 | write_unlock_bh(&ipip6_lock); | 
| 211 | dev_put(dev); | 211 | dev_put(dev); | 
| 212 | } else { | 212 | } else { | 
| 213 | ipip6_tunnel_unlink((struct ip_tunnel*)dev->priv); | 213 | ipip6_tunnel_unlink(netdev_priv(dev)); | 
| 214 | dev_put(dev); | 214 | dev_put(dev); | 
| 215 | } | 215 | } | 
| 216 | } | 216 | } | 
| @@ -346,7 +346,7 @@ out: | |||
| 346 | rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); | 346 | rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); | 
| 347 | 347 | ||
| 348 | if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { | 348 | if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { | 
| 349 | struct ip_tunnel * t = (struct ip_tunnel*)rt6i->rt6i_dev->priv; | 349 | struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev); | 
| 350 | if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) { | 350 | if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) { | 
| 351 | rel_type = ICMPV6_DEST_UNREACH; | 351 | rel_type = ICMPV6_DEST_UNREACH; | 
| 352 | rel_code = ICMPV6_ADDR_UNREACH; | 352 | rel_code = ICMPV6_ADDR_UNREACH; | 
| @@ -424,7 +424,7 @@ static inline u32 try_6to4(struct in6_addr *v6dst) | |||
| 424 | 424 | ||
| 425 | static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 425 | static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 
| 426 | { | 426 | { | 
| 427 | struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; | 427 | struct ip_tunnel *tunnel = netdev_priv(dev); | 
| 428 | struct net_device_stats *stats = &tunnel->stat; | 428 | struct net_device_stats *stats = &tunnel->stat; | 
| 429 | struct iphdr *tiph = &tunnel->parms.iph; | 429 | struct iphdr *tiph = &tunnel->parms.iph; | 
| 430 | struct ipv6hdr *iph6 = skb->nh.ipv6h; | 430 | struct ipv6hdr *iph6 = skb->nh.ipv6h; | 
| @@ -610,7 +610,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 610 | t = ipip6_tunnel_locate(&p, 0); | 610 | t = ipip6_tunnel_locate(&p, 0); | 
| 611 | } | 611 | } | 
| 612 | if (t == NULL) | 612 | if (t == NULL) | 
| 613 | t = (struct ip_tunnel*)dev->priv; | 613 | t = netdev_priv(dev); | 
| 614 | memcpy(&p, &t->parms, sizeof(p)); | 614 | memcpy(&p, &t->parms, sizeof(p)); | 
| 615 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 615 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 
| 616 | err = -EFAULT; | 616 | err = -EFAULT; | 
| @@ -647,7 +647,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 647 | err = -EINVAL; | 647 | err = -EINVAL; | 
| 648 | break; | 648 | break; | 
| 649 | } | 649 | } | 
| 650 | t = (struct ip_tunnel*)dev->priv; | 650 | t = netdev_priv(dev); | 
| 651 | ipip6_tunnel_unlink(t); | 651 | ipip6_tunnel_unlink(t); | 
| 652 | t->parms.iph.saddr = p.iph.saddr; | 652 | t->parms.iph.saddr = p.iph.saddr; | 
| 653 | t->parms.iph.daddr = p.iph.daddr; | 653 | t->parms.iph.daddr = p.iph.daddr; | 
| @@ -683,7 +683,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 683 | if ((t = ipip6_tunnel_locate(&p, 0)) == NULL) | 683 | if ((t = ipip6_tunnel_locate(&p, 0)) == NULL) | 
| 684 | goto done; | 684 | goto done; | 
| 685 | err = -EPERM; | 685 | err = -EPERM; | 
| 686 | if (t == ipip6_fb_tunnel_dev->priv) | 686 | if (t == netdev_priv(ipip6_fb_tunnel_dev)) | 
| 687 | goto done; | 687 | goto done; | 
| 688 | dev = t->dev; | 688 | dev = t->dev; | 
| 689 | } | 689 | } | 
| @@ -700,7 +700,7 @@ done: | |||
| 700 | 700 | ||
| 701 | static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev) | 701 | static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev) | 
| 702 | { | 702 | { | 
| 703 | return &(((struct ip_tunnel*)dev->priv)->stat); | 703 | return &(((struct ip_tunnel*)netdev_priv(dev))->stat); | 
| 704 | } | 704 | } | 
| 705 | 705 | ||
| 706 | static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 706 | static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 
| @@ -735,7 +735,7 @@ static int ipip6_tunnel_init(struct net_device *dev) | |||
| 735 | struct ip_tunnel *tunnel; | 735 | struct ip_tunnel *tunnel; | 
| 736 | struct iphdr *iph; | 736 | struct iphdr *iph; | 
| 737 | 737 | ||
| 738 | tunnel = (struct ip_tunnel*)dev->priv; | 738 | tunnel = netdev_priv(dev); | 
| 739 | iph = &tunnel->parms.iph; | 739 | iph = &tunnel->parms.iph; | 
| 740 | 740 | ||
| 741 | tunnel->dev = dev; | 741 | tunnel->dev = dev; | 
| @@ -775,7 +775,7 @@ static int ipip6_tunnel_init(struct net_device *dev) | |||
| 775 | 775 | ||
| 776 | static int __init ipip6_fb_tunnel_init(struct net_device *dev) | 776 | static int __init ipip6_fb_tunnel_init(struct net_device *dev) | 
| 777 | { | 777 | { | 
| 778 | struct ip_tunnel *tunnel = dev->priv; | 778 | struct ip_tunnel *tunnel = netdev_priv(dev); | 
| 779 | struct iphdr *iph = &tunnel->parms.iph; | 779 | struct iphdr *iph = &tunnel->parms.iph; | 
| 780 | 780 | ||
| 781 | tunnel->dev = dev; | 781 | tunnel->dev = dev; | 
| diff --git a/net/key/af_key.c b/net/key/af_key.c index 52efd04cbedb..4c2f6d694f88 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
| @@ -297,8 +297,7 @@ static int pfkey_error(struct sadb_msg *orig, int err, struct sock *sk) | |||
| 297 | err = EINTR; | 297 | err = EINTR; | 
| 298 | if (err >= 512) | 298 | if (err >= 512) | 
| 299 | err = EINVAL; | 299 | err = EINVAL; | 
| 300 | if (err <= 0 || err >= 256) | 300 | BUG_ON(err <= 0 || err >= 256); | 
| 301 | BUG(); | ||
| 302 | 301 | ||
| 303 | hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); | 302 | hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); | 
| 304 | pfkey_hdr_dup(hdr, orig); | 303 | pfkey_hdr_dup(hdr, orig); | 
| diff --git a/net/sched/Makefile b/net/sched/Makefile index e48d0d456b3e..0f06aec66094 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile | |||
| @@ -7,13 +7,13 @@ obj-y := sch_generic.o | |||
| 7 | obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o | 7 | obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o | 
| 8 | obj-$(CONFIG_NET_CLS) += cls_api.o | 8 | obj-$(CONFIG_NET_CLS) += cls_api.o | 
| 9 | obj-$(CONFIG_NET_CLS_ACT) += act_api.o | 9 | obj-$(CONFIG_NET_CLS_ACT) += act_api.o | 
| 10 | obj-$(CONFIG_NET_ACT_POLICE) += police.o | 10 | obj-$(CONFIG_NET_ACT_POLICE) += act_police.o | 
| 11 | obj-$(CONFIG_NET_CLS_POLICE) += police.o | 11 | obj-$(CONFIG_NET_CLS_POLICE) += act_police.o | 
| 12 | obj-$(CONFIG_NET_ACT_GACT) += gact.o | 12 | obj-$(CONFIG_NET_ACT_GACT) += act_gact.o | 
| 13 | obj-$(CONFIG_NET_ACT_MIRRED) += mirred.o | 13 | obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o | 
| 14 | obj-$(CONFIG_NET_ACT_IPT) += ipt.o | 14 | obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o | 
| 15 | obj-$(CONFIG_NET_ACT_PEDIT) += pedit.o | 15 | obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o | 
| 16 | obj-$(CONFIG_NET_ACT_SIMP) += simple.o | 16 | obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o | 
| 17 | obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o | 17 | obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o | 
| 18 | obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o | 18 | obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o | 
| 19 | obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o | 19 | obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o | 
| diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 2ce1cb2aa2ed..792ce59940ec 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
| @@ -165,7 +165,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action *act, | |||
| 165 | while ((a = act) != NULL) { | 165 | while ((a = act) != NULL) { | 
| 166 | repeat: | 166 | repeat: | 
| 167 | if (a->ops && a->ops->act) { | 167 | if (a->ops && a->ops->act) { | 
| 168 | ret = a->ops->act(&skb, a, res); | 168 | ret = a->ops->act(skb, a, res); | 
| 169 | if (TC_MUNGED & skb->tc_verd) { | 169 | if (TC_MUNGED & skb->tc_verd) { | 
| 170 | /* copied already, allow trampling */ | 170 | /* copied already, allow trampling */ | 
| 171 | skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); | 171 | skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); | 
| @@ -290,7 +290,7 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, | |||
| 290 | if (a_o == NULL) { | 290 | if (a_o == NULL) { | 
| 291 | #ifdef CONFIG_KMOD | 291 | #ifdef CONFIG_KMOD | 
| 292 | rtnl_unlock(); | 292 | rtnl_unlock(); | 
| 293 | request_module(act_name); | 293 | request_module("act_%s", act_name); | 
| 294 | rtnl_lock(); | 294 | rtnl_lock(); | 
| 295 | 295 | ||
| 296 | a_o = tc_lookup_action_n(act_name); | 296 | a_o = tc_lookup_action_n(act_name); | 
| diff --git a/net/sched/gact.c b/net/sched/act_gact.c index d1c6d542912a..a1e68f78dcc2 100644 --- a/net/sched/gact.c +++ b/net/sched/act_gact.c | |||
| @@ -135,10 +135,9 @@ tcf_gact_cleanup(struct tc_action *a, int bind) | |||
| 135 | } | 135 | } | 
| 136 | 136 | ||
| 137 | static int | 137 | static int | 
| 138 | tcf_gact(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) | 138 | tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) | 
| 139 | { | 139 | { | 
| 140 | struct tcf_gact *p = PRIV(a, gact); | 140 | struct tcf_gact *p = PRIV(a, gact); | 
| 141 | struct sk_buff *skb = *pskb; | ||
| 142 | int action = TC_ACT_SHOT; | 141 | int action = TC_ACT_SHOT; | 
| 143 | 142 | ||
| 144 | spin_lock(&p->lock); | 143 | spin_lock(&p->lock); | 
| diff --git a/net/sched/ipt.c b/net/sched/act_ipt.c index f50136eed211..b5001939b74b 100644 --- a/net/sched/ipt.c +++ b/net/sched/act_ipt.c | |||
| @@ -201,11 +201,10 @@ tcf_ipt_cleanup(struct tc_action *a, int bind) | |||
| 201 | } | 201 | } | 
| 202 | 202 | ||
| 203 | static int | 203 | static int | 
| 204 | tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) | 204 | tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) | 
| 205 | { | 205 | { | 
| 206 | int ret = 0, result = 0; | 206 | int ret = 0, result = 0; | 
| 207 | struct tcf_ipt *p = PRIV(a, ipt); | 207 | struct tcf_ipt *p = PRIV(a, ipt); | 
| 208 | struct sk_buff *skb = *pskb; | ||
| 209 | 208 | ||
| 210 | if (skb_cloned(skb)) { | 209 | if (skb_cloned(skb)) { | 
| 211 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | 210 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | 
| @@ -222,6 +221,9 @@ tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) | |||
| 222 | worry later - danger - this API seems to have changed | 221 | worry later - danger - this API seems to have changed | 
| 223 | from earlier kernels */ | 222 | from earlier kernels */ | 
| 224 | 223 | ||
| 224 | /* iptables targets take a double skb pointer in case the skb | ||
| 225 | * needs to be replaced. We don't own the skb, so this must not | ||
| 226 | * happen. The pskb_expand_head above should make sure of this */ | ||
| 225 | ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, | 227 | ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, | 
| 226 | p->hook, p->t->data, NULL); | 228 | p->hook, p->t->data, NULL); | 
| 227 | switch (ret) { | 229 | switch (ret) { | 
| diff --git a/net/sched/mirred.c b/net/sched/act_mirred.c index 20d06916dc0b..4fcccbd50885 100644 --- a/net/sched/mirred.c +++ b/net/sched/act_mirred.c | |||
| @@ -158,12 +158,11 @@ tcf_mirred_cleanup(struct tc_action *a, int bind) | |||
| 158 | } | 158 | } | 
| 159 | 159 | ||
| 160 | static int | 160 | static int | 
| 161 | tcf_mirred(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) | 161 | tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) | 
| 162 | { | 162 | { | 
| 163 | struct tcf_mirred *p = PRIV(a, mirred); | 163 | struct tcf_mirred *p = PRIV(a, mirred); | 
| 164 | struct net_device *dev; | 164 | struct net_device *dev; | 
| 165 | struct sk_buff *skb2 = NULL; | 165 | struct sk_buff *skb2 = NULL; | 
| 166 | struct sk_buff *skb = *pskb; | ||
| 167 | u32 at = G_TC_AT(skb->tc_verd); | 166 | u32 at = G_TC_AT(skb->tc_verd); | 
| 168 | 167 | ||
| 169 | spin_lock(&p->lock); | 168 | spin_lock(&p->lock); | 
| diff --git a/net/sched/pedit.c b/net/sched/act_pedit.c index 767d24f4610e..1742a68e0122 100644 --- a/net/sched/pedit.c +++ b/net/sched/act_pedit.c | |||
| @@ -130,10 +130,9 @@ tcf_pedit_cleanup(struct tc_action *a, int bind) | |||
| 130 | } | 130 | } | 
| 131 | 131 | ||
| 132 | static int | 132 | static int | 
| 133 | tcf_pedit(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) | 133 | tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) | 
| 134 | { | 134 | { | 
| 135 | struct tcf_pedit *p = PRIV(a, pedit); | 135 | struct tcf_pedit *p = PRIV(a, pedit); | 
| 136 | struct sk_buff *skb = *pskb; | ||
| 137 | int i, munged = 0; | 136 | int i, munged = 0; | 
| 138 | u8 *pptr; | 137 | u8 *pptr; | 
| 139 | 138 | ||
| @@ -246,10 +245,12 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref) | |||
| 246 | t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); | 245 | t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); | 
| 247 | t.expires = jiffies_to_clock_t(p->tm.expires); | 246 | t.expires = jiffies_to_clock_t(p->tm.expires); | 
| 248 | RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); | 247 | RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); | 
| 248 | kfree(opt); | ||
| 249 | return skb->len; | 249 | return skb->len; | 
| 250 | 250 | ||
| 251 | rtattr_failure: | 251 | rtattr_failure: | 
| 252 | skb_trim(skb, b - skb->data); | 252 | skb_trim(skb, b - skb->data); | 
| 253 | kfree(opt); | ||
| 253 | return -1; | 254 | return -1; | 
| 254 | } | 255 | } | 
| 255 | 256 | ||
| diff --git a/net/sched/police.c b/net/sched/act_police.c index eb39fb2f39b6..fa877f8f652c 100644 --- a/net/sched/police.c +++ b/net/sched/act_police.c | |||
| @@ -284,11 +284,10 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind) | |||
| 284 | return 0; | 284 | return 0; | 
| 285 | } | 285 | } | 
| 286 | 286 | ||
| 287 | static int tcf_act_police(struct sk_buff **pskb, struct tc_action *a, | 287 | static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, | 
| 288 | struct tcf_result *res) | 288 | struct tcf_result *res) | 
| 289 | { | 289 | { | 
| 290 | psched_time_t now; | 290 | psched_time_t now; | 
| 291 | struct sk_buff *skb = *pskb; | ||
| 292 | struct tcf_police *p = PRIV(a); | 291 | struct tcf_police *p = PRIV(a); | 
| 293 | long toks; | 292 | long toks; | 
| 294 | long ptoks = 0; | 293 | long ptoks = 0; | 
| @@ -408,7 +407,7 @@ police_cleanup_module(void) | |||
| 408 | module_init(police_init_module); | 407 | module_init(police_init_module); | 
| 409 | module_exit(police_cleanup_module); | 408 | module_exit(police_cleanup_module); | 
| 410 | 409 | ||
| 411 | #endif | 410 | #else /* CONFIG_NET_CLS_ACT */ | 
| 412 | 411 | ||
| 413 | struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) | 412 | struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) | 
| 414 | { | 413 | { | 
| @@ -545,6 +544,7 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p) | |||
| 545 | spin_unlock(&p->lock); | 544 | spin_unlock(&p->lock); | 
| 546 | return p->action; | 545 | return p->action; | 
| 547 | } | 546 | } | 
| 547 | EXPORT_SYMBOL(tcf_police); | ||
| 548 | 548 | ||
| 549 | int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) | 549 | int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) | 
| 550 | { | 550 | { | 
| @@ -601,13 +601,4 @@ errout: | |||
| 601 | return -1; | 601 | return -1; | 
| 602 | } | 602 | } | 
| 603 | 603 | ||
| 604 | 604 | #endif /* CONFIG_NET_CLS_ACT */ | |
| 605 | EXPORT_SYMBOL(tcf_police); | ||
| 606 | EXPORT_SYMBOL(tcf_police_destroy); | ||
| 607 | EXPORT_SYMBOL(tcf_police_dump); | ||
| 608 | EXPORT_SYMBOL(tcf_police_dump_stats); | ||
| 609 | EXPORT_SYMBOL(tcf_police_hash); | ||
| 610 | EXPORT_SYMBOL(tcf_police_ht); | ||
| 611 | EXPORT_SYMBOL(tcf_police_locate); | ||
| 612 | EXPORT_SYMBOL(tcf_police_lookup); | ||
| 613 | EXPORT_SYMBOL(tcf_police_new_index); | ||
| diff --git a/net/sched/simple.c b/net/sched/act_simple.c index 8a6ae4f491e8..e5f2e1f431e2 100644 --- a/net/sched/simple.c +++ b/net/sched/act_simple.c | |||
| @@ -44,9 +44,8 @@ static DEFINE_RWLOCK(simp_lock); | |||
| 44 | #include <net/pkt_act.h> | 44 | #include <net/pkt_act.h> | 
| 45 | #include <net/act_generic.h> | 45 | #include <net/act_generic.h> | 
| 46 | 46 | ||
| 47 | static int tcf_simp(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) | 47 | static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) | 
| 48 | { | 48 | { | 
| 49 | struct sk_buff *skb = *pskb; | ||
| 50 | struct tcf_defact *p = PRIV(a, defact); | 49 | struct tcf_defact *p = PRIV(a, defact); | 
| 51 | 50 | ||
| 52 | spin_lock(&p->lock); | 51 | spin_lock(&p->lock); | 
| diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 09453f997d8c..6cd81708bf71 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
| @@ -257,7 +257,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
| 257 | (cl = cbq_class_lookup(q, prio)) != NULL) | 257 | (cl = cbq_class_lookup(q, prio)) != NULL) | 
| 258 | return cl; | 258 | return cl; | 
| 259 | 259 | ||
| 260 | *qerr = NET_XMIT_DROP; | 260 | *qerr = NET_XMIT_BYPASS; | 
| 261 | for (;;) { | 261 | for (;;) { | 
| 262 | int result = 0; | 262 | int result = 0; | 
| 263 | defmap = head->defaults; | 263 | defmap = head->defaults; | 
| @@ -413,7 +413,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 413 | q->rx_class = cl; | 413 | q->rx_class = cl; | 
| 414 | #endif | 414 | #endif | 
| 415 | if (cl == NULL) { | 415 | if (cl == NULL) { | 
| 416 | if (ret == NET_XMIT_DROP) | 416 | if (ret == NET_XMIT_BYPASS) | 
| 417 | sch->qstats.drops++; | 417 | sch->qstats.drops++; | 
| 418 | kfree_skb(skb); | 418 | kfree_skb(skb); | 
| 419 | return ret; | 419 | return ret; | 
| diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index c26764bc4103..91132f6871d7 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
| @@ -208,7 +208,7 @@ struct hfsc_sched | |||
| 208 | do { \ | 208 | do { \ | 
| 209 | struct timeval tv; \ | 209 | struct timeval tv; \ | 
| 210 | do_gettimeofday(&tv); \ | 210 | do_gettimeofday(&tv); \ | 
| 211 | (stamp) = 1000000ULL * tv.tv_sec + tv.tv_usec; \ | 211 | (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \ | 
| 212 | } while (0) | 212 | } while (0) | 
| 213 | #endif | 213 | #endif | 
| 214 | 214 | ||
| @@ -502,8 +502,8 @@ d2dx(u32 d) | |||
| 502 | u64 dx; | 502 | u64 dx; | 
| 503 | 503 | ||
| 504 | dx = ((u64)d * PSCHED_JIFFIE2US(HZ)); | 504 | dx = ((u64)d * PSCHED_JIFFIE2US(HZ)); | 
| 505 | dx += 1000000 - 1; | 505 | dx += USEC_PER_SEC - 1; | 
| 506 | do_div(dx, 1000000); | 506 | do_div(dx, USEC_PER_SEC); | 
| 507 | return dx; | 507 | return dx; | 
| 508 | } | 508 | } | 
| 509 | 509 | ||
| @@ -523,7 +523,7 @@ dx2d(u64 dx) | |||
| 523 | { | 523 | { | 
| 524 | u64 d; | 524 | u64 d; | 
| 525 | 525 | ||
| 526 | d = dx * 1000000; | 526 | d = dx * USEC_PER_SEC; | 
| 527 | do_div(d, PSCHED_JIFFIE2US(HZ)); | 527 | do_div(d, PSCHED_JIFFIE2US(HZ)); | 
| 528 | return (u32)d; | 528 | return (u32)d; | 
| 529 | } | 529 | } | 
| @@ -1227,7 +1227,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
| 1227 | if (cl->level == 0) | 1227 | if (cl->level == 0) | 
| 1228 | return cl; | 1228 | return cl; | 
| 1229 | 1229 | ||
| 1230 | *qerr = NET_XMIT_DROP; | 1230 | *qerr = NET_XMIT_BYPASS; | 
| 1231 | tcf = q->root.filter_list; | 1231 | tcf = q->root.filter_list; | 
| 1232 | while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { | 1232 | while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { | 
| 1233 | #ifdef CONFIG_NET_CLS_ACT | 1233 | #ifdef CONFIG_NET_CLS_ACT | 
| @@ -1643,7 +1643,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 1643 | 1643 | ||
| 1644 | cl = hfsc_classify(skb, sch, &err); | 1644 | cl = hfsc_classify(skb, sch, &err); | 
| 1645 | if (cl == NULL) { | 1645 | if (cl == NULL) { | 
| 1646 | if (err == NET_XMIT_DROP) | 1646 | if (err == NET_XMIT_BYPASS) | 
| 1647 | sch->qstats.drops++; | 1647 | sch->qstats.drops++; | 
| 1648 | kfree_skb(skb); | 1648 | kfree_skb(skb); | 
| 1649 | return err; | 1649 | return err; | 
| diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 558cc087e602..3ec95df4a85e 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
| @@ -321,7 +321,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in | |||
| 321 | if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) | 321 | if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) | 
| 322 | return cl; | 322 | return cl; | 
| 323 | 323 | ||
| 324 | *qerr = NET_XMIT_DROP; | 324 | *qerr = NET_XMIT_BYPASS; | 
| 325 | tcf = q->filter_list; | 325 | tcf = q->filter_list; | 
| 326 | while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { | 326 | while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { | 
| 327 | #ifdef CONFIG_NET_CLS_ACT | 327 | #ifdef CONFIG_NET_CLS_ACT | 
| @@ -724,7 +724,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 724 | } | 724 | } | 
| 725 | #ifdef CONFIG_NET_CLS_ACT | 725 | #ifdef CONFIG_NET_CLS_ACT | 
| 726 | } else if (!cl) { | 726 | } else if (!cl) { | 
| 727 | if (ret == NET_XMIT_DROP) | 727 | if (ret == NET_XMIT_BYPASS) | 
| 728 | sch->qstats.drops++; | 728 | sch->qstats.drops++; | 
| 729 | kfree_skb (skb); | 729 | kfree_skb (skb); | 
| 730 | return ret; | 730 | return ret; | 
| diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 3ac0f495bad0..5b3a3e48ed92 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
| @@ -54,7 +54,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
| 54 | u32 band = skb->priority; | 54 | u32 band = skb->priority; | 
| 55 | struct tcf_result res; | 55 | struct tcf_result res; | 
| 56 | 56 | ||
| 57 | *qerr = NET_XMIT_DROP; | 57 | *qerr = NET_XMIT_BYPASS; | 
| 58 | if (TC_H_MAJ(skb->priority) != sch->handle) { | 58 | if (TC_H_MAJ(skb->priority) != sch->handle) { | 
| 59 | #ifdef CONFIG_NET_CLS_ACT | 59 | #ifdef CONFIG_NET_CLS_ACT | 
| 60 | switch (tc_classify(skb, q->filter_list, &res)) { | 60 | switch (tc_classify(skb, q->filter_list, &res)) { | 
| @@ -91,7 +91,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 91 | qdisc = prio_classify(skb, sch, &ret); | 91 | qdisc = prio_classify(skb, sch, &ret); | 
| 92 | #ifdef CONFIG_NET_CLS_ACT | 92 | #ifdef CONFIG_NET_CLS_ACT | 
| 93 | if (qdisc == NULL) { | 93 | if (qdisc == NULL) { | 
| 94 | if (ret == NET_XMIT_DROP) | 94 | |
| 95 | if (ret == NET_XMIT_BYPASS) | ||
| 95 | sch->qstats.drops++; | 96 | sch->qstats.drops++; | 
| 96 | kfree_skb(skb); | 97 | kfree_skb(skb); | 
| 97 | return ret; | 98 | return ret; | 
| @@ -118,7 +119,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch) | |||
| 118 | qdisc = prio_classify(skb, sch, &ret); | 119 | qdisc = prio_classify(skb, sch, &ret); | 
| 119 | #ifdef CONFIG_NET_CLS_ACT | 120 | #ifdef CONFIG_NET_CLS_ACT | 
| 120 | if (qdisc == NULL) { | 121 | if (qdisc == NULL) { | 
| 121 | if (ret == NET_XMIT_DROP) | 122 | if (ret == NET_XMIT_BYPASS) | 
| 122 | sch->qstats.drops++; | 123 | sch->qstats.drops++; | 
| 123 | kfree_skb(skb); | 124 | kfree_skb(skb); | 
| 124 | return ret; | 125 | return ret; | 
| diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index c4a2a8c4c339..79b8ef34c6e4 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
| @@ -274,7 +274,7 @@ teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *de | |||
| 274 | 274 | ||
| 275 | static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 275 | static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 
| 276 | { | 276 | { | 
| 277 | struct teql_master *master = (void*)dev->priv; | 277 | struct teql_master *master = netdev_priv(dev); | 
| 278 | struct Qdisc *start, *q; | 278 | struct Qdisc *start, *q; | 
| 279 | int busy; | 279 | int busy; | 
| 280 | int nores; | 280 | int nores; | 
| @@ -350,7 +350,7 @@ drop: | |||
| 350 | static int teql_master_open(struct net_device *dev) | 350 | static int teql_master_open(struct net_device *dev) | 
| 351 | { | 351 | { | 
| 352 | struct Qdisc * q; | 352 | struct Qdisc * q; | 
| 353 | struct teql_master *m = (void*)dev->priv; | 353 | struct teql_master *m = netdev_priv(dev); | 
| 354 | int mtu = 0xFFFE; | 354 | int mtu = 0xFFFE; | 
| 355 | unsigned flags = IFF_NOARP|IFF_MULTICAST; | 355 | unsigned flags = IFF_NOARP|IFF_MULTICAST; | 
| 356 | 356 | ||
| @@ -397,13 +397,13 @@ static int teql_master_close(struct net_device *dev) | |||
| 397 | 397 | ||
| 398 | static struct net_device_stats *teql_master_stats(struct net_device *dev) | 398 | static struct net_device_stats *teql_master_stats(struct net_device *dev) | 
| 399 | { | 399 | { | 
| 400 | struct teql_master *m = (void*)dev->priv; | 400 | struct teql_master *m = netdev_priv(dev); | 
| 401 | return &m->stats; | 401 | return &m->stats; | 
| 402 | } | 402 | } | 
| 403 | 403 | ||
| 404 | static int teql_master_mtu(struct net_device *dev, int new_mtu) | 404 | static int teql_master_mtu(struct net_device *dev, int new_mtu) | 
| 405 | { | 405 | { | 
| 406 | struct teql_master *m = (void*)dev->priv; | 406 | struct teql_master *m = netdev_priv(dev); | 
| 407 | struct Qdisc *q; | 407 | struct Qdisc *q; | 
| 408 | 408 | ||
| 409 | if (new_mtu < 68) | 409 | if (new_mtu < 68) | 
| @@ -423,7 +423,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu) | |||
| 423 | 423 | ||
| 424 | static __init void teql_master_setup(struct net_device *dev) | 424 | static __init void teql_master_setup(struct net_device *dev) | 
| 425 | { | 425 | { | 
| 426 | struct teql_master *master = dev->priv; | 426 | struct teql_master *master = netdev_priv(dev); | 
| 427 | struct Qdisc_ops *ops = &master->qops; | 427 | struct Qdisc_ops *ops = &master->qops; | 
| 428 | 428 | ||
| 429 | master->dev = dev; | 429 | master->dev = dev; | 
| @@ -476,7 +476,7 @@ static int __init teql_init(void) | |||
| 476 | break; | 476 | break; | 
| 477 | } | 477 | } | 
| 478 | 478 | ||
| 479 | master = dev->priv; | 479 | master = netdev_priv(dev); | 
| 480 | 480 | ||
| 481 | strlcpy(master->qops.id, dev->name, IFNAMSIZ); | 481 | strlcpy(master->qops.id, dev->name, IFNAMSIZ); | 
| 482 | err = register_qdisc(&master->qops); | 482 | err = register_qdisc(&master->qops); | 
| diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 2d7d8a5db2ac..b8b38aba92b3 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
| @@ -1250,8 +1250,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
| 1250 | case SCTP_CMD_TIMER_START: | 1250 | case SCTP_CMD_TIMER_START: | 
| 1251 | timer = &asoc->timers[cmd->obj.to]; | 1251 | timer = &asoc->timers[cmd->obj.to]; | 
| 1252 | timeout = asoc->timeouts[cmd->obj.to]; | 1252 | timeout = asoc->timeouts[cmd->obj.to]; | 
| 1253 | if (!timeout) | 1253 | BUG_ON(!timeout); | 
| 1254 | BUG(); | ||
| 1255 | 1254 | ||
| 1256 | timer->expires = jiffies + timeout; | 1255 | timer->expires = jiffies + timeout; | 
| 1257 | sctp_association_hold(asoc); | 1256 | sctp_association_hold(asoc); | 
| diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index f509e9992767..dcaa0c4453ff 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
| @@ -575,12 +575,11 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) | |||
| 575 | if (rp->q.list.next == &cd->queue) { | 575 | if (rp->q.list.next == &cd->queue) { | 
| 576 | spin_unlock(&queue_lock); | 576 | spin_unlock(&queue_lock); | 
| 577 | up(&queue_io_sem); | 577 | up(&queue_io_sem); | 
| 578 | if (rp->offset) | 578 | BUG_ON(rp->offset); | 
| 579 | BUG(); | ||
| 580 | return 0; | 579 | return 0; | 
| 581 | } | 580 | } | 
| 582 | rq = container_of(rp->q.list.next, struct cache_request, q.list); | 581 | rq = container_of(rp->q.list.next, struct cache_request, q.list); | 
| 583 | if (rq->q.reader) BUG(); | 582 | BUG_ON(rq->q.reader); | 
| 584 | if (rp->offset == 0) | 583 | if (rp->offset == 0) | 
| 585 | rq->readers++; | 584 | rq->readers++; | 
| 586 | spin_unlock(&queue_lock); | 585 | spin_unlock(&queue_lock); | 
| diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index e4296c8b861e..b19cc26fa9c2 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
| @@ -122,8 +122,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size) | |||
| 122 | rqstp->rq_argused = 0; | 122 | rqstp->rq_argused = 0; | 
| 123 | rqstp->rq_resused = 0; | 123 | rqstp->rq_resused = 0; | 
| 124 | arghi = 0; | 124 | arghi = 0; | 
| 125 | if (pages > RPCSVC_MAXPAGES) | 125 | BUG_ON(pages > RPCSVC_MAXPAGES); | 
| 126 | BUG(); | ||
| 127 | while (pages) { | 126 | while (pages) { | 
| 128 | struct page *p = alloc_page(GFP_KERNEL); | 127 | struct page *p = alloc_page(GFP_KERNEL); | 
| 129 | if (!p) | 128 | if (!p) | 
| diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 2f4531fcaca2..6ed3302312fb 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
| @@ -540,8 +540,7 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, | |||
| 540 | start = end; | 540 | start = end; | 
| 541 | } | 541 | } | 
| 542 | } | 542 | } | 
| 543 | if (len) | 543 | BUG_ON(len); | 
| 544 | BUG(); | ||
| 545 | } | 544 | } | 
| 546 | EXPORT_SYMBOL_GPL(skb_icv_walk); | 545 | EXPORT_SYMBOL_GPL(skb_icv_walk); | 
| 547 | 546 | ||
| @@ -610,8 +609,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
| 610 | start = end; | 609 | start = end; | 
| 611 | } | 610 | } | 
| 612 | } | 611 | } | 
| 613 | if (len) | 612 | BUG_ON(len); | 
| 614 | BUG(); | ||
| 615 | return elt; | 613 | return elt; | 
| 616 | } | 614 | } | 
| 617 | EXPORT_SYMBOL_GPL(skb_to_sgvec); | 615 | EXPORT_SYMBOL_GPL(skb_to_sgvec); | 
| diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 59614a994b4e..077bbf9fb9b7 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
| @@ -248,11 +248,9 @@ EXPORT_SYMBOL(xfrm_policy_alloc); | |||
| 248 | 248 | ||
| 249 | void __xfrm_policy_destroy(struct xfrm_policy *policy) | 249 | void __xfrm_policy_destroy(struct xfrm_policy *policy) | 
| 250 | { | 250 | { | 
| 251 | if (!policy->dead) | 251 | BUG_ON(!policy->dead); | 
| 252 | BUG(); | ||
| 253 | 252 | ||
| 254 | if (policy->bundles) | 253 | BUG_ON(policy->bundles); | 
| 255 | BUG(); | ||
| 256 | 254 | ||
| 257 | if (del_timer(&policy->timer)) | 255 | if (del_timer(&policy->timer)) | 
| 258 | BUG(); | 256 | BUG(); | 
