author     Linus Torvalds <torvalds@evo.osdl.org>   2005-09-05 03:11:50 -0400
committer  Linus Torvalds <torvalds@evo.osdl.org>   2005-09-05 03:11:50 -0400
commit     48467641bcc057f7cba3b6cbbe66cb834d64cc81 (patch)
tree       f7c5c5e964c220de30fcdcd06b0f1efdb3e22439
parent     3863e72414fa2ebf5f3b615d1bf99de32e59980a (diff)
parent     d70063c4634af060a5387337b7632f6334ca3458 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
43 files changed, 405 insertions, 283 deletions
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index a2d5b4900772..74dffc68ff9f 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -223,6 +223,7 @@ CAST5 algorithm contributors: | |||
223 | 223 | ||
224 | TEA/XTEA algorithm contributors: | 224 | TEA/XTEA algorithm contributors: |
225 | Aaron Grothe | 225 | Aaron Grothe |
226 | Michael Ringe | ||
226 | 227 | ||
227 | Khazad algorithm contributors: | 228 | Khazad algorithm contributors: |
228 | Aaron Grothe | 229 | Aaron Grothe |
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 256c0b1fed10..89299f4ffe12 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -219,7 +219,7 @@ config CRYPTO_CAST6 | |||
219 | described in RFC2612. | 219 | described in RFC2612. |
220 | 220 | ||
221 | config CRYPTO_TEA | 221 | config CRYPTO_TEA |
222 | tristate "TEA and XTEA cipher algorithms" | 222 | tristate "TEA, XTEA and XETA cipher algorithms" |
223 | depends on CRYPTO | 223 | depends on CRYPTO |
224 | help | 224 | help |
225 | TEA cipher algorithm. | 225 | TEA cipher algorithm. |
@@ -232,6 +232,9 @@ config CRYPTO_TEA | |||
232 | the TEA algorithm to address a potential key weakness | 232 | the TEA algorithm to address a potential key weakness |
233 | in the TEA algorithm. | 233 | in the TEA algorithm. |
234 | 234 | ||
235 | Xtended Encryption Tiny Algorithm is a mis-implementation | ||
236 | of the XTEA algorithm for compatibility purposes. | ||
237 | |||
235 | config CRYPTO_ARC4 | 238 | config CRYPTO_ARC4 |
236 | tristate "ARC4 cipher algorithm" | 239 | tristate "ARC4 cipher algorithm" |
237 | depends on CRYPTO | 240 | depends on CRYPTO |
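The help text above describes XETA as a widely copied mis-implementation of XTEA kept only for compatibility; the difference is purely the grouping inside the round function, as the crypto/tea.c hunks later in this diff show. A minimal standalone sketch of one round of each (hypothetical helper functions, not the kernel code) makes the distinction concrete:

    #include <stdint.h>

    #define XTEA_DELTA 0x9e3779b9

    /* One round of XTEA as in the corrected xtea_encrypt() below:
     * the key/sum term is XORed with the shifted-and-added z term. */
    static void xtea_round(uint32_t *y, uint32_t *z, uint32_t *sum,
                           const uint32_t k[4])
    {
        *y += ((*z << 4 ^ *z >> 5) + *z) ^ (*sum + k[*sum & 3]);
        *sum += XTEA_DELTA;
        *z += ((*y << 4 ^ *y >> 5) + *y) ^ (*sum + k[(*sum >> 11) & 3]);
    }

    /* One round of XETA ("XTEA in the wrong order"): the common
     * mis-reading, which the old xtea_encrypt() actually implemented. */
    static void xeta_round(uint32_t *y, uint32_t *z, uint32_t *sum,
                           const uint32_t k[4])
    {
        *y += (*z << 4 ^ *z >> 5) + (*z ^ *sum) + k[*sum & 3];
        *sum += XTEA_DELTA;
        *z += (*y << 4 ^ *y >> 5) + (*y ^ *sum) + k[(*sum >> 11) & 3];
    }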
diff --git a/crypto/api.c b/crypto/api.c
index b4728811ce3b..959c4e5f264f 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -66,7 +66,8 @@ static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name) | |||
66 | 66 | ||
67 | static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags) | 67 | static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags) |
68 | { | 68 | { |
69 | tfm->crt_flags = 0; | 69 | tfm->crt_flags = flags & CRYPTO_TFM_REQ_MASK; |
70 | flags &= ~CRYPTO_TFM_REQ_MASK; | ||
70 | 71 | ||
71 | switch (crypto_tfm_alg_type(tfm)) { | 72 | switch (crypto_tfm_alg_type(tfm)) { |
72 | case CRYPTO_ALG_TYPE_CIPHER: | 73 | case CRYPTO_ALG_TYPE_CIPHER: |
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 8da644364cb4..3df47f93c9db 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -377,11 +377,7 @@ static int nocrypt_iv(struct crypto_tfm *tfm, | |||
377 | int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags) | 377 | int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags) |
378 | { | 378 | { |
379 | u32 mode = flags & CRYPTO_TFM_MODE_MASK; | 379 | u32 mode = flags & CRYPTO_TFM_MODE_MASK; |
380 | |||
381 | tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB; | 380 | tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB; |
382 | if (flags & CRYPTO_TFM_REQ_WEAK_KEY) | ||
383 | tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY; | ||
384 | |||
385 | return 0; | 381 | return 0; |
386 | } | 382 | } |
387 | 383 | ||
diff --git a/crypto/internal.h b/crypto/internal.h
index 37515beafc8c..37aa652ce5ce 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/slab.h> | ||
20 | #include <asm/kmap_types.h> | 21 | #include <asm/kmap_types.h> |
21 | 22 | ||
22 | extern enum km_type crypto_km_types[]; | 23 | extern enum km_type crypto_km_types[]; |
@@ -38,7 +39,7 @@ static inline void crypto_kunmap(void *vaddr, int out) | |||
38 | 39 | ||
39 | static inline void crypto_yield(struct crypto_tfm *tfm) | 40 | static inline void crypto_yield(struct crypto_tfm *tfm) |
40 | { | 41 | { |
41 | if (!in_atomic()) | 42 | if (tfm->crt_flags & CRYPTO_TFM_REQ_MAY_SLEEP) |
42 | cond_resched(); | 43 | cond_resched(); |
43 | } | 44 | } |
44 | 45 | ||
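With this change crypto_yield() keys off CRYPTO_TFM_REQ_MAY_SLEEP instead of !in_atomic(), so a transform only yields the CPU if its user declared at allocation time that sleeping is allowed; the cryptoloop, dm-crypt, airo and nfsd hunks further down pass the new flag for exactly that reason. A hedged sketch of the two caller patterns (hypothetical function names, error handling omitted):

    #include <linux/crypto.h>

    /* Process-context user: long cipher/digest operations may call
     * cond_resched() via crypto_yield(). */
    static struct crypto_tfm *example_alloc_sleeping(void)
    {
        return crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP);
    }

    /* Softirq/atomic-context user (e.g. a receive path): no flag,
     * so crypto_yield() never reschedules underneath it. */
    static struct crypto_tfm *example_alloc_atomic(void)
    {
        return crypto_alloc_tfm("md5", 0);
    }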
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index bd7524cfff33..68639419c5bd 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -72,7 +72,7 @@ static char *check[] = { | |||
72 | "des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish", | 72 | "des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish", |
73 | "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", | 73 | "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", |
74 | "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", | 74 | "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", |
75 | "khazad", "wp512", "wp384", "wp256", "tnepres", NULL | 75 | "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", NULL |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static void hexdump(unsigned char *buf, unsigned int len) | 78 | static void hexdump(unsigned char *buf, unsigned int len) |
@@ -859,6 +859,10 @@ static void do_test(void) | |||
859 | test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); | 859 | test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); |
860 | test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); | 860 | test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); |
861 | 861 | ||
862 | //XETA | ||
863 | test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); | ||
864 | test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); | ||
865 | |||
862 | test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); | 866 | test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); |
863 | test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); | 867 | test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); |
864 | test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS); | 868 | test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS); |
@@ -1016,6 +1020,11 @@ static void do_test(void) | |||
1016 | case 29: | 1020 | case 29: |
1017 | test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); | 1021 | test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); |
1018 | break; | 1022 | break; |
1023 | |||
1024 | case 30: | ||
1025 | test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); | ||
1026 | test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); | ||
1027 | break; | ||
1019 | 1028 | ||
1020 | #ifdef CONFIG_CRYPTO_HMAC | 1029 | #ifdef CONFIG_CRYPTO_HMAC |
1021 | case 100: | 1030 | case 100: |
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index c01a0ce9b40a..522ffd4b6f43 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -2211,7 +2211,7 @@ static struct cipher_testvec xtea_enc_tv_template[] = { | |||
2211 | .klen = 16, | 2211 | .klen = 16, |
2212 | .input = { [0 ... 8] = 0x00 }, | 2212 | .input = { [0 ... 8] = 0x00 }, |
2213 | .ilen = 8, | 2213 | .ilen = 8, |
2214 | .result = { 0xaa, 0x22, 0x96, 0xe5, 0x6c, 0x61, 0xf3, 0x45 }, | 2214 | .result = { 0xd8, 0xd4, 0xe9, 0xde, 0xd9, 0x1e, 0x13, 0xf7 }, |
2215 | .rlen = 8, | 2215 | .rlen = 8, |
2216 | }, { | 2216 | }, { |
2217 | .key = { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76, | 2217 | .key = { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76, |
@@ -2219,31 +2219,31 @@ static struct cipher_testvec xtea_enc_tv_template[] = { | |||
2219 | .klen = 16, | 2219 | .klen = 16, |
2220 | .input = { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e }, | 2220 | .input = { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e }, |
2221 | .ilen = 8, | 2221 | .ilen = 8, |
2222 | .result = { 0x82, 0x3e, 0xeb, 0x35, 0xdc, 0xdd, 0xd9, 0xc3 }, | 2222 | .result = { 0x94, 0xeb, 0xc8, 0x96, 0x84, 0x6a, 0x49, 0xa8 }, |
2223 | .rlen = 8, | 2223 | .rlen = 8, |
2224 | }, { | 2224 | }, { |
2225 | .key = { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25, | 2225 | .key = { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25, |
2226 | 0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e }, | 2226 | 0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e }, |
2227 | .klen = 16, | 2227 | .klen = 16, |
2228 | .input = { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74, | 2228 | .input = { 0x3e, 0xce, 0xae, 0x22, 0x60, 0x56, 0xa8, 0x9d, |
2229 | 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 }, | 2229 | 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 }, |
2230 | .ilen = 16, | 2230 | .ilen = 16, |
2231 | .result = { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea, | 2231 | .result = { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea, |
2232 | 0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c }, | 2232 | 0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c }, |
2233 | .rlen = 16, | 2233 | .rlen = 16, |
2234 | }, { | 2234 | }, { |
2235 | .key = { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c, | 2235 | .key = { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c, |
2236 | 0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f }, | 2236 | 0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f }, |
2237 | .klen = 16, | 2237 | .klen = 16, |
2238 | .input = { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67, | 2238 | .input = { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67, |
2239 | 0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, | 2239 | 0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, |
2240 | 0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72, | 2240 | 0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72, |
2241 | 0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 }, | 2241 | 0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 }, |
2242 | .ilen = 32, | 2242 | .ilen = 32, |
2243 | .result = { 0x0b, 0x03, 0xcd, 0x8a, 0xbe, 0x95, 0xfd, 0xb1, | 2243 | .result = { 0x99, 0x81, 0x9f, 0x5d, 0x6f, 0x4b, 0x31, 0x3a, |
2244 | 0xc1, 0x44, 0x91, 0x0b, 0xa5, 0xc9, 0x1b, 0xb4, | 2244 | 0x86, 0xff, 0x6f, 0xd0, 0xe3, 0x87, 0x70, 0x07, |
2245 | 0xa9, 0xda, 0x1e, 0x9e, 0xb1, 0x3e, 0x2a, 0x8f, | 2245 | 0x4d, 0xb8, 0xcf, 0xf3, 0x99, 0x50, 0xb3, 0xd4, |
2246 | 0xea, 0xa5, 0x6a, 0x85, 0xd1, 0xf4, 0xa8, 0xa5 }, | 2246 | 0x73, 0xa2, 0xfa, 0xc9, 0x16, 0x59, 0x5d, 0x81 }, |
2247 | .rlen = 32, | 2247 | .rlen = 32, |
2248 | } | 2248 | } |
2249 | }; | 2249 | }; |
@@ -2252,7 +2252,7 @@ static struct cipher_testvec xtea_dec_tv_template[] = { | |||
2252 | { | 2252 | { |
2253 | .key = { [0 ... 15] = 0x00 }, | 2253 | .key = { [0 ... 15] = 0x00 }, |
2254 | .klen = 16, | 2254 | .klen = 16, |
2255 | .input = { 0xaa, 0x22, 0x96, 0xe5, 0x6c, 0x61, 0xf3, 0x45 }, | 2255 | .input = { 0xd8, 0xd4, 0xe9, 0xde, 0xd9, 0x1e, 0x13, 0xf7 }, |
2256 | .ilen = 8, | 2256 | .ilen = 8, |
2257 | .result = { [0 ... 8] = 0x00 }, | 2257 | .result = { [0 ... 8] = 0x00 }, |
2258 | .rlen = 8, | 2258 | .rlen = 8, |
@@ -2260,7 +2260,7 @@ static struct cipher_testvec xtea_dec_tv_template[] = { | |||
2260 | .key = { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76, | 2260 | .key = { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76, |
2261 | 0x77, 0x5d, 0x0e, 0x26, 0x6c, 0x28, 0x78, 0x43 }, | 2261 | 0x77, 0x5d, 0x0e, 0x26, 0x6c, 0x28, 0x78, 0x43 }, |
2262 | .klen = 16, | 2262 | .klen = 16, |
2263 | .input = { 0x82, 0x3e, 0xeb, 0x35, 0xdc, 0xdd, 0xd9, 0xc3 }, | 2263 | .input = { 0x94, 0xeb, 0xc8, 0x96, 0x84, 0x6a, 0x49, 0xa8 }, |
2264 | .ilen = 8, | 2264 | .ilen = 8, |
2265 | .result = { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e }, | 2265 | .result = { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e }, |
2266 | .rlen = 8, | 2266 | .rlen = 8, |
@@ -2268,24 +2268,24 @@ static struct cipher_testvec xtea_dec_tv_template[] = { | |||
2268 | .key = { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25, | 2268 | .key = { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25, |
2269 | 0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e }, | 2269 | 0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e }, |
2270 | .klen = 16, | 2270 | .klen = 16, |
2271 | .input = { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea, | 2271 | .input = { 0x3e, 0xce, 0xae, 0x22, 0x60, 0x56, 0xa8, 0x9d, |
2272 | 0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c }, | 2272 | 0x77, 0x4d, 0xd4, 0xb4, 0x87, 0x24, 0xe3, 0x9a }, |
2273 | .ilen = 16, | 2273 | .ilen = 16, |
2274 | .result = { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74, | 2274 | .result = { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74, |
2275 | 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 }, | 2275 | 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 }, |
2276 | .rlen = 16, | 2276 | .rlen = 16, |
2277 | }, { | 2277 | }, { |
2278 | .key = { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c, | 2278 | .key = { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c, |
2279 | 0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f }, | 2279 | 0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f }, |
2280 | .klen = 16, | 2280 | .klen = 16, |
2281 | .input = { 0x0b, 0x03, 0xcd, 0x8a, 0xbe, 0x95, 0xfd, 0xb1, | 2281 | .input = { 0x99, 0x81, 0x9f, 0x5d, 0x6f, 0x4b, 0x31, 0x3a, |
2282 | 0xc1, 0x44, 0x91, 0x0b, 0xa5, 0xc9, 0x1b, 0xb4, | 2282 | 0x86, 0xff, 0x6f, 0xd0, 0xe3, 0x87, 0x70, 0x07, |
2283 | 0xa9, 0xda, 0x1e, 0x9e, 0xb1, 0x3e, 0x2a, 0x8f, | 2283 | 0x4d, 0xb8, 0xcf, 0xf3, 0x99, 0x50, 0xb3, 0xd4, |
2284 | 0xea, 0xa5, 0x6a, 0x85, 0xd1, 0xf4, 0xa8, 0xa5 }, | 2284 | 0x73, 0xa2, 0xfa, 0xc9, 0x16, 0x59, 0x5d, 0x81 }, |
2285 | .ilen = 32, | 2285 | .ilen = 32, |
2286 | .result = { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67, | 2286 | .result = { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67, |
2287 | 0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, | 2287 | 0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, |
2288 | 0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72, | 2288 | 0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72, |
2289 | 0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 }, | 2289 | 0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 }, |
2290 | .rlen = 32, | 2290 | .rlen = 32, |
2291 | } | 2291 | } |
@@ -2594,6 +2594,98 @@ static struct cipher_testvec anubis_cbc_dec_tv_template[] = { | |||
2594 | }, | 2594 | }, |
2595 | }; | 2595 | }; |
2596 | 2596 | ||
2597 | /* | ||
2598 | * XETA test vectors | ||
2599 | */ | ||
2600 | #define XETA_ENC_TEST_VECTORS 4 | ||
2601 | #define XETA_DEC_TEST_VECTORS 4 | ||
2602 | |||
2603 | static struct cipher_testvec xeta_enc_tv_template[] = { | ||
2604 | { | ||
2605 | .key = { [0 ... 15] = 0x00 }, | ||
2606 | .klen = 16, | ||
2607 | .input = { [0 ... 8] = 0x00 }, | ||
2608 | .ilen = 8, | ||
2609 | .result = { 0xaa, 0x22, 0x96, 0xe5, 0x6c, 0x61, 0xf3, 0x45 }, | ||
2610 | .rlen = 8, | ||
2611 | }, { | ||
2612 | .key = { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76, | ||
2613 | 0x77, 0x5d, 0x0e, 0x26, 0x6c, 0x28, 0x78, 0x43 }, | ||
2614 | .klen = 16, | ||
2615 | .input = { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e }, | ||
2616 | .ilen = 8, | ||
2617 | .result = { 0x82, 0x3e, 0xeb, 0x35, 0xdc, 0xdd, 0xd9, 0xc3 }, | ||
2618 | .rlen = 8, | ||
2619 | }, { | ||
2620 | .key = { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25, | ||
2621 | 0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e }, | ||
2622 | .klen = 16, | ||
2623 | .input = { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74, | ||
2624 | 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 }, | ||
2625 | .ilen = 16, | ||
2626 | .result = { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea, | ||
2627 | 0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c }, | ||
2628 | .rlen = 16, | ||
2629 | }, { | ||
2630 | .key = { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c, | ||
2631 | 0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f }, | ||
2632 | .klen = 16, | ||
2633 | .input = { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67, | ||
2634 | 0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, | ||
2635 | 0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72, | ||
2636 | 0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 }, | ||
2637 | .ilen = 32, | ||
2638 | .result = { 0x0b, 0x03, 0xcd, 0x8a, 0xbe, 0x95, 0xfd, 0xb1, | ||
2639 | 0xc1, 0x44, 0x91, 0x0b, 0xa5, 0xc9, 0x1b, 0xb4, | ||
2640 | 0xa9, 0xda, 0x1e, 0x9e, 0xb1, 0x3e, 0x2a, 0x8f, | ||
2641 | 0xea, 0xa5, 0x6a, 0x85, 0xd1, 0xf4, 0xa8, 0xa5 }, | ||
2642 | .rlen = 32, | ||
2643 | } | ||
2644 | }; | ||
2645 | |||
2646 | static struct cipher_testvec xeta_dec_tv_template[] = { | ||
2647 | { | ||
2648 | .key = { [0 ... 15] = 0x00 }, | ||
2649 | .klen = 16, | ||
2650 | .input = { 0xaa, 0x22, 0x96, 0xe5, 0x6c, 0x61, 0xf3, 0x45 }, | ||
2651 | .ilen = 8, | ||
2652 | .result = { [0 ... 8] = 0x00 }, | ||
2653 | .rlen = 8, | ||
2654 | }, { | ||
2655 | .key = { 0x2b, 0x02, 0x05, 0x68, 0x06, 0x14, 0x49, 0x76, | ||
2656 | 0x77, 0x5d, 0x0e, 0x26, 0x6c, 0x28, 0x78, 0x43 }, | ||
2657 | .klen = 16, | ||
2658 | .input = { 0x82, 0x3e, 0xeb, 0x35, 0xdc, 0xdd, 0xd9, 0xc3 }, | ||
2659 | .ilen = 8, | ||
2660 | .result = { 0x74, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x2e }, | ||
2661 | .rlen = 8, | ||
2662 | }, { | ||
2663 | .key = { 0x09, 0x65, 0x43, 0x11, 0x66, 0x44, 0x39, 0x25, | ||
2664 | 0x51, 0x3a, 0x16, 0x10, 0x0a, 0x08, 0x12, 0x6e }, | ||
2665 | .klen = 16, | ||
2666 | .input = { 0xe2, 0x04, 0xdb, 0xf2, 0x89, 0x85, 0x9e, 0xea, | ||
2667 | 0x61, 0x35, 0xaa, 0xed, 0xb5, 0xcb, 0x71, 0x2c }, | ||
2668 | .ilen = 16, | ||
2669 | .result = { 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x74, | ||
2670 | 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x63, 0x74 }, | ||
2671 | .rlen = 16, | ||
2672 | }, { | ||
2673 | .key = { 0x4d, 0x76, 0x32, 0x17, 0x05, 0x3f, 0x75, 0x2c, | ||
2674 | 0x5d, 0x04, 0x16, 0x36, 0x15, 0x72, 0x63, 0x2f }, | ||
2675 | .klen = 16, | ||
2676 | .input = { 0x0b, 0x03, 0xcd, 0x8a, 0xbe, 0x95, 0xfd, 0xb1, | ||
2677 | 0xc1, 0x44, 0x91, 0x0b, 0xa5, 0xc9, 0x1b, 0xb4, | ||
2678 | 0xa9, 0xda, 0x1e, 0x9e, 0xb1, 0x3e, 0x2a, 0x8f, | ||
2679 | 0xea, 0xa5, 0x6a, 0x85, 0xd1, 0xf4, 0xa8, 0xa5 }, | ||
2680 | .ilen = 32, | ||
2681 | .result = { 0x54, 0x65, 0x61, 0x20, 0x69, 0x73, 0x20, 0x67, | ||
2682 | 0x6f, 0x6f, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, | ||
2683 | 0x79, 0x6f, 0x75, 0x21, 0x21, 0x21, 0x20, 0x72, | ||
2684 | 0x65, 0x61, 0x6c, 0x6c, 0x79, 0x21, 0x21, 0x21 }, | ||
2685 | .rlen = 32, | ||
2686 | } | ||
2687 | }; | ||
2688 | |||
2597 | /* | 2689 | /* |
2598 | * Compression stuff. | 2690 | * Compression stuff. |
2599 | */ | 2691 | */ |
diff --git a/crypto/tea.c b/crypto/tea.c
index 03c23cbd3afa..5924efdd3a16 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -1,11 +1,15 @@ | |||
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * TEA and Xtended TEA Algorithms | 4 | * TEA, XTEA, and XETA crypto algorithms |
5 | * | 5 | * |
6 | * The TEA and Xtended TEA algorithms were developed by David Wheeler | 6 | * The TEA and Xtended TEA algorithms were developed by David Wheeler |
7 | * and Roger Needham at the Computer Laboratory of Cambridge University. | 7 | * and Roger Needham at the Computer Laboratory of Cambridge University. |
8 | * | 8 | * |
9 | * Due to the order of evaluation in XTEA many people have incorrectly | ||
10 | * implemented it. XETA (XTEA in the wrong order), exists for | ||
11 | * compatibility with these implementations. | ||
12 | * | ||
9 | * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com | 13 | * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com |
10 | * | 14 | * |
11 | * This program is free software; you can redistribute it and/or modify | 15 | * This program is free software; you can redistribute it and/or modify |
@@ -153,9 +157,9 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
153 | z = u32_in (src + 4); | 157 | z = u32_in (src + 4); |
154 | 158 | ||
155 | while (sum != limit) { | 159 | while (sum != limit) { |
156 | y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; | 160 | y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); |
157 | sum += XTEA_DELTA; | 161 | sum += XTEA_DELTA; |
158 | z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; | 162 | z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); |
159 | } | 163 | } |
160 | 164 | ||
161 | u32_out (dst, y); | 165 | u32_out (dst, y); |
@@ -175,6 +179,51 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | |||
175 | sum = XTEA_DELTA * XTEA_ROUNDS; | 179 | sum = XTEA_DELTA * XTEA_ROUNDS; |
176 | 180 | ||
177 | while (sum) { | 181 | while (sum) { |
182 | z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]); | ||
183 | sum -= XTEA_DELTA; | ||
184 | y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); | ||
185 | } | ||
186 | |||
187 | u32_out (dst, y); | ||
188 | u32_out (dst + 4, z); | ||
189 | |||
190 | } | ||
191 | |||
192 | |||
193 | static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) | ||
194 | { | ||
195 | |||
196 | u32 y, z, sum = 0; | ||
197 | u32 limit = XTEA_DELTA * XTEA_ROUNDS; | ||
198 | |||
199 | struct xtea_ctx *ctx = ctx_arg; | ||
200 | |||
201 | y = u32_in (src); | ||
202 | z = u32_in (src + 4); | ||
203 | |||
204 | while (sum != limit) { | ||
205 | y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; | ||
206 | sum += XTEA_DELTA; | ||
207 | z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; | ||
208 | } | ||
209 | |||
210 | u32_out (dst, y); | ||
211 | u32_out (dst + 4, z); | ||
212 | |||
213 | } | ||
214 | |||
215 | static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) | ||
216 | { | ||
217 | |||
218 | u32 y, z, sum; | ||
219 | struct tea_ctx *ctx = ctx_arg; | ||
220 | |||
221 | y = u32_in (src); | ||
222 | z = u32_in (src + 4); | ||
223 | |||
224 | sum = XTEA_DELTA * XTEA_ROUNDS; | ||
225 | |||
226 | while (sum) { | ||
178 | z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3]; | 227 | z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3]; |
179 | sum -= XTEA_DELTA; | 228 | sum -= XTEA_DELTA; |
180 | y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; | 229 | y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; |
@@ -215,6 +264,21 @@ static struct crypto_alg xtea_alg = { | |||
215 | .cia_decrypt = xtea_decrypt } } | 264 | .cia_decrypt = xtea_decrypt } } |
216 | }; | 265 | }; |
217 | 266 | ||
267 | static struct crypto_alg xeta_alg = { | ||
268 | .cra_name = "xeta", | ||
269 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | ||
270 | .cra_blocksize = XTEA_BLOCK_SIZE, | ||
271 | .cra_ctxsize = sizeof (struct xtea_ctx), | ||
272 | .cra_module = THIS_MODULE, | ||
273 | .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), | ||
274 | .cra_u = { .cipher = { | ||
275 | .cia_min_keysize = XTEA_KEY_SIZE, | ||
276 | .cia_max_keysize = XTEA_KEY_SIZE, | ||
277 | .cia_setkey = xtea_setkey, | ||
278 | .cia_encrypt = xeta_encrypt, | ||
279 | .cia_decrypt = xeta_decrypt } } | ||
280 | }; | ||
281 | |||
218 | static int __init init(void) | 282 | static int __init init(void) |
219 | { | 283 | { |
220 | int ret = 0; | 284 | int ret = 0; |
@@ -229,6 +293,13 @@ static int __init init(void) | |||
229 | goto out; | 293 | goto out; |
230 | } | 294 | } |
231 | 295 | ||
296 | ret = crypto_register_alg(&xeta_alg); | ||
297 | if (ret < 0) { | ||
298 | crypto_unregister_alg(&tea_alg); | ||
299 | crypto_unregister_alg(&xtea_alg); | ||
300 | goto out; | ||
301 | } | ||
302 | |||
232 | out: | 303 | out: |
233 | return ret; | 304 | return ret; |
234 | } | 305 | } |
@@ -237,12 +308,14 @@ static void __exit fini(void) | |||
237 | { | 308 | { |
238 | crypto_unregister_alg(&tea_alg); | 309 | crypto_unregister_alg(&tea_alg); |
239 | crypto_unregister_alg(&xtea_alg); | 310 | crypto_unregister_alg(&xtea_alg); |
311 | crypto_unregister_alg(&xeta_alg); | ||
240 | } | 312 | } |
241 | 313 | ||
242 | MODULE_ALIAS("xtea"); | 314 | MODULE_ALIAS("xtea"); |
315 | MODULE_ALIAS("xeta"); | ||
243 | 316 | ||
244 | module_init(init); | 317 | module_init(init); |
245 | module_exit(fini); | 318 | module_exit(fini); |
246 | 319 | ||
247 | MODULE_LICENSE("GPL"); | 320 | MODULE_LICENSE("GPL"); |
248 | MODULE_DESCRIPTION("TEA & XTEA Cryptographic Algorithms"); | 321 | MODULE_DESCRIPTION("TEA, XTEA & XETA Cryptographic Algorithms"); |
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index c4b75ecf9460..55959e4d1cb7 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -417,9 +417,9 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); | |||
417 | chan = (here[3] & uPD98401_AAL5_CHAN) >> | 417 | chan = (here[3] & uPD98401_AAL5_CHAN) >> |
418 | uPD98401_AAL5_CHAN_SHIFT; | 418 | uPD98401_AAL5_CHAN_SHIFT; |
419 | if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { | 419 | if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { |
420 | int pos = ZATM_VCC(vcc)->pool; | 420 | int pos; |
421 | |||
422 | vcc = zatm_dev->rx_map[chan]; | 421 | vcc = zatm_dev->rx_map[chan]; |
422 | pos = ZATM_VCC(vcc)->pool; | ||
423 | if (skb == zatm_dev->last_free[pos]) | 423 | if (skb == zatm_dev->last_free[pos]) |
424 | zatm_dev->last_free[pos] = NULL; | 424 | zatm_dev->last_free[pos] = NULL; |
425 | skb_unlink(skb, zatm_dev->pool + pos); | 425 | skb_unlink(skb, zatm_dev->pool + pos); |
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 5be6f998d8c5..3d4261c39f16 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -57,9 +57,11 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) | |||
57 | mode = strsep(&cmsp, "-"); | 57 | mode = strsep(&cmsp, "-"); |
58 | 58 | ||
59 | if (mode == NULL || strcmp(mode, "cbc") == 0) | 59 | if (mode == NULL || strcmp(mode, "cbc") == 0) |
60 | tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC); | 60 | tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC | |
61 | CRYPTO_TFM_REQ_MAY_SLEEP); | ||
61 | else if (strcmp(mode, "ecb") == 0) | 62 | else if (strcmp(mode, "ecb") == 0) |
62 | tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB); | 63 | tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB | |
64 | CRYPTO_TFM_REQ_MAY_SLEEP); | ||
63 | if (tfm == NULL) | 65 | if (tfm == NULL) |
64 | return -EINVAL; | 66 | return -EINVAL; |
65 | 67 | ||
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d0a4bab220e5..b82bc3150476 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -144,7 +144,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | |||
144 | } | 144 | } |
145 | 145 | ||
146 | /* Hash the cipher key with the given hash algorithm */ | 146 | /* Hash the cipher key with the given hash algorithm */ |
147 | hash_tfm = crypto_alloc_tfm(opts, 0); | 147 | hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP); |
148 | if (hash_tfm == NULL) { | 148 | if (hash_tfm == NULL) { |
149 | ti->error = PFX "Error initializing ESSIV hash"; | 149 | ti->error = PFX "Error initializing ESSIV hash"; |
150 | return -EINVAL; | 150 | return -EINVAL; |
@@ -172,7 +172,8 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | |||
172 | 172 | ||
173 | /* Setup the essiv_tfm with the given salt */ | 173 | /* Setup the essiv_tfm with the given salt */ |
174 | essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), | 174 | essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), |
175 | CRYPTO_TFM_MODE_ECB); | 175 | CRYPTO_TFM_MODE_ECB | |
176 | CRYPTO_TFM_REQ_MAY_SLEEP); | ||
176 | if (essiv_tfm == NULL) { | 177 | if (essiv_tfm == NULL) { |
177 | ti->error = PFX "Error allocating crypto tfm for ESSIV"; | 178 | ti->error = PFX "Error allocating crypto tfm for ESSIV"; |
178 | kfree(salt); | 179 | kfree(salt); |
@@ -587,7 +588,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
587 | goto bad1; | 588 | goto bad1; |
588 | } | 589 | } |
589 | 590 | ||
590 | tfm = crypto_alloc_tfm(cipher, crypto_flags); | 591 | tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP); |
591 | if (!tfm) { | 592 | if (!tfm) { |
592 | ti->error = PFX "Error allocating crypto tfm"; | 593 | ti->error = PFX "Error allocating crypto tfm"; |
593 | goto bad1; | 594 | goto bad1; |
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 2608e7a3d214..3f67a42e8503 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -948,6 +948,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
948 | u32 gem_status = readl(gp->regs + GREG_STAT); | 948 | u32 gem_status = readl(gp->regs + GREG_STAT); |
949 | 949 | ||
950 | if (gem_status == 0) { | 950 | if (gem_status == 0) { |
951 | netif_poll_enable(dev); | ||
951 | spin_unlock_irqrestore(&gp->lock, flags); | 952 | spin_unlock_irqrestore(&gp->lock, flags); |
952 | return IRQ_NONE; | 953 | return IRQ_NONE; |
953 | } | 954 | } |
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 7143fd7cf3f8..ff8ae5f79970 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -1020,7 +1020,7 @@ struct gem { | |||
1020 | 1020 | ||
1021 | struct gem_init_block *init_block; | 1021 | struct gem_init_block *init_block; |
1022 | struct sk_buff *rx_skbs[RX_RING_SIZE]; | 1022 | struct sk_buff *rx_skbs[RX_RING_SIZE]; |
1023 | struct sk_buff *tx_skbs[RX_RING_SIZE]; | 1023 | struct sk_buff *tx_skbs[TX_RING_SIZE]; |
1024 | dma_addr_t gblock_dvma; | 1024 | dma_addr_t gblock_dvma; |
1025 | 1025 | ||
1026 | struct pci_dev *pdev; | 1026 | struct pci_dev *pdev; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index af8263a1580e..3faf62310f84 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -66,8 +66,8 @@ | |||
66 | 66 | ||
67 | #define DRV_MODULE_NAME "tg3" | 67 | #define DRV_MODULE_NAME "tg3" |
68 | #define PFX DRV_MODULE_NAME ": " | 68 | #define PFX DRV_MODULE_NAME ": " |
69 | #define DRV_MODULE_VERSION "3.37" | 69 | #define DRV_MODULE_VERSION "3.38" |
70 | #define DRV_MODULE_RELDATE "August 25, 2005" | 70 | #define DRV_MODULE_RELDATE "September 1, 2005" |
71 | 71 | ||
72 | #define TG3_DEF_MAC_MODE 0 | 72 | #define TG3_DEF_MAC_MODE 0 |
73 | #define TG3_DEF_RX_MODE 0 | 73 | #define TG3_DEF_RX_MODE 0 |
@@ -121,12 +121,9 @@ | |||
121 | TG3_RX_RCB_RING_SIZE(tp)) | 121 | TG3_RX_RCB_RING_SIZE(tp)) |
122 | #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ | 122 | #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ |
123 | TG3_TX_RING_SIZE) | 123 | TG3_TX_RING_SIZE) |
124 | #define TX_RING_GAP(TP) \ | ||
125 | (TG3_TX_RING_SIZE - (TP)->tx_pending) | ||
126 | #define TX_BUFFS_AVAIL(TP) \ | 124 | #define TX_BUFFS_AVAIL(TP) \ |
127 | (((TP)->tx_cons <= (TP)->tx_prod) ? \ | 125 | ((TP)->tx_pending - \ |
128 | (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \ | 126 | (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1))) |
129 | (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP)) | ||
130 | #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) | 127 | #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) |
131 | 128 | ||
132 | #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) | 129 | #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) |
@@ -2880,9 +2877,13 @@ static void tg3_tx(struct tg3 *tp) | |||
2880 | 2877 | ||
2881 | tp->tx_cons = sw_idx; | 2878 | tp->tx_cons = sw_idx; |
2882 | 2879 | ||
2883 | if (netif_queue_stopped(tp->dev) && | 2880 | if (unlikely(netif_queue_stopped(tp->dev))) { |
2884 | (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) | 2881 | spin_lock(&tp->tx_lock); |
2885 | netif_wake_queue(tp->dev); | 2882 | if (netif_queue_stopped(tp->dev) && |
2883 | (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) | ||
2884 | netif_wake_queue(tp->dev); | ||
2885 | spin_unlock(&tp->tx_lock); | ||
2886 | } | ||
2886 | } | 2887 | } |
2887 | 2888 | ||
2888 | /* Returns size of skb allocated or < 0 on error. | 2889 | /* Returns size of skb allocated or < 0 on error. |
@@ -3198,9 +3199,7 @@ static int tg3_poll(struct net_device *netdev, int *budget) | |||
3198 | 3199 | ||
3199 | /* run TX completion thread */ | 3200 | /* run TX completion thread */ |
3200 | if (sblk->idx[0].tx_consumer != tp->tx_cons) { | 3201 | if (sblk->idx[0].tx_consumer != tp->tx_cons) { |
3201 | spin_lock(&tp->tx_lock); | ||
3202 | tg3_tx(tp); | 3202 | tg3_tx(tp); |
3203 | spin_unlock(&tp->tx_lock); | ||
3204 | } | 3203 | } |
3205 | 3204 | ||
3206 | /* run RX thread, within the bounds set by NAPI. | 3205 | /* run RX thread, within the bounds set by NAPI. |
@@ -3716,8 +3715,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3716 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); | 3715 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); |
3717 | 3716 | ||
3718 | tp->tx_prod = entry; | 3717 | tp->tx_prod = entry; |
3719 | if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) | 3718 | if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) { |
3720 | netif_stop_queue(dev); | 3719 | netif_stop_queue(dev); |
3720 | if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) | ||
3721 | netif_wake_queue(tp->dev); | ||
3722 | } | ||
3721 | 3723 | ||
3722 | out_unlock: | 3724 | out_unlock: |
3723 | mmiowb(); | 3725 | mmiowb(); |
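The new TX_BUFFS_AVAIL() above derives the free-descriptor count from the producer/consumer indices with a single power-of-two ring mask, which handles wraparound without the old two-branch expression. A small standalone illustration (the ring size of 512 and the sample indices are assumptions for the example):

    #include <stdio.h>

    #define TX_RING_SIZE 512              /* power of two, assumed here */

    /* Mirrors the new macro: pending minus the number of descriptors
     * currently in flight, computed modulo the ring size. */
    static unsigned int tx_buffs_avail(unsigned int prod, unsigned int cons,
                                       unsigned int pending)
    {
        return pending - ((prod - cons) & (TX_RING_SIZE - 1));
    }

    int main(void)
    {
        printf("%u\n", tx_buffs_avail(100, 60, 511)); /* no wrap: 471 */
        printf("%u\n", tx_buffs_avail(5, 500, 511));  /* wrapped: 494 */
        return 0;
    }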
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index effab0b9adca..50b8c6754b1e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -18,6 +18,9 @@ | |||
18 | /* | 18 | /* |
19 | * Changes: | 19 | * Changes: |
20 | * | 20 | * |
21 | * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 | ||
22 | * Add TUNSETLINK ioctl to set the link encapsulation | ||
23 | * | ||
21 | * Mark Smith <markzzzsmith@yahoo.com.au> | 24 | * Mark Smith <markzzzsmith@yahoo.com.au> |
22 | * Use random_ether_addr() for tap MAC address. | 25 | * Use random_ether_addr() for tap MAC address. |
23 | * | 26 | * |
@@ -612,6 +615,18 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
612 | DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); | 615 | DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); |
613 | break; | 616 | break; |
614 | 617 | ||
618 | case TUNSETLINK: | ||
619 | /* Only allow setting the type when the interface is down */ | ||
620 | if (tun->dev->flags & IFF_UP) { | ||
621 | DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", | ||
622 | tun->dev->name); | ||
623 | return -EBUSY; | ||
624 | } else { | ||
625 | tun->dev->type = (int) arg; | ||
626 | DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); | ||
627 | } | ||
628 | break; | ||
629 | |||
615 | #ifdef TUN_DEBUG | 630 | #ifdef TUN_DEBUG |
616 | case TUNSETDEBUG: | 631 | case TUNSETDEBUG: |
617 | tun->debug = arg; | 632 | tun->debug = arg; |
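The new TUNSETLINK ioctl simply stores the requested link type in dev->type, and only while the interface is down. A hedged userspace sketch (the choice of ARPHRD_PPP and the omitted TUNSETIFF setup are assumptions for illustration):

    #include <sys/ioctl.h>
    #include <linux/if_tun.h>
    #include <linux/if_arp.h>

    /* Change the link encapsulation of an already-created tun device.
     * Returns -1 with errno == EBUSY if the interface is up, per the
     * handler above. */
    static int set_tun_linktype(int tun_fd)
    {
        return ioctl(tun_fd, TUNSETLINK, ARPHRD_PPP);
    }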
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index abac1e40154b..dbcb5a8a2194 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1308,7 +1308,7 @@ static int micsetup(struct airo_info *ai) { | |||
1308 | int i; | 1308 | int i; |
1309 | 1309 | ||
1310 | if (ai->tfm == NULL) | 1310 | if (ai->tfm == NULL) |
1311 | ai->tfm = crypto_alloc_tfm("aes", 0); | 1311 | ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP); |
1312 | 1312 | ||
1313 | if (ai->tfm == NULL) { | 1313 | if (ai->tfm == NULL) { |
1314 | printk(KERN_ERR "airo: failed to load transform for AES\n"); | 1314 | printk(KERN_ERR "airo: failed to load transform for AES\n"); |
@@ -2410,8 +2410,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) | |||
2410 | } | 2410 | } |
2411 | } | 2411 | } |
2412 | #ifdef MICSUPPORT | 2412 | #ifdef MICSUPPORT |
2413 | if (ai->tfm) | 2413 | crypto_free_tfm(ai->tfm); |
2414 | crypto_free_tfm(ai->tfm); | ||
2415 | #endif | 2414 | #endif |
2416 | del_airo_dev( dev ); | 2415 | del_airo_dev( dev ); |
2417 | free_netdev( dev ); | 2416 | free_netdev( dev ); |
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 57ed50fe7f85..954cf893d50c 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -93,7 +93,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname) | |||
93 | 93 | ||
94 | dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n", | 94 | dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n", |
95 | clname->len, clname->data); | 95 | clname->len, clname->data); |
96 | tfm = crypto_alloc_tfm("md5", 0); | 96 | tfm = crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP); |
97 | if (tfm == NULL) | 97 | if (tfm == NULL) |
98 | goto out; | 98 | goto out; |
99 | cksum.len = crypto_tfm_alg_digestsize(tfm); | 99 | cksum.len = crypto_tfm_alg_digestsize(tfm); |
@@ -114,8 +114,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname) | |||
114 | kfree(cksum.data); | 114 | kfree(cksum.data); |
115 | status = nfs_ok; | 115 | status = nfs_ok; |
116 | out: | 116 | out: |
117 | if (tfm) | 117 | crypto_free_tfm(tfm); |
118 | crypto_free_tfm(tfm); | ||
119 | return status; | 118 | return status; |
120 | } | 119 | } |
121 | 120 | ||
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 5e2bcc636a02..3c89df6e7768 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -45,6 +45,7 @@ | |||
45 | #define CRYPTO_TFM_MODE_CTR 0x00000008 | 45 | #define CRYPTO_TFM_MODE_CTR 0x00000008 |
46 | 46 | ||
47 | #define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 | 47 | #define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 |
48 | #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 | ||
48 | #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 | 49 | #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 |
49 | #define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 | 50 | #define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 |
50 | #define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 | 51 | #define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 |
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 096a85a58ae5..88aef7b86ef4 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -77,6 +77,7 @@ struct tun_struct { | |||
77 | #define TUNSETIFF _IOW('T', 202, int) | 77 | #define TUNSETIFF _IOW('T', 202, int) |
78 | #define TUNSETPERSIST _IOW('T', 203, int) | 78 | #define TUNSETPERSIST _IOW('T', 203, int) |
79 | #define TUNSETOWNER _IOW('T', 204, int) | 79 | #define TUNSETOWNER _IOW('T', 204, int) |
80 | #define TUNSETLINK _IOW('T', 205, int) | ||
80 | 81 | ||
81 | /* TUNSETIFF ifr flags */ | 82 | /* TUNSETIFF ifr flags */ |
82 | #define IFF_TUN 0x0001 | 83 | #define IFF_TUN 0x0001 |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7a3c43711a17..e426641c519f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -958,7 +958,7 @@ static __inline__ int ip_vs_todrop(void) | |||
958 | */ | 958 | */ |
959 | #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) | 959 | #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) |
960 | 960 | ||
961 | extern __inline__ char ip_vs_fwd_tag(struct ip_vs_conn *cp) | 961 | static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) |
962 | { | 962 | { |
963 | char fwd; | 963 | char fwd; |
964 | 964 | ||
diff --git a/include/net/sock.h b/include/net/sock.h
index 312cb25cbd18..cf628261da52 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,6 +709,12 @@ static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb) | |||
709 | sk_stream_mem_schedule(sk, skb->truesize, 1); | 709 | sk_stream_mem_schedule(sk, skb->truesize, 1); |
710 | } | 710 | } |
711 | 711 | ||
712 | static inline int sk_stream_wmem_schedule(struct sock *sk, int size) | ||
713 | { | ||
714 | return size <= sk->sk_forward_alloc || | ||
715 | sk_stream_mem_schedule(sk, size, 0); | ||
716 | } | ||
717 | |||
712 | /* Used by processes to "lock" a socket state, so that | 718 | /* Used by processes to "lock" a socket state, so that |
713 | * interrupts and bottom half handlers won't change it | 719 | * interrupts and bottom half handlers won't change it |
714 | * from under us. It essentially blocks any incoming | 720 | * from under us. It essentially blocks any incoming |
@@ -1203,8 +1209,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, | |||
1203 | skb = alloc_skb_fclone(size + hdr_len, gfp); | 1209 | skb = alloc_skb_fclone(size + hdr_len, gfp); |
1204 | if (skb) { | 1210 | if (skb) { |
1205 | skb->truesize += mem; | 1211 | skb->truesize += mem; |
1206 | if (sk->sk_forward_alloc >= (int)skb->truesize || | 1212 | if (sk_stream_wmem_schedule(sk, skb->truesize)) { |
1207 | sk_stream_mem_schedule(sk, skb->truesize, 0)) { | ||
1208 | skb_reserve(skb, hdr_len); | 1213 | skb_reserve(skb, hdr_len); |
1209 | return skb; | 1214 | return skb; |
1210 | } | 1215 | } |
@@ -1227,10 +1232,8 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) | |||
1227 | { | 1232 | { |
1228 | struct page *page = NULL; | 1233 | struct page *page = NULL; |
1229 | 1234 | ||
1230 | if (sk->sk_forward_alloc >= (int)PAGE_SIZE || | 1235 | page = alloc_pages(sk->sk_allocation, 0); |
1231 | sk_stream_mem_schedule(sk, PAGE_SIZE, 0)) | 1236 | if (!page) { |
1232 | page = alloc_pages(sk->sk_allocation, 0); | ||
1233 | else { | ||
1234 | sk->sk_prot->enter_memory_pressure(); | 1237 | sk->sk_prot->enter_memory_pressure(); |
1235 | sk_stream_moderate_sndbuf(sk); | 1238 | sk_stream_moderate_sndbuf(sk); |
1236 | } | 1239 | } |
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d6bcf1317a6a..97af77c4d096 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -454,6 +454,7 @@ extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); | |||
454 | extern void tcp_xmit_retransmit_queue(struct sock *); | 454 | extern void tcp_xmit_retransmit_queue(struct sock *); |
455 | extern void tcp_simple_retransmit(struct sock *); | 455 | extern void tcp_simple_retransmit(struct sock *); |
456 | extern int tcp_trim_head(struct sock *, struct sk_buff *, u32); | 456 | extern int tcp_trim_head(struct sock *, struct sk_buff *, u32); |
457 | extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int); | ||
457 | 458 | ||
458 | extern void tcp_send_probe0(struct sock *); | 459 | extern void tcp_send_probe0(struct sock *); |
459 | extern void tcp_send_partial(struct sock *); | 460 | extern void tcp_send_partial(struct sock *); |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 621680f127af..348f36b529f7 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1876,8 +1876,27 @@ static inline unsigned int dn_current_mss(struct sock *sk, int flags) | |||
1876 | return mss_now; | 1876 | return mss_now; |
1877 | } | 1877 | } |
1878 | 1878 | ||
1879 | /* | ||
1880 | * N.B. We get the timeout wrong here, but then we always did get it | ||
1881 | * wrong before and this is another step along the road to correcting | ||
1882 | * it. It ought to get updated each time we pass through the routine, | ||
1883 | * but in practise it probably doesn't matter too much for now. | ||
1884 | */ | ||
1885 | static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk, | ||
1886 | unsigned long datalen, int noblock, | ||
1887 | int *errcode) | ||
1888 | { | ||
1889 | struct sk_buff *skb = sock_alloc_send_skb(sk, datalen, | ||
1890 | noblock, errcode); | ||
1891 | if (skb) { | ||
1892 | skb->protocol = __constant_htons(ETH_P_DNA_RT); | ||
1893 | skb->pkt_type = PACKET_OUTGOING; | ||
1894 | } | ||
1895 | return skb; | ||
1896 | } | ||
1897 | |||
1879 | static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | 1898 | static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, |
1880 | struct msghdr *msg, size_t size) | 1899 | struct msghdr *msg, size_t size) |
1881 | { | 1900 | { |
1882 | struct sock *sk = sock->sk; | 1901 | struct sock *sk = sock->sk; |
1883 | struct dn_scp *scp = DN_SK(sk); | 1902 | struct dn_scp *scp = DN_SK(sk); |
@@ -1892,7 +1911,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1892 | struct dn_skb_cb *cb; | 1911 | struct dn_skb_cb *cb; |
1893 | size_t len; | 1912 | size_t len; |
1894 | unsigned char fctype; | 1913 | unsigned char fctype; |
1895 | long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); | 1914 | long timeo; |
1896 | 1915 | ||
1897 | if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT)) | 1916 | if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT)) |
1898 | return -EOPNOTSUPP; | 1917 | return -EOPNOTSUPP; |
@@ -1900,18 +1919,21 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1900 | if (addr_len && (addr_len != sizeof(struct sockaddr_dn))) | 1919 | if (addr_len && (addr_len != sizeof(struct sockaddr_dn))) |
1901 | return -EINVAL; | 1920 | return -EINVAL; |
1902 | 1921 | ||
1922 | lock_sock(sk); | ||
1923 | timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); | ||
1903 | /* | 1924 | /* |
1904 | * The only difference between stream sockets and sequenced packet | 1925 | * The only difference between stream sockets and sequenced packet |
1905 | * sockets is that the stream sockets always behave as if MSG_EOR | 1926 | * sockets is that the stream sockets always behave as if MSG_EOR |
1906 | * has been set. | 1927 | * has been set. |
1907 | */ | 1928 | */ |
1908 | if (sock->type == SOCK_STREAM) { | 1929 | if (sock->type == SOCK_STREAM) { |
1909 | if (flags & MSG_EOR) | 1930 | if (flags & MSG_EOR) { |
1910 | return -EINVAL; | 1931 | err = -EINVAL; |
1932 | goto out; | ||
1933 | } | ||
1911 | flags |= MSG_EOR; | 1934 | flags |= MSG_EOR; |
1912 | } | 1935 | } |
1913 | 1936 | ||
1914 | lock_sock(sk); | ||
1915 | 1937 | ||
1916 | err = dn_check_state(sk, addr, addr_len, &timeo, flags); | 1938 | err = dn_check_state(sk, addr, addr_len, &timeo, flags); |
1917 | if (err) | 1939 | if (err) |
@@ -1980,8 +2002,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1980 | 2002 | ||
1981 | /* | 2003 | /* |
1982 | * Get a suitably sized skb. | 2004 | * Get a suitably sized skb. |
2005 | * 64 is a bit of a hack really, but its larger than any | ||
2006 | * link-layer headers and has served us well as a good | ||
2007 | * guess as to their real length. | ||
1983 | */ | 2008 | */ |
1984 | skb = dn_alloc_send_skb(sk, &len, flags & MSG_DONTWAIT, timeo, &err); | 2009 | skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER, |
2010 | flags & MSG_DONTWAIT, &err); | ||
1985 | 2011 | ||
1986 | if (err) | 2012 | if (err) |
1987 | break; | 2013 | break; |
@@ -1991,7 +2017,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1991 | 2017 | ||
1992 | cb = DN_SKB_CB(skb); | 2018 | cb = DN_SKB_CB(skb); |
1993 | 2019 | ||
1994 | skb_reserve(skb, DN_MAX_NSP_DATA_HEADER); | 2020 | skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER); |
1995 | 2021 | ||
1996 | if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { | 2022 | if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { |
1997 | err = -EFAULT; | 2023 | err = -EFAULT; |
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index e0bebf4bbcad..53633d352868 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -137,69 +137,6 @@ struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri) | |||
137 | } | 137 | } |
138 | 138 | ||
139 | /* | 139 | /* |
140 | * Wrapper for the above, for allocs of data skbs. We try and get the | ||
141 | * whole size thats been asked for (plus 11 bytes of header). If this | ||
142 | * fails, then we try for any size over 16 bytes for SOCK_STREAMS. | ||
143 | */ | ||
144 | struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err) | ||
145 | { | ||
146 | int space; | ||
147 | int len; | ||
148 | struct sk_buff *skb = NULL; | ||
149 | |||
150 | *err = 0; | ||
151 | |||
152 | while(skb == NULL) { | ||
153 | if (signal_pending(current)) { | ||
154 | *err = sock_intr_errno(timeo); | ||
155 | break; | ||
156 | } | ||
157 | |||
158 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | ||
159 | *err = EINVAL; | ||
160 | break; | ||
161 | } | ||
162 | |||
163 | if (sk->sk_err) | ||
164 | break; | ||
165 | |||
166 | len = *size + 11; | ||
167 | space = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); | ||
168 | |||
169 | if (space < len) { | ||
170 | if ((sk->sk_socket->type == SOCK_STREAM) && | ||
171 | (space >= (16 + 11))) | ||
172 | len = space; | ||
173 | } | ||
174 | |||
175 | if (space < len) { | ||
176 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | ||
177 | if (noblock) { | ||
178 | *err = EWOULDBLOCK; | ||
179 | break; | ||
180 | } | ||
181 | |||
182 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | ||
183 | SOCK_SLEEP_PRE(sk) | ||
184 | |||
185 | if ((sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc)) < | ||
186 | len) | ||
187 | schedule(); | ||
188 | |||
189 | SOCK_SLEEP_POST(sk) | ||
190 | continue; | ||
191 | } | ||
192 | |||
193 | if ((skb = dn_alloc_skb(sk, len, sk->sk_allocation)) == NULL) | ||
194 | continue; | ||
195 | |||
196 | *size = len - 11; | ||
197 | } | ||
198 | |||
199 | return skb; | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * Calculate persist timer based upon the smoothed round | 140 | * Calculate persist timer based upon the smoothed round |
204 | * trip time and the variance. Backoff according to the | 141 | * trip time and the variance. Backoff according to the |
205 | * nsp_backoff[] array. | 142 | * nsp_backoff[] array. |
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 514c85b2631a..035ad2c9e1ba 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -263,10 +263,8 @@ static int ah_init_state(struct xfrm_state *x) | |||
263 | 263 | ||
264 | error: | 264 | error: |
265 | if (ahp) { | 265 | if (ahp) { |
266 | if (ahp->work_icv) | 266 | kfree(ahp->work_icv); |
267 | kfree(ahp->work_icv); | 267 | crypto_free_tfm(ahp->tfm); |
268 | if (ahp->tfm) | ||
269 | crypto_free_tfm(ahp->tfm); | ||
270 | kfree(ahp); | 268 | kfree(ahp); |
271 | } | 269 | } |
272 | return -EINVAL; | 270 | return -EINVAL; |
@@ -279,14 +277,10 @@ static void ah_destroy(struct xfrm_state *x) | |||
279 | if (!ahp) | 277 | if (!ahp) |
280 | return; | 278 | return; |
281 | 279 | ||
282 | if (ahp->work_icv) { | 280 | kfree(ahp->work_icv); |
283 | kfree(ahp->work_icv); | 281 | ahp->work_icv = NULL; |
284 | ahp->work_icv = NULL; | 282 | crypto_free_tfm(ahp->tfm); |
285 | } | 283 | ahp->tfm = NULL; |
286 | if (ahp->tfm) { | ||
287 | crypto_free_tfm(ahp->tfm); | ||
288 | ahp->tfm = NULL; | ||
289 | } | ||
290 | kfree(ahp); | 284 | kfree(ahp); |
291 | } | 285 | } |
292 | 286 | ||
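This hunk, like the esp4, ipcomp, airo and nfsd ones, drops the "if (ptr)" guards before freeing: kfree(NULL) is defined to be a no-op, and the series evidently relies on crypto_free_tfm() tolerating NULL as well (an assumption here, implied by the diff rather than stated in it). A minimal sketch of the resulting teardown pattern with a hypothetical state structure:

    #include <linux/slab.h>
    #include <linux/crypto.h>

    struct example_state {                /* hypothetical */
        void *work_icv;
        struct crypto_tfm *tfm;
    };

    static void example_destroy(struct example_state *st)
    {
        kfree(st->work_icv);              /* kfree(NULL) is a no-op */
        st->work_icv = NULL;
        crypto_free_tfm(st->tfm);         /* assumed NULL-tolerant */
        st->tfm = NULL;
    }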
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b31ffc5053d2..1b5a09d1b90b 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -343,22 +343,14 @@ static void esp_destroy(struct xfrm_state *x) | |||
343 | if (!esp) | 343 | if (!esp) |
344 | return; | 344 | return; |
345 | 345 | ||
346 | if (esp->conf.tfm) { | 346 | crypto_free_tfm(esp->conf.tfm); |
347 | crypto_free_tfm(esp->conf.tfm); | 347 | esp->conf.tfm = NULL; |
348 | esp->conf.tfm = NULL; | 348 | kfree(esp->conf.ivec); |
349 | } | 349 | esp->conf.ivec = NULL; |
350 | if (esp->conf.ivec) { | 350 | crypto_free_tfm(esp->auth.tfm); |
351 | kfree(esp->conf.ivec); | 351 | esp->auth.tfm = NULL; |
352 | esp->conf.ivec = NULL; | 352 | kfree(esp->auth.work_icv); |
353 | } | 353 | esp->auth.work_icv = NULL; |
354 | if (esp->auth.tfm) { | ||
355 | crypto_free_tfm(esp->auth.tfm); | ||
356 | esp->auth.tfm = NULL; | ||
357 | } | ||
358 | if (esp->auth.work_icv) { | ||
359 | kfree(esp->auth.work_icv); | ||
360 | esp->auth.work_icv = NULL; | ||
361 | } | ||
362 | kfree(esp); | 354 | kfree(esp); |
363 | } | 355 | } |
364 | 356 | ||
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index dcb7ee6c4858..fc718df17b40 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -345,8 +345,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms) | |||
345 | 345 | ||
346 | for_each_cpu(cpu) { | 346 | for_each_cpu(cpu) { |
347 | struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); | 347 | struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); |
348 | if (tfm) | 348 | crypto_free_tfm(tfm); |
349 | crypto_free_tfm(tfm); | ||
350 | } | 349 | } |
351 | free_percpu(tfms); | 350 | free_percpu(tfms); |
352 | } | 351 | } |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 2d05cafec221..7d38913754b1 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,7 +144,7 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip, | |||
144 | memcpy(&c->clustermac, &i->clustermac, ETH_ALEN); | 144 | memcpy(&c->clustermac, &i->clustermac, ETH_ALEN); |
145 | c->num_total_nodes = i->num_total_nodes; | 145 | c->num_total_nodes = i->num_total_nodes; |
146 | c->num_local_nodes = i->num_local_nodes; | 146 | c->num_local_nodes = i->num_local_nodes; |
147 | memcpy(&c->local_nodes, &i->local_nodes, sizeof(&c->local_nodes)); | 147 | memcpy(&c->local_nodes, &i->local_nodes, sizeof(c->local_nodes)); |
148 | c->hash_mode = i->hash_mode; | 148 | c->hash_mode = i->hash_mode; |
149 | c->hash_initval = i->hash_initval; | 149 | c->hash_initval = i->hash_initval; |
150 | atomic_set(&c->refcount, 1); | 150 | atomic_set(&c->refcount, 1); |
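The one-character fix above matters because sizeof(&c->local_nodes) is the size of a pointer to the array, not of the array itself, so the old memcpy() copied only 4 or 8 bytes of the node list. A standalone illustration (the 16-byte array size is a made-up value for the example):

    #include <stdio.h>
    #include <string.h>

    struct config {
        unsigned char local_nodes[16];    /* hypothetical size */
    };

    int main(void)
    {
        struct config c;
        memset(&c, 0, sizeof(c));
        /* Bug pattern: size of a pointer (prints 8 on a 64-bit build). */
        printf("%zu\n", sizeof(&c.local_nodes));
        /* Fixed pattern: size of the destination array (prints 16). */
        printf("%zu\n", sizeof(c.local_nodes));
        return 0;
    }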
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 02fdda68718d..cbcc9fc47783 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -552,8 +552,7 @@ new_segment: | |||
552 | tcp_mark_push(tp, skb); | 552 | tcp_mark_push(tp, skb); |
553 | goto new_segment; | 553 | goto new_segment; |
554 | } | 554 | } |
555 | if (sk->sk_forward_alloc < copy && | 555 | if (!sk_stream_wmem_schedule(sk, copy)) |
556 | !sk_stream_mem_schedule(sk, copy, 0)) | ||
557 | goto wait_for_memory; | 556 | goto wait_for_memory; |
558 | 557 | ||
559 | if (can_coalesce) { | 558 | if (can_coalesce) { |
@@ -770,19 +769,23 @@ new_segment: | |||
770 | if (off == PAGE_SIZE) { | 769 | if (off == PAGE_SIZE) { |
771 | put_page(page); | 770 | put_page(page); |
772 | TCP_PAGE(sk) = page = NULL; | 771 | TCP_PAGE(sk) = page = NULL; |
772 | TCP_OFF(sk) = off = 0; | ||
773 | } | 773 | } |
774 | } | 774 | } else |
775 | BUG_ON(off); | ||
776 | |||
777 | if (copy > PAGE_SIZE - off) | ||
778 | copy = PAGE_SIZE - off; | ||
779 | |||
780 | if (!sk_stream_wmem_schedule(sk, copy)) | ||
781 | goto wait_for_memory; | ||
775 | 782 | ||
776 | if (!page) { | 783 | if (!page) { |
777 | /* Allocate new cache page. */ | 784 | /* Allocate new cache page. */ |
778 | if (!(page = sk_stream_alloc_page(sk))) | 785 | if (!(page = sk_stream_alloc_page(sk))) |
779 | goto wait_for_memory; | 786 | goto wait_for_memory; |
780 | off = 0; | ||
781 | } | 787 | } |
782 | 788 | ||
783 | if (copy > PAGE_SIZE - off) | ||
784 | copy = PAGE_SIZE - off; | ||
785 | |||
786 | /* Time to copy data. We are close to | 789 | /* Time to copy data. We are close to |
787 | * the end! */ | 790 | * the end! */ |
788 | err = skb_copy_to_page(sk, from, skb, page, | 791 | err = skb_copy_to_page(sk, from, skb, page, |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1afb080bdf0c..29222b964951 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -923,14 +923,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
923 | int flag = 0; | 923 | int flag = 0; |
924 | int i; | 924 | int i; |
925 | 925 | ||
926 | /* So, SACKs for already sent large segments will be lost. | ||
927 | * Not good, but alternative is to resegment the queue. */ | ||
928 | if (sk->sk_route_caps & NETIF_F_TSO) { | ||
929 | sk->sk_route_caps &= ~NETIF_F_TSO; | ||
930 | sock_set_flag(sk, SOCK_NO_LARGESEND); | ||
931 | tp->mss_cache = tp->mss_cache; | ||
932 | } | ||
933 | |||
934 | if (!tp->sacked_out) | 926 | if (!tp->sacked_out) |
935 | tp->fackets_out = 0; | 927 | tp->fackets_out = 0; |
936 | prior_fackets = tp->fackets_out; | 928 | prior_fackets = tp->fackets_out; |
@@ -978,20 +970,40 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
978 | flag |= FLAG_DATA_LOST; | 970 | flag |= FLAG_DATA_LOST; |
979 | 971 | ||
980 | sk_stream_for_retrans_queue(skb, sk) { | 972 | sk_stream_for_retrans_queue(skb, sk) { |
981 | u8 sacked = TCP_SKB_CB(skb)->sacked; | 973 | int in_sack, pcount; |
982 | int in_sack; | 974 | u8 sacked; |
983 | 975 | ||
984 | /* The retransmission queue is always in order, so | 976 | /* The retransmission queue is always in order, so |
985 | * we can short-circuit the walk early. | 977 | * we can short-circuit the walk early. |
986 | */ | 978 | */ |
987 | if(!before(TCP_SKB_CB(skb)->seq, end_seq)) | 979 | if (!before(TCP_SKB_CB(skb)->seq, end_seq)) |
988 | break; | 980 | break; |
989 | 981 | ||
990 | fack_count += tcp_skb_pcount(skb); | 982 | pcount = tcp_skb_pcount(skb); |
983 | |||
984 | if (pcount > 1 && | ||
985 | (after(start_seq, TCP_SKB_CB(skb)->seq) || | ||
986 | before(end_seq, TCP_SKB_CB(skb)->end_seq))) { | ||
987 | unsigned int pkt_len; | ||
988 | |||
989 | if (after(start_seq, TCP_SKB_CB(skb)->seq)) | ||
990 | pkt_len = (start_seq - | ||
991 | TCP_SKB_CB(skb)->seq); | ||
992 | else | ||
993 | pkt_len = (end_seq - | ||
994 | TCP_SKB_CB(skb)->seq); | ||
995 | if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size)) | ||
996 | break; | ||
997 | pcount = tcp_skb_pcount(skb); | ||
998 | } | ||
999 | |||
1000 | fack_count += pcount; | ||
991 | 1001 | ||
992 | in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && | 1002 | in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && |
993 | !before(end_seq, TCP_SKB_CB(skb)->end_seq); | 1003 | !before(end_seq, TCP_SKB_CB(skb)->end_seq); |
994 | 1004 | ||
1005 | sacked = TCP_SKB_CB(skb)->sacked; | ||
1006 | |||
995 | /* Account D-SACK for retransmitted packet. */ | 1007 | /* Account D-SACK for retransmitted packet. */ |
996 | if ((dup_sack && in_sack) && | 1008 | if ((dup_sack && in_sack) && |
997 | (sacked & TCPCB_RETRANS) && | 1009 | (sacked & TCPCB_RETRANS) && |
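Note on the net/ipv4/tcp_input.c hunk above: rather than turning TSO off whenever a SACK block does not align with a multi-segment skb, tcp_sacktag_write_queue() now calls tcp_fragment() to split the skb at the SACK edge and recomputes pcount afterwards. A hypothetical helper, only to illustrate the pkt_len arithmetic used for the split point:

	/* Illustration of the split-point computation above (not kernel code):
	 * an skb covering [seq, end) is cut so the part inside the SACK block
	 * [start_seq, end_seq) becomes its own segment.  With seq = 1000 and a
	 * block starting at 2000 the cut lands at 1000 bytes; with a block
	 * ending at 3000 it lands at 2000 bytes.
	 */
	static u32 sack_split_len(u32 seq, u32 start_seq, u32 end_seq)
	{
		if (after(start_seq, seq))
			return start_seq - seq;	/* leading part stays un-SACKed */
		return end_seq - seq;		/* trailing part stays un-SACKed */
	}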
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 75b68116682a..6094db5e11be 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -428,11 +428,11 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned | |||
428 | * packet to the list. This won't be called frequently, I hope. | 428 | * packet to the list. This won't be called frequently, I hope. |
429 | * Remember, these are still headerless SKBs at this point. | 429 | * Remember, these are still headerless SKBs at this point. |
430 | */ | 430 | */ |
431 | static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now) | 431 | int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now) |
432 | { | 432 | { |
433 | struct tcp_sock *tp = tcp_sk(sk); | 433 | struct tcp_sock *tp = tcp_sk(sk); |
434 | struct sk_buff *buff; | 434 | struct sk_buff *buff; |
435 | int nsize; | 435 | int nsize, old_factor; |
436 | u16 flags; | 436 | u16 flags; |
437 | 437 | ||
438 | nsize = skb_headlen(skb) - len; | 438 | nsize = skb_headlen(skb) - len; |
@@ -490,18 +490,29 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned | |||
490 | tp->left_out -= tcp_skb_pcount(skb); | 490 | tp->left_out -= tcp_skb_pcount(skb); |
491 | } | 491 | } |
492 | 492 | ||
493 | old_factor = tcp_skb_pcount(skb); | ||
494 | |||
493 | /* Fix up tso_factor for both original and new SKB. */ | 495 | /* Fix up tso_factor for both original and new SKB. */ |
494 | tcp_set_skb_tso_segs(sk, skb, mss_now); | 496 | tcp_set_skb_tso_segs(sk, skb, mss_now); |
495 | tcp_set_skb_tso_segs(sk, buff, mss_now); | 497 | tcp_set_skb_tso_segs(sk, buff, mss_now); |
496 | 498 | ||
497 | if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) { | 499 | /* If this packet has been sent out already, we must |
498 | tp->lost_out += tcp_skb_pcount(skb); | 500 | * adjust the various packet counters. |
499 | tp->left_out += tcp_skb_pcount(skb); | 501 | */ |
500 | } | 502 | if (after(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { |
503 | int diff = old_factor - tcp_skb_pcount(skb) - | ||
504 | tcp_skb_pcount(buff); | ||
501 | 505 | ||
502 | if (TCP_SKB_CB(buff)->sacked&TCPCB_LOST) { | 506 | tp->packets_out -= diff; |
503 | tp->lost_out += tcp_skb_pcount(buff); | 507 | if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) { |
504 | tp->left_out += tcp_skb_pcount(buff); | 508 | tp->lost_out -= diff; |
509 | tp->left_out -= diff; | ||
510 | } | ||
511 | if (diff > 0) { | ||
512 | tp->fackets_out -= diff; | ||
513 | if ((int)tp->fackets_out < 0) | ||
514 | tp->fackets_out = 0; | ||
515 | } | ||
505 | } | 516 | } |
506 | 517 | ||
507 | /* Link BUFF into the send queue. */ | 518 | /* Link BUFF into the send queue. */ |
@@ -1350,12 +1361,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1350 | if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { | 1361 | if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { |
1351 | if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) | 1362 | if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) |
1352 | BUG(); | 1363 | BUG(); |
1353 | |||
1354 | if (sk->sk_route_caps & NETIF_F_TSO) { | ||
1355 | sk->sk_route_caps &= ~NETIF_F_TSO; | ||
1356 | sock_set_flag(sk, SOCK_NO_LARGESEND); | ||
1357 | } | ||
1358 | |||
1359 | if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) | 1364 | if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) |
1360 | return -ENOMEM; | 1365 | return -ENOMEM; |
1361 | } | 1366 | } |
@@ -1370,22 +1375,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1370 | return -EAGAIN; | 1375 | return -EAGAIN; |
1371 | 1376 | ||
1372 | if (skb->len > cur_mss) { | 1377 | if (skb->len > cur_mss) { |
1373 | int old_factor = tcp_skb_pcount(skb); | ||
1374 | int diff; | ||
1375 | |||
1376 | if (tcp_fragment(sk, skb, cur_mss, cur_mss)) | 1378 | if (tcp_fragment(sk, skb, cur_mss, cur_mss)) |
1377 | return -ENOMEM; /* We'll try again later. */ | 1379 | return -ENOMEM; /* We'll try again later. */ |
1378 | |||
1379 | /* New SKB created, account for it. */ | ||
1380 | diff = old_factor - tcp_skb_pcount(skb) - | ||
1381 | tcp_skb_pcount(skb->next); | ||
1382 | tp->packets_out -= diff; | ||
1383 | |||
1384 | if (diff > 0) { | ||
1385 | tp->fackets_out -= diff; | ||
1386 | if ((int)tp->fackets_out < 0) | ||
1387 | tp->fackets_out = 0; | ||
1388 | } | ||
1389 | } | 1380 | } |
1390 | 1381 | ||
1391 | /* Collapse two adjacent packets if worthwhile and we can. */ | 1382 | /* Collapse two adjacent packets if worthwhile and we can. */ |
@@ -1993,12 +1984,6 @@ int tcp_write_wakeup(struct sock *sk) | |||
1993 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; | 1984 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; |
1994 | if (tcp_fragment(sk, skb, seg_size, mss)) | 1985 | if (tcp_fragment(sk, skb, seg_size, mss)) |
1995 | return -1; | 1986 | return -1; |
1996 | /* SWS override triggered forced fragmentation. | ||
1997 | * Disable TSO, the connection is too sick. */ | ||
1998 | if (sk->sk_route_caps & NETIF_F_TSO) { | ||
1999 | sock_set_flag(sk, SOCK_NO_LARGESEND); | ||
2000 | sk->sk_route_caps &= ~NETIF_F_TSO; | ||
2001 | } | ||
2002 | } else if (!tcp_skb_pcount(skb)) | 1987 | } else if (!tcp_skb_pcount(skb)) |
2003 | tcp_set_skb_tso_segs(sk, skb, mss); | 1988 | tcp_set_skb_tso_segs(sk, skb, mss); |
2004 | 1989 | ||
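Note on the net/ipv4/tcp_output.c hunks above: tcp_fragment() loses its static qualifier so the SACK code can call it, and it now fixes up packets_out, lost_out, left_out and fackets_out itself whenever the split skb was already transmitted (snd_nxt lies beyond the new buffer's end_seq). That is why tcp_retransmit_skb() and tcp_write_wakeup() drop both their local accounting and the old "disable TSO when forced to fragment" fallbacks. A hypothetical helper showing the quantity being subtracted:

	/* Illustration only: packets shed from the in-flight counters after an
	 * skb that counted as old_factor segments is split into skb and buff.
	 * A 5-segment skb split on an MSS boundary into 2 + 3 gives diff = 0;
	 * split mid-MSS into 3 + 3 it gives diff = -1, one extra packet in flight.
	 */
	static int tso_split_diff(int old_factor, int skb_pcount, int buff_pcount)
	{
		return old_factor - skb_pcount - buff_pcount;
	}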
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 937ad32db77c..6d6fb74f3b52 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3593,10 +3593,8 @@ void __exit addrconf_cleanup(void) | |||
3593 | rtnl_unlock(); | 3593 | rtnl_unlock(); |
3594 | 3594 | ||
3595 | #ifdef CONFIG_IPV6_PRIVACY | 3595 | #ifdef CONFIG_IPV6_PRIVACY |
3596 | if (likely(md5_tfm != NULL)) { | 3596 | crypto_free_tfm(md5_tfm); |
3597 | crypto_free_tfm(md5_tfm); | 3597 | md5_tfm = NULL; |
3598 | md5_tfm = NULL; | ||
3599 | } | ||
3600 | #endif | 3598 | #endif |
3601 | 3599 | ||
3602 | #ifdef CONFIG_PROC_FS | 3600 | #ifdef CONFIG_PROC_FS |
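Note on this and the following cleanup hunks: the "if (ptr) free(ptr)" guards are dropped because kfree(NULL) has always been a no-op and crypto_free_tfm() is made NULL-safe by this merge, so error and destroy paths collapse to unconditional frees. The crypto_free_tfm() change itself is outside the hunks shown here; the call sites assume behaviour along these lines:

	/* Sketch only: assumed NULL-tolerant entry check in crypto_free_tfm(). */
	void crypto_free_tfm(struct crypto_tfm *tfm)
	{
		if (unlikely(!tfm))
			return;	/* freeing a NULL transform is a no-op, like kfree() */

		/* existing teardown of the transform and module reference follows */
	}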
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 0ebfad907a03..f3629730eb15 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -401,10 +401,8 @@ static int ah6_init_state(struct xfrm_state *x) | |||
401 | 401 | ||
402 | error: | 402 | error: |
403 | if (ahp) { | 403 | if (ahp) { |
404 | if (ahp->work_icv) | 404 | kfree(ahp->work_icv); |
405 | kfree(ahp->work_icv); | 405 | crypto_free_tfm(ahp->tfm); |
406 | if (ahp->tfm) | ||
407 | crypto_free_tfm(ahp->tfm); | ||
408 | kfree(ahp); | 406 | kfree(ahp); |
409 | } | 407 | } |
410 | return -EINVAL; | 408 | return -EINVAL; |
@@ -417,14 +415,10 @@ static void ah6_destroy(struct xfrm_state *x) | |||
417 | if (!ahp) | 415 | if (!ahp) |
418 | return; | 416 | return; |
419 | 417 | ||
420 | if (ahp->work_icv) { | 418 | kfree(ahp->work_icv); |
421 | kfree(ahp->work_icv); | 419 | ahp->work_icv = NULL; |
422 | ahp->work_icv = NULL; | 420 | crypto_free_tfm(ahp->tfm); |
423 | } | 421 | ahp->tfm = NULL; |
424 | if (ahp->tfm) { | ||
425 | crypto_free_tfm(ahp->tfm); | ||
426 | ahp->tfm = NULL; | ||
427 | } | ||
428 | kfree(ahp); | 422 | kfree(ahp); |
429 | } | 423 | } |
430 | 424 | ||
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index e8bff9d3d96c..9b27460f0cc7 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -276,22 +276,14 @@ static void esp6_destroy(struct xfrm_state *x) | |||
276 | if (!esp) | 276 | if (!esp) |
277 | return; | 277 | return; |
278 | 278 | ||
279 | if (esp->conf.tfm) { | 279 | crypto_free_tfm(esp->conf.tfm); |
280 | crypto_free_tfm(esp->conf.tfm); | 280 | esp->conf.tfm = NULL; |
281 | esp->conf.tfm = NULL; | 281 | kfree(esp->conf.ivec); |
282 | } | 282 | esp->conf.ivec = NULL; |
283 | if (esp->conf.ivec) { | 283 | crypto_free_tfm(esp->auth.tfm); |
284 | kfree(esp->conf.ivec); | 284 | esp->auth.tfm = NULL; |
285 | esp->conf.ivec = NULL; | 285 | kfree(esp->auth.work_icv); |
286 | } | 286 | esp->auth.work_icv = NULL; |
287 | if (esp->auth.tfm) { | ||
288 | crypto_free_tfm(esp->auth.tfm); | ||
289 | esp->auth.tfm = NULL; | ||
290 | } | ||
291 | if (esp->auth.work_icv) { | ||
292 | kfree(esp->auth.work_icv); | ||
293 | esp->auth.work_icv = NULL; | ||
294 | } | ||
295 | kfree(esp); | 287 | kfree(esp); |
296 | } | 288 | } |
297 | 289 | ||
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 5176fc655ea9..fa8f1bb0aa52 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -549,7 +549,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info) | |||
549 | read_lock(&raw_v6_lock); | 549 | read_lock(&raw_v6_lock); |
550 | if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) { | 550 | if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) { |
551 | while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, | 551 | while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, |
552 | skb->dev->ifindex))) { | 552 | IP6CB(skb)->iif))) { |
553 | rawv6_err(sk, skb, NULL, type, code, inner_offset, info); | 553 | rawv6_err(sk, skb, NULL, type, code, inner_offset, info); |
554 | sk = sk_next(sk); | 554 | sk = sk_next(sk); |
555 | } | 555 | } |
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 135383ef538f..85bfbc69b2c3 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c | |||
@@ -341,8 +341,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms) | |||
341 | 341 | ||
342 | for_each_cpu(cpu) { | 342 | for_each_cpu(cpu) { |
343 | struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); | 343 | struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); |
344 | if (tfm) | 344 | crypto_free_tfm(tfm); |
345 | crypto_free_tfm(tfm); | ||
346 | } | 345 | } |
347 | free_percpu(tfms); | 346 | free_percpu(tfms); |
348 | } | 347 | } |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 7a5863298f3f..ed3a76b30fd9 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -166,7 +166,7 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) | |||
166 | if (sk == NULL) | 166 | if (sk == NULL) |
167 | goto out; | 167 | goto out; |
168 | 168 | ||
169 | sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, skb->dev->ifindex); | 169 | sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, IP6CB(skb)->iif); |
170 | 170 | ||
171 | while (sk) { | 171 | while (sk) { |
172 | delivered = 1; | 172 | delivered = 1; |
@@ -178,7 +178,7 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) | |||
178 | rawv6_rcv(sk, clone); | 178 | rawv6_rcv(sk, clone); |
179 | } | 179 | } |
180 | sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr, | 180 | sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr, |
181 | skb->dev->ifindex); | 181 | IP6CB(skb)->iif); |
182 | } | 182 | } |
183 | out: | 183 | out: |
184 | read_unlock(&raw_v6_lock); | 184 | read_unlock(&raw_v6_lock); |
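Note on the icmp.c and raw.c hunks above: raw-socket delivery and ICMPv6 error dispatch now match sockets against IP6CB(skb)->iif, the ingress interface index the IPv6 receive path records in the skb control block, instead of re-reading skb->dev->ifindex, which can name a different device by the time these lookups run. For reference, the assumed shape of that control-block data (not part of the hunks shown):

	/* Assumed layout, per include/linux/ipv6.h of this era. */
	struct inet6_skb_parm {
		int	iif;	/* index of the interface the packet arrived on */
		/* extension-header offsets elided */
	};

	#define IP6CB(skb)	((struct inet6_skb_parm *)((skb)->cb))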
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index e47ac0d1a6d6..e22ccd655965 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -193,8 +193,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) | |||
193 | sctp_unhash_endpoint(ep); | 193 | sctp_unhash_endpoint(ep); |
194 | 194 | ||
195 | /* Free up the HMAC transform. */ | 195 | /* Free up the HMAC transform. */ |
196 | if (sctp_sk(ep->base.sk)->hmac) | 196 | sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); |
197 | sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); | ||
198 | 197 | ||
199 | /* Cleanup. */ | 198 | /* Cleanup. */ |
200 | sctp_inq_free(&ep->base.inqueue); | 199 | sctp_inq_free(&ep->base.inqueue); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 4454afe4727e..91ec8c936913 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -4194,8 +4194,7 @@ out: | |||
4194 | sctp_release_sock(sk); | 4194 | sctp_release_sock(sk); |
4195 | return err; | 4195 | return err; |
4196 | cleanup: | 4196 | cleanup: |
4197 | if (tfm) | 4197 | sctp_crypto_free_tfm(tfm); |
4198 | sctp_crypto_free_tfm(tfm); | ||
4199 | goto out; | 4198 | goto out; |
4200 | } | 4199 | } |
4201 | 4200 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 5a7265aeaf83..ee6ae74cd1b2 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
@@ -160,7 +160,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, | |||
160 | " unsupported checksum %d", cksumtype); | 160 | " unsupported checksum %d", cksumtype); |
161 | goto out; | 161 | goto out; |
162 | } | 162 | } |
163 | if (!(tfm = crypto_alloc_tfm(cksumname, 0))) | 163 | if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP))) |
164 | goto out; | 164 | goto out; |
165 | cksum->len = crypto_tfm_alg_digestsize(tfm); | 165 | cksum->len = crypto_tfm_alg_digestsize(tfm); |
166 | if ((cksum->data = kmalloc(cksum->len, GFP_KERNEL)) == NULL) | 166 | if ((cksum->data = kmalloc(cksum->len, GFP_KERNEL)) == NULL) |
@@ -199,8 +199,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, | |||
199 | crypto_digest_final(tfm, cksum->data); | 199 | crypto_digest_final(tfm, cksum->data); |
200 | code = 0; | 200 | code = 0; |
201 | out: | 201 | out: |
202 | if (tfm) | 202 | crypto_free_tfm(tfm); |
203 | crypto_free_tfm(tfm); | ||
204 | return code; | 203 | return code; |
205 | } | 204 | } |
206 | 205 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index cf726510df8e..606a8a82cafb 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
@@ -185,12 +185,9 @@ static void | |||
185 | gss_delete_sec_context_kerberos(void *internal_ctx) { | 185 | gss_delete_sec_context_kerberos(void *internal_ctx) { |
186 | struct krb5_ctx *kctx = internal_ctx; | 186 | struct krb5_ctx *kctx = internal_ctx; |
187 | 187 | ||
188 | if (kctx->seq) | 188 | crypto_free_tfm(kctx->seq); |
189 | crypto_free_tfm(kctx->seq); | 189 | crypto_free_tfm(kctx->enc); |
190 | if (kctx->enc) | 190 | kfree(kctx->mech_used.data); |
191 | crypto_free_tfm(kctx->enc); | ||
192 | if (kctx->mech_used.data) | ||
193 | kfree(kctx->mech_used.data); | ||
194 | kfree(kctx); | 191 | kfree(kctx); |
195 | } | 192 | } |
196 | 193 | ||
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index dad05994c3eb..6c97d61baa9b 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
@@ -214,14 +214,10 @@ static void | |||
214 | gss_delete_sec_context_spkm3(void *internal_ctx) { | 214 | gss_delete_sec_context_spkm3(void *internal_ctx) { |
215 | struct spkm3_ctx *sctx = internal_ctx; | 215 | struct spkm3_ctx *sctx = internal_ctx; |
216 | 216 | ||
217 | if(sctx->derived_integ_key) | 217 | crypto_free_tfm(sctx->derived_integ_key); |
218 | crypto_free_tfm(sctx->derived_integ_key); | 218 | crypto_free_tfm(sctx->derived_conf_key); |
219 | if(sctx->derived_conf_key) | 219 | kfree(sctx->share_key.data); |
220 | crypto_free_tfm(sctx->derived_conf_key); | 220 | kfree(sctx->mech_used.data); |
221 | if(sctx->share_key.data) | ||
222 | kfree(sctx->share_key.data); | ||
223 | if(sctx->mech_used.data) | ||
224 | kfree(sctx->mech_used.data); | ||
225 | kfree(sctx); | 221 | kfree(sctx); |
226 | } | 222 | } |
227 | 223 | ||
diff --git a/security/seclvl.c b/security/seclvl.c index c8e87b22c9bd..96b1f2122f67 100644 --- a/security/seclvl.c +++ b/security/seclvl.c | |||
@@ -321,7 +321,7 @@ plaintext_to_sha1(unsigned char *hash, const char *plaintext, int len) | |||
321 | "bytes.\n", len, PAGE_SIZE); | 321 | "bytes.\n", len, PAGE_SIZE); |
322 | return -ENOMEM; | 322 | return -ENOMEM; |
323 | } | 323 | } |
324 | tfm = crypto_alloc_tfm("sha1", 0); | 324 | tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP); |
325 | if (tfm == NULL) { | 325 | if (tfm == NULL) { |
326 | seclvl_printk(0, KERN_ERR, | 326 | seclvl_printk(0, KERN_ERR, |
327 | "Failed to load transform for SHA1\n"); | 327 | "Failed to load transform for SHA1\n"); |