author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-12-06 11:17:24 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-12-06 11:17:24 -0500
commit     dd63af108f0814f0b589659f4e55a7a5af3b7e53 (patch)
tree       0a7679fecb5b516cddb2153c632a4262899be6c3 /net
parent     50d1e7d1077b276e8faa9eebf8b710edf31fdeea (diff)
parent     009d0431c3914de64666bec0d350e54fdd59df6a (diff)
Merge 3.18-rc7 into tty-next
This resolves the merge issue with drivers/tty/serial/of_serial.c
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'net')
55 files changed, 450 insertions, 299 deletions
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 648d79ccf462..c465876c7861 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -813,10 +813,9 @@ static void __br_multicast_send_query(struct net_bridge *br, | |||
813 | return; | 813 | return; |
814 | 814 | ||
815 | if (port) { | 815 | if (port) { |
816 | __skb_push(skb, sizeof(struct ethhdr)); | ||
817 | skb->dev = port->dev; | 816 | skb->dev = port->dev; |
818 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | 817 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, |
819 | dev_queue_xmit); | 818 | br_dev_queue_push_xmit); |
820 | } else { | 819 | } else { |
821 | br_multicast_select_own_querier(br, ip, skb); | 820 | br_multicast_select_own_querier(br, ip, skb); |
822 | netif_rx(skb); | 821 | netif_rx(skb); |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 2ff9706647f2..e5ec470b851f 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { | |||
280 | [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, | 280 | [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, |
281 | [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, | 281 | [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, |
282 | [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, | 282 | [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, |
283 | [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, | ||
283 | [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, | 284 | [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, |
284 | [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, | 285 | [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, |
285 | }; | 286 | }; |
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 654c9018e3e7..48da2c54a69e 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -18,6 +18,7 @@ | |||
18 | #include <net/netfilter/ipv6/nf_reject.h> | 18 | #include <net/netfilter/ipv6/nf_reject.h> |
19 | #include <linux/ip.h> | 19 | #include <linux/ip.h> |
20 | #include <net/ip.h> | 20 | #include <net/ip.h> |
21 | #include <net/ip6_checksum.h> | ||
21 | #include <linux/netfilter_bridge.h> | 22 | #include <linux/netfilter_bridge.h> |
22 | #include "../br_private.h" | 23 | #include "../br_private.h" |
23 | 24 | ||
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 62fc5e7a9acf..790fe89d90c0 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void) | |||
90 | 90 | ||
91 | static const u8 *aes_iv = (u8 *)CEPH_AES_IV; | 91 | static const u8 *aes_iv = (u8 *)CEPH_AES_IV; |
92 | 92 | ||
93 | /* | ||
94 | * Should be used for buffers allocated with ceph_kvmalloc(). | ||
95 | * Currently these are encrypt out-buffer (ceph_buffer) and decrypt | ||
96 | * in-buffer (msg front). | ||
97 | * | ||
98 | * Dispose of @sgt with teardown_sgtable(). | ||
99 | * | ||
100 | * @prealloc_sg is to avoid memory allocation inside sg_alloc_table() | ||
101 | * in cases where a single sg is sufficient. No attempt to reduce the | ||
102 | * number of sgs by squeezing physically contiguous pages together is | ||
103 | * made though, for simplicity. | ||
104 | */ | ||
105 | static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg, | ||
106 | const void *buf, unsigned int buf_len) | ||
107 | { | ||
108 | struct scatterlist *sg; | ||
109 | const bool is_vmalloc = is_vmalloc_addr(buf); | ||
110 | unsigned int off = offset_in_page(buf); | ||
111 | unsigned int chunk_cnt = 1; | ||
112 | unsigned int chunk_len = PAGE_ALIGN(off + buf_len); | ||
113 | int i; | ||
114 | int ret; | ||
115 | |||
116 | if (buf_len == 0) { | ||
117 | memset(sgt, 0, sizeof(*sgt)); | ||
118 | return -EINVAL; | ||
119 | } | ||
120 | |||
121 | if (is_vmalloc) { | ||
122 | chunk_cnt = chunk_len >> PAGE_SHIFT; | ||
123 | chunk_len = PAGE_SIZE; | ||
124 | } | ||
125 | |||
126 | if (chunk_cnt > 1) { | ||
127 | ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS); | ||
128 | if (ret) | ||
129 | return ret; | ||
130 | } else { | ||
131 | WARN_ON(chunk_cnt != 1); | ||
132 | sg_init_table(prealloc_sg, 1); | ||
133 | sgt->sgl = prealloc_sg; | ||
134 | sgt->nents = sgt->orig_nents = 1; | ||
135 | } | ||
136 | |||
137 | for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) { | ||
138 | struct page *page; | ||
139 | unsigned int len = min(chunk_len - off, buf_len); | ||
140 | |||
141 | if (is_vmalloc) | ||
142 | page = vmalloc_to_page(buf); | ||
143 | else | ||
144 | page = virt_to_page(buf); | ||
145 | |||
146 | sg_set_page(sg, page, len, off); | ||
147 | |||
148 | off = 0; | ||
149 | buf += len; | ||
150 | buf_len -= len; | ||
151 | } | ||
152 | WARN_ON(buf_len != 0); | ||
153 | |||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static void teardown_sgtable(struct sg_table *sgt) | ||
158 | { | ||
159 | if (sgt->orig_nents > 1) | ||
160 | sg_free_table(sgt); | ||
161 | } | ||
162 | |||
93 | static int ceph_aes_encrypt(const void *key, int key_len, | 163 | static int ceph_aes_encrypt(const void *key, int key_len, |
94 | void *dst, size_t *dst_len, | 164 | void *dst, size_t *dst_len, |
95 | const void *src, size_t src_len) | 165 | const void *src, size_t src_len) |
96 | { | 166 | { |
97 | struct scatterlist sg_in[2], sg_out[1]; | 167 | struct scatterlist sg_in[2], prealloc_sg; |
168 | struct sg_table sg_out; | ||
98 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); | 169 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); |
99 | struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; | 170 | struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; |
100 | int ret; | 171 | int ret; |
@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len, | |||
110 | 181 | ||
111 | *dst_len = src_len + zero_padding; | 182 | *dst_len = src_len + zero_padding; |
112 | 183 | ||
113 | crypto_blkcipher_setkey((void *)tfm, key, key_len); | ||
114 | sg_init_table(sg_in, 2); | 184 | sg_init_table(sg_in, 2); |
115 | sg_set_buf(&sg_in[0], src, src_len); | 185 | sg_set_buf(&sg_in[0], src, src_len); |
116 | sg_set_buf(&sg_in[1], pad, zero_padding); | 186 | sg_set_buf(&sg_in[1], pad, zero_padding); |
117 | sg_init_table(sg_out, 1); | 187 | ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); |
118 | sg_set_buf(sg_out, dst, *dst_len); | 188 | if (ret) |
189 | goto out_tfm; | ||
190 | |||
191 | crypto_blkcipher_setkey((void *)tfm, key, key_len); | ||
119 | iv = crypto_blkcipher_crt(tfm)->iv; | 192 | iv = crypto_blkcipher_crt(tfm)->iv; |
120 | ivsize = crypto_blkcipher_ivsize(tfm); | 193 | ivsize = crypto_blkcipher_ivsize(tfm); |
121 | |||
122 | memcpy(iv, aes_iv, ivsize); | 194 | memcpy(iv, aes_iv, ivsize); |
195 | |||
123 | /* | 196 | /* |
124 | print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, | 197 | print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, |
125 | key, key_len, 1); | 198 | key, key_len, 1); |
@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len, | |||
128 | print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, | 201 | print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, |
129 | pad, zero_padding, 1); | 202 | pad, zero_padding, 1); |
130 | */ | 203 | */ |
131 | ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, | 204 | ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, |
132 | src_len + zero_padding); | 205 | src_len + zero_padding); |
133 | crypto_free_blkcipher(tfm); | 206 | if (ret < 0) { |
134 | if (ret < 0) | ||
135 | pr_err("ceph_aes_crypt failed %d\n", ret); | 207 | pr_err("ceph_aes_crypt failed %d\n", ret); |
208 | goto out_sg; | ||
209 | } | ||
136 | /* | 210 | /* |
137 | print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, | 211 | print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, |
138 | dst, *dst_len, 1); | 212 | dst, *dst_len, 1); |
139 | */ | 213 | */ |
140 | return 0; | 214 | |
215 | out_sg: | ||
216 | teardown_sgtable(&sg_out); | ||
217 | out_tfm: | ||
218 | crypto_free_blkcipher(tfm); | ||
219 | return ret; | ||
141 | } | 220 | } |
142 | 221 | ||
143 | static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, | 222 | static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, |
@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, | |||
145 | const void *src1, size_t src1_len, | 224 | const void *src1, size_t src1_len, |
146 | const void *src2, size_t src2_len) | 225 | const void *src2, size_t src2_len) |
147 | { | 226 | { |
148 | struct scatterlist sg_in[3], sg_out[1]; | 227 | struct scatterlist sg_in[3], prealloc_sg; |
228 | struct sg_table sg_out; | ||
149 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); | 229 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); |
150 | struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; | 230 | struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; |
151 | int ret; | 231 | int ret; |
@@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, | |||
161 | 241 | ||
162 | *dst_len = src1_len + src2_len + zero_padding; | 242 | *dst_len = src1_len + src2_len + zero_padding; |
163 | 243 | ||
164 | crypto_blkcipher_setkey((void *)tfm, key, key_len); | ||
165 | sg_init_table(sg_in, 3); | 244 | sg_init_table(sg_in, 3); |
166 | sg_set_buf(&sg_in[0], src1, src1_len); | 245 | sg_set_buf(&sg_in[0], src1, src1_len); |
167 | sg_set_buf(&sg_in[1], src2, src2_len); | 246 | sg_set_buf(&sg_in[1], src2, src2_len); |
168 | sg_set_buf(&sg_in[2], pad, zero_padding); | 247 | sg_set_buf(&sg_in[2], pad, zero_padding); |
169 | sg_init_table(sg_out, 1); | 248 | ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); |
170 | sg_set_buf(sg_out, dst, *dst_len); | 249 | if (ret) |
250 | goto out_tfm; | ||
251 | |||
252 | crypto_blkcipher_setkey((void *)tfm, key, key_len); | ||
171 | iv = crypto_blkcipher_crt(tfm)->iv; | 253 | iv = crypto_blkcipher_crt(tfm)->iv; |
172 | ivsize = crypto_blkcipher_ivsize(tfm); | 254 | ivsize = crypto_blkcipher_ivsize(tfm); |
173 | |||
174 | memcpy(iv, aes_iv, ivsize); | 255 | memcpy(iv, aes_iv, ivsize); |
256 | |||
175 | /* | 257 | /* |
176 | print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, | 258 | print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, |
177 | key, key_len, 1); | 259 | key, key_len, 1); |
@@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, | |||
182 | print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, | 264 | print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, |
183 | pad, zero_padding, 1); | 265 | pad, zero_padding, 1); |
184 | */ | 266 | */ |
185 | ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, | 267 | ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, |
186 | src1_len + src2_len + zero_padding); | 268 | src1_len + src2_len + zero_padding); |
187 | crypto_free_blkcipher(tfm); | 269 | if (ret < 0) { |
188 | if (ret < 0) | ||
189 | pr_err("ceph_aes_crypt2 failed %d\n", ret); | 270 | pr_err("ceph_aes_crypt2 failed %d\n", ret); |
271 | goto out_sg; | ||
272 | } | ||
190 | /* | 273 | /* |
191 | print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, | 274 | print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, |
192 | dst, *dst_len, 1); | 275 | dst, *dst_len, 1); |
193 | */ | 276 | */ |
194 | return 0; | 277 | |
278 | out_sg: | ||
279 | teardown_sgtable(&sg_out); | ||
280 | out_tfm: | ||
281 | crypto_free_blkcipher(tfm); | ||
282 | return ret; | ||
195 | } | 283 | } |
196 | 284 | ||
197 | static int ceph_aes_decrypt(const void *key, int key_len, | 285 | static int ceph_aes_decrypt(const void *key, int key_len, |
198 | void *dst, size_t *dst_len, | 286 | void *dst, size_t *dst_len, |
199 | const void *src, size_t src_len) | 287 | const void *src, size_t src_len) |
200 | { | 288 | { |
201 | struct scatterlist sg_in[1], sg_out[2]; | 289 | struct sg_table sg_in; |
290 | struct scatterlist sg_out[2], prealloc_sg; | ||
202 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); | 291 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); |
203 | struct blkcipher_desc desc = { .tfm = tfm }; | 292 | struct blkcipher_desc desc = { .tfm = tfm }; |
204 | char pad[16]; | 293 | char pad[16]; |
@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len, | |||
210 | if (IS_ERR(tfm)) | 299 | if (IS_ERR(tfm)) |
211 | return PTR_ERR(tfm); | 300 | return PTR_ERR(tfm); |
212 | 301 | ||
213 | crypto_blkcipher_setkey((void *)tfm, key, key_len); | ||
214 | sg_init_table(sg_in, 1); | ||
215 | sg_init_table(sg_out, 2); | 302 | sg_init_table(sg_out, 2); |
216 | sg_set_buf(sg_in, src, src_len); | ||
217 | sg_set_buf(&sg_out[0], dst, *dst_len); | 303 | sg_set_buf(&sg_out[0], dst, *dst_len); |
218 | sg_set_buf(&sg_out[1], pad, sizeof(pad)); | 304 | sg_set_buf(&sg_out[1], pad, sizeof(pad)); |
305 | ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); | ||
306 | if (ret) | ||
307 | goto out_tfm; | ||
219 | 308 | ||
309 | crypto_blkcipher_setkey((void *)tfm, key, key_len); | ||
220 | iv = crypto_blkcipher_crt(tfm)->iv; | 310 | iv = crypto_blkcipher_crt(tfm)->iv; |
221 | ivsize = crypto_blkcipher_ivsize(tfm); | 311 | ivsize = crypto_blkcipher_ivsize(tfm); |
222 | |||
223 | memcpy(iv, aes_iv, ivsize); | 312 | memcpy(iv, aes_iv, ivsize); |
224 | 313 | ||
225 | /* | 314 | /* |
@@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len, | |||
228 | print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, | 317 | print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, |
229 | src, src_len, 1); | 318 | src, src_len, 1); |
230 | */ | 319 | */ |
231 | 320 | ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); | |
232 | ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); | ||
233 | crypto_free_blkcipher(tfm); | ||
234 | if (ret < 0) { | 321 | if (ret < 0) { |
235 | pr_err("ceph_aes_decrypt failed %d\n", ret); | 322 | pr_err("ceph_aes_decrypt failed %d\n", ret); |
236 | return ret; | 323 | goto out_sg; |
237 | } | 324 | } |
238 | 325 | ||
239 | if (src_len <= *dst_len) | 326 | if (src_len <= *dst_len) |
@@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len, | |||
251 | print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, | 338 | print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, |
252 | dst, *dst_len, 1); | 339 | dst, *dst_len, 1); |
253 | */ | 340 | */ |
254 | return 0; | 341 | |
342 | out_sg: | ||
343 | teardown_sgtable(&sg_in); | ||
344 | out_tfm: | ||
345 | crypto_free_blkcipher(tfm); | ||
346 | return ret; | ||
255 | } | 347 | } |
256 | 348 | ||
257 | static int ceph_aes_decrypt2(const void *key, int key_len, | 349 | static int ceph_aes_decrypt2(const void *key, int key_len, |
@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len, | |||
259 | void *dst2, size_t *dst2_len, | 351 | void *dst2, size_t *dst2_len, |
260 | const void *src, size_t src_len) | 352 | const void *src, size_t src_len) |
261 | { | 353 | { |
262 | struct scatterlist sg_in[1], sg_out[3]; | 354 | struct sg_table sg_in; |
355 | struct scatterlist sg_out[3], prealloc_sg; | ||
263 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); | 356 | struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); |
264 | struct blkcipher_desc desc = { .tfm = tfm }; | 357 | struct blkcipher_desc desc = { .tfm = tfm }; |
265 | char pad[16]; | 358 | char pad[16]; |
@@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len, | |||
271 | if (IS_ERR(tfm)) | 364 | if (IS_ERR(tfm)) |
272 | return PTR_ERR(tfm); | 365 | return PTR_ERR(tfm); |
273 | 366 | ||
274 | sg_init_table(sg_in, 1); | ||
275 | sg_set_buf(sg_in, src, src_len); | ||
276 | sg_init_table(sg_out, 3); | 367 | sg_init_table(sg_out, 3); |
277 | sg_set_buf(&sg_out[0], dst1, *dst1_len); | 368 | sg_set_buf(&sg_out[0], dst1, *dst1_len); |
278 | sg_set_buf(&sg_out[1], dst2, *dst2_len); | 369 | sg_set_buf(&sg_out[1], dst2, *dst2_len); |
279 | sg_set_buf(&sg_out[2], pad, sizeof(pad)); | 370 | sg_set_buf(&sg_out[2], pad, sizeof(pad)); |
371 | ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); | ||
372 | if (ret) | ||
373 | goto out_tfm; | ||
280 | 374 | ||
281 | crypto_blkcipher_setkey((void *)tfm, key, key_len); | 375 | crypto_blkcipher_setkey((void *)tfm, key, key_len); |
282 | iv = crypto_blkcipher_crt(tfm)->iv; | 376 | iv = crypto_blkcipher_crt(tfm)->iv; |
283 | ivsize = crypto_blkcipher_ivsize(tfm); | 377 | ivsize = crypto_blkcipher_ivsize(tfm); |
284 | |||
285 | memcpy(iv, aes_iv, ivsize); | 378 | memcpy(iv, aes_iv, ivsize); |
286 | 379 | ||
287 | /* | 380 | /* |
@@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len, | |||
290 | print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, | 383 | print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, |
291 | src, src_len, 1); | 384 | src, src_len, 1); |
292 | */ | 385 | */ |
293 | 386 | ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); | |
294 | ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); | ||
295 | crypto_free_blkcipher(tfm); | ||
296 | if (ret < 0) { | 387 | if (ret < 0) { |
297 | pr_err("ceph_aes_decrypt failed %d\n", ret); | 388 | pr_err("ceph_aes_decrypt failed %d\n", ret); |
298 | return ret; | 389 | goto out_sg; |
299 | } | 390 | } |
300 | 391 | ||
301 | if (src_len <= *dst1_len) | 392 | if (src_len <= *dst1_len) |
@@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len, | |||
325 | dst2, *dst2_len, 1); | 416 | dst2, *dst2_len, 1); |
326 | */ | 417 | */ |
327 | 418 | ||
328 | return 0; | 419 | out_sg: |
420 | teardown_sgtable(&sg_in); | ||
421 | out_tfm: | ||
422 | crypto_free_blkcipher(tfm); | ||
423 | return ret; | ||
329 | } | 424 | } |
330 | 425 | ||
331 | 426 | ||
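The setup_sgtable()/teardown_sgtable() pair added above exists because the encrypt/decrypt buffers may come from ceph_kvmalloc(), i.e. they can be vmalloc'ed and not physically contiguous, so a single sg_set_buf() cannot describe them. A minimal caller sketch of the intended pattern follows; the wrapper function and its buffer are hypothetical, only the two helpers come from the patch.

#include <linux/scatterlist.h>

/* Hypothetical caller mirroring the out_sg/out_tfm unwind used in
 * ceph_aes_encrypt() above; 'buf' may be kmalloc'ed or vmalloc'ed. */
static int example_wrap_buffer(const void *buf, unsigned int buf_len)
{
	struct sg_table sgt;
	struct scatterlist prealloc_sg;	/* avoids an allocation in the single-sg case */
	int ret;

	ret = setup_sgtable(&sgt, &prealloc_sg, buf, buf_len);
	if (ret)
		return ret;		/* nothing set up, nothing to tear down */

	/* ... hand sgt.sgl to the blkcipher API here ... */

	teardown_sgtable(&sgt);		/* frees the table only if sg_alloc_table() was used */
	return 0;
}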
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f3fc54eac09d..6f164289bde8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd) | |||
1007 | static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) | 1007 | static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) |
1008 | { | 1008 | { |
1009 | dout("__remove_osd %p\n", osd); | 1009 | dout("__remove_osd %p\n", osd); |
1010 | BUG_ON(!list_empty(&osd->o_requests)); | 1010 | WARN_ON(!list_empty(&osd->o_requests)); |
1011 | BUG_ON(!list_empty(&osd->o_linger_requests)); | 1011 | WARN_ON(!list_empty(&osd->o_linger_requests)); |
1012 | 1012 | ||
1013 | rb_erase(&osd->o_node, &osdc->osds); | 1013 | rb_erase(&osd->o_node, &osdc->osds); |
1014 | list_del_init(&osd->o_osd_lru); | 1014 | list_del_init(&osd->o_osd_lru); |
@@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, | |||
1254 | if (list_empty(&req->r_osd_item)) | 1254 | if (list_empty(&req->r_osd_item)) |
1255 | req->r_osd = NULL; | 1255 | req->r_osd = NULL; |
1256 | } | 1256 | } |
1257 | |||
1258 | list_del_init(&req->r_req_lru_item); /* can be on notarget */ | ||
1257 | ceph_osdc_put_request(req); | 1259 | ceph_osdc_put_request(req); |
1258 | } | 1260 | } |
1259 | 1261 | ||
@@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc, | |||
1395 | if (req->r_osd) { | 1397 | if (req->r_osd) { |
1396 | __cancel_request(req); | 1398 | __cancel_request(req); |
1397 | list_del_init(&req->r_osd_item); | 1399 | list_del_init(&req->r_osd_item); |
1400 | list_del_init(&req->r_linger_osd_item); | ||
1398 | req->r_osd = NULL; | 1401 | req->r_osd = NULL; |
1399 | } | 1402 | } |
1400 | 1403 | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a6882686ca3a..b9b7dfaf202b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2685,13 +2685,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
2685 | int idx = 0; | 2685 | int idx = 0; |
2686 | u32 portid = NETLINK_CB(cb->skb).portid; | 2686 | u32 portid = NETLINK_CB(cb->skb).portid; |
2687 | u32 seq = cb->nlh->nlmsg_seq; | 2687 | u32 seq = cb->nlh->nlmsg_seq; |
2688 | struct nlattr *extfilt; | ||
2689 | u32 filter_mask = 0; | 2688 | u32 filter_mask = 0; |
2690 | 2689 | ||
2691 | extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), | 2690 | if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { |
2692 | IFLA_EXT_MASK); | 2691 | struct nlattr *extfilt; |
2693 | if (extfilt) | 2692 | |
2694 | filter_mask = nla_get_u32(extfilt); | 2693 | extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), |
2694 | IFLA_EXT_MASK); | ||
2695 | if (extfilt) { | ||
2696 | if (nla_len(extfilt) < sizeof(filter_mask)) | ||
2697 | return -EINVAL; | ||
2698 | |||
2699 | filter_mask = nla_get_u32(extfilt); | ||
2700 | } | ||
2701 | } | ||
2695 | 2702 | ||
2696 | rcu_read_lock(); | 2703 | rcu_read_lock(); |
2697 | for_each_netdev_rcu(net, dev) { | 2704 | for_each_netdev_rcu(net, dev) { |
@@ -2798,6 +2805,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2798 | if (br_spec) { | 2805 | if (br_spec) { |
2799 | nla_for_each_nested(attr, br_spec, rem) { | 2806 | nla_for_each_nested(attr, br_spec, rem) { |
2800 | if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { | 2807 | if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { |
2808 | if (nla_len(attr) < sizeof(flags)) | ||
2809 | return -EINVAL; | ||
2810 | |||
2801 | have_flags = true; | 2811 | have_flags = true; |
2802 | flags = nla_get_u16(attr); | 2812 | flags = nla_get_u16(attr); |
2803 | break; | 2813 | break; |
@@ -2868,6 +2878,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2868 | if (br_spec) { | 2878 | if (br_spec) { |
2869 | nla_for_each_nested(attr, br_spec, rem) { | 2879 | nla_for_each_nested(attr, br_spec, rem) { |
2870 | if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { | 2880 | if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { |
2881 | if (nla_len(attr) < sizeof(flags)) | ||
2882 | return -EINVAL; | ||
2883 | |||
2871 | have_flags = true; | 2884 | have_flags = true; |
2872 | flags = nla_get_u16(attr); | 2885 | flags = nla_get_u16(attr); |
2873 | break; | 2886 | break; |
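Both rtnl_bridge_setlink() and rtnl_bridge_dellink() gain the same guard above: IFLA_BRIDGE_FLAGS (and IFLA_EXT_MASK in the getlink path) is read without a nla_policy having validated it, so its length must be checked before nla_get_u16()/nla_get_u32() dereference it. A standalone sketch of that pattern; the helper name is illustrative, not part of the patch.

#include <net/netlink.h>

/* Illustrative helper: reject truncated attributes from userspace before
 * reading them, as the hunks above do for IFLA_BRIDGE_FLAGS. */
static int get_u16_attr(const struct nlattr *attr, u16 *val)
{
	if (nla_len(attr) < sizeof(*val))
		return -EINVAL;

	*val = nla_get_u16(attr);
	return 0;
}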
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c16615bfb61e..32e31c299631 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb) | |||
552 | case SKB_FCLONE_CLONE: | 552 | case SKB_FCLONE_CLONE: |
553 | fclones = container_of(skb, struct sk_buff_fclones, skb2); | 553 | fclones = container_of(skb, struct sk_buff_fclones, skb2); |
554 | 554 | ||
555 | /* Warning : We must perform the atomic_dec_and_test() before | 555 | /* The clone portion is available for |
556 | * setting skb->fclone back to SKB_FCLONE_FREE, otherwise | 556 | * fast-cloning again. |
557 | * skb_clone() could set clone_ref to 2 before our decrement. | ||
558 | * Anyway, if we are going to free the structure, no need to | ||
559 | * rewrite skb->fclone. | ||
560 | */ | 557 | */ |
561 | if (atomic_dec_and_test(&fclones->fclone_ref)) { | 558 | skb->fclone = SKB_FCLONE_FREE; |
559 | |||
560 | if (atomic_dec_and_test(&fclones->fclone_ref)) | ||
562 | kmem_cache_free(skbuff_fclone_cache, fclones); | 561 | kmem_cache_free(skbuff_fclone_cache, fclones); |
563 | } else { | ||
564 | /* The clone portion is available for | ||
565 | * fast-cloning again. | ||
566 | */ | ||
567 | skb->fclone = SKB_FCLONE_FREE; | ||
568 | } | ||
569 | break; | 562 | break; |
570 | } | 563 | } |
571 | } | 564 | } |
@@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
887 | if (skb->fclone == SKB_FCLONE_ORIG && | 880 | if (skb->fclone == SKB_FCLONE_ORIG && |
888 | n->fclone == SKB_FCLONE_FREE) { | 881 | n->fclone == SKB_FCLONE_FREE) { |
889 | n->fclone = SKB_FCLONE_CLONE; | 882 | n->fclone = SKB_FCLONE_CLONE; |
890 | /* As our fastclone was free, clone_ref must be 1 at this point. | 883 | atomic_inc(&fclones->fclone_ref); |
891 | * We could use atomic_inc() here, but it is faster | ||
892 | * to set the final value. | ||
893 | */ | ||
894 | atomic_set(&fclones->fclone_ref, 2); | ||
895 | } else { | 884 | } else { |
896 | if (skb_pfmemalloc(skb)) | 885 | if (skb_pfmemalloc(skb)) |
897 | gfp_mask |= __GFP_MEMALLOC; | 886 | gfp_mask |= __GFP_MEMALLOC; |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index ca11d283bbeb..93ea80196f0e 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1080,13 +1080,13 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1080 | if (!app) | 1080 | if (!app) |
1081 | return -EMSGSIZE; | 1081 | return -EMSGSIZE; |
1082 | 1082 | ||
1083 | spin_lock(&dcb_lock); | 1083 | spin_lock_bh(&dcb_lock); |
1084 | list_for_each_entry(itr, &dcb_app_list, list) { | 1084 | list_for_each_entry(itr, &dcb_app_list, list) { |
1085 | if (itr->ifindex == netdev->ifindex) { | 1085 | if (itr->ifindex == netdev->ifindex) { |
1086 | err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), | 1086 | err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), |
1087 | &itr->app); | 1087 | &itr->app); |
1088 | if (err) { | 1088 | if (err) { |
1089 | spin_unlock(&dcb_lock); | 1089 | spin_unlock_bh(&dcb_lock); |
1090 | return -EMSGSIZE; | 1090 | return -EMSGSIZE; |
1091 | } | 1091 | } |
1092 | } | 1092 | } |
@@ -1097,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1097 | else | 1097 | else |
1098 | dcbx = -EOPNOTSUPP; | 1098 | dcbx = -EOPNOTSUPP; |
1099 | 1099 | ||
1100 | spin_unlock(&dcb_lock); | 1100 | spin_unlock_bh(&dcb_lock); |
1101 | nla_nest_end(skb, app); | 1101 | nla_nest_end(skb, app); |
1102 | 1102 | ||
1103 | /* get peer info if available */ | 1103 | /* get peer info if available */ |
@@ -1234,7 +1234,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1234 | } | 1234 | } |
1235 | 1235 | ||
1236 | /* local app */ | 1236 | /* local app */ |
1237 | spin_lock(&dcb_lock); | 1237 | spin_lock_bh(&dcb_lock); |
1238 | app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); | 1238 | app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); |
1239 | if (!app) | 1239 | if (!app) |
1240 | goto dcb_unlock; | 1240 | goto dcb_unlock; |
@@ -1271,7 +1271,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1271 | else | 1271 | else |
1272 | dcbx = -EOPNOTSUPP; | 1272 | dcbx = -EOPNOTSUPP; |
1273 | 1273 | ||
1274 | spin_unlock(&dcb_lock); | 1274 | spin_unlock_bh(&dcb_lock); |
1275 | 1275 | ||
1276 | /* features flags */ | 1276 | /* features flags */ |
1277 | if (ops->getfeatcfg) { | 1277 | if (ops->getfeatcfg) { |
@@ -1326,7 +1326,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1326 | return 0; | 1326 | return 0; |
1327 | 1327 | ||
1328 | dcb_unlock: | 1328 | dcb_unlock: |
1329 | spin_unlock(&dcb_lock); | 1329 | spin_unlock_bh(&dcb_lock); |
1330 | nla_put_failure: | 1330 | nla_put_failure: |
1331 | return err; | 1331 | return err; |
1332 | } | 1332 | } |
@@ -1762,10 +1762,10 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) | |||
1762 | struct dcb_app_type *itr; | 1762 | struct dcb_app_type *itr; |
1763 | u8 prio = 0; | 1763 | u8 prio = 0; |
1764 | 1764 | ||
1765 | spin_lock(&dcb_lock); | 1765 | spin_lock_bh(&dcb_lock); |
1766 | if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) | 1766 | if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) |
1767 | prio = itr->app.priority; | 1767 | prio = itr->app.priority; |
1768 | spin_unlock(&dcb_lock); | 1768 | spin_unlock_bh(&dcb_lock); |
1769 | 1769 | ||
1770 | return prio; | 1770 | return prio; |
1771 | } | 1771 | } |
@@ -1789,7 +1789,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) | |||
1789 | if (dev->dcbnl_ops->getdcbx) | 1789 | if (dev->dcbnl_ops->getdcbx) |
1790 | event.dcbx = dev->dcbnl_ops->getdcbx(dev); | 1790 | event.dcbx = dev->dcbnl_ops->getdcbx(dev); |
1791 | 1791 | ||
1792 | spin_lock(&dcb_lock); | 1792 | spin_lock_bh(&dcb_lock); |
1793 | /* Search for existing match and replace */ | 1793 | /* Search for existing match and replace */ |
1794 | if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { | 1794 | if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { |
1795 | if (new->priority) | 1795 | if (new->priority) |
@@ -1804,7 +1804,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) | |||
1804 | if (new->priority) | 1804 | if (new->priority) |
1805 | err = dcb_app_add(new, dev->ifindex); | 1805 | err = dcb_app_add(new, dev->ifindex); |
1806 | out: | 1806 | out: |
1807 | spin_unlock(&dcb_lock); | 1807 | spin_unlock_bh(&dcb_lock); |
1808 | if (!err) | 1808 | if (!err) |
1809 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); | 1809 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); |
1810 | return err; | 1810 | return err; |
@@ -1823,10 +1823,10 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) | |||
1823 | struct dcb_app_type *itr; | 1823 | struct dcb_app_type *itr; |
1824 | u8 prio = 0; | 1824 | u8 prio = 0; |
1825 | 1825 | ||
1826 | spin_lock(&dcb_lock); | 1826 | spin_lock_bh(&dcb_lock); |
1827 | if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) | 1827 | if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) |
1828 | prio |= 1 << itr->app.priority; | 1828 | prio |= 1 << itr->app.priority; |
1829 | spin_unlock(&dcb_lock); | 1829 | spin_unlock_bh(&dcb_lock); |
1830 | 1830 | ||
1831 | return prio; | 1831 | return prio; |
1832 | } | 1832 | } |
@@ -1850,7 +1850,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) | |||
1850 | if (dev->dcbnl_ops->getdcbx) | 1850 | if (dev->dcbnl_ops->getdcbx) |
1851 | event.dcbx = dev->dcbnl_ops->getdcbx(dev); | 1851 | event.dcbx = dev->dcbnl_ops->getdcbx(dev); |
1852 | 1852 | ||
1853 | spin_lock(&dcb_lock); | 1853 | spin_lock_bh(&dcb_lock); |
1854 | /* Search for existing match and abort if found */ | 1854 | /* Search for existing match and abort if found */ |
1855 | if (dcb_app_lookup(new, dev->ifindex, new->priority)) { | 1855 | if (dcb_app_lookup(new, dev->ifindex, new->priority)) { |
1856 | err = -EEXIST; | 1856 | err = -EEXIST; |
@@ -1859,7 +1859,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) | |||
1859 | 1859 | ||
1860 | err = dcb_app_add(new, dev->ifindex); | 1860 | err = dcb_app_add(new, dev->ifindex); |
1861 | out: | 1861 | out: |
1862 | spin_unlock(&dcb_lock); | 1862 | spin_unlock_bh(&dcb_lock); |
1863 | if (!err) | 1863 | if (!err) |
1864 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); | 1864 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); |
1865 | return err; | 1865 | return err; |
@@ -1882,7 +1882,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) | |||
1882 | if (dev->dcbnl_ops->getdcbx) | 1882 | if (dev->dcbnl_ops->getdcbx) |
1883 | event.dcbx = dev->dcbnl_ops->getdcbx(dev); | 1883 | event.dcbx = dev->dcbnl_ops->getdcbx(dev); |
1884 | 1884 | ||
1885 | spin_lock(&dcb_lock); | 1885 | spin_lock_bh(&dcb_lock); |
1886 | /* Search for existing match and remove it. */ | 1886 | /* Search for existing match and remove it. */ |
1887 | if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { | 1887 | if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { |
1888 | list_del(&itr->list); | 1888 | list_del(&itr->list); |
@@ -1890,7 +1890,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) | |||
1890 | err = 0; | 1890 | err = 0; |
1891 | } | 1891 | } |
1892 | 1892 | ||
1893 | spin_unlock(&dcb_lock); | 1893 | spin_unlock_bh(&dcb_lock); |
1894 | if (!err) | 1894 | if (!err) |
1895 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); | 1895 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); |
1896 | return err; | 1896 | return err; |
@@ -1902,12 +1902,12 @@ static void dcb_flushapp(void) | |||
1902 | struct dcb_app_type *app; | 1902 | struct dcb_app_type *app; |
1903 | struct dcb_app_type *tmp; | 1903 | struct dcb_app_type *tmp; |
1904 | 1904 | ||
1905 | spin_lock(&dcb_lock); | 1905 | spin_lock_bh(&dcb_lock); |
1906 | list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { | 1906 | list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { |
1907 | list_del(&app->list); | 1907 | list_del(&app->list); |
1908 | kfree(app); | 1908 | kfree(app); |
1909 | } | 1909 | } |
1910 | spin_unlock(&dcb_lock); | 1910 | spin_unlock_bh(&dcb_lock); |
1911 | } | 1911 | } |
1912 | 1912 | ||
1913 | static int __init dcbnl_init(void) | 1913 | static int __init dcbnl_init(void) |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6d1817449c36..ab03e00ffe8f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p, | |||
489 | /* We could not connect to a designated PHY, so use the switch internal | 489 | /* We could not connect to a designated PHY, so use the switch internal |
490 | * MDIO bus instead | 490 | * MDIO bus instead |
491 | */ | 491 | */ |
492 | if (!p->phy) | 492 | if (!p->phy) { |
493 | p->phy = ds->slave_mii_bus->phy_map[p->port]; | 493 | p->phy = ds->slave_mii_bus->phy_map[p->port]; |
494 | else | 494 | phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, |
495 | p->phy_interface); | ||
496 | } else { | ||
495 | pr_info("attached PHY at address %d [%s]\n", | 497 | pr_info("attached PHY at address %d [%s]\n", |
496 | p->phy->addr, p->phy->drv->name); | 498 | p->phy->addr, p->phy->drv->name); |
499 | } | ||
497 | } | 500 | } |
498 | 501 | ||
499 | int dsa_slave_suspend(struct net_device *slave_dev) | 502 | int dsa_slave_suspend(struct net_device *slave_dev) |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8b7fe5b03906..e67da4e6c324 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1386,6 +1386,17 @@ out: | |||
1386 | return pp; | 1386 | return pp; |
1387 | } | 1387 | } |
1388 | 1388 | ||
1389 | int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | ||
1390 | { | ||
1391 | if (sk->sk_family == AF_INET) | ||
1392 | return ip_recv_error(sk, msg, len, addr_len); | ||
1393 | #if IS_ENABLED(CONFIG_IPV6) | ||
1394 | if (sk->sk_family == AF_INET6) | ||
1395 | return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len); | ||
1396 | #endif | ||
1397 | return -EINVAL; | ||
1398 | } | ||
1399 | |||
1389 | static int inet_gro_complete(struct sk_buff *skb, int nhoff) | 1400 | static int inet_gro_complete(struct sk_buff *skb, int nhoff) |
1390 | { | 1401 | { |
1391 | __be16 newlen = htons(skb->len - nhoff); | 1402 | __be16 newlen = htons(skb->len - nhoff); |
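The new inet_recv_error() gives dual-stack protocols a single dispatch point for MSG_ERRQUEUE reads; the later hunks in net/ipv4/ping.c and net/ipv4/tcp.c switch to it. The call-site shape, sketched outside the diff with the usual recvmsg() arguments assumed:

	/* inside a recvmsg handler; sk, msg, len, addr_len, flags as usual */
	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);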
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f2e15738534d..8f7bd56955b0 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) | |||
62 | else | 62 | else |
63 | res->tclassid = 0; | 63 | res->tclassid = 0; |
64 | #endif | 64 | #endif |
65 | |||
66 | if (err == -ESRCH) | ||
67 | err = -ENETUNREACH; | ||
68 | |||
65 | return err; | 69 | return err; |
66 | } | 70 | } |
67 | EXPORT_SYMBOL_GPL(__fib_lookup); | 71 | EXPORT_SYMBOL_GPL(__fib_lookup); |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 32e78924e246..606c520ffd5a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff) | |||
133 | int err = -ENOSYS; | 133 | int err = -ENOSYS; |
134 | const struct net_offload **offloads; | 134 | const struct net_offload **offloads; |
135 | 135 | ||
136 | udp_tunnel_gro_complete(skb, nhoff); | ||
137 | |||
136 | rcu_read_lock(); | 138 | rcu_read_lock(); |
137 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; | 139 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; |
138 | ops = rcu_dereference(offloads[proto]); | 140 | ops = rcu_dereference(offloads[proto]); |
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 065cd94c640c..dedb21e99914 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, | |||
144 | gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); | 144 | gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); |
145 | geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); | 145 | geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); |
146 | 146 | ||
147 | skb_set_inner_protocol(skb, htons(ETH_P_TEB)); | ||
148 | |||
147 | return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst, | 149 | return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst, |
148 | tos, ttl, df, src_port, dst_port, xnet); | 150 | tos, ttl, df, src_port, dst_port, xnet); |
149 | } | 151 | } |
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module); | |||
364 | static void __exit geneve_cleanup_module(void) | 366 | static void __exit geneve_cleanup_module(void) |
365 | { | 367 | { |
366 | destroy_workqueue(geneve_wq); | 368 | destroy_workqueue(geneve_wq); |
369 | unregister_pernet_subsys(&geneve_net_ops); | ||
367 | } | 370 | } |
368 | module_exit(geneve_cleanup_module); | 371 | module_exit(geneve_cleanup_module); |
369 | 372 | ||
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index fb70e3ecc3e4..bb15d0e03d4f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) | |||
318 | return scount; | 318 | return scount; |
319 | } | 319 | } |
320 | 320 | ||
321 | #define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb)) | 321 | static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) |
322 | |||
323 | static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) | ||
324 | { | 322 | { |
325 | struct sk_buff *skb; | 323 | struct sk_buff *skb; |
326 | struct rtable *rt; | 324 | struct rtable *rt; |
@@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) | |||
330 | struct flowi4 fl4; | 328 | struct flowi4 fl4; |
331 | int hlen = LL_RESERVED_SPACE(dev); | 329 | int hlen = LL_RESERVED_SPACE(dev); |
332 | int tlen = dev->needed_tailroom; | 330 | int tlen = dev->needed_tailroom; |
331 | unsigned int size = mtu; | ||
333 | 332 | ||
334 | while (1) { | 333 | while (1) { |
335 | skb = alloc_skb(size + hlen + tlen, | 334 | skb = alloc_skb(size + hlen + tlen, |
@@ -341,7 +340,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) | |||
341 | return NULL; | 340 | return NULL; |
342 | } | 341 | } |
343 | skb->priority = TC_PRIO_CONTROL; | 342 | skb->priority = TC_PRIO_CONTROL; |
344 | igmp_skb_size(skb) = size; | ||
345 | 343 | ||
346 | rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, | 344 | rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, |
347 | 0, 0, | 345 | 0, 0, |
@@ -354,6 +352,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) | |||
354 | skb_dst_set(skb, &rt->dst); | 352 | skb_dst_set(skb, &rt->dst); |
355 | skb->dev = dev; | 353 | skb->dev = dev; |
356 | 354 | ||
355 | skb->reserved_tailroom = skb_end_offset(skb) - | ||
356 | min(mtu, skb_end_offset(skb)); | ||
357 | skb_reserve(skb, hlen); | 357 | skb_reserve(skb, hlen); |
358 | 358 | ||
359 | skb_reset_network_header(skb); | 359 | skb_reset_network_header(skb); |
@@ -423,8 +423,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, | |||
423 | return skb; | 423 | return skb; |
424 | } | 424 | } |
425 | 425 | ||
426 | #define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \ | 426 | #define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0) |
427 | skb_tailroom(skb)) : 0) | ||
428 | 427 | ||
429 | static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, | 428 | static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, |
430 | int type, int gdeleted, int sdeleted) | 429 | int type, int gdeleted, int sdeleted) |
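The igmp change drops the private igmp_skb_size() bookkeeping in skb->cb and instead clamps the usable tailroom when the packet is allocated, so the stock skb_availroom() helper (used by the new AVAILABLE() macro) reports at most mtu bytes even when alloc_skb() rounded the allocation up. Restated outside the diff, with skb and mtu assumed:

	/* from igmpv3_newpack() above: anything the allocator granted beyond
	 * 'mtu' is marked reserved, so for a linear skb
	 * skb_availroom(skb) == skb_tailroom(skb) - skb->reserved_tailroom */
	skb->reserved_tailroom = skb_end_offset(skb) -
				 min(mtu, skb_end_offset(skb));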
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c373a9ad4555..9daf2177dc00 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, | |||
195 | for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { | 195 | for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { |
196 | if (!CMSG_OK(msg, cmsg)) | 196 | if (!CMSG_OK(msg, cmsg)) |
197 | return -EINVAL; | 197 | return -EINVAL; |
198 | #if defined(CONFIG_IPV6) | 198 | #if IS_ENABLED(CONFIG_IPV6) |
199 | if (allow_ipv6 && | 199 | if (allow_ipv6 && |
200 | cmsg->cmsg_level == SOL_IPV6 && | 200 | cmsg->cmsg_level == SOL_IPV6 && |
201 | cmsg->cmsg_type == IPV6_PKTINFO) { | 201 | cmsg->cmsg_type == IPV6_PKTINFO) { |
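The ip_sockglue.c one-liner matters when IPv6 is built as a module: defined(CONFIG_IPV6) is only true for built-in IPv6, while IS_ENABLED() covers both built-in and modular configurations. In kconfig terms:

#include <linux/kconfig.h>

#if defined(CONFIG_IPV6)	/* true only for CONFIG_IPV6=y */
#endif

#if IS_ENABLED(CONFIG_IPV6)	/* true for CONFIG_IPV6=y or CONFIG_IPV6=m */
#endif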
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 3e861011e4a3..1a7e979e80ba 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = { | |||
528 | .validate = vti_tunnel_validate, | 528 | .validate = vti_tunnel_validate, |
529 | .newlink = vti_newlink, | 529 | .newlink = vti_newlink, |
530 | .changelink = vti_changelink, | 530 | .changelink = vti_changelink, |
531 | .dellink = ip_tunnel_dellink, | ||
531 | .get_size = vti_get_size, | 532 | .get_size = vti_get_size, |
532 | .fill_info = vti_fill_info, | 533 | .fill_info = vti_fill_info, |
533 | }; | 534 | }; |
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index c1023c445920..665de06561cd 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -24,6 +24,7 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr, | |||
24 | struct nf_nat_range range; | 24 | struct nf_nat_range range; |
25 | unsigned int verdict; | 25 | unsigned int verdict; |
26 | 26 | ||
27 | memset(&range, 0, sizeof(range)); | ||
27 | range.flags = priv->flags; | 28 | range.flags = priv->flags; |
28 | 29 | ||
29 | verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, | 30 | verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 57f7c9804139..5d740cccf69e 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) | |||
217 | &ipv6_hdr(skb)->daddr)) | 217 | &ipv6_hdr(skb)->daddr)) |
218 | continue; | 218 | continue; |
219 | #endif | 219 | #endif |
220 | } else { | ||
221 | continue; | ||
220 | } | 222 | } |
221 | 223 | ||
222 | if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) | 224 | if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) |
@@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
853 | if (flags & MSG_OOB) | 855 | if (flags & MSG_OOB) |
854 | goto out; | 856 | goto out; |
855 | 857 | ||
856 | if (flags & MSG_ERRQUEUE) { | 858 | if (flags & MSG_ERRQUEUE) |
857 | if (family == AF_INET) { | 859 | return inet_recv_error(sk, msg, len, addr_len); |
858 | return ip_recv_error(sk, msg, len, addr_len); | ||
859 | #if IS_ENABLED(CONFIG_IPV6) | ||
860 | } else if (family == AF_INET6) { | ||
861 | return pingv6_ops.ipv6_recv_error(sk, msg, len, | ||
862 | addr_len); | ||
863 | #endif | ||
864 | } | ||
865 | } | ||
866 | 860 | ||
867 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 861 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
868 | if (!skb) | 862 | if (!skb) |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 39ec0c379545..38c2bcb8dd5d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1598 | u32 urg_hole = 0; | 1598 | u32 urg_hole = 0; |
1599 | 1599 | ||
1600 | if (unlikely(flags & MSG_ERRQUEUE)) | 1600 | if (unlikely(flags & MSG_ERRQUEUE)) |
1601 | return ip_recv_error(sk, msg, len, addr_len); | 1601 | return inet_recv_error(sk, msg, len, addr_len); |
1602 | 1602 | ||
1603 | if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && | 1603 | if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && |
1604 | (sk->sk_state == TCP_ESTABLISHED)) | 1604 | (sk->sk_state == TCP_ESTABLISHED)) |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a12b455928e5..d107ee246a1d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) | |||
2315 | 2315 | ||
2316 | /* Undo procedures. */ | 2316 | /* Undo procedures. */ |
2317 | 2317 | ||
2318 | /* We can clear retrans_stamp when there are no retransmissions in the | ||
2319 | * window. It would seem that it is trivially available for us in | ||
2320 | * tp->retrans_out, however, that kind of assumptions doesn't consider | ||
2321 | * what will happen if errors occur when sending retransmission for the | ||
2322 | * second time. ...It could the that such segment has only | ||
2323 | * TCPCB_EVER_RETRANS set at the present time. It seems that checking | ||
2324 | * the head skb is enough except for some reneging corner cases that | ||
2325 | * are not worth the effort. | ||
2326 | * | ||
2327 | * Main reason for all this complexity is the fact that connection dying | ||
2328 | * time now depends on the validity of the retrans_stamp, in particular, | ||
2329 | * that successive retransmissions of a segment must not advance | ||
2330 | * retrans_stamp under any conditions. | ||
2331 | */ | ||
2332 | static bool tcp_any_retrans_done(const struct sock *sk) | ||
2333 | { | ||
2334 | const struct tcp_sock *tp = tcp_sk(sk); | ||
2335 | struct sk_buff *skb; | ||
2336 | |||
2337 | if (tp->retrans_out) | ||
2338 | return true; | ||
2339 | |||
2340 | skb = tcp_write_queue_head(sk); | ||
2341 | if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) | ||
2342 | return true; | ||
2343 | |||
2344 | return false; | ||
2345 | } | ||
2346 | |||
2318 | #if FASTRETRANS_DEBUG > 1 | 2347 | #if FASTRETRANS_DEBUG > 1 |
2319 | static void DBGUNDO(struct sock *sk, const char *msg) | 2348 | static void DBGUNDO(struct sock *sk, const char *msg) |
2320 | { | 2349 | { |
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) | |||
2410 | * is ACKed. For Reno it is MUST to prevent false | 2439 | * is ACKed. For Reno it is MUST to prevent false |
2411 | * fast retransmits (RFC2582). SACK TCP is safe. */ | 2440 | * fast retransmits (RFC2582). SACK TCP is safe. */ |
2412 | tcp_moderate_cwnd(tp); | 2441 | tcp_moderate_cwnd(tp); |
2442 | if (!tcp_any_retrans_done(sk)) | ||
2443 | tp->retrans_stamp = 0; | ||
2413 | return true; | 2444 | return true; |
2414 | } | 2445 | } |
2415 | tcp_set_ca_state(sk, TCP_CA_Open); | 2446 | tcp_set_ca_state(sk, TCP_CA_Open); |
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk) | |||
2430 | return false; | 2461 | return false; |
2431 | } | 2462 | } |
2432 | 2463 | ||
2433 | /* We can clear retrans_stamp when there are no retransmissions in the | ||
2434 | * window. It would seem that it is trivially available for us in | ||
2435 | * tp->retrans_out, however, that kind of assumptions doesn't consider | ||
2436 | * what will happen if errors occur when sending retransmission for the | ||
2437 | * second time. ...It could the that such segment has only | ||
2438 | * TCPCB_EVER_RETRANS set at the present time. It seems that checking | ||
2439 | * the head skb is enough except for some reneging corner cases that | ||
2440 | * are not worth the effort. | ||
2441 | * | ||
2442 | * Main reason for all this complexity is the fact that connection dying | ||
2443 | * time now depends on the validity of the retrans_stamp, in particular, | ||
2444 | * that successive retransmissions of a segment must not advance | ||
2445 | * retrans_stamp under any conditions. | ||
2446 | */ | ||
2447 | static bool tcp_any_retrans_done(const struct sock *sk) | ||
2448 | { | ||
2449 | const struct tcp_sock *tp = tcp_sk(sk); | ||
2450 | struct sk_buff *skb; | ||
2451 | |||
2452 | if (tp->retrans_out) | ||
2453 | return true; | ||
2454 | |||
2455 | skb = tcp_write_queue_head(sk); | ||
2456 | if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) | ||
2457 | return true; | ||
2458 | |||
2459 | return false; | ||
2460 | } | ||
2461 | |||
2462 | /* Undo during loss recovery after partial ACK or using F-RTO. */ | 2464 | /* Undo during loss recovery after partial ACK or using F-RTO. */ |
2463 | static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) | 2465 | static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) |
2464 | { | 2466 | { |
@@ -5229,7 +5231,7 @@ slow_path: | |||
5229 | if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) | 5231 | if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) |
5230 | goto csum_error; | 5232 | goto csum_error; |
5231 | 5233 | ||
5232 | if (!th->ack && !th->rst) | 5234 | if (!th->ack && !th->rst && !th->syn) |
5233 | goto discard; | 5235 | goto discard; |
5234 | 5236 | ||
5235 | /* | 5237 | /* |
@@ -5648,7 +5650,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5648 | goto discard; | 5650 | goto discard; |
5649 | } | 5651 | } |
5650 | 5652 | ||
5651 | if (!th->ack && !th->rst) | 5653 | if (!th->ack && !th->rst && !th->syn) |
5652 | goto discard; | 5654 | goto discard; |
5653 | 5655 | ||
5654 | if (!tcp_validate_incoming(sk, skb, th, 0)) | 5656 | if (!tcp_validate_incoming(sk, skb, th, 0)) |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9c7d7621466b..147be2024290 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
598 | if (th->rst) | 598 | if (th->rst) |
599 | return; | 599 | return; |
600 | 600 | ||
601 | if (skb_rtable(skb)->rt_type != RTN_LOCAL) | 601 | /* If sk not NULL, it means we did a successful lookup and incoming |
602 | * route had to be correct. prequeue might have dropped our dst. | ||
603 | */ | ||
604 | if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) | ||
602 | return; | 605 | return; |
603 | 606 | ||
604 | /* Swap the send and the receive. */ | 607 | /* Swap the send and the receive. */ |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 12c3c8ef3849..0e32d2e1bdbf 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb) | |||
502 | 502 | ||
503 | skb->protocol = gre_proto; | 503 | skb->protocol = gre_proto; |
504 | /* WCCP version 1 and 2 protocol decoding. | 504 | /* WCCP version 1 and 2 protocol decoding. |
505 | * - Change protocol to IP | 505 | * - Change protocol to IPv6 |
506 | * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header | 506 | * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header |
507 | */ | 507 | */ |
508 | if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { | 508 | if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { |
509 | skb->protocol = htons(ETH_P_IP); | 509 | skb->protocol = htons(ETH_P_IPV6); |
510 | if ((*(h + offset) & 0xF0) != 0x40) | 510 | if ((*(h + offset) & 0xF0) != 0x40) |
511 | offset += 4; | 511 | offset += 4; |
512 | } | 512 | } |
@@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
961 | else | 961 | else |
962 | dev->flags &= ~IFF_POINTOPOINT; | 962 | dev->flags &= ~IFF_POINTOPOINT; |
963 | 963 | ||
964 | dev->iflink = p->link; | ||
965 | |||
966 | /* Precalculate GRE options length */ | 964 | /* Precalculate GRE options length */ |
967 | if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { | 965 | if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { |
968 | if (t->parms.o_flags&GRE_CSUM) | 966 | if (t->parms.o_flags&GRE_CSUM) |
@@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev) | |||
1272 | u64_stats_init(&ip6gre_tunnel_stats->syncp); | 1270 | u64_stats_init(&ip6gre_tunnel_stats->syncp); |
1273 | } | 1271 | } |
1274 | 1272 | ||
1273 | dev->iflink = tunnel->parms.link; | ||
1275 | 1274 | ||
1276 | return 0; | 1275 | return 0; |
1277 | } | 1276 | } |
@@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev) | |||
1481 | if (!dev->tstats) | 1480 | if (!dev->tstats) |
1482 | return -ENOMEM; | 1481 | return -ENOMEM; |
1483 | 1482 | ||
1483 | dev->iflink = tunnel->parms.link; | ||
1484 | |||
1484 | return 0; | 1485 | return 0; |
1485 | } | 1486 | } |
1486 | 1487 | ||
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index a071563a7e6e..01e12d0d8fcc 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
69 | int nhoff; | 69 | int nhoff; |
70 | 70 | ||
71 | if (unlikely(skb_shinfo(skb)->gso_type & | 71 | if (unlikely(skb_shinfo(skb)->gso_type & |
72 | ~(SKB_GSO_UDP | | 72 | ~(SKB_GSO_TCPV4 | |
73 | SKB_GSO_UDP | | ||
73 | SKB_GSO_DODGY | | 74 | SKB_GSO_DODGY | |
74 | SKB_GSO_TCP_ECN | | 75 | SKB_GSO_TCP_ECN | |
75 | SKB_GSO_GRE | | 76 | SKB_GSO_GRE | |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9409887fb664..9cb94cfa0ae7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev) | |||
272 | int err; | 272 | int err; |
273 | 273 | ||
274 | t = netdev_priv(dev); | 274 | t = netdev_priv(dev); |
275 | err = ip6_tnl_dev_init(dev); | ||
276 | if (err < 0) | ||
277 | goto out; | ||
278 | 275 | ||
279 | err = register_netdevice(dev); | 276 | err = register_netdevice(dev); |
280 | if (err < 0) | 277 | if (err < 0) |
@@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) | |||
1462 | 1459 | ||
1463 | 1460 | ||
1464 | static const struct net_device_ops ip6_tnl_netdev_ops = { | 1461 | static const struct net_device_ops ip6_tnl_netdev_ops = { |
1462 | .ndo_init = ip6_tnl_dev_init, | ||
1465 | .ndo_uninit = ip6_tnl_dev_uninit, | 1463 | .ndo_uninit = ip6_tnl_dev_uninit, |
1466 | .ndo_start_xmit = ip6_tnl_xmit, | 1464 | .ndo_start_xmit = ip6_tnl_xmit, |
1467 | .ndo_do_ioctl = ip6_tnl_ioctl, | 1465 | .ndo_do_ioctl = ip6_tnl_ioctl, |
@@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) | |||
1546 | struct ip6_tnl *t = netdev_priv(dev); | 1544 | struct ip6_tnl *t = netdev_priv(dev); |
1547 | struct net *net = dev_net(dev); | 1545 | struct net *net = dev_net(dev); |
1548 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 1546 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
1549 | int err = ip6_tnl_dev_init_gen(dev); | ||
1550 | |||
1551 | if (err) | ||
1552 | return err; | ||
1553 | 1547 | ||
1554 | t->parms.proto = IPPROTO_IPV6; | 1548 | t->parms.proto = IPPROTO_IPV6; |
1555 | dev_hold(dev); | 1549 | dev_hold(dev); |
1556 | 1550 | ||
1557 | ip6_tnl_link_config(t); | ||
1558 | |||
1559 | rcu_assign_pointer(ip6n->tnls_wc[0], t); | 1551 | rcu_assign_pointer(ip6n->tnls_wc[0], t); |
1560 | return 0; | 1552 | return 0; |
1561 | } | 1553 | } |
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index b04ed72c4542..8db6c98fe218 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, | |||
79 | uh->source = src_port; | 79 | uh->source = src_port; |
80 | 80 | ||
81 | uh->len = htons(skb->len); | 81 | uh->len = htons(skb->len); |
82 | uh->check = 0; | ||
83 | 82 | ||
84 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 83 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
85 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 84 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
86 | | IPSKB_REROUTED); | 85 | | IPSKB_REROUTED); |
87 | skb_dst_set(skb, dst); | 86 | skb_dst_set(skb, dst); |
88 | 87 | ||
89 | udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, | 88 | udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len); |
90 | &sk->sk_v6_daddr, skb->len); | ||
91 | 89 | ||
92 | __skb_push(skb, sizeof(*ip6h)); | 90 | __skb_push(skb, sizeof(*ip6h)); |
93 | skb_reset_network_header(skb); | 91 | skb_reset_network_header(skb); |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d440bb585524..bcda14de7f84 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev) | |||
172 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); | 172 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
173 | int err; | 173 | int err; |
174 | 174 | ||
175 | err = vti6_dev_init(dev); | ||
176 | if (err < 0) | ||
177 | goto out; | ||
178 | |||
179 | err = register_netdevice(dev); | 175 | err = register_netdevice(dev); |
180 | if (err < 0) | 176 | if (err < 0) |
181 | goto out; | 177 | goto out; |
@@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu) | |||
783 | } | 779 | } |
784 | 780 | ||
785 | static const struct net_device_ops vti6_netdev_ops = { | 781 | static const struct net_device_ops vti6_netdev_ops = { |
782 | .ndo_init = vti6_dev_init, | ||
786 | .ndo_uninit = vti6_dev_uninit, | 783 | .ndo_uninit = vti6_dev_uninit, |
787 | .ndo_start_xmit = vti6_tnl_xmit, | 784 | .ndo_start_xmit = vti6_tnl_xmit, |
788 | .ndo_do_ioctl = vti6_ioctl, | 785 | .ndo_do_ioctl = vti6_ioctl, |
@@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) | |||
852 | struct ip6_tnl *t = netdev_priv(dev); | 849 | struct ip6_tnl *t = netdev_priv(dev); |
853 | struct net *net = dev_net(dev); | 850 | struct net *net = dev_net(dev); |
854 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); | 851 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
855 | int err = vti6_dev_init_gen(dev); | ||
856 | |||
857 | if (err) | ||
858 | return err; | ||
859 | 852 | ||
860 | t->parms.proto = IPPROTO_IPV6; | 853 | t->parms.proto = IPPROTO_IPV6; |
861 | dev_hold(dev); | 854 | dev_hold(dev); |
862 | 855 | ||
863 | vti6_link_config(t); | ||
864 | |||
865 | rcu_assign_pointer(ip6n->tnls_wc[0], t); | 856 | rcu_assign_pointer(ip6n->tnls_wc[0], t); |
866 | return 0; | 857 | return 0; |
867 | } | 858 | } |
@@ -914,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev, | |||
914 | return vti6_tnl_create2(dev); | 905 | return vti6_tnl_create2(dev); |
915 | } | 906 | } |
916 | 907 | ||
908 | static void vti6_dellink(struct net_device *dev, struct list_head *head) | ||
909 | { | ||
910 | struct net *net = dev_net(dev); | ||
911 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); | ||
912 | |||
913 | if (dev != ip6n->fb_tnl_dev) | ||
914 | unregister_netdevice_queue(dev, head); | ||
915 | } | ||
916 | |||
917 | static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], | 917 | static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], |
918 | struct nlattr *data[]) | 918 | struct nlattr *data[]) |
919 | { | 919 | { |
@@ -989,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = { | |||
989 | .setup = vti6_dev_setup, | 989 | .setup = vti6_dev_setup, |
990 | .validate = vti6_validate, | 990 | .validate = vti6_validate, |
991 | .newlink = vti6_newlink, | 991 | .newlink = vti6_newlink, |
992 | .dellink = vti6_dellink, | ||
992 | .changelink = vti6_changelink, | 993 | .changelink = vti6_changelink, |
993 | .get_size = vti6_get_size, | 994 | .get_size = vti6_get_size, |
994 | .fill_info = vti6_fill_info, | 995 | .fill_info = vti6_fill_info, |
@@ -1029,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net) | |||
1029 | if (!ip6n->fb_tnl_dev) | 1030 | if (!ip6n->fb_tnl_dev) |
1030 | goto err_alloc_dev; | 1031 | goto err_alloc_dev; |
1031 | dev_net_set(ip6n->fb_tnl_dev, net); | 1032 | dev_net_set(ip6n->fb_tnl_dev, net); |
1033 | ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops; | ||
1032 | 1034 | ||
1033 | err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); | 1035 | err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); |
1034 | if (err < 0) | 1036 | if (err < 0) |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0171f08325c3..1a01d79b8698 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -1439,6 +1439,10 @@ reg_pernet_fail: | |||
1439 | 1439 | ||
1440 | void ip6_mr_cleanup(void) | 1440 | void ip6_mr_cleanup(void) |
1441 | { | 1441 | { |
1442 | rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE); | ||
1443 | #ifdef CONFIG_IPV6_PIMSM_V2 | ||
1444 | inet6_del_protocol(&pim6_protocol, IPPROTO_PIM); | ||
1445 | #endif | ||
1442 | unregister_netdevice_notifier(&ip6_mr_notifier); | 1446 | unregister_netdevice_notifier(&ip6_mr_notifier); |
1443 | unregister_pernet_subsys(&ip6mr_net_ops); | 1447 | unregister_pernet_subsys(&ip6mr_net_ops); |
1444 | kmem_cache_destroy(mrt_cachep); | 1448 | kmem_cache_destroy(mrt_cachep); |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 9648de2b6745..ed2c4e400b46 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1550,7 +1550,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb, | |||
1550 | hdr->daddr = *daddr; | 1550 | hdr->daddr = *daddr; |
1551 | } | 1551 | } |
1552 | 1552 | ||
1553 | static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) | 1553 | static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) |
1554 | { | 1554 | { |
1555 | struct net_device *dev = idev->dev; | 1555 | struct net_device *dev = idev->dev; |
1556 | struct net *net = dev_net(dev); | 1556 | struct net *net = dev_net(dev); |
@@ -1561,13 +1561,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) | |||
1561 | const struct in6_addr *saddr; | 1561 | const struct in6_addr *saddr; |
1562 | int hlen = LL_RESERVED_SPACE(dev); | 1562 | int hlen = LL_RESERVED_SPACE(dev); |
1563 | int tlen = dev->needed_tailroom; | 1563 | int tlen = dev->needed_tailroom; |
1564 | unsigned int size = mtu + hlen + tlen; | ||
1564 | int err; | 1565 | int err; |
1565 | u8 ra[8] = { IPPROTO_ICMPV6, 0, | 1566 | u8 ra[8] = { IPPROTO_ICMPV6, 0, |
1566 | IPV6_TLV_ROUTERALERT, 2, 0, 0, | 1567 | IPV6_TLV_ROUTERALERT, 2, 0, 0, |
1567 | IPV6_TLV_PADN, 0 }; | 1568 | IPV6_TLV_PADN, 0 }; |
1568 | 1569 | ||
1569 | /* we assume size > sizeof(ra) here */ | 1570 | /* we assume size > sizeof(ra) here */ |
1570 | size += hlen + tlen; | ||
1571 | /* limit our allocations to order-0 page */ | 1571 | /* limit our allocations to order-0 page */ |
1572 | size = min_t(int, size, SKB_MAX_ORDER(0, 0)); | 1572 | size = min_t(int, size, SKB_MAX_ORDER(0, 0)); |
1573 | skb = sock_alloc_send_skb(sk, size, 1, &err); | 1573 | skb = sock_alloc_send_skb(sk, size, 1, &err); |
@@ -1576,6 +1576,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) | |||
1576 | return NULL; | 1576 | return NULL; |
1577 | 1577 | ||
1578 | skb->priority = TC_PRIO_CONTROL; | 1578 | skb->priority = TC_PRIO_CONTROL; |
1579 | skb->reserved_tailroom = skb_end_offset(skb) - | ||
1580 | min(mtu, skb_end_offset(skb)); | ||
1579 | skb_reserve(skb, hlen); | 1581 | skb_reserve(skb, hlen); |
1580 | 1582 | ||
1581 | if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { | 1583 | if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { |
@@ -1690,8 +1692,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
1690 | return skb; | 1692 | return skb; |
1691 | } | 1693 | } |
1692 | 1694 | ||
1693 | #define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ | 1695 | #define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0) |
1694 | skb_tailroom(skb)) : 0) | ||
1695 | 1696 | ||
1696 | static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | 1697 | static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, |
1697 | int type, int gdeleted, int sdeleted, int crsend) | 1698 | int type, int gdeleted, int sdeleted, int crsend) |
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 8a7ac685076d..529c119cbb14 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c | |||
@@ -25,6 +25,7 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr, | |||
25 | struct nf_nat_range range; | 25 | struct nf_nat_range range; |
26 | unsigned int verdict; | 26 | unsigned int verdict; |
27 | 27 | ||
28 | memset(&range, 0, sizeof(range)); | ||
28 | range.flags = priv->flags; | 29 | range.flags = priv->flags; |
29 | 30 | ||
30 | verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); | 31 | verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 58e5b4710127..a24557a1c1d8 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev) | |||
195 | struct sit_net *sitn = net_generic(net, sit_net_id); | 195 | struct sit_net *sitn = net_generic(net, sit_net_id); |
196 | int err; | 196 | int err; |
197 | 197 | ||
198 | err = ipip6_tunnel_init(dev); | 198 | memcpy(dev->dev_addr, &t->parms.iph.saddr, 4); |
199 | if (err < 0) | 199 | memcpy(dev->broadcast, &t->parms.iph.daddr, 4); |
200 | goto out; | ||
201 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
202 | 200 | ||
203 | if ((__force u16)t->parms.i_flags & SIT_ISATAP) | 201 | if ((__force u16)t->parms.i_flags & SIT_ISATAP) |
204 | dev->priv_flags |= IFF_ISATAP; | 202 | dev->priv_flags |= IFF_ISATAP; |
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev) | |||
207 | if (err < 0) | 205 | if (err < 0) |
208 | goto out; | 206 | goto out; |
209 | 207 | ||
210 | strcpy(t->parms.name, dev->name); | 208 | ipip6_tunnel_clone_6rd(dev, sitn); |
209 | |||
211 | dev->rtnl_link_ops = &sit_link_ops; | 210 | dev->rtnl_link_ops = &sit_link_ops; |
212 | 211 | ||
213 | dev_hold(dev); | 212 | dev_hold(dev); |
@@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) | |||
1330 | } | 1329 | } |
1331 | 1330 | ||
1332 | static const struct net_device_ops ipip6_netdev_ops = { | 1331 | static const struct net_device_ops ipip6_netdev_ops = { |
1332 | .ndo_init = ipip6_tunnel_init, | ||
1333 | .ndo_uninit = ipip6_tunnel_uninit, | 1333 | .ndo_uninit = ipip6_tunnel_uninit, |
1334 | .ndo_start_xmit = sit_tunnel_xmit, | 1334 | .ndo_start_xmit = sit_tunnel_xmit, |
1335 | .ndo_do_ioctl = ipip6_tunnel_ioctl, | 1335 | .ndo_do_ioctl = ipip6_tunnel_ioctl, |
@@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev) | |||
1378 | 1378 | ||
1379 | tunnel->dev = dev; | 1379 | tunnel->dev = dev; |
1380 | tunnel->net = dev_net(dev); | 1380 | tunnel->net = dev_net(dev); |
1381 | 1381 | strcpy(tunnel->parms.name, dev->name); | |
1382 | memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); | ||
1383 | memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); | ||
1384 | 1382 | ||
1385 | ipip6_tunnel_bind_dev(dev); | 1383 | ipip6_tunnel_bind_dev(dev); |
1386 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | 1384 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
@@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) | |||
1405 | 1403 | ||
1406 | tunnel->dev = dev; | 1404 | tunnel->dev = dev; |
1407 | tunnel->net = dev_net(dev); | 1405 | tunnel->net = dev_net(dev); |
1408 | strcpy(tunnel->parms.name, dev->name); | ||
1409 | 1406 | ||
1410 | iph->version = 4; | 1407 | iph->version = 4; |
1411 | iph->protocol = IPPROTO_IPV6; | 1408 | iph->protocol = IPPROTO_IPV6; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index ace29b60813c..dc495ae2ead0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) | |||
903 | if (th->rst) | 903 | if (th->rst) |
904 | return; | 904 | return; |
905 | 905 | ||
906 | if (!ipv6_unicast_destination(skb)) | 906 | /* If sk not NULL, it means we did a successful lookup and incoming |
907 | * route had to be correct. prequeue might have dropped our dst. | ||
908 | */ | ||
909 | if (!sk && !ipv6_unicast_destination(skb)) | ||
907 | return; | 910 | return; |
908 | 911 | ||
909 | #ifdef CONFIG_TCP_MD5SIG | 912 | #ifdef CONFIG_TCP_MD5SIG |
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 91729b807c7d..1b095ca37aa4 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -1764,6 +1764,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1764 | struct ipxhdr *ipx = NULL; | 1764 | struct ipxhdr *ipx = NULL; |
1765 | struct sk_buff *skb; | 1765 | struct sk_buff *skb; |
1766 | int copied, rc; | 1766 | int copied, rc; |
1767 | bool locked = true; | ||
1767 | 1768 | ||
1768 | lock_sock(sk); | 1769 | lock_sock(sk); |
1769 | /* put the autobinding in */ | 1770 | /* put the autobinding in */ |
@@ -1790,6 +1791,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1790 | if (sock_flag(sk, SOCK_ZAPPED)) | 1791 | if (sock_flag(sk, SOCK_ZAPPED)) |
1791 | goto out; | 1792 | goto out; |
1792 | 1793 | ||
1794 | release_sock(sk); | ||
1795 | locked = false; | ||
1793 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 1796 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
1794 | flags & MSG_DONTWAIT, &rc); | 1797 | flags & MSG_DONTWAIT, &rc); |
1795 | if (!skb) { | 1798 | if (!skb) { |
@@ -1826,7 +1829,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1826 | out_free: | 1829 | out_free: |
1827 | skb_free_datagram(sk, skb); | 1830 | skb_free_datagram(sk, skb); |
1828 | out: | 1831 | out: |
1829 | release_sock(sk); | 1832 | if (locked) |
1833 | release_sock(sk); | ||
1830 | return rc; | 1834 | return rc; |
1831 | } | 1835 | } |
1832 | 1836 | ||
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index ec24378caaaf..09d9caaec591 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c | |||
@@ -53,6 +53,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
53 | __aligned(__alignof__(struct aead_request)); | 53 | __aligned(__alignof__(struct aead_request)); |
54 | struct aead_request *aead_req = (void *) aead_req_data; | 54 | struct aead_request *aead_req = (void *) aead_req_data; |
55 | 55 | ||
56 | if (data_len == 0) | ||
57 | return -EINVAL; | ||
58 | |||
56 | memset(aead_req, 0, sizeof(aead_req_data)); | 59 | memset(aead_req, 0, sizeof(aead_req_data)); |
57 | 60 | ||
58 | sg_init_one(&pt, data, data_len); | 61 | sg_init_one(&pt, data, data_len); |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 56b53571c807..509bc157ce55 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
805 | 805 | ||
806 | memset(&params, 0, sizeof(params)); | 806 | memset(&params, 0, sizeof(params)); |
807 | memset(&csa_ie, 0, sizeof(csa_ie)); | 807 | memset(&csa_ie, 0, sizeof(csa_ie)); |
808 | err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, | 808 | err = ieee80211_parse_ch_switch_ie(sdata, elems, |
809 | ifibss->chandef.chan->band, | 809 | ifibss->chandef.chan->band, |
810 | sta_flags, ifibss->bssid, &csa_ie); | 810 | sta_flags, ifibss->bssid, &csa_ie); |
811 | /* can't switch to destination channel, fail */ | 811 | /* can't switch to destination channel, fail */ |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c2aaec4dfcf0..8c68da30595d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | |||
1642 | * ieee80211_parse_ch_switch_ie - parses channel switch IEs | 1642 | * ieee80211_parse_ch_switch_ie - parses channel switch IEs |
1643 | * @sdata: the sdata of the interface which has received the frame | 1643 | * @sdata: the sdata of the interface which has received the frame |
1644 | * @elems: parsed 802.11 elements received with the frame | 1644 | * @elems: parsed 802.11 elements received with the frame |
1645 | * @beacon: indicates if the frame was a beacon or probe response | ||
1646 | * @current_band: indicates the current band | 1645 | * @current_band: indicates the current band |
1647 | * @sta_flags: contains information about own capabilities and restrictions | 1646 | * @sta_flags: contains information about own capabilities and restrictions |
1648 | * to decide which channel switch announcements can be accepted. Only the | 1647 | * to decide which channel switch announcements can be accepted. Only the |
@@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | |||
1656 | * Return: 0 on success, <0 on error and >0 if there is nothing to parse. | 1655 | * Return: 0 on success, <0 on error and >0 if there is nothing to parse. |
1657 | */ | 1656 | */ |
1658 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | 1657 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, |
1659 | struct ieee802_11_elems *elems, bool beacon, | 1658 | struct ieee802_11_elems *elems, |
1660 | enum ieee80211_band current_band, | 1659 | enum ieee80211_band current_band, |
1661 | u32 sta_flags, u8 *bssid, | 1660 | u32 sta_flags, u8 *bssid, |
1662 | struct ieee80211_csa_ie *csa_ie); | 1661 | struct ieee80211_csa_ie *csa_ie); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index af237223a8cd..653f5eb07a27 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
766 | int i, flushed; | 766 | int i, flushed; |
767 | struct ps_data *ps; | 767 | struct ps_data *ps; |
768 | struct cfg80211_chan_def chandef; | 768 | struct cfg80211_chan_def chandef; |
769 | bool cancel_scan; | ||
769 | 770 | ||
770 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); | 771 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); |
771 | 772 | ||
772 | if (rcu_access_pointer(local->scan_sdata) == sdata) | 773 | cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata; |
774 | if (cancel_scan) | ||
773 | ieee80211_scan_cancel(local); | 775 | ieee80211_scan_cancel(local); |
774 | 776 | ||
775 | /* | 777 | /* |
@@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
898 | list_del(&sdata->u.vlan.list); | 900 | list_del(&sdata->u.vlan.list); |
899 | mutex_unlock(&local->mtx); | 901 | mutex_unlock(&local->mtx); |
900 | RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); | 902 | RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); |
903 | /* see comment in the default case below */ | ||
904 | ieee80211_free_keys(sdata, true); | ||
901 | /* no need to tell driver */ | 905 | /* no need to tell driver */ |
902 | break; | 906 | break; |
903 | case NL80211_IFTYPE_MONITOR: | 907 | case NL80211_IFTYPE_MONITOR: |
@@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
923 | /* | 927 | /* |
924 | * When we get here, the interface is marked down. | 928 | * When we get here, the interface is marked down. |
925 | * Free the remaining keys, if there are any | 929 | * Free the remaining keys, if there are any |
926 | * (shouldn't be, except maybe in WDS mode?) | 930 | * (which can happen in AP mode if userspace sets |
931 | * keys before the interface is operating, and maybe | ||
932 | * also in WDS mode) | ||
927 | * | 933 | * |
928 | * Force the key freeing to always synchronize_net() | 934 | * Force the key freeing to always synchronize_net() |
929 | * to wait for the RX path in case it is using this | 935 | * to wait for the RX path in case it is using this |
930 | * interface enqueuing frames * at this very time on | 936 | * interface enqueuing frames at this very time on |
931 | * another CPU. | 937 | * another CPU. |
932 | */ | 938 | */ |
933 | ieee80211_free_keys(sdata, true); | 939 | ieee80211_free_keys(sdata, true); |
934 | |||
935 | /* fall through */ | ||
936 | case NL80211_IFTYPE_AP: | ||
937 | skb_queue_purge(&sdata->skb_queue); | 940 | skb_queue_purge(&sdata->skb_queue); |
938 | } | 941 | } |
939 | 942 | ||
@@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
991 | 994 | ||
992 | ieee80211_recalc_ps(local, -1); | 995 | ieee80211_recalc_ps(local, -1); |
993 | 996 | ||
997 | if (cancel_scan) | ||
998 | flush_delayed_work(&local->scan_work); | ||
999 | |||
994 | if (local->open_count == 0) { | 1000 | if (local->open_count == 0) { |
995 | ieee80211_stop_device(local); | 1001 | ieee80211_stop_device(local); |
996 | 1002 | ||
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index e9f99c1e3fad..0c8b2a77d312 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, | |||
874 | 874 | ||
875 | memset(&params, 0, sizeof(params)); | 875 | memset(&params, 0, sizeof(params)); |
876 | memset(&csa_ie, 0, sizeof(csa_ie)); | 876 | memset(&csa_ie, 0, sizeof(csa_ie)); |
877 | err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band, | 877 | err = ieee80211_parse_ch_switch_ie(sdata, elems, band, |
878 | sta_flags, sdata->vif.addr, | 878 | sta_flags, sdata->vif.addr, |
879 | &csa_ie); | 879 | &csa_ie); |
880 | if (err < 0) | 880 | if (err < 0) |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2de88704278b..93af0f1c9d99 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1072 | 1072 | ||
1073 | current_band = cbss->channel->band; | 1073 | current_band = cbss->channel->band; |
1074 | memset(&csa_ie, 0, sizeof(csa_ie)); | 1074 | memset(&csa_ie, 0, sizeof(csa_ie)); |
1075 | res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band, | 1075 | res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, |
1076 | ifmgd->flags, | 1076 | ifmgd->flags, |
1077 | ifmgd->associated->bssid, &csa_ie); | 1077 | ifmgd->associated->bssid, &csa_ie); |
1078 | if (res < 0) | 1078 | if (res < 0) |
@@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1168 | ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); | 1168 | ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); |
1169 | else | 1169 | else |
1170 | mod_timer(&ifmgd->chswitch_timer, | 1170 | mod_timer(&ifmgd->chswitch_timer, |
1171 | TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval)); | 1171 | TU_TO_EXP_TIME((csa_ie.count - 1) * |
1172 | cbss->beacon_interval)); | ||
1172 | } | 1173 | } |
1173 | 1174 | ||
1174 | static bool | 1175 | static bool |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index df90ce2db00c..408fd8ab4eef 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -252,19 +252,16 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, | |||
252 | cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; | 252 | cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; |
253 | cur_prob = mi->groups[cur_group].rates[cur_idx].probability; | 253 | cur_prob = mi->groups[cur_group].rates[cur_idx].probability; |
254 | 254 | ||
255 | tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; | 255 | do { |
256 | tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; | ||
257 | tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | ||
258 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; | ||
259 | |||
260 | while (j > 0 && (cur_thr > tmp_thr || | ||
261 | (cur_thr == tmp_thr && cur_prob > tmp_prob))) { | ||
262 | j--; | ||
263 | tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; | 256 | tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; |
264 | tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; | 257 | tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; |
265 | tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | 258 | tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; |
266 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; | 259 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; |
267 | } | 260 | if (cur_thr < tmp_thr || |
261 | (cur_thr == tmp_thr && cur_prob <= tmp_prob)) | ||
262 | break; | ||
263 | j--; | ||
264 | } while (j > 0); | ||
268 | 265 | ||
269 | if (j < MAX_THR_RATES - 1) { | 266 | if (j < MAX_THR_RATES - 1) { |
270 | memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * | 267 | memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index b04ca4049c95..a37f9af634cb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1678 | sc = le16_to_cpu(hdr->seq_ctrl); | 1678 | sc = le16_to_cpu(hdr->seq_ctrl); |
1679 | frag = sc & IEEE80211_SCTL_FRAG; | 1679 | frag = sc & IEEE80211_SCTL_FRAG; |
1680 | 1680 | ||
1681 | if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || | 1681 | if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) |
1682 | is_multicast_ether_addr(hdr->addr1))) { | 1682 | goto out; |
1683 | /* not fragmented */ | 1683 | |
1684 | if (is_multicast_ether_addr(hdr->addr1)) { | ||
1685 | rx->local->dot11MulticastReceivedFrameCount++; | ||
1684 | goto out; | 1686 | goto out; |
1685 | } | 1687 | } |
1688 | |||
1686 | I802_DEBUG_INC(rx->local->rx_handlers_fragments); | 1689 | I802_DEBUG_INC(rx->local->rx_handlers_fragments); |
1687 | 1690 | ||
1688 | if (skb_linearize(rx->skb)) | 1691 | if (skb_linearize(rx->skb)) |
@@ -1775,10 +1778,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1775 | out: | 1778 | out: |
1776 | if (rx->sta) | 1779 | if (rx->sta) |
1777 | rx->sta->rx_packets++; | 1780 | rx->sta->rx_packets++; |
1778 | if (is_multicast_ether_addr(hdr->addr1)) | 1781 | ieee80211_led_rx(rx->local); |
1779 | rx->local->dot11MulticastReceivedFrameCount++; | ||
1780 | else | ||
1781 | ieee80211_led_rx(rx->local); | ||
1782 | return RX_CONTINUE; | 1782 | return RX_CONTINUE; |
1783 | } | 1783 | } |
1784 | 1784 | ||
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index 6ab009070084..efeba56c913b 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include "wme.h" | 22 | #include "wme.h" |
23 | 23 | ||
24 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | 24 | int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, |
25 | struct ieee802_11_elems *elems, bool beacon, | 25 | struct ieee802_11_elems *elems, |
26 | enum ieee80211_band current_band, | 26 | enum ieee80211_band current_band, |
27 | u32 sta_flags, u8 *bssid, | 27 | u32 sta_flags, u8 *bssid, |
28 | struct ieee80211_csa_ie *csa_ie) | 28 | struct ieee80211_csa_ie *csa_ie) |
@@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | |||
91 | return -EINVAL; | 91 | return -EINVAL; |
92 | } | 92 | } |
93 | 93 | ||
94 | if (!beacon && sec_chan_offs) { | 94 | if (sec_chan_offs) { |
95 | secondary_channel_offset = sec_chan_offs->sec_chan_offs; | 95 | secondary_channel_offset = sec_chan_offs->sec_chan_offs; |
96 | } else if (beacon && ht_oper) { | ||
97 | secondary_channel_offset = | ||
98 | ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET; | ||
99 | } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { | 96 | } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { |
100 | /* If it's not a beacon, HT is enabled and the IE not present, | 97 | /* If the secondary channel offset IE is not present, |
101 | * it's 20 MHz, 802.11-2012 8.5.2.6: | 98 | * we can't know what's the post-CSA offset, so the |
102 | * This element [the Secondary Channel Offset Element] is | 99 | * best we can do is use 20MHz. |
103 | * present when switching to a 40 MHz channel. It may be | 100 | */ |
104 | * present when switching to a 20 MHz channel (in which | ||
105 | * case the secondary channel offset is set to SCN). | ||
106 | */ | ||
107 | secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; | 101 | secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; |
108 | } | 102 | } |
109 | 103 | ||
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 86f9d76b1464..d259da3ce67a 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
@@ -1863,6 +1863,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) | |||
1863 | if (*op < IP_SET_OP_VERSION) { | 1863 | if (*op < IP_SET_OP_VERSION) { |
1864 | /* Check the version at the beginning of operations */ | 1864 | /* Check the version at the beginning of operations */ |
1865 | struct ip_set_req_version *req_version = data; | 1865 | struct ip_set_req_version *req_version = data; |
1866 | |||
1867 | if (*len < sizeof(struct ip_set_req_version)) { | ||
1868 | ret = -EINVAL; | ||
1869 | goto done; | ||
1870 | } | ||
1871 | |||
1866 | if (req_version->version != IPSET_PROTOCOL) { | 1872 | if (req_version->version != IPSET_PROTOCOL) { |
1867 | ret = -EPROTO; | 1873 | ret = -EPROTO; |
1868 | goto done; | 1874 | goto done; |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 437a3663ad03..bd90bf8107da 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -846,6 +846,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, | |||
846 | new_skb = skb_realloc_headroom(skb, max_headroom); | 846 | new_skb = skb_realloc_headroom(skb, max_headroom); |
847 | if (!new_skb) | 847 | if (!new_skb) |
848 | goto error; | 848 | goto error; |
849 | if (skb->sk) | ||
850 | skb_set_owner_w(new_skb, skb->sk); | ||
849 | consume_skb(skb); | 851 | consume_skb(skb); |
850 | skb = new_skb; | 852 | skb = new_skb; |
851 | } | 853 | } |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 11ab4b078f3b..66e8425dbfe7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -3484,13 +3484,8 @@ static void nft_chain_commit_update(struct nft_trans *trans) | |||
3484 | } | 3484 | } |
3485 | } | 3485 | } |
3486 | 3486 | ||
3487 | /* Schedule objects for release via rcu to make sure no packets are accesing | 3487 | static void nf_tables_commit_release(struct nft_trans *trans) |
3488 | * removed rules. | ||
3489 | */ | ||
3490 | static void nf_tables_commit_release_rcu(struct rcu_head *rt) | ||
3491 | { | 3488 | { |
3492 | struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head); | ||
3493 | |||
3494 | switch (trans->msg_type) { | 3489 | switch (trans->msg_type) { |
3495 | case NFT_MSG_DELTABLE: | 3490 | case NFT_MSG_DELTABLE: |
3496 | nf_tables_table_destroy(&trans->ctx); | 3491 | nf_tables_table_destroy(&trans->ctx); |
@@ -3612,10 +3607,11 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
3612 | } | 3607 | } |
3613 | } | 3608 | } |
3614 | 3609 | ||
3610 | synchronize_rcu(); | ||
3611 | |||
3615 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | 3612 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { |
3616 | list_del(&trans->list); | 3613 | list_del(&trans->list); |
3617 | trans->ctx.nla = NULL; | 3614 | nf_tables_commit_release(trans); |
3618 | call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu); | ||
3619 | } | 3615 | } |
3620 | 3616 | ||
3621 | nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); | 3617 | nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); |
@@ -3623,13 +3619,8 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
3623 | return 0; | 3619 | return 0; |
3624 | } | 3620 | } |
3625 | 3621 | ||
3626 | /* Schedule objects for release via rcu to make sure no packets are accesing | 3622 | static void nf_tables_abort_release(struct nft_trans *trans) |
3627 | * aborted rules. | ||
3628 | */ | ||
3629 | static void nf_tables_abort_release_rcu(struct rcu_head *rt) | ||
3630 | { | 3623 | { |
3631 | struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head); | ||
3632 | |||
3633 | switch (trans->msg_type) { | 3624 | switch (trans->msg_type) { |
3634 | case NFT_MSG_NEWTABLE: | 3625 | case NFT_MSG_NEWTABLE: |
3635 | nf_tables_table_destroy(&trans->ctx); | 3626 | nf_tables_table_destroy(&trans->ctx); |
@@ -3725,11 +3716,12 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3725 | } | 3716 | } |
3726 | } | 3717 | } |
3727 | 3718 | ||
3719 | synchronize_rcu(); | ||
3720 | |||
3728 | list_for_each_entry_safe_reverse(trans, next, | 3721 | list_for_each_entry_safe_reverse(trans, next, |
3729 | &net->nft.commit_list, list) { | 3722 | &net->nft.commit_list, list) { |
3730 | list_del(&trans->list); | 3723 | list_del(&trans->list); |
3731 | trans->ctx.nla = NULL; | 3724 | nf_tables_abort_release(trans); |
3732 | call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu); | ||
3733 | } | 3725 | } |
3734 | 3726 | ||
3735 | return 0; | 3727 | return 0; |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 6c5a915cfa75..13c2e17bbe27 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -47,6 +47,8 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = { | |||
47 | [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP, | 47 | [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP, |
48 | [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP, | 48 | [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP, |
49 | [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, | 49 | [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, |
50 | [NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES, | ||
51 | [NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT, | ||
50 | }; | 52 | }; |
51 | 53 | ||
52 | void nfnl_lock(__u8 subsys_id) | 54 | void nfnl_lock(__u8 subsys_id) |
@@ -464,7 +466,12 @@ static void nfnetlink_rcv(struct sk_buff *skb) | |||
464 | static int nfnetlink_bind(int group) | 466 | static int nfnetlink_bind(int group) |
465 | { | 467 | { |
466 | const struct nfnetlink_subsystem *ss; | 468 | const struct nfnetlink_subsystem *ss; |
467 | int type = nfnl_group2type[group]; | 469 | int type; |
470 | |||
471 | if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) | ||
472 | return -EINVAL; | ||
473 | |||
474 | type = nfnl_group2type[group]; | ||
468 | 475 | ||
469 | rcu_read_lock(); | 476 | rcu_read_lock(); |
470 | ss = nfnetlink_get_subsys(type); | 477 | ss = nfnetlink_get_subsys(type); |
@@ -514,6 +521,9 @@ static int __init nfnetlink_init(void) | |||
514 | { | 521 | { |
515 | int i; | 522 | int i; |
516 | 523 | ||
524 | for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++) | ||
525 | BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE); | ||
526 | |||
517 | for (i=0; i<NFNL_SUBSYS_COUNT; i++) | 527 | for (i=0; i<NFNL_SUBSYS_COUNT; i++) |
518 | mutex_init(&table[i].mutex); | 528 | mutex_init(&table[i].mutex); |
519 | 529 | ||
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 9d6d6f60a80f..265e190f2218 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -21,45 +21,17 @@ | |||
21 | #include <linux/netfilter_ipv6/ip6_tables.h> | 21 | #include <linux/netfilter_ipv6/ip6_tables.h> |
22 | #include <net/netfilter/nf_tables.h> | 22 | #include <net/netfilter/nf_tables.h> |
23 | 23 | ||
24 | static const struct { | ||
25 | const char *name; | ||
26 | u8 type; | ||
27 | } table_to_chaintype[] = { | ||
28 | { "filter", NFT_CHAIN_T_DEFAULT }, | ||
29 | { "raw", NFT_CHAIN_T_DEFAULT }, | ||
30 | { "security", NFT_CHAIN_T_DEFAULT }, | ||
31 | { "mangle", NFT_CHAIN_T_ROUTE }, | ||
32 | { "nat", NFT_CHAIN_T_NAT }, | ||
33 | { }, | ||
34 | }; | ||
35 | |||
36 | static int nft_compat_table_to_chaintype(const char *table) | ||
37 | { | ||
38 | int i; | ||
39 | |||
40 | for (i = 0; table_to_chaintype[i].name != NULL; i++) { | ||
41 | if (strcmp(table_to_chaintype[i].name, table) == 0) | ||
42 | return table_to_chaintype[i].type; | ||
43 | } | ||
44 | |||
45 | return -1; | ||
46 | } | ||
47 | |||
48 | static int nft_compat_chain_validate_dependency(const char *tablename, | 24 | static int nft_compat_chain_validate_dependency(const char *tablename, |
49 | const struct nft_chain *chain) | 25 | const struct nft_chain *chain) |
50 | { | 26 | { |
51 | enum nft_chain_type type; | ||
52 | const struct nft_base_chain *basechain; | 27 | const struct nft_base_chain *basechain; |
53 | 28 | ||
54 | if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) | 29 | if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) |
55 | return 0; | 30 | return 0; |
56 | 31 | ||
57 | type = nft_compat_table_to_chaintype(tablename); | ||
58 | if (type < 0) | ||
59 | return -EINVAL; | ||
60 | |||
61 | basechain = nft_base_chain(chain); | 32 | basechain = nft_base_chain(chain); |
62 | if (basechain->type->type != type) | 33 | if (strcmp(tablename, "nat") == 0 && |
34 | basechain->type->type != NFT_CHAIN_T_NAT) | ||
63 | return -EINVAL; | 35 | return -EINVAL; |
64 | 36 | ||
65 | return 0; | 37 | return 0; |
@@ -117,7 +89,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
117 | struct xt_target *target, void *info, | 89 | struct xt_target *target, void *info, |
118 | union nft_entry *entry, u8 proto, bool inv) | 90 | union nft_entry *entry, u8 proto, bool inv) |
119 | { | 91 | { |
120 | par->net = &init_net; | 92 | par->net = ctx->net; |
121 | par->table = ctx->table->name; | 93 | par->table = ctx->table->name; |
122 | switch (ctx->afi->family) { | 94 | switch (ctx->afi->family) { |
123 | case AF_INET: | 95 | case AF_INET: |
@@ -324,7 +296,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
324 | struct xt_match *match, void *info, | 296 | struct xt_match *match, void *info, |
325 | union nft_entry *entry, u8 proto, bool inv) | 297 | union nft_entry *entry, u8 proto, bool inv) |
326 | { | 298 | { |
327 | par->net = &init_net; | 299 | par->net = ctx->net; |
328 | par->table = ctx->table->name; | 300 | par->table = ctx->table->name; |
329 | switch (ctx->afi->family) { | 301 | switch (ctx->afi->family) { |
330 | case AF_INET: | 302 | case AF_INET: |
@@ -374,7 +346,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
374 | union nft_entry e = {}; | 346 | union nft_entry e = {}; |
375 | int ret; | 347 | int ret; |
376 | 348 | ||
377 | ret = nft_compat_chain_validate_dependency(match->name, ctx->chain); | 349 | ret = nft_compat_chain_validate_dependency(match->table, ctx->chain); |
378 | if (ret < 0) | 350 | if (ret < 0) |
379 | goto err; | 351 | goto err; |
380 | 352 | ||
@@ -448,7 +420,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, | |||
448 | if (!(hook_mask & match->hooks)) | 420 | if (!(hook_mask & match->hooks)) |
449 | return -EINVAL; | 421 | return -EINVAL; |
450 | 422 | ||
451 | ret = nft_compat_chain_validate_dependency(match->name, | 423 | ret = nft_compat_chain_validate_dependency(match->table, |
452 | ctx->chain); | 424 | ctx->chain); |
453 | if (ret < 0) | 425 | if (ret < 0) |
454 | return ret; | 426 | return ret; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index f1de72de273e..0007b8180397 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1440,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups, | |||
1440 | return; | 1440 | return; |
1441 | 1441 | ||
1442 | for (undo = 0; undo < group; undo++) | 1442 | for (undo = 0; undo < group; undo++) |
1443 | if (test_bit(group, &groups)) | 1443 | if (test_bit(undo, &groups)) |
1444 | nlk->netlink_unbind(undo); | 1444 | nlk->netlink_unbind(undo); |
1445 | } | 1445 | } |
1446 | 1446 | ||
@@ -1492,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
1492 | netlink_insert(sk, net, nladdr->nl_pid) : | 1492 | netlink_insert(sk, net, nladdr->nl_pid) : |
1493 | netlink_autobind(sock); | 1493 | netlink_autobind(sock); |
1494 | if (err) { | 1494 | if (err) { |
1495 | netlink_unbind(nlk->ngroups - 1, groups, nlk); | 1495 | netlink_unbind(nlk->ngroups, groups, nlk); |
1496 | return err; | 1496 | return err; |
1497 | } | 1497 | } |
1498 | } | 1498 | } |
@@ -2509,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, | |||
2509 | nl_table[unit].module = module; | 2509 | nl_table[unit].module = module; |
2510 | if (cfg) { | 2510 | if (cfg) { |
2511 | nl_table[unit].bind = cfg->bind; | 2511 | nl_table[unit].bind = cfg->bind; |
2512 | nl_table[unit].unbind = cfg->unbind; | ||
2512 | nl_table[unit].flags = cfg->flags; | 2513 | nl_table[unit].flags = cfg->flags; |
2513 | if (cfg->compare) | 2514 | if (cfg->compare) |
2514 | nl_table[unit].compare = cfg->compare; | 2515 | nl_table[unit].compare = cfg->compare; |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 006886dbee36..8c4229b11c34 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -246,11 +246,11 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, | |||
246 | { | 246 | { |
247 | int transport_len = skb->len - skb_transport_offset(skb); | 247 | int transport_len = skb->len - skb_transport_offset(skb); |
248 | 248 | ||
249 | if (l4_proto == IPPROTO_TCP) { | 249 | if (l4_proto == NEXTHDR_TCP) { |
250 | if (likely(transport_len >= sizeof(struct tcphdr))) | 250 | if (likely(transport_len >= sizeof(struct tcphdr))) |
251 | inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, | 251 | inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, |
252 | addr, new_addr, 1); | 252 | addr, new_addr, 1); |
253 | } else if (l4_proto == IPPROTO_UDP) { | 253 | } else if (l4_proto == NEXTHDR_UDP) { |
254 | if (likely(transport_len >= sizeof(struct udphdr))) { | 254 | if (likely(transport_len >= sizeof(struct udphdr))) { |
255 | struct udphdr *uh = udp_hdr(skb); | 255 | struct udphdr *uh = udp_hdr(skb); |
256 | 256 | ||
@@ -261,6 +261,10 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, | |||
261 | uh->check = CSUM_MANGLED_0; | 261 | uh->check = CSUM_MANGLED_0; |
262 | } | 262 | } |
263 | } | 263 | } |
264 | } else if (l4_proto == NEXTHDR_ICMP) { | ||
265 | if (likely(transport_len >= sizeof(struct icmp6hdr))) | ||
266 | inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum, | ||
267 | skb, addr, new_addr, 1); | ||
264 | } | 268 | } |
265 | } | 269 | } |
266 | 270 | ||
@@ -722,8 +726,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, | |||
722 | 726 | ||
723 | case OVS_ACTION_ATTR_SAMPLE: | 727 | case OVS_ACTION_ATTR_SAMPLE: |
724 | err = sample(dp, skb, key, a); | 728 | err = sample(dp, skb, key, a); |
725 | if (unlikely(err)) /* skb already freed. */ | ||
726 | return err; | ||
727 | break; | 729 | break; |
728 | } | 730 | } |
729 | 731 | ||
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e6d7255183eb..f9e556b56086 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -1265,7 +1265,7 @@ static size_t ovs_dp_cmd_msg_size(void) | |||
1265 | return msgsize; | 1265 | return msgsize; |
1266 | } | 1266 | } |
1267 | 1267 | ||
1268 | /* Called with ovs_mutex or RCU read lock. */ | 1268 | /* Called with ovs_mutex. */ |
1269 | static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, | 1269 | static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, |
1270 | u32 portid, u32 seq, u32 flags, u8 cmd) | 1270 | u32 portid, u32 seq, u32 flags, u8 cmd) |
1271 | { | 1271 | { |
@@ -1555,7 +1555,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
1555 | if (!reply) | 1555 | if (!reply) |
1556 | return -ENOMEM; | 1556 | return -ENOMEM; |
1557 | 1557 | ||
1558 | rcu_read_lock(); | 1558 | ovs_lock(); |
1559 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); | 1559 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); |
1560 | if (IS_ERR(dp)) { | 1560 | if (IS_ERR(dp)) { |
1561 | err = PTR_ERR(dp); | 1561 | err = PTR_ERR(dp); |
@@ -1564,12 +1564,12 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
1564 | err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, | 1564 | err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, |
1565 | info->snd_seq, 0, OVS_DP_CMD_NEW); | 1565 | info->snd_seq, 0, OVS_DP_CMD_NEW); |
1566 | BUG_ON(err < 0); | 1566 | BUG_ON(err < 0); |
1567 | rcu_read_unlock(); | 1567 | ovs_unlock(); |
1568 | 1568 | ||
1569 | return genlmsg_reply(reply, info); | 1569 | return genlmsg_reply(reply, info); |
1570 | 1570 | ||
1571 | err_unlock_free: | 1571 | err_unlock_free: |
1572 | rcu_read_unlock(); | 1572 | ovs_unlock(); |
1573 | kfree_skb(reply); | 1573 | kfree_skb(reply); |
1574 | return err; | 1574 | return err; |
1575 | } | 1575 | } |
@@ -1581,8 +1581,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1581 | int skip = cb->args[0]; | 1581 | int skip = cb->args[0]; |
1582 | int i = 0; | 1582 | int i = 0; |
1583 | 1583 | ||
1584 | rcu_read_lock(); | 1584 | ovs_lock(); |
1585 | list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) { | 1585 | list_for_each_entry(dp, &ovs_net->dps, list_node) { |
1586 | if (i >= skip && | 1586 | if (i >= skip && |
1587 | ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, | 1587 | ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, |
1588 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 1588 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
@@ -1590,7 +1590,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1590 | break; | 1590 | break; |
1591 | i++; | 1591 | i++; |
1592 | } | 1592 | } |
1593 | rcu_read_unlock(); | 1593 | ovs_unlock(); |
1594 | 1594 | ||
1595 | cb->args[0] = i; | 1595 | cb->args[0] = i; |
1596 | 1596 | ||
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 939bcb32100f..089b195c064a 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -145,7 +145,7 @@ static bool match_validate(const struct sw_flow_match *match, | |||
145 | if (match->key->eth.type == htons(ETH_P_ARP) | 145 | if (match->key->eth.type == htons(ETH_P_ARP) |
146 | || match->key->eth.type == htons(ETH_P_RARP)) { | 146 | || match->key->eth.type == htons(ETH_P_RARP)) { |
147 | key_expected |= 1 << OVS_KEY_ATTR_ARP; | 147 | key_expected |= 1 << OVS_KEY_ATTR_ARP; |
148 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | 148 | if (match->mask && (match->mask->key.tp.src == htons(0xff))) |
149 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; | 149 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; |
150 | } | 150 | } |
151 | 151 | ||
@@ -689,6 +689,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | |||
689 | ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); | 689 | ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); |
690 | return -EINVAL; | 690 | return -EINVAL; |
691 | } | 691 | } |
692 | |||
693 | if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) { | ||
694 | OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n", | ||
695 | ntohl(ipv6_key->ipv6_label), (1 << 20) - 1); | ||
696 | return -EINVAL; | ||
697 | } | ||
698 | |||
692 | SW_FLOW_KEY_PUT(match, ipv6.label, | 699 | SW_FLOW_KEY_PUT(match, ipv6.label, |
693 | ipv6_key->ipv6_label, is_mask); | 700 | ipv6_key->ipv6_label, is_mask); |
694 | SW_FLOW_KEY_PUT(match, ip.proto, | 701 | SW_FLOW_KEY_PUT(match, ip.proto, |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 87d20f48ff06..07c04a841ba0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync) | |||
378 | __unregister_prot_hook(sk, sync); | 378 | __unregister_prot_hook(sk, sync); |
379 | } | 379 | } |
380 | 380 | ||
381 | static inline __pure struct page *pgv_to_page(void *addr) | 381 | static inline struct page * __pure pgv_to_page(void *addr) |
382 | { | 382 | { |
383 | if (is_vmalloc_addr(addr)) | 383 | if (is_vmalloc_addr(addr)) |
384 | return vmalloc_to_page(addr); | 384 | return vmalloc_to_page(addr); |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 0e8529113dc5..fb7976aee61c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, | |||
862 | list_add(&cur_key->key_list, sh_keys); | 862 | list_add(&cur_key->key_list, sh_keys); |
863 | 863 | ||
864 | cur_key->key = key; | 864 | cur_key->key = key; |
865 | sctp_auth_key_hold(key); | ||
866 | |||
867 | return 0; | 865 | return 0; |
868 | nomem: | 866 | nomem: |
869 | if (!replace) | 867 | if (!replace) |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ab734be8cb20..9f32741abb1c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2609,6 +2609,9 @@ do_addr_param: | |||
2609 | addr_param = param.v + sizeof(sctp_addip_param_t); | 2609 | addr_param = param.v + sizeof(sctp_addip_param_t); |
2610 | 2610 | ||
2611 | af = sctp_get_af_specific(param_type2af(param.p->type)); | 2611 | af = sctp_get_af_specific(param_type2af(param.p->type)); |
2612 | if (af == NULL) | ||
2613 | break; | ||
2614 | |||
2612 | af->from_addr_param(&addr, addr_param, | 2615 | af->from_addr_param(&addr, addr_param, |
2613 | htons(asoc->peer.port), 0); | 2616 | htons(asoc->peer.port), 0); |
2614 | 2617 | ||
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index afb292cd797d..53ed8d3f8897 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -1353,6 +1353,7 @@ gss_stringify_acceptor(struct rpc_cred *cred) | |||
1353 | char *string = NULL; | 1353 | char *string = NULL; |
1354 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); | 1354 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); |
1355 | struct gss_cl_ctx *ctx; | 1355 | struct gss_cl_ctx *ctx; |
1356 | unsigned int len; | ||
1356 | struct xdr_netobj *acceptor; | 1357 | struct xdr_netobj *acceptor; |
1357 | 1358 | ||
1358 | rcu_read_lock(); | 1359 | rcu_read_lock(); |
@@ -1360,15 +1361,39 @@ gss_stringify_acceptor(struct rpc_cred *cred) | |||
1360 | if (!ctx) | 1361 | if (!ctx) |
1361 | goto out; | 1362 | goto out; |
1362 | 1363 | ||
1363 | acceptor = &ctx->gc_acceptor; | 1364 | len = ctx->gc_acceptor.len; |
1365 | rcu_read_unlock(); | ||
1364 | 1366 | ||
1365 | /* no point if there's no string */ | 1367 | /* no point if there's no string */ |
1366 | if (!acceptor->len) | 1368 | if (!len) |
1367 | goto out; | 1369 | return NULL; |
1368 | 1370 | realloc: | |
1369 | string = kmalloc(acceptor->len + 1, GFP_KERNEL); | 1371 | string = kmalloc(len + 1, GFP_KERNEL); |
1370 | if (!string) | 1372 | if (!string) |
1373 | return NULL; | ||
1374 | |||
1375 | rcu_read_lock(); | ||
1376 | ctx = rcu_dereference(gss_cred->gc_ctx); | ||
1377 | |||
1378 | /* did the ctx disappear or was it replaced by one with no acceptor? */ | ||
1379 | if (!ctx || !ctx->gc_acceptor.len) { | ||
1380 | kfree(string); | ||
1381 | string = NULL; | ||
1371 | goto out; | 1382 | goto out; |
1383 | } | ||
1384 | |||
1385 | acceptor = &ctx->gc_acceptor; | ||
1386 | |||
1387 | /* | ||
1388 | * Did we find a new acceptor that's longer than the original? Allocate | ||
1389 | * a longer buffer and try again. | ||
1390 | */ | ||
1391 | if (len < acceptor->len) { | ||
1392 | len = acceptor->len; | ||
1393 | rcu_read_unlock(); | ||
1394 | kfree(string); | ||
1395 | goto realloc; | ||
1396 | } | ||
1372 | 1397 | ||
1373 | memcpy(string, acceptor->data, acceptor->len); | 1398 | memcpy(string, acceptor->data, acceptor->len); |
1374 | string[acceptor->len] = '\0'; | 1399 | string[acceptor->len] = '\0'; |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 3f959c681885..f9c052d508f0 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
1019 | xid = *p++; | 1019 | xid = *p++; |
1020 | calldir = *p; | 1020 | calldir = *p; |
1021 | 1021 | ||
1022 | if (bc_xprt) | 1022 | if (!bc_xprt) |
1023 | req = xprt_lookup_rqst(bc_xprt, xid); | ||
1024 | |||
1025 | if (!req) { | ||
1026 | printk(KERN_NOTICE | ||
1027 | "%s: Got unrecognized reply: " | ||
1028 | "calldir 0x%x xpt_bc_xprt %p xid %08x\n", | ||
1029 | __func__, ntohl(calldir), | ||
1030 | bc_xprt, ntohl(xid)); | ||
1031 | return -EAGAIN; | 1023 | return -EAGAIN; |
1032 | } | 1024 | spin_lock_bh(&bc_xprt->transport_lock); |
1025 | req = xprt_lookup_rqst(bc_xprt, xid); | ||
1026 | if (!req) | ||
1027 | goto unlock_notfound; | ||
1033 | 1028 | ||
1034 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); | 1029 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); |
1035 | /* | 1030 | /* |
@@ -1040,11 +1035,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
1040 | dst = &req->rq_private_buf.head[0]; | 1035 | dst = &req->rq_private_buf.head[0]; |
1041 | src = &rqstp->rq_arg.head[0]; | 1036 | src = &rqstp->rq_arg.head[0]; |
1042 | if (dst->iov_len < src->iov_len) | 1037 | if (dst->iov_len < src->iov_len) |
1043 | return -EAGAIN; /* whatever; just giving up. */ | 1038 | goto unlock_eagain; /* whatever; just giving up. */ |
1044 | memcpy(dst->iov_base, src->iov_base, src->iov_len); | 1039 | memcpy(dst->iov_base, src->iov_base, src->iov_len); |
1045 | xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); | 1040 | xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); |
1046 | rqstp->rq_arg.len = 0; | 1041 | rqstp->rq_arg.len = 0; |
1042 | spin_unlock_bh(&bc_xprt->transport_lock); | ||
1047 | return 0; | 1043 | return 0; |
1044 | unlock_notfound: | ||
1045 | printk(KERN_NOTICE | ||
1046 | "%s: Got unrecognized reply: " | ||
1047 | "calldir 0x%x xpt_bc_xprt %p xid %08x\n", | ||
1048 | __func__, ntohl(calldir), | ||
1049 | bc_xprt, ntohl(xid)); | ||
1050 | unlock_eagain: | ||
1051 | spin_unlock_bh(&bc_xprt->transport_lock); | ||
1052 | return -EAGAIN; | ||
1048 | } | 1053 | } |
1049 | 1054 | ||
1050 | static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) | 1055 | static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) |