author     Ingo Molnar <mingo@kernel.org>  2017-09-04 05:01:18 -0400
committer  Ingo Molnar <mingo@kernel.org>  2017-09-04 05:01:18 -0400
commit     edc2988c548db05e33b921fed15821010bc74895
tree       b35860428acea35e5866d4cf007519ed943a85de /net
parent     d82fed75294229abc9d757f08a4817febae6c4f4
parent     81a84ad3cb5711cec79f4dd53a4ce026b092c432
Merge branch 'linus' into locking/core, to fix up conflicts
Conflicts:
        mm/page_alloc.c
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'net')
58 files changed, 363 insertions(+), 248 deletions(-)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 861ae2a165f4..5a7be3bddfa9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -53,6 +53,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
         brstats->tx_bytes += skb->len;
         u64_stats_update_end(&brstats->syncp);
 
+#ifdef CONFIG_NET_SWITCHDEV
+        skb->offload_fwd_mark = 0;
+#endif
         BR_INPUT_SKB_CB(skb)->brdev = dev;
 
         skb_reset_mac_header(skb);
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 181a44d0f1da..f6b1c7de059d 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -115,7 +115,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
 void
 br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
 {
-        if (!fdb->added_by_user)
+        if (!fdb->added_by_user || !fdb->dst)
                 return;
 
         switch (type) {
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a21ca8dee5ea..8c2f4489ff8f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -362,7 +362,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
         if (flags & MSG_PEEK) {
                 err = -ENOENT;
                 spin_lock_bh(&sk_queue->lock);
-                if (skb == skb_peek(sk_queue)) {
+                if (skb->next) {
                         __skb_unlink(skb, sk_queue);
                         refcount_dec(&skb->users);
                         if (destructor)
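The new test relies on list membership rather than queue position: an skb that is still linked into an sk_buff_head has a non-NULL ->next, and __skb_unlink() clears both link pointers. A minimal sketch of the pattern, with q standing in for any sk_buff_head:

        spin_lock_bh(&q->lock);
        if (skb->next)                  /* skb is still linked into q */
                __skb_unlink(skb, q);   /* clears skb->next and skb->prev */
        spin_unlock_bh(&q->lock);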
diff --git a/net/core/dev.c b/net/core/dev.c
index ce15a06d5558..86b4b0a79e7a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5289,6 +5289,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
          * Ideally, a new ndo_busy_poll_stop() could avoid another round.
          */
         rc = napi->poll(napi, BUSY_POLL_BUDGET);
+        trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
         netpoll_poll_unlock(have_poll_lock);
         if (rc == BUSY_POLL_BUDGET)
                 __napi_schedule(napi);
@@ -5667,12 +5668,13 @@ EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
  * Find out if a device is linked to an upper device and return true in case
  * it is. The caller must hold the RTNL lock.
  */
-static bool netdev_has_any_upper_dev(struct net_device *dev)
+bool netdev_has_any_upper_dev(struct net_device *dev)
 {
         ASSERT_RTNL();
 
         return !list_empty(&dev->adj_list.upper);
 }
+EXPORT_SYMBOL(netdev_has_any_upper_dev);
 
 /**
  * netdev_master_upper_dev_get - Get master upper device
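Exporting netdev_has_any_upper_dev() lets modules refuse configurations on devices that are already enslaved. A hedged sketch of a hypothetical module-side caller (the helper is real, the surrounding policy check is illustrative):

        ASSERT_RTNL();                          /* helper requires the RTNL lock */
        if (netdev_has_any_upper_dev(dev))
                return -EBUSY;                  /* already has an upper/master device */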
diff --git a/net/core/filter.c b/net/core/filter.c
index 6280a602604c..169974998c76 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2836,15 +2836,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                    sk->sk_prot->setsockopt == tcp_setsockopt) {
                 if (optname == TCP_CONGESTION) {
                         char name[TCP_CA_NAME_MAX];
+                        bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
 
                         strncpy(name, optval, min_t(long, optlen,
                                                     TCP_CA_NAME_MAX-1));
                         name[TCP_CA_NAME_MAX-1] = 0;
-                        ret = tcp_set_congestion_control(sk, name, false);
-                        if (!ret && bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
-                                /* replacing an existing ca */
-                                tcp_reinit_congestion_control(sk,
-                                        inet_csk(sk)->icsk_ca_ops);
+                        ret = tcp_set_congestion_control(sk, name, false, reinit);
                 } else {
                         struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2872,7 +2869,6 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                         ret = -EINVAL;
                 }
         }
-        ret = -EINVAL;
 #endif
         } else {
                 ret = -EINVAL;
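The reinit flag is derived from the sock_ops hook: callbacks past BPF_SOCK_OPS_NEEDS_ECN run on sockets whose congestion control has already been initialized, so a replacement must be re-initialized rather than merely swapped in. A sketch of the BPF program side that exercises this path, following the usage in the kernel samples (skops is the conventional context name; assumed here):

        /* inside a BPF_PROG_TYPE_SOCK_OPS program */
        char cong[TCP_CA_NAME_MAX] = "cubic";

        bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cong, sizeof(cong));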
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f990eb8b30a9..e07556606284 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1363,18 +1363,20 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 EXPORT_SYMBOL(skb_copy_expand);
 
 /**
- * skb_pad - zero pad the tail of an skb
+ * __skb_pad - zero pad the tail of an skb
  * @skb: buffer to pad
  * @pad: space to pad
+ * @free_on_error: free buffer on error
  *
  * Ensure that a buffer is followed by a padding area that is zero
  * filled. Used by network drivers which may DMA or transfer data
  * beyond the buffer end onto the wire.
  *
- * May return error in out of memory cases. The skb is freed on error.
+ * May return error in out of memory cases. The skb is freed on error
+ * if @free_on_error is true.
  */
 
-int skb_pad(struct sk_buff *skb, int pad)
+int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
 {
         int err;
         int ntail;
@@ -1403,10 +1405,11 @@ int skb_pad(struct sk_buff *skb, int pad)
         return 0;
 
 free_skb:
-        kfree_skb(skb);
+        if (free_on_error)
+                kfree_skb(skb);
         return err;
 }
-EXPORT_SYMBOL(skb_pad);
+EXPORT_SYMBOL(__skb_pad);
 
 /**
  * pskb_put - add data to the tail of a potentially fragmented buffer
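skb_pad() itself presumably survives as a thin inline wrapper in the header, preserving the old free-on-error behavior for existing callers; a sketch of that companion change (assumed, since the header hunk is not part of this 'net'-limited view):

        static inline int skb_pad(struct sk_buff *skb, int pad)
        {
                return __skb_pad(skb, pad, true);   /* old behavior: free on error */
        }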
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index c442051d5a55..20bc9c56fca0 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -577,7 +577,7 @@ static int dsa_dst_parse(struct dsa_switch_tree *dst)
                         return err;
         }
 
-        if (!dst->cpu_dp->netdev) {
+        if (!dst->cpu_dp) {
                 pr_warn("Tree has no master device\n");
                 return -EINVAL;
         }
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index de66ca8e6201..fcd90f79458e 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -42,7 +42,8 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
         padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
 
         if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
-                if (skb_put_padto(skb, skb->len + padlen))
+                /* Let dsa_slave_xmit() free skb */
+                if (__skb_put_padto(skb, skb->len + padlen, false))
                         return NULL;
 
                 nskb = skb;
@@ -60,12 +61,13 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
                                        skb_transport_header(skb) - skb->head);
                 skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
 
-                if (skb_put_padto(nskb, nskb->len + padlen)) {
-                        kfree_skb(nskb);
+                /* Let skb_put_padto() free nskb, and let dsa_slave_xmit() free
+                 * skb
+                 */
+                if (skb_put_padto(nskb, nskb->len + padlen))
                         return NULL;
-                }
 
-                kfree_skb(skb);
+                consume_skb(skb);
         }
 
         tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index b09e56214005..9c7b1d74a5c6 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -40,7 +40,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
         skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
         skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
         skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
-        kfree_skb(skb);
+        consume_skb(skb);
 
         if (padlen) {
                 skb_put_zero(nskb, padlen);
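Both tagger changes swap kfree_skb() for consume_skb() where the original skb is released after its data has been copied into nskb. The two calls free the buffer identically; they differ only in how tracing and drop monitors classify the event. A sketch of the distinction (copy_failed is illustrative):

        if (copy_failed)
                kfree_skb(skb);         /* counted and traced as a packet drop */
        else
                consume_skb(skb);       /* normal end of life, not a drop */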
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 4e7bdb213cd0..172d8309f89e 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -314,7 +314,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
         hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
         ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
 
-        skb_put_padto(skb, ETH_ZLEN + HSR_HLEN);
+        if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
+                return;
 
         hsr_forward_skb(skb, master);
         return;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 0cbee0a666ff..df68963dc90a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -258,7 +258,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                 esp_output_udp_encap(x, skb, esp);
 
         if (!skb_cloned(skb)) {
-                if (tailen <= skb_availroom(skb)) {
+                if (tailen <= skb_tailroom(skb)) {
                         nfrags = 1;
                         trailer = skb;
                         tail = skb_tail_pointer(trailer);
@@ -292,8 +292,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 
                         kunmap_atomic(vaddr);
 
-                        spin_unlock_bh(&x->lock);
-
                         nfrags = skb_shinfo(skb)->nr_frags;
 
                         __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -301,6 +299,9 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                         skb_shinfo(skb)->nr_frags = ++nfrags;
 
                         pfrag->offset = pfrag->offset + allocsize;
+
+                        spin_unlock_bh(&x->lock);
+
                         nfrags++;
 
                         skb->len += tailen;
@@ -381,7 +382,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                              (unsigned char *)esph - skb->data,
                              assoclen + ivlen + esp->clen + alen);
         if (unlikely(err < 0))
-                goto error;
+                goto error_free;
 
         if (!esp->inplace) {
                 int allocsize;
@@ -392,7 +393,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                 spin_lock_bh(&x->lock);
                 if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                         spin_unlock_bh(&x->lock);
-                        goto error;
+                        goto error_free;
                 }
 
                 skb_shinfo(skb)->nr_frags = 1;
@@ -409,7 +410,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                              (unsigned char *)esph - skb->data,
                              assoclen + ivlen + esp->clen + alen);
                 if (unlikely(err < 0))
-                        goto error;
+                        goto error_free;
         }
 
         if ((x->props.flags & XFRM_STATE_ESN))
@@ -442,8 +443,9 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 
         if (sg != dsg)
                 esp_ssg_unref(x, tmp);
-        kfree(tmp);
 
+error_free:
+        kfree(tmp);
 error:
         return err;
 }
@@ -695,8 +697,10 @@ skip_cow:
 
         sg_init_table(sg, nfrags);
         err = skb_to_sgvec(skb, sg, 0, skb->len);
-        if (unlikely(err < 0))
+        if (unlikely(err < 0)) {
+                kfree(tmp);
                 goto out;
+        }
 
         skb->ip_summed = CHECKSUM_NONE;
 
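The new error_free label gives every failure path after the tmp allocation a single unwind point, the usual kernel goto-ladder shape where labels are taken in reverse order of acquisition. A minimal self-contained sketch of the pattern (names illustrative):

        static int example(size_t sz)
        {
                int err = -ENOMEM;
                void *tmp = kmalloc(sz, GFP_ATOMIC);

                if (!tmp)
                        goto error;             /* nothing to unwind yet */

                err = do_step(tmp);             /* illustrative helper */
                if (err < 0)
                        goto error_free;        /* tmp must now be freed */

                return 0;

        error_free:
                kfree(tmp);
        error:
                return err;
        }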
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index e0666016a764..50112324fa5c 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -257,7 +257,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
         esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
 
         err = esp_output_tail(x, skb, &esp);
-        if (err < 0)
+        if (err)
                 return err;
 
         secpath_reset(skb);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 0bc3c3d73e61..9e9d9afd18f7 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -268,14 +268,14 @@ unsigned int arpt_do_table(struct sk_buff *skb,
                         acpar.targinfo = t->data;
                         verdict = t->u.kernel.target->target(skb, &acpar);
 
-                        /* Target might have changed stuff. */
-                        arp = arp_hdr(skb);
-
-                        if (verdict == XT_CONTINUE)
+                        if (verdict == XT_CONTINUE) {
+                                /* Target might have changed stuff. */
+                                arp = arp_hdr(skb);
                                 e = arpt_next_entry(e);
-                        else
+                        } else {
                                 /* Verdict */
                                 break;
+                        }
                 } while (!acpar.hotdrop);
         xt_write_recseq_end(addend);
         local_bh_enable();
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2a55a40211cb..622ed2887cd5 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -352,13 +352,14 @@ ipt_do_table(struct sk_buff *skb,
                 acpar.targinfo = t->data;
 
                 verdict = t->u.kernel.target->target(skb, &acpar);
-                /* Target might have changed stuff. */
-                ip = ip_hdr(skb);
-                if (verdict == XT_CONTINUE)
+                if (verdict == XT_CONTINUE) {
+                        /* Target might have changed stuff. */
+                        ip = ip_hdr(skb);
                         e = ipt_next_entry(e);
-                else
+                } else {
                         /* Verdict */
                         break;
+                }
         } while (!acpar.hotdrop);
 
         xt_write_recseq_end(addend);
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 7d72decb80f9..efaa04dcc80e 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -117,7 +117,8 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
                  * functions are also incrementing the refcount on their own,
                  * so it's safe to remove the entry even if it's in use. */
 #ifdef CONFIG_PROC_FS
-                proc_remove(c->pde);
+                if (cn->procdir)
+                        proc_remove(c->pde);
 #endif
                 return;
         }
@@ -815,6 +816,7 @@ static void clusterip_net_exit(struct net *net)
 #ifdef CONFIG_PROC_FS
         struct clusterip_net *cn = net_generic(net, clusterip_net_id);
         proc_remove(cn->procdir);
+        cn->procdir = NULL;
 #endif
         nf_unregister_net_hook(net, &cip_arp_ops);
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 71ce33decd97..a3e91b552edc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2481,7 +2481,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                 name[val] = 0;
 
                 lock_sock(sk);
-                err = tcp_set_congestion_control(sk, name, true);
+                err = tcp_set_congestion_control(sk, name, true, true);
                 release_sock(sk);
                 return err;
         }
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index fde983f6376b..421ea1b918da 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -189,8 +189,8 @@ void tcp_init_congestion_control(struct sock *sk)
                 INET_ECN_dontxmit(sk);
 }
 
-void tcp_reinit_congestion_control(struct sock *sk,
-                                   const struct tcp_congestion_ops *ca)
+static void tcp_reinit_congestion_control(struct sock *sk,
+                                          const struct tcp_congestion_ops *ca)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -338,7 +338,7 @@ out:
  * tcp_reinit_congestion_control (if the current congestion control was
  * already initialized.
  */
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         const struct tcp_congestion_ops *ca;
@@ -360,9 +360,18 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
         if (!ca) {
                 err = -ENOENT;
         } else if (!load) {
-                icsk->icsk_ca_ops = ca;
-                if (!try_module_get(ca->owner))
-                        err = -EBUSY;
+                const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;
+
+                if (try_module_get(ca->owner)) {
+                        if (reinit) {
+                                tcp_reinit_congestion_control(sk, ca);
+                        } else {
+                                icsk->icsk_ca_ops = ca;
+                                module_put(old_ca->owner);
+                        }
+                } else {
+                        err = -EBUSY;
+                }
         } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
                      ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
                 err = -EPERM;
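With the extra parameter, the two call sites touched by this series choose the policy explicitly, as the tcp.c and filter.c hunks above show:

        /* setsockopt(TCP_CONGESTION): may load modules, always safe to reinit */
        err = tcp_set_congestion_control(sk, name, true, true);

        /* BPF bpf_setsockopt(): no module loading; reinit only after CA init */
        err = tcp_set_congestion_control(sk, name, false, reinit);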
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ebe46ed997cb..38e795e0c4bf 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1176,7 +1176,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
         scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
         scratch->is_linear = !skb_is_nonlinear(skb);
 #endif
-        if (likely(!skb->_skb_refdst))
+        if (likely(!skb->_skb_refdst && !skb_sec_path(skb)))
                 scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
 
@@ -1928,14 +1928,16 @@ drop:
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
         struct dst_entry *old;
 
         if (dst_hold_safe(dst)) {
                 old = xchg(&sk->sk_rx_dst, dst);
                 dst_release(old);
+                return old != dst;
         }
+        return false;
 }
 EXPORT_SYMBOL(udp_sk_rx_dst_set);
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3c46e9513a31..936e9ab4dda5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5556,7 +5556,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                          * our DAD process, so we don't need
                          * to do it again
                          */
-                        if (!(ifp->rt->rt6i_node))
+                        if (!rcu_access_pointer(ifp->rt->rt6i_node))
                                 ip6_ins_rt(ifp->rt);
                         if (ifp->idev->cnf.forwarding)
                                 addrconf_join_anycast(ifp);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 9ed35473dcb5..ab64f367d11c 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -226,7 +226,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
         int tailen = esp->tailen;
 
         if (!skb_cloned(skb)) {
-                if (tailen <= skb_availroom(skb)) {
+                if (tailen <= skb_tailroom(skb)) {
                         nfrags = 1;
                         trailer = skb;
                         tail = skb_tail_pointer(trailer);
@@ -260,8 +260,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 
                         kunmap_atomic(vaddr);
 
-                        spin_unlock_bh(&x->lock);
-
                         nfrags = skb_shinfo(skb)->nr_frags;
 
                         __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -269,6 +267,9 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                         skb_shinfo(skb)->nr_frags = ++nfrags;
 
                         pfrag->offset = pfrag->offset + allocsize;
+
+                        spin_unlock_bh(&x->lock);
+
                         nfrags++;
 
                         skb->len += tailen;
@@ -345,7 +346,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                              (unsigned char *)esph - skb->data,
                              assoclen + ivlen + esp->clen + alen);
         if (unlikely(err < 0))
-                goto error;
+                goto error_free;
 
         if (!esp->inplace) {
                 int allocsize;
@@ -356,7 +357,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                 spin_lock_bh(&x->lock);
                 if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                         spin_unlock_bh(&x->lock);
-                        goto error;
+                        goto error_free;
                 }
 
                 skb_shinfo(skb)->nr_frags = 1;
@@ -373,7 +374,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
                              (unsigned char *)esph - skb->data,
                              assoclen + ivlen + esp->clen + alen);
                 if (unlikely(err < 0))
-                        goto error;
+                        goto error_free;
         }
 
         if ((x->props.flags & XFRM_STATE_ESN))
@@ -406,8 +407,9 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 
         if (sg != dsg)
                 esp_ssg_unref(x, tmp);
-        kfree(tmp);
 
+error_free:
+        kfree(tmp);
 error:
         return err;
 }
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index f02f131f6435..1cf437f75b0b 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -286,7 +286,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
         esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
 
         err = esp6_output_tail(x, skb, &esp);
-        if (err < 0)
+        if (err)
                 return err;
 
         secpath_reset(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5cc0ea038198..e1c85bb4eac0 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -148,11 +148,23 @@ static struct fib6_node *node_alloc(void)
         return fn;
 }
 
-static void node_free(struct fib6_node *fn)
+static void node_free_immediate(struct fib6_node *fn)
+{
+        kmem_cache_free(fib6_node_kmem, fn);
+}
+
+static void node_free_rcu(struct rcu_head *head)
 {
+        struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
+
         kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void node_free(struct fib6_node *fn)
+{
+        call_rcu(&fn->rcu, node_free_rcu);
+}
+
 static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 {
         int cpu;
@@ -601,9 +613,9 @@ insert_above:
 
                 if (!in || !ln) {
                         if (in)
-                                node_free(in);
+                                node_free_immediate(in);
                         if (ln)
-                                node_free(ln);
+                                node_free_immediate(ln);
                         return ERR_PTR(-ENOMEM);
                 }
 
@@ -877,7 +889,7 @@ add:
 
                 rt->dst.rt6_next = iter;
                 *ins = rt;
-                rt->rt6i_node = fn;
+                rcu_assign_pointer(rt->rt6i_node, fn);
                 atomic_inc(&rt->rt6i_ref);
                 if (!info->skip_notify)
                         inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
@@ -903,7 +915,7 @@ add:
                         return err;
 
                 *ins = rt;
-                rt->rt6i_node = fn;
+                rcu_assign_pointer(rt->rt6i_node, fn);
                 rt->dst.rt6_next = iter->dst.rt6_next;
                 atomic_inc(&rt->rt6i_ref);
                 if (!info->skip_notify)
@@ -1038,7 +1050,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
                                            root, and then (in failure) stale node
                                            in main tree.
                                          */
-                                        node_free(sfn);
+                                        node_free_immediate(sfn);
                                         err = PTR_ERR(sn);
                                         goto failure;
                                 }
@@ -1468,8 +1480,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
 
 int fib6_del(struct rt6_info *rt, struct nl_info *info)
 {
+        struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
+                                    lockdep_is_held(&rt->rt6i_table->tb6_lock));
         struct net *net = info->nl_net;
-        struct fib6_node *fn = rt->rt6i_node;
         struct rt6_info **rtp;
 
 #if RT6_DEBUG >= 2
@@ -1658,7 +1671,9 @@ static int fib6_clean_node(struct fib6_walker *w)
                 if (res) {
 #if RT6_DEBUG >= 2
                         pr_debug("%s: del failed: rt=%p@%p err=%d\n",
-                                 __func__, rt, rt->rt6i_node, res);
+                                 __func__, rt,
+                                 rcu_access_pointer(rt->rt6i_node),
+                                 res);
 #endif
                         continue;
                 }
@@ -1780,8 +1795,10 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                 }
                 gc_args->more++;
         } else if (rt->rt6i_flags & RTF_CACHE) {
+                if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout))
+                        rt->dst.obsolete = DST_OBSOLETE_KILL;
                 if (atomic_read(&rt->dst.__refcnt) == 1 &&
-                    time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+                    rt->dst.obsolete == DST_OBSOLETE_KILL) {
                         RT6_TRACE("aging clone %p\n", rt);
                         return -1;
                 } else if (rt->rt6i_flags & RTF_GATEWAY) {
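node_free() now defers the kmem_cache_free() past an RCU grace period, so lockless readers of rt6i_node can never observe freed memory; node_free_immediate() remains for nodes that were never published to readers. The general shape of the pattern, as a self-contained sketch:

        struct foo {
                struct rcu_head rcu;
                /* ... payload ... */
        };

        static void foo_free_rcu(struct rcu_head *head)
        {
                struct foo *f = container_of(head, struct foo, rcu);

                kfree(f);
        }

        static void foo_free(struct foo *f)
        {
                /* readers inside rcu_read_lock() may still hold f */
                call_rcu(&f->rcu, foo_free_rcu);
        }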
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 02d795fe3d7f..a5e466d4e093 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -242,7 +242,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                         pktopt = xchg(&np->pktoptions, NULL);
                         kfree_skb(pktopt);
 
-                        sk->sk_destruct = inet_sock_destruct;
                         /*
                          * ... and add it to the refcnt debug socks count
                          * in the new family. -acme
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index abb2c307fbe8..a338bbc33cf3 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
         while (offset <= packet_len) {
                 struct ipv6_opt_hdr *exthdr;
-                unsigned int len;
 
                 switch (**nexthdr) {
 
@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
                 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
                                                  offset);
-                len = ipv6_optlen(exthdr);
-                if (len + offset >= IPV6_MAXPLEN)
+                offset += ipv6_optlen(exthdr);
+                if (offset > IPV6_MAXPLEN)
                         return -EINVAL;
-                offset += len;
                 *nexthdr = &exthdr->nexthdr;
         }
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 94d6a13d47f0..2d0e7798c793 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -440,7 +440,8 @@ static bool rt6_check_expired(const struct rt6_info *rt)
                 if (time_after(jiffies, rt->dst.expires))
                         return true;
         } else if (rt->dst.from) {
-                return rt6_check_expired((struct rt6_info *) rt->dst.from);
+                return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
+                       rt6_check_expired((struct rt6_info *)rt->dst.from);
         }
         return false;
 }
@@ -1289,7 +1290,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
 
 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
 {
-        if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+        u32 rt_cookie = 0;
+
+        if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
                 return NULL;
 
         if (rt6_check_expired(rt))
@@ -1357,8 +1360,14 @@ static void ip6_link_failure(struct sk_buff *skb)
                 if (rt->rt6i_flags & RTF_CACHE) {
                         if (dst_hold_safe(&rt->dst))
                                 ip6_del_rt(rt);
-                } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
-                        rt->rt6i_node->fn_sernum = -1;
+                } else {
+                        struct fib6_node *fn;
+
+                        rcu_read_lock();
+                        fn = rcu_dereference(rt->rt6i_node);
+                        if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+                                fn->fn_sernum = -1;
+                        rcu_read_unlock();
                 }
         }
 }
@@ -1375,7 +1384,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
 {
         return !(rt->rt6i_flags & RTF_CACHE) &&
-                (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+                (rt->rt6i_flags & RTF_PCPU ||
+                 rcu_access_pointer(rt->rt6i_node));
 }
 
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
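rt6_get_cookie_safe() is not defined in this 'net'-limited view; presumably it comes from the companion ip6_fib.h change in the same series and reads the node pointer under RCU, along these lines (a sketch, not the verbatim header):

        static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
                                               u32 *cookie)
        {
                struct fib6_node *fn;
                bool status = false;

                rcu_read_lock();
                fn = rcu_dereference(rt->rt6i_node);
                if (fn) {
                        *cookie = fn->fn_sernum;
                        status = true;
                }
                rcu_read_unlock();

                return status;
        }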
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8cd9b628cdc7..56030d45823a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -767,6 +767,15 @@ start_lookup:
         return 0;
 }
 
+static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+        if (udp_sk_rx_dst_set(sk, dst)) {
+                const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+                inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
+        }
+}
+
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                    int proto)
 {
@@ -816,7 +825,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                 int ret;
 
                 if (unlikely(sk->sk_rx_dst != dst))
-                        udp_sk_rx_dst_set(sk, dst);
+                        udp6_sk_rx_dst_set(sk, dst);
 
                 ret = udpv6_queue_rcv_skb(sk, skb);
                 sock_put(sk);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index da49191f7ad0..4abf6287d7e1 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1383,6 +1383,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
         if (!csk)
                 return -EINVAL;
 
+        /* We must prevent loops or risk deadlock ! */
+        if (csk->sk_family == PF_KCM)
+                return -EOPNOTSUPP;
+
         psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
         if (!psock)
                 return -ENOMEM;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index b0c2d4ae781d..90165a6874bc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -113,7 +113,6 @@ struct l2tp_net {
         spinlock_t l2tp_session_hlist_lock;
 };
 
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
 static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
 {
@@ -127,39 +126,6 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net)
         return net_generic(net, l2tp_net_id);
 }
 
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
-        refcount_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
-        if (refcount_dec_and_test(&tunnel->ref_count))
-                l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t)                                    \
-do {                                                                    \
-        pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",        \
-                 __func__, __LINE__, (_t)->name,                        \
-                 refcount_read(&_t->ref_count));                        \
-        l2tp_tunnel_inc_refcount_1(_t);                                 \
-} while (0)
-#define l2tp_tunnel_dec_refcount(_t)                                    \
-do {                                                                    \
-        pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",        \
-                 __func__, __LINE__, (_t)->name,                        \
-                 refcount_read(&_t->ref_count));                        \
-        l2tp_tunnel_dec_refcount_1(_t);                                 \
-} while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
 /* Session hash global list for L2TPv3.
  * The session_id SHOULD be random according to RFC3931, but several
  * L2TP implementations use incrementing session_ids. So we do a real
@@ -229,6 +195,27 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
         return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
 }
 
+/* Lookup a tunnel. A new reference is held on the returned tunnel. */
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+{
+        const struct l2tp_net *pn = l2tp_pernet(net);
+        struct l2tp_tunnel *tunnel;
+
+        rcu_read_lock_bh();
+        list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+                if (tunnel->tunnel_id == tunnel_id) {
+                        l2tp_tunnel_inc_refcount(tunnel);
+                        rcu_read_unlock_bh();
+
+                        return tunnel;
+                }
+        }
+        rcu_read_unlock_bh();
+
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
+
 /* Lookup a session. A new reference is held on the returned session.
  * Optionally calls session->ref() too if do_ref is true.
  */
@@ -1348,17 +1335,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
         }
 }
 
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
-{
-        BUG_ON(refcount_read(&tunnel->ref_count) != 0);
-        BUG_ON(tunnel->sock != NULL);
-        l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
-        kfree_rcu(tunnel, rcu);
-}
-
 /* Workqueue tunnel deletion function */
 static void l2tp_tunnel_del_work(struct work_struct *work)
 {
@@ -1844,6 +1820,8 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel,
 
         l2tp_session_set_header_len(session, tunnel->version);
 
+        refcount_set(&session->ref_count, 1);
+
         err = l2tp_session_add_to_tunnel(tunnel, session);
         if (err) {
                 kfree(session);
@@ -1851,10 +1829,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel,
                 return ERR_PTR(err);
         }
 
-        /* Bump the reference count. The session context is deleted
-         * only when this drops to zero.
-         */
-        refcount_set(&session->ref_count, 1);
         l2tp_tunnel_inc_refcount(tunnel);
 
         /* Ensure tunnel socket isn't deleted */
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index cdb6e3327f74..9101297f27ad 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -231,6 +231,8 @@ out:
         return tunnel;
 }
 
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
+
 struct l2tp_session *l2tp_session_get(const struct net *net,
                                       struct l2tp_tunnel *tunnel,
                                       u32 session_id, bool do_ref);
@@ -269,6 +271,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
 void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 
+static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
+{
+        refcount_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+{
+        if (refcount_dec_and_test(&tunnel->ref_count))
+                kfree_rcu(tunnel, rcu);
+}
+
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
  */
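With the helpers in the header and l2tp_tunnel_get() exported, callers follow a plain get/use/put discipline, which is exactly what the netlink handlers below adopt:

        struct l2tp_tunnel *tunnel;

        tunnel = l2tp_tunnel_get(net, tunnel_id);       /* takes a reference */
        if (!tunnel)
                return -ENODEV;

        /* ... use tunnel; it cannot be freed underneath us ... */

        l2tp_tunnel_dec_refcount(tunnel);               /* kfree_rcu() on last put */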
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 12cfcd0ca807..57427d430f10 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -65,10 +65,12 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info, | |||
65 | (info->attrs[L2TP_ATTR_CONN_ID])) { | 65 | (info->attrs[L2TP_ATTR_CONN_ID])) { |
66 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | 66 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); |
67 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); | 67 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); |
68 | tunnel = l2tp_tunnel_find(net, tunnel_id); | 68 | tunnel = l2tp_tunnel_get(net, tunnel_id); |
69 | if (tunnel) | 69 | if (tunnel) { |
70 | session = l2tp_session_get(net, tunnel, session_id, | 70 | session = l2tp_session_get(net, tunnel, session_id, |
71 | do_ref); | 71 | do_ref); |
72 | l2tp_tunnel_dec_refcount(tunnel); | ||
73 | } | ||
72 | } | 74 | } |
73 | 75 | ||
74 | return session; | 76 | return session; |
@@ -271,8 +273,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info | |||
271 | } | 273 | } |
272 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | 274 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); |
273 | 275 | ||
274 | tunnel = l2tp_tunnel_find(net, tunnel_id); | 276 | tunnel = l2tp_tunnel_get(net, tunnel_id); |
275 | if (tunnel == NULL) { | 277 | if (!tunnel) { |
276 | ret = -ENODEV; | 278 | ret = -ENODEV; |
277 | goto out; | 279 | goto out; |
278 | } | 280 | } |
@@ -282,6 +284,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info | |||
282 | 284 | ||
283 | (void) l2tp_tunnel_delete(tunnel); | 285 | (void) l2tp_tunnel_delete(tunnel); |
284 | 286 | ||
287 | l2tp_tunnel_dec_refcount(tunnel); | ||
288 | |||
285 | out: | 289 | out: |
286 | return ret; | 290 | return ret; |
287 | } | 291 | } |
@@ -299,8 +303,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info | |||
299 | } | 303 | } |
300 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | 304 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); |
301 | 305 | ||
302 | tunnel = l2tp_tunnel_find(net, tunnel_id); | 306 | tunnel = l2tp_tunnel_get(net, tunnel_id); |
303 | if (tunnel == NULL) { | 307 | if (!tunnel) { |
304 | ret = -ENODEV; | 308 | ret = -ENODEV; |
305 | goto out; | 309 | goto out; |
306 | } | 310 | } |
@@ -311,6 +315,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info | |||
311 | ret = l2tp_tunnel_notify(&l2tp_nl_family, info, | 315 | ret = l2tp_tunnel_notify(&l2tp_nl_family, info, |
312 | tunnel, L2TP_CMD_TUNNEL_MODIFY); | 316 | tunnel, L2TP_CMD_TUNNEL_MODIFY); |
313 | 317 | ||
318 | l2tp_tunnel_dec_refcount(tunnel); | ||
319 | |||
314 | out: | 320 | out: |
315 | return ret; | 321 | return ret; |
316 | } | 322 | } |
@@ -438,34 +444,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info) | |||
438 | 444 | ||
439 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | 445 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { |
440 | ret = -EINVAL; | 446 | ret = -EINVAL; |
441 | goto out; | 447 | goto err; |
442 | } | 448 | } |
443 | 449 | ||
444 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | 450 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); |
445 | 451 | ||
446 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
447 | if (tunnel == NULL) { | ||
448 | ret = -ENODEV; | ||
449 | goto out; | ||
450 | } | ||
451 | |||
452 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 452 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
453 | if (!msg) { | 453 | if (!msg) { |
454 | ret = -ENOMEM; | 454 | ret = -ENOMEM; |
455 | goto out; | 455 | goto err; |
456 | } | ||
457 | |||
458 | tunnel = l2tp_tunnel_get(net, tunnel_id); | ||
459 | if (!tunnel) { | ||
460 | ret = -ENODEV; | ||
461 | goto err_nlmsg; | ||
456 | } | 462 | } |
457 | 463 | ||
458 | ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, | 464 | ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, |
459 | NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET); | 465 | NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET); |
460 | if (ret < 0) | 466 | if (ret < 0) |
461 | goto err_out; | 467 | goto err_nlmsg_tunnel; |
468 | |||
469 | l2tp_tunnel_dec_refcount(tunnel); | ||
462 | 470 | ||
463 | return genlmsg_unicast(net, msg, info->snd_portid); | 471 | return genlmsg_unicast(net, msg, info->snd_portid); |
464 | 472 | ||
465 | err_out: | 473 | err_nlmsg_tunnel: |
474 | l2tp_tunnel_dec_refcount(tunnel); | ||
475 | err_nlmsg: | ||
466 | nlmsg_free(msg); | 476 | nlmsg_free(msg); |
467 | 477 | err: | |
468 | out: | ||
469 | return ret; | 478 | return ret; |
470 | } | 479 | } |
471 | 480 | ||
@@ -509,8 +518,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf | |||
509 | ret = -EINVAL; | 518 | ret = -EINVAL; |
510 | goto out; | 519 | goto out; |
511 | } | 520 | } |
521 | |||
512 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | 522 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); |
513 | tunnel = l2tp_tunnel_find(net, tunnel_id); | 523 | tunnel = l2tp_tunnel_get(net, tunnel_id); |
514 | if (!tunnel) { | 524 | if (!tunnel) { |
515 | ret = -ENODEV; | 525 | ret = -ENODEV; |
516 | goto out; | 526 | goto out; |
@@ -518,24 +528,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf | |||
518 | 528 | ||
519 | if (!info->attrs[L2TP_ATTR_SESSION_ID]) { | 529 | if (!info->attrs[L2TP_ATTR_SESSION_ID]) { |
520 | ret = -EINVAL; | 530 | ret = -EINVAL; |
521 | goto out; | 531 | goto out_tunnel; |
522 | } | 532 | } |
523 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); | 533 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); |
524 | 534 | ||
525 | if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { | 535 | if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { |
526 | ret = -EINVAL; | 536 | ret = -EINVAL; |
527 | goto out; | 537 | goto out_tunnel; |
528 | } | 538 | } |
529 | peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); | 539 | peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); |
530 | 540 | ||
531 | if (!info->attrs[L2TP_ATTR_PW_TYPE]) { | 541 | if (!info->attrs[L2TP_ATTR_PW_TYPE]) { |
532 | ret = -EINVAL; | 542 | ret = -EINVAL; |
533 | goto out; | 543 | goto out_tunnel; |
534 | } | 544 | } |
535 | cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); | 545 | cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); |
536 | if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { | 546 | if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { |
537 | ret = -EINVAL; | 547 | ret = -EINVAL; |
538 | goto out; | 548 | goto out_tunnel; |
539 | } | 549 | } |
540 | 550 | ||
541 | if (tunnel->version > 2) { | 551 | if (tunnel->version > 2) { |
@@ -557,7 +567,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf | |||
557 | u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); | 567 | u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); |
558 | if (len > 8) { | 568 | if (len > 8) { |
559 | ret = -EINVAL; | 569 | ret = -EINVAL; |
560 | goto out; | 570 | goto out_tunnel; |
561 | } | 571 | } |
562 | cfg.cookie_len = len; | 572 | cfg.cookie_len = len; |
563 | memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); | 573 | memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); |
@@ -566,7 +576,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf | |||
566 | u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); | 576 | u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); |
567 | if (len > 8) { | 577 | if (len > 8) { |
568 | ret = -EINVAL; | 578 | ret = -EINVAL; |
569 | goto out; | 579 | goto out_tunnel; |
570 | } | 580 | } |
571 | cfg.peer_cookie_len = len; | 581 | cfg.peer_cookie_len = len; |
572 | memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); | 582 | memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); |
@@ -609,7 +619,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf | |||
609 | if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || | 619 | if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || |
610 | (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { | 620 | (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { |
611 | ret = -EPROTONOSUPPORT; | 621 | ret = -EPROTONOSUPPORT; |
612 | goto out; | 622 | goto out_tunnel; |
613 | } | 623 | } |
614 | 624 | ||
615 | /* Check that pseudowire-specific params are present */ | 625 | /* Check that pseudowire-specific params are present */ |
@@ -619,7 +629,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info) | |||
619 | case L2TP_PWTYPE_ETH_VLAN: | 629 | case L2TP_PWTYPE_ETH_VLAN: |
620 | if (!info->attrs[L2TP_ATTR_VLAN_ID]) { | 630 | if (!info->attrs[L2TP_ATTR_VLAN_ID]) { |
621 | ret = -EINVAL; | 631 | ret = -EINVAL; |
622 | goto out; | 632 | goto out_tunnel; |
623 | } | 633 | } |
624 | break; | 634 | break; |
625 | case L2TP_PWTYPE_ETH: | 635 | case L2TP_PWTYPE_ETH: |
@@ -647,6 +657,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info) | |||
647 | } | 657 | } |
648 | } | 658 | } |
649 | 659 | ||
660 | out_tunnel: | ||
661 | l2tp_tunnel_dec_refcount(tunnel); | ||
650 | out: | 662 | out: |
651 | return ret; | 663 | return ret; |
652 | } | 664 | } |
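
The l2tp_netlink hunks above replace the reference-less l2tp_tunnel_find() with l2tp_tunnel_get(), which returns a held reference, so every failure path after the lookup is retargeted from out to a new out_tunnel label that drops it. A minimal userspace sketch of the pattern, with tunnel_get()/tunnel_put() as hypothetical stand-ins for the kernel helpers:

    /* Sketch of the "lookup takes a reference, unwind label drops it"
     * pattern; tunnel_get()/tunnel_put() are hypothetical stand-ins for
     * l2tp_tunnel_get()/l2tp_tunnel_dec_refcount().
     */
    #include <errno.h>
    #include <stdlib.h>

    struct tunnel { int refcnt; };

    static struct tunnel *tunnel_get(struct tunnel *t)
    {
        if (t)
            t->refcnt++;            /* lookup now returns a held reference */
        return t;
    }

    static void tunnel_put(struct tunnel *t)
    {
        if (t && --t->refcnt == 0)
            free(t);                /* last reference frees the object */
    }

    static int session_create(struct tunnel *found, int have_session_id)
    {
        struct tunnel *tunnel;
        int ret = 0;

        tunnel = tunnel_get(found);
        if (!tunnel)
            return -ENODEV;         /* nothing held yet: plain return */

        if (!have_session_id) {
            ret = -EINVAL;
            goto out_tunnel;        /* the held reference must be dropped */
        }
        /* ... validate remaining attributes, create the session ... */

    out_tunnel:
        tunnel_put(tunnel);         /* every post-lookup exit lands here */
        return ret;
    }
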
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index eb541786ccb7..b1d3740ae36a 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
@@ -441,7 +441,7 @@ nf_nat_setup_info(struct nf_conn *ct, | |||
441 | else | 441 | else |
442 | ct->status |= IPS_DST_NAT; | 442 | ct->status |= IPS_DST_NAT; |
443 | 443 | ||
444 | if (nfct_help(ct)) | 444 | if (nfct_help(ct) && !nfct_seqadj(ct)) |
445 | if (!nfct_seqadj_ext_add(ct)) | 445 | if (!nfct_seqadj_ext_add(ct)) |
446 | return NF_DROP; | 446 | return NF_DROP; |
447 | } | 447 | } |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index f5a7cb68694e..b89f4f65b2a0 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -305,7 +305,7 @@ static int nft_target_validate(const struct nft_ctx *ctx, | |||
305 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 305 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
306 | 306 | ||
307 | hook_mask = 1 << ops->hooknum; | 307 | hook_mask = 1 << ops->hooknum; |
308 | if (!(hook_mask & target->hooks)) | 308 | if (target->hooks && !(hook_mask & target->hooks)) |
309 | return -EINVAL; | 309 | return -EINVAL; |
310 | 310 | ||
311 | ret = nft_compat_chain_validate_dependency(target->table, | 311 | ret = nft_compat_chain_validate_dependency(target->table, |
@@ -484,7 +484,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, | |||
484 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 484 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
485 | 485 | ||
486 | hook_mask = 1 << ops->hooknum; | 486 | hook_mask = 1 << ops->hooknum; |
487 | if (!(hook_mask & match->hooks)) | 487 | if (match->hooks && !(hook_mask & match->hooks)) |
488 | return -EINVAL; | 488 | return -EINVAL; |
489 | 489 | ||
490 | ret = nft_compat_chain_validate_dependency(match->table, | 490 | ret = nft_compat_chain_validate_dependency(match->table, |
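
The nft_compat change relaxes validation: an xt target or match whose ->hooks mask is zero declares no hook restriction, and the old check wrongly rejected it because hook_mask & 0 is always zero. A small sketch of the corrected test, with hypothetical names:

    /* Sketch of the corrected hook validation: a zero mask means "usable
     * at any hook", so the restriction is enforced only when the extension
     * actually declares one.
     */
    #include <assert.h>
    #include <errno.h>

    static int validate_hook(unsigned int hooknum, unsigned int allowed_mask)
    {
        unsigned int hook_mask = 1u << hooknum;

        if (allowed_mask && !(hook_mask & allowed_mask))
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        assert(validate_hook(2, 0) == 0);          /* unrestricted: accepted */
        assert(validate_hook(2, 1u << 2) == 0);    /* matching hook: accepted */
        assert(validate_hook(2, 1u << 3) != 0);    /* wrong hook: rejected */
        return 0;
    }
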
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c index 18dd57a52651..14538b1d4d11 100644 --- a/net/netfilter/nft_limit.c +++ b/net/netfilter/nft_limit.c | |||
@@ -65,19 +65,23 @@ static int nft_limit_init(struct nft_limit *limit, | |||
65 | limit->nsecs = unit * NSEC_PER_SEC; | 65 | limit->nsecs = unit * NSEC_PER_SEC; |
66 | if (limit->rate == 0 || limit->nsecs < unit) | 66 | if (limit->rate == 0 || limit->nsecs < unit) |
67 | return -EOVERFLOW; | 67 | return -EOVERFLOW; |
68 | limit->tokens = limit->tokens_max = limit->nsecs; | ||
69 | |||
70 | if (tb[NFTA_LIMIT_BURST]) { | ||
71 | u64 rate; | ||
72 | 68 | ||
69 | if (tb[NFTA_LIMIT_BURST]) | ||
73 | limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); | 70 | limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); |
71 | else | ||
72 | limit->burst = 0; | ||
73 | |||
74 | if (limit->rate + limit->burst < limit->rate) | ||
75 | return -EOVERFLOW; | ||
74 | 76 | ||
75 | rate = limit->rate + limit->burst; | 77 | /* The token bucket size limits the number of tokens that can be |
76 | if (rate < limit->rate) | 78 | * accumulated. tokens_max specifies the bucket size. |
77 | return -EOVERFLOW; | 79 | * tokens_max = unit * (rate + burst) / rate. |
80 | */ | ||
81 | limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), | ||
82 | limit->rate); | ||
83 | limit->tokens_max = limit->tokens; | ||
78 | 84 | ||
79 | limit->rate = rate; | ||
80 | } | ||
81 | if (tb[NFTA_LIMIT_FLAGS]) { | 85 | if (tb[NFTA_LIMIT_FLAGS]) { |
82 | u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS])); | 86 | u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS])); |
83 | 87 | ||
@@ -95,9 +99,8 @@ static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit, | |||
95 | { | 99 | { |
96 | u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0; | 100 | u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0; |
97 | u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC); | 101 | u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC); |
98 | u64 rate = limit->rate - limit->burst; | ||
99 | 102 | ||
100 | if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate), | 103 | if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate), |
101 | NFTA_LIMIT_PAD) || | 104 | NFTA_LIMIT_PAD) || |
102 | nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs), | 105 | nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs), |
103 | NFTA_LIMIT_PAD) || | 106 | NFTA_LIMIT_PAD) || |
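
The nft_limit rewrite keeps rate and burst separate instead of folding burst into rate at init time, sizes the bucket as unit * (rate + burst) / rate in nanosecond credits, and lets the dump path report the configured rate as-is rather than reconstructing it as rate - burst. A worked example under assumed numbers (rate 10/s, burst 5, 1-second unit):

    /* Worked example of the bucket sizing above: tokens are kept as
     * "nanoseconds of credit", so the bucket holds unit * (rate + burst) / rate.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t rate = 10, burst = 5, unit = 1;   /* 10 pkts per 1 s, burst 5 */
        uint64_t nsecs = unit * NSEC_PER_SEC;
        uint64_t tokens_max;

        if (rate + burst < rate)                   /* overflow check, as above */
            return 1;
        tokens_max = nsecs * (rate + burst) / rate;
        printf("bucket: %llu token-ns = %llu packets of credit\n",
               (unsigned long long)tokens_max,
               (unsigned long long)(tokens_max * rate / nsecs));
        return 0;
    }

With these inputs the bucket holds 1.5e9 token-nanoseconds, i.e. one second's worth of tokens plus the 5-packet burst.
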
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 008a45ca3112..1c61af9af67d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2191,6 +2191,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2191 | struct timespec ts; | 2191 | struct timespec ts; |
2192 | __u32 ts_status; | 2192 | __u32 ts_status; |
2193 | bool is_drop_n_account = false; | 2193 | bool is_drop_n_account = false; |
2194 | bool do_vnet = false; | ||
2194 | 2195 | ||
2195 | /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. | 2196 | /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. |
2196 | * We may add members to them until current aligned size without forcing | 2197 | * We may add members to them until current aligned size without forcing |
@@ -2241,8 +2242,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2241 | netoff = TPACKET_ALIGN(po->tp_hdrlen + | 2242 | netoff = TPACKET_ALIGN(po->tp_hdrlen + |
2242 | (maclen < 16 ? 16 : maclen)) + | 2243 | (maclen < 16 ? 16 : maclen)) + |
2243 | po->tp_reserve; | 2244 | po->tp_reserve; |
2244 | if (po->has_vnet_hdr) | 2245 | if (po->has_vnet_hdr) { |
2245 | netoff += sizeof(struct virtio_net_hdr); | 2246 | netoff += sizeof(struct virtio_net_hdr); |
2247 | do_vnet = true; | ||
2248 | } | ||
2246 | macoff = netoff - maclen; | 2249 | macoff = netoff - maclen; |
2247 | } | 2250 | } |
2248 | if (po->tp_version <= TPACKET_V2) { | 2251 | if (po->tp_version <= TPACKET_V2) { |
@@ -2259,8 +2262,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2259 | skb_set_owner_r(copy_skb, sk); | 2262 | skb_set_owner_r(copy_skb, sk); |
2260 | } | 2263 | } |
2261 | snaplen = po->rx_ring.frame_size - macoff; | 2264 | snaplen = po->rx_ring.frame_size - macoff; |
2262 | if ((int)snaplen < 0) | 2265 | if ((int)snaplen < 0) { |
2263 | snaplen = 0; | 2266 | snaplen = 0; |
2267 | do_vnet = false; | ||
2268 | } | ||
2264 | } | 2269 | } |
2265 | } else if (unlikely(macoff + snaplen > | 2270 | } else if (unlikely(macoff + snaplen > |
2266 | GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { | 2271 | GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { |
@@ -2273,6 +2278,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2273 | if (unlikely((int)snaplen < 0)) { | 2278 | if (unlikely((int)snaplen < 0)) { |
2274 | snaplen = 0; | 2279 | snaplen = 0; |
2275 | macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; | 2280 | macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; |
2281 | do_vnet = false; | ||
2276 | } | 2282 | } |
2277 | } | 2283 | } |
2278 | spin_lock(&sk->sk_receive_queue.lock); | 2284 | spin_lock(&sk->sk_receive_queue.lock); |
@@ -2298,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2298 | } | 2304 | } |
2299 | spin_unlock(&sk->sk_receive_queue.lock); | 2305 | spin_unlock(&sk->sk_receive_queue.lock); |
2300 | 2306 | ||
2301 | if (po->has_vnet_hdr) { | 2307 | if (do_vnet) { |
2302 | if (virtio_net_hdr_from_skb(skb, h.raw + macoff - | 2308 | if (virtio_net_hdr_from_skb(skb, h.raw + macoff - |
2303 | sizeof(struct virtio_net_hdr), | 2309 | sizeof(struct virtio_net_hdr), |
2304 | vio_le(), true)) { | 2310 | vio_le(), true)) { |
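
The af_packet hunks decide once, in do_vnet, whether a virtio_net_hdr will be written, and clear the flag on both paths that clamp snaplen; previously the header was emitted whenever has_vnet_hdr was set, even for truncated frames. A compressed sketch of the decide-early flag (field names and sizes are made up):

    /* Minimal sketch of the do_vnet pattern: record at layout time whether
     * a virtio header was reserved, and clear that decision wherever the
     * snapshot length is clamped, so a truncated frame never carries a
     * header describing bytes that were cut off.
     */
    #include <stdbool.h>

    struct layout { unsigned int netoff; bool do_vnet; };

    static struct layout compute_layout(unsigned int hdrlen,
                                        unsigned int vnet_hdr_len,
                                        bool has_vnet_hdr,
                                        int *snaplen, int room)
    {
        struct layout l = { .netoff = hdrlen, .do_vnet = false };

        if (has_vnet_hdr) {
            l.netoff += vnet_hdr_len;   /* reserve space for the header */
            l.do_vnet = true;
        }
        if (*snaplen > room) {          /* frame truncated to fit the slot */
            *snaplen = room;
            l.do_vnet = false;          /* suppress the misleading header */
        }
        return l;
    }
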
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 9fd44c221347..6c5ea84d2682 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -215,9 +215,15 @@ static void tcf_chain_flush(struct tcf_chain *chain) | |||
215 | 215 | ||
216 | static void tcf_chain_destroy(struct tcf_chain *chain) | 216 | static void tcf_chain_destroy(struct tcf_chain *chain) |
217 | { | 217 | { |
218 | list_del(&chain->list); | 218 | /* May be already removed from the list by the previous call. */ |
219 | tcf_chain_flush(chain); | 219 | if (!list_empty(&chain->list)) |
220 | kfree(chain); | 220 | list_del_init(&chain->list); |
221 | |||
222 | /* There might still be a reference held when we got here from | ||
223 | * tcf_block_put. Wait for the user to drop reference before free. | ||
224 | */ | ||
225 | if (!chain->refcnt) | ||
226 | kfree(chain); | ||
221 | } | 227 | } |
222 | 228 | ||
223 | struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, | 229 | struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, |
@@ -288,8 +294,10 @@ void tcf_block_put(struct tcf_block *block) | |||
288 | if (!block) | 294 | if (!block) |
289 | return; | 295 | return; |
290 | 296 | ||
291 | list_for_each_entry_safe(chain, tmp, &block->chain_list, list) | 297 | list_for_each_entry_safe(chain, tmp, &block->chain_list, list) { |
298 | tcf_chain_flush(chain); | ||
292 | tcf_chain_destroy(chain); | 299 | tcf_chain_destroy(chain); |
300 | } | ||
293 | kfree(block); | 301 | kfree(block); |
294 | } | 302 | } |
295 | EXPORT_SYMBOL(tcf_block_put); | 303 | EXPORT_SYMBOL(tcf_block_put); |
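
The cls_api fix handles a chain being torn down from two directions: tcf_block_put() now flushes each chain itself, list removal becomes idempotent via list_empty()/list_del_init(), and the kfree() is gated on the refcount so a still-referenced chain survives until its last user drops it. A userspace sketch of the guarded teardown, with simplified stand-in fields:

    /* Sketch of the guarded teardown: destroy may be reached twice (once
     * from the block, once from the last reference holder), so unlinking
     * must be idempotent and the free gated on the refcount.
     */
    #include <stdbool.h>
    #include <stdlib.h>

    struct chain {
        bool on_list;       /* stands in for !list_empty(&chain->list) */
        unsigned int refcnt;
    };

    static void chain_destroy(struct chain *c)
    {
        if (c->on_list)             /* may already have been unlinked */
            c->on_list = false;
        if (c->refcnt == 0)         /* wait for users to drop their refs */
            free(c);
    }

    static void chain_put(struct chain *c) /* called by a reference holder */
    {
        if (--c->refcnt == 0)
            chain_destroy(c);       /* the second call does the real free */
    }
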
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index a3fa144b8648..4fb5a3222d0d 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -836,7 +836,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
836 | 836 | ||
837 | old = dev_graft_qdisc(dev_queue, new); | 837 | old = dev_graft_qdisc(dev_queue, new); |
838 | if (new && i > 0) | 838 | if (new && i > 0) |
839 | refcount_inc(&new->refcnt); | 839 | qdisc_refcount_inc(new); |
840 | 840 | ||
841 | if (!ingress) | 841 | if (!ingress) |
842 | qdisc_destroy(old); | 842 | qdisc_destroy(old); |
@@ -847,7 +847,7 @@ skip: | |||
847 | notify_and_destroy(net, skb, n, classid, | 847 | notify_and_destroy(net, skb, n, classid, |
848 | dev->qdisc, new); | 848 | dev->qdisc, new); |
849 | if (new && !new->ops->attach) | 849 | if (new && !new->ops->attach) |
850 | refcount_inc(&new->refcnt); | 850 | qdisc_refcount_inc(new); |
851 | dev->qdisc = new ? : &noop_qdisc; | 851 | dev->qdisc = new ? : &noop_qdisc; |
852 | 852 | ||
853 | if (new && new->ops->attach) | 853 | if (new && new->ops->attach) |
@@ -1256,7 +1256,7 @@ replay: | |||
1256 | if (q == p || | 1256 | if (q == p || |
1257 | (p && check_loop(q, p, 0))) | 1257 | (p && check_loop(q, p, 0))) |
1258 | return -ELOOP; | 1258 | return -ELOOP; |
1259 | refcount_inc(&q->refcnt); | 1259 | qdisc_refcount_inc(q); |
1260 | goto graft; | 1260 | goto graft; |
1261 | } else { | 1261 | } else { |
1262 | if (!q) | 1262 | if (!q) |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 780db43300b1..156c8a33c677 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -1139,6 +1139,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
1139 | struct tc_ratespec *r; | 1139 | struct tc_ratespec *r; |
1140 | int err; | 1140 | int err; |
1141 | 1141 | ||
1142 | qdisc_watchdog_init(&q->watchdog, sch); | ||
1143 | hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | ||
1144 | q->delay_timer.function = cbq_undelay; | ||
1145 | |||
1146 | if (!opt) | ||
1147 | return -EINVAL; | ||
1148 | |||
1142 | err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL); | 1149 | err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL); |
1143 | if (err < 0) | 1150 | if (err < 0) |
1144 | return err; | 1151 | return err; |
@@ -1177,9 +1184,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
1177 | q->link.avpkt = q->link.allot/2; | 1184 | q->link.avpkt = q->link.allot/2; |
1178 | q->link.minidle = -0x7FFFFFFF; | 1185 | q->link.minidle = -0x7FFFFFFF; |
1179 | 1186 | ||
1180 | qdisc_watchdog_init(&q->watchdog, sch); | ||
1181 | hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | ||
1182 | q->delay_timer.function = cbq_undelay; | ||
1183 | q->toplevel = TC_CBQ_MAXLEVEL; | 1187 | q->toplevel = TC_CBQ_MAXLEVEL; |
1184 | q->now = psched_get_time(); | 1188 | q->now = psched_get_time(); |
1185 | 1189 | ||
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 337f2d6d81e4..2c0c05f2cc34 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -491,10 +491,8 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) | |||
491 | if (!q->flows) | 491 | if (!q->flows) |
492 | return -ENOMEM; | 492 | return -ENOMEM; |
493 | q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL); | 493 | q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL); |
494 | if (!q->backlogs) { | 494 | if (!q->backlogs) |
495 | kvfree(q->flows); | ||
496 | return -ENOMEM; | 495 | return -ENOMEM; |
497 | } | ||
498 | for (i = 0; i < q->flows_cnt; i++) { | 496 | for (i = 0; i < q->flows_cnt; i++) { |
499 | struct fq_codel_flow *flow = q->flows + i; | 497 | struct fq_codel_flow *flow = q->flows + i; |
500 | 498 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 57ba406f1437..4ba6da5fb254 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -785,7 +785,7 @@ static void attach_default_qdiscs(struct net_device *dev) | |||
785 | dev->priv_flags & IFF_NO_QUEUE) { | 785 | dev->priv_flags & IFF_NO_QUEUE) { |
786 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); | 786 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); |
787 | dev->qdisc = txq->qdisc_sleeping; | 787 | dev->qdisc = txq->qdisc_sleeping; |
788 | refcount_inc(&dev->qdisc->refcnt); | 788 | qdisc_refcount_inc(dev->qdisc); |
789 | } else { | 789 | } else { |
790 | qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT); | 790 | qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT); |
791 | if (qdisc) { | 791 | if (qdisc) { |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index fd15200f8627..11ab8dace901 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1418,6 +1418,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) | |||
1418 | struct tc_hfsc_qopt *qopt; | 1418 | struct tc_hfsc_qopt *qopt; |
1419 | int err; | 1419 | int err; |
1420 | 1420 | ||
1421 | qdisc_watchdog_init(&q->watchdog, sch); | ||
1422 | |||
1421 | if (opt == NULL || nla_len(opt) < sizeof(*qopt)) | 1423 | if (opt == NULL || nla_len(opt) < sizeof(*qopt)) |
1422 | return -EINVAL; | 1424 | return -EINVAL; |
1423 | qopt = nla_data(opt); | 1425 | qopt = nla_data(opt); |
@@ -1430,7 +1432,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) | |||
1430 | 1432 | ||
1431 | err = tcf_block_get(&q->root.block, &q->root.filter_list); | 1433 | err = tcf_block_get(&q->root.block, &q->root.filter_list); |
1432 | if (err) | 1434 | if (err) |
1433 | goto err_tcf; | 1435 | return err; |
1434 | 1436 | ||
1435 | q->root.cl_common.classid = sch->handle; | 1437 | q->root.cl_common.classid = sch->handle; |
1436 | q->root.refcnt = 1; | 1438 | q->root.refcnt = 1; |
@@ -1448,13 +1450,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) | |||
1448 | qdisc_class_hash_insert(&q->clhash, &q->root.cl_common); | 1450 | qdisc_class_hash_insert(&q->clhash, &q->root.cl_common); |
1449 | qdisc_class_hash_grow(sch, &q->clhash); | 1451 | qdisc_class_hash_grow(sch, &q->clhash); |
1450 | 1452 | ||
1451 | qdisc_watchdog_init(&q->watchdog, sch); | ||
1452 | |||
1453 | return 0; | 1453 | return 0; |
1454 | |||
1455 | err_tcf: | ||
1456 | qdisc_class_hash_destroy(&q->clhash); | ||
1457 | return err; | ||
1458 | } | 1454 | } |
1459 | 1455 | ||
1460 | static int | 1456 | static int |
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 51d3ba682af9..73a53c08091b 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c | |||
@@ -477,6 +477,9 @@ static void hhf_destroy(struct Qdisc *sch) | |||
477 | kvfree(q->hhf_valid_bits[i]); | 477 | kvfree(q->hhf_valid_bits[i]); |
478 | } | 478 | } |
479 | 479 | ||
480 | if (!q->hh_flows) | ||
481 | return; | ||
482 | |||
480 | for (i = 0; i < HH_FLOWS_CNT; i++) { | 483 | for (i = 0; i < HH_FLOWS_CNT; i++) { |
481 | struct hh_flow_state *flow, *next; | 484 | struct hh_flow_state *flow, *next; |
482 | struct list_head *head = &q->hh_flows[i]; | 485 | struct list_head *head = &q->hh_flows[i]; |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 5d65ec5207e9..5bf5177b2bd3 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1017,6 +1017,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) | |||
1017 | int err; | 1017 | int err; |
1018 | int i; | 1018 | int i; |
1019 | 1019 | ||
1020 | qdisc_watchdog_init(&q->watchdog, sch); | ||
1021 | INIT_WORK(&q->work, htb_work_func); | ||
1022 | |||
1020 | if (!opt) | 1023 | if (!opt) |
1021 | return -EINVAL; | 1024 | return -EINVAL; |
1022 | 1025 | ||
@@ -1041,8 +1044,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) | |||
1041 | for (i = 0; i < TC_HTB_NUMPRIO; i++) | 1044 | for (i = 0; i < TC_HTB_NUMPRIO; i++) |
1042 | INIT_LIST_HEAD(q->drops + i); | 1045 | INIT_LIST_HEAD(q->drops + i); |
1043 | 1046 | ||
1044 | qdisc_watchdog_init(&q->watchdog, sch); | ||
1045 | INIT_WORK(&q->work, htb_work_func); | ||
1046 | qdisc_skb_head_init(&q->direct_queue); | 1047 | qdisc_skb_head_init(&q->direct_queue); |
1047 | 1048 | ||
1048 | if (tb[TCA_HTB_DIRECT_QLEN]) | 1049 | if (tb[TCA_HTB_DIRECT_QLEN]) |
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index f143b7bbaa0d..9c454f5d6c38 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
@@ -257,12 +257,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt) | |||
257 | for (i = 0; i < q->max_bands; i++) | 257 | for (i = 0; i < q->max_bands; i++) |
258 | q->queues[i] = &noop_qdisc; | 258 | q->queues[i] = &noop_qdisc; |
259 | 259 | ||
260 | err = multiq_tune(sch, opt); | 260 | return multiq_tune(sch, opt); |
261 | |||
262 | if (err) | ||
263 | kfree(q->queues); | ||
264 | |||
265 | return err; | ||
266 | } | 261 | } |
267 | 262 | ||
268 | static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) | 263 | static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 1b3dd6190e93..14d1724e0dc4 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -933,11 +933,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt) | |||
933 | struct netem_sched_data *q = qdisc_priv(sch); | 933 | struct netem_sched_data *q = qdisc_priv(sch); |
934 | int ret; | 934 | int ret; |
935 | 935 | ||
936 | qdisc_watchdog_init(&q->watchdog, sch); | ||
937 | |||
936 | if (!opt) | 938 | if (!opt) |
937 | return -EINVAL; | 939 | return -EINVAL; |
938 | 940 | ||
939 | qdisc_watchdog_init(&q->watchdog, sch); | ||
940 | |||
941 | q->loss_model = CLG_RANDOM; | 941 | q->loss_model = CLG_RANDOM; |
942 | ret = netem_change(sch, opt); | 942 | ret = netem_change(sch, opt); |
943 | if (ret) | 943 | if (ret) |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 82469ef9655e..fc69fc5956e9 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -716,13 +716,13 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) | |||
716 | int i; | 716 | int i; |
717 | int err; | 717 | int err; |
718 | 718 | ||
719 | setup_deferrable_timer(&q->perturb_timer, sfq_perturbation, | ||
720 | (unsigned long)sch); | ||
721 | |||
719 | err = tcf_block_get(&q->block, &q->filter_list); | 722 | err = tcf_block_get(&q->block, &q->filter_list); |
720 | if (err) | 723 | if (err) |
721 | return err; | 724 | return err; |
722 | 725 | ||
723 | setup_deferrable_timer(&q->perturb_timer, sfq_perturbation, | ||
724 | (unsigned long)sch); | ||
725 | |||
726 | for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) { | 726 | for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) { |
727 | q->dep[i].next = i + SFQ_MAX_FLOWS; | 727 | q->dep[i].next = i + SFQ_MAX_FLOWS; |
728 | q->dep[i].prev = i + SFQ_MAX_FLOWS; | 728 | q->dep[i].prev = i + SFQ_MAX_FLOWS; |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index b2e4b6ad241a..493270f0d5b0 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -425,12 +425,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt) | |||
425 | { | 425 | { |
426 | struct tbf_sched_data *q = qdisc_priv(sch); | 426 | struct tbf_sched_data *q = qdisc_priv(sch); |
427 | 427 | ||
428 | qdisc_watchdog_init(&q->watchdog, sch); | ||
429 | q->qdisc = &noop_qdisc; | ||
430 | |||
428 | if (opt == NULL) | 431 | if (opt == NULL) |
429 | return -EINVAL; | 432 | return -EINVAL; |
430 | 433 | ||
431 | q->t_c = ktime_get_ns(); | 434 | q->t_c = ktime_get_ns(); |
432 | qdisc_watchdog_init(&q->watchdog, sch); | ||
433 | q->qdisc = &noop_qdisc; | ||
434 | 435 | ||
435 | return tbf_change(sch, opt); | 436 | return tbf_change(sch, opt); |
436 | } | 437 | } |
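
The fq_codel, cbq, hfsc, hhf, htb, multiq, netem, sfq and tbf hunks above are variations of one fix: when ->init() fails, the qdisc core still calls ->destroy(), so timers, watchdogs and work items must be initialized before the first early return, partial-allocation cleanup can move out of init() entirely (fq_codel, multiq, hfsc), and destroy() must tolerate allocations that never happened (the hhf NULL check). A condensed sketch of that init/destroy contract:

    /* Sketch of the contract the hunks restore: destroy() runs even when
     * init() fails early, so everything destroy() touches must already be
     * initialized, and destroy() must tolerate missing allocations.
     */
    #include <errno.h>
    #include <stdlib.h>

    struct qd {
        int timer_armed;    /* stands in for qdisc_watchdog/hrtimer state */
        int *table;
    };

    static int qd_init(struct qd *q, const void *opt)
    {
        q->timer_armed = 0;             /* set up before any early return */
        q->table = NULL;

        if (!opt)
            return -EINVAL;             /* destroy() still runs after this */

        q->table = calloc(16, sizeof(*q->table));
        if (!q->table)
            return -ENOMEM;             /* no local unwind needed */
        return 0;
    }

    static void qd_destroy(struct qd *q)
    {
        q->timer_armed = 0;             /* safe: always initialized */
        if (!q->table)                  /* tolerate failed/partial init */
            return;
        free(q->table);
    }
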
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 9a647214a91e..e99518e79b52 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c | |||
@@ -70,7 +70,8 @@ static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, | |||
70 | 70 | ||
71 | info = nla_data(attr); | 71 | info = nla_data(attr); |
72 | list_for_each_entry_rcu(laddr, address_list, list) { | 72 | list_for_each_entry_rcu(laddr, address_list, list) { |
73 | memcpy(info, &laddr->a, addrlen); | 73 | memcpy(info, &laddr->a, sizeof(laddr->a)); |
74 | memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a)); | ||
74 | info += addrlen; | 75 | info += addrlen; |
75 | } | 76 | } |
76 | 77 | ||
@@ -93,7 +94,9 @@ static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb, | |||
93 | info = nla_data(attr); | 94 | info = nla_data(attr); |
94 | list_for_each_entry(from, &asoc->peer.transport_addr_list, | 95 | list_for_each_entry(from, &asoc->peer.transport_addr_list, |
95 | transports) { | 96 | transports) { |
96 | memcpy(info, &from->ipaddr, addrlen); | 97 | memcpy(info, &from->ipaddr, sizeof(from->ipaddr)); |
98 | memset(info + sizeof(from->ipaddr), 0, | ||
99 | addrlen - sizeof(from->ipaddr)); | ||
97 | info += addrlen; | 100 | info += addrlen; |
98 | } | 101 | } |
99 | 102 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1db478e34520..8d760863bc41 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -4538,8 +4538,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, | |||
4538 | info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; | 4538 | info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; |
4539 | 4539 | ||
4540 | prim = asoc->peer.primary_path; | 4540 | prim = asoc->peer.primary_path; |
4541 | memcpy(&info->sctpi_p_address, &prim->ipaddr, | 4541 | memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr)); |
4542 | sizeof(struct sockaddr_storage)); | ||
4543 | info->sctpi_p_state = prim->state; | 4542 | info->sctpi_p_state = prim->state; |
4544 | info->sctpi_p_cwnd = prim->cwnd; | 4543 | info->sctpi_p_cwnd = prim->cwnd; |
4545 | info->sctpi_p_srtt = prim->srtt; | 4544 | info->sctpi_p_srtt = prim->srtt; |
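
Both sctp hunks fix mismatched copy sizes around the address union: sctp_diag reserves addrlen (sizeof(struct sockaddr_storage)) per entry while the source union is smaller, so the tail must be zeroed rather than left as stale heap bytes, and socket.c previously over-read that small union by copying a full sockaddr_storage out of it. A sketch of the copy-then-zero-tail idiom (sizes are illustrative):

    /* Copy-then-zero-tail: each reserved slot is addrlen bytes, the source
     * object is smaller, and the gap must be cleared instead of leaking
     * whatever the allocator previously stored there. Assumes
     * addrlen >= sizeof(struct addr).
     */
    #include <string.h>

    struct addr { unsigned char bytes[28]; };   /* smaller than the slot */

    static void fill_slot(unsigned char *slot, size_t addrlen,
                          const struct addr *a)
    {
        memcpy(slot, a, sizeof(*a));                        /* real data  */
        memset(slot + sizeof(*a), 0, addrlen - sizeof(*a)); /* clear tail */
    }
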
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 2b720fa35c4f..e18500151236 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -421,6 +421,9 @@ static void svc_data_ready(struct sock *sk) | |||
421 | dprintk("svc: socket %p(inet %p), busy=%d\n", | 421 | dprintk("svc: socket %p(inet %p), busy=%d\n", |
422 | svsk, sk, | 422 | svsk, sk, |
423 | test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); | 423 | test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); |
424 | |||
425 | /* Refer to svc_setup_socket() for details. */ | ||
426 | rmb(); | ||
424 | svsk->sk_odata(sk); | 427 | svsk->sk_odata(sk); |
425 | if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags)) | 428 | if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags)) |
426 | svc_xprt_enqueue(&svsk->sk_xprt); | 429 | svc_xprt_enqueue(&svsk->sk_xprt); |
@@ -437,6 +440,9 @@ static void svc_write_space(struct sock *sk) | |||
437 | if (svsk) { | 440 | if (svsk) { |
438 | dprintk("svc: socket %p(inet %p), write_space busy=%d\n", | 441 | dprintk("svc: socket %p(inet %p), write_space busy=%d\n", |
439 | svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); | 442 | svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); |
443 | |||
444 | /* Refer to svc_setup_socket() for details. */ | ||
445 | rmb(); | ||
440 | svsk->sk_owspace(sk); | 446 | svsk->sk_owspace(sk); |
441 | svc_xprt_enqueue(&svsk->sk_xprt); | 447 | svc_xprt_enqueue(&svsk->sk_xprt); |
442 | } | 448 | } |
@@ -760,8 +766,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk) | |||
760 | dprintk("svc: socket %p TCP (listen) state change %d\n", | 766 | dprintk("svc: socket %p TCP (listen) state change %d\n", |
761 | sk, sk->sk_state); | 767 | sk, sk->sk_state); |
762 | 768 | ||
763 | if (svsk) | 769 | if (svsk) { |
770 | /* Refer to svc_setup_socket() for details. */ | ||
771 | rmb(); | ||
764 | svsk->sk_odata(sk); | 772 | svsk->sk_odata(sk); |
773 | } | ||
774 | |||
765 | /* | 775 | /* |
766 | * This callback may be called twice when a new connection | 776 | ||
767 | * is established as a child socket inherits everything | 777 | * is established as a child socket inherits everything |
@@ -794,6 +804,8 @@ static void svc_tcp_state_change(struct sock *sk) | |||
794 | if (!svsk) | 804 | if (!svsk) |
795 | printk("svc: socket %p: no user data\n", sk); | 805 | printk("svc: socket %p: no user data\n", sk); |
796 | else { | 806 | else { |
807 | /* Refer to svc_setup_socket() for details. */ | ||
808 | rmb(); | ||
797 | svsk->sk_ostate(sk); | 809 | svsk->sk_ostate(sk); |
798 | if (sk->sk_state != TCP_ESTABLISHED) { | 810 | if (sk->sk_state != TCP_ESTABLISHED) { |
799 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 811 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
@@ -1381,12 +1393,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, | |||
1381 | return ERR_PTR(err); | 1393 | return ERR_PTR(err); |
1382 | } | 1394 | } |
1383 | 1395 | ||
1384 | inet->sk_user_data = svsk; | ||
1385 | svsk->sk_sock = sock; | 1396 | svsk->sk_sock = sock; |
1386 | svsk->sk_sk = inet; | 1397 | svsk->sk_sk = inet; |
1387 | svsk->sk_ostate = inet->sk_state_change; | 1398 | svsk->sk_ostate = inet->sk_state_change; |
1388 | svsk->sk_odata = inet->sk_data_ready; | 1399 | svsk->sk_odata = inet->sk_data_ready; |
1389 | svsk->sk_owspace = inet->sk_write_space; | 1400 | svsk->sk_owspace = inet->sk_write_space; |
1401 | /* | ||
1402 | * This barrier is necessary in order to prevent a race condition | ||
1403 | * with svc_data_ready(), svc_listen_data_ready() and others | ||
1404 | * when calling callbacks above. | ||
1405 | */ | ||
1406 | wmb(); | ||
1407 | inet->sk_user_data = svsk; | ||
1390 | 1408 | ||
1391 | /* Initialize the socket */ | 1409 | /* Initialize the socket */ |
1392 | if (sock->type == SOCK_DGRAM) | 1410 | if (sock->type == SOCK_DGRAM) |
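
The sunrpc change publishes sk_user_data last: the wmb() in svc_setup_socket() orders the saves of the original sk_* callbacks before the pointer store, and the rmb() in each callback orders the pointer load before the saved fields are read. A userspace analogue using C11 release/acquire in place of the kernel's full barriers:

    /* Userspace analogue of the wmb()/rmb() pairing: the writer fully
     * initializes the object before publishing the pointer; the reader's
     * acquire load guarantees the initialized fields are visible.
     */
    #include <stdatomic.h>

    struct svsk { void (*saved_data_ready)(void); };

    static _Atomic(struct svsk *) sk_user_data;

    static void publish(struct svsk *svsk, void (*orig)(void))
    {
        svsk->saved_data_ready = orig;                /* initialize first */
        atomic_store_explicit(&sk_user_data, svsk,
                              memory_order_release);  /* publish last: wmb() */
    }

    static void data_ready(void)
    {
        struct svsk *svsk = atomic_load_explicit(&sk_user_data,
                                                 memory_order_acquire); /* rmb() */
        if (svsk)
            svsk->saved_data_ready();                 /* fields now visible */
    }
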
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 767e0537dde5..89cd061c4468 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -65,6 +65,8 @@ static struct tipc_bearer *bearer_get(struct net *net, int bearer_id) | |||
65 | } | 65 | } |
66 | 66 | ||
67 | static void bearer_disable(struct net *net, struct tipc_bearer *b); | 67 | static void bearer_disable(struct net *net, struct tipc_bearer *b); |
68 | static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, | ||
69 | struct packet_type *pt, struct net_device *orig_dev); | ||
68 | 70 | ||
69 | /** | 71 | /** |
70 | * tipc_media_find - locates specified media object by name | 72 | * tipc_media_find - locates specified media object by name |
@@ -428,6 +430,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, | |||
428 | 430 | ||
429 | /* Associate TIPC bearer with L2 bearer */ | 431 | /* Associate TIPC bearer with L2 bearer */ |
430 | rcu_assign_pointer(b->media_ptr, dev); | 432 | rcu_assign_pointer(b->media_ptr, dev); |
433 | b->pt.dev = dev; | ||
434 | b->pt.type = htons(ETH_P_TIPC); | ||
435 | b->pt.func = tipc_l2_rcv_msg; | ||
436 | dev_add_pack(&b->pt); | ||
431 | memset(&b->bcast_addr, 0, sizeof(b->bcast_addr)); | 437 | memset(&b->bcast_addr, 0, sizeof(b->bcast_addr)); |
432 | memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len); | 438 | memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len); |
433 | b->bcast_addr.media_id = b->media->type_id; | 439 | b->bcast_addr.media_id = b->media->type_id; |
@@ -447,6 +453,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b) | |||
447 | struct net_device *dev; | 453 | struct net_device *dev; |
448 | 454 | ||
449 | dev = (struct net_device *)rtnl_dereference(b->media_ptr); | 455 | dev = (struct net_device *)rtnl_dereference(b->media_ptr); |
456 | dev_remove_pack(&b->pt); | ||
450 | RCU_INIT_POINTER(dev->tipc_ptr, NULL); | 457 | RCU_INIT_POINTER(dev->tipc_ptr, NULL); |
451 | synchronize_net(); | 458 | synchronize_net(); |
452 | dev_put(dev); | 459 | dev_put(dev); |
@@ -594,11 +601,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, | |||
594 | struct tipc_bearer *b; | 601 | struct tipc_bearer *b; |
595 | 602 | ||
596 | rcu_read_lock(); | 603 | rcu_read_lock(); |
597 | b = rcu_dereference_rtnl(dev->tipc_ptr); | 604 | b = rcu_dereference_rtnl(dev->tipc_ptr) ?: |
605 | rcu_dereference_rtnl(orig_dev->tipc_ptr); | ||
598 | if (likely(b && test_bit(0, &b->up) && | 606 | if (likely(b && test_bit(0, &b->up) && |
599 | (skb->pkt_type <= PACKET_MULTICAST))) { | 607 | (skb->pkt_type <= PACKET_MULTICAST))) { |
600 | skb->next = NULL; | 608 | skb->next = NULL; |
601 | tipc_rcv(dev_net(dev), skb, b); | 609 | tipc_rcv(dev_net(b->pt.dev), skb, b); |
602 | rcu_read_unlock(); | 610 | rcu_read_unlock(); |
603 | return NET_RX_SUCCESS; | 611 | return NET_RX_SUCCESS; |
604 | } | 612 | } |
@@ -659,11 +667,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, | |||
659 | return NOTIFY_OK; | 667 | return NOTIFY_OK; |
660 | } | 668 | } |
661 | 669 | ||
662 | static struct packet_type tipc_packet_type __read_mostly = { | ||
663 | .type = htons(ETH_P_TIPC), | ||
664 | .func = tipc_l2_rcv_msg, | ||
665 | }; | ||
666 | |||
667 | static struct notifier_block notifier = { | 670 | static struct notifier_block notifier = { |
668 | .notifier_call = tipc_l2_device_event, | 671 | .notifier_call = tipc_l2_device_event, |
669 | .priority = 0, | 672 | .priority = 0, |
@@ -671,19 +674,12 @@ static struct notifier_block notifier = { | |||
671 | 674 | ||
672 | int tipc_bearer_setup(void) | 675 | int tipc_bearer_setup(void) |
673 | { | 676 | { |
674 | int err; | 677 | return register_netdevice_notifier(¬ifier); |
675 | |||
676 | err = register_netdevice_notifier(¬ifier); | ||
677 | if (err) | ||
678 | return err; | ||
679 | dev_add_pack(&tipc_packet_type); | ||
680 | return 0; | ||
681 | } | 678 | } |
682 | 679 | ||
683 | void tipc_bearer_cleanup(void) | 680 | void tipc_bearer_cleanup(void) |
684 | { | 681 | { |
685 | unregister_netdevice_notifier(¬ifier); | 682 | unregister_netdevice_notifier(¬ifier); |
686 | dev_remove_pack(&tipc_packet_type); | ||
687 | } | 683 | } |
688 | 684 | ||
689 | void tipc_bearer_stop(struct net *net) | 685 | void tipc_bearer_stop(struct net *net) |
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 635c9086e19a..e07a55a80c18 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h | |||
@@ -131,6 +131,7 @@ struct tipc_media { | |||
131 | * @name: bearer name (format = media:interface) | 131 | * @name: bearer name (format = media:interface) |
132 | * @media: ptr to media structure associated with bearer | 132 | * @media: ptr to media structure associated with bearer |
133 | * @bcast_addr: media address used in broadcasting | 133 | * @bcast_addr: media address used in broadcasting |
134 | * @pt: packet type for bearer | ||
134 | * @rcu: rcu struct for tipc_bearer | 135 | * @rcu: rcu struct for tipc_bearer |
135 | * @priority: default link priority for bearer | 136 | * @priority: default link priority for bearer |
136 | * @window: default window size for bearer | 137 | * @window: default window size for bearer |
@@ -151,6 +152,7 @@ struct tipc_bearer { | |||
151 | char name[TIPC_MAX_BEARER_NAME]; | 152 | char name[TIPC_MAX_BEARER_NAME]; |
152 | struct tipc_media *media; | 153 | struct tipc_media *media; |
153 | struct tipc_media_addr bcast_addr; | 154 | struct tipc_media_addr bcast_addr; |
155 | struct packet_type pt; | ||
154 | struct rcu_head rcu; | 156 | struct rcu_head rcu; |
155 | u32 priority; | 157 | u32 priority; |
156 | u32 window; | 158 | u32 window; |
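
The tipc bearer rework replaces one global packet_type with a packet_type embedded in each bearer: setting .dev before dev_add_pack() restricts delivery to that device, and the receive path falls back to orig_dev->tipc_ptr (taking the namespace from b->pt.dev) so frames arriving via stacked devices still reach the bearer. A kernel-style sketch of per-device registration; this is not a standalone program:

    /* Kernel-style sketch of binding a protocol handler to one device:
     * with .dev set, dev_add_pack() delivers only frames received on that
     * interface. Names here are illustrative, not the TIPC code.
     */
    #include <linux/netdevice.h>
    #include <linux/if_ether.h>

    static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev)
    {
        /* ... consume the frame ... */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
    }

    static struct packet_type my_pt;

    static void bind_handler(struct net_device *dev)
    {
        my_pt.type = htons(ETH_P_TIPC); /* protocol to match */
        my_pt.func = my_rcv;            /* handler */
        my_pt.dev  = dev;               /* restrict to this device */
        dev_add_pack(&my_pt);           /* paired with dev_remove_pack() */
    }
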
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index dcd90e6fa7c3..6ef379f004ac 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -479,13 +479,14 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, | |||
479 | bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) | 479 | bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) |
480 | { | 480 | { |
481 | struct sk_buff *_skb = *skb; | 481 | struct sk_buff *_skb = *skb; |
482 | struct tipc_msg *hdr = buf_msg(_skb); | 482 | struct tipc_msg *hdr; |
483 | struct tipc_msg ohdr; | 483 | struct tipc_msg ohdr; |
484 | int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE); | 484 | int dlen; |
485 | 485 | ||
486 | if (skb_linearize(_skb)) | 486 | if (skb_linearize(_skb)) |
487 | goto exit; | 487 | goto exit; |
488 | hdr = buf_msg(_skb); | 488 | hdr = buf_msg(_skb); |
489 | dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE); | ||
489 | if (msg_dest_droppable(hdr)) | 490 | if (msg_dest_droppable(hdr)) |
490 | goto exit; | 491 | goto exit; |
491 | if (msg_errcode(hdr)) | 492 | if (msg_errcode(hdr)) |
@@ -511,6 +512,8 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) | |||
511 | pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) | 512 | pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) |
512 | goto exit; | 513 | goto exit; |
513 | 514 | ||
515 | /* reassign after skb header modifications */ | ||
516 | hdr = buf_msg(_skb); | ||
514 | /* Now reverse the concerned fields */ | 517 | /* Now reverse the concerned fields */ |
515 | msg_set_errcode(hdr, err); | 518 | msg_set_errcode(hdr, err); |
516 | msg_set_non_seq(hdr, 0); | 519 | msg_set_non_seq(hdr, 0); |
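
The tipc/msg.c hunks fix use-after-realloc bugs: skb_linearize() and pskb_expand_head() may move the buffer, so hdr and dlen are now derived only after linearizing, and hdr is re-read after the head is expanded (the tipc/node.c hunk below likewise linearizes before inspecting the message). A userspace analogue with realloc():

    /* Analogue of the dangling-header fix: a pointer derived from a buffer
     * is invalid once the buffer may have been reallocated, so it must be
     * re-derived afterwards, as hdr is re-read from buf_msg() above.
     */
    #include <stdlib.h>

    struct hdr { unsigned int errcode; };

    static int set_errcode(unsigned char **buf, size_t len, unsigned int err)
    {
        struct hdr *h = (struct hdr *)*buf;            /* points into *buf */
        unsigned char *nbuf = realloc(*buf, len + 64); /* may move memory  */

        if (!nbuf)
            return -1;
        *buf = nbuf;
        h = (struct hdr *)nbuf;     /* re-derive; the old h may now dangle */
        h->errcode = err;
        return 0;
    }
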
diff --git a/net/tipc/node.c b/net/tipc/node.c index 9b4dcb6a16b5..7dd22330a6b4 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -1126,8 +1126,8 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, | |||
1126 | strncpy(linkname, tipc_link_name(link), len); | 1126 | strncpy(linkname, tipc_link_name(link), len); |
1127 | err = 0; | 1127 | err = 0; |
1128 | } | 1128 | } |
1129 | exit: | ||
1130 | tipc_node_read_unlock(node); | 1129 | tipc_node_read_unlock(node); |
1130 | exit: | ||
1131 | tipc_node_put(node); | 1131 | tipc_node_put(node); |
1132 | return err; | 1132 | return err; |
1133 | } | 1133 | } |
@@ -1557,6 +1557,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) | |||
1557 | 1557 | ||
1558 | /* Check/update node state before receiving */ | 1558 | /* Check/update node state before receiving */ |
1559 | if (unlikely(skb)) { | 1559 | if (unlikely(skb)) { |
1560 | if (unlikely(skb_linearize(skb))) | ||
1561 | goto discard; | ||
1560 | tipc_node_write_lock(n); | 1562 | tipc_node_write_lock(n); |
1561 | if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) { | 1563 | if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) { |
1562 | if (le->link) { | 1564 | if (le->link) { |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 101e3597338f..d50edd6e0019 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -2255,8 +2255,8 @@ void tipc_sk_reinit(struct net *net) | |||
2255 | 2255 | ||
2256 | do { | 2256 | do { |
2257 | tsk = ERR_PTR(rhashtable_walk_start(&iter)); | 2257 | tsk = ERR_PTR(rhashtable_walk_start(&iter)); |
2258 | if (tsk) | 2258 | if (IS_ERR(tsk)) |
2259 | continue; | 2259 | goto walk_stop; |
2260 | 2260 | ||
2261 | while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { | 2261 | while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { |
2262 | spin_lock_bh(&tsk->sk.sk_lock.slock); | 2262 | spin_lock_bh(&tsk->sk.sk_lock.slock); |
@@ -2265,7 +2265,7 @@ void tipc_sk_reinit(struct net *net) | |||
2265 | msg_set_orignode(msg, tn->own_addr); | 2265 | msg_set_orignode(msg, tn->own_addr); |
2266 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | 2266 | spin_unlock_bh(&tsk->sk.sk_lock.slock); |
2267 | } | 2267 | } |
2268 | 2268 | walk_stop: | |
2269 | rhashtable_walk_stop(&iter); | 2269 | rhashtable_walk_stop(&iter); |
2270 | } while (tsk == ERR_PTR(-EAGAIN)); | 2270 | } while (tsk == ERR_PTR(-EAGAIN)); |
2271 | } | 2271 | } |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 0bf91cd3733c..be3d9e3183dc 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -52,7 +52,6 @@ struct tipc_subscriber { | |||
52 | struct list_head subscrp_list; | 52 | struct list_head subscrp_list; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | static void tipc_subscrp_delete(struct tipc_subscription *sub); | ||
56 | static void tipc_subscrb_put(struct tipc_subscriber *subscriber); | 55 | static void tipc_subscrb_put(struct tipc_subscriber *subscriber); |
57 | 56 | ||
58 | /** | 57 | /** |
@@ -197,15 +196,19 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber, | |||
197 | { | 196 | { |
198 | struct list_head *subscription_list = &subscriber->subscrp_list; | 197 | struct list_head *subscription_list = &subscriber->subscrp_list; |
199 | struct tipc_subscription *sub, *temp; | 198 | struct tipc_subscription *sub, *temp; |
199 | u32 timeout; | ||
200 | 200 | ||
201 | spin_lock_bh(&subscriber->lock); | 201 | spin_lock_bh(&subscriber->lock); |
202 | list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) { | 202 | list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) { |
203 | if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) | 203 | if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) |
204 | continue; | 204 | continue; |
205 | 205 | ||
206 | tipc_nametbl_unsubscribe(sub); | 206 | timeout = htohl(sub->evt.s.timeout, sub->swap); |
207 | list_del(&sub->subscrp_list); | 207 | if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) { |
208 | tipc_subscrp_delete(sub); | 208 | tipc_nametbl_unsubscribe(sub); |
209 | list_del(&sub->subscrp_list); | ||
210 | tipc_subscrp_put(sub); | ||
211 | } | ||
209 | 212 | ||
210 | if (s) | 213 | if (s) |
211 | break; | 214 | break; |
@@ -236,18 +239,12 @@ static void tipc_subscrb_delete(struct tipc_subscriber *subscriber) | |||
236 | tipc_subscrb_put(subscriber); | 239 | tipc_subscrb_put(subscriber); |
237 | } | 240 | } |
238 | 241 | ||
239 | static void tipc_subscrp_delete(struct tipc_subscription *sub) | ||
240 | { | ||
241 | u32 timeout = htohl(sub->evt.s.timeout, sub->swap); | ||
242 | |||
243 | if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) | ||
244 | tipc_subscrp_put(sub); | ||
245 | } | ||
246 | |||
247 | static void tipc_subscrp_cancel(struct tipc_subscr *s, | 242 | static void tipc_subscrp_cancel(struct tipc_subscr *s, |
248 | struct tipc_subscriber *subscriber) | 243 | struct tipc_subscriber *subscriber) |
249 | { | 244 | { |
245 | tipc_subscrb_get(subscriber); | ||
250 | tipc_subscrb_subscrp_delete(subscriber, s); | 246 | tipc_subscrb_subscrp_delete(subscriber, s); |
247 | tipc_subscrb_put(subscriber); | ||
251 | } | 248 | } |
252 | 249 | ||
253 | static struct tipc_subscription *tipc_subscrp_create(struct net *net, | 250 | static struct tipc_subscription *tipc_subscrp_create(struct net *net, |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ff61d8557929..69b16ee327d9 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2226,7 +2226,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
2226 | goto no_transform; | 2226 | goto no_transform; |
2227 | } | 2227 | } |
2228 | 2228 | ||
2229 | dst_hold(&xdst->u.dst); | ||
2230 | route = xdst->route; | 2229 | route = xdst->route; |
2231 | } | 2230 | } |
2232 | } | 2231 | } |
@@ -3308,9 +3307,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, | |||
3308 | struct xfrm_state *x_new[XFRM_MAX_DEPTH]; | 3307 | struct xfrm_state *x_new[XFRM_MAX_DEPTH]; |
3309 | struct xfrm_migrate *mp; | 3308 | struct xfrm_migrate *mp; |
3310 | 3309 | ||
3310 | /* Stage 0 - sanity checks */ | ||
3311 | if ((err = xfrm_migrate_check(m, num_migrate)) < 0) | 3311 | if ((err = xfrm_migrate_check(m, num_migrate)) < 0) |
3312 | goto out; | 3312 | goto out; |
3313 | 3313 | ||
3314 | if (dir >= XFRM_POLICY_MAX) { | ||
3315 | err = -EINVAL; | ||
3316 | goto out; | ||
3317 | } | ||
3318 | |||
3314 | /* Stage 1 - find policy */ | 3319 | /* Stage 1 - find policy */ |
3315 | if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { | 3320 | if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { |
3316 | err = -ENOENT; | 3321 | err = -ENOENT; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 6c0956d10db6..a792effdb0b5 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1620,6 +1620,7 @@ int | |||
1620 | xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, | 1620 | xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, |
1621 | unsigned short family, struct net *net) | 1621 | unsigned short family, struct net *net) |
1622 | { | 1622 | { |
1623 | int i; | ||
1623 | int err = 0; | 1624 | int err = 0; |
1624 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); | 1625 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); |
1625 | if (!afinfo) | 1626 | if (!afinfo) |
@@ -1628,6 +1629,9 @@ xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, | |||
1628 | spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/ | 1629 | spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/ |
1629 | if (afinfo->tmpl_sort) | 1630 | if (afinfo->tmpl_sort) |
1630 | err = afinfo->tmpl_sort(dst, src, n); | 1631 | err = afinfo->tmpl_sort(dst, src, n); |
1632 | else | ||
1633 | for (i = 0; i < n; i++) | ||
1634 | dst[i] = src[i]; | ||
1631 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | 1635 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1632 | rcu_read_unlock(); | 1636 | rcu_read_unlock(); |
1633 | return err; | 1637 | return err; |
@@ -1638,6 +1642,7 @@ int | |||
1638 | xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, | 1642 | xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, |
1639 | unsigned short family) | 1643 | unsigned short family) |
1640 | { | 1644 | { |
1645 | int i; | ||
1641 | int err = 0; | 1646 | int err = 0; |
1642 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); | 1647 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); |
1643 | struct net *net = xs_net(*src); | 1648 | struct net *net = xs_net(*src); |
@@ -1648,6 +1653,9 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, | |||
1648 | spin_lock_bh(&net->xfrm.xfrm_state_lock); | 1653 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1649 | if (afinfo->state_sort) | 1654 | if (afinfo->state_sort) |
1650 | err = afinfo->state_sort(dst, src, n); | 1655 | err = afinfo->state_sort(dst, src, n); |
1656 | else | ||
1657 | for (i = 0; i < n; i++) | ||
1658 | dst[i] = src[i]; | ||
1651 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | 1659 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1652 | rcu_read_unlock(); | 1660 | rcu_read_unlock(); |
1653 | return err; | 1661 | return err; |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 2be4c6af008a..9391ced05259 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -796,7 +796,7 @@ static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb | |||
796 | return -EMSGSIZE; | 796 | return -EMSGSIZE; |
797 | 797 | ||
798 | xuo = nla_data(attr); | 798 | xuo = nla_data(attr); |
799 | 799 | memset(xuo, 0, sizeof(*xuo)); | |
800 | xuo->ifindex = xso->dev->ifindex; | 800 | xuo->ifindex = xso->dev->ifindex; |
801 | xuo->flags = xso->flags; | 801 | xuo->flags = xso->flags; |
802 | 802 | ||
@@ -1869,6 +1869,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct | |||
1869 | return -EMSGSIZE; | 1869 | return -EMSGSIZE; |
1870 | 1870 | ||
1871 | id = nlmsg_data(nlh); | 1871 | id = nlmsg_data(nlh); |
1872 | memset(&id->sa_id, 0, sizeof(id->sa_id)); | ||
1872 | memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr)); | 1873 | memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr)); |
1873 | id->sa_id.spi = x->id.spi; | 1874 | id->sa_id.spi = x->id.spi; |
1874 | id->sa_id.family = x->props.family; | 1875 | id->sa_id.family = x->props.family; |
@@ -2578,6 +2579,8 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct | |||
2578 | ue = nlmsg_data(nlh); | 2579 | ue = nlmsg_data(nlh); |
2579 | copy_to_user_state(x, &ue->state); | 2580 | copy_to_user_state(x, &ue->state); |
2580 | ue->hard = (c->data.hard != 0) ? 1 : 0; | 2581 | ue->hard = (c->data.hard != 0) ? 1 : 0; |
2582 | /* clear the padding bytes */ | ||
2583 | memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard)); | ||
2581 | 2584 | ||
2582 | err = xfrm_mark_put(skb, &x->mark); | 2585 | err = xfrm_mark_put(skb, &x->mark); |
2583 | if (err) | 2586 | if (err) |
@@ -2715,6 +2718,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c) | |||
2715 | struct nlattr *attr; | 2718 | struct nlattr *attr; |
2716 | 2719 | ||
2717 | id = nlmsg_data(nlh); | 2720 | id = nlmsg_data(nlh); |
2721 | memset(id, 0, sizeof(*id)); | ||
2718 | memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); | 2722 | memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); |
2719 | id->spi = x->id.spi; | 2723 | id->spi = x->id.spi; |
2720 | id->family = x->props.family; | 2724 | id->family = x->props.family; |
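
The xfrm_user hunks all plug infoleaks in netlink replies: structures built field by field are copied to userspace whole, so allocator garbage in holes and trailing padding escapes unless the buffer (xuo, sa_id, id) is zeroed first or, as in build_expire(), just the bytes after the last field are cleared via offsetofend(). A sketch of that trailing-padding trick (the struct layout is illustrative):

    /* offsetofend(T, m) is the offset one byte past member m, so clearing
     * from there to sizeof(T) wipes exactly the trailing padding that
     * would otherwise leak to userspace. The macro matches the kernel's.
     */
    #include <stddef.h>
    #include <string.h>

    #define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    struct user_expire {
        long long state;    /* stands in for the copied SA state */
        unsigned char hard; /* last real field; padding follows   */
    };

    static void build_expire(struct user_expire *ue, long long st, int hard)
    {
        ue->state = st;
        ue->hard = hard ? 1 : 0;
        memset((unsigned char *)ue + offsetofend(struct user_expire, hard), 0,
               sizeof(*ue) - offsetofend(struct user_expire, hard));
    }
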