aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-04-21 15:57:34 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-04-21 15:57:34 -0400
commitc5edde3a81149d29ceae4221f09f4c7bc2f70846 (patch)
tree5260db3beec59da2a2276231d083e8e2b63b8c2a /net
parentf862d66a1a4609e61cd87c7ff4c7d9a234103d67 (diff)
parentb4f70527f052b0c00be4d7cac562baa75b212df5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Fix memory leak in iwlwifi, from Matti Gottlieb. 2) Add missing registration of netfilter arp_tables into initial namespace, from Florian Westphal. 3) Fix potential NULL deref in DecNET routing code. 4) Restrict NETLINK_URELEASE to truly bound sockets only, from Dmitry Ivanov. 5) Fix dst ref counting in VRF, from David Ahern. 6) Fix TSO segmenting limits in i40e driver, from Alexander Duyck. 7) Fix heap leak in PACKET_DIAG_MCLIST, from Mathias Krause. 8) Revalidate IPV6 datagram socket cached routes properly, particularly with UDP, from Martin KaFai Lau. 9) Fix endian bug in RDS dp_ack_seq handling, from Qing Huang. 10) Fix stats typing in bcmgenet driver, from Eric Dumazet. 11) Openvswitch needs to orphan SKBs before ipv6 fragmentation handling, from Joe Stringer. 12) SPI device reference leak in spi_ks8895 PHY driver, from Mark Brown. 13) atl2 doesn't actually support scatter-gather, so don't advertise the feature. From Ben Hutchings. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (72 commits) openvswitch: use flow protocol when recalculating ipv6 checksums Driver: Vmxnet3: set CHECKSUM_UNNECESSARY for IPv6 packets atl2: Disable unimplemented scatter/gather feature net/mlx4_en: Split SW RX dropped counter per RX ring net/mlx4_core: Don't allow to VF change global pause settings net/mlx4_core: Avoid repeated calls to pci enable/disable net/mlx4_core: Implement pci_resume callback net: phy: spi_ks8895: Don't leak references to SPI devices net: ethernet: davinci_emac: Fix platform_data overwrite net: ethernet: davinci_emac: Fix Unbalanced pm_runtime_enable qede: Fix single MTU sized packet from firmware GRO flow qede: Fix setting Skb network header qede: Fix various memory allocation error flows for fastpath tcp: Merge tx_flags and tskey in tcp_shifted_skb tcp: Merge tx_flags and tskey in tcp_collapse_retrans drivers: net: cpsw: fix wrong regs access in cpsw_ndo_open tcp: Fix SOF_TIMESTAMPING_TX_ACK when 
handling dup acks openvswitch: Orphan skbs before IPv6 defrag Revert "Prevent NUll pointer dereference with two PHYs on cpsw" VSOCK: Only check error on skb_recv_datagram when skb is NULL ...
Diffstat (limited to 'net')
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/decnet/dn_route.c9
-rw-r--r--net/ipv4/netfilter/arptable_filter.c6
-rw-r--r--net/ipv4/route.c19
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_output.c16
-rw-r--r--net/ipv4/udp.c9
-rw-r--r--net/ipv6/addrconf.c22
-rw-r--r--net/ipv6/datagram.c169
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/ipv6/udp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/openvswitch/actions.c4
-rw-r--r--net/openvswitch/conntrack.c1
-rw-r--r--net/packet/af_packet.c1
-rw-r--r--net/rds/cong.c4
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/sched/sch_generic.c5
-rw-r--r--net/sctp/outqueue.c15
-rw-r--r--net/sctp/sm_make_chunk.c3
-rw-r--r--net/sctp/sm_sideeffect.c36
-rw-r--r--net/sctp/transport.c19
-rw-r--r--net/tipc/core.c1
-rw-r--r--net/tipc/core.h3
-rw-r--r--net/tipc/name_distr.c35
-rw-r--r--net/vmw_vsock/vmci_transport.c7
-rw-r--r--net/wireless/nl80211.c2
29 files changed, 298 insertions, 133 deletions
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8570bc7744c2..5a61f35412a0 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
370 left - sizeof(struct ebt_entry_match) < m->match_size) 370 left - sizeof(struct ebt_entry_match) < m->match_size)
371 return -EINVAL; 371 return -EINVAL;
372 372
373 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0); 373 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
374 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
375 request_module("ebt_%s", m->u.name);
376 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
377 }
374 if (IS_ERR(match)) 378 if (IS_ERR(match))
375 return PTR_ERR(match); 379 return PTR_ERR(match);
376 m->u.match = match; 380 m->u.match = match;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04c2d1c8c87..e561f9f07d6d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
4502 __skb_push(skb, offset); 4502 __skb_push(skb, offset);
4503 err = __vlan_insert_tag(skb, skb->vlan_proto, 4503 err = __vlan_insert_tag(skb, skb->vlan_proto,
4504 skb_vlan_tag_get(skb)); 4504 skb_vlan_tag_get(skb));
4505 if (err) 4505 if (err) {
4506 __skb_pull(skb, offset);
4506 return err; 4507 return err;
4508 }
4509
4507 skb->protocol = skb->vlan_proto; 4510 skb->protocol = skb->vlan_proto;
4508 skb->mac_len += VLAN_HLEN; 4511 skb->mac_len += VLAN_HLEN;
4509 __skb_pull(skb, offset);
4510 4512
4511 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 4513 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
4514 __skb_pull(skb, offset);
4512 } 4515 }
4513 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 4516 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4514 return 0; 4517 return 0;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 607a14f20d88..b1dc096d22f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@ source_ok:
1034 if (!fld.daddr) { 1034 if (!fld.daddr) {
1035 fld.daddr = fld.saddr; 1035 fld.daddr = fld.saddr;
1036 1036
1037 err = -EADDRNOTAVAIL;
1038 if (dev_out) 1037 if (dev_out)
1039 dev_put(dev_out); 1038 dev_put(dev_out);
1039 err = -EINVAL;
1040 dev_out = init_net.loopback_dev; 1040 dev_out = init_net.loopback_dev;
1041 if (!dev_out->dn_ptr)
1042 goto out;
1043 err = -EADDRNOTAVAIL;
1041 dev_hold(dev_out); 1044 dev_hold(dev_out);
1042 if (!fld.daddr) { 1045 if (!fld.daddr) {
1043 fld.daddr = 1046 fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
1110 if (dev_out == NULL) 1113 if (dev_out == NULL)
1111 goto out; 1114 goto out;
1112 dn_db = rcu_dereference_raw(dev_out->dn_ptr); 1115 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1116 if (!dn_db)
1117 goto e_inval;
1113 /* Possible improvement - check all devices for local addr */ 1118 /* Possible improvement - check all devices for local addr */
1114 if (dn_dev_islocal(dev_out, fld.daddr)) { 1119 if (dn_dev_islocal(dev_out, fld.daddr)) {
1115 dev_put(dev_out); 1120 dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
1151 dev_put(dev_out); 1156 dev_put(dev_out);
1152 dev_out = init_net.loopback_dev; 1157 dev_out = init_net.loopback_dev;
1153 dev_hold(dev_out); 1158 dev_hold(dev_out);
1159 if (!dev_out->dn_ptr)
1160 goto e_inval;
1154 fld.flowidn_oif = dev_out->ifindex; 1161 fld.flowidn_oif = dev_out->ifindex;
1155 if (res.fi) 1162 if (res.fi)
1156 dn_fib_info_put(res.fi); 1163 dn_fib_info_put(res.fi);
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index dd8c80dc32a2..8f8713b4388f 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
81 return ret; 81 return ret;
82 } 82 }
83 83
84 ret = arptable_filter_table_init(&init_net);
85 if (ret) {
86 unregister_pernet_subsys(&arptable_filter_net_ops);
87 kfree(arpfilter_ops);
88 }
89
84 return ret; 90 return ret;
85} 91}
86 92
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 02c62299d717..60398a9370e7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1438#endif 1438#endif
1439} 1439}
1440 1440
1441static struct rtable *rt_dst_alloc(struct net_device *dev, 1441struct rtable *rt_dst_alloc(struct net_device *dev,
1442 unsigned int flags, u16 type, 1442 unsigned int flags, u16 type,
1443 bool nopolicy, bool noxfrm, bool will_cache) 1443 bool nopolicy, bool noxfrm, bool will_cache)
1444{ 1444{
1445 struct rtable *rt; 1445 struct rtable *rt;
1446 1446
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
1468 1468
1469 return rt; 1469 return rt;
1470} 1470}
1471EXPORT_SYMBOL(rt_dst_alloc);
1471 1472
1472/* called in rcu_read_lock() section */ 1473/* called in rcu_read_lock() section */
1473static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1474static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2045 */ 2046 */
2046 if (fi && res->prefixlen < 4) 2047 if (fi && res->prefixlen < 4)
2047 fi = NULL; 2048 fi = NULL;
2049 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2050 (orig_oif != dev_out->ifindex)) {
2051 /* For local routes that require a particular output interface
2052 * we do not want to cache the result. Caching the result
2053 * causes incorrect behaviour when there are multiple source
2054 * addresses on the interface, the end result being that if the
2055 * intended recipient is waiting on that interface for the
2056 * packet he won't receive it because it will be delivered on
2057 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2058 * be set to the loopback interface as well.
2059 */
2060 fi = NULL;
2048 } 2061 }
2049 2062
2050 fnhe = NULL; 2063 fnhe = NULL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6e65f79ade8..c124c3c12f7c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1309 if (skb == tcp_highest_sack(sk)) 1309 if (skb == tcp_highest_sack(sk))
1310 tcp_advance_highest_sack(sk, skb); 1310 tcp_advance_highest_sack(sk, skb);
1311 1311
1312 tcp_skb_collapse_tstamp(prev, skb);
1312 tcp_unlink_write_queue(skb, sk); 1313 tcp_unlink_write_queue(skb, sk);
1313 sk_wmem_free_skb(sk, skb); 1314 sk_wmem_free_skb(sk, skb);
1314 1315
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3098 3099
3099 shinfo = skb_shinfo(skb); 3100 shinfo = skb_shinfo(skb);
3100 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && 3101 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
3101 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) 3102 !before(shinfo->tskey, prior_snd_una) &&
3103 before(shinfo->tskey, tcp_sk(sk)->snd_una))
3102 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); 3104 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
3103} 3105}
3104 3106
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d2dc015cd19..441ae9da3a23 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
2441 return window; 2441 return window;
2442} 2442}
2443 2443
2444void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2445 const struct sk_buff *next_skb)
2446{
2447 const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
2448 u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2449
2450 if (unlikely(tsflags)) {
2451 struct skb_shared_info *shinfo = skb_shinfo(skb);
2452
2453 shinfo->tx_flags |= tsflags;
2454 shinfo->tskey = next_shinfo->tskey;
2455 }
2456}
2457
2444/* Collapses two adjacent SKB's during retransmission. */ 2458/* Collapses two adjacent SKB's during retransmission. */
2445static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 2459static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2446{ 2460{
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2484 2498
2485 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2499 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2486 2500
2501 tcp_skb_collapse_tstamp(skb, next_skb);
2502
2487 sk_wmem_free_skb(sk, next_skb); 2503 sk_wmem_free_skb(sk, next_skb);
2488} 2504}
2489 2505
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08eed5e16df0..a2e7f55a1f61 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -339,8 +339,13 @@ found:
339 339
340 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); 340 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
341 spin_lock(&hslot2->lock); 341 spin_lock(&hslot2->lock);
342 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, 342 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
343 &hslot2->head); 343 sk->sk_family == AF_INET6)
344 hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
345 &hslot2->head);
346 else
347 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
348 &hslot2->head);
344 hslot2->count++; 349 hslot2->count++;
345 spin_unlock(&hslot2->lock); 350 spin_unlock(&hslot2->lock);
346 } 351 }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 27aed1afcf81..23cec53b568a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3255,6 +3255,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3255 void *ptr) 3255 void *ptr)
3256{ 3256{
3257 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3257 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3258 struct netdev_notifier_changeupper_info *info;
3258 struct inet6_dev *idev = __in6_dev_get(dev); 3259 struct inet6_dev *idev = __in6_dev_get(dev);
3259 int run_pending = 0; 3260 int run_pending = 0;
3260 int err; 3261 int err;
@@ -3413,6 +3414,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3413 if (idev) 3414 if (idev)
3414 addrconf_type_change(dev, event); 3415 addrconf_type_change(dev, event);
3415 break; 3416 break;
3417
3418 case NETDEV_CHANGEUPPER:
3419 info = ptr;
3420
3421 /* flush all routes if dev is linked to or unlinked from
3422 * an L3 master device (e.g., VRF)
3423 */
3424 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3425 addrconf_ifdown(dev, 0);
3416 } 3426 }
3417 3427
3418 return NOTIFY_OK; 3428 return NOTIFY_OK;
@@ -3438,6 +3448,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
3438 ipv6_mc_unmap(idev); 3448 ipv6_mc_unmap(idev);
3439} 3449}
3440 3450
3451static bool addr_is_local(const struct in6_addr *addr)
3452{
3453 return ipv6_addr_type(addr) &
3454 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3455}
3456
3441static int addrconf_ifdown(struct net_device *dev, int how) 3457static int addrconf_ifdown(struct net_device *dev, int how)
3442{ 3458{
3443 struct net *net = dev_net(dev); 3459 struct net *net = dev_net(dev);
@@ -3495,7 +3511,8 @@ restart:
3495 * address is retained on a down event 3511 * address is retained on a down event
3496 */ 3512 */
3497 if (!keep_addr || 3513 if (!keep_addr ||
3498 !(ifa->flags & IFA_F_PERMANENT)) { 3514 !(ifa->flags & IFA_F_PERMANENT) ||
3515 addr_is_local(&ifa->addr)) {
3499 hlist_del_init_rcu(&ifa->addr_lst); 3516 hlist_del_init_rcu(&ifa->addr_lst);
3500 goto restart; 3517 goto restart;
3501 } 3518 }
@@ -3544,7 +3561,8 @@ restart:
3544 write_unlock_bh(&idev->lock); 3561 write_unlock_bh(&idev->lock);
3545 spin_lock_bh(&ifa->lock); 3562 spin_lock_bh(&ifa->lock);
3546 3563
3547 if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) { 3564 if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3565 !addr_is_local(&ifa->addr)) {
3548 /* set state to skip the notifier below */ 3566 /* set state to skip the notifier below */
3549 state = INET6_IFADDR_STATE_DEAD; 3567 state = INET6_IFADDR_STATE_DEAD;
3550 ifa->state = 0; 3568 ifa->state = 0;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 428162155280..9dd3882fe6bf 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
41} 41}
42 42
43static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
44{
45 struct inet_sock *inet = inet_sk(sk);
46 struct ipv6_pinfo *np = inet6_sk(sk);
47
48 memset(fl6, 0, sizeof(*fl6));
49 fl6->flowi6_proto = sk->sk_protocol;
50 fl6->daddr = sk->sk_v6_daddr;
51 fl6->saddr = np->saddr;
52 fl6->flowi6_oif = sk->sk_bound_dev_if;
53 fl6->flowi6_mark = sk->sk_mark;
54 fl6->fl6_dport = inet->inet_dport;
55 fl6->fl6_sport = inet->inet_sport;
56 fl6->flowlabel = np->flow_label;
57
58 if (!fl6->flowi6_oif)
59 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
60
61 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
62 fl6->flowi6_oif = np->mcast_oif;
63
64 security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
65}
66
67int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
68{
69 struct ip6_flowlabel *flowlabel = NULL;
70 struct in6_addr *final_p, final;
71 struct ipv6_txoptions *opt;
72 struct dst_entry *dst;
73 struct inet_sock *inet = inet_sk(sk);
74 struct ipv6_pinfo *np = inet6_sk(sk);
75 struct flowi6 fl6;
76 int err = 0;
77
78 if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
79 flowlabel = fl6_sock_lookup(sk, np->flow_label);
80 if (!flowlabel)
81 return -EINVAL;
82 }
83 ip6_datagram_flow_key_init(&fl6, sk);
84
85 rcu_read_lock();
86 opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
87 final_p = fl6_update_dst(&fl6, opt, &final);
88 rcu_read_unlock();
89
90 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
91 if (IS_ERR(dst)) {
92 err = PTR_ERR(dst);
93 goto out;
94 }
95
96 if (fix_sk_saddr) {
97 if (ipv6_addr_any(&np->saddr))
98 np->saddr = fl6.saddr;
99
100 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
101 sk->sk_v6_rcv_saddr = fl6.saddr;
102 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
103 if (sk->sk_prot->rehash)
104 sk->sk_prot->rehash(sk);
105 }
106 }
107
108 ip6_dst_store(sk, dst,
109 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
110 &sk->sk_v6_daddr : NULL,
111#ifdef CONFIG_IPV6_SUBTREES
112 ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
113 &np->saddr :
114#endif
115 NULL);
116
117out:
118 fl6_sock_release(flowlabel);
119 return err;
120}
121
122void ip6_datagram_release_cb(struct sock *sk)
123{
124 struct dst_entry *dst;
125
126 if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
127 return;
128
129 rcu_read_lock();
130 dst = __sk_dst_get(sk);
131 if (!dst || !dst->obsolete ||
132 dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
133 rcu_read_unlock();
134 return;
135 }
136 rcu_read_unlock();
137
138 ip6_datagram_dst_update(sk, false);
139}
140EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
141
43static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 142static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
44{ 143{
45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 144 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
46 struct inet_sock *inet = inet_sk(sk); 145 struct inet_sock *inet = inet_sk(sk);
47 struct ipv6_pinfo *np = inet6_sk(sk); 146 struct ipv6_pinfo *np = inet6_sk(sk);
48 struct in6_addr *daddr, *final_p, final; 147 struct in6_addr *daddr;
49 struct dst_entry *dst;
50 struct flowi6 fl6;
51 struct ip6_flowlabel *flowlabel = NULL;
52 struct ipv6_txoptions *opt;
53 int addr_type; 148 int addr_type;
54 int err; 149 int err;
150 __be32 fl6_flowlabel = 0;
55 151
56 if (usin->sin6_family == AF_INET) { 152 if (usin->sin6_family == AF_INET) {
57 if (__ipv6_only_sock(sk)) 153 if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
66 if (usin->sin6_family != AF_INET6) 162 if (usin->sin6_family != AF_INET6)
67 return -EAFNOSUPPORT; 163 return -EAFNOSUPPORT;
68 164
69 memset(&fl6, 0, sizeof(fl6)); 165 if (np->sndflow)
70 if (np->sndflow) { 166 fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
71 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
72 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
73 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
74 if (!flowlabel)
75 return -EINVAL;
76 }
77 }
78 167
79 addr_type = ipv6_addr_type(&usin->sin6_addr); 168 addr_type = ipv6_addr_type(&usin->sin6_addr);
80 169
@@ -145,7 +234,7 @@ ipv4_connected:
145 } 234 }
146 235
147 sk->sk_v6_daddr = *daddr; 236 sk->sk_v6_daddr = *daddr;
148 np->flow_label = fl6.flowlabel; 237 np->flow_label = fl6_flowlabel;
149 238
150 inet->inet_dport = usin->sin6_port; 239 inet->inet_dport = usin->sin6_port;
151 240
@@ -154,59 +243,13 @@ ipv4_connected:
154 * destination cache for it. 243 * destination cache for it.
155 */ 244 */
156 245
157 fl6.flowi6_proto = sk->sk_protocol; 246 err = ip6_datagram_dst_update(sk, true);
158 fl6.daddr = sk->sk_v6_daddr; 247 if (err)
159 fl6.saddr = np->saddr;
160 fl6.flowi6_oif = sk->sk_bound_dev_if;
161 fl6.flowi6_mark = sk->sk_mark;
162 fl6.fl6_dport = inet->inet_dport;
163 fl6.fl6_sport = inet->inet_sport;
164
165 if (!fl6.flowi6_oif)
166 fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
167
168 if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
169 fl6.flowi6_oif = np->mcast_oif;
170
171 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
172
173 rcu_read_lock();
174 opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
175 final_p = fl6_update_dst(&fl6, opt, &final);
176 rcu_read_unlock();
177
178 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
179 err = 0;
180 if (IS_ERR(dst)) {
181 err = PTR_ERR(dst);
182 goto out; 248 goto out;
183 }
184
185 /* source address lookup done in ip6_dst_lookup */
186
187 if (ipv6_addr_any(&np->saddr))
188 np->saddr = fl6.saddr;
189
190 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
191 sk->sk_v6_rcv_saddr = fl6.saddr;
192 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
193 if (sk->sk_prot->rehash)
194 sk->sk_prot->rehash(sk);
195 }
196
197 ip6_dst_store(sk, dst,
198 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
199 &sk->sk_v6_daddr : NULL,
200#ifdef CONFIG_IPV6_SUBTREES
201 ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
202 &np->saddr :
203#endif
204 NULL);
205 249
206 sk->sk_state = TCP_ESTABLISHED; 250 sk->sk_state = TCP_ESTABLISHED;
207 sk_set_txhash(sk); 251 sk_set_txhash(sk);
208out: 252out:
209 fl6_sock_release(flowlabel);
210 return err; 253 return err;
211} 254}
212 255
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ed446639219c..d916d6ab9ad2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
338 return rt; 338 return rt;
339} 339}
340 340
341static struct rt6_info *ip6_dst_alloc(struct net *net, 341struct rt6_info *ip6_dst_alloc(struct net *net,
342 struct net_device *dev, 342 struct net_device *dev,
343 int flags) 343 int flags)
344{ 344{
345 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); 345 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
346 346
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
364 364
365 return rt; 365 return rt;
366} 366}
367EXPORT_SYMBOL(ip6_dst_alloc);
367 368
368static void ip6_dst_destroy(struct dst_entry *dst) 369static void ip6_dst_destroy(struct dst_entry *dst)
369{ 370{
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1417 1418
1418void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) 1419void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1419{ 1420{
1421 struct dst_entry *dst;
1422
1420 ip6_update_pmtu(skb, sock_net(sk), mtu, 1423 ip6_update_pmtu(skb, sock_net(sk), mtu,
1421 sk->sk_bound_dev_if, sk->sk_mark); 1424 sk->sk_bound_dev_if, sk->sk_mark);
1425
1426 dst = __sk_dst_get(sk);
1427 if (!dst || !dst->obsolete ||
1428 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
1429 return;
1430
1431 bh_lock_sock(sk);
1432 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
1433 ip6_datagram_dst_update(sk, false);
1434 bh_unlock_sock(sk);
1422} 1435}
1423EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); 1436EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1424 1437
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8125931106be..6bc5c664fa46 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
1539 .sendmsg = udpv6_sendmsg, 1539 .sendmsg = udpv6_sendmsg,
1540 .recvmsg = udpv6_recvmsg, 1540 .recvmsg = udpv6_recvmsg,
1541 .backlog_rcv = __udpv6_queue_rcv_skb, 1541 .backlog_rcv = __udpv6_queue_rcv_skb,
1542 .release_cb = ip6_datagram_release_cb,
1542 .hash = udp_lib_hash, 1543 .hash = udp_lib_hash,
1543 .unhash = udp_lib_unhash, 1544 .unhash = udp_lib_unhash,
1544 .rehash = udp_v6_rehash, 1545 .rehash = udp_v6_rehash,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 278f3b9356ef..7cc1d9c22a9f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
410 length--; 410 length--;
411 continue; 411 continue;
412 default: 412 default:
413 if (length < 2)
414 return;
413 opsize=*ptr++; 415 opsize=*ptr++;
414 if (opsize < 2) /* "silly options" */ 416 if (opsize < 2) /* "silly options" */
415 return; 417 return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
470 length--; 472 length--;
471 continue; 473 continue;
472 default: 474 default:
475 if (length < 2)
476 return;
473 opsize = *ptr++; 477 opsize = *ptr++;
474 if (opsize < 2) /* "silly options" */ 478 if (opsize < 2) /* "silly options" */
475 return; 479 return;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 215fc08c02ab..330ebd600f25 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
688 688
689 skb_queue_purge(&sk->sk_write_queue); 689 skb_queue_purge(&sk->sk_write_queue);
690 690
691 if (nlk->portid) { 691 if (nlk->portid && nlk->bound) {
692 struct netlink_notify n = { 692 struct netlink_notify n = {
693 .net = sock_net(sk), 693 .net = sock_net(sk),
694 .protocol = sk->sk_protocol, 694 .protocol = sk->sk_protocol,
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e9dd47b2a85b..879185fe183f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
461 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); 461 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
462 462
463 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) { 463 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
464 set_ipv6_addr(skb, key->ipv6_proto, saddr, masked, 464 set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
465 true); 465 true);
466 memcpy(&flow_key->ipv6.addr.src, masked, 466 memcpy(&flow_key->ipv6.addr.src, masked,
467 sizeof(flow_key->ipv6.addr.src)); 467 sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
483 NULL, &flags) 483 NULL, &flags)
484 != NEXTHDR_ROUTING); 484 != NEXTHDR_ROUTING);
485 485
486 set_ipv6_addr(skb, key->ipv6_proto, daddr, masked, 486 set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
487 recalc_csum); 487 recalc_csum);
488 memcpy(&flow_key->ipv6.addr.dst, masked, 488 memcpy(&flow_key->ipv6.addr.dst, masked,
489 sizeof(flow_key->ipv6.addr.dst)); 489 sizeof(flow_key->ipv6.addr.dst));
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 1b9d286756be..b5fea1101faa 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
367 } else if (key->eth.type == htons(ETH_P_IPV6)) { 367 } else if (key->eth.type == htons(ETH_P_IPV6)) {
368 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; 368 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
369 369
370 skb_orphan(skb);
370 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 371 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
371 err = nf_ct_frag6_gather(net, skb, user); 372 err = nf_ct_frag6_gather(net, skb, user);
372 if (err) 373 if (err)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f12c17f355d9..18d0becbc46d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3521 i->ifindex = mreq->mr_ifindex; 3521 i->ifindex = mreq->mr_ifindex;
3522 i->alen = mreq->mr_alen; 3522 i->alen = mreq->mr_alen;
3523 memcpy(i->addr, mreq->mr_address, i->alen); 3523 memcpy(i->addr, mreq->mr_address, i->alen);
3524 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3524 i->count = 1; 3525 i->count = 1;
3525 i->next = po->mclist; 3526 i->next = po->mclist;
3526 po->mclist = i; 3527 po->mclist = i;
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b8246fd..6641bcf7c185 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
299 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 299 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
300 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 300 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
301 301
302 __set_bit_le(off, (void *)map->m_page_addrs[i]); 302 set_bit_le(off, (void *)map->m_page_addrs[i]);
303} 303}
304 304
305void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) 305void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
313 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 313 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
314 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 314 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
315 315
316 __clear_bit_le(off, (void *)map->m_page_addrs[i]); 316 clear_bit_le(off, (void *)map->m_page_addrs[i]);
317} 317}
318 318
319static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) 319static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 8764970f0c24..310cabce2311 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
194 dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version); 194 dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
195 dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version); 195 dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
196 dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); 196 dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
197 dp->dp_ack_seq = rds_ib_piggyb_ack(ic); 197 dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
198 198
199 /* Advertise flow control */ 199 /* Advertise flow control */
200 if (ic->i_flowctl) { 200 if (ic->i_flowctl) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f18c35024207..80742edea96f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
159 if (validate) 159 if (validate)
160 skb = validate_xmit_skb_list(skb, dev); 160 skb = validate_xmit_skb_list(skb, dev);
161 161
162 if (skb) { 162 if (likely(skb)) {
163 HARD_TX_LOCK(dev, txq, smp_processor_id()); 163 HARD_TX_LOCK(dev, txq, smp_processor_id());
164 if (!netif_xmit_frozen_or_stopped(txq)) 164 if (!netif_xmit_frozen_or_stopped(txq))
165 skb = dev_hard_start_xmit(skb, dev, txq, &ret); 165 skb = dev_hard_start_xmit(skb, dev, txq, &ret);
166 166
167 HARD_TX_UNLOCK(dev, txq); 167 HARD_TX_UNLOCK(dev, txq);
168 } else {
169 spin_lock(root_lock);
170 return qdisc_qlen(q);
168 } 171 }
169 spin_lock(root_lock); 172 spin_lock(root_lock);
170 173
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 8d3d3625130e..084718f9b3da 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
866 * sender MUST assure that at least one T3-rtx 866 * sender MUST assure that at least one T3-rtx
867 * timer is running. 867 * timer is running.
868 */ 868 */
869 if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) 869 if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
870 sctp_transport_reset_timers(transport); 870 sctp_transport_reset_t3_rtx(transport);
871 transport->last_time_sent = jiffies;
872 }
871 } 873 }
872 break; 874 break;
873 875
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
924 error = sctp_outq_flush_rtx(q, packet, 926 error = sctp_outq_flush_rtx(q, packet,
925 rtx_timeout, &start_timer); 927 rtx_timeout, &start_timer);
926 928
927 if (start_timer) 929 if (start_timer) {
928 sctp_transport_reset_timers(transport); 930 sctp_transport_reset_t3_rtx(transport);
931 transport->last_time_sent = jiffies;
932 }
929 933
930 /* This can happen on COOKIE-ECHO resend. Only 934 /* This can happen on COOKIE-ECHO resend. Only
931 * one chunk can get bundled with a COOKIE-ECHO. 935 * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1062 list_add_tail(&chunk->transmitted_list, 1066 list_add_tail(&chunk->transmitted_list,
1063 &transport->transmitted); 1067 &transport->transmitted);
1064 1068
1065 sctp_transport_reset_timers(transport); 1069 sctp_transport_reset_t3_rtx(transport);
1070 transport->last_time_sent = jiffies;
1066 1071
1067 /* Only let one DATA chunk get bundled with a 1072 /* Only let one DATA chunk get bundled with a
1068 * COOKIE-ECHO chunk. 1073 * COOKIE-ECHO chunk.
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7f0bf798205b..56f364d8f932 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
3080 return SCTP_ERROR_RSRC_LOW; 3080 return SCTP_ERROR_RSRC_LOW;
3081 3081
3082 /* Start the heartbeat timer. */ 3082 /* Start the heartbeat timer. */
3083 if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) 3083 sctp_transport_reset_hb_timer(peer);
3084 sctp_transport_hold(peer);
3085 asoc->new_transport = peer; 3084 asoc->new_transport = peer;
3086 break; 3085 break;
3087 case SCTP_PARAM_DEL_IP: 3086 case SCTP_PARAM_DEL_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 7fe56d0acabf..41b081a64752 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
69 sctp_cmd_seq_t *commands, 69 sctp_cmd_seq_t *commands,
70 gfp_t gfp); 70 gfp_t gfp);
71 71
72static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
73 struct sctp_transport *t);
74/******************************************************************** 72/********************************************************************
75 * Helper functions 73 * Helper functions
76 ********************************************************************/ 74 ********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
367 struct sctp_association *asoc = transport->asoc; 365 struct sctp_association *asoc = transport->asoc;
368 struct sock *sk = asoc->base.sk; 366 struct sock *sk = asoc->base.sk;
369 struct net *net = sock_net(sk); 367 struct net *net = sock_net(sk);
368 u32 elapsed, timeout;
370 369
371 bh_lock_sock(sk); 370 bh_lock_sock(sk);
372 if (sock_owned_by_user(sk)) { 371 if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data)
378 goto out_unlock; 377 goto out_unlock;
379 } 378 }
380 379
380 /* Check if we should still send the heartbeat or reschedule */
381 elapsed = jiffies - transport->last_time_sent;
382 timeout = sctp_transport_timeout(transport);
383 if (elapsed < timeout) {
384 elapsed = timeout - elapsed;
385 if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
386 sctp_transport_hold(transport);
387 goto out_unlock;
388 }
389
381 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, 390 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
382 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), 391 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
383 asoc->state, asoc->ep, asoc, 392 asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
507 0); 516 0);
508 517
509 /* Update the hb timer to resend a heartbeat every rto */ 518 /* Update the hb timer to resend a heartbeat every rto */
510 sctp_cmd_hb_timer_update(commands, transport); 519 sctp_transport_reset_hb_timer(transport);
511 } 520 }
512 521
513 if (transport->state != SCTP_INACTIVE && 522 if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
634 * hold a reference on the transport to make sure none of 643 * hold a reference on the transport to make sure none of
635 * the needed data structures go away. 644 * the needed data structures go away.
636 */ 645 */
637 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { 646 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
638 647 sctp_transport_reset_hb_timer(t);
639 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
640 sctp_transport_hold(t);
641 }
642} 648}
643 649
644static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, 650static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
669} 675}
670 676
671 677
672/* Helper function to update the heartbeat timer. */
673static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
674 struct sctp_transport *t)
675{
676 /* Update the heartbeat timer. */
677 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
678 sctp_transport_hold(t);
679}
680
681/* Helper function to handle the reception of an HEARTBEAT ACK. */ 678/* Helper function to handle the reception of an HEARTBEAT ACK. */
682static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, 679static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
683 struct sctp_association *asoc, 680 struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
742 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); 739 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
743 740
744 /* Update the heartbeat timer. */ 741 /* Update the heartbeat timer. */
745 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 742 sctp_transport_reset_hb_timer(t);
746 sctp_transport_hold(t);
747 743
748 if (was_unconfirmed && asoc->peer.transport_count == 1) 744 if (was_unconfirmed && asoc->peer.transport_count == 1)
749 sctp_transport_immediate_rtx(t); 745 sctp_transport_immediate_rtx(t);
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1614 1610
1615 case SCTP_CMD_HB_TIMER_UPDATE: 1611 case SCTP_CMD_HB_TIMER_UPDATE:
1616 t = cmd->obj.transport; 1612 t = cmd->obj.transport;
1617 sctp_cmd_hb_timer_update(commands, t); 1613 sctp_transport_reset_hb_timer(t);
1618 break; 1614 break;
1619 1615
1620 case SCTP_CMD_HB_TIMERS_STOP: 1616 case SCTP_CMD_HB_TIMERS_STOP:
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 9b6b48c7524e..81b86678be4d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
183/* Start T3_rtx timer if it is not already running and update the heartbeat 183/* Start T3_rtx timer if it is not already running and update the heartbeat
184 * timer. This routine is called every time a DATA chunk is sent. 184 * timer. This routine is called every time a DATA chunk is sent.
185 */ 185 */
186void sctp_transport_reset_timers(struct sctp_transport *transport) 186void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
187{ 187{
188 /* RFC 2960 6.3.2 Retransmission Timer Rules 188 /* RFC 2960 6.3.2 Retransmission Timer Rules
189 * 189 *
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
197 if (!mod_timer(&transport->T3_rtx_timer, 197 if (!mod_timer(&transport->T3_rtx_timer,
198 jiffies + transport->rto)) 198 jiffies + transport->rto))
199 sctp_transport_hold(transport); 199 sctp_transport_hold(transport);
200}
201
202void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
203{
204 unsigned long expires;
200 205
201 /* When a data chunk is sent, reset the heartbeat interval. */ 206 /* When a data chunk is sent, reset the heartbeat interval. */
202 if (!mod_timer(&transport->hb_timer, 207 expires = jiffies + sctp_transport_timeout(transport);
203 sctp_transport_timeout(transport))) 208 if (time_before(transport->hb_timer.expires, expires) &&
204 sctp_transport_hold(transport); 209 !mod_timer(&transport->hb_timer,
210 expires + prandom_u32_max(transport->rto)))
211 sctp_transport_hold(transport);
205} 212}
206 213
207/* This transport has been assigned to an association. 214/* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
595unsigned long sctp_transport_timeout(struct sctp_transport *trans) 602unsigned long sctp_transport_timeout(struct sctp_transport *trans)
596{ 603{
597 /* RTO + timer slack +/- 50% of RTO */ 604 /* RTO + timer slack +/- 50% of RTO */
598 unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto); 605 unsigned long timeout = trans->rto >> 1;
599 606
600 if (trans->state != SCTP_UNCONFIRMED && 607 if (trans->state != SCTP_UNCONFIRMED &&
601 trans->state != SCTP_PF) 608 trans->state != SCTP_PF)
602 timeout += trans->hbinterval; 609 timeout += trans->hbinterval;
603 610
604 return timeout + jiffies; 611 return timeout;
605} 612}
606 613
607/* Reset transport variables to their initial values */ 614/* Reset transport variables to their initial values */
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a842870c52..e2bdb07a49a2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
69 if (err) 69 if (err)
70 goto out_nametbl; 70 goto out_nametbl;
71 71
72 INIT_LIST_HEAD(&tn->dist_queue);
72 err = tipc_topsrv_start(net); 73 err = tipc_topsrv_start(net);
73 if (err) 74 if (err)
74 goto out_subscr; 75 goto out_subscr;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5504d63503df..eff58dc53aa1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@ struct tipc_net {
103 spinlock_t nametbl_lock; 103 spinlock_t nametbl_lock;
104 struct name_table *nametbl; 104 struct name_table *nametbl;
105 105
106 /* Name dist queue */
107 struct list_head dist_queue;
108
106 /* Topology subscription server */ 109 /* Topology subscription server */
107 struct tipc_server *topsrv; 110 struct tipc_server *topsrv;
108 atomic_t subscription_count; 111 atomic_t subscription_count;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index ebe9d0ff6e9e..6b626a64b517 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
40 40
41int sysctl_tipc_named_timeout __read_mostly = 2000; 41int sysctl_tipc_named_timeout __read_mostly = 2000;
42 42
43/**
44 * struct tipc_dist_queue - queue holding deferred name table updates
45 */
46static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
47
48struct distr_queue_item { 43struct distr_queue_item {
49 struct distr_item i; 44 struct distr_item i;
50 u32 dtype; 45 u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
229 kfree_rcu(p, rcu); 224 kfree_rcu(p, rcu);
230} 225}
231 226
227/**
228 * tipc_dist_queue_purge - remove deferred updates from a node that went down
229 */
230static void tipc_dist_queue_purge(struct net *net, u32 addr)
231{
232 struct tipc_net *tn = net_generic(net, tipc_net_id);
233 struct distr_queue_item *e, *tmp;
234
235 spin_lock_bh(&tn->nametbl_lock);
236 list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
237 if (e->node != addr)
238 continue;
239 list_del(&e->next);
240 kfree(e);
241 }
242 spin_unlock_bh(&tn->nametbl_lock);
243}
244
232void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) 245void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
233{ 246{
234 struct publication *publ, *tmp; 247 struct publication *publ, *tmp;
235 248
236 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) 249 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
237 tipc_publ_purge(net, publ, addr); 250 tipc_publ_purge(net, publ, addr);
251 tipc_dist_queue_purge(net, addr);
238} 252}
239 253
240/** 254/**
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
279 * tipc_named_add_backlog - add a failed name table update to the backlog 293 * tipc_named_add_backlog - add a failed name table update to the backlog
280 * 294 *
281 */ 295 */
282static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) 296static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
297 u32 type, u32 node)
283{ 298{
284 struct distr_queue_item *e; 299 struct distr_queue_item *e;
300 struct tipc_net *tn = net_generic(net, tipc_net_id);
285 unsigned long now = get_jiffies_64(); 301 unsigned long now = get_jiffies_64();
286 302
287 e = kzalloc(sizeof(*e), GFP_ATOMIC); 303 e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
291 e->node = node; 307 e->node = node;
292 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); 308 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
293 memcpy(e, i, sizeof(*i)); 309 memcpy(e, i, sizeof(*i));
294 list_add_tail(&e->next, &tipc_dist_queue); 310 list_add_tail(&e->next, &tn->dist_queue);
295} 311}
296 312
297/** 313/**
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
301void tipc_named_process_backlog(struct net *net) 317void tipc_named_process_backlog(struct net *net)
302{ 318{
303 struct distr_queue_item *e, *tmp; 319 struct distr_queue_item *e, *tmp;
320 struct tipc_net *tn = net_generic(net, tipc_net_id);
304 char addr[16]; 321 char addr[16];
305 unsigned long now = get_jiffies_64(); 322 unsigned long now = get_jiffies_64();
306 323
307 list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { 324 list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
308 if (time_after(e->expires, now)) { 325 if (time_after(e->expires, now)) {
309 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) 326 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
310 continue; 327 continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
344 node = msg_orignode(msg); 361 node = msg_orignode(msg);
345 while (count--) { 362 while (count--) {
346 if (!tipc_update_nametbl(net, item, node, mtype)) 363 if (!tipc_update_nametbl(net, item, node, mtype))
347 tipc_named_add_backlog(item, mtype, node); 364 tipc_named_add_backlog(net, item, mtype, node);
348 item++; 365 item++;
349 } 366 }
350 kfree_skb(skb); 367 kfree_skb(skb);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 662bdd20a748..56214736fe88 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
1735 /* Retrieve the head sk_buff from the socket's receive queue. */ 1735 /* Retrieve the head sk_buff from the socket's receive queue. */
1736 err = 0; 1736 err = 0;
1737 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); 1737 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
1738 if (err)
1739 return err;
1740
1741 if (!skb) 1738 if (!skb)
1742 return -EAGAIN; 1739 return err;
1743 1740
1744 dg = (struct vmci_datagram *)skb->data; 1741 dg = (struct vmci_datagram *)skb->data;
1745 if (!dg) 1742 if (!dg)
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
2154 2151
2155MODULE_AUTHOR("VMware, Inc."); 2152MODULE_AUTHOR("VMware, Inc.");
2156MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); 2153MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2157MODULE_VERSION("1.0.3.0-k"); 2154MODULE_VERSION("1.0.4.0-k");
2158MODULE_LICENSE("GPL v2"); 2155MODULE_LICENSE("GPL v2");
2159MODULE_ALIAS("vmware_vsock"); 2156MODULE_ALIAS("vmware_vsock");
2160MODULE_ALIAS_NETPROTO(PF_VSOCK); 2157MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98c924260b3d..056a7307862b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
13216 struct wireless_dev *wdev; 13216 struct wireless_dev *wdev;
13217 struct cfg80211_beacon_registration *reg, *tmp; 13217 struct cfg80211_beacon_registration *reg, *tmp;
13218 13218
13219 if (state != NETLINK_URELEASE) 13219 if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
13220 return NOTIFY_DONE; 13220 return NOTIFY_DONE;
13221 13221
13222 rcu_read_lock(); 13222 rcu_read_lock();