Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 3
-rw-r--r--  net/8021q/vlan_dev.c | 4
-rw-r--r--  net/9p/trans_xen.c | 8
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 5
-rw-r--r--  net/batman-adv/routing.c | 2
-rw-r--r--  net/batman-adv/soft-interface.c | 5
-rw-r--r--  net/bluetooth/6lowpan.c | 2
-rw-r--r--  net/bridge/br_device.c | 2
-rw-r--r--  net/bridge/br_netlink.c | 9
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/bridge/br_stp_timer.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_arpreply.c | 3
-rw-r--r--  net/bridge/netfilter/ebtables.c | 9
-rw-r--r--  net/caif/caif_socket.c | 4
-rw-r--r--  net/caif/cfpkt_skbuff.c | 6
-rw-r--r--  net/caif/chnl_net.c | 4
-rw-r--r--  net/can/af_can.c | 3
-rw-r--r--  net/ceph/auth_x.c | 13
-rw-r--r--  net/ceph/ceph_common.c | 13
-rw-r--r--  net/ceph/messenger.c | 26
-rw-r--r--  net/ceph/mon_client.c | 4
-rw-r--r--  net/ceph/osdmap.c | 1
-rw-r--r--  net/core/dev.c | 131
-rw-r--r--  net/core/dev_ioctl.c | 19
-rw-r--r--  net/core/devlink.c | 8
-rw-r--r--  net/core/dst.c | 37
-rw-r--r--  net/core/fib_rules.c | 21
-rw-r--r--  net/core/filter.c | 1
-rw-r--r--  net/core/neighbour.c | 14
-rw-r--r--  net/core/net_namespace.c | 19
-rw-r--r--  net/core/rtnetlink.c | 93
-rw-r--r--  net/core/skbuff.c | 5
-rw-r--r--  net/core/sock.c | 23
-rw-r--r--  net/core/sysctl_net_core.c | 2
-rw-r--r--  net/dccp/ipv6.c | 6
-rw-r--r--  net/decnet/dn_route.c | 14
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c | 4
-rw-r--r--  net/dsa/dsa.c | 47
-rw-r--r--  net/dsa/dsa2.c | 4
-rw-r--r--  net/dsa/legacy.c | 47
-rw-r--r--  net/hsr/hsr_device.c | 4
-rw-r--r--  net/hsr/hsr_forward.c | 3
-rw-r--r--  net/hsr/hsr_framereg.c | 9
-rw-r--r--  net/hsr/hsr_framereg.h | 2
-rw-r--r--  net/ieee802154/6lowpan/core.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/arp.c | 48
-rw-r--r--  net/ipv4/esp4.c | 5
-rw-r--r--  net/ipv4/fib_frontend.c | 15
-rw-r--r--  net/ipv4/fib_semantics.c | 17
-rw-r--r--  net/ipv4/fib_trie.c | 26
-rw-r--r--  net/ipv4/icmp.c | 8
-rw-r--r--  net/ipv4/igmp.c | 22
-rw-r--r--  net/ipv4/ip_output.c | 3
-rw-r--r--  net/ipv4/ip_tunnel.c | 6
-rw-r--r--  net/ipv4/ipmr.c | 52
-rw-r--r--  net/ipv4/route.c | 10
-rw-r--r--  net/ipv4/tcp.c | 19
-rw-r--r--  net/ipv4/tcp_cong.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 11
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv4/udp_impl.h | 1
-rw-r--r--  net/ipv6/addrconf.c | 16
-rw-r--r--  net/ipv6/calipso.c | 6
-rw-r--r--  net/ipv6/datagram.c | 8
-rw-r--r--  net/ipv6/esp6_offload.c | 25
-rw-r--r--  net/ipv6/fib6_rules.c | 22
-rw-r--r--  net/ipv6/icmp.c | 2
-rw-r--r--  net/ipv6/ila/ila_xlat.c | 1
-rw-r--r--  net/ipv6/ip6_fib.c | 3
-rw-r--r--  net/ipv6/ip6_gre.c | 22
-rw-r--r--  net/ipv6/ip6_offload.c | 9
-rw-r--r--  net/ipv6/ip6_output.c | 22
-rw-r--r--  net/ipv6/ip6_tunnel.c | 34
-rw-r--r--  net/ipv6/ip6_vti.c | 8
-rw-r--r--  net/ipv6/ip6mr.c | 2
-rw-r--r--  net/ipv6/output_core.c | 14
-rw-r--r--  net/ipv6/ping.c | 2
-rw-r--r--  net/ipv6/proc.c | 2
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/route.c | 7
-rw-r--r--  net/ipv6/sit.c | 8
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/udp.c | 7
-rw-r--r--  net/ipv6/udp_impl.h | 1
-rw-r--r--  net/ipv6/udp_offload.c | 6
-rw-r--r--  net/ipv6/xfrm6_input.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_ro.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_transport.c | 2
-rw-r--r--  net/irda/irlan/irlan_eth.c | 2
-rw-r--r--  net/key/af_key.c | 21
-rw-r--r--  net/l2tp/l2tp_eth.c | 15
-rw-r--r--  net/llc/af_llc.c | 3
-rw-r--r--  net/mac80211/agg-tx.c | 128
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/ht.c | 16
-rw-r--r--  net/mac80211/ieee80211_i.h | 16
-rw-r--r--  net/mac80211/iface.c | 18
-rw-r--r--  net/mac80211/mlme.c | 62
-rw-r--r--  net/mac80211/rx.c | 9
-rw-r--r--  net/mac80211/sta_info.c | 2
-rw-r--r--  net/mac80211/sta_info.h | 2
-rw-r--r--  net/mac80211/wpa.c | 9
-rw-r--r--  net/mac802154/iface.c | 7
-rw-r--r--  net/mpls/af_mpls.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 19
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 18
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 9
-rw-r--r--  net/netfilter/nf_nat_core.c | 6
-rw-r--r--  net/netfilter/nf_tables_api.c | 160
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 17
-rw-r--r--  net/netfilter/nft_bitwise.c | 19
-rw-r--r--  net/netfilter/nft_cmp.c | 12
-rw-r--r--  net/netfilter/nft_ct.c | 4
-rw-r--r--  net/netfilter/nft_immediate.c | 5
-rw-r--r--  net/netfilter/nft_range.c | 4
-rw-r--r--  net/netfilter/nft_set_hash.c | 2
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 22
-rw-r--r--  net/netfilter/x_tables.c | 24
-rw-r--r--  net/netfilter/xt_CT.c | 6
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/openvswitch/conntrack.c | 4
-rw-r--r--  net/openvswitch/vport-internal_dev.c | 4
-rw-r--r--  net/packet/af_packet.c | 14
-rw-r--r--  net/phonet/pep-gprs.c | 2
-rw-r--r--  net/rxrpc/key.c | 64
-rw-r--r--  net/sched/act_pedit.c | 4
-rw-r--r--  net/sched/act_police.c | 8
-rw-r--r--  net/sched/cls_matchall.c | 1
-rw-r--r--  net/sched/sch_api.c | 9
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/endpointola.c | 1
-rw-r--r--  net/sctp/input.c | 16
-rw-r--r--  net/sctp/ipv6.c | 49
-rw-r--r--  net/sctp/sctp_diag.c | 5
-rw-r--r--  net/sctp/sm_make_chunk.c | 13
-rw-r--r--  net/sctp/sm_statefuns.c | 3
-rw-r--r--  net/sctp/socket.c | 9
-rw-r--r--  net/smc/Kconfig | 4
-rw-r--r--  net/smc/smc_clc.c | 4
-rw-r--r--  net/smc/smc_core.c | 16
-rw-r--r--  net/smc/smc_core.h | 2
-rw-r--r--  net/smc/smc_ib.c | 21
-rw-r--r--  net/smc/smc_ib.h | 2
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c | 6
-rw-r--r--  net/sunrpc/xprtsock.c | 7
-rw-r--r--  net/tipc/msg.c | 2
-rw-r--r--  net/tipc/socket.c | 38
-rw-r--r--  net/unix/af_unix.c | 7
-rw-r--r--  net/vmw_vsock/af_vsock.c | 21
-rw-r--r--  net/wireless/scan.c | 8
-rw-r--r--  net/wireless/util.c | 10
-rw-r--r--  net/wireless/wext-core.c | 22
-rw-r--r--  net/x25/af_x25.c | 24
-rw-r--r--  net/x25/sysctl_net_x25.c | 5
-rw-r--r--  net/xfrm/Makefile | 3
-rw-r--r--  net/xfrm/xfrm_device.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 51
-rw-r--r--  net/xfrm/xfrm_state.c | 2
-rw-r--r--  net/xfrm/xfrm_user.c | 1
161 files changed, 1345 insertions(+), 914 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 467069b73ce1..9649579b5b9f 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
277 return 0; 277 return 0;
278 278
279out_free_newdev: 279out_free_newdev:
280 free_netdev(new_dev); 280 if (new_dev->reg_state == NETREG_UNINITIALIZED)
281 free_netdev(new_dev);
281 return err; 282 return err;
282} 283}
283 284
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 953b6728bd00..abc5f400fc71 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -813,7 +813,6 @@ static void vlan_dev_free(struct net_device *dev)
813 813
814 free_percpu(vlan->vlan_pcpu_stats); 814 free_percpu(vlan->vlan_pcpu_stats);
815 vlan->vlan_pcpu_stats = NULL; 815 vlan->vlan_pcpu_stats = NULL;
816 free_netdev(dev);
817} 816}
818 817
819void vlan_setup(struct net_device *dev) 818void vlan_setup(struct net_device *dev)
@@ -826,7 +825,8 @@ void vlan_setup(struct net_device *dev)
826 netif_keep_dst(dev); 825 netif_keep_dst(dev);
827 826
828 dev->netdev_ops = &vlan_netdev_ops; 827 dev->netdev_ops = &vlan_netdev_ops;
829 dev->destructor = vlan_dev_free; 828 dev->needs_free_netdev = true;
829 dev->priv_destructor = vlan_dev_free;
830 dev->ethtool_ops = &vlan_ethtool_ops; 830 dev->ethtool_ops = &vlan_ethtool_ops;
831 831
832 dev->min_mtu = 0; 832 dev->min_mtu = 0;
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 71e85643b3f9..6ad3e043c617 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -454,8 +454,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
454 goto error_xenbus; 454 goto error_xenbus;
455 } 455 }
456 priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL); 456 priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
457 if (!priv->tag) { 457 if (IS_ERR(priv->tag)) {
458 ret = -EINVAL; 458 ret = PTR_ERR(priv->tag);
459 goto error_xenbus; 459 goto error_xenbus;
460 } 460 }
461 ret = xenbus_transaction_end(xbt, 0); 461 ret = xenbus_transaction_end(xbt, 0);
@@ -525,7 +525,7 @@ static struct xenbus_driver xen_9pfs_front_driver = {
525 .otherend_changed = xen_9pfs_front_changed, 525 .otherend_changed = xen_9pfs_front_changed,
526}; 526};
527 527
528int p9_trans_xen_init(void) 528static int p9_trans_xen_init(void)
529{ 529{
530 if (!xen_domain()) 530 if (!xen_domain())
531 return -ENODEV; 531 return -ENODEV;
@@ -537,7 +537,7 @@ int p9_trans_xen_init(void)
537} 537}
538module_init(p9_trans_xen_init); 538module_init(p9_trans_xen_init);
539 539
540void p9_trans_xen_exit(void) 540static void p9_trans_xen_exit(void)
541{ 541{
542 v9fs_unregister_trans(&p9_xen_trans); 542 v9fs_unregister_trans(&p9_xen_trans);
543 return xenbus_unregister_driver(&xen_9pfs_front_driver); 543 return xenbus_unregister_driver(&xen_9pfs_front_driver);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 013e970eff39..000ca2f113ab 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1064,8 +1064,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
1064 1064
1065 skb_new->protocol = eth_type_trans(skb_new, soft_iface); 1065 skb_new->protocol = eth_type_trans(skb_new, soft_iface);
1066 1066
1067 soft_iface->stats.rx_packets++; 1067 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
1068 soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size; 1068 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
1069 skb->len + ETH_HLEN + hdr_size);
1069 1070
1070 netif_rx(skb_new); 1071 netif_rx(skb_new);
1071 batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); 1072 batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index e1ebe14ee2a6..ae9f4d37d34f 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -987,7 +987,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
987 batadv_dbg(BATADV_DBG_BLA, bat_priv, 987 batadv_dbg(BATADV_DBG_BLA, bat_priv,
988 "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n", 988 "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n",
989 orig_addr_gw); 989 orig_addr_gw);
990 return NET_RX_DROP; 990 goto free_skb;
991 } 991 }
992 } 992 }
993 993
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b25789abf7b9..10f7edfb176e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1034,8 +1034,6 @@ static void batadv_softif_free(struct net_device *dev)
1034 * netdev and its private data (bat_priv) 1034 * netdev and its private data (bat_priv)
1035 */ 1035 */
1036 rcu_barrier(); 1036 rcu_barrier();
1037
1038 free_netdev(dev);
1039} 1037}
1040 1038
1041/** 1039/**
@@ -1047,7 +1045,8 @@ static void batadv_softif_init_early(struct net_device *dev)
1047 ether_setup(dev); 1045 ether_setup(dev);
1048 1046
1049 dev->netdev_ops = &batadv_netdev_ops; 1047 dev->netdev_ops = &batadv_netdev_ops;
1050 dev->destructor = batadv_softif_free; 1048 dev->needs_free_netdev = true;
1049 dev->priv_destructor = batadv_softif_free;
1051 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; 1050 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
1052 dev->priv_flags |= IFF_NO_QUEUE; 1051 dev->priv_flags |= IFF_NO_QUEUE;
1053 1052
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 608959989f8e..ab3b654b05cc 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -598,7 +598,7 @@ static void netdev_setup(struct net_device *dev)
598 598
599 dev->netdev_ops = &netdev_ops; 599 dev->netdev_ops = &netdev_ops;
600 dev->header_ops = &header_ops; 600 dev->header_ops = &header_ops;
601 dev->destructor = free_netdev; 601 dev->needs_free_netdev = true;
602} 602}
603 603
604static struct device_type bt_type = { 604static struct device_type bt_type = {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 430b53e7d941..f0f3447e8aa4 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev)
379 ether_setup(dev); 379 ether_setup(dev);
380 380
381 dev->netdev_ops = &br_netdev_ops; 381 dev->netdev_ops = &br_netdev_ops;
382 dev->destructor = free_netdev; 382 dev->needs_free_netdev = true;
383 dev->ethtool_ops = &br_ethtool_ops; 383 dev->ethtool_ops = &br_ethtool_ops;
384 SET_NETDEV_DEVTYPE(dev, &br_type); 384 SET_NETDEV_DEVTYPE(dev, &br_type);
385 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; 385 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index c5ce7745b230..32bd3ead9ba1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
595 err = 0; 595 err = 0;
596 switch (nla_type(attr)) { 596 switch (nla_type(attr)) {
597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO: 597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
598 if (!(p->flags & BR_VLAN_TUNNEL)) 598 if (!p || !(p->flags & BR_VLAN_TUNNEL))
599 return -EINVAL; 599 return -EINVAL;
600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); 600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
601 if (err) 601 if (err)
@@ -835,6 +835,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
835 return -EPROTONOSUPPORT; 835 return -EPROTONOSUPPORT;
836 } 836 }
837 } 837 }
838
839 if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
840 __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
841
842 if (defpvid >= VLAN_VID_MASK)
843 return -EINVAL;
844 }
838#endif 845#endif
839 846
840 return 0; 847 return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 08341d2aa9c9..6f12a5271219 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -179,6 +179,8 @@ static void br_stp_start(struct net_bridge *br)
179 br_debug(br, "using kernel STP\n"); 179 br_debug(br, "using kernel STP\n");
180 180
181 /* To start timers on any ports left in blocking */ 181 /* To start timers on any ports left in blocking */
182 if (br->dev->flags & IFF_UP)
183 mod_timer(&br->hello_timer, jiffies + br->hello_time);
182 br_port_state_selection(br); 184 br_port_state_selection(br);
183 } 185 }
184 186
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index c98b3e5c140a..60b6fe277a8b 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg)
40 if (br->dev->flags & IFF_UP) { 40 if (br->dev->flags & IFF_UP) {
41 br_config_bpdu_generation(br); 41 br_config_bpdu_generation(br);
42 42
43 if (br->stp_enabled != BR_USER_STP) 43 if (br->stp_enabled == BR_KERNEL_STP)
44 mod_timer(&br->hello_timer, 44 mod_timer(&br->hello_timer,
45 round_jiffies(jiffies + br->hello_time)); 45 round_jiffies(jiffies + br->hello_time));
46 } 46 }
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 5929309beaa1..db85230e49c3 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -68,6 +68,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
68 if (e->ethproto != htons(ETH_P_ARP) || 68 if (e->ethproto != htons(ETH_P_ARP) ||
69 e->invflags & EBT_IPROTO) 69 e->invflags & EBT_IPROTO)
70 return -EINVAL; 70 return -EINVAL;
71 if (ebt_invalid_target(info->target))
72 return -EINVAL;
73
71 return 0; 74 return 0;
72} 75}
73 76
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 9ec0c9f908fa..9c6e619f452b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1373,7 +1373,8 @@ static inline int ebt_obj_to_user(char __user *um, const char *_name,
1373 strlcpy(name, _name, sizeof(name)); 1373 strlcpy(name, _name, sizeof(name));
1374 if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || 1374 if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) ||
1375 put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || 1375 put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) ||
1376 xt_data_to_user(um + entrysize, data, usersize, datasize)) 1376 xt_data_to_user(um + entrysize, data, usersize, datasize,
1377 XT_ALIGN(datasize)))
1377 return -EFAULT; 1378 return -EFAULT;
1378 1379
1379 return 0; 1380 return 0;
@@ -1658,7 +1659,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1658 if (match->compat_to_user(cm->data, m->data)) 1659 if (match->compat_to_user(cm->data, m->data))
1659 return -EFAULT; 1660 return -EFAULT;
1660 } else { 1661 } else {
1661 if (xt_data_to_user(cm->data, m->data, match->usersize, msize)) 1662 if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
1663 COMPAT_XT_ALIGN(msize)))
1662 return -EFAULT; 1664 return -EFAULT;
1663 } 1665 }
1664 1666
@@ -1687,7 +1689,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
1687 if (target->compat_to_user(cm->data, t->data)) 1689 if (target->compat_to_user(cm->data, t->data))
1688 return -EFAULT; 1690 return -EFAULT;
1689 } else { 1691 } else {
1690 if (xt_data_to_user(cm->data, t->data, target->usersize, tsize)) 1692 if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
1693 COMPAT_XT_ALIGN(tsize)))
1691 return -EFAULT; 1694 return -EFAULT;
1692 } 1695 }
1693 1696
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index adcad344c843..21f18ea2fce4 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -754,6 +754,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
754 754
755 lock_sock(sk); 755 lock_sock(sk);
756 756
757 err = -EINVAL;
758 if (addr_len < offsetofend(struct sockaddr, sa_family))
759 goto out;
760
757 err = -EAFNOSUPPORT; 761 err = -EAFNOSUPPORT;
758 if (uaddr->sa_family != AF_CAIF) 762 if (uaddr->sa_family != AF_CAIF)
759 goto out; 763 goto out;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fcc220c..71b6ab240dea 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
81{ 81{
82 struct sk_buff *skb; 82 struct sk_buff *skb;
83 83
84 if (likely(in_interrupt())) 84 skb = alloc_skb(len + pfx, GFP_ATOMIC);
85 skb = alloc_skb(len + pfx, GFP_ATOMIC);
86 else
87 skb = alloc_skb(len + pfx, GFP_KERNEL);
88
89 if (unlikely(skb == NULL)) 85 if (unlikely(skb == NULL))
90 return NULL; 86 return NULL;
91 87
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 1816fc9f1ee7..fe3c53efb949 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev)
392{ 392{
393 struct chnl_net *priv = netdev_priv(dev); 393 struct chnl_net *priv = netdev_priv(dev);
394 caif_free_client(&priv->chnl); 394 caif_free_client(&priv->chnl);
395 free_netdev(dev);
396} 395}
397 396
398static void ipcaif_net_setup(struct net_device *dev) 397static void ipcaif_net_setup(struct net_device *dev)
399{ 398{
400 struct chnl_net *priv; 399 struct chnl_net *priv;
401 dev->netdev_ops = &netdev_ops; 400 dev->netdev_ops = &netdev_ops;
402 dev->destructor = chnl_net_destructor; 401 dev->needs_free_netdev = true;
402 dev->priv_destructor = chnl_net_destructor;
403 dev->flags |= IFF_NOARP; 403 dev->flags |= IFF_NOARP;
404 dev->flags |= IFF_POINTOPOINT; 404 dev->flags |= IFF_POINTOPOINT;
405 dev->mtu = GPRS_PDP_MTU; 405 dev->mtu = GPRS_PDP_MTU;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index b6406fe33c76..88edac0f3e36 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -872,8 +872,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
872 872
873static int can_pernet_init(struct net *net) 873static int can_pernet_init(struct net *net)
874{ 874{
875 net->can.can_rcvlists_lock = 875 spin_lock_init(&net->can.can_rcvlists_lock);
876 __SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock);
877 net->can.can_rx_alldev_list = 876 net->can.can_rx_alldev_list =
878 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); 877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
879 878
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 2034fb926670..8757fb87dab8 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -151,7 +151,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
151 struct timespec validity; 151 struct timespec validity;
152 void *tp, *tpend; 152 void *tp, *tpend;
153 void **ptp; 153 void **ptp;
154 struct ceph_crypto_key new_session_key; 154 struct ceph_crypto_key new_session_key = { 0 };
155 struct ceph_buffer *new_ticket_blob; 155 struct ceph_buffer *new_ticket_blob;
156 unsigned long new_expires, new_renew_after; 156 unsigned long new_expires, new_renew_after;
157 u64 new_secret_id; 157 u64 new_secret_id;
@@ -215,6 +215,9 @@ static int process_one_ticket(struct ceph_auth_client *ac,
215 dout(" ticket blob is %d bytes\n", dlen); 215 dout(" ticket blob is %d bytes\n", dlen);
216 ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad); 216 ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
217 blob_struct_v = ceph_decode_8(ptp); 217 blob_struct_v = ceph_decode_8(ptp);
218 if (blob_struct_v != 1)
219 goto bad;
220
218 new_secret_id = ceph_decode_64(ptp); 221 new_secret_id = ceph_decode_64(ptp);
219 ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend); 222 ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
220 if (ret) 223 if (ret)
@@ -234,13 +237,13 @@ static int process_one_ticket(struct ceph_auth_client *ac,
234 type, ceph_entity_type_name(type), th->secret_id, 237 type, ceph_entity_type_name(type), th->secret_id,
235 (int)th->ticket_blob->vec.iov_len); 238 (int)th->ticket_blob->vec.iov_len);
236 xi->have_keys |= th->service; 239 xi->have_keys |= th->service;
237 240 return 0;
238out:
239 return ret;
240 241
241bad: 242bad:
242 ret = -EINVAL; 243 ret = -EINVAL;
243 goto out; 244out:
245 ceph_crypto_key_destroy(&new_session_key);
246 return ret;
244} 247}
245 248
246static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, 249static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 4fd02831beed..47e94b560ba0 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -56,19 +56,6 @@ static const struct kernel_param_ops param_ops_supported_features = {
56module_param_cb(supported_features, &param_ops_supported_features, NULL, 56module_param_cb(supported_features, &param_ops_supported_features, NULL,
57 S_IRUGO); 57 S_IRUGO);
58 58
59/*
60 * find filename portion of a path (/foo/bar/baz -> baz)
61 */
62const char *ceph_file_part(const char *s, int len)
63{
64 const char *e = s + len;
65
66 while (e != s && *(e-1) != '/')
67 e--;
68 return e;
69}
70EXPORT_SYMBOL(ceph_file_part);
71
72const char *ceph_msg_type_name(int type) 59const char *ceph_msg_type_name(int type)
73{ 60{
74 switch (type) { 61 switch (type) {
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 5766a6c896c4..588a91930051 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1174,8 +1174,8 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1174 * Returns true if the result moves the cursor on to the next piece 1174 * Returns true if the result moves the cursor on to the next piece
1175 * of the data item. 1175 * of the data item.
1176 */ 1176 */
1177static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, 1177static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1178 size_t bytes) 1178 size_t bytes)
1179{ 1179{
1180 bool new_piece; 1180 bool new_piece;
1181 1181
@@ -1207,8 +1207,6 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1207 new_piece = true; 1207 new_piece = true;
1208 } 1208 }
1209 cursor->need_crc = new_piece; 1209 cursor->need_crc = new_piece;
1210
1211 return new_piece;
1212} 1210}
1213 1211
1214static size_t sizeof_footer(struct ceph_connection *con) 1212static size_t sizeof_footer(struct ceph_connection *con)
@@ -1577,7 +1575,6 @@ static int write_partial_message_data(struct ceph_connection *con)
1577 size_t page_offset; 1575 size_t page_offset;
1578 size_t length; 1576 size_t length;
1579 bool last_piece; 1577 bool last_piece;
1580 bool need_crc;
1581 int ret; 1578 int ret;
1582 1579
1583 page = ceph_msg_data_next(cursor, &page_offset, &length, 1580 page = ceph_msg_data_next(cursor, &page_offset, &length,
@@ -1592,7 +1589,7 @@ static int write_partial_message_data(struct ceph_connection *con)
1592 } 1589 }
1593 if (do_datacrc && cursor->need_crc) 1590 if (do_datacrc && cursor->need_crc)
1594 crc = ceph_crc32c_page(crc, page, page_offset, length); 1591 crc = ceph_crc32c_page(crc, page, page_offset, length);
1595 need_crc = ceph_msg_data_advance(cursor, (size_t)ret); 1592 ceph_msg_data_advance(cursor, (size_t)ret);
1596 } 1593 }
1597 1594
1598 dout("%s %p msg %p done\n", __func__, con, msg); 1595 dout("%s %p msg %p done\n", __func__, con, msg);
@@ -2231,10 +2228,18 @@ static void process_ack(struct ceph_connection *con)
2231 struct ceph_msg *m; 2228 struct ceph_msg *m;
2232 u64 ack = le64_to_cpu(con->in_temp_ack); 2229 u64 ack = le64_to_cpu(con->in_temp_ack);
2233 u64 seq; 2230 u64 seq;
2231 bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
2232 struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
2234 2233
2235 while (!list_empty(&con->out_sent)) { 2234 /*
2236 m = list_first_entry(&con->out_sent, struct ceph_msg, 2235 * In the reconnect case, con_fault() has requeued messages
2237 list_head); 2236 * in out_sent. We should cleanup old messages according to
2237 * the reconnect seq.
2238 */
2239 while (!list_empty(list)) {
2240 m = list_first_entry(list, struct ceph_msg, list_head);
2241 if (reconnect && m->needs_out_seq)
2242 break;
2238 seq = le64_to_cpu(m->hdr.seq); 2243 seq = le64_to_cpu(m->hdr.seq);
2239 if (seq > ack) 2244 if (seq > ack)
2240 break; 2245 break;
@@ -2243,6 +2248,7 @@ static void process_ack(struct ceph_connection *con)
2243 m->ack_stamp = jiffies; 2248 m->ack_stamp = jiffies;
2244 ceph_msg_remove(m); 2249 ceph_msg_remove(m);
2245 } 2250 }
2251
2246 prepare_read_tag(con); 2252 prepare_read_tag(con);
2247} 2253}
2248 2254
@@ -2299,7 +2305,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
2299 2305
2300 if (do_datacrc) 2306 if (do_datacrc)
2301 crc = ceph_crc32c_page(crc, page, page_offset, ret); 2307 crc = ceph_crc32c_page(crc, page, page_offset, ret);
2302 (void) ceph_msg_data_advance(cursor, (size_t)ret); 2308 ceph_msg_data_advance(cursor, (size_t)ret);
2303 } 2309 }
2304 if (do_datacrc) 2310 if (do_datacrc)
2305 con->in_data_crc = crc; 2311 con->in_data_crc = crc;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 29a0ef351c5e..250f11f78609 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -43,15 +43,13 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
43 int i, err = -EINVAL; 43 int i, err = -EINVAL;
44 struct ceph_fsid fsid; 44 struct ceph_fsid fsid;
45 u32 epoch, num_mon; 45 u32 epoch, num_mon;
46 u16 version;
47 u32 len; 46 u32 len;
48 47
49 ceph_decode_32_safe(&p, end, len, bad); 48 ceph_decode_32_safe(&p, end, len, bad);
50 ceph_decode_need(&p, end, len, bad); 49 ceph_decode_need(&p, end, len, bad);
51 50
52 dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); 51 dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
53 52 p += sizeof(u16); /* skip version */
54 ceph_decode_16_safe(&p, end, version, bad);
55 53
56 ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); 54 ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
57 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 55 ceph_decode_copy(&p, &fsid, sizeof(fsid));
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index ffe9e904d4d1..55e3a477f92d 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -317,6 +317,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
317 u32 yes; 317 u32 yes;
318 struct crush_rule *r; 318 struct crush_rule *r;
319 319
320 err = -EINVAL;
320 ceph_decode_32_safe(p, end, yes, bad); 321 ceph_decode_32_safe(p, end, yes, bad);
321 if (!yes) { 322 if (!yes) {
322 dout("crush_decode NO rule %d off %x %p to %p\n", 323 dout("crush_decode NO rule %d off %x %p to %p\n",
diff --git a/net/core/dev.c b/net/core/dev.c
index 96cf83da0d66..416137c64bf8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1253,8 +1253,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1253 if (!new_ifalias) 1253 if (!new_ifalias)
1254 return -ENOMEM; 1254 return -ENOMEM;
1255 dev->ifalias = new_ifalias; 1255 dev->ifalias = new_ifalias;
1256 memcpy(dev->ifalias, alias, len);
1257 dev->ifalias[len] = 0;
1256 1258
1257 strlcpy(dev->ifalias, alias, len+1);
1258 return len; 1259 return len;
1259} 1260}
1260 1261
@@ -4766,6 +4767,13 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
4766} 4767}
4767EXPORT_SYMBOL(gro_find_complete_by_type); 4768EXPORT_SYMBOL(gro_find_complete_by_type);
4768 4769
4770static void napi_skb_free_stolen_head(struct sk_buff *skb)
4771{
4772 skb_dst_drop(skb);
4773 secpath_reset(skb);
4774 kmem_cache_free(skbuff_head_cache, skb);
4775}
4776
4769static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 4777static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4770{ 4778{
4771 switch (ret) { 4779 switch (ret) {
@@ -4779,13 +4787,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4779 break; 4787 break;
4780 4788
4781 case GRO_MERGED_FREE: 4789 case GRO_MERGED_FREE:
4782 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { 4790 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4783 skb_dst_drop(skb); 4791 napi_skb_free_stolen_head(skb);
4784 secpath_reset(skb); 4792 else
4785 kmem_cache_free(skbuff_head_cache, skb);
4786 } else {
4787 __kfree_skb(skb); 4793 __kfree_skb(skb);
4788 }
4789 break; 4794 break;
4790 4795
4791 case GRO_HELD: 4796 case GRO_HELD:
@@ -4857,10 +4862,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
4857 break; 4862 break;
4858 4863
4859 case GRO_DROP: 4864 case GRO_DROP:
4860 case GRO_MERGED_FREE:
4861 napi_reuse_skb(napi, skb); 4865 napi_reuse_skb(napi, skb);
4862 break; 4866 break;
4863 4867
4868 case GRO_MERGED_FREE:
4869 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4870 napi_skb_free_stolen_head(skb);
4871 else
4872 napi_reuse_skb(napi, skb);
4873 break;
4874
4864 case GRO_MERGED: 4875 case GRO_MERGED:
4865 case GRO_CONSUMED: 4876 case GRO_CONSUMED:
4866 break; 4877 break;
@@ -4948,6 +4959,19 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4948} 4959}
4949EXPORT_SYMBOL(__skb_gro_checksum_complete); 4960EXPORT_SYMBOL(__skb_gro_checksum_complete);
4950 4961
4962static void net_rps_send_ipi(struct softnet_data *remsd)
4963{
4964#ifdef CONFIG_RPS
4965 while (remsd) {
4966 struct softnet_data *next = remsd->rps_ipi_next;
4967
4968 if (cpu_online(remsd->cpu))
4969 smp_call_function_single_async(remsd->cpu, &remsd->csd);
4970 remsd = next;
4971 }
4972#endif
4973}
4974
4951/* 4975/*
4952 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 4976 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
4953 * Note: called with local irq disabled, but exits with local irq enabled. 4977 * Note: called with local irq disabled, but exits with local irq enabled.
@@ -4963,14 +4987,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4963 local_irq_enable(); 4987 local_irq_enable();
4964 4988
4965 /* Send pending IPI's to kick RPS processing on remote cpus. */ 4989 /* Send pending IPI's to kick RPS processing on remote cpus. */
4966 while (remsd) { 4990 net_rps_send_ipi(remsd);
4967 struct softnet_data *next = remsd->rps_ipi_next;
4968
4969 if (cpu_online(remsd->cpu))
4970 smp_call_function_single_async(remsd->cpu,
4971 &remsd->csd);
4972 remsd = next;
4973 }
4974 } else 4991 } else
4975#endif 4992#endif
4976 local_irq_enable(); 4993 local_irq_enable();
@@ -5199,8 +5216,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5199 if (rc == BUSY_POLL_BUDGET) 5216 if (rc == BUSY_POLL_BUDGET)
5200 __napi_schedule(napi); 5217 __napi_schedule(napi);
5201 local_bh_enable(); 5218 local_bh_enable();
5202 if (local_softirq_pending())
5203 do_softirq();
5204} 5219}
5205 5220
5206void napi_busy_loop(unsigned int napi_id, 5221void napi_busy_loop(unsigned int napi_id,
@@ -6852,6 +6867,32 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
6852} 6867}
6853EXPORT_SYMBOL(dev_change_proto_down); 6868EXPORT_SYMBOL(dev_change_proto_down);
6854 6869
6870bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
6871{
6872 struct netdev_xdp xdp;
6873
6874 memset(&xdp, 0, sizeof(xdp));
6875 xdp.command = XDP_QUERY_PROG;
6876
6877 /* Query must always succeed. */
6878 WARN_ON(xdp_op(dev, &xdp) < 0);
6879 return xdp.prog_attached;
6880}
6881
6882static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
6883 struct netlink_ext_ack *extack,
6884 struct bpf_prog *prog)
6885{
6886 struct netdev_xdp xdp;
6887
6888 memset(&xdp, 0, sizeof(xdp));
6889 xdp.command = XDP_SETUP_PROG;
6890 xdp.extack = extack;
6891 xdp.prog = prog;
6892
6893 return xdp_op(dev, &xdp);
6894}
6895
6855/** 6896/**
6856 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 6897 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
6857 * @dev: device 6898 * @dev: device
@@ -6864,41 +6905,34 @@ EXPORT_SYMBOL(dev_change_proto_down);
6864int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 6905int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
6865 int fd, u32 flags) 6906 int fd, u32 flags)
6866{ 6907{
6867 int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp);
6868 const struct net_device_ops *ops = dev->netdev_ops; 6908 const struct net_device_ops *ops = dev->netdev_ops;
6869 struct bpf_prog *prog = NULL; 6909 struct bpf_prog *prog = NULL;
6870 struct netdev_xdp xdp; 6910 xdp_op_t xdp_op, xdp_chk;
6871 int err; 6911 int err;
6872 6912
6873 ASSERT_RTNL(); 6913 ASSERT_RTNL();
6874 6914
6875 xdp_op = ops->ndo_xdp; 6915 xdp_op = xdp_chk = ops->ndo_xdp;
6916 if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE))
6917 return -EOPNOTSUPP;
6876 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) 6918 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
6877 xdp_op = generic_xdp_install; 6919 xdp_op = generic_xdp_install;
6920 if (xdp_op == xdp_chk)
6921 xdp_chk = generic_xdp_install;
6878 6922
6879 if (fd >= 0) { 6923 if (fd >= 0) {
6880 if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) { 6924 if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
6881 memset(&xdp, 0, sizeof(xdp)); 6925 return -EEXIST;
6882 xdp.command = XDP_QUERY_PROG; 6926 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
6883 6927 __dev_xdp_attached(dev, xdp_op))
6884 err = xdp_op(dev, &xdp); 6928 return -EBUSY;
6885 if (err < 0)
6886 return err;
6887 if (xdp.prog_attached)
6888 return -EBUSY;
6889 }
6890 6929
6891 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); 6930 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
6892 if (IS_ERR(prog)) 6931 if (IS_ERR(prog))
6893 return PTR_ERR(prog); 6932 return PTR_ERR(prog);
6894 } 6933 }
6895 6934
6896 memset(&xdp, 0, sizeof(xdp)); 6935 err = dev_xdp_install(dev, xdp_op, extack, prog);
6897 xdp.command = XDP_SETUP_PROG;
6898 xdp.extack = extack;
6899 xdp.prog = prog;
6900
6901 err = xdp_op(dev, &xdp);
6902 if (err < 0 && prog) 6936 if (err < 0 && prog)
6903 bpf_prog_put(prog); 6937 bpf_prog_put(prog);
6904 6938
@@ -7482,6 +7516,8 @@ out:
7482err_uninit: 7516err_uninit:
7483 if (dev->netdev_ops->ndo_uninit) 7517 if (dev->netdev_ops->ndo_uninit)
7484 dev->netdev_ops->ndo_uninit(dev); 7518 dev->netdev_ops->ndo_uninit(dev);
7519 if (dev->priv_destructor)
7520 dev->priv_destructor(dev);
7485 goto out; 7521 goto out;
7486} 7522}
7487EXPORT_SYMBOL(register_netdevice); 7523EXPORT_SYMBOL(register_netdevice);
@@ -7689,8 +7725,10 @@ void netdev_run_todo(void)
7689 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 7725 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
7690 WARN_ON(dev->dn_ptr); 7726 WARN_ON(dev->dn_ptr);
7691 7727
7692 if (dev->destructor) 7728 if (dev->priv_destructor)
7693 dev->destructor(dev); 7729 dev->priv_destructor(dev);
7730 if (dev->needs_free_netdev)
7731 free_netdev(dev);
7694 7732
7695 /* Report a network device has been unregistered */ 7733 /* Report a network device has been unregistered */
7696 rtnl_lock(); 7734 rtnl_lock();
@@ -7755,9 +7793,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7755 } else { 7793 } else {
7756 netdev_stats_to_stats64(storage, &dev->stats); 7794 netdev_stats_to_stats64(storage, &dev->stats);
7757 } 7795 }
7758 storage->rx_dropped += atomic_long_read(&dev->rx_dropped); 7796 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
7759 storage->tx_dropped += atomic_long_read(&dev->tx_dropped); 7797 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
7760 storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler); 7798 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
7761 return storage; 7799 return storage;
7762} 7800}
7763EXPORT_SYMBOL(dev_get_stats); 7801EXPORT_SYMBOL(dev_get_stats);
@@ -8173,7 +8211,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
8173 struct sk_buff **list_skb; 8211 struct sk_buff **list_skb;
8174 struct sk_buff *skb; 8212 struct sk_buff *skb;
8175 unsigned int cpu; 8213 unsigned int cpu;
8176 struct softnet_data *sd, *oldsd; 8214 struct softnet_data *sd, *oldsd, *remsd = NULL;
8177 8215
8178 local_irq_disable(); 8216 local_irq_disable();
8179 cpu = smp_processor_id(); 8217 cpu = smp_processor_id();
@@ -8214,6 +8252,13 @@ static int dev_cpu_dead(unsigned int oldcpu)
8214 raise_softirq_irqoff(NET_TX_SOFTIRQ); 8252 raise_softirq_irqoff(NET_TX_SOFTIRQ);
8215 local_irq_enable(); 8253 local_irq_enable();
8216 8254
8255#ifdef CONFIG_RPS
8256 remsd = oldsd->rps_ipi_list;
8257 oldsd->rps_ipi_list = NULL;
8258#endif
8259 /* send out pending IPI's on offline CPU */
8260 net_rps_send_ipi(remsd);
8261
8217 /* Process offline CPU's input_pkt_queue */ 8262 /* Process offline CPU's input_pkt_queue */
8218 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 8263 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
8219 netif_rx_ni(skb); 8264 netif_rx_ni(skb);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b94b1d293506..27fad31784a8 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
410 if (cmd == SIOCGIFNAME) 410 if (cmd == SIOCGIFNAME)
411 return dev_ifname(net, (struct ifreq __user *)arg); 411 return dev_ifname(net, (struct ifreq __user *)arg);
412 412
413 /*
414 * Take care of Wireless Extensions. Unfortunately struct iwreq
415 * isn't a proper subset of struct ifreq (it's 8 byte shorter)
416 * so we need to treat it specially, otherwise applications may
417 * fault if the struct they're passing happens to land at the
418 * end of a mapped page.
419 */
420 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
421 struct iwreq iwr;
422
423 if (copy_from_user(&iwr, arg, sizeof(iwr)))
424 return -EFAULT;
425
426 return wext_handle_ioctl(net, &iwr, cmd, arg);
427 }
428
413 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 429 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
414 return -EFAULT; 430 return -EFAULT;
415 431
@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
559 ret = -EFAULT; 575 ret = -EFAULT;
560 return ret; 576 return ret;
561 } 577 }
562 /* Take care of Wireless Extensions */
563 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
564 return wext_handle_ioctl(net, &ifr, cmd, arg);
565 return -ENOTTY; 578 return -ENOTTY;
566 } 579 }
567} 580}
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b0b87a292e7c..a0adfc31a3fe 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1680,8 +1680,10 @@ start_again:
1680 1680
1681 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, 1681 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
1682 &devlink_nl_family, NLM_F_MULTI, cmd); 1682 &devlink_nl_family, NLM_F_MULTI, cmd);
1683 if (!hdr) 1683 if (!hdr) {
1684 nlmsg_free(skb);
1684 return -EMSGSIZE; 1685 return -EMSGSIZE;
1686 }
1685 1687
1686 if (devlink_nl_put_handle(skb, devlink)) 1688 if (devlink_nl_put_handle(skb, devlink))
1687 goto nla_put_failure; 1689 goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
2098 2100
2099 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, 2101 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
2100 &devlink_nl_family, NLM_F_MULTI, cmd); 2102 &devlink_nl_family, NLM_F_MULTI, cmd);
2101 if (!hdr) 2103 if (!hdr) {
2104 nlmsg_free(skb);
2102 return -EMSGSIZE; 2105 return -EMSGSIZE;
2106 }
2103 2107
2104 if (devlink_nl_put_handle(skb, devlink)) 2108 if (devlink_nl_put_handle(skb, devlink))
2105 goto nla_put_failure; 2109 goto nla_put_failure;
diff --git a/net/core/dst.c b/net/core/dst.c
index 960e503b5a52..13ba4a090c41 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
151} 151}
152EXPORT_SYMBOL(dst_discard_out); 152EXPORT_SYMBOL(dst_discard_out);
153 153
154const u32 dst_default_metrics[RTAX_MAX + 1] = { 154const struct dst_metrics dst_default_metrics = {
155 /* This initializer is needed to force linker to place this variable 155 /* This initializer is needed to force linker to place this variable
156 * into const section. Otherwise it might end into bss section. 156 * into const section. Otherwise it might end into bss section.
157 * We really want to avoid false sharing on this variable, and catch 157 * We really want to avoid false sharing on this variable, and catch
158 * any writes on it. 158 * any writes on it.
159 */ 159 */
160 [RTAX_MAX] = 0xdeadbeef, 160 .refcnt = ATOMIC_INIT(1),
161}; 161};
162 162
163void dst_init(struct dst_entry *dst, struct dst_ops *ops, 163void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
169 if (dev) 169 if (dev)
170 dev_hold(dev); 170 dev_hold(dev);
171 dst->ops = ops; 171 dst->ops = ops;
172 dst_init_metrics(dst, dst_default_metrics, true); 172 dst_init_metrics(dst, dst_default_metrics.metrics, true);
173 dst->expires = 0UL; 173 dst->expires = 0UL;
174 dst->path = dst; 174 dst->path = dst;
175 dst->from = NULL; 175 dst->from = NULL;
@@ -314,25 +314,30 @@ EXPORT_SYMBOL(dst_release);
314 314
315u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) 315u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
316{ 316{
317 u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); 317 struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
318 318
319 if (p) { 319 if (p) {
320 u32 *old_p = __DST_METRICS_PTR(old); 320 struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
321 unsigned long prev, new; 321 unsigned long prev, new;
322 322
323 memcpy(p, old_p, sizeof(u32) * RTAX_MAX); 323 atomic_set(&p->refcnt, 1);
324 memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
324 325
325 new = (unsigned long) p; 326 new = (unsigned long) p;
326 prev = cmpxchg(&dst->_metrics, old, new); 327 prev = cmpxchg(&dst->_metrics, old, new);
327 328
328 if (prev != old) { 329 if (prev != old) {
329 kfree(p); 330 kfree(p);
330 p = __DST_METRICS_PTR(prev); 331 p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
331 if (prev & DST_METRICS_READ_ONLY) 332 if (prev & DST_METRICS_READ_ONLY)
332 p = NULL; 333 p = NULL;
334 } else if (prev & DST_METRICS_REFCOUNTED) {
335 if (atomic_dec_and_test(&old_p->refcnt))
336 kfree(old_p);
333 } 337 }
334 } 338 }
335 return p; 339 BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
340 return (u32 *)p;
336} 341}
337EXPORT_SYMBOL(dst_cow_metrics_generic); 342EXPORT_SYMBOL(dst_cow_metrics_generic);
338 343
@@ -341,7 +346,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
341{ 346{
342 unsigned long prev, new; 347 unsigned long prev, new;
343 348
344 new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY; 349 new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
345 prev = cmpxchg(&dst->_metrics, old, new); 350 prev = cmpxchg(&dst->_metrics, old, new);
346 if (prev == old) 351 if (prev == old)
347 kfree(__DST_METRICS_PTR(old)); 352 kfree(__DST_METRICS_PTR(old));
@@ -464,6 +469,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
464 spin_lock_bh(&dst_garbage.lock); 469 spin_lock_bh(&dst_garbage.lock);
465 dst = dst_garbage.list; 470 dst = dst_garbage.list;
466 dst_garbage.list = NULL; 471 dst_garbage.list = NULL;
472 /* The code in dst_ifdown places a hold on the loopback device.
473 * If the gc entry processing is set to expire after a lengthy
474 * interval, this hold can cause netdev_wait_allrefs() to hang
475 * out and wait for a long time -- until the loopback
476 * interface is released. If we're really unlucky, it'll emit
477 * pr_emerg messages to console too. Reset the interval here,
478 * so dst cleanups occur in a more timely fashion.
479 */
480 if (dst_garbage.timer_inc > DST_GC_INC) {
481 dst_garbage.timer_inc = DST_GC_INC;
482 dst_garbage.timer_expires = DST_GC_MIN;
483 mod_delayed_work(system_wq, &dst_gc_work,
484 dst_garbage.timer_expires);
485 }
467 spin_unlock_bh(&dst_garbage.lock); 486 spin_unlock_bh(&dst_garbage.lock);
468 487
469 if (last) 488 if (last)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f21c4d3aeae0..3bba291c6c32 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
568 struct net *net = sock_net(skb->sk); 568 struct net *net = sock_net(skb->sk);
569 struct fib_rule_hdr *frh = nlmsg_data(nlh); 569 struct fib_rule_hdr *frh = nlmsg_data(nlh);
570 struct fib_rules_ops *ops = NULL; 570 struct fib_rules_ops *ops = NULL;
571 struct fib_rule *rule, *tmp; 571 struct fib_rule *rule, *r;
572 struct nlattr *tb[FRA_MAX+1]; 572 struct nlattr *tb[FRA_MAX+1];
573 struct fib_kuid_range range; 573 struct fib_kuid_range range;
574 int err = -EINVAL; 574 int err = -EINVAL;
@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
668 668
669 /* 669 /*
670 * Check if this rule is a target to any of them. If so, 670 * Check if this rule is a target to any of them. If so,
671 * adjust to the next one with the same preference or
671 * disable them. As this operation is eventually very 672 * disable them. As this operation is eventually very
672 * expensive, it is only performed if goto rules have 673 * expensive, it is only performed if goto rules, except
673 * actually been added. 674 * current if it is goto rule, have actually been added.
674 */ 675 */
675 if (ops->nr_goto_rules > 0) { 676 if (ops->nr_goto_rules > 0) {
676 list_for_each_entry(tmp, &ops->rules_list, list) { 677 struct fib_rule *n;
677 if (rtnl_dereference(tmp->ctarget) == rule) { 678
678 RCU_INIT_POINTER(tmp->ctarget, NULL); 679 n = list_next_entry(rule, list);
680 if (&n->list == &ops->rules_list || n->pref != rule->pref)
681 n = NULL;
682 list_for_each_entry(r, &ops->rules_list, list) {
683 if (rtnl_dereference(r->ctarget) != rule)
684 continue;
685 rcu_assign_pointer(r->ctarget, n);
686 if (!n)
679 ops->unresolved_rules++; 687 ops->unresolved_rules++;
680 }
681 } 688 }
682 } 689 }
683 690
diff --git a/net/core/filter.c b/net/core/filter.c
index a253a6197e6b..a6bb95fa87b2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func)
2281 func == bpf_skb_change_head || 2281 func == bpf_skb_change_head ||
2282 func == bpf_skb_change_tail || 2282 func == bpf_skb_change_tail ||
2283 func == bpf_skb_pull_data || 2283 func == bpf_skb_pull_data ||
2284 func == bpf_clone_redirect ||
2284 func == bpf_l3_csum_replace || 2285 func == bpf_l3_csum_replace ||
2285 func == bpf_l4_csum_replace || 2286 func == bpf_l4_csum_replace ||
2286 func == bpf_xdp_adjust_head) 2287 func == bpf_xdp_adjust_head)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 58b0bcc125b5..d274f81fcc2c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1132,10 +1132,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1132 lladdr = neigh->ha; 1132 lladdr = neigh->ha;
1133 } 1133 }
1134 1134
1135 if (new & NUD_CONNECTED)
1136 neigh->confirmed = jiffies;
1137 neigh->updated = jiffies;
1138
1139 /* If entry was valid and address is not changed, 1135 /* If entry was valid and address is not changed,
1140 do not change entry state, if new one is STALE. 1136 do not change entry state, if new one is STALE.
1141 */ 1137 */
@@ -1157,6 +1153,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1157 } 1153 }
1158 } 1154 }
1159 1155
1156 /* Update timestamps only once we know we will make a change to the
1157 * neighbour entry. Otherwise we risk to move the locktime window with
1158 * noop updates and ignore relevant ARP updates.
1159 */
1160 if (new != old || lladdr != neigh->ha) {
1161 if (new & NUD_CONNECTED)
1162 neigh->confirmed = jiffies;
1163 neigh->updated = jiffies;
1164 }
1165
1160 if (new != old) { 1166 if (new != old) {
1161 neigh_del_timer(neigh); 1167 neigh_del_timer(neigh);
1162 if (new & NUD_PROBE) 1168 if (new & NUD_PROBE)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 1934efd4a9d4..26bbfababff2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -315,6 +315,25 @@ out_undo:
315 goto out; 315 goto out;
316} 316}
317 317
318static int __net_init net_defaults_init_net(struct net *net)
319{
320 net->core.sysctl_somaxconn = SOMAXCONN;
321 return 0;
322}
323
324static struct pernet_operations net_defaults_ops = {
325 .init = net_defaults_init_net,
326};
327
328static __init int net_defaults_init(void)
329{
330 if (register_pernet_subsys(&net_defaults_ops))
331 panic("Cannot initialize net default settings");
332
333 return 0;
334}
335
336core_initcall(net_defaults_init);
318 337
319#ifdef CONFIG_NET_NS 338#ifdef CONFIG_NET_NS
320static struct ucounts *inc_net_namespaces(struct user_namespace *ns) 339static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bcb0f610ee42..467a2f4510a7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -899,8 +899,7 @@ static size_t rtnl_port_size(const struct net_device *dev,
899static size_t rtnl_xdp_size(void) 899static size_t rtnl_xdp_size(void)
900{ 900{
901 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 901 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
902 nla_total_size(1) + /* XDP_ATTACHED */ 902 nla_total_size(1); /* XDP_ATTACHED */
903 nla_total_size(4); /* XDP_FLAGS */
904 903
905 return xdp_size; 904 return xdp_size;
906} 905}
@@ -932,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
932 + nla_total_size(1) /* IFLA_LINKMODE */ 931 + nla_total_size(1) /* IFLA_LINKMODE */
933 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ 932 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
934 + nla_total_size(4) /* IFLA_LINK_NETNSID */ 933 + nla_total_size(4) /* IFLA_LINK_NETNSID */
934 + nla_total_size(4) /* IFLA_GROUP */
935 + nla_total_size(ext_filter_mask 935 + nla_total_size(ext_filter_mask
936 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ 936 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
937 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ 937 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1125,6 +1125,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1125 struct ifla_vf_mac vf_mac; 1125 struct ifla_vf_mac vf_mac;
1126 struct ifla_vf_info ivi; 1126 struct ifla_vf_info ivi;
1127 1127
1128 memset(&ivi, 0, sizeof(ivi));
1129
1128 /* Not all SR-IOV capable drivers support the 1130 /* Not all SR-IOV capable drivers support the
1129 * spoofcheck and "RSS query enable" query. Preset to 1131 * spoofcheck and "RSS query enable" query. Preset to
1130 * -1 so the user space tool can detect that the driver 1132 * -1 so the user space tool can detect that the driver
@@ -1133,7 +1135,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1133 ivi.spoofchk = -1; 1135 ivi.spoofchk = -1;
1134 ivi.rss_query_en = -1; 1136 ivi.rss_query_en = -1;
1135 ivi.trusted = -1; 1137 ivi.trusted = -1;
1136 memset(ivi.mac, 0, sizeof(ivi.mac));
1137 /* The default value for VF link state is "auto" 1138 /* The default value for VF link state is "auto"
1138 * IFLA_VF_LINK_STATE_AUTO which equals zero 1139 * IFLA_VF_LINK_STATE_AUTO which equals zero
1139 */ 1140 */
@@ -1247,37 +1248,34 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1247 return 0; 1248 return 0;
1248} 1249}
1249 1250
1251static u8 rtnl_xdp_attached_mode(struct net_device *dev)
1252{
1253 const struct net_device_ops *ops = dev->netdev_ops;
1254
1255 ASSERT_RTNL();
1256
1257 if (rcu_access_pointer(dev->xdp_prog))
1258 return XDP_ATTACHED_SKB;
1259 if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp))
1260 return XDP_ATTACHED_DRV;
1261
1262 return XDP_ATTACHED_NONE;
1263}
1264
1250static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) 1265static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1251{ 1266{
1252 struct nlattr *xdp; 1267 struct nlattr *xdp;
1253 u32 xdp_flags = 0;
1254 u8 val = 0;
1255 int err; 1268 int err;
1256 1269
1257 xdp = nla_nest_start(skb, IFLA_XDP); 1270 xdp = nla_nest_start(skb, IFLA_XDP);
1258 if (!xdp) 1271 if (!xdp)
1259 return -EMSGSIZE; 1272 return -EMSGSIZE;
1260 if (rcu_access_pointer(dev->xdp_prog)) { 1273
1261 xdp_flags = XDP_FLAGS_SKB_MODE; 1274 err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
1262 val = 1; 1275 rtnl_xdp_attached_mode(dev));
1263 } else if (dev->netdev_ops->ndo_xdp) {
1264 struct netdev_xdp xdp_op = {};
1265
1266 xdp_op.command = XDP_QUERY_PROG;
1267 err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
1268 if (err)
1269 goto err_cancel;
1270 val = xdp_op.prog_attached;
1271 }
1272 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, val);
1273 if (err) 1276 if (err)
1274 goto err_cancel; 1277 goto err_cancel;
1275 1278
1276 if (xdp_flags) {
1277 err = nla_put_u32(skb, IFLA_XDP_FLAGS, xdp_flags);
1278 if (err)
1279 goto err_cancel;
1280 }
1281 nla_nest_end(skb, xdp); 1279 nla_nest_end(skb, xdp);
1282 return 0; 1280 return 0;
1283 1281
@@ -1471,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1471 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 1469 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1472 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 1470 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1473 [IFLA_XDP] = { .type = NLA_NESTED }, 1471 [IFLA_XDP] = { .type = NLA_NESTED },
1472 [IFLA_GROUP] = { .type = NLA_U32 },
1474}; 1473};
1475 1474
1476static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 1475static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1631,13 +1630,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1631 cb->nlh->nlmsg_seq, 0, 1630 cb->nlh->nlmsg_seq, 0,
1632 flags, 1631 flags,
1633 ext_filter_mask); 1632 ext_filter_mask);
1634 /* If we ran out of room on the first message,
1635 * we're in trouble
1636 */
1637 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
1638 1633
1639 if (err < 0) 1634 if (err < 0) {
1640 goto out; 1635 if (likely(skb->len))
1636 goto out;
1637
1638 goto out_err;
1639 }
1641 1640
1642 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 1641 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1643cont: 1642cont:
@@ -1645,10 +1644,12 @@ cont:
1645 } 1644 }
1646 } 1645 }
1647out: 1646out:
1647 err = skb->len;
1648out_err:
1648 cb->args[1] = idx; 1649 cb->args[1] = idx;
1649 cb->args[0] = h; 1650 cb->args[0] = h;
1650 1651
1651 return skb->len; 1652 return err;
1652} 1653}
1653 1654
1654int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 1655int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
@@ -2199,6 +2200,11 @@ static int do_setlink(const struct sk_buff *skb,
2199 err = -EINVAL; 2200 err = -EINVAL;
2200 goto errout; 2201 goto errout;
2201 } 2202 }
2203 if ((xdp_flags & XDP_FLAGS_SKB_MODE) &&
2204 (xdp_flags & XDP_FLAGS_DRV_MODE)) {
2205 err = -EINVAL;
2206 goto errout;
2207 }
2202 } 2208 }
2203 2209
2204 if (xdp[IFLA_XDP_FD]) { 2210 if (xdp[IFLA_XDP_FD]) {
@@ -3228,8 +3234,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int err = 0;
 	int fidx = 0;
 
-	if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
-			IFLA_MAX, ifla_policy, NULL) == 0) {
+	err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+			  IFLA_MAX, ifla_policy, NULL);
+	if (err < 0) {
+		return -EINVAL;
+	} else if (err == 0) {
 		if (tb[IFLA_MASTER])
 			br_idx = nla_get_u32(tb[IFLA_MASTER]);
 	}
@@ -3452,8 +3461,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 				err = br_dev->netdev_ops->ndo_bridge_getlink(
 						skb, portid, seq, dev,
 						filter_mask, NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
@@ -3464,16 +3477,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 							      seq, dev,
 							      filter_mask,
 							      NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
 	}
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 	cb->args[0] = idx;
 
-	return skb->len;
+	return err;
 }
 
 static inline size_t bridge_nlmsg_size(void)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 346d3e85dfbc..b1be7c01efe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 
 	spin_lock_irqsave(&q->lock, flags);
 	skb = __skb_dequeue(q);
-	if (skb && (skb_next = skb_peek(q)))
+	if (skb && (skb_next = skb_peek(q))) {
 		icmp_next = is_icmp_err_skb(skb_next);
+		if (icmp_next)
+			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+	}
 	spin_unlock_irqrestore(&q->lock, flags);
 
 	if (is_icmp_err_skb(skb) && !icmp_next)
diff --git a/net/core/sock.c b/net/core/sock.c
index 79c6aee6af9b..727f924b7f91 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -139,10 +139,7 @@
 
 #include <trace/events/sock.h>
 
-#ifdef CONFIG_INET
 #include <net/tcp.h>
-#endif
-
 #include <net/busy_poll.h>
 
 static DEFINE_MUTEX(proto_list_mutex);
@@ -1803,28 +1800,24 @@ EXPORT_SYMBOL(skb_set_owner_w);
  * delay queue. We want to allow the owner socket to send more
  * packets, as if they were already TX completed by a typical driver.
  * But we also want to keep skb->sk set because some packet schedulers
- * rely on it (sch_fq for example). So we set skb->truesize to a small
- * amount (1) and decrease sk_wmem_alloc accordingly.
+ * rely on it (sch_fq for example).
  */
 void skb_orphan_partial(struct sk_buff *skb)
 {
-	/* If this skb is a TCP pure ACK or already went here,
-	 * we have nothing to do. 2 is already a very small truesize.
-	 */
-	if (skb->truesize <= 2)
+	if (skb_is_tcp_pure_ack(skb))
 		return;
 
-	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
-	 * so we do not completely orphan skb, but transfert all
-	 * accounted bytes but one, to avoid unexpected reorders.
-	 */
 	if (skb->destructor == sock_wfree
 #ifdef CONFIG_INET
 	    || skb->destructor == tcp_wfree
 #endif
 	    ) {
-		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
-		skb->truesize = 1;
+		struct sock *sk = skb->sk;
+
+		if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+			atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+			skb->destructor = sock_efree;
+		}
 	} else {
 		skb_orphan(skb);
 	}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index ea23254b2457..b7cd9aafe99e 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -479,8 +479,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
479{ 479{
480 struct ctl_table *tbl; 480 struct ctl_table *tbl;
481 481
482 net->core.sysctl_somaxconn = SOMAXCONN;
483
484 tbl = netns_core_table; 482 tbl = netns_core_table;
485 if (!net_eq(net, &init_net)) { 483 if (!net_eq(net, &init_net)) {
486 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); 484 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 840f14aaa016..992621172220 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -426,6 +426,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
426 newsk->sk_backlog_rcv = dccp_v4_do_rcv; 426 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
427 newnp->pktoptions = NULL; 427 newnp->pktoptions = NULL;
428 newnp->opt = NULL; 428 newnp->opt = NULL;
429 newnp->ipv6_mc_list = NULL;
430 newnp->ipv6_ac_list = NULL;
431 newnp->ipv6_fl_list = NULL;
429 newnp->mcast_oif = inet6_iif(skb); 432 newnp->mcast_oif = inet6_iif(skb);
430 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 433 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
431 434
@@ -490,6 +493,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
490 /* Clone RX bits */ 493 /* Clone RX bits */
491 newnp->rxopt.all = np->rxopt.all; 494 newnp->rxopt.all = np->rxopt.all;
492 495
496 newnp->ipv6_mc_list = NULL;
497 newnp->ipv6_ac_list = NULL;
498 newnp->ipv6_fl_list = NULL;
493 newnp->pktoptions = NULL; 499 newnp->pktoptions = NULL;
494 newnp->opt = NULL; 500 newnp->opt = NULL;
495 newnp->mcast_oif = inet6_iif(skb); 501 newnp->mcast_oif = inet6_iif(skb);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 4b9518a0d248..6f95612b4d32 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); 188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
189} 189}
190 190
191static inline void dnrt_drop(struct dn_route *rt)
192{
193 dst_release(&rt->dst);
194 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
195}
196
197static void dn_dst_check_expire(unsigned long dummy) 191static void dn_dst_check_expire(unsigned long dummy)
198{ 192{
199 int i; 193 int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
 			}
 			*rtp = rt->dst.dn_next;
 			rt->dst.dn_next = NULL;
-			dnrt_drop(rt);
+			dnrt_free(rt);
 			break;
 		}
 		spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 			dst_use(&rth->dst, now);
 			spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 
-			dnrt_drop(rt);
+			dst_free(&rt->dst);
 			*rp = rth;
 			return 0;
 		}
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
 	for(; rt; rt = next) {
 		next = rcu_dereference_raw(rt->dst.dn_next);
 		RCU_INIT_POINTER(rt->dst.dn_next, NULL);
-		dst_free((struct dst_entry *)rt);
+		dnrt_free(rt);
 	}
 
 nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+	rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1ed81ac6dd1a..aa8ffecc46a4 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
-	if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+	if (skb->len < sizeof(*nlh) ||
+	    nlh->nlmsg_len < sizeof(*nlh) ||
+	    skb->len < nlh->nlmsg_len)
 		return;
 
 	if (!netlink_capable(skb, CAP_NET_ADMIN))
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 26130ae438da..90038d45a547 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
223 return 0; 223 return 0;
224} 224}
225 225
226#ifdef CONFIG_PM_SLEEP
227int dsa_switch_suspend(struct dsa_switch *ds)
228{
229 int i, ret = 0;
230
231 /* Suspend slave network devices */
232 for (i = 0; i < ds->num_ports; i++) {
233 if (!dsa_is_port_initialized(ds, i))
234 continue;
235
236 ret = dsa_slave_suspend(ds->ports[i].netdev);
237 if (ret)
238 return ret;
239 }
240
241 if (ds->ops->suspend)
242 ret = ds->ops->suspend(ds);
243
244 return ret;
245}
246EXPORT_SYMBOL_GPL(dsa_switch_suspend);
247
248int dsa_switch_resume(struct dsa_switch *ds)
249{
250 int i, ret = 0;
251
252 if (ds->ops->resume)
253 ret = ds->ops->resume(ds);
254
255 if (ret)
256 return ret;
257
258 /* Resume slave network devices */
259 for (i = 0; i < ds->num_ports; i++) {
260 if (!dsa_is_port_initialized(ds, i))
261 continue;
262
263 ret = dsa_slave_resume(ds->ports[i].netdev);
264 if (ret)
265 return ret;
266 }
267
268 return 0;
269}
270EXPORT_SYMBOL_GPL(dsa_switch_resume);
271#endif
272
226static struct packet_type dsa_pack_type __read_mostly = { 273static struct packet_type dsa_pack_type __read_mostly = {
227 .type = cpu_to_be16(ETH_P_XDSA), 274 .type = cpu_to_be16(ETH_P_XDSA),
228 .func = dsa_switch_rcv, 275 .func = dsa_switch_rcv,
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 033b3bfb63dc..7796580e99ee 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
 		dsa_ds_unapply(dst, ds);
 	}
 
-	if (dst->cpu_switch)
+	if (dst->cpu_switch) {
 		dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+		dst->cpu_switch = NULL;
+	}
 
 	pr_info("DSA: tree %d unapplied\n", dst->tree);
 	dst->applied = false;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index ad345c8b0b06..7281098df04e 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
289 dsa_switch_unregister_notifier(ds); 289 dsa_switch_unregister_notifier(ds);
290} 290}
291 291
292#ifdef CONFIG_PM_SLEEP
293int dsa_switch_suspend(struct dsa_switch *ds)
294{
295 int i, ret = 0;
296
297 /* Suspend slave network devices */
298 for (i = 0; i < ds->num_ports; i++) {
299 if (!dsa_is_port_initialized(ds, i))
300 continue;
301
302 ret = dsa_slave_suspend(ds->ports[i].netdev);
303 if (ret)
304 return ret;
305 }
306
307 if (ds->ops->suspend)
308 ret = ds->ops->suspend(ds);
309
310 return ret;
311}
312EXPORT_SYMBOL_GPL(dsa_switch_suspend);
313
314int dsa_switch_resume(struct dsa_switch *ds)
315{
316 int i, ret = 0;
317
318 if (ds->ops->resume)
319 ret = ds->ops->resume(ds);
320
321 if (ret)
322 return ret;
323
324 /* Resume slave network devices */
325 for (i = 0; i < ds->num_ports; i++) {
326 if (!dsa_is_port_initialized(ds, i))
327 continue;
328
329 ret = dsa_slave_resume(ds->ports[i].netdev);
330 if (ret)
331 return ret;
332 }
333
334 return 0;
335}
336EXPORT_SYMBOL_GPL(dsa_switch_resume);
337#endif
338
339/* platform driver init and cleanup *****************************************/ 292/* platform driver init and cleanup *****************************************/
340static int dev_is_class(struct device *dev, void *class) 293static int dev_is_class(struct device *dev, void *class)
341{ 294{
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c73160fb11e7..0a0a392dc2bd 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
378 del_timer_sync(&hsr->announce_timer); 378 del_timer_sync(&hsr->announce_timer);
379 379
380 synchronize_rcu(); 380 synchronize_rcu();
381 free_netdev(hsr_dev);
382} 381}
383 382
384static const struct net_device_ops hsr_device_ops = { 383static const struct net_device_ops hsr_device_ops = {
@@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev)
 	SET_NETDEV_DEVTYPE(dev, &hsr_type);
 	dev->priv_flags |= IFF_NO_QUEUE;
 
-	dev->destructor = hsr_dev_destroy;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = hsr_dev_destroy;
 
 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 4ebe2aa3e7d3..04b5450c5a55 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
 	unsigned long irqflags;
 
 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
-	frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
-				       frame->is_supervision);
+	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
 	if (frame->node_src == NULL)
 		return -1; /* Unknown node and !is_supervision, or no mem */
 
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 7ea925816f79..284a9b820df8 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 
 /* Get the hsr_node from which 'skb' was sent.
  */
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup)
 {
+	struct list_head *node_db = &port->hsr->node_db;
 	struct hsr_node *node;
 	struct ethhdr *ethhdr;
 	u16 seq_out;
@@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
 		 */
 		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
 	} else {
-		WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+		/* this is called also for frames from master port and
+		 * so warn only for non master ports
+		 */
+		if (port->type != HSR_PT_MASTER)
+			WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
 		seq_out = HSR_SEQNR_START;
 	}
 
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 438b40f98f5a..4e04f0e868e9 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -18,7 +18,7 @@ struct hsr_node;
 
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 			      u16 seq_out);
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 			  struct hsr_port *port);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index d7efbf0dad20..0a866f332290 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev)
 
 	ldev->netdev_ops	= &lowpan_netdev_ops;
 	ldev->header_ops	= &lowpan_header_ops;
-	ldev->destructor	= free_netdev;
+	ldev->needs_free_netdev	= true;
 	ldev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f3dad1661343..58925b6597de 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
 		.type =       SOCK_DGRAM,
 		.protocol =   IPPROTO_ICMP,
 		.prot =       &ping_prot,
-		.ops =        &inet_dgram_ops,
+		.ops =        &inet_sockraw_ops,
 		.flags =      INET_PROTOSW_REUSE,
        },
 
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 0937b34c27ca..e9f3386a528b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -641,6 +641,32 @@ void arp_xmit(struct sk_buff *skb)
641} 641}
642EXPORT_SYMBOL(arp_xmit); 642EXPORT_SYMBOL(arp_xmit);
643 643
644static bool arp_is_garp(struct net *net, struct net_device *dev,
645 int *addr_type, __be16 ar_op,
646 __be32 sip, __be32 tip,
647 unsigned char *sha, unsigned char *tha)
648{
649 bool is_garp = tip == sip;
650
651 /* Gratuitous ARP _replies_ also require target hwaddr to be
652 * the same as source.
653 */
654 if (is_garp && ar_op == htons(ARPOP_REPLY))
655 is_garp =
656 /* IPv4 over IEEE 1394 doesn't provide target
657 * hardware address field in its ARP payload.
658 */
659 tha &&
660 !memcmp(tha, sha, dev->addr_len);
661
662 if (is_garp) {
663 *addr_type = inet_addr_type_dev_table(net, dev, sip);
664 if (*addr_type != RTN_UNICAST)
665 is_garp = false;
666 }
667 return is_garp;
668}
669
644/* 670/*
645 * Process an arp request. 671 * Process an arp request.
646 */ 672 */
@@ -653,6 +679,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
653 unsigned char *arp_ptr; 679 unsigned char *arp_ptr;
654 struct rtable *rt; 680 struct rtable *rt;
655 unsigned char *sha; 681 unsigned char *sha;
682 unsigned char *tha = NULL;
656 __be32 sip, tip; 683 __be32 sip, tip;
657 u16 dev_type = dev->type; 684 u16 dev_type = dev->type;
658 int addr_type; 685 int addr_type;
@@ -724,6 +751,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
724 break; 751 break;
725#endif 752#endif
726 default: 753 default:
754 tha = arp_ptr;
727 arp_ptr += dev->addr_len; 755 arp_ptr += dev->addr_len;
728 } 756 }
729 memcpy(&tip, arp_ptr, 4); 757 memcpy(&tip, arp_ptr, 4);
@@ -835,19 +863,25 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 	n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
 
-	if (IN_DEV_ARP_ACCEPT(in_dev)) {
-		unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip);
+	addr_type = -1;
+	if (n || IN_DEV_ARP_ACCEPT(in_dev)) {
+		is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op,
+				      sip, tip, sha, tha);
+	}
 
+	if (IN_DEV_ARP_ACCEPT(in_dev)) {
 		/* Unsolicited ARP is not accepted by default.
 		   It is possible, that this option should be enabled for some
 		   devices (strip is candidate)
 		 */
-		is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
-			  addr_type == RTN_UNICAST;
-
 		if (!n &&
-		    ((arp->ar_op == htons(ARPOP_REPLY) &&
-		      addr_type == RTN_UNICAST) || is_garp))
+		    (is_garp ||
+		     (arp->ar_op == htons(ARPOP_REPLY) &&
+		      (addr_type == RTN_UNICAST ||
+		       (addr_type < 0 &&
+			/* postpone calculation to as late as possible */
+			inet_addr_type_dev_table(net, dev, sip) ==
+				RTN_UNICAST)))))
 			n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
 	}
 
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 65cc02bd82bc..93322f895eab 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -248,6 +248,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
248 u8 *tail; 248 u8 *tail;
249 u8 *vaddr; 249 u8 *vaddr;
250 int nfrags; 250 int nfrags;
251 int esph_offset;
251 struct page *page; 252 struct page *page;
252 struct sk_buff *trailer; 253 struct sk_buff *trailer;
253 int tailen = esp->tailen; 254 int tailen = esp->tailen;
@@ -313,11 +314,13 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
313 } 314 }
314 315
315cow: 316cow:
317 esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
318
316 nfrags = skb_cow_data(skb, tailen, &trailer); 319 nfrags = skb_cow_data(skb, tailen, &trailer);
317 if (nfrags < 0) 320 if (nfrags < 0)
318 goto out; 321 goto out;
319 tail = skb_tail_pointer(trailer); 322 tail = skb_tail_pointer(trailer);
320 esp->esph = ip_esp_hdr(skb); 323 esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
321 324
322skip_cow: 325skip_cow:
323 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); 326 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 39bd1edee676..83e3ed258467 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -763,7 +763,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	unsigned int e = 0, s_e;
 	struct fib_table *tb;
 	struct hlist_head *head;
-	int dumped = 0;
+	int dumped = 0, err;
 
 	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
 	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -783,20 +783,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 			if (dumped)
 				memset(&cb->args[2], 0, sizeof(cb->args) -
 				       2 * sizeof(cb->args[0]));
-			if (fib_table_dump(tb, skb, cb) < 0)
-				goto out;
+			err = fib_table_dump(tb, skb, cb);
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 			dumped = 1;
 next:
 			e++;
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 
 	cb->args[1] = e;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
 /* Prepare and feed intra-kernel routing request.
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index da449ddb8cc1..ad9ad4aab5da 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -203,6 +203,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
203static void free_fib_info_rcu(struct rcu_head *head) 203static void free_fib_info_rcu(struct rcu_head *head)
204{ 204{
205 struct fib_info *fi = container_of(head, struct fib_info, rcu); 205 struct fib_info *fi = container_of(head, struct fib_info, rcu);
206 struct dst_metrics *m;
206 207
207 change_nexthops(fi) { 208 change_nexthops(fi) {
208 if (nexthop_nh->nh_dev) 209 if (nexthop_nh->nh_dev)
@@ -213,8 +214,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
 		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
 	} endfor_nexthops(fi);
 
-	if (fi->fib_metrics != (u32 *) dst_default_metrics)
-		kfree(fi->fib_metrics);
+	m = fi->fib_metrics;
+	if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+		kfree(m);
 	kfree(fi);
 }
 
@@ -971,11 +973,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
 			val = 255;
 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
 			return -EINVAL;
-		fi->fib_metrics[type - 1] = val;
+		fi->fib_metrics->metrics[type - 1] = val;
 	}
 
 	if (ecn_ca)
-		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+		fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 
 	return 0;
 }
@@ -1033,11 +1035,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 		goto failure;
 	fib_info_cnt++;
 	if (cfg->fc_mx) {
-		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
 		if (!fi->fib_metrics)
 			goto failure;
+		atomic_set(&fi->fib_metrics->refcnt, 1);
 	} else
-		fi->fib_metrics = (u32 *) dst_default_metrics;
+		fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
 
 	fi->fib_net = net;
 	fi->fib_protocol = cfg->fc_protocol;
@@ -1238,7 +1241,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 	if (fi->fib_priority &&
 	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
 		goto nla_put_failure;
-	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
 		goto nla_put_failure;
 
 	if (fi->fib_prefsrc &&
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1201409ba1dc..51182ff2b441 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1983,6 +1983,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
1983 1983
1984 /* rcu_read_lock is hold by caller */ 1984 /* rcu_read_lock is hold by caller */
1985 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { 1985 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
1986 int err;
1987
1986 if (i < s_i) { 1988 if (i < s_i) {
1987 i++; 1989 i++;
1988 continue; 1990 continue;
@@ -1993,17 +1995,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
 			continue;
 		}
 
-		if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
-				  cb->nlh->nlmsg_seq,
-				  RTM_NEWROUTE,
-				  tb->tb_id,
-				  fa->fa_type,
-				  xkey,
-				  KEYLENGTH - fa->fa_slen,
-				  fa->fa_tos,
-				  fa->fa_info, NLM_F_MULTI) < 0) {
+		err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
+				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+				    tb->tb_id, fa->fa_type,
+				    xkey, KEYLENGTH - fa->fa_slen,
+				    fa->fa_tos, fa->fa_info, NLM_F_MULTI);
+		if (err < 0) {
 			cb->args[4] = i;
-			return -1;
+			return err;
 		}
 		i++;
 	}
@@ -2025,10 +2024,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
 	t_key key = cb->args[3];
 
 	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
-		if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
+		int err;
+
+		err = fn_trie_dump_leaf(l, tb, skb, cb);
+		if (err < 0) {
 			cb->args[3] = key;
 			cb->args[2] = count;
-			return -1;
+			return err;
 		}
 
 		++count;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 43318b5f5647..9144fa7df2ad 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -657,8 +657,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	/* Needed by both icmp_global_allow and icmp_xmit_lock */
 	local_bh_disable();
 
-	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
-	if (!icmpv4_global_allow(net, type, code))
+	/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
+	 * incoming dev is loopback. If outgoing dev change to not be
+	 * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow)
+	 */
+	if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
+	    !icmpv4_global_allow(net, type, code))
 		goto out_bh_enable;
 
 	sk = icmp_xmit_lock(net);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 44fd86de2823..ec9a396fa466 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1112 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); 1112 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
1113 if (!pmc) 1113 if (!pmc)
1114 return; 1114 return;
1115 spin_lock_init(&pmc->lock);
1115 spin_lock_bh(&im->lock); 1116 spin_lock_bh(&im->lock);
1116 pmc->interface = im->interface; 1117 pmc->interface = im->interface;
1117 in_dev_hold(in_dev); 1118 in_dev_hold(in_dev);
@@ -2071,21 +2072,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 static void ip_mc_clear_src(struct ip_mc_list *pmc)
 {
-	struct ip_sf_list *psf, *nextpsf;
+	struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
 
-	for (psf = pmc->tomb; psf; psf = nextpsf) {
+	spin_lock_bh(&pmc->lock);
+	tomb = pmc->tomb;
+	pmc->tomb = NULL;
+	sources = pmc->sources;
+	pmc->sources = NULL;
+	pmc->sfmode = MCAST_EXCLUDE;
+	pmc->sfcount[MCAST_INCLUDE] = 0;
+	pmc->sfcount[MCAST_EXCLUDE] = 1;
+	spin_unlock_bh(&pmc->lock);
+
+	for (psf = tomb; psf; psf = nextpsf) {
 		nextpsf = psf->sf_next;
 		kfree(psf);
 	}
-	pmc->tomb = NULL;
-	for (psf = pmc->sources; psf; psf = nextpsf) {
+	for (psf = sources; psf; psf = nextpsf) {
 		nextpsf = psf->sf_next;
 		kfree(psf);
 	}
-	pmc->sources = NULL;
-	pmc->sfmode = MCAST_EXCLUDE;
-	pmc->sfcount[MCAST_INCLUDE] = 0;
-	pmc->sfcount[MCAST_EXCLUDE] = 1;
 }
 
 /* Join a multicast group
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7a3fd25e8913..532b36e9ce2a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -964,7 +964,8 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
-	if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
+	if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
+	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
 	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b878ecbc0608..129d1a3616f8 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
446 return 0; 446 return 0;
447 447
448drop: 448drop:
449 if (tun_dst)
450 dst_release((struct dst_entry *)tun_dst);
449 kfree_skb(skb); 451 kfree_skb(skb);
450 return 0; 452 return 0;
451} 453}
@@ -967,7 +969,6 @@ static void ip_tunnel_dev_free(struct net_device *dev)
967 gro_cells_destroy(&tunnel->gro_cells); 969 gro_cells_destroy(&tunnel->gro_cells);
968 dst_cache_destroy(&tunnel->dst_cache); 970 dst_cache_destroy(&tunnel->dst_cache);
969 free_percpu(dev->tstats); 971 free_percpu(dev->tstats);
970 free_netdev(dev);
971} 972}
972 973
973void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) 974void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
@@ -1155,7 +1156,8 @@ int ip_tunnel_init(struct net_device *dev)
 	struct iphdr *iph = &tunnel->parms.iph;
 	int err;
 
-	dev->destructor	= ip_tunnel_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip_tunnel_dev_free;
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3a02d52ed50e..8ae425cad818 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -101,8 +101,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id);
101static void ipmr_free_table(struct mr_table *mrt); 101static void ipmr_free_table(struct mr_table *mrt);
102 102
103static void ip_mr_forward(struct net *net, struct mr_table *mrt, 103static void ip_mr_forward(struct net *net, struct mr_table *mrt,
104 struct sk_buff *skb, struct mfc_cache *cache, 104 struct net_device *dev, struct sk_buff *skb,
105 int local); 105 struct mfc_cache *cache, int local);
106static int ipmr_cache_report(struct mr_table *mrt, 106static int ipmr_cache_report(struct mr_table *mrt,
107 struct sk_buff *pkt, vifi_t vifi, int assert); 107 struct sk_buff *pkt, vifi_t vifi, int assert);
108static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 108static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev)
501 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; 501 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
502 dev->flags = IFF_NOARP; 502 dev->flags = IFF_NOARP;
503 dev->netdev_ops = &reg_vif_netdev_ops; 503 dev->netdev_ops = &reg_vif_netdev_ops;
504 dev->destructor = free_netdev; 504 dev->needs_free_netdev = true;
505 dev->features |= NETIF_F_NETNS_LOCAL; 505 dev->features |= NETIF_F_NETNS_LOCAL;
506} 506}
507 507
@@ -988,7 +988,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
988 988
989 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 989 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
990 } else { 990 } else {
991 ip_mr_forward(net, mrt, skb, c, 0); 991 ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
992 } 992 }
993 } 993 }
994} 994}
@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
1073 1073
1074/* Queue a packet for resolution. It gets locked cache entry! */ 1074/* Queue a packet for resolution. It gets locked cache entry! */
1075static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, 1075static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1076 struct sk_buff *skb) 1076 struct sk_buff *skb, struct net_device *dev)
1077{ 1077{
1078 const struct iphdr *iph = ip_hdr(skb); 1078 const struct iphdr *iph = ip_hdr(skb);
1079 struct mfc_cache *c; 1079 struct mfc_cache *c;
@@ -1130,6 +1130,10 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1130 kfree_skb(skb); 1130 kfree_skb(skb);
1131 err = -ENOBUFS; 1131 err = -ENOBUFS;
1132 } else { 1132 } else {
1133 if (dev) {
1134 skb->dev = dev;
1135 skb->skb_iif = dev->ifindex;
1136 }
1133 skb_queue_tail(&c->mfc_un.unres.unresolved, skb); 1137 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1134 err = 0; 1138 err = 0;
1135 } 1139 }
@@ -1828,10 +1832,10 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1828 1832
1829/* "local" means that we should preserve one skb (for local delivery) */ 1833/* "local" means that we should preserve one skb (for local delivery) */
1830static void ip_mr_forward(struct net *net, struct mr_table *mrt, 1834static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1831 struct sk_buff *skb, struct mfc_cache *cache, 1835 struct net_device *dev, struct sk_buff *skb,
1832 int local) 1836 struct mfc_cache *cache, int local)
1833{ 1837{
1834 int true_vifi = ipmr_find_vif(mrt, skb->dev); 1838 int true_vifi = ipmr_find_vif(mrt, dev);
1835 int psend = -1; 1839 int psend = -1;
1836 int vif, ct; 1840 int vif, ct;
1837 1841
@@ -1853,13 +1857,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1853 } 1857 }
1854 1858
1855 /* Wrong interface: drop packet and (maybe) send PIM assert. */ 1859 /* Wrong interface: drop packet and (maybe) send PIM assert. */
1856 if (mrt->vif_table[vif].dev != skb->dev) { 1860 if (mrt->vif_table[vif].dev != dev) {
1857 struct net_device *mdev;
1858
1859 mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev);
1860 if (mdev == skb->dev)
1861 goto forward;
1862
1863 if (rt_is_output_route(skb_rtable(skb))) { 1861 if (rt_is_output_route(skb_rtable(skb))) {
1864 /* It is our own packet, looped back. 1862 /* It is our own packet, looped back.
1865 * Very complicated situation... 1863 * Very complicated situation...
@@ -1980,6 +1978,20 @@ int ip_mr_input(struct sk_buff *skb)
1980 struct net *net = dev_net(skb->dev); 1978 struct net *net = dev_net(skb->dev);
1981 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; 1979 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1982 struct mr_table *mrt; 1980 struct mr_table *mrt;
1981 struct net_device *dev;
1982
1983 /* skb->dev passed in is the loX master dev for vrfs.
1984 * As there are no vifs associated with loopback devices,
1985 * get the proper interface that does have a vif associated with it.
1986 */
1987 dev = skb->dev;
1988 if (netif_is_l3_master(skb->dev)) {
1989 dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
1990 if (!dev) {
1991 kfree_skb(skb);
1992 return -ENODEV;
1993 }
1994 }
1983 1995
1984 /* Packet is looped back after forward, it should not be 1996 /* Packet is looped back after forward, it should not be
1985 * forwarded second time, but still can be delivered locally. 1997 * forwarded second time, but still can be delivered locally.
@@ -2017,7 +2029,7 @@ int ip_mr_input(struct sk_buff *skb)
2017 /* already under rcu_read_lock() */ 2029 /* already under rcu_read_lock() */
2018 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 2030 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2019 if (!cache) { 2031 if (!cache) {
2020 int vif = ipmr_find_vif(mrt, skb->dev); 2032 int vif = ipmr_find_vif(mrt, dev);
2021 2033
2022 if (vif >= 0) 2034 if (vif >= 0)
2023 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, 2035 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
@@ -2037,9 +2049,9 @@ int ip_mr_input(struct sk_buff *skb)
2037 } 2049 }
2038 2050
2039 read_lock(&mrt_lock); 2051 read_lock(&mrt_lock);
2040 vif = ipmr_find_vif(mrt, skb->dev); 2052 vif = ipmr_find_vif(mrt, dev);
2041 if (vif >= 0) { 2053 if (vif >= 0) {
2042 int err2 = ipmr_cache_unresolved(mrt, vif, skb); 2054 int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
2043 read_unlock(&mrt_lock); 2055 read_unlock(&mrt_lock);
2044 2056
2045 return err2; 2057 return err2;
@@ -2050,7 +2062,7 @@ int ip_mr_input(struct sk_buff *skb)
2050 } 2062 }
2051 2063
2052 read_lock(&mrt_lock); 2064 read_lock(&mrt_lock);
2053 ip_mr_forward(net, mrt, skb, cache, local); 2065 ip_mr_forward(net, mrt, dev, skb, cache, local);
2054 read_unlock(&mrt_lock); 2066 read_unlock(&mrt_lock);
2055 2067
2056 if (local) 2068 if (local)
@@ -2224,7 +2236,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
2224 iph->saddr = saddr; 2236 iph->saddr = saddr;
2225 iph->daddr = daddr; 2237 iph->daddr = daddr;
2226 iph->version = 0; 2238 iph->version = 0;
2227 err = ipmr_cache_unresolved(mrt, vif, skb2); 2239 err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
2228 read_unlock(&mrt_lock); 2240 read_unlock(&mrt_lock);
2229 rcu_read_unlock(); 2241 rcu_read_unlock();
2230 return err; 2242 return err;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 655d9eebe43e..6883b3d4ba8f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1385,8 +1385,12 @@ static void rt_add_uncached_list(struct rtable *rt)
1385 1385
1386static void ipv4_dst_destroy(struct dst_entry *dst) 1386static void ipv4_dst_destroy(struct dst_entry *dst)
1387{ 1387{
1388 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1388 struct rtable *rt = (struct rtable *) dst; 1389 struct rtable *rt = (struct rtable *) dst;
1389 1390
1391 if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
1392 kfree(p);
1393
1390 if (!list_empty(&rt->rt_uncached)) { 1394 if (!list_empty(&rt->rt_uncached)) {
1391 struct uncached_list *ul = rt->rt_uncached_list; 1395 struct uncached_list *ul = rt->rt_uncached_list;
1392 1396
@@ -1438,7 +1442,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 			rt->rt_gateway = nh->nh_gw;
 			rt->rt_uses_gateway = 1;
 		}
-		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+		dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+		if (fi->fib_metrics != &dst_default_metrics) {
+			rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+			atomic_inc(&fi->fib_metrics->refcnt);
+		}
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		rt->dst.tclassid = nh->nh_tclassid;
 #endif
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1e4c76d2b827..40aca7803cf2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1084,9 +1084,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
+	struct sockaddr *uaddr = msg->msg_name;
 	int err, flags;
 
-	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+	     uaddr->sa_family == AF_UNSPEC))
 		return -EOPNOTSUPP;
 	if (tp->fastopen_req)
 		return -EALREADY; /* Another Fast Open is in progress */
@@ -1108,7 +1111,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 		}
 	}
 	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
-	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+	err = __inet_stream_connect(sk->sk_socket, uaddr,
 				    msg->msg_namelen, flags, 1);
 	/* fastopen_req could already be freed in __inet_stream_connect
 	 * if the connection times out or gets rst
@@ -2320,9 +2323,15 @@ int tcp_disconnect(struct sock *sk, int flags)
2320 tcp_set_ca_state(sk, TCP_CA_Open); 2323 tcp_set_ca_state(sk, TCP_CA_Open);
2321 tcp_clear_retrans(tp); 2324 tcp_clear_retrans(tp);
2322 inet_csk_delack_init(sk); 2325 inet_csk_delack_init(sk);
2326 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2327 * issue in __tcp_select_window()
2328 */
2329 icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
2323 tcp_init_send_head(sk); 2330 tcp_init_send_head(sk);
2324 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2331 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2325 __sk_dst_reset(sk); 2332 __sk_dst_reset(sk);
2333 dst_release(sk->sk_rx_dst);
2334 sk->sk_rx_dst = NULL;
2326 tcp_saved_syn_free(tp); 2335 tcp_saved_syn_free(tp);
2327 2336
2328 /* Clean up fastopen related fields */ 2337 /* Clean up fastopen related fields */
@@ -2374,9 +2383,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
 	return 0;
 }
 
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
 		struct tcp_repair_opt __user *optbuf, unsigned int len)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_repair_opt opt;
 
 	while (len >= sizeof(opt)) {
@@ -2389,6 +2399,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
2389 switch (opt.opt_code) { 2399 switch (opt.opt_code) {
2390 case TCPOPT_MSS: 2400 case TCPOPT_MSS:
2391 tp->rx_opt.mss_clamp = opt.opt_val; 2401 tp->rx_opt.mss_clamp = opt.opt_val;
2402 tcp_mtup_init(sk);
2392 break; 2403 break;
2393 case TCPOPT_WINDOW: 2404 case TCPOPT_WINDOW:
2394 { 2405 {
@@ -2548,7 +2559,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (!tp->repair)
 			err = -EINVAL;
 		else if (sk->sk_state == TCP_ESTABLISHED)
-			err = tcp_repair_options_est(tp,
+			err = tcp_repair_options_est(sk,
 					(struct tcp_repair_opt __user *)optval,
 					optlen);
 		else
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6e3c512054a6..324c9bcc5456 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
180{ 180{
181 const struct inet_connection_sock *icsk = inet_csk(sk); 181 const struct inet_connection_sock *icsk = inet_csk(sk);
182 182
183 tcp_sk(sk)->prior_ssthresh = 0;
183 if (icsk->icsk_ca_ops->init) 184 if (icsk->icsk_ca_ops->init)
184 icsk->icsk_ca_ops->init(sk); 185 icsk->icsk_ca_ops->init(sk);
185 if (tcp_ca_needs_ecn(sk)) 186 if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5a3ad09e2786..174d4376baa5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1179,13 +1179,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 		 */
 		if (pkt_len > mss) {
 			unsigned int new_len = (pkt_len / mss) * mss;
-			if (!in_sack && new_len < pkt_len) {
+			if (!in_sack && new_len < pkt_len)
 				new_len += mss;
-				if (new_len >= skb->len)
-					return 0;
-			}
 			pkt_len = new_len;
 		}
+
+		if (pkt_len >= skb->len && !in_sack)
+			return 0;
+
 		err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
 		if (err < 0)
 			return err;
@@ -3189,7 +3190,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		int delta;
 
 		/* Non-retransmitted hole got filled? That's reordering */
-		if (reord < prior_fackets)
+		if (reord < prior_fackets && reord <= tp->fackets_out)
 			tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
 		delta = tcp_is_fack(tp) ? pkts_acked :
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ea6e4cff9faf..1d6219bf2d6b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1612,7 +1612,7 @@ static void udp_v4_rehash(struct sock *sk)
 	udp_lib_rehash(sk, new_hash);
 }
 
-int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int rc;
 
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL(udp_encap_enable);
  * Note that in the success and error cases, the skb is assumed to
  * have either been requeued or freed.
  */
-int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index feb50a16398d..a8cf8c6fb60c 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,6 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
25 int flags, int *addr_len); 25 int flags, int *addr_len);
26int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 26int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
27 int flags); 27 int flags);
28int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
29void udp_destroy_sock(struct sock *sk); 28void udp_destroy_sock(struct sock *sk);
30 29
31#ifdef CONFIG_PROC_FS 30#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8d297a79b568..1d2dbace42ff 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
 				  unsigned long delay)
 {
-	if (!delayed_work_pending(&ifp->dad_work))
-		in6_ifa_hold(ifp);
-	mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+	in6_ifa_hold(ifp);
+	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+		in6_ifa_put(ifp);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -1022,7 +1022,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 	INIT_HLIST_NODE(&ifa->addr_lst);
 	ifa->scope = scope;
 	ifa->prefix_len = pfxlen;
-	ifa->flags = flags | IFA_F_TENTATIVE;
+	ifa->flags = flags;
+	/* No need to add the TENTATIVE flag for addresses with NODAD */
+	if (!(flags & IFA_F_NODAD))
+		ifa->flags |= IFA_F_TENTATIVE;
 	ifa->valid_lft = valid_lft;
 	ifa->prefered_lft = prefered_lft;
 	ifa->cstamp = ifa->tstamp = jiffies;
@@ -3366,6 +3369,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3366 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3369 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3367 struct netdev_notifier_changeupper_info *info; 3370 struct netdev_notifier_changeupper_info *info;
3368 struct inet6_dev *idev = __in6_dev_get(dev); 3371 struct inet6_dev *idev = __in6_dev_get(dev);
3372 struct net *net = dev_net(dev);
3369 int run_pending = 0; 3373 int run_pending = 0;
3370 int err; 3374 int err;
3371 3375
@@ -3381,7 +3385,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 	case NETDEV_CHANGEMTU:
 		/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
 		if (dev->mtu < IPV6_MIN_MTU) {
-			addrconf_ifdown(dev, 1);
+			addrconf_ifdown(dev, dev != net->loopback_dev);
 			break;
 		}
 
@@ -3497,7 +3501,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			 * IPV6_MIN_MTU stop IPv6 on this interface.
 			 */
 			if (dev->mtu < IPV6_MIN_MTU)
-				addrconf_ifdown(dev, 1);
+				addrconf_ifdown(dev, dev != net->loopback_dev);
 		}
 		break;
 
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 37ac9de713c6..8d772fea1dde 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1319 struct ipv6hdr *ip6_hdr; 1319 struct ipv6hdr *ip6_hdr;
1320 struct ipv6_opt_hdr *hop; 1320 struct ipv6_opt_hdr *hop;
1321 unsigned char buf[CALIPSO_MAX_BUFFER]; 1321 unsigned char buf[CALIPSO_MAX_BUFFER];
1322 int len_delta, new_end, pad; 1322 int len_delta, new_end, pad, payload;
1323 unsigned int start, end; 1323 unsigned int start, end;
1324 1324
1325 ip6_hdr = ipv6_hdr(skb); 1325 ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1346 if (ret_val < 0) 1346 if (ret_val < 0)
1347 return ret_val; 1347 return ret_val;
1348 1348
1349 ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
1350
1349 if (len_delta) { 1351 if (len_delta) {
1350 if (len_delta > 0) 1352 if (len_delta > 0)
1351 skb_push(skb, len_delta); 1353 skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1355 sizeof(*ip6_hdr) + start); 1357 sizeof(*ip6_hdr) + start);
1356 skb_reset_network_header(skb); 1358 skb_reset_network_header(skb);
1357 ip6_hdr = ipv6_hdr(skb); 1359 ip6_hdr = ipv6_hdr(skb);
1360 payload = ntohs(ip6_hdr->payload_len);
1361 ip6_hdr->payload_len = htons(payload + len_delta);
1358 } 1362 }
1359 1363
1360 hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); 1364 hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e011122ebd43..5c786f5ab961 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -250,8 +250,14 @@ ipv4_connected:
 	 */
 
 	err = ip6_datagram_dst_update(sk, true);
-	if (err)
+	if (err) {
+		/* Reset daddr and dport so that udp_v6_early_demux()
+		 * fails to find this socket
+		 */
+		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
+		inet->inet_dport = 0;
 		goto out;
+	}
 
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index d950d43ba255..f02f131f6435 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -30,6 +30,25 @@
 #include <net/ipv6.h>
 #include <linux/icmpv6.h>
 
+static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
+{
+    int off = sizeof(struct ipv6hdr);
+    struct ipv6_opt_hdr *exthdr;
+
+    if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
+        return offsetof(struct ipv6hdr, nexthdr);
+
+    while (off < nhlen) {
+        exthdr = (void *)ipv6_hdr + off;
+        if (exthdr->nexthdr == NEXTHDR_ESP)
+            return off;
+
+        off += ipv6_optlen(exthdr);
+    }
+
+    return 0;
+}
+
 static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
                                          struct sk_buff *skb)
 {
@@ -38,6 +57,7 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
     struct xfrm_state *x;
     __be32 seq;
     __be32 spi;
+    int nhoff;
     int err;
 
     skb_pull(skb, offset);
@@ -72,6 +92,11 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 
     xo->flags |= XFRM_GRO;
 
+    nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
+    if (!nhoff)
+        goto out;
+
+    IP6CB(skb)->nhoff = nhoff;
     XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
     XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
     XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index eea23b57c6a5..ec849d88a662 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
                                    int flags, pol_lookup_t lookup)
 {
-    struct rt6_info *rt;
     struct fib_lookup_arg arg = {
         .lookup_ptr = lookup,
         .flags = FIB_LOOKUP_NOREF,
@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
     fib_rules_lookup(net->ipv6.fib6_rules_ops,
                      flowi6_to_flowi(fl6), flags, &arg);
 
-    rt = arg.result;
+    if (arg.result)
+        return arg.result;
 
-    if (!rt) {
-        dst_hold(&net->ipv6.ip6_null_entry->dst);
-        return &net->ipv6.ip6_null_entry->dst;
-    }
-
-    if (rt->rt6i_flags & RTF_REJECT &&
-        rt->dst.error == -EAGAIN) {
-        ip6_rt_put(rt);
-        rt = net->ipv6.ip6_null_entry;
-        dst_hold(&rt->dst);
-    }
-
-    return &rt->dst;
+    dst_hold(&net->ipv6.ip6_null_entry->dst);
+    return &net->ipv6.ip6_null_entry->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
             flp6->saddr = saddr;
         }
         err = rt->dst.error;
-        goto out;
+        if (err != -EAGAIN)
+            goto out;
     }
 again:
     ip6_rt_put(rt);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 230b5aac9f03..8d7b113958b1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -491,7 +491,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
     local_bh_disable();
 
     /* Check global sysctl_icmp_msgs_per_sec ratelimit */
-    if (!icmpv6_global_allow(type))
+    if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type))
         goto out_bh_enable;
 
     mip6_addr_swap(skb);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 2fd5ca151dcf..77f7f8c7d93d 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -62,6 +62,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc)
 {
     u32 *v = (u32 *)loc.v32;
 
+    __ila_hash_secret_init();
     return jhash_2words(v[0], v[1], hashrnd);
 }
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d4bf2c68a545..e6b78ba0e636 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
     struct rt6_info *rt;
 
     rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
-    if (rt->rt6i_flags & RTF_REJECT &&
-        rt->dst.error == -EAGAIN) {
+    if (rt->dst.error == -EAGAIN) {
         ip6_rt_put(rt);
         rt = net->ipv6.ip6_null_entry;
         dst_hold(&rt->dst);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 8d128ba79b66..64eea3962733 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -537,11 +537,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 
     memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-    dsfield = ipv4_get_dsfield(iph);
-
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-        fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-                         & IPV6_TCLASS_MASK;
+        dsfield = ipv4_get_dsfield(iph);
+    else
+        dsfield = ip6_tclass(t->parms.flowinfo);
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
         fl6.flowi6_mark = skb->mark;
     else
@@ -598,9 +597,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 
     memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-    dsfield = ipv6_get_dsfield(ipv6h);
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-        fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+        dsfield = ipv6_get_dsfield(ipv6h);
+    else
+        dsfield = ip6_tclass(t->parms.flowinfo);
+
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
         fl6.flowlabel |= ip6_flowlabel(ipv6h);
     if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -990,13 +991,13 @@ static void ip6gre_dev_free(struct net_device *dev)
 
     dst_cache_destroy(&t->dst_cache);
     free_percpu(dev->tstats);
-    free_netdev(dev);
 }
 
 static void ip6gre_tunnel_setup(struct net_device *dev)
 {
     dev->netdev_ops = &ip6gre_netdev_ops;
-    dev->destructor = ip6gre_dev_free;
+    dev->needs_free_netdev = true;
+    dev->priv_destructor = ip6gre_dev_free;
 
     dev->type = ARPHRD_IP6GRE;
 
@@ -1147,7 +1148,7 @@ static int __net_init ip6gre_init_net(struct net *net)
     return 0;
 
 err_reg_dev:
-    ip6gre_dev_free(ign->fb_tunnel_dev);
+    free_netdev(ign->fb_tunnel_dev);
 err_alloc_dev:
     return err;
 }
@@ -1299,7 +1300,8 @@ static void ip6gre_tap_setup(struct net_device *dev)
     ether_setup(dev);
 
     dev->netdev_ops = &ip6gre_tap_netdev_ops;
-    dev->destructor = ip6gre_dev_free;
+    dev->needs_free_netdev = true;
+    dev->priv_destructor = ip6gre_dev_free;
 
     dev->features |= NETIF_F_NETNS_LOCAL;
     dev->priv_flags &= ~IFF_TX_SKB_SHARING;
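The ip6_gre hunks above pick the outer DS field either from the inner packet or from the tunnel's configured flowinfo word (ip6_tclass(t->parms.flowinfo)). A user-space sketch of that traffic-class extraction, assuming the conventional layout where the traffic class sits in bits 20-27 of the host-order flowinfo and the flow label in the low 20 bits:

/* Sketch under the stated layout assumption; mirrors what helpers like
 * ip6_tclass()/ip6_make_flowinfo() are used for in the patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define TCLASS_SHIFT   20
#define TCLASS_MASK    0x0ff00000u
#define FLOWLABEL_MASK 0x000fffffu

static uint8_t tclass_from_flowinfo(uint32_t flowinfo_be)
{
    return (ntohl(flowinfo_be) & TCLASS_MASK) >> TCLASS_SHIFT;
}

static uint32_t make_flowinfo(uint8_t tclass, uint32_t flowlabel_be)
{
    return htonl((uint32_t)tclass << TCLASS_SHIFT) |
           (flowlabel_be & htonl(FLOWLABEL_MASK));
}

int main(void)
{
    uint32_t fi = make_flowinfo(0xb8 /* EF DSCP, Not-ECT */, htonl(0x12345));
    printf("tclass=0x%02x\n", tclass_from_flowinfo(fi));   /* prints 0xb8 */
    return 0;
}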
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 93e58a5e1837..cdb3728faca7 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -63,7 +63,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
     const struct net_offload *ops;
     int proto;
     struct frag_hdr *fptr;
-    unsigned int unfrag_ip6hlen;
     unsigned int payload_len;
     u8 *prevhdr;
     int offset = 0;
@@ -116,8 +115,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
         skb->network_header = (u8 *)ipv6h - skb->head;
 
         if (udpfrag) {
-            unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-            fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
+            int err = ip6_find_1stfragopt(skb, &prevhdr);
+            if (err < 0) {
+                kfree_skb_list(segs);
+                return ERR_PTR(err);
+            }
+            fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
             fptr->frag_off = htons(offset);
             if (skb->next)
                 fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 58f6288e9ba5..1699acb2fa2c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -597,7 +597,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
     int ptr, offset = 0, err = 0;
     u8 *prevhdr, nexthdr = 0;
 
-    hlen = ip6_find_1stfragopt(skb, &prevhdr);
+    err = ip6_find_1stfragopt(skb, &prevhdr);
+    if (err < 0)
+        goto fail;
+    hlen = err;
     nexthdr = *prevhdr;
 
     mtu = ip6_skb_dst_mtu(skb);
@@ -1387,7 +1390,7 @@ emsgsize:
      */
 
     cork->length += length;
-    if ((((length + fragheaderlen) > mtu) ||
+    if ((((length + (skb ? skb->len : headersize)) > mtu) ||
          (skb && skb_is_gso(skb))) &&
         (sk->sk_protocol == IPPROTO_UDP) &&
         (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
@@ -1463,6 +1466,11 @@ alloc_new_skb:
              */
             alloclen += sizeof(struct frag_hdr);
 
+            copy = datalen - transhdrlen - fraggap;
+            if (copy < 0) {
+                err = -EINVAL;
+                goto error;
+            }
             if (transhdrlen) {
                 skb = sock_alloc_send_skb(sk,
                                           alloclen + hh_len,
@@ -1512,13 +1520,9 @@ alloc_new_skb:
                 data += fraggap;
                 pskb_trim_unique(skb_prev, maxfraglen);
             }
-            copy = datalen - transhdrlen - fraggap;
-
-            if (copy < 0) {
-                err = -EINVAL;
-                kfree_skb(skb);
-                goto error;
-            } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+            if (copy > 0 &&
+                getfrag(from, data + transhdrlen, offset,
+                        copy, fraggap, skb) < 0) {
                 err = -EFAULT;
                 kfree_skb(skb);
                 goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6eb2ae507500..8c6c3c8e7eef 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev)
     gro_cells_destroy(&t->gro_cells);
     dst_cache_destroy(&t->dst_cache);
     free_percpu(dev->tstats);
-    free_netdev(dev);
 }
 
 static int ip6_tnl_create2(struct net_device *dev)
@@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
     return t;
 
 failed_free:
-    ip6_dev_free(dev);
+    free_netdev(dev);
 failed:
     return ERR_PTR(err);
 }
@@ -859,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
     return 0;
 
 drop:
+    if (tun_dst)
+        dst_release((struct dst_entry *)tun_dst);
     kfree_skb(skb);
     return 0;
 }
@@ -1095,6 +1096,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
     if (!dst) {
 route_lookup:
+        /* add dsfield to flowlabel for route lookup */
+        fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
         dst = ip6_route_output(net, NULL, fl6);
 
         if (dst->error)
@@ -1196,7 +1200,7 @@ route_lookup:
     skb_push(skb, sizeof(struct ipv6hdr));
     skb_reset_network_header(skb);
     ipv6h = ipv6_hdr(skb);
-    ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+    ip6_flow_hdr(ipv6h, dsfield,
                  ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
     ipv6h->hop_limit = hop_limit;
     ipv6h->nexthdr = proto;
@@ -1231,8 +1235,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
     if (tproto != IPPROTO_IPIP && tproto != 0)
         return -1;
 
-    dsfield = ipv4_get_dsfield(iph);
-
     if (t->parms.collect_md) {
         struct ip_tunnel_info *tun_info;
         const struct ip_tunnel_key *key;
@@ -1246,6 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         fl6.flowi6_proto = IPPROTO_IPIP;
         fl6.daddr = key->u.ipv6.dst;
         fl6.flowlabel = key->label;
+        dsfield = key->tos;
     } else {
         if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
             encap_limit = t->parms.encap_limit;
@@ -1254,8 +1257,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         fl6.flowi6_proto = IPPROTO_IPIP;
 
         if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-            fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-                             & IPV6_TCLASS_MASK;
+            dsfield = ipv4_get_dsfield(iph);
+        else
+            dsfield = ip6_tclass(t->parms.flowinfo);
         if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
             fl6.flowi6_mark = skb->mark;
         else
@@ -1267,6 +1271,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
     if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
         return -1;
 
+    dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
     skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
     err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1300,8 +1306,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         ip6_tnl_addr_conflict(t, ipv6h))
         return -1;
 
-    dsfield = ipv6_get_dsfield(ipv6h);
-
     if (t->parms.collect_md) {
         struct ip_tunnel_info *tun_info;
         const struct ip_tunnel_key *key;
@@ -1315,6 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         fl6.flowi6_proto = IPPROTO_IPV6;
         fl6.daddr = key->u.ipv6.dst;
         fl6.flowlabel = key->label;
+        dsfield = key->tos;
     } else {
         offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
         /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
@@ -1337,7 +1342,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         fl6.flowi6_proto = IPPROTO_IPV6;
 
         if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-            fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
+            dsfield = ipv6_get_dsfield(ipv6h);
+        else
+            dsfield = ip6_tclass(t->parms.flowinfo);
         if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
             fl6.flowlabel |= ip6_flowlabel(ipv6h);
         if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -1351,6 +1358,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
     if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
         return -1;
 
+    dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
     skb_set_inner_ipproto(skb, IPPROTO_IPV6);
 
     err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1769,7 +1778,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
     dev->netdev_ops = &ip6_tnl_netdev_ops;
-    dev->destructor = ip6_dev_free;
+    dev->needs_free_netdev = true;
+    dev->priv_destructor = ip6_dev_free;
 
     dev->type = ARPHRD_TUNNEL6;
     dev->flags |= IFF_NOARP;
@@ -2216,7 +2226,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
     return 0;
 
 err_register:
-    ip6_dev_free(ip6n->fb_tnl_dev);
+    free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
     return err;
 }
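The ip6_tunnel hunks above combine the chosen outer DS field with the inner packet's ECN bits before building the encapsulating header. A small sketch of that combination rule as I read INET_ECN_encapsulate() (keep the outer DSCP, copy the inner ECN, but map CE to ECT(0) rather than copying it outward); the constants and helper name here are illustrative:

#include <stdint.h>
#include <stdio.h>

#define ECN_MASK  0x03
#define ECN_ECT_0 0x02
#define ECN_CE    0x03

static uint8_t ecn_encapsulate(uint8_t outer, uint8_t inner)
{
    uint8_t inner_ecn = inner & ECN_MASK;

    outer &= ~ECN_MASK;                               /* keep outer DSCP only */
    outer |= (inner_ecn == ECN_CE) ? ECN_ECT_0 : inner_ecn;
    return outer;
}

int main(void)
{
    /* EF outer DSCP, inner packet marked CE -> outer becomes EF + ECT(0) */
    printf("0x%02x\n", ecn_encapsulate(0xb8, 0x03));
    return 0;
}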
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d67ef56454b2..837ea1eefe7f 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
 static void vti6_dev_free(struct net_device *dev)
 {
     free_percpu(dev->tstats);
-    free_netdev(dev);
 }
 
 static int vti6_tnl_create2(struct net_device *dev)
@@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
     return t;
 
 failed_free:
-    vti6_dev_free(dev);
+    free_netdev(dev);
 failed:
     return NULL;
 }
@@ -842,7 +841,8 @@ static const struct net_device_ops vti6_netdev_ops = {
 static void vti6_dev_setup(struct net_device *dev)
 {
     dev->netdev_ops = &vti6_netdev_ops;
-    dev->destructor = vti6_dev_free;
+    dev->needs_free_netdev = true;
+    dev->priv_destructor = vti6_dev_free;
 
     dev->type = ARPHRD_TUNNEL6;
     dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
@@ -1100,7 +1100,7 @@ static int __net_init vti6_init_net(struct net *net)
     return 0;
 
 err_register:
-    vti6_dev_free(ip6n->fb_tnl_dev);
+    free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
     return err;
 }
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 374997d26488..2ecb39b943b5 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev)
     dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
     dev->flags = IFF_NOARP;
     dev->netdev_ops = &reg_vif_netdev_ops;
-    dev->destructor = free_netdev;
+    dev->needs_free_netdev = true;
     dev->features |= NETIF_F_NETNS_LOCAL;
 }
 
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index cd4252346a32..e9065b8d3af8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident);
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
     u16 offset = sizeof(struct ipv6hdr);
-    struct ipv6_opt_hdr *exthdr =
-        (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
     unsigned int packet_len = skb_tail_pointer(skb) -
         skb_network_header(skb);
     int found_rhdr = 0;
     *nexthdr = &ipv6_hdr(skb)->nexthdr;
 
-    while (offset + 1 <= packet_len) {
+    while (offset <= packet_len) {
+        struct ipv6_opt_hdr *exthdr;
 
         switch (**nexthdr) {
 
@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
             return offset;
         }
 
-        offset += ipv6_optlen(exthdr);
-        *nexthdr = &exthdr->nexthdr;
+        if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+            return -EINVAL;
+
         exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
                                          offset);
+        offset += ipv6_optlen(exthdr);
+        *nexthdr = &exthdr->nexthdr;
     }
 
-    return offset;
+    return -EINVAL;
 }
 EXPORT_SYMBOL(ip6_find_1stfragopt);
 
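The output_core.c change above makes ip6_find_1stfragopt() validate that each extension header lies inside the packet before dereferencing it, and return -EINVAL instead of walking off the end. A simplified, user-space sketch of that bounded walk; the 8 * (hdrlen + 1) sizing rule used here applies to generic options headers and is an assumption of this sketch, not a statement about every header type the kernel handles:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

struct opt_hdr {
    uint8_t nexthdr;
    uint8_t hdrlen;    /* length in 8-byte units, not counting the first 8 */
};

static int first_opt_offset(const uint8_t *pkt, size_t pkt_len,
                            uint8_t first_nexthdr, uint8_t wanted)
{
    size_t offset = 40;                    /* fixed IPv6 header size */
    uint8_t nexthdr = first_nexthdr;

    while (offset <= pkt_len) {
        const struct opt_hdr *eh;

        if (nexthdr == wanted)
            return (int)offset;

        if (offset + sizeof(*eh) > pkt_len)
            return -EINVAL;                /* truncated chain: stop, do not read */

        eh = (const struct opt_hdr *)(pkt + offset);
        offset += 8u * (eh->hdrlen + 1);
        nexthdr = eh->nexthdr;
    }
    return -EINVAL;
}

int main(void)
{
    uint8_t pkt[48] = { 0 };
    pkt[40] = 44;                          /* hop-by-hop says next is Fragment */
    pkt[41] = 0;                           /* 8 bytes long */
    printf("%d\n", first_opt_offset(pkt, sizeof(pkt), 0, 44));  /* prints 48 */
    return 0;
}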
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 9b522fa90e6d..ac826dd338ff 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
     .type = SOCK_DGRAM,
     .protocol = IPPROTO_ICMPV6,
     .prot = &pingv6_prot,
-    .ops = &inet6_dgram_ops,
+    .ops = &inet6_sockraw_ops,
     .flags = INET_PROTOSW_REUSE,
 };
 
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index cc8e3ae9ca73..e88bcb8ff0fd 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
     u64 buff64[SNMP_MIB_MAX];
     int i;
 
-    memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+    memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
 
     snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
     for (i = 0; itemlist[i].name; i++)
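The proc.c fix above sizes the memset from the buffer's element type instead of an unrelated type, so a u64 array is fully cleared even where unsigned long is 32 bits. A trivial sketch of the safer pattern (MIB_MAX and the variable names are illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MIB_MAX 64

int main(void)
{
    uint64_t buff64[MIB_MAX];

    memset(buff64, 0, sizeof(buff64));               /* whole array, any platform */
    memset(buff64, 0, sizeof(*buff64) * MIB_MAX);    /* equivalent element-based form */
    printf("%zu bytes cleared\n", sizeof(buff64));
    return 0;
}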
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f992d9e261d..60be012fe708 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
 #endif /* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll. */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
     .family = PF_INET6,
     .owner = THIS_MODULE,
     .release = inet6_release,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dc61b0b5e64e..322bd62e688b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
     if ((rt->dst.dev == dev || !dev) &&
         rt != adn->net->ipv6.ip6_null_entry &&
         (rt->rt6i_nsiblings == 0 ||
+         (dev && netdev_unregistering(dev)) ||
          !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
         return -1;
 
@@ -3721,7 +3722,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
         net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
         net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
-    } else if (event == NETDEV_UNREGISTER) {
+    } else if (event == NETDEV_UNREGISTER &&
+               dev->reg_state != NETREG_UNREGISTERED) {
+        /* NETDEV_UNREGISTER could be fired for multiple times by
+         * netdev_wait_allrefs(). Make sure we only call this once.
+         */
         in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
         in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 61e5902f0687..f8ad15891cd7 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
     return nt;
 
 failed_free:
-    ipip6_dev_free(dev);
+    free_netdev(dev);
 failed:
     return NULL;
 }
@@ -305,7 +305,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
      * we try harder to allocate.
      */
     kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
-        kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
+        kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) :
         NULL;
 
     rcu_read_lock();
@@ -1336,7 +1336,6 @@ static void ipip6_dev_free(struct net_device *dev)
 
     dst_cache_destroy(&tunnel->dst_cache);
     free_percpu(dev->tstats);
-    free_netdev(dev);
 }
 
 #define SIT_FEATURES (NETIF_F_SG | \
@@ -1351,7 +1350,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
     int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
     dev->netdev_ops = &ipip6_netdev_ops;
-    dev->destructor = ipip6_dev_free;
+    dev->needs_free_netdev = true;
+    dev->priv_destructor = ipip6_dev_free;
 
     dev->type = ARPHRD_SIT;
     dev->hard_header_len = LL_MAX_HEADER + t_hlen;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7a8237acd210..4f4310a36a04 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1062,6 +1062,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
         newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+        newnp->ipv6_mc_list = NULL;
         newnp->ipv6_ac_list = NULL;
         newnp->ipv6_fl_list = NULL;
         newnp->pktoptions = NULL;
@@ -1131,6 +1132,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
        First: no IPv4 options.
      */
     newinet->inet_opt = NULL;
+    newnp->ipv6_mc_list = NULL;
     newnp->ipv6_ac_list = NULL;
     newnp->ipv6_fl_list = NULL;
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 04862abfe4ec..75703fda23e7 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -526,7 +526,7 @@ out:
     return;
 }
 
-int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
     int rc;
 
@@ -569,7 +569,7 @@ void udpv6_encap_enable(void)
 }
 EXPORT_SYMBOL(udpv6_encap_enable);
 
-int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
     struct udp_sock *up = udp_sk(sk);
     int is_udplite = IS_UDPLITE(sk);
@@ -879,7 +879,8 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
     struct sock *sk;
 
     udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-        if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
+        if (sk->sk_state == TCP_ESTABLISHED &&
+            INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
             return sk;
         /* Only check first socket in chain */
         break;
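The udp.c hunk above only treats a socket as an early-demux hit when it is actually in the established (connected) state, not merely when its addresses and ports match. A simplified sketch of that guard with the socket fields reduced to plain values (the types and names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum { ST_CLOSE, ST_ESTABLISHED };

struct sock_key {
    int state;
    unsigned char raddr[16], laddr[16];
    unsigned short rport, lport;
};

static bool demux_match(const struct sock_key *sk,
                        const unsigned char *raddr, const unsigned char *laddr,
                        unsigned short rport, unsigned short lport)
{
    return sk->state == ST_ESTABLISHED &&       /* connected sockets only */
           sk->rport == rport && sk->lport == lport &&
           !memcmp(sk->raddr, raddr, 16) && !memcmp(sk->laddr, laddr, 16);
}

int main(void)
{
    struct sock_key sk = { .state = ST_CLOSE, .rport = 53, .lport = 40000 };

    /* addresses match, but the socket is not connected -> prints 0 */
    printf("%d\n", demux_match(&sk, sk.raddr, sk.laddr, 53, 40000));
    return 0;
}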
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index e78bdc76dcc3..f180b3d85e31 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -26,7 +26,6 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
                   int flags, int *addr_len);
-int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 void udpv6_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ac858c480f2f..a2267f80febb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
     u8 frag_hdr_sz = sizeof(struct frag_hdr);
     __wsum csum;
     int tnl_hlen;
+    int err;
 
     mss = skb_shinfo(skb)->gso_size;
     if (unlikely(skb->len <= mss))
@@ -90,7 +91,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
     /* Find the unfragmentable header and shift it left by frag_hdr_sz
      * bytes to insert fragment header.
      */
-    unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+    err = ip6_find_1stfragopt(skb, &prevhdr);
+    if (err < 0)
+        return ERR_PTR(err);
+    unfrag_ip6hlen = err;
     nexthdr = *prevhdr;
     *prevhdr = NEXTHDR_FRAGMENT;
     unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 08a807b29298..3ef5d913e7a3 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -43,8 +43,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
     return 1;
 #endif
 
-    ipv6_hdr(skb)->payload_len = htons(skb->len);
     __skb_push(skb, skb->data - skb_network_header(skb));
+    ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 
     if (xo && (xo->flags & XFRM_GRO)) {
         skb_mac_header_rebuild(skb);
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e015906f9ca..07d36573f50b 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
     iph = ipv6_hdr(skb);
 
     hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+    if (hdr_len < 0)
+        return hdr_len;
     skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
     skb_set_network_header(skb, -x->props.header_len);
     skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 7a92c0f31912..9ad07a91708e 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
     skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
     hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+    if (hdr_len < 0)
+        return hdr_len;
     skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
     skb_set_network_header(skb, -x->props.header_len);
     skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 74d09f91709e..3be852808a9d 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev)
     ether_setup(dev);
 
     dev->netdev_ops = &irlan_eth_netdev_ops;
-    dev->destructor = free_netdev;
+    dev->needs_free_netdev = true;
     dev->min_mtu = 0;
     dev->max_mtu = ETH_MAX_MTU;
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c1950bb14735..b1432b668033 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1157,6 +1157,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
         goto out;
     }
 
+    err = -ENOBUFS;
     key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
     if (sa->sadb_sa_auth) {
         int keysize = 0;
@@ -1168,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
         if (key)
             keysize = (key->sadb_key_bits + 7) / 8;
         x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
-        if (!x->aalg)
+        if (!x->aalg) {
+            err = -ENOMEM;
             goto out;
+        }
         strcpy(x->aalg->alg_name, a->name);
         x->aalg->alg_key_len = 0;
         if (key) {
@@ -1188,8 +1191,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
             goto out;
         }
         x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
-        if (!x->calg)
+        if (!x->calg) {
+            err = -ENOMEM;
             goto out;
+        }
         strcpy(x->calg->alg_name, a->name);
         x->props.calgo = sa->sadb_sa_encrypt;
     } else {
@@ -1203,8 +1208,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
         if (key)
             keysize = (key->sadb_key_bits + 7) / 8;
         x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
-        if (!x->ealg)
+        if (!x->ealg) {
+            err = -ENOMEM;
             goto out;
+        }
         strcpy(x->ealg->alg_name, a->name);
         x->ealg->alg_key_len = 0;
         if (key) {
@@ -1249,8 +1256,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
         struct xfrm_encap_tmpl *natt;
 
         x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
-        if (!x->encap)
+        if (!x->encap) {
+            err = -ENOMEM;
             goto out;
+        }
 
         natt = x->encap;
         n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -2755,6 +2764,8 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
     int err, err2;
 
     err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
+    if (!err)
+        xfrm_garbage_collect(net);
     err2 = unicast_flush_resp(sk, hdr);
     if (err || err2) {
         if (err == -ESRCH) /* empty table - old silent behavior */
@@ -3285,7 +3296,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
     p += pol->sadb_x_policy_len*8;
     sec_ctx = (struct sadb_x_sec_ctx *)p;
     if (len < pol->sadb_x_policy_len*8 +
-        sec_ctx->sadb_x_sec_len) {
+        sec_ctx->sadb_x_sec_len*8) {
         *dir = -EINVAL;
         goto out;
     }
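Two small patterns recur in the af_key.c hunks above: rounding a key length given in bits up to whole bytes with (bits + 7) / 8, and setting an explicit error code on each failure path instead of falling through with a stale default. A brief sketch of both, with illustrative names only:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static size_t key_bits_to_bytes(unsigned int bits)
{
    return (bits + 7) / 8;               /* round up to whole octets */
}

static int alloc_alg(void **out, size_t keysize)
{
    *out = malloc(16 + keysize);
    if (!*out)
        return -ENOMEM;                  /* explicit code on this path */
    return 0;
}

int main(void)
{
    void *alg = NULL;
    int err = alloc_alg(&alg, key_bits_to_bytes(161));   /* 161 bits -> 21 bytes */

    printf("err=%d keysize=%zu\n", err, key_bits_to_bytes(161));
    free(alg);
    return 0;
}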
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8b21af7321b9..4de2ec94b08c 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -114,12 +114,13 @@ static void l2tp_eth_get_stats64(struct net_device *dev,
 {
     struct l2tp_eth *priv = netdev_priv(dev);
 
-    stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
-    stats->tx_packets = atomic_long_read(&priv->tx_packets);
-    stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
-    stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
-    stats->rx_packets = atomic_long_read(&priv->rx_packets);
-    stats->rx_errors = atomic_long_read(&priv->rx_errors);
+    stats->tx_bytes = (unsigned long) atomic_long_read(&priv->tx_bytes);
+    stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets);
+    stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped);
+    stats->rx_bytes = (unsigned long) atomic_long_read(&priv->rx_bytes);
+    stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets);
+    stats->rx_errors = (unsigned long) atomic_long_read(&priv->rx_errors);
+
 }
 
 static const struct net_device_ops l2tp_eth_netdev_ops = {
@@ -141,7 +142,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
     dev->priv_flags &= ~IFF_TX_SKB_SHARING;
     dev->features |= NETIF_F_LLTX;
     dev->netdev_ops = &l2tp_eth_netdev_ops;
-    dev->destructor = free_netdev;
+    dev->needs_free_netdev = true;
 }
 
 static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8364fe5b59e4..c38d16f22d2a 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -311,6 +311,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
     int rc = -EINVAL;
 
     dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+    lock_sock(sk);
     if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
         goto out;
     rc = -EAFNOSUPPORT;
@@ -382,6 +384,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 out_put:
     llc_sap_put(sap);
 out:
+    release_sock(sk);
     return rc;
 }
 
387 390
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 60e2a62f7bef..cf2392b2ac71 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,7 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
     ieee80211_agg_start_txq(sta, tid, true);
 }
 
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+                              struct tid_ampdu_tx *tid_tx)
 {
-    struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+    struct ieee80211_sub_if_data *sdata = sta->sdata;
     struct ieee80211_local *local = sdata->local;
-    struct sta_info *sta;
-    struct tid_ampdu_tx *tid_tx;
 
-    trace_api_start_tx_ba_cb(sdata, ra, tid);
+    if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+        return;
+
+    if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
+        ieee80211_agg_tx_operational(local, sta, tid);
+}
+
+static struct tid_ampdu_tx *
+ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
+                        const u8 *ra, u16 tid, struct sta_info **sta)
+{
+    struct tid_ampdu_tx *tid_tx;
 
     if (tid >= IEEE80211_NUM_TIDS) {
         ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
                tid, IEEE80211_NUM_TIDS);
-        return;
+        return NULL;
     }
 
-    mutex_lock(&local->sta_mtx);
-    sta = sta_info_get_bss(sdata, ra);
-    if (!sta) {
-        mutex_unlock(&local->sta_mtx);
+    *sta = sta_info_get_bss(sdata, ra);
+    if (!*sta) {
         ht_dbg(sdata, "Could not find station: %pM\n", ra);
-        return;
+        return NULL;
     }
 
-    mutex_lock(&sta->ampdu_mlme.mtx);
-    tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+    tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
 
-    if (WARN_ON(!tid_tx)) {
+    if (WARN_ON(!tid_tx))
         ht_dbg(sdata, "addBA was not requested!\n");
-        goto unlock;
-    }
 
-    if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
-        goto unlock;
-
-    if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
-        ieee80211_agg_tx_operational(local, sta, tid);
-
- unlock:
-    mutex_unlock(&sta->ampdu_mlme.mtx);
-    mutex_unlock(&local->sta_mtx);
+    return tid_tx;
 }
 
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
     struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
     struct ieee80211_local *local = sdata->local;
-    struct ieee80211_ra_tid *ra_tid;
-    struct sk_buff *skb = dev_alloc_skb(0);
+    struct sta_info *sta;
+    struct tid_ampdu_tx *tid_tx;
 
-    if (unlikely(!skb))
-        return;
+    trace_api_start_tx_ba_cb(sdata, ra, tid);
 
-    ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-    memcpy(&ra_tid->ra, ra, ETH_ALEN);
-    ra_tid->tid = tid;
+    rcu_read_lock();
+    tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+    if (!tid_tx)
+        goto out;
 
-    skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
-    skb_queue_tail(&sdata->skb_queue, skb);
-    ieee80211_queue_work(&local->hw, &sdata->work);
+    set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
+    ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+    rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+                             struct tid_ampdu_tx *tid_tx)
 {
-    struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-    struct ieee80211_local *local = sdata->local;
-    struct sta_info *sta;
-    struct tid_ampdu_tx *tid_tx;
+    struct ieee80211_sub_if_data *sdata = sta->sdata;
     bool send_delba = false;
 
-    trace_api_stop_tx_ba_cb(sdata, ra, tid);
-
-    if (tid >= IEEE80211_NUM_TIDS) {
-        ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-               tid, IEEE80211_NUM_TIDS);
-        return;
-    }
-
-    ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
-
-    mutex_lock(&local->sta_mtx);
-
-    sta = sta_info_get_bss(sdata, ra);
-    if (!sta) {
-        ht_dbg(sdata, "Could not find station: %pM\n", ra);
-        goto unlock;
-    }
+    ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
+           sta->sta.addr, tid);
 
-    mutex_lock(&sta->ampdu_mlme.mtx);
     spin_lock_bh(&sta->lock);
-    tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-    if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+    if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
         ht_dbg(sdata,
                "unexpected callback to A-MPDU stop for %pM tid %d\n",
                sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
     spin_unlock_bh(&sta->lock);
 
     if (send_delba)
-        ieee80211_send_delba(sdata, ra, tid,
+        ieee80211_send_delba(sdata, sta->sta.addr, tid,
             WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
-    mutex_unlock(&sta->ampdu_mlme.mtx);
- unlock:
-    mutex_unlock(&local->sta_mtx);
 }
 
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
     struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
     struct ieee80211_local *local = sdata->local;
-    struct ieee80211_ra_tid *ra_tid;
-    struct sk_buff *skb = dev_alloc_skb(0);
+    struct sta_info *sta;
+    struct tid_ampdu_tx *tid_tx;
 
-    if (unlikely(!skb))
-        return;
+    trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
-    ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-    memcpy(&ra_tid->ra, ra, ETH_ALEN);
-    ra_tid->tid = tid;
+    rcu_read_lock();
+    tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+    if (!tid_tx)
+        goto out;
 
-    skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
-    skb_queue_tail(&sdata->skb_queue, skb);
-    ieee80211_queue_work(&local->hw, &sdata->work);
+    set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
+    ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+    rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6c2e6060cd54..4a388fe8c2d1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -902,6 +902,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
     default:
         return -EINVAL;
     }
+    sdata->u.ap.req_smps = sdata->smps_mode;
+
     sdata->needed_rx_chains = sdata->local->rx_chains;
 
     sdata->vif.bss_conf.beacon_int = params->beacon_interval;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f4a528773563..6ca5442b1e03 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 {
     int i;
 
-    cancel_work_sync(&sta->ampdu_mlme.work);
-
     for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
         __ieee80211_stop_tx_ba_session(sta, i, reason);
         __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@
                                        reason != AGG_STOP_DESTROY_STA &&
                                        reason != AGG_STOP_PEER_REQUEST);
     }
+
+    /* stopping might queue the work again - so cancel only afterwards */
+    cancel_work_sync(&sta->ampdu_mlme.work);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@
         spin_unlock_bh(&sta->lock);
 
         tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-        if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-                                         &tid_tx->state))
+        if (!tid_tx)
+            continue;
+
+        if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+            ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+        if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
             ___ieee80211_stop_tx_ba_session(sta, tid,
                                             AGG_STOP_LOCAL_REQUEST);
+        if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+            ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
     }
     mutex_unlock(&sta->ampdu_mlme.mtx);
 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f8f6c148f554..5e002f62c235 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
 
 enum sdata_queue_type {
     IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
-    IEEE80211_SDATA_QUEUE_AGG_START = 1,
-    IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
     IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
     IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
 };
@@ -1427,12 +1425,6 @@
     return local->hw.wiphy->bands[band];
 }
 
-/* this struct represents 802.11n's RA/TID combination */
-struct ieee80211_ra_tid {
-    u8 ra[ETH_ALEN];
-    u16 tid;
-};
-
 /* this struct holds the value parsing from channel switch IE */
 struct ieee80211_csa_ie {
     struct cfg80211_chan_def chandef;
@@ -1539,7 +1531,7 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
         return true;
     /* can't handle non-legacy preamble yet */
     if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
-        status->encoding != RX_ENC_LEGACY)
+        status->encoding == RX_ENC_LEGACY)
         return true;
     return false;
 }
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                    enum ieee80211_agg_stop_reason reason);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                     enum ieee80211_agg_stop_reason reason);
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+                              struct tid_ampdu_tx *tid_tx);
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+                             struct tid_ampdu_tx *tid_tx);
 void ieee80211_ba_session_work(struct work_struct *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3bd5b81f5d81..f5f50150ba1c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1213,7 +1213,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
1213static void ieee80211_if_free(struct net_device *dev) 1213static void ieee80211_if_free(struct net_device *dev)
1214{ 1214{
1215 free_percpu(dev->tstats); 1215 free_percpu(dev->tstats);
1216 free_netdev(dev);
1217} 1216}
1218 1217
1219static void ieee80211_if_setup(struct net_device *dev) 1218static void ieee80211_if_setup(struct net_device *dev)
@@ -1221,7 +1220,8 @@ static void ieee80211_if_setup(struct net_device *dev)
1221 ether_setup(dev); 1220 ether_setup(dev);
1222 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1221 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1223 dev->netdev_ops = &ieee80211_dataif_ops; 1222 dev->netdev_ops = &ieee80211_dataif_ops;
1224 dev->destructor = ieee80211_if_free; 1223 dev->needs_free_netdev = true;
1224 dev->priv_destructor = ieee80211_if_free;
1225} 1225}
1226 1226
1227static void ieee80211_if_setup_no_queue(struct net_device *dev) 1227static void ieee80211_if_setup_no_queue(struct net_device *dev)
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
1237 struct ieee80211_local *local = sdata->local; 1237 struct ieee80211_local *local = sdata->local;
1238 struct sk_buff *skb; 1238 struct sk_buff *skb;
1239 struct sta_info *sta; 1239 struct sta_info *sta;
1240 struct ieee80211_ra_tid *ra_tid;
1241 struct ieee80211_rx_agg *rx_agg; 1240 struct ieee80211_rx_agg *rx_agg;
1242 1241
1243 if (!ieee80211_sdata_running(sdata)) 1242 if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
1253 while ((skb = skb_dequeue(&sdata->skb_queue))) { 1252 while ((skb = skb_dequeue(&sdata->skb_queue))) {
1254 struct ieee80211_mgmt *mgmt = (void *)skb->data; 1253 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1255 1254
1256 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { 1255 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
1257 ra_tid = (void *)&skb->cb;
1258 ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
1259 ra_tid->tid);
1260 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
1261 ra_tid = (void *)&skb->cb;
1262 ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
1263 ra_tid->tid);
1264 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
1265 rx_agg = (void *)&skb->cb; 1256 rx_agg = (void *)&skb->cb;
1266 mutex_lock(&local->sta_mtx); 1257 mutex_lock(&local->sta_mtx);
1267 sta = sta_info_get_bss(sdata, rx_agg->addr); 1258 sta = sta_info_get_bss(sdata, rx_agg->addr);
@@ -1825,6 +1816,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1825 ret = dev_alloc_name(ndev, ndev->name); 1816 ret = dev_alloc_name(ndev, ndev->name);
1826 if (ret < 0) { 1817 if (ret < 0) {
1827 ieee80211_if_free(ndev); 1818 ieee80211_if_free(ndev);
1819 free_netdev(ndev);
1828 return ret; 1820 return ret;
1829 } 1821 }
1830 1822
@@ -1914,7 +1906,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1914 1906
1915 ret = register_netdevice(ndev); 1907 ret = register_netdevice(ndev);
1916 if (ret) { 1908 if (ret) {
1917 ieee80211_if_free(ndev); 1909 free_netdev(ndev);
1918 return ret; 1910 return ret;
1919 } 1911 }
1920 } 1912 }
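The iface.c changes follow the same netdev destructor conversion seen elsewhere in this series: the driver no longer calls free_netdev() from its destructor; it sets needs_free_netdev so the core frees the device, and priv_destructor only releases driver-private state (the explicit free_netdev() calls above cover the pre-registration error paths). A minimal, hypothetical driver setup using the same convention might look like this (foo_* names are illustrative):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct foo_priv {			/* hypothetical driver-private data */
	void __percpu *stats;
};

static void foo_destructor(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	free_percpu(priv->stats);	/* driver-private state only */
	/* no free_netdev() here: the core does it because
	 * needs_free_netdev is set */
}

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->needs_free_netdev = true;		/* core frees the netdev */
	dev->priv_destructor = foo_destructor;	/* runs right before that */
}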
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0ea9712bd99e..cc8e6ea1b27e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -601,7 +601,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
601 struct ieee80211_supported_band *sband; 601 struct ieee80211_supported_band *sband;
602 struct ieee80211_chanctx_conf *chanctx_conf; 602 struct ieee80211_chanctx_conf *chanctx_conf;
603 struct ieee80211_channel *chan; 603 struct ieee80211_channel *chan;
604 u32 rate_flags, rates = 0; 604 u32 rates = 0;
605 605
606 sdata_assert_lock(sdata); 606 sdata_assert_lock(sdata);
607 607
@@ -612,7 +612,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
612 return; 612 return;
613 } 613 }
614 chan = chanctx_conf->def.chan; 614 chan = chanctx_conf->def.chan;
615 rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
616 rcu_read_unlock(); 615 rcu_read_unlock();
617 sband = local->hw.wiphy->bands[chan->band]; 616 sband = local->hw.wiphy->bands[chan->band];
618 shift = ieee80211_vif_get_shift(&sdata->vif); 617 shift = ieee80211_vif_get_shift(&sdata->vif);
@@ -636,9 +635,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
636 */ 635 */
637 rates_len = 0; 636 rates_len = 0;
638 for (i = 0; i < sband->n_bitrates; i++) { 637 for (i = 0; i < sband->n_bitrates; i++) {
639 if ((rate_flags & sband->bitrates[i].flags)
640 != rate_flags)
641 continue;
642 rates |= BIT(i); 638 rates |= BIT(i);
643 rates_len++; 639 rates_len++;
644 } 640 }
@@ -2818,7 +2814,7 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
2818 u32 *rates, u32 *basic_rates, 2814 u32 *rates, u32 *basic_rates,
2819 bool *have_higher_than_11mbit, 2815 bool *have_higher_than_11mbit,
2820 int *min_rate, int *min_rate_index, 2816 int *min_rate, int *min_rate_index,
2821 int shift, u32 rate_flags) 2817 int shift)
2822{ 2818{
2823 int i, j; 2819 int i, j;
2824 2820
@@ -2846,8 +2842,6 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
2846 int brate; 2842 int brate;
2847 2843
2848 br = &sband->bitrates[j]; 2844 br = &sband->bitrates[j];
2849 if ((rate_flags & br->flags) != rate_flags)
2850 continue;
2851 2845
2852 brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); 2846 brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
2853 if (brate == rate) { 2847 if (brate == rate) {
@@ -4398,40 +4392,32 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
4398 return -ENOMEM; 4392 return -ENOMEM;
4399 } 4393 }
4400 4394
4401 if (new_sta || override) { 4395 /*
4402 err = ieee80211_prep_channel(sdata, cbss); 4396 * Set up the information for the new channel before setting the
4403 if (err) { 4397 * new channel. We can't - completely race-free - change the basic
4404 if (new_sta) 4398 * rates bitmap and the channel (sband) that it refers to, but if
4405 sta_info_free(local, new_sta); 4399 * we set it up before we at least avoid calling into the driver's
4406 return -EINVAL; 4400 * bss_info_changed() method with invalid information (since we do
4407 } 4401 * call that from changing the channel - only for IDLE and perhaps
4408 } 4402 * some others, but ...).
4409 4403 *
4404 * So to avoid that, just set up all the new information before the
4405 * channel, but tell the driver to apply it only afterwards, since
4406 * it might need the new channel for that.
4407 */
4410 if (new_sta) { 4408 if (new_sta) {
4411 u32 rates = 0, basic_rates = 0; 4409 u32 rates = 0, basic_rates = 0;
4412 bool have_higher_than_11mbit; 4410 bool have_higher_than_11mbit;
4413 int min_rate = INT_MAX, min_rate_index = -1; 4411 int min_rate = INT_MAX, min_rate_index = -1;
4414 struct ieee80211_chanctx_conf *chanctx_conf;
4415 const struct cfg80211_bss_ies *ies; 4412 const struct cfg80211_bss_ies *ies;
4416 int shift = ieee80211_vif_get_shift(&sdata->vif); 4413 int shift = ieee80211_vif_get_shift(&sdata->vif);
4417 u32 rate_flags;
4418
4419 rcu_read_lock();
4420 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
4421 if (WARN_ON(!chanctx_conf)) {
4422 rcu_read_unlock();
4423 sta_info_free(local, new_sta);
4424 return -EINVAL;
4425 }
4426 rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
4427 rcu_read_unlock();
4428 4414
4429 ieee80211_get_rates(sband, bss->supp_rates, 4415 ieee80211_get_rates(sband, bss->supp_rates,
4430 bss->supp_rates_len, 4416 bss->supp_rates_len,
4431 &rates, &basic_rates, 4417 &rates, &basic_rates,
4432 &have_higher_than_11mbit, 4418 &have_higher_than_11mbit,
4433 &min_rate, &min_rate_index, 4419 &min_rate, &min_rate_index,
4434 shift, rate_flags); 4420 shift);
4435 4421
4436 /* 4422 /*
4437 * This used to be a workaround for basic rates missing 4423 * This used to be a workaround for basic rates missing
@@ -4489,8 +4475,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
4489 sdata->vif.bss_conf.sync_dtim_count = 0; 4475 sdata->vif.bss_conf.sync_dtim_count = 0;
4490 } 4476 }
4491 rcu_read_unlock(); 4477 rcu_read_unlock();
4478 }
4492 4479
4493 /* tell driver about BSSID, basic rates and timing */ 4480 if (new_sta || override) {
4481 err = ieee80211_prep_channel(sdata, cbss);
4482 if (err) {
4483 if (new_sta)
4484 sta_info_free(local, new_sta);
4485 return -EINVAL;
4486 }
4487 }
4488
4489 if (new_sta) {
4490 /*
4491 * tell driver about BSSID, basic rates and timing
4492 * this was set up above, before setting the channel
4493 */
4494 ieee80211_bss_info_change_notify(sdata, 4494 ieee80211_bss_info_change_notify(sdata,
4495 BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | 4495 BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
4496 BSS_CHANGED_BEACON_INT); 4496 BSS_CHANGED_BEACON_INT);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 35f4c7d7a500..3674fe3d67dc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1613,12 +1613,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1613 */ 1613 */
1614 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && 1614 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1615 !ieee80211_has_morefrags(hdr->frame_control) && 1615 !ieee80211_has_morefrags(hdr->frame_control) &&
1616 !ieee80211_is_back_req(hdr->frame_control) &&
1616 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1617 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1617 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1618 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1618 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1619 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1619 /* PM bit is only checked in frames where it isn't reserved, 1620 /*
1621 * PM bit is only checked in frames where it isn't reserved,
1620 * in AP mode it's reserved in non-bufferable management frames 1622 * in AP mode it's reserved in non-bufferable management frames
1621 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) 1623 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
1624 * BAR frames should be ignored as specified in
1625 * IEEE 802.11-2012 10.2.1.2.
1622 */ 1626 */
1623 (!ieee80211_is_mgmt(hdr->frame_control) || 1627 (!ieee80211_is_mgmt(hdr->frame_control) ||
1624 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { 1628 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
@@ -2492,7 +2496,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2492 if (is_multicast_ether_addr(hdr->addr1)) { 2496 if (is_multicast_ether_addr(hdr->addr1)) {
2493 mpp_addr = hdr->addr3; 2497 mpp_addr = hdr->addr3;
2494 proxied_addr = mesh_hdr->eaddr1; 2498 proxied_addr = mesh_hdr->eaddr1;
2495 } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { 2499 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2500 MESH_FLAGS_AE_A5_A6) {
2496 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2501 /* has_a4 already checked in ieee80211_rx_mesh_check */
2497 mpp_addr = hdr->addr4; 2502 mpp_addr = hdr->addr4;
2498 proxied_addr = mesh_hdr->eaddr2; 2503 proxied_addr = mesh_hdr->eaddr2;
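In the last rx.c hunk, MESH_FLAGS_AE is a two-bit field, so the A5/A6 address-extension mode has to be compared after masking the whole field rather than tested as a single bit; otherwise a frame carrying the reserved combination of both AE bits would be treated as A5/A6. A small, self-contained illustration (the #defines are stand-ins mirroring the kernel's values):

#include <stdio.h>

/* Stand-in definitions mirroring the two-bit address-extension field. */
#define MESH_FLAGS_AE		0x3
#define MESH_FLAGS_AE_A4	0x1
#define MESH_FLAGS_AE_A5_A6	0x2

int main(void)
{
	/* Both AE bits set: not a valid A5/A6 frame. */
	unsigned char flags = MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6;

	/* Old test: true because the A5_A6 bit happens to be set. */
	printf("bit test:   %d\n", !!(flags & MESH_FLAGS_AE_A5_A6));

	/* New test: extract the whole field, then compare the mode. */
	printf("field test: %d\n",
	       (flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6);
	return 0;
}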
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7cdf7a835bb0..403e3cc58b57 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
2155 struct ieee80211_sta_rx_stats *cpurxs; 2155 struct ieee80211_sta_rx_stats *cpurxs;
2156 2156
2157 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2157 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
2158 sinfo->rx_packets += cpurxs->dropped; 2158 sinfo->rx_dropped_misc += cpurxs->dropped;
2159 } 2159 }
2160 } 2160 }
2161 2161
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5609cacb20d5..ea0747d6a6da 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
116#define HT_AGG_STATE_STOPPING 3 116#define HT_AGG_STATE_STOPPING 3
117#define HT_AGG_STATE_WANT_START 4 117#define HT_AGG_STATE_WANT_START 4
118#define HT_AGG_STATE_WANT_STOP 5 118#define HT_AGG_STATE_WANT_STOP 5
119#define HT_AGG_STATE_START_CB 6
120#define HT_AGG_STATE_STOP_CB 7
119 121
120enum ieee80211_agg_stop_reason { 122enum ieee80211_agg_stop_reason {
121 AGG_STOP_DECLINED, 123 AGG_STOP_DECLINED,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c1ef22df865f..cc19614ff4e6 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -17,6 +17,7 @@
17#include <asm/unaligned.h> 17#include <asm/unaligned.h>
18#include <net/mac80211.h> 18#include <net/mac80211.h>
19#include <crypto/aes.h> 19#include <crypto/aes.h>
20#include <crypto/algapi.h>
20 21
21#include "ieee80211_i.h" 22#include "ieee80211_i.h"
22#include "michael.h" 23#include "michael.h"
@@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
153 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; 154 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
154 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 155 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
155 michael_mic(key, hdr, data, data_len, mic); 156 michael_mic(key, hdr, data, data_len, mic);
156 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) 157 if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
157 goto mic_fail; 158 goto mic_fail;
158 159
159 /* remove Michael MIC from payload */ 160 /* remove Michael MIC from payload */
@@ -1048,7 +1049,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
1048 bip_aad(skb, aad); 1049 bip_aad(skb, aad);
1049 ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, 1050 ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
1050 skb->data + 24, skb->len - 24, mic); 1051 skb->data + 24, skb->len - 24, mic);
1051 if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1052 if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
1052 key->u.aes_cmac.icverrors++; 1053 key->u.aes_cmac.icverrors++;
1053 return RX_DROP_UNUSABLE; 1054 return RX_DROP_UNUSABLE;
1054 } 1055 }
@@ -1098,7 +1099,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
1098 bip_aad(skb, aad); 1099 bip_aad(skb, aad);
1099 ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, 1100 ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
1100 skb->data + 24, skb->len - 24, mic); 1101 skb->data + 24, skb->len - 24, mic);
1101 if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1102 if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
1102 key->u.aes_cmac.icverrors++; 1103 key->u.aes_cmac.icverrors++;
1103 return RX_DROP_UNUSABLE; 1104 return RX_DROP_UNUSABLE;
1104 } 1105 }
@@ -1202,7 +1203,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
1202 if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, 1203 if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
1203 skb->data + 24, skb->len - 24, 1204 skb->data + 24, skb->len - 24,
1204 mic) < 0 || 1205 mic) < 0 ||
1205 memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1206 crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
1206 key->u.aes_gmac.icverrors++; 1207 key->u.aes_gmac.icverrors++;
1207 return RX_DROP_UNUSABLE; 1208 return RX_DROP_UNUSABLE;
1208 } 1209 }
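The wpa.c hunks replace memcmp() with crypto_memneq() for every MIC check: memcmp() may return as soon as the first byte differs, which can leak how much of the authentication tag matched. A rough userspace sketch of a comparison in the same spirit is below; the kernel's crypto_memneq() (from crypto/algapi.h) is the real, audited implementation and is what kernel code should use.

#include <stdio.h>
#include <stddef.h>

/* Constant-time inequality check: XOR every byte pair and OR the results
 * together, so the running time does not depend on where the first mismatch
 * sits.  Returns non-zero if the buffers differ.
 */
static int ct_memneq(const void *a, const void *b, size_t len)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];
	return diff != 0;
}

int main(void)
{
	unsigned char mic[8]      = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4 };
	unsigned char received[8] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 5 };

	printf("differ: %d\n", ct_memneq(mic, received, sizeof(mic)));
	return 0;
}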
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 06019dba4b10..bd88a9b80773 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev)
526 struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 526 struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
527 527
528 mac802154_llsec_destroy(&sdata->sec); 528 mac802154_llsec_destroy(&sdata->sec);
529
530 free_netdev(dev);
531} 529}
532 530
533static void ieee802154_if_setup(struct net_device *dev) 531static void ieee802154_if_setup(struct net_device *dev)
@@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
593 sdata->dev->dev_addr); 591 sdata->dev->dev_addr);
594 592
595 sdata->dev->header_ops = &mac802154_header_ops; 593 sdata->dev->header_ops = &mac802154_header_ops;
596 sdata->dev->destructor = mac802154_wpan_free; 594 sdata->dev->needs_free_netdev = true;
595 sdata->dev->priv_destructor = mac802154_wpan_free;
597 sdata->dev->netdev_ops = &mac802154_wpan_ops; 596 sdata->dev->netdev_ops = &mac802154_wpan_ops;
598 sdata->dev->ml_priv = &mac802154_mlme_wpan; 597 sdata->dev->ml_priv = &mac802154_mlme_wpan;
599 wpan_dev->promiscuous_mode = false; 598 wpan_dev->promiscuous_mode = false;
@@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
608 607
609 break; 608 break;
610 case NL802154_IFTYPE_MONITOR: 609 case NL802154_IFTYPE_MONITOR:
611 sdata->dev->destructor = free_netdev; 610 sdata->dev->needs_free_netdev = true;
612 sdata->dev->netdev_ops = &mac802154_monitor_ops; 611 sdata->dev->netdev_ops = &mac802154_monitor_ops;
613 wpan_dev->promiscuous_mode = true; 612 wpan_dev->promiscuous_mode = true;
614 break; 613 break;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 257ec66009da..7b05fd1497ce 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
1418 continue; 1418 continue;
1419 alive++; 1419 alive++;
1420 nh_flags &= ~flags; 1420 nh_flags &= ~flags;
1421 WRITE_ONCE(nh->nh_flags, flags); 1421 WRITE_ONCE(nh->nh_flags, nh_flags);
1422 } endfor_nexthops(rt); 1422 } endfor_nexthops(rt);
1423 1423
1424 WRITE_ONCE(rt->rt_nhn_alive, alive); 1424 WRITE_ONCE(rt->rt_nhn_alive, alive);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index d2d7bdf1d510..ad99c1ceea6f 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -849,10 +849,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
849{ 849{
850 unsigned int verdict = NF_DROP; 850 unsigned int verdict = NF_DROP;
851 851
852 if (IP_VS_FWD_METHOD(cp) != 0) { 852 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
853 pr_err("shouldn't reach here, because the box is on the " 853 goto ignore_cp;
854 "half connection in the tun/dr module.\n");
855 }
856 854
857 /* Ensure the checksum is correct */ 855 /* Ensure the checksum is correct */
858 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { 856 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -886,6 +884,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
886 ip_vs_notrack(skb); 884 ip_vs_notrack(skb);
887 else 885 else
888 ip_vs_update_conntrack(skb, cp, 0); 886 ip_vs_update_conntrack(skb, cp, 0);
887
888ignore_cp:
889 verdict = NF_ACCEPT; 889 verdict = NF_ACCEPT;
890 890
891out: 891out:
@@ -1385,8 +1385,11 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
1385 */ 1385 */
1386 cp = pp->conn_out_get(ipvs, af, skb, &iph); 1386 cp = pp->conn_out_get(ipvs, af, skb, &iph);
1387 1387
1388 if (likely(cp)) 1388 if (likely(cp)) {
1389 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
1390 goto ignore_cp;
1389 return handle_response(af, skb, pd, cp, &iph, hooknum); 1391 return handle_response(af, skb, pd, cp, &iph, hooknum);
1392 }
1390 1393
1391 /* Check for real-server-started requests */ 1394 /* Check for real-server-started requests */
1392 if (atomic_read(&ipvs->conn_out_counter)) { 1395 if (atomic_read(&ipvs->conn_out_counter)) {
@@ -1444,9 +1447,15 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
1444 } 1447 }
1445 } 1448 }
1446 } 1449 }
1450
1451out:
1447 IP_VS_DBG_PKT(12, af, pp, skb, iph.off, 1452 IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
1448 "ip_vs_out: packet continues traversal as normal"); 1453 "ip_vs_out: packet continues traversal as normal");
1449 return NF_ACCEPT; 1454 return NF_ACCEPT;
1455
1456ignore_cp:
1457 __ip_vs_conn_put(cp);
1458 goto out;
1450} 1459}
1451 1460
1452/* 1461/*
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 3a60efa7799b..7f6100ca63be 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -174,6 +174,10 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
174#endif 174#endif
175 if (h != NULL && !try_module_get(h->me)) 175 if (h != NULL && !try_module_get(h->me))
176 h = NULL; 176 h = NULL;
177 if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) {
178 module_put(h->me);
179 h = NULL;
180 }
177 181
178 rcu_read_unlock(); 182 rcu_read_unlock();
179 183
@@ -181,6 +185,13 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
181} 185}
182EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); 186EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
183 187
188void nf_conntrack_helper_put(struct nf_conntrack_helper *helper)
189{
190 refcount_dec(&helper->refcnt);
191 module_put(helper->me);
192}
193EXPORT_SYMBOL_GPL(nf_conntrack_helper_put);
194
184struct nf_conn_help * 195struct nf_conn_help *
185nf_ct_helper_ext_add(struct nf_conn *ct, 196nf_ct_helper_ext_add(struct nf_conn *ct,
186 struct nf_conntrack_helper *helper, gfp_t gfp) 197 struct nf_conntrack_helper *helper, gfp_t gfp)
@@ -417,6 +428,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
417 } 428 }
418 } 429 }
419 } 430 }
431 refcount_set(&me->refcnt, 1);
420 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); 432 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
421 nf_ct_helper_count++; 433 nf_ct_helper_count++;
422out: 434out:
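With the nf_conntrack_helper.c change, looking a helper up now pins it with a reference count on top of the module reference, and the new nf_conntrack_helper_put() drops both. A hypothetical caller following that pairing (mirroring what xt_CT.c and openvswitch do later in this patch) might look like:

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>

static int attach_helper(struct nf_conn *ct, const char *name,
			 u16 l3num, u8 protonum)
{
	struct nf_conntrack_helper *h;
	struct nf_conn_help *help;

	/* Takes the module reference and bumps the helper refcount. */
	h = nf_conntrack_helper_try_module_get(name, l3num, protonum);
	if (!h)
		return -ENOENT;

	help = nf_ct_helper_ext_add(ct, h, GFP_KERNEL);
	if (!help) {
		nf_conntrack_helper_put(h);	/* refcount_dec() + module_put() */
		return -ENOMEM;
	}

	rcu_assign_pointer(help->helper, h);
	return 0;
}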
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index dcf561b5c97a..a8be9b72e6cd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,6 +45,8 @@
45#include <net/netfilter/nf_conntrack_zones.h> 45#include <net/netfilter/nf_conntrack_zones.h>
46#include <net/netfilter/nf_conntrack_timestamp.h> 46#include <net/netfilter/nf_conntrack_timestamp.h>
47#include <net/netfilter/nf_conntrack_labels.h> 47#include <net/netfilter/nf_conntrack_labels.h>
48#include <net/netfilter/nf_conntrack_seqadj.h>
49#include <net/netfilter/nf_conntrack_synproxy.h>
48#ifdef CONFIG_NF_NAT_NEEDED 50#ifdef CONFIG_NF_NAT_NEEDED
49#include <net/netfilter/nf_nat_core.h> 51#include <net/netfilter/nf_nat_core.h>
50#include <net/netfilter/nf_nat_l4proto.h> 52#include <net/netfilter/nf_nat_l4proto.h>
@@ -888,8 +890,13 @@ restart:
888 } 890 }
889out: 891out:
890 local_bh_enable(); 892 local_bh_enable();
891 if (last) 893 if (last) {
894 /* nf ct hash resize happened, now clear the leftover. */
895 if ((struct nf_conn *)cb->args[1] == last)
896 cb->args[1] = 0;
897
892 nf_ct_put(last); 898 nf_ct_put(last);
899 }
893 900
894 while (i) { 901 while (i) {
895 i--; 902 i--;
@@ -1007,9 +1014,8 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
1007 1014
1008static int 1015static int
1009ctnetlink_parse_tuple(const struct nlattr * const cda[], 1016ctnetlink_parse_tuple(const struct nlattr * const cda[],
1010 struct nf_conntrack_tuple *tuple, 1017 struct nf_conntrack_tuple *tuple, u32 type,
1011 enum ctattr_type type, u_int8_t l3num, 1018 u_int8_t l3num, struct nf_conntrack_zone *zone)
1012 struct nf_conntrack_zone *zone)
1013{ 1019{
1014 struct nlattr *tb[CTA_TUPLE_MAX+1]; 1020 struct nlattr *tb[CTA_TUPLE_MAX+1];
1015 int err; 1021 int err;
@@ -1828,6 +1834,8 @@ ctnetlink_create_conntrack(struct net *net,
1828 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); 1834 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1829 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); 1835 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1830 nf_ct_labels_ext_add(ct); 1836 nf_ct_labels_ext_add(ct);
1837 nfct_seqadj_ext_add(ct);
1838 nfct_synproxy_ext_add(ct);
1831 1839
1832 /* we must add conntrack extensions before confirmation. */ 1840 /* we must add conntrack extensions before confirmation. */
1833 ct->status |= IPS_CONFIRMED; 1841 ct->status |= IPS_CONFIRMED;
@@ -2447,7 +2455,7 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = {
2447 2455
2448static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, 2456static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2449 const struct nf_conntrack_tuple *tuple, 2457 const struct nf_conntrack_tuple *tuple,
2450 enum ctattr_expect type) 2458 u32 type)
2451{ 2459{
2452 struct nlattr *nest_parms; 2460 struct nlattr *nest_parms;
2453 2461
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 13875d599a85..1c5b14a6cab3 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
512 u8 pf, unsigned int hooknum) 512 u8 pf, unsigned int hooknum)
513{ 513{
514 const struct sctphdr *sh; 514 const struct sctphdr *sh;
515 struct sctphdr _sctph;
516 const char *logmsg; 515 const char *logmsg;
517 516
518 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); 517 if (skb->len < dataoff + sizeof(struct sctphdr)) {
519 if (!sh) {
520 logmsg = "nf_ct_sctp: short packet "; 518 logmsg = "nf_ct_sctp: short packet ";
521 goto out_invalid; 519 goto out_invalid;
522 } 520 }
523 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 521 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
524 skb->ip_summed == CHECKSUM_NONE) { 522 skb->ip_summed == CHECKSUM_NONE) {
523 if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
524 logmsg = "nf_ct_sctp: failed to read header ";
525 goto out_invalid;
526 }
527 sh = (const struct sctphdr *)(skb->data + dataoff);
525 if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { 528 if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
526 logmsg = "nf_ct_sctp: bad CRC "; 529 logmsg = "nf_ct_sctp: bad CRC ";
527 goto out_invalid; 530 goto out_invalid;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index b48d6b5aae8a..6c72922d20ca 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -409,6 +409,10 @@ nf_nat_setup_info(struct nf_conn *ct,
409{ 409{
410 struct nf_conntrack_tuple curr_tuple, new_tuple; 410 struct nf_conntrack_tuple curr_tuple, new_tuple;
411 411
412 /* Can't setup nat info for confirmed ct. */
413 if (nf_ct_is_confirmed(ct))
414 return NF_ACCEPT;
415
412 NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC || 416 NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
413 maniptype == NF_NAT_MANIP_DST); 417 maniptype == NF_NAT_MANIP_DST);
414 BUG_ON(nf_nat_initialized(ct, maniptype)); 418 BUG_ON(nf_nat_initialized(ct, maniptype));
@@ -562,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
562 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() 566 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
563 * will delete entry from already-freed table. 567 * will delete entry from already-freed table.
564 */ 568 */
565 ct->status &= ~IPS_NAT_DONE_MASK; 569 clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
566 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, 570 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
567 nf_nat_bysource_params); 571 nf_nat_bysource_params);
568 572
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 559225029740..da314be0c048 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3367,35 +3367,50 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
3367 return nf_tables_fill_setelem(args->skb, set, elem); 3367 return nf_tables_fill_setelem(args->skb, set, elem);
3368} 3368}
3369 3369
3370struct nft_set_dump_ctx {
3371 const struct nft_set *set;
3372 struct nft_ctx ctx;
3373};
3374
3370static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) 3375static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3371{ 3376{
3377 struct nft_set_dump_ctx *dump_ctx = cb->data;
3372 struct net *net = sock_net(skb->sk); 3378 struct net *net = sock_net(skb->sk);
3373 u8 genmask = nft_genmask_cur(net); 3379 struct nft_af_info *afi;
3380 struct nft_table *table;
3374 struct nft_set *set; 3381 struct nft_set *set;
3375 struct nft_set_dump_args args; 3382 struct nft_set_dump_args args;
3376 struct nft_ctx ctx; 3383 bool set_found = false;
3377 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
3378 struct nfgenmsg *nfmsg; 3384 struct nfgenmsg *nfmsg;
3379 struct nlmsghdr *nlh; 3385 struct nlmsghdr *nlh;
3380 struct nlattr *nest; 3386 struct nlattr *nest;
3381 u32 portid, seq; 3387 u32 portid, seq;
3382 int event, err; 3388 int event;
3383 3389
3384 err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla, 3390 rcu_read_lock();
3385 NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy, 3391 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
3386 NULL); 3392 if (afi != dump_ctx->ctx.afi)
3387 if (err < 0) 3393 continue;
3388 return err;
3389 3394
3390 err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, 3395 list_for_each_entry_rcu(table, &afi->tables, list) {
3391 (void *)nla, genmask); 3396 if (table != dump_ctx->ctx.table)
3392 if (err < 0) 3397 continue;
3393 return err;
3394 3398
3395 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], 3399 list_for_each_entry_rcu(set, &table->sets, list) {
3396 genmask); 3400 if (set == dump_ctx->set) {
3397 if (IS_ERR(set)) 3401 set_found = true;
3398 return PTR_ERR(set); 3402 break;
3403 }
3404 }
3405 break;
3406 }
3407 break;
3408 }
3409
3410 if (!set_found) {
3411 rcu_read_unlock();
3412 return -ENOENT;
3413 }
3399 3414
3400 event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM); 3415 event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM);
3401 portid = NETLINK_CB(cb->skb).portid; 3416 portid = NETLINK_CB(cb->skb).portid;
@@ -3407,11 +3422,11 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3407 goto nla_put_failure; 3422 goto nla_put_failure;
3408 3423
3409 nfmsg = nlmsg_data(nlh); 3424 nfmsg = nlmsg_data(nlh);
3410 nfmsg->nfgen_family = ctx.afi->family; 3425 nfmsg->nfgen_family = afi->family;
3411 nfmsg->version = NFNETLINK_V0; 3426 nfmsg->version = NFNETLINK_V0;
3412 nfmsg->res_id = htons(ctx.net->nft.base_seq & 0xffff); 3427 nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
3413 3428
3414 if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name)) 3429 if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name))
3415 goto nla_put_failure; 3430 goto nla_put_failure;
3416 if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name)) 3431 if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
3417 goto nla_put_failure; 3432 goto nla_put_failure;
@@ -3422,12 +3437,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3422 3437
3423 args.cb = cb; 3438 args.cb = cb;
3424 args.skb = skb; 3439 args.skb = skb;
3425 args.iter.genmask = nft_genmask_cur(ctx.net); 3440 args.iter.genmask = nft_genmask_cur(net);
3426 args.iter.skip = cb->args[0]; 3441 args.iter.skip = cb->args[0];
3427 args.iter.count = 0; 3442 args.iter.count = 0;
3428 args.iter.err = 0; 3443 args.iter.err = 0;
3429 args.iter.fn = nf_tables_dump_setelem; 3444 args.iter.fn = nf_tables_dump_setelem;
3430 set->ops->walk(&ctx, set, &args.iter); 3445 set->ops->walk(&dump_ctx->ctx, set, &args.iter);
3446 rcu_read_unlock();
3431 3447
3432 nla_nest_end(skb, nest); 3448 nla_nest_end(skb, nest);
3433 nlmsg_end(skb, nlh); 3449 nlmsg_end(skb, nlh);
@@ -3441,9 +3457,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3441 return skb->len; 3457 return skb->len;
3442 3458
3443nla_put_failure: 3459nla_put_failure:
3460 rcu_read_unlock();
3444 return -ENOSPC; 3461 return -ENOSPC;
3445} 3462}
3446 3463
3464static int nf_tables_dump_set_done(struct netlink_callback *cb)
3465{
3466 kfree(cb->data);
3467 return 0;
3468}
3469
3447static int nf_tables_getsetelem(struct net *net, struct sock *nlsk, 3470static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
3448 struct sk_buff *skb, const struct nlmsghdr *nlh, 3471 struct sk_buff *skb, const struct nlmsghdr *nlh,
3449 const struct nlattr * const nla[]) 3472 const struct nlattr * const nla[])
@@ -3465,7 +3488,18 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
3465 if (nlh->nlmsg_flags & NLM_F_DUMP) { 3488 if (nlh->nlmsg_flags & NLM_F_DUMP) {
3466 struct netlink_dump_control c = { 3489 struct netlink_dump_control c = {
3467 .dump = nf_tables_dump_set, 3490 .dump = nf_tables_dump_set,
3491 .done = nf_tables_dump_set_done,
3468 }; 3492 };
3493 struct nft_set_dump_ctx *dump_ctx;
3494
3495 dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_KERNEL);
3496 if (!dump_ctx)
3497 return -ENOMEM;
3498
3499 dump_ctx->set = set;
3500 dump_ctx->ctx = ctx;
3501
3502 c.data = dump_ctx;
3469 return netlink_dump_start(nlsk, skb, nlh, &c); 3503 return netlink_dump_start(nlsk, skb, nlh, &c);
3470 } 3504 }
3471 return -EOPNOTSUPP; 3505 return -EOPNOTSUPP;
@@ -3593,9 +3627,9 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
3593{ 3627{
3594 struct nft_set_ext *ext = nft_set_elem_ext(set, elem); 3628 struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
3595 3629
3596 nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); 3630 nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
3597 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) 3631 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
3598 nft_data_uninit(nft_set_ext_data(ext), set->dtype); 3632 nft_data_release(nft_set_ext_data(ext), set->dtype);
3599 if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) 3633 if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
3600 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); 3634 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
3601 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) 3635 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
@@ -3604,6 +3638,18 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
3604} 3638}
3605EXPORT_SYMBOL_GPL(nft_set_elem_destroy); 3639EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
3606 3640
3641/* Only called from commit path, nft_set_elem_deactivate() already deals with
3642 * the refcounting from the preparation phase.
3643 */
3644static void nf_tables_set_elem_destroy(const struct nft_set *set, void *elem)
3645{
3646 struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
3647
3648 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
3649 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
3650 kfree(elem);
3651}
3652
3607static int nft_setelem_parse_flags(const struct nft_set *set, 3653static int nft_setelem_parse_flags(const struct nft_set *set,
3608 const struct nlattr *attr, u32 *flags) 3654 const struct nlattr *attr, u32 *flags)
3609{ 3655{
@@ -3815,9 +3861,9 @@ err4:
3815 kfree(elem.priv); 3861 kfree(elem.priv);
3816err3: 3862err3:
3817 if (nla[NFTA_SET_ELEM_DATA] != NULL) 3863 if (nla[NFTA_SET_ELEM_DATA] != NULL)
3818 nft_data_uninit(&data, d2.type); 3864 nft_data_release(&data, d2.type);
3819err2: 3865err2:
3820 nft_data_uninit(&elem.key.val, d1.type); 3866 nft_data_release(&elem.key.val, d1.type);
3821err1: 3867err1:
3822 return err; 3868 return err;
3823} 3869}
@@ -3862,6 +3908,53 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3862 return err; 3908 return err;
3863} 3909}
3864 3910
3911/**
3912 * nft_data_hold - hold a nft_data item
3913 *
 3914 * @data: struct nft_data to hold
3915 * @type: type of data
3916 *
3917 * Hold a nft_data item. NFT_DATA_VALUE types can be silently discarded,
3918 * NFT_DATA_VERDICT bumps the reference to chains in case of NFT_JUMP and
3919 * NFT_GOTO verdicts. This function must be called on active data objects
3920 * from the second phase of the commit protocol.
3921 */
3922static void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
3923{
3924 if (type == NFT_DATA_VERDICT) {
3925 switch (data->verdict.code) {
3926 case NFT_JUMP:
3927 case NFT_GOTO:
3928 data->verdict.chain->use++;
3929 break;
3930 }
3931 }
3932}
3933
3934static void nft_set_elem_activate(const struct net *net,
3935 const struct nft_set *set,
3936 struct nft_set_elem *elem)
3937{
3938 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
3939
3940 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
3941 nft_data_hold(nft_set_ext_data(ext), set->dtype);
3942 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
3943 (*nft_set_ext_obj(ext))->use++;
3944}
3945
3946static void nft_set_elem_deactivate(const struct net *net,
3947 const struct nft_set *set,
3948 struct nft_set_elem *elem)
3949{
3950 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
3951
3952 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
3953 nft_data_release(nft_set_ext_data(ext), set->dtype);
3954 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
3955 (*nft_set_ext_obj(ext))->use--;
3956}
3957
3865static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, 3958static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
3866 const struct nlattr *attr) 3959 const struct nlattr *attr)
3867{ 3960{
@@ -3927,6 +4020,8 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
3927 kfree(elem.priv); 4020 kfree(elem.priv);
3928 elem.priv = priv; 4021 elem.priv = priv;
3929 4022
4023 nft_set_elem_deactivate(ctx->net, set, &elem);
4024
3930 nft_trans_elem(trans) = elem; 4025 nft_trans_elem(trans) = elem;
3931 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 4026 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3932 return 0; 4027 return 0;
@@ -3936,7 +4031,7 @@ err4:
3936err3: 4031err3:
3937 kfree(elem.priv); 4032 kfree(elem.priv);
3938err2: 4033err2:
3939 nft_data_uninit(&elem.key.val, desc.type); 4034 nft_data_release(&elem.key.val, desc.type);
3940err1: 4035err1:
3941 return err; 4036 return err;
3942} 4037}
@@ -4743,8 +4838,8 @@ static void nf_tables_commit_release(struct nft_trans *trans)
4743 nft_set_destroy(nft_trans_set(trans)); 4838 nft_set_destroy(nft_trans_set(trans));
4744 break; 4839 break;
4745 case NFT_MSG_DELSETELEM: 4840 case NFT_MSG_DELSETELEM:
4746 nft_set_elem_destroy(nft_trans_elem_set(trans), 4841 nf_tables_set_elem_destroy(nft_trans_elem_set(trans),
4747 nft_trans_elem(trans).priv, true); 4842 nft_trans_elem(trans).priv);
4748 break; 4843 break;
4749 case NFT_MSG_DELOBJ: 4844 case NFT_MSG_DELOBJ:
4750 nft_obj_destroy(nft_trans_obj(trans)); 4845 nft_obj_destroy(nft_trans_obj(trans));
@@ -4979,6 +5074,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
4979 case NFT_MSG_DELSETELEM: 5074 case NFT_MSG_DELSETELEM:
4980 te = (struct nft_trans_elem *)trans->data; 5075 te = (struct nft_trans_elem *)trans->data;
4981 5076
5077 nft_set_elem_activate(net, te->set, &te->elem);
4982 te->set->ops->activate(net, te->set, &te->elem); 5078 te->set->ops->activate(net, te->set, &te->elem);
4983 te->set->ndeact--; 5079 te->set->ndeact--;
4984 5080
@@ -5464,7 +5560,7 @@ int nft_data_init(const struct nft_ctx *ctx,
5464EXPORT_SYMBOL_GPL(nft_data_init); 5560EXPORT_SYMBOL_GPL(nft_data_init);
5465 5561
5466/** 5562/**
5467 * nft_data_uninit - release a nft_data item 5563 * nft_data_release - release a nft_data item
5468 * 5564 *
5469 * @data: struct nft_data to release 5565 * @data: struct nft_data to release
5470 * @type: type of data 5566 * @type: type of data
@@ -5472,7 +5568,7 @@ EXPORT_SYMBOL_GPL(nft_data_init);
5472 * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded, 5568 * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded,
5473 * all others need to be released by calling this function. 5569 * all others need to be released by calling this function.
5474 */ 5570 */
5475void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) 5571void nft_data_release(const struct nft_data *data, enum nft_data_types type)
5476{ 5572{
5477 if (type < NFT_DATA_VERDICT) 5573 if (type < NFT_DATA_VERDICT)
5478 return; 5574 return;
@@ -5483,7 +5579,7 @@ void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
5483 WARN_ON(1); 5579 WARN_ON(1);
5484 } 5580 }
5485} 5581}
5486EXPORT_SYMBOL_GPL(nft_data_uninit); 5582EXPORT_SYMBOL_GPL(nft_data_release);
5487 5583
5488int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, 5584int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
5489 enum nft_data_types type, unsigned int len) 5585 enum nft_data_types type, unsigned int len)
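Among the nf_tables_api.c changes, nf_tables_dump_set() stops re-parsing netlink attributes on every dump continuation; the context is captured once in nf_tables_getsetelem(), handed to the dump via netlink_dump_control.data, and released in the new .done callback. A minimal sketch of that pattern (my_* names are hypothetical):

#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>

struct my_dump_ctx {			/* hypothetical per-dump state */
	u32 table_id;
};

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_dump_ctx *ctx = cb->data;	/* survives across messages */

	if (!ctx)
		return 0;
	/* ... fill skb with one batch of entries for ctx->table_id ... */
	return skb->len;
}

static int my_dump_done(struct netlink_callback *cb)
{
	kfree(cb->data);	/* freed exactly once, when the dump ends */
	return 0;
}

static int my_getobj(struct sock *nlsk, struct sk_buff *skb,
		     const struct nlmsghdr *nlh, u32 table_id)
{
	struct netlink_dump_control c = {
		.dump = my_dump,
		.done = my_dump_done,
	};
	struct my_dump_ctx *ctx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->table_id = table_id;

	c.data = ctx;		/* handed to every callback via cb->data */
	return netlink_dump_start(nlsk, skb, nlh, &c);
}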
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 950bf6eadc65..be678a323598 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -686,6 +686,7 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
686 tuple_set = true; 686 tuple_set = true;
687 } 687 }
688 688
689 ret = -ENOENT;
689 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { 690 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
690 cur = &nlcth->helper; 691 cur = &nlcth->helper;
691 j++; 692 j++;
@@ -699,16 +700,20 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
699 tuple.dst.protonum != cur->tuple.dst.protonum)) 700 tuple.dst.protonum != cur->tuple.dst.protonum))
700 continue; 701 continue;
701 702
702 found = true; 703 if (refcount_dec_if_one(&cur->refcnt)) {
703 nf_conntrack_helper_unregister(cur); 704 found = true;
704 kfree(cur->expect_policy); 705 nf_conntrack_helper_unregister(cur);
706 kfree(cur->expect_policy);
705 707
706 list_del(&nlcth->list); 708 list_del(&nlcth->list);
707 kfree(nlcth); 709 kfree(nlcth);
710 } else {
711 ret = -EBUSY;
712 }
708 } 713 }
709 714
710 /* Make sure we return success if we flush and there is no helpers */ 715 /* Make sure we return success if we flush and there is no helpers */
711 return (found || j == 0) ? 0 : -ENOENT; 716 return (found || j == 0) ? 0 : ret;
712} 717}
713 718
714static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = { 719static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
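The nfnetlink_cthelper.c hunk only removes a helper when refcount_dec_if_one() succeeds, i.e. when the requester holds the last reference, and reports -EBUSY otherwise. The guard, reduced to its core (foo_* names are hypothetical):

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo_obj {			/* hypothetical refcounted object */
	refcount_t refcnt;
};

/* refcount_dec_if_one() succeeds only when the caller holds the final
 * reference, so an object still in use reports -EBUSY instead of being
 * torn down underneath its users.
 */
static int foo_delete(struct foo_obj *obj)
{
	if (!refcount_dec_if_one(&obj->refcnt))
		return -EBUSY;	/* someone else still holds a reference */

	kfree(obj);
	return 0;
}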
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 877d9acd91ef..fff8073e2a56 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -83,17 +83,26 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
83 tb[NFTA_BITWISE_MASK]); 83 tb[NFTA_BITWISE_MASK]);
84 if (err < 0) 84 if (err < 0)
85 return err; 85 return err;
86 if (d1.len != priv->len) 86 if (d1.len != priv->len) {
87 return -EINVAL; 87 err = -EINVAL;
88 goto err1;
89 }
88 90
89 err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2, 91 err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2,
90 tb[NFTA_BITWISE_XOR]); 92 tb[NFTA_BITWISE_XOR]);
91 if (err < 0) 93 if (err < 0)
92 return err; 94 goto err1;
93 if (d2.len != priv->len) 95 if (d2.len != priv->len) {
94 return -EINVAL; 96 err = -EINVAL;
97 goto err2;
98 }
95 99
96 return 0; 100 return 0;
101err2:
102 nft_data_release(&priv->xor, d2.type);
103err1:
104 nft_data_release(&priv->mask, d1.type);
105 return err;
97} 106}
98 107
99static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr) 108static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 2b96effeadc1..c2945eb3397c 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -201,10 +201,18 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
201 if (err < 0) 201 if (err < 0)
202 return ERR_PTR(err); 202 return ERR_PTR(err);
203 203
204 if (desc.type != NFT_DATA_VALUE) {
205 err = -EINVAL;
206 goto err1;
207 }
208
204 if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ) 209 if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
205 return &nft_cmp_fast_ops; 210 return &nft_cmp_fast_ops;
206 else 211
207 return &nft_cmp_ops; 212 return &nft_cmp_ops;
213err1:
214 nft_data_release(&data, desc.type);
215 return ERR_PTR(-EINVAL);
208} 216}
209 217
210struct nft_expr_type nft_cmp_type __read_mostly = { 218struct nft_expr_type nft_cmp_type __read_mostly = {
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index a34ceb38fc55..1678e9e75e8e 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -826,9 +826,9 @@ static void nft_ct_helper_obj_destroy(struct nft_object *obj)
826 struct nft_ct_helper_obj *priv = nft_obj_data(obj); 826 struct nft_ct_helper_obj *priv = nft_obj_data(obj);
827 827
828 if (priv->helper4) 828 if (priv->helper4)
829 module_put(priv->helper4->me); 829 nf_conntrack_helper_put(priv->helper4);
830 if (priv->helper6) 830 if (priv->helper6)
831 module_put(priv->helper6->me); 831 nf_conntrack_helper_put(priv->helper6);
832} 832}
833 833
834static void nft_ct_helper_obj_eval(struct nft_object *obj, 834static void nft_ct_helper_obj_eval(struct nft_object *obj,
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 728baf88295a..4717d7796927 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -65,7 +65,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
65 return 0; 65 return 0;
66 66
67err1: 67err1:
68 nft_data_uninit(&priv->data, desc.type); 68 nft_data_release(&priv->data, desc.type);
69 return err; 69 return err;
70} 70}
71 71
@@ -73,7 +73,8 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
73 const struct nft_expr *expr) 73 const struct nft_expr *expr)
74{ 74{
75 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 75 const struct nft_immediate_expr *priv = nft_expr_priv(expr);
76 return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg)); 76
77 return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
77} 78}
78 79
79static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr) 80static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index 9edc74eedc10..cedb96c3619f 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -102,9 +102,9 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
102 priv->len = desc_from.len; 102 priv->len = desc_from.len;
103 return 0; 103 return 0;
104err2: 104err2:
105 nft_data_uninit(&priv->data_to, desc_to.type); 105 nft_data_release(&priv->data_to, desc_to.type);
106err1: 106err1:
107 nft_data_uninit(&priv->data_from, desc_from.type); 107 nft_data_release(&priv->data_from, desc_from.type);
108 return err; 108 return err;
109} 109}
110 110
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 8ec086b6b56b..3d3a6df4ce70 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -222,7 +222,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
222 struct nft_set_elem elem; 222 struct nft_set_elem elem;
223 int err; 223 int err;
224 224
225 err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); 225 err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC);
226 iter->err = err; 226 iter->err = err;
227 if (err) 227 if (err)
228 return; 228 return;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e97e2fb53f0a..fbdbaa00dd5f 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
116 else if (d > 0) 116 else if (d > 0)
117 p = &parent->rb_right; 117 p = &parent->rb_right;
118 else { 118 else {
119 if (nft_set_elem_active(&rbe->ext, genmask)) { 119 if (nft_rbtree_interval_end(rbe) &&
120 if (nft_rbtree_interval_end(rbe) && 120 !nft_rbtree_interval_end(new)) {
121 !nft_rbtree_interval_end(new)) 121 p = &parent->rb_left;
122 p = &parent->rb_left; 122 } else if (!nft_rbtree_interval_end(rbe) &&
123 else if (!nft_rbtree_interval_end(rbe) && 123 nft_rbtree_interval_end(new)) {
124 nft_rbtree_interval_end(new)) 124 p = &parent->rb_right;
125 p = &parent->rb_right; 125 } else if (nft_set_elem_active(&rbe->ext, genmask)) {
126 else { 126 *ext = &rbe->ext;
127 *ext = &rbe->ext; 127 return -EEXIST;
128 return -EEXIST; 128 } else {
129 } 129 p = &parent->rb_left;
130 } 130 }
131 } 131 }
132 } 132 }
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8876b7da6884..1770c1d9b37f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -283,28 +283,30 @@ static int xt_obj_to_user(u16 __user *psize, u16 size,
283 &U->u.user.revision, K->u.kernel.TYPE->revision) 283 &U->u.user.revision, K->u.kernel.TYPE->revision)
284 284
285int xt_data_to_user(void __user *dst, const void *src, 285int xt_data_to_user(void __user *dst, const void *src,
286 int usersize, int size) 286 int usersize, int size, int aligned_size)
287{ 287{
288 usersize = usersize ? : size; 288 usersize = usersize ? : size;
289 if (copy_to_user(dst, src, usersize)) 289 if (copy_to_user(dst, src, usersize))
290 return -EFAULT; 290 return -EFAULT;
291 if (usersize != size && clear_user(dst + usersize, size - usersize)) 291 if (usersize != aligned_size &&
292 clear_user(dst + usersize, aligned_size - usersize))
292 return -EFAULT; 293 return -EFAULT;
293 294
294 return 0; 295 return 0;
295} 296}
296EXPORT_SYMBOL_GPL(xt_data_to_user); 297EXPORT_SYMBOL_GPL(xt_data_to_user);
297 298
298#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ 299#define XT_DATA_TO_USER(U, K, TYPE) \
299 xt_data_to_user(U->data, K->data, \ 300 xt_data_to_user(U->data, K->data, \
300 K->u.kernel.TYPE->usersize, \ 301 K->u.kernel.TYPE->usersize, \
301 C_SIZE ? : K->u.kernel.TYPE->TYPE##size) 302 K->u.kernel.TYPE->TYPE##size, \
303 XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
302 304
303int xt_match_to_user(const struct xt_entry_match *m, 305int xt_match_to_user(const struct xt_entry_match *m,
304 struct xt_entry_match __user *u) 306 struct xt_entry_match __user *u)
305{ 307{
306 return XT_OBJ_TO_USER(u, m, match, 0) || 308 return XT_OBJ_TO_USER(u, m, match, 0) ||
307 XT_DATA_TO_USER(u, m, match, 0); 309 XT_DATA_TO_USER(u, m, match);
308} 310}
309EXPORT_SYMBOL_GPL(xt_match_to_user); 311EXPORT_SYMBOL_GPL(xt_match_to_user);
310 312
@@ -312,7 +314,7 @@ int xt_target_to_user(const struct xt_entry_target *t,
312 struct xt_entry_target __user *u) 314 struct xt_entry_target __user *u)
313{ 315{
314 return XT_OBJ_TO_USER(u, t, target, 0) || 316 return XT_OBJ_TO_USER(u, t, target, 0) ||
315 XT_DATA_TO_USER(u, t, target, 0); 317 XT_DATA_TO_USER(u, t, target);
316} 318}
317EXPORT_SYMBOL_GPL(xt_target_to_user); 319EXPORT_SYMBOL_GPL(xt_target_to_user);
318 320
@@ -611,6 +613,12 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
611} 613}
612EXPORT_SYMBOL_GPL(xt_compat_match_from_user); 614EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
613 615
616#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \
617 xt_data_to_user(U->data, K->data, \
618 K->u.kernel.TYPE->usersize, \
619 C_SIZE, \
620 COMPAT_XT_ALIGN(C_SIZE))
621
614int xt_compat_match_to_user(const struct xt_entry_match *m, 622int xt_compat_match_to_user(const struct xt_entry_match *m,
615 void __user **dstptr, unsigned int *size) 623 void __user **dstptr, unsigned int *size)
616{ 624{
@@ -626,7 +634,7 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
626 if (match->compat_to_user((void __user *)cm->data, m->data)) 634 if (match->compat_to_user((void __user *)cm->data, m->data))
627 return -EFAULT; 635 return -EFAULT;
628 } else { 636 } else {
629 if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm))) 637 if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
630 return -EFAULT; 638 return -EFAULT;
631 } 639 }
632 640
@@ -972,7 +980,7 @@ int xt_compat_target_to_user(const struct xt_entry_target *t,
972 if (target->compat_to_user((void __user *)ct->data, t->data)) 980 if (target->compat_to_user((void __user *)ct->data, t->data))
973 return -EFAULT; 981 return -EFAULT;
974 } else { 982 } else {
975 if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) 983 if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
976 return -EFAULT; 984 return -EFAULT;
977 } 985 }
978 986
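xt_data_to_user() now takes the aligned blob size and clears user memory from usersize up to that aligned size, so the padding that XT_ALIGN()/COMPAT_XT_ALIGN() adds after a match or target never carries stale kernel bytes to userspace. A userspace analogue of the copy-then-zero-padding step:

#include <stdio.h>
#include <string.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

/* After copying 'usersize' bytes of real data, zero the destination up to
 * the *aligned* slot size, so stale bytes never leak into the padding that
 * follows the structure.
 */
static void copy_and_pad(unsigned char *dst, const unsigned char *src,
			 size_t usersize, size_t size)
{
	size_t aligned = ALIGN_UP(size, 8);	/* 8 stands in for XT_ALIGN() */

	memcpy(dst, src, usersize);
	memset(dst + usersize, 0, aligned - usersize);
}

int main(void)
{
	unsigned char slot[16];
	unsigned char data[5] = { 1, 2, 3, 4, 5 };

	memset(slot, 0xff, sizeof(slot));	/* pretend stale kernel bytes */
	copy_and_pad(slot, data, sizeof(data), 6);
	printf("padding byte after data: %u\n", slot[7]);	/* 0, not 0xff */
	return 0;
}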
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index bb7ad82dcd56..623ef37de886 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -96,7 +96,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
96 96
97 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL); 97 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
98 if (help == NULL) { 98 if (help == NULL) {
99 module_put(helper->me); 99 nf_conntrack_helper_put(helper);
100 return -ENOMEM; 100 return -ENOMEM;
101 } 101 }
102 102
@@ -263,7 +263,7 @@ out:
263err4: 263err4:
264 help = nfct_help(ct); 264 help = nfct_help(ct);
265 if (help) 265 if (help)
266 module_put(help->helper->me); 266 nf_conntrack_helper_put(help->helper);
267err3: 267err3:
268 nf_ct_tmpl_free(ct); 268 nf_ct_tmpl_free(ct);
269err2: 269err2:
@@ -346,7 +346,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
346 if (ct) { 346 if (ct) {
347 help = nfct_help(ct); 347 help = nfct_help(ct);
348 if (help) 348 if (help)
349 module_put(help->helper->me); 349 nf_conntrack_helper_put(help->helper);
350 350
351 nf_ct_netns_put(par->net, par->family); 351 nf_ct_netns_put(par->net, par->family);
352 352
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ee841f00a6ec..7586d446d7dc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
62#include <asm/cacheflush.h> 62#include <asm/cacheflush.h>
63#include <linux/hash.h> 63#include <linux/hash.h>
64#include <linux/genetlink.h> 64#include <linux/genetlink.h>
65#include <linux/net_namespace.h>
65 66
66#include <net/net_namespace.h> 67#include <net/net_namespace.h>
67#include <net/sock.h> 68#include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
1415 goto out; 1416 goto out;
1416 } 1417 }
1417 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); 1418 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
1418 NETLINK_CB(p->skb2).nsid_is_set = true; 1419 if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
1420 NETLINK_CB(p->skb2).nsid_is_set = true;
1419 val = netlink_broadcast_deliver(sk, p->skb2); 1421 val = netlink_broadcast_deliver(sk, p->skb2);
1420 if (val < 0) { 1422 if (val < 0) {
1421 netlink_overrun(sk); 1423 netlink_overrun(sk);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index bf602e33c40a..08679ebb3068 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1123,7 +1123,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
1123 1123
1124 help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL); 1124 help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
1125 if (!help) { 1125 if (!help) {
1126 module_put(helper->me); 1126 nf_conntrack_helper_put(helper);
1127 return -ENOMEM; 1127 return -ENOMEM;
1128 } 1128 }
1129 1129
@@ -1584,7 +1584,7 @@ void ovs_ct_free_action(const struct nlattr *a)
1584static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) 1584static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
1585{ 1585{
1586 if (ct_info->helper) 1586 if (ct_info->helper)
1587 module_put(ct_info->helper->me); 1587 nf_conntrack_helper_put(ct_info->helper);
1588 if (ct_info->ct) 1588 if (ct_info->ct)
1589 nf_ct_tmpl_free(ct_info->ct); 1589 nf_ct_tmpl_free(ct_info->ct);
1590} 1590}
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 89193a634da4..04a3128adcf0 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev)
94 struct vport *vport = ovs_internal_dev_get_vport(dev); 94 struct vport *vport = ovs_internal_dev_get_vport(dev);
95 95
96 ovs_vport_free(vport); 96 ovs_vport_free(vport);
97 free_netdev(dev);
98} 97}
99 98
100static void 99static void
@@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev)
156 netdev->priv_flags &= ~IFF_TX_SKB_SHARING; 155 netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
157 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | 156 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
158 IFF_PHONY_HEADROOM | IFF_NO_QUEUE; 157 IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
159 netdev->destructor = internal_dev_destructor; 158 netdev->needs_free_netdev = true;
159 netdev->priv_destructor = internal_dev_destructor;
160 netdev->ethtool_ops = &internal_dev_ethtool_ops; 160 netdev->ethtool_ops = &internal_dev_ethtool_ops;
161 netdev->rtnl_link_ops = &internal_dev_link_ops; 161 netdev->rtnl_link_ops = &internal_dev_link_ops;
162 162
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f4001763134d..e3eeed19cc7a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2658,13 +2658,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2658 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2658 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2659 } 2659 }
2660 2660
2661 sockc.tsflags = po->sk.sk_tsflags;
2662 if (msg->msg_controllen) {
2663 err = sock_cmsg_send(&po->sk, msg, &sockc);
2664 if (unlikely(err))
2665 goto out;
2666 }
2667
2668 err = -ENXIO; 2661 err = -ENXIO;
2669 if (unlikely(dev == NULL)) 2662 if (unlikely(dev == NULL))
2670 goto out; 2663 goto out;
@@ -2672,6 +2665,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2672 if (unlikely(!(dev->flags & IFF_UP))) 2665 if (unlikely(!(dev->flags & IFF_UP)))
2673 goto out_put; 2666 goto out_put;
2674 2667
2668 sockc.tsflags = po->sk.sk_tsflags;
2669 if (msg->msg_controllen) {
2670 err = sock_cmsg_send(&po->sk, msg, &sockc);
2671 if (unlikely(err))
2672 goto out_put;
2673 }
2674
2675 if (po->sk.sk_socket->type == SOCK_RAW) 2675 if (po->sk.sk_socket->type == SOCK_RAW)
2676 reserve = dev->hard_header_len; 2676 reserve = dev->hard_header_len;
2677 size_max = po->tx_ring.frame_size 2677 size_max = po->tx_ring.frame_size
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 21c28b51be94..2c9337946e30 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev)
236 dev->tx_queue_len = 10; 236 dev->tx_queue_len = 10;
237 237
238 dev->netdev_ops = &gprs_netdev_ops; 238 dev->netdev_ops = &gprs_netdev_ops;
239 dev->destructor = free_netdev; 239 dev->needs_free_netdev = true;
240} 240}
241 241
242/* 242/*
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 0a4e28477ad9..54369225766e 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
217 unsigned int *_toklen) 217 unsigned int *_toklen)
218{ 218{
219 const __be32 *xdr = *_xdr; 219 const __be32 *xdr = *_xdr;
220 unsigned int toklen = *_toklen, n_parts, loop, tmp; 220 unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
221 221
222 /* there must be at least one name, and at least #names+1 length 222 /* there must be at least one name, and at least #names+1 length
223 * words */ 223 * words */
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
247 toklen -= 4; 247 toklen -= 4;
248 if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) 248 if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
249 return -EINVAL; 249 return -EINVAL;
250 if (tmp > toklen) 250 paddedlen = (tmp + 3) & ~3;
251 if (paddedlen > toklen)
251 return -EINVAL; 252 return -EINVAL;
252 princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); 253 princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
253 if (!princ->name_parts[loop]) 254 if (!princ->name_parts[loop])
254 return -ENOMEM; 255 return -ENOMEM;
255 memcpy(princ->name_parts[loop], xdr, tmp); 256 memcpy(princ->name_parts[loop], xdr, tmp);
256 princ->name_parts[loop][tmp] = 0; 257 princ->name_parts[loop][tmp] = 0;
257 tmp = (tmp + 3) & ~3; 258 toklen -= paddedlen;
258 toklen -= tmp; 259 xdr += paddedlen >> 2;
259 xdr += tmp >> 2;
260 } 260 }
261 261
262 if (toklen < 4) 262 if (toklen < 4)
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
265 toklen -= 4; 265 toklen -= 4;
266 if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) 266 if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
267 return -EINVAL; 267 return -EINVAL;
268 if (tmp > toklen) 268 paddedlen = (tmp + 3) & ~3;
269 if (paddedlen > toklen)
269 return -EINVAL; 270 return -EINVAL;
270 princ->realm = kmalloc(tmp + 1, GFP_KERNEL); 271 princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
271 if (!princ->realm) 272 if (!princ->realm)
272 return -ENOMEM; 273 return -ENOMEM;
273 memcpy(princ->realm, xdr, tmp); 274 memcpy(princ->realm, xdr, tmp);
274 princ->realm[tmp] = 0; 275 princ->realm[tmp] = 0;
275 tmp = (tmp + 3) & ~3; 276 toklen -= paddedlen;
276 toklen -= tmp; 277 xdr += paddedlen >> 2;
277 xdr += tmp >> 2;
278 278
279 _debug("%s/...@%s", princ->name_parts[0], princ->realm); 279 _debug("%s/...@%s", princ->name_parts[0], princ->realm);
280 280
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
293 unsigned int *_toklen) 293 unsigned int *_toklen)
294{ 294{
295 const __be32 *xdr = *_xdr; 295 const __be32 *xdr = *_xdr;
296 unsigned int toklen = *_toklen, len; 296 unsigned int toklen = *_toklen, len, paddedlen;
297 297
298 /* there must be at least one tag and one length word */ 298 /* there must be at least one tag and one length word */
299 if (toklen <= 8) 299 if (toklen <= 8)
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
307 toklen -= 8; 307 toklen -= 8;
308 if (len > max_data_size) 308 if (len > max_data_size)
309 return -EINVAL; 309 return -EINVAL;
310 paddedlen = (len + 3) & ~3;
311 if (paddedlen > toklen)
312 return -EINVAL;
310 td->data_len = len; 313 td->data_len = len;
311 314
312 if (len > 0) { 315 if (len > 0) {
313 td->data = kmemdup(xdr, len, GFP_KERNEL); 316 td->data = kmemdup(xdr, len, GFP_KERNEL);
314 if (!td->data) 317 if (!td->data)
315 return -ENOMEM; 318 return -ENOMEM;
316 len = (len + 3) & ~3; 319 toklen -= paddedlen;
317 toklen -= len; 320 xdr += paddedlen >> 2;
318 xdr += len >> 2;
319 } 321 }
320 322
321 _debug("tag %x len %x", td->tag, td->data_len); 323 _debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
387 const __be32 **_xdr, unsigned int *_toklen) 389 const __be32 **_xdr, unsigned int *_toklen)
388{ 390{
389 const __be32 *xdr = *_xdr; 391 const __be32 *xdr = *_xdr;
390 unsigned int toklen = *_toklen, len; 392 unsigned int toklen = *_toklen, len, paddedlen;
391 393
392 /* there must be at least one length word */ 394 /* there must be at least one length word */
393 if (toklen <= 4) 395 if (toklen <= 4)
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
399 toklen -= 4; 401 toklen -= 4;
400 if (len > AFSTOKEN_K5_TIX_MAX) 402 if (len > AFSTOKEN_K5_TIX_MAX)
401 return -EINVAL; 403 return -EINVAL;
404 paddedlen = (len + 3) & ~3;
405 if (paddedlen > toklen)
406 return -EINVAL;
402 *_tktlen = len; 407 *_tktlen = len;
403 408
404 _debug("ticket len %u", len); 409 _debug("ticket len %u", len);
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
407 *_ticket = kmemdup(xdr, len, GFP_KERNEL); 412 *_ticket = kmemdup(xdr, len, GFP_KERNEL);
408 if (!*_ticket) 413 if (!*_ticket)
409 return -ENOMEM; 414 return -ENOMEM;
410 len = (len + 3) & ~3; 415 toklen -= paddedlen;
411 toklen -= len; 416 xdr += paddedlen >> 2;
412 xdr += len >> 2;
413 } 417 }
414 418
415 *_xdr = xdr; 419 *_xdr = xdr;
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
552{ 556{
553 const __be32 *xdr = prep->data, *token; 557 const __be32 *xdr = prep->data, *token;
554 const char *cp; 558 const char *cp;
555 unsigned int len, tmp, loop, ntoken, toklen, sec_ix; 559 unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
556 size_t datalen = prep->datalen; 560 size_t datalen = prep->datalen;
557 int ret; 561 int ret;
558 562
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
578 if (len < 1 || len > AFSTOKEN_CELL_MAX) 582 if (len < 1 || len > AFSTOKEN_CELL_MAX)
579 goto not_xdr; 583 goto not_xdr;
580 datalen -= 4; 584 datalen -= 4;
581 tmp = (len + 3) & ~3; 585 paddedlen = (len + 3) & ~3;
582 if (tmp > datalen) 586 if (paddedlen > datalen)
583 goto not_xdr; 587 goto not_xdr;
584 588
585 cp = (const char *) xdr; 589 cp = (const char *) xdr;
586 for (loop = 0; loop < len; loop++) 590 for (loop = 0; loop < len; loop++)
587 if (!isprint(cp[loop])) 591 if (!isprint(cp[loop]))
588 goto not_xdr; 592 goto not_xdr;
589 if (len < tmp) 593 for (; loop < paddedlen; loop++)
590 for (; loop < tmp; loop++) 594 if (cp[loop])
591 if (cp[loop]) 595 goto not_xdr;
592 goto not_xdr;
593 _debug("cellname: [%u/%u] '%*.*s'", 596 _debug("cellname: [%u/%u] '%*.*s'",
594 len, tmp, len, len, (const char *) xdr); 597 len, paddedlen, len, len, (const char *) xdr);
595 datalen -= tmp; 598 datalen -= paddedlen;
596 xdr += tmp >> 2; 599 xdr += paddedlen >> 2;
597 600
598 /* get the token count */ 601 /* get the token count */
599 if (datalen < 12) 602 if (datalen < 12)
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
614 sec_ix = ntohl(*xdr); 617 sec_ix = ntohl(*xdr);
615 datalen -= 4; 618 datalen -= 4;
616 _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); 619 _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
617 if (toklen < 20 || toklen > datalen) 620 paddedlen = (toklen + 3) & ~3;
621 if (toklen < 20 || toklen > datalen || paddedlen > datalen)
618 goto not_xdr; 622 goto not_xdr;
619 datalen -= (toklen + 3) & ~3; 623 datalen -= paddedlen;
620 xdr += (toklen + 3) >> 2; 624 xdr += paddedlen >> 2;
621 625
622 } while (--loop > 0); 626 } while (--loop > 0);
623 627
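Every length field parsed above is now rounded up to its 4-byte XDR padding first (paddedlen = (len + 3) & ~3) and the padded size is checked against the bytes remaining before anything is consumed; checking only the unpadded length, as the old code did, let the later subtraction wrap toklen/datalen and walk the cursor past the end of the buffer. A standalone sketch of the corrected check, in plain C rather than the rxrpc code itself:

    #include <stdint.h>
    #include <stddef.h>

    /* Skip one length-prefixed, 4-byte-padded XDR element.
     * *pos must be <= buflen on entry; returns 0 on success, -1 if the
     * padded element would run past the end of the buffer. */
    static int xdr_skip_padded(size_t buflen, size_t *pos, uint32_t len)
    {
            uint32_t padded = (len + 3) & ~3u;

            if (padded < len)               /* len + 3 overflowed */
                    return -1;
            if (padded > buflen - *pos)     /* not enough bytes left */
                    return -1;
            *pos += padded;
            return 0;
    }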
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 164b5ac094be..7dc5892671c8 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -94,8 +94,10 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
94 k++; 94 k++;
95 } 95 }
96 96
97 if (n) 97 if (n) {
98 err = -EINVAL;
98 goto err_out; 99 goto err_out;
100 }
99 101
100 return keys_ex; 102 return keys_ex;
101 103
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index f42008b29311..b062bc80c7cb 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -132,21 +132,21 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
132 } 132 }
133 } 133 }
134 134
135 spin_lock_bh(&police->tcf_lock);
136 if (est) { 135 if (est) {
137 err = gen_replace_estimator(&police->tcf_bstats, NULL, 136 err = gen_replace_estimator(&police->tcf_bstats, NULL,
138 &police->tcf_rate_est, 137 &police->tcf_rate_est,
139 &police->tcf_lock, 138 &police->tcf_lock,
140 NULL, est); 139 NULL, est);
141 if (err) 140 if (err)
142 goto failure_unlock; 141 goto failure;
143 } else if (tb[TCA_POLICE_AVRATE] && 142 } else if (tb[TCA_POLICE_AVRATE] &&
144 (ret == ACT_P_CREATED || 143 (ret == ACT_P_CREATED ||
145 !gen_estimator_active(&police->tcf_rate_est))) { 144 !gen_estimator_active(&police->tcf_rate_est))) {
146 err = -EINVAL; 145 err = -EINVAL;
147 goto failure_unlock; 146 goto failure;
148 } 147 }
149 148
149 spin_lock_bh(&police->tcf_lock);
150 /* No failure allowed after this point */ 150 /* No failure allowed after this point */
151 police->tcfp_mtu = parm->mtu; 151 police->tcfp_mtu = parm->mtu;
152 if (police->tcfp_mtu == 0) { 152 if (police->tcfp_mtu == 0) {
@@ -192,8 +192,6 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
192 192
193 return ret; 193 return ret;
194 194
195failure_unlock:
196 spin_unlock_bh(&police->tcf_lock);
197failure: 195failure:
198 qdisc_put_rtab(P_tab); 196 qdisc_put_rtab(P_tab);
199 qdisc_put_rtab(R_tab); 197 qdisc_put_rtab(R_tab);
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index dee469fed967..51859b8edd7e 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -203,7 +203,6 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
203 203
204 *arg = (unsigned long) head; 204 *arg = (unsigned long) head;
205 rcu_assign_pointer(tp->root, new); 205 rcu_assign_pointer(tp->root, new);
206 call_rcu(&head->rcu, mall_destroy_rcu);
207 return 0; 206 return 0;
208 207
209err_replace_hw_filter: 208err_replace_hw_filter:
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bbe57d57b67f..cfdbfa18a95e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1019,7 +1019,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
1019 return sch; 1019 return sch;
1020 } 1020 }
1021 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */ 1021 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1022 ops->destroy(sch); 1022 if (ops->destroy)
1023 ops->destroy(sch);
1023err_out3: 1024err_out3:
1024 dev_put(dev); 1025 dev_put(dev);
1025 kfree((char *) sch - sch->padded); 1026 kfree((char *) sch - sch->padded);
@@ -1831,6 +1832,12 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1831 if (!qdisc_dev(root)) 1832 if (!qdisc_dev(root))
1832 return 0; 1833 return 0;
1833 1834
1835 if (tcm->tcm_parent) {
1836 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
1837 if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1838 return -1;
1839 return 0;
1840 }
1834 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { 1841 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1835 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) 1842 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1836 return -1; 1843 return -1;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a9708da28eb5..95238284c422 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1176,7 +1176,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
1176 1176
1177 asoc->ctsn_ack_point = asoc->next_tsn - 1; 1177 asoc->ctsn_ack_point = asoc->next_tsn - 1;
1178 asoc->adv_peer_ack_point = asoc->ctsn_ack_point; 1178 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1179 if (!asoc->stream) { 1179
1180 if (sctp_state(asoc, COOKIE_WAIT)) {
1181 sctp_stream_free(asoc->stream);
1180 asoc->stream = new->stream; 1182 asoc->stream = new->stream;
1181 new->stream = NULL; 1183 new->stream = NULL;
1182 } 1184 }
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 8c589230794f..3dcd0ecf3d99 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
275 if (sctp_sk(sk)->bind_hash) 275 if (sctp_sk(sk)->bind_hash)
276 sctp_put_port(sk); 276 sctp_put_port(sk);
277 277
278 sctp_sk(sk)->ep = NULL;
278 sock_put(sk); 279 sock_put(sk);
279 } 280 }
280 281
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0e06a278d2a9..ba9ad32fc447 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
473 struct sctp_association **app, 473 struct sctp_association **app,
474 struct sctp_transport **tpp) 474 struct sctp_transport **tpp)
475{ 475{
476 struct sctp_init_chunk *chunkhdr, _chunkhdr;
476 union sctp_addr saddr; 477 union sctp_addr saddr;
477 union sctp_addr daddr; 478 union sctp_addr daddr;
478 struct sctp_af *af; 479 struct sctp_af *af;
479 struct sock *sk = NULL; 480 struct sock *sk = NULL;
480 struct sctp_association *asoc; 481 struct sctp_association *asoc;
481 struct sctp_transport *transport = NULL; 482 struct sctp_transport *transport = NULL;
482 struct sctp_init_chunk *chunkhdr;
483 __u32 vtag = ntohl(sctphdr->vtag); 483 __u32 vtag = ntohl(sctphdr->vtag);
484 int len = skb->len - ((void *)sctphdr - (void *)skb->data);
485 484
486 *app = NULL; *tpp = NULL; 485 *app = NULL; *tpp = NULL;
487 486
@@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
516 * discard the packet. 515 * discard the packet.
517 */ 516 */
518 if (vtag == 0) { 517 if (vtag == 0) {
519 chunkhdr = (void *)sctphdr + sizeof(struct sctphdr); 518 /* chunk header + first 4 octects of init header */
520 if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) 519 chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
521 + sizeof(__be32) || 520 sizeof(struct sctphdr),
521 sizeof(struct sctp_chunkhdr) +
522 sizeof(__be32), &_chunkhdr);
523 if (!chunkhdr ||
522 chunkhdr->chunk_hdr.type != SCTP_CID_INIT || 524 chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
523 ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { 525 ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
524 goto out; 526 goto out;
525 } 527
526 } else if (vtag != asoc->c.peer_vtag) { 528 } else if (vtag != asoc->c.peer_vtag) {
527 goto out; 529 goto out;
528 } 530 }
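sctp_err_lookup() no longer casts the raw packet pointer and compares skb->len by hand; it reads the chunk header plus the first word of the INIT header through skb_header_pointer() into the on-stack _chunkhdr, which both bounds-checks the access and copes with non-linear skbs. The general pattern, sketched outside its context (kernel-style fragment, not the sctp code verbatim):

    struct sctp_chunkhdr _hdr, *ch;

    /* offset points at the chunk; NULL is returned if the skb is too short */
    ch = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
    if (!ch)
            goto discard;
    /* ch points either into linear skb data or at the local copy */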
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 961ee59f696a..f5b45b8b8b16 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -240,12 +240,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
240 struct sctp_bind_addr *bp; 240 struct sctp_bind_addr *bp;
241 struct ipv6_pinfo *np = inet6_sk(sk); 241 struct ipv6_pinfo *np = inet6_sk(sk);
242 struct sctp_sockaddr_entry *laddr; 242 struct sctp_sockaddr_entry *laddr;
243 union sctp_addr *baddr = NULL;
244 union sctp_addr *daddr = &t->ipaddr; 243 union sctp_addr *daddr = &t->ipaddr;
245 union sctp_addr dst_saddr; 244 union sctp_addr dst_saddr;
246 struct in6_addr *final_p, final; 245 struct in6_addr *final_p, final;
247 __u8 matchlen = 0; 246 __u8 matchlen = 0;
248 __u8 bmatchlen;
249 sctp_scope_t scope; 247 sctp_scope_t scope;
250 248
251 memset(fl6, 0, sizeof(struct flowi6)); 249 memset(fl6, 0, sizeof(struct flowi6));
@@ -312,23 +310,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
312 */ 310 */
313 rcu_read_lock(); 311 rcu_read_lock();
314 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 312 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
315 if (!laddr->valid) 313 struct dst_entry *bdst;
314 __u8 bmatchlen;
315
316 if (!laddr->valid ||
317 laddr->state != SCTP_ADDR_SRC ||
318 laddr->a.sa.sa_family != AF_INET6 ||
319 scope > sctp_scope(&laddr->a))
316 continue; 320 continue;
317 if ((laddr->state == SCTP_ADDR_SRC) && 321
318 (laddr->a.sa.sa_family == AF_INET6) && 322 fl6->saddr = laddr->a.v6.sin6_addr;
319 (scope <= sctp_scope(&laddr->a))) { 323 fl6->fl6_sport = laddr->a.v6.sin6_port;
320 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
321 if (!baddr || (matchlen < bmatchlen)) {
322 baddr = &laddr->a;
323 matchlen = bmatchlen;
324 }
325 }
326 }
327 if (baddr) {
328 fl6->saddr = baddr->v6.sin6_addr;
329 fl6->fl6_sport = baddr->v6.sin6_port;
330 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 324 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
331 dst = ip6_dst_lookup_flow(sk, fl6, final_p); 325 bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
326
327 if (!IS_ERR(bdst) &&
328 ipv6_chk_addr(dev_net(bdst->dev),
329 &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
330 if (!IS_ERR_OR_NULL(dst))
331 dst_release(dst);
332 dst = bdst;
333 break;
334 }
335
336 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
337 if (matchlen > bmatchlen)
338 continue;
339
340 if (!IS_ERR_OR_NULL(dst))
341 dst_release(dst);
342 dst = bdst;
343 matchlen = bmatchlen;
332 } 344 }
333 rcu_read_unlock(); 345 rcu_read_unlock();
334 346
@@ -665,6 +677,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
665 newnp = inet6_sk(newsk); 677 newnp = inet6_sk(newsk);
666 678
667 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 679 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
680 newnp->ipv6_mc_list = NULL;
681 newnp->ipv6_ac_list = NULL;
682 newnp->ipv6_fl_list = NULL;
668 683
669 rcu_read_lock(); 684 rcu_read_lock();
670 opt = rcu_dereference(np->opt); 685 opt = rcu_dereference(np->opt);
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 048954eee984..9a647214a91e 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -278,7 +278,6 @@ out:
278 278
279static int sctp_sock_dump(struct sock *sk, void *p) 279static int sctp_sock_dump(struct sock *sk, void *p)
280{ 280{
281 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
282 struct sctp_comm_param *commp = p; 281 struct sctp_comm_param *commp = p;
283 struct sk_buff *skb = commp->skb; 282 struct sk_buff *skb = commp->skb;
284 struct netlink_callback *cb = commp->cb; 283 struct netlink_callback *cb = commp->cb;
@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p)
287 int err = 0; 286 int err = 0;
288 287
289 lock_sock(sk); 288 lock_sock(sk);
290 list_for_each_entry(assoc, &ep->asocs, asocs) { 289 if (!sctp_sk(sk)->ep)
290 goto release;
291 list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
291 if (cb->args[4] < cb->args[1]) 292 if (cb->args[4] < cb->args[1])
292 goto next; 293 goto next;
293 294
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 8a08f13469c4..92e332e17391 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2454,16 +2454,11 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2454 * stream sequence number shall be set to 0. 2454 * stream sequence number shall be set to 0.
2455 */ 2455 */
2456 2456
2457 /* Allocate storage for the negotiated streams if it is not a temporary 2457 if (sctp_stream_init(asoc, gfp))
2458 * association. 2458 goto clean_up;
2459 */
2460 if (!asoc->temp) {
2461 if (sctp_stream_init(asoc, gfp))
2462 goto clean_up;
2463 2459
2464 if (sctp_assoc_set_id(asoc, gfp)) 2460 if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
2465 goto clean_up; 2461 goto clean_up;
2466 }
2467 2462
2468 /* ADDIP Section 4.1 ASCONF Chunk Procedures 2463 /* ADDIP Section 4.1 ASCONF Chunk Procedures
2469 * 2464 *
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 4f5e6cfc7f60..f863b5573e42 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2088,6 +2088,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
2088 } 2088 }
2089 } 2089 }
2090 2090
2091 /* Set temp so that it won't be added into hashtable */
2092 new_asoc->temp = 1;
2093
2091 /* Compare the tie_tag in cookie with the verification tag of 2094 /* Compare the tie_tag in cookie with the verification tag of
2092 * current association. 2095 * current association.
2093 */ 2096 */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f16c8d97b7f3..3a8318e518f1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4622,13 +4622,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
4622 4622
4623 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; 4623 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
4624 hash++, head++) { 4624 hash++, head++) {
4625 read_lock(&head->lock); 4625 read_lock_bh(&head->lock);
4626 sctp_for_each_hentry(epb, &head->chain) { 4626 sctp_for_each_hentry(epb, &head->chain) {
4627 err = cb(sctp_ep(epb), p); 4627 err = cb(sctp_ep(epb), p);
4628 if (err) 4628 if (err)
4629 break; 4629 break;
4630 } 4630 }
4631 read_unlock(&head->lock); 4631 read_unlock_bh(&head->lock);
4632 } 4632 }
4633 4633
4634 return err; 4634 return err;
@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
4666 if (err) 4666 if (err)
4667 return err; 4667 return err;
4668 4668
4669 sctp_transport_get_idx(net, &hti, pos); 4669 obj = sctp_transport_get_idx(net, &hti, pos + 1);
4670 obj = sctp_transport_get_next(net, &hti); 4670 for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
4671 for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
4672 struct sctp_transport *transport = obj; 4671 struct sctp_transport *transport = obj;
4673 4672
4674 if (!sctp_transport_hold(transport)) 4673 if (!sctp_transport_hold(transport))
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
index c717ef0896aa..33954852f3f8 100644
--- a/net/smc/Kconfig
+++ b/net/smc/Kconfig
@@ -8,6 +8,10 @@ config SMC
8 The Linux implementation of the SMC-R solution is designed as 8 The Linux implementation of the SMC-R solution is designed as
9 a separate socket family SMC. 9 a separate socket family SMC.
10 10
11 Warning: SMC will expose all memory for remote reads and writes
12 once a connection is established. Don't enable this option except
13 for tightly controlled lab environment.
14
11 Select this option if you want to run SMC socket applications 15 Select this option if you want to run SMC socket applications
12 16
13config SMC_DIAG 17config SMC_DIAG
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index e41f594a1e1d..03ec058d18df 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -204,7 +204,7 @@ int smc_clc_send_confirm(struct smc_sock *smc)
204 memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); 204 memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
205 hton24(cclc.qpn, link->roce_qp->qp_num); 205 hton24(cclc.qpn, link->roce_qp->qp_num);
206 cclc.rmb_rkey = 206 cclc.rmb_rkey =
207 htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); 207 htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]);
208 cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ 208 cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */
209 cclc.rmbe_alert_token = htonl(conn->alert_token_local); 209 cclc.rmbe_alert_token = htonl(conn->alert_token_local);
210 cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); 210 cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
@@ -256,7 +256,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
256 memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); 256 memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
257 hton24(aclc.qpn, link->roce_qp->qp_num); 257 hton24(aclc.qpn, link->roce_qp->qp_num);
258 aclc.rmb_rkey = 258 aclc.rmb_rkey =
259 htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); 259 htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]);
260 aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ 260 aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */
261 aclc.rmbe_alert_token = htonl(conn->alert_token_local); 261 aclc.rmbe_alert_token = htonl(conn->alert_token_local);
262 aclc.qp_mtu = link->path_mtu; 262 aclc.qp_mtu = link->path_mtu;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 65020e93ff21..3ac09a629ea1 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -613,19 +613,8 @@ int smc_rmb_create(struct smc_sock *smc)
613 rmb_desc = NULL; 613 rmb_desc = NULL;
614 continue; /* if mapping failed, try smaller one */ 614 continue; /* if mapping failed, try smaller one */
615 } 615 }
616 rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, 616 rmb_desc->rkey[SMC_SINGLE_LINK] =
617 IB_ACCESS_REMOTE_WRITE | 617 lgr->lnk[SMC_SINGLE_LINK].roce_pd->unsafe_global_rkey;
618 IB_ACCESS_LOCAL_WRITE,
619 &rmb_desc->mr_rx[SMC_SINGLE_LINK]);
620 if (rc) {
621 smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
622 tmp_bufsize, rmb_desc,
623 DMA_FROM_DEVICE);
624 kfree(rmb_desc->cpu_addr);
625 kfree(rmb_desc);
626 rmb_desc = NULL;
627 continue;
628 }
629 rmb_desc->used = 1; 618 rmb_desc->used = 1;
630 write_lock_bh(&lgr->rmbs_lock); 619 write_lock_bh(&lgr->rmbs_lock);
631 list_add(&rmb_desc->list, 620 list_add(&rmb_desc->list,
@@ -668,6 +657,7 @@ int smc_rmb_rtoken_handling(struct smc_connection *conn,
668 657
669 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { 658 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
670 if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && 659 if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
660 (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
671 test_bit(i, lgr->rtokens_used_mask)) { 661 test_bit(i, lgr->rtokens_used_mask)) {
672 conn->rtoken_idx = i; 662 conn->rtoken_idx = i;
673 return 0; 663 return 0;
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 27eb38056a27..b013cb43a327 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -93,7 +93,7 @@ struct smc_buf_desc {
93 u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; 93 u64 dma_addr[SMC_LINKS_PER_LGR_MAX];
94 /* mapped address of buffer */ 94 /* mapped address of buffer */
95 void *cpu_addr; /* virtual address of buffer */ 95 void *cpu_addr; /* virtual address of buffer */
96 struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; 96 u32 rkey[SMC_LINKS_PER_LGR_MAX];
97 /* for rmb only: 97 /* for rmb only:
98 * rkey provided to peer 98 * rkey provided to peer
99 */ 99 */
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index cb69ab977cd7..b31715505a35 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -37,24 +37,6 @@ u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system
37 * identifier 37 * identifier
38 */ 38 */
39 39
40int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
41 struct ib_mr **mr)
42{
43 int rc;
44
45 if (*mr)
46 return 0; /* already done */
47
48 /* obtain unique key -
49 * next invocation of get_dma_mr returns a different key!
50 */
51 *mr = pd->device->get_dma_mr(pd, access_flags);
52 rc = PTR_ERR_OR_ZERO(*mr);
53 if (IS_ERR(*mr))
54 *mr = NULL;
55 return rc;
56}
57
58static int smc_ib_modify_qp_init(struct smc_link *lnk) 40static int smc_ib_modify_qp_init(struct smc_link *lnk)
59{ 41{
60 struct ib_qp_attr qp_attr; 42 struct ib_qp_attr qp_attr;
@@ -210,7 +192,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
210{ 192{
211 int rc; 193 int rc;
212 194
213 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); 195 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev,
196 IB_PD_UNSAFE_GLOBAL_RKEY);
214 rc = PTR_ERR_OR_ZERO(lnk->roce_pd); 197 rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
215 if (IS_ERR(lnk->roce_pd)) 198 if (IS_ERR(lnk->roce_pd))
216 lnk->roce_pd = NULL; 199 lnk->roce_pd = NULL;
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 7e1f0e24d177..b567152a526d 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -61,8 +61,6 @@ void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
61int smc_ib_create_protection_domain(struct smc_link *lnk); 61int smc_ib_create_protection_domain(struct smc_link *lnk);
62void smc_ib_destroy_queue_pair(struct smc_link *lnk); 62void smc_ib_destroy_queue_pair(struct smc_link *lnk);
63int smc_ib_create_queue_pair(struct smc_link *lnk); 63int smc_ib_create_queue_pair(struct smc_link *lnk);
64int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
65 struct ib_mr **mr);
66int smc_ib_ready_link(struct smc_link *lnk); 64int smc_ib_ready_link(struct smc_link *lnk);
67int smc_ib_modify_qp_rts(struct smc_link *lnk); 65int smc_ib_modify_qp_rts(struct smc_link *lnk);
68int smc_ib_modify_qp_reset(struct smc_link *lnk); 66int smc_ib_modify_qp_reset(struct smc_link *lnk);
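Taken together, the SMC hunks stop registering a DMA memory region per RMB and instead advertise the protection domain's unsafe_global_rkey, which is why the PD is now allocated with IB_PD_UNSAFE_GLOBAL_RKEY and why the Kconfig help above gains its warning: that key lets the peer read and write memory without a per-buffer registration. The shape of the new setup, sketched and simplified (not the smc_ib.c/smc_core.c code verbatim):

    pd = ib_alloc_pd(ibdev, IB_PD_UNSAFE_GLOBAL_RKEY);
    if (IS_ERR(pd))
            return PTR_ERR(pd);

    /* advertised to the peer in the CLC confirm/accept messages */
    rmb_desc->rkey[SMC_SINGLE_LINK] = pd->unsafe_global_rkey;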
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 24fedd4b117e..03f6b5840764 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -119,11 +119,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
119 119
120 for (i = 0; i < (reqs << 1); i++) { 120 for (i = 0; i < (reqs << 1); i++) {
121 rqst = kzalloc(sizeof(*rqst), GFP_KERNEL); 121 rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
122 if (!rqst) { 122 if (!rqst)
123 pr_err("RPC: %s: Failed to create bc rpc_rqst\n",
124 __func__);
125 goto out_free; 123 goto out_free;
126 } 124
127 dprintk("RPC: %s: new rqst %p\n", __func__, rqst); 125 dprintk("RPC: %s: new rqst %p\n", __func__, rqst);
128 126
129 rqst->rq_xprt = &r_xprt->rx_xprt; 127 rqst->rq_xprt = &r_xprt->rx_xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 16aff8ddc16f..d5b54c020dec 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2432,7 +2432,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2432 case -ENETUNREACH: 2432 case -ENETUNREACH:
2433 case -EADDRINUSE: 2433 case -EADDRINUSE:
2434 case -ENOBUFS: 2434 case -ENOBUFS:
2435 /* retry with existing socket, after a delay */ 2435 /*
2436 * xs_tcp_force_close() wakes tasks with -EIO.
2437 * We need to wake them first to ensure the
2438 * correct error code.
2439 */
2440 xprt_wake_pending_tasks(xprt, status);
2436 xs_tcp_force_close(xprt); 2441 xs_tcp_force_close(xprt);
2437 goto out; 2442 goto out;
2438 } 2443 }
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 312ef7de57d7..ab3087687a32 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
508 } 508 }
509 509
510 if (skb_cloned(_skb) && 510 if (skb_cloned(_skb) &&
511 pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL)) 511 pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
512 goto exit; 512 goto exit;
513 513
514 /* Now reverse the concerned fields */ 514 /* Now reverse the concerned fields */
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 0d4f2f455a7c..1b92b72e812f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -362,25 +362,25 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
362 return 0; 362 return 0;
363} 363}
364 364
365#define tipc_wait_for_cond(sock_, timeout_, condition_) \ 365#define tipc_wait_for_cond(sock_, timeo_, condition_) \
366({ \ 366({ \
367 int rc_ = 0; \ 367 struct sock *sk_; \
368 int done_ = 0; \ 368 int rc_; \
369 \ 369 \
370 while (!(condition_) && !done_) { \ 370 while ((rc_ = !(condition_))) { \
371 struct sock *sk_ = sock->sk; \ 371 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
372 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 372 sk_ = (sock_)->sk; \
373 \ 373 rc_ = tipc_sk_sock_err((sock_), timeo_); \
374 rc_ = tipc_sk_sock_err(sock_, timeout_); \ 374 if (rc_) \
375 if (rc_) \ 375 break; \
376 break; \ 376 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
377 prepare_to_wait(sk_sleep(sk_), &wait_, \ 377 release_sock(sk_); \
378 TASK_INTERRUPTIBLE); \ 378 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
379 done_ = sk_wait_event(sk_, timeout_, \ 379 sched_annotate_sleep(); \
380 (condition_), &wait_); \ 380 lock_sock(sk_); \
381 remove_wait_queue(sk_sleep(sk_), &wait_); \ 381 remove_wait_queue(sk_sleep(sk_), &wait_); \
382 } \ 382 } \
383 rc_; \ 383 rc_; \
384}) 384})
385 385
386/** 386/**
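The rewritten macro replaces sk_wait_event() with the open-coded wait_woken() idiom: it drops the socket lock around the sleep itself, re-evaluates both the condition and tipc_sk_sock_err() on every pass, and returns the resulting error code directly. The same idiom appears in the vsock change further down; its bare shape is (illustrative fragment):

    DEFINE_WAIT_FUNC(wait, woken_wake_function);

    add_wait_queue(sk_sleep(sk), &wait);
    while (!condition) {
            release_sock(sk);
            timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
            lock_sock(sk);
            if (signal_pending(current) || !timeo)
                    break;
    }
    remove_wait_queue(sk_sleep(sk), &wait);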
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 6a7fe7660551..1a0c961f4ffe 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -999,7 +999,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
999 struct path path = { }; 999 struct path path = { };
1000 1000
1001 err = -EINVAL; 1001 err = -EINVAL;
1002 if (sunaddr->sun_family != AF_UNIX) 1002 if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
1003 sunaddr->sun_family != AF_UNIX)
1003 goto out; 1004 goto out;
1004 1005
1005 if (addr_len == sizeof(short)) { 1006 if (addr_len == sizeof(short)) {
@@ -1110,6 +1111,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1110 unsigned int hash; 1111 unsigned int hash;
1111 int err; 1112 int err;
1112 1113
1114 err = -EINVAL;
1115 if (alen < offsetofend(struct sockaddr, sa_family))
1116 goto out;
1117
1113 if (addr->sa_family != AF_UNSPEC) { 1118 if (addr->sa_family != AF_UNSPEC) {
1114 err = unix_mkname(sunaddr, alen, &hash); 1119 err = unix_mkname(sunaddr, alen, &hash);
1115 if (err < 0) 1120 if (err < 0)
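Both call sites now verify that the caller-supplied address length at least covers the address-family field before it is read: unix_bind() requires offsetofend(struct sockaddr_un, sun_family) bytes and unix_dgram_connect() the equivalent for struct sockaddr. A standalone sketch of the check in plain C (offsetofend is spelled out here because it is a kernel macro):

    #include <stddef.h>
    #include <errno.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    static int check_unix_addr(const struct sockaddr_un *addr, socklen_t len)
    {
            if (len < offsetofend(struct sockaddr_un, sun_family))
                    return -EINVAL;         /* too short to hold the family */
            if (addr->sun_family != AF_UNIX)
                    return -EINVAL;
            return 0;
    }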
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 6f7f6757ceef..dfc8c51e4d74 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1540,8 +1540,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1540 long timeout; 1540 long timeout;
1541 int err; 1541 int err;
1542 struct vsock_transport_send_notify_data send_data; 1542 struct vsock_transport_send_notify_data send_data;
1543 1543 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1544 DEFINE_WAIT(wait);
1545 1544
1546 sk = sock->sk; 1545 sk = sock->sk;
1547 vsk = vsock_sk(sk); 1546 vsk = vsock_sk(sk);
@@ -1584,11 +1583,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1584 if (err < 0) 1583 if (err < 0)
1585 goto out; 1584 goto out;
1586 1585
1587
1588 while (total_written < len) { 1586 while (total_written < len) {
1589 ssize_t written; 1587 ssize_t written;
1590 1588
1591 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1589 add_wait_queue(sk_sleep(sk), &wait);
1592 while (vsock_stream_has_space(vsk) == 0 && 1590 while (vsock_stream_has_space(vsk) == 0 &&
1593 sk->sk_err == 0 && 1591 sk->sk_err == 0 &&
1594 !(sk->sk_shutdown & SEND_SHUTDOWN) && 1592 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
@@ -1597,33 +1595,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1597 /* Don't wait for non-blocking sockets. */ 1595 /* Don't wait for non-blocking sockets. */
1598 if (timeout == 0) { 1596 if (timeout == 0) {
1599 err = -EAGAIN; 1597 err = -EAGAIN;
1600 finish_wait(sk_sleep(sk), &wait); 1598 remove_wait_queue(sk_sleep(sk), &wait);
1601 goto out_err; 1599 goto out_err;
1602 } 1600 }
1603 1601
1604 err = transport->notify_send_pre_block(vsk, &send_data); 1602 err = transport->notify_send_pre_block(vsk, &send_data);
1605 if (err < 0) { 1603 if (err < 0) {
1606 finish_wait(sk_sleep(sk), &wait); 1604 remove_wait_queue(sk_sleep(sk), &wait);
1607 goto out_err; 1605 goto out_err;
1608 } 1606 }
1609 1607
1610 release_sock(sk); 1608 release_sock(sk);
1611 timeout = schedule_timeout(timeout); 1609 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
1612 lock_sock(sk); 1610 lock_sock(sk);
1613 if (signal_pending(current)) { 1611 if (signal_pending(current)) {
1614 err = sock_intr_errno(timeout); 1612 err = sock_intr_errno(timeout);
1615 finish_wait(sk_sleep(sk), &wait); 1613 remove_wait_queue(sk_sleep(sk), &wait);
1616 goto out_err; 1614 goto out_err;
1617 } else if (timeout == 0) { 1615 } else if (timeout == 0) {
1618 err = -EAGAIN; 1616 err = -EAGAIN;
1619 finish_wait(sk_sleep(sk), &wait); 1617 remove_wait_queue(sk_sleep(sk), &wait);
1620 goto out_err; 1618 goto out_err;
1621 } 1619 }
1622
1623 prepare_to_wait(sk_sleep(sk), &wait,
1624 TASK_INTERRUPTIBLE);
1625 } 1620 }
1626 finish_wait(sk_sleep(sk), &wait); 1621 remove_wait_queue(sk_sleep(sk), &wait);
1627 1622
1628 /* These checks occur both as part of and after the loop 1623 /* These checks occur both as part of and after the loop
1629 * conditional since we need to check before and after 1624 * conditional since we need to check before and after
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 14d5f0c8c45f..9f0901f3e42b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -322,9 +322,9 @@ cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid)
322{ 322{
323 struct cfg80211_sched_scan_request *pos; 323 struct cfg80211_sched_scan_request *pos;
324 324
325 ASSERT_RTNL(); 325 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
326 326
327 list_for_each_entry(pos, &rdev->sched_scan_req_list, list) { 327 list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) {
328 if (pos->reqid == reqid) 328 if (pos->reqid == reqid)
329 return pos; 329 return pos;
330 } 330 }
@@ -398,13 +398,13 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid)
398 trace_cfg80211_sched_scan_results(wiphy, reqid); 398 trace_cfg80211_sched_scan_results(wiphy, reqid);
399 /* ignore if we're not scanning */ 399 /* ignore if we're not scanning */
400 400
401 rtnl_lock(); 401 rcu_read_lock();
402 request = cfg80211_find_sched_scan_req(rdev, reqid); 402 request = cfg80211_find_sched_scan_req(rdev, reqid);
403 if (request) { 403 if (request) {
404 request->report_results = true; 404 request->report_results = true;
405 queue_work(cfg80211_wq, &rdev->sched_scan_res_wk); 405 queue_work(cfg80211_wq, &rdev->sched_scan_res_wk);
406 } 406 }
407 rtnl_unlock(); 407 rcu_read_unlock();
408} 408}
409EXPORT_SYMBOL(cfg80211_sched_scan_results); 409EXPORT_SYMBOL(cfg80211_sched_scan_results);
410 410
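cfg80211_sched_scan_results() can be called from contexts that must not take the RTNL, so the lookup now runs under rcu_read_lock() and walks the request list with list_for_each_entry_rcu(); the WARN_ON_ONCE() documents that callers must hold either an RCU read lock or the RTNL. As a generic fragment, the reader side of that pattern is simply:

    rcu_read_lock();
    list_for_each_entry_rcu(pos, &head, list) {
            if (pos->id == id) {
                    /* use pos only while the read lock is held,
                     * or take a reference before dropping it */
                    break;
            }
    }
    rcu_read_unlock();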
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7198373e2920..4992f1025c9d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -454,6 +454,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
454 if (iftype == NL80211_IFTYPE_MESH_POINT) 454 if (iftype == NL80211_IFTYPE_MESH_POINT)
455 skb_copy_bits(skb, hdrlen, &mesh_flags, 1); 455 skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
456 456
457 mesh_flags &= MESH_FLAGS_AE;
458
457 switch (hdr->frame_control & 459 switch (hdr->frame_control &
458 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 460 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
459 case cpu_to_le16(IEEE80211_FCTL_TODS): 461 case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -469,9 +471,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
469 iftype != NL80211_IFTYPE_STATION)) 471 iftype != NL80211_IFTYPE_STATION))
470 return -1; 472 return -1;
471 if (iftype == NL80211_IFTYPE_MESH_POINT) { 473 if (iftype == NL80211_IFTYPE_MESH_POINT) {
472 if (mesh_flags & MESH_FLAGS_AE_A4) 474 if (mesh_flags == MESH_FLAGS_AE_A4)
473 return -1; 475 return -1;
474 if (mesh_flags & MESH_FLAGS_AE_A5_A6) { 476 if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
475 skb_copy_bits(skb, hdrlen + 477 skb_copy_bits(skb, hdrlen +
476 offsetof(struct ieee80211s_hdr, eaddr1), 478 offsetof(struct ieee80211s_hdr, eaddr1),
477 tmp.h_dest, 2 * ETH_ALEN); 479 tmp.h_dest, 2 * ETH_ALEN);
@@ -487,9 +489,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
487 ether_addr_equal(tmp.h_source, addr))) 489 ether_addr_equal(tmp.h_source, addr)))
488 return -1; 490 return -1;
489 if (iftype == NL80211_IFTYPE_MESH_POINT) { 491 if (iftype == NL80211_IFTYPE_MESH_POINT) {
490 if (mesh_flags & MESH_FLAGS_AE_A5_A6) 492 if (mesh_flags == MESH_FLAGS_AE_A5_A6)
491 return -1; 493 return -1;
492 if (mesh_flags & MESH_FLAGS_AE_A4) 494 if (mesh_flags == MESH_FLAGS_AE_A4)
493 skb_copy_bits(skb, hdrlen + 495 skb_copy_bits(skb, hdrlen +
494 offsetof(struct ieee80211s_hdr, eaddr1), 496 offsetof(struct ieee80211s_hdr, eaddr1),
495 tmp.h_source, ETH_ALEN); 497 tmp.h_source, ETH_ALEN);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 1a4db6790e20..6cdb054484d6 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev)
914 * Main IOCTl dispatcher. 914 * Main IOCTl dispatcher.
915 * Check the type of IOCTL and call the appropriate wrapper... 915 * Check the type of IOCTL and call the appropriate wrapper...
916 */ 916 */
917static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, 917static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
918 unsigned int cmd, 918 unsigned int cmd,
919 struct iw_request_info *info, 919 struct iw_request_info *info,
920 wext_ioctl_func standard, 920 wext_ioctl_func standard,
921 wext_ioctl_func private) 921 wext_ioctl_func private)
922{ 922{
923 struct iwreq *iwr = (struct iwreq *) ifr;
924 struct net_device *dev; 923 struct net_device *dev;
925 iw_handler handler; 924 iw_handler handler;
926 925
@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
928 * The copy_to/from_user() of ifr is also dealt with in there */ 927 * The copy_to/from_user() of ifr is also dealt with in there */
929 928
930 /* Make sure the device exist */ 929 /* Make sure the device exist */
931 if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL) 930 if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL)
932 return -ENODEV; 931 return -ENODEV;
933 932
934 /* A bunch of special cases, then the generic case... 933 /* A bunch of special cases, then the generic case...
@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
957 else if (private) 956 else if (private)
958 return private(dev, iwr, cmd, info, handler); 957 return private(dev, iwr, cmd, info, handler);
959 } 958 }
960 /* Old driver API : call driver ioctl handler */
961 if (dev->netdev_ops->ndo_do_ioctl)
962 return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
963 return -EOPNOTSUPP; 959 return -EOPNOTSUPP;
964} 960}
965 961
@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd)
977} 973}
978 974
979/* entry point from dev ioctl */ 975/* entry point from dev ioctl */
980static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, 976static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
981 unsigned int cmd, struct iw_request_info *info, 977 unsigned int cmd, struct iw_request_info *info,
982 wext_ioctl_func standard, 978 wext_ioctl_func standard,
983 wext_ioctl_func private) 979 wext_ioctl_func private)
@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
987 if (ret) 983 if (ret)
988 return ret; 984 return ret;
989 985
990 dev_load(net, ifr->ifr_name); 986 dev_load(net, iwr->ifr_name);
991 rtnl_lock(); 987 rtnl_lock();
992 ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private); 988 ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private);
993 rtnl_unlock(); 989 rtnl_unlock();
994 990
995 return ret; 991 return ret;
@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device * dev,
1039} 1035}
1040 1036
1041 1037
1042int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 1038int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
1043 void __user *arg) 1039 void __user *arg)
1044{ 1040{
1045 struct iw_request_info info = { .cmd = cmd, .flags = 0 }; 1041 struct iw_request_info info = { .cmd = cmd, .flags = 0 };
1046 int ret; 1042 int ret;
1047 1043
1048 ret = wext_ioctl_dispatch(net, ifr, cmd, &info, 1044 ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
1049 ioctl_standard_call, 1045 ioctl_standard_call,
1050 ioctl_private_call); 1046 ioctl_private_call);
1051 if (ret >= 0 && 1047 if (ret >= 0 &&
1052 IW_IS_GET(cmd) && 1048 IW_IS_GET(cmd) &&
1053 copy_to_user(arg, ifr, sizeof(struct iwreq))) 1049 copy_to_user(arg, iwr, sizeof(struct iwreq)))
1054 return -EFAULT; 1050 return -EFAULT;
1055 1051
1056 return ret; 1052 return ret;
@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
1107 info.cmd = cmd; 1103 info.cmd = cmd;
1108 info.flags = IW_REQUEST_FLAG_COMPAT; 1104 info.flags = IW_REQUEST_FLAG_COMPAT;
1109 1105
1110 ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info, 1106 ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
1111 compat_standard_call, 1107 compat_standard_call,
1112 compat_private_call); 1108 compat_private_call);
1113 1109
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 8b911c29860e..5a1a98df3499 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1791,32 +1791,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
1791 1791
1792static int __init x25_init(void) 1792static int __init x25_init(void)
1793{ 1793{
1794 int rc = proto_register(&x25_proto, 0); 1794 int rc;
1795 1795
1796 if (rc != 0) 1796 rc = proto_register(&x25_proto, 0);
1797 if (rc)
1797 goto out; 1798 goto out;
1798 1799
1799 rc = sock_register(&x25_family_ops); 1800 rc = sock_register(&x25_family_ops);
1800 if (rc != 0) 1801 if (rc)
1801 goto out_proto; 1802 goto out_proto;
1802 1803
1803 dev_add_pack(&x25_packet_type); 1804 dev_add_pack(&x25_packet_type);
1804 1805
1805 rc = register_netdevice_notifier(&x25_dev_notifier); 1806 rc = register_netdevice_notifier(&x25_dev_notifier);
1806 if (rc != 0) 1807 if (rc)
1807 goto out_sock; 1808 goto out_sock;
1808 1809
1809 pr_info("Linux Version 0.2\n"); 1810 rc = x25_register_sysctl();
1811 if (rc)
1812 goto out_dev;
1810 1813
1811 x25_register_sysctl();
1812 rc = x25_proc_init(); 1814 rc = x25_proc_init();
1813 if (rc != 0) 1815 if (rc)
1814 goto out_dev; 1816 goto out_sysctl;
1817
1818 pr_info("Linux Version 0.2\n");
1819
1815out: 1820out:
1816 return rc; 1821 return rc;
1822out_sysctl:
1823 x25_unregister_sysctl();
1817out_dev: 1824out_dev:
1818 unregister_netdevice_notifier(&x25_dev_notifier); 1825 unregister_netdevice_notifier(&x25_dev_notifier);
1819out_sock: 1826out_sock:
1827 dev_remove_pack(&x25_packet_type);
1820 sock_unregister(AF_X25); 1828 sock_unregister(AF_X25);
1821out_proto: 1829out_proto:
1822 proto_unregister(&x25_proto); 1830 proto_unregister(&x25_proto);
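x25_init() now checks the return value of x25_register_sysctl() (made possible by the sysctl_net_x25.c hunk that follows) and unwinds every earlier step on failure, including the previously missing dev_remove_pack(); the banner is printed only once everything has registered. The surrounding rework is the usual goto-ladder init pattern, sketched generically in plain C (the register_*/unregister_* functions are placeholder stubs):

    static int register_a(void) { return 0; }       /* placeholder stubs */
    static void unregister_a(void) { }
    static int register_b(void) { return 0; }
    static void unregister_b(void) { }
    static int register_c(void) { return 0; }

    static int example_init(void)
    {
            int rc;

            rc = register_a();
            if (rc)
                    goto out;
            rc = register_b();
            if (rc)
                    goto out_a;
            rc = register_c();
            if (rc)
                    goto out_b;
            return 0;

    out_b:
            unregister_b();
    out_a:
            unregister_a();
    out:
            return rc;
    }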
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index a06dfe143c67..ba078c85f0a1 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
73 { }, 73 { },
74}; 74};
75 75
76void __init x25_register_sysctl(void) 76int __init x25_register_sysctl(void)
77{ 77{
78 x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); 78 x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
79 if (!x25_table_header)
80 return -ENOMEM;
81 return 0;
79} 82}
80 83
81void x25_unregister_sysctl(void) 84void x25_unregister_sysctl(void)
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index abf81b329dc1..55b2ac300995 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -4,8 +4,7 @@
4 4
5obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ 5obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
6 xfrm_input.o xfrm_output.o \ 6 xfrm_input.o xfrm_output.o \
7 xfrm_sysctl.o xfrm_replay.o 7 xfrm_sysctl.o xfrm_replay.o xfrm_device.o
8obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o
9obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o 8obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
10obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o 9obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
11obj-$(CONFIG_XFRM_USER) += xfrm_user.o 10obj-$(CONFIG_XFRM_USER) += xfrm_user.o
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 8ec8a3fcf8d4..5aba03685d7d 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -22,6 +22,7 @@
22#include <net/xfrm.h> 22#include <net/xfrm.h>
23#include <linux/notifier.h> 23#include <linux/notifier.h>
24 24
25#ifdef CONFIG_XFRM_OFFLOAD
25int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) 26int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
26{ 27{
27 int err; 28 int err;
@@ -137,6 +138,7 @@ ok:
137 return true; 138 return true;
138} 139}
139EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok); 140EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
141#endif
140 142
141int xfrm_dev_register(struct net_device *dev) 143int xfrm_dev_register(struct net_device *dev)
142{ 144{
@@ -170,7 +172,7 @@ static int xfrm_dev_feat_change(struct net_device *dev)
170 172
171static int xfrm_dev_down(struct net_device *dev) 173static int xfrm_dev_down(struct net_device *dev)
172{ 174{
173 if (dev->hw_features & NETIF_F_HW_ESP) 175 if (dev->features & NETIF_F_HW_ESP)
174 xfrm_dev_state_flush(dev_net(dev), dev, true); 176 xfrm_dev_state_flush(dev_net(dev), dev, true);
175 177
176 xfrm_garbage_collect(dev_net(dev)); 178 xfrm_garbage_collect(dev_net(dev));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b00a1d5a7f52..643a18f72032 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1006,10 +1006,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1006 err = -ESRCH; 1006 err = -ESRCH;
1007out: 1007out:
1008 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1008 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1009
1010 if (cnt)
1011 xfrm_garbage_collect(net);
1012
1013 return err; 1009 return err;
1014} 1010}
1015EXPORT_SYMBOL(xfrm_policy_flush); 1011EXPORT_SYMBOL(xfrm_policy_flush);
@@ -1797,43 +1793,6 @@ free_dst:
1797 goto out; 1793 goto out;
1798} 1794}
1799 1795
1800#ifdef CONFIG_XFRM_SUB_POLICY
1801static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
1802{
1803 if (!*target) {
1804 *target = kmalloc(size, GFP_ATOMIC);
1805 if (!*target)
1806 return -ENOMEM;
1807 }
1808
1809 memcpy(*target, src, size);
1810 return 0;
1811}
1812#endif
1813
1814static int xfrm_dst_update_parent(struct dst_entry *dst,
1815 const struct xfrm_selector *sel)
1816{
1817#ifdef CONFIG_XFRM_SUB_POLICY
1818 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1819 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1820 sel, sizeof(*sel));
1821#else
1822 return 0;
1823#endif
1824}
1825
1826static int xfrm_dst_update_origin(struct dst_entry *dst,
1827 const struct flowi *fl)
1828{
1829#ifdef CONFIG_XFRM_SUB_POLICY
1830 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1831 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1832#else
1833 return 0;
1834#endif
1835}
1836
1837static int xfrm_expand_policies(const struct flowi *fl, u16 family, 1796static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1838 struct xfrm_policy **pols, 1797 struct xfrm_policy **pols,
1839 int *num_pols, int *num_xfrms) 1798 int *num_pols, int *num_xfrms)
@@ -1905,16 +1864,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1905 1864
1906 xdst = (struct xfrm_dst *)dst; 1865 xdst = (struct xfrm_dst *)dst;
1907 xdst->num_xfrms = err; 1866 xdst->num_xfrms = err;
1908 if (num_pols > 1)
1909 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1910 else
1911 err = xfrm_dst_update_origin(dst, fl);
1912 if (unlikely(err)) {
1913 dst_free(dst);
1914 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1915 return ERR_PTR(err);
1916 }
1917
1918 xdst->num_pols = num_pols; 1867 xdst->num_pols = num_pols;
1919 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 1868 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1920 xdst->policy_genid = atomic_read(&pols[0]->genid); 1869 xdst->policy_genid = atomic_read(&pols[0]->genid);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index fc3c5aa38754..2e291bc5f1fc 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1383,6 +1383,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
1383 x->curlft.add_time = orig->curlft.add_time; 1383 x->curlft.add_time = orig->curlft.add_time;
1384 x->km.state = orig->km.state; 1384 x->km.state = orig->km.state;
1385 x->km.seq = orig->km.seq; 1385 x->km.seq = orig->km.seq;
1386 x->replay = orig->replay;
1387 x->preplay = orig->preplay;
1386 1388
1387 return x; 1389 return x;
1388 1390
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 38614df33ec8..86116e9aaf3d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2027,6 +2027,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
2027 return 0; 2027 return 0;
2028 return err; 2028 return err;
2029 } 2029 }
2030 xfrm_garbage_collect(net);
2030 2031
2031 c.data.type = type; 2032 c.data.type = type;
2032 c.event = nlh->nlmsg_type; 2033 c.event = nlh->nlmsg_type;