Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c                        |   1
-rw-r--r--  net/8021q/vlan_dev.c                    |  52
-rw-r--r--  net/atm/pppoatm.c                       |   2
-rw-r--r--  net/batman-adv/bat_iv_ogm.c             |   2
-rw-r--r--  net/batman-adv/distributed-arp-table.c  |   3
-rw-r--r--  net/batman-adv/fragmentation.c          |  11
-rw-r--r--  net/batman-adv/gateway_client.c         |  11
-rw-r--r--  net/batman-adv/hard-interface.c         |   2
-rw-r--r--  net/batman-adv/originator.c             |  62
-rw-r--r--  net/bluetooth/hci_event.c               |   4
-rw-r--r--  net/bridge/br_netfilter.c               |   4
-rw-r--r--  net/core/dev.c                          | 110
-rw-r--r--  net/core/link_watch.c                   |   2
-rw-r--r--  net/core/neighbour.c                    |   4
-rw-r--r--  net/core/net_namespace.c                |   2
-rw-r--r--  net/core/rtnetlink.c                    |  33
-rw-r--r--  net/core/skbuff.c                       |   4
-rw-r--r--  net/core/utils.c                        |   8
-rw-r--r--  net/dsa/dsa.c                           |   3
-rw-r--r--  net/ipv4/af_inet.c                      |  36
-rw-r--r--  net/ipv4/fib_semantics.c                |   2
-rw-r--r--  net/ipv4/inet_connection_sock.c         |   8
-rw-r--r--  net/ipv4/inetpeer.c                     |   2
-rw-r--r--  net/ipv4/ip_forward.c                   |  54
-rw-r--r--  net/ipv4/ip_fragment.c                  |   5
-rw-r--r--  net/ipv4/ip_output.c                    |  51
-rw-r--r--  net/ipv4/ip_tunnel.c                    |   4
-rw-r--r--  net/ipv4/ip_vti.c                       |   5
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c     |   5
-rw-r--r--  net/ipv4/ping.c                         |   6
-rw-r--r--  net/ipv4/route.c                        |   2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c              |  42
-rw-r--r--  net/ipv4/tcp_output.c                   |   4
-rw-r--r--  net/ipv4/xfrm4_output.c                 |  32
-rw-r--r--  net/ipv4/xfrm4_protocol.c               |  19
-rw-r--r--  net/ipv6/ip6_offload.c                  |   6
-rw-r--r--  net/ipv6/ip6_output.c                   |   8
-rw-r--r--  net/ipv6/ip6_tunnel.c                   |   2
-rw-r--r--  net/ipv6/ip6_vti.c                      |   8
-rw-r--r--  net/ipv6/ndisc.c                        |   7
-rw-r--r--  net/ipv6/netfilter.c                    |   6
-rw-r--r--  net/ipv6/route.c                        |  24
-rw-r--r--  net/ipv6/tcpv6_offload.c                |   2
-rw-r--r--  net/ipv6/xfrm6_output.c                 |  22
-rw-r--r--  net/ipv6/xfrm6_protocol.c               |  11
-rw-r--r--  net/iucv/af_iucv.c                      |   2
-rw-r--r--  net/mac80211/ieee80211_i.h              |   1
-rw-r--r--  net/mac80211/mlme.c                     |  20
-rw-r--r--  net/mac80211/offchannel.c               |  27
-rw-r--r--  net/mac80211/rx.c                       |   3
-rw-r--r--  net/mac80211/sta_info.c                 |   3
-rw-r--r--  net/mac80211/status.c                   |   5
-rw-r--r--  net/mac80211/trace.h                    |   4
-rw-r--r--  net/mac80211/util.c                     |   2
-rw-r--r--  net/mac80211/vht.c                      |   9
-rw-r--r--  net/netfilter/nf_conntrack_core.c       |   2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c    |   3
-rw-r--r--  net/netfilter/nf_tables_core.c          |  49
-rw-r--r--  net/netfilter/nfnetlink.c               |   8
-rw-r--r--  net/rds/ib_recv.c                       |   4
-rw-r--r--  net/rds/iw_recv.c                       |   4
-rw-r--r--  net/rds/send.c                          |   6
-rw-r--r--  net/rds/tcp_send.c                      |   2
-rw-r--r--  net/rxrpc/ar-key.c                      |   2
-rw-r--r--  net/sched/cls_tcindex.c                 |  30
-rw-r--r--  net/sunrpc/auth.c                       |   2
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c          |   2
-rw-r--r--  net/sunrpc/backchannel_rqst.c           |   4
-rw-r--r--  net/sunrpc/xprt.c                       |   4
-rw-r--r--  net/sunrpc/xprtsock.c                   |  16
-rw-r--r--  net/unix/af_unix.c                      |   2
-rw-r--r--  net/wireless/scan.c                     |  12
-rw-r--r--  net/wireless/sme.c                      |   2
73 files changed, 590 insertions, 333 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 175273f38cb1..44ebd5c2cd4a 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
 	if (err < 0)
 		goto out_uninit_mvrp;
 
+	vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out_uninit_mvrp;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 733ec283ed1b..019efb79708f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 	}
 }
 
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
-	int subclass = 0;
-
-	while (is_vlan_dev(real_dev)) {
-		subclass++;
-		real_dev = vlan_dev_priv(real_dev)->real_dev;
-	}
-
-	return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
-	int err = 0, subclass;
-
-	subclass = vlan_calculate_locking_subclass(to);
-
-	spin_lock_nested(&to->addr_list_lock, subclass);
-	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-	if (!err)
-		__dev_set_rx_mode(to);
-	spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
-	int err = 0, subclass;
-
-	subclass = vlan_calculate_locking_subclass(to);
-
-	spin_lock_nested(&to->addr_list_lock, subclass);
-	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-	if (!err)
-		__dev_set_rx_mode(to);
-	spin_unlock(&to->addr_list_lock);
-}
-
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-	vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-	vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
 	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+	return vlan_dev_priv(dev)->nest_level;
+}
+
 static const struct header_ops vlan_header_ops = {
 	.create		 = vlan_dev_hard_header,
 	.rebuild	 = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-	int subclass = 0;
 
 	netif_carrier_off(dev);
 
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
 
 	SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-	subclass = vlan_calculate_locking_subclass(dev);
-	vlan_dev_set_lockdep_class(dev, subclass);
+	vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
 
 	vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
 	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
 #endif
 	.ndo_fix_features	= vlan_dev_fix_features,
+	.ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
 void vlan_setup(struct net_device *dev)
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 8c93267ce969..c4e09846d1de 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -252,7 +252,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
 	 * we need to ensure there's a memory barrier after it. The bit
 	 * *must* be set before we do the atomic_inc() on pvcc->inflight.
 	 * There's no smp_mb__after_set_bit(), so it's this or abuse
-	 * smp_mb__after_clear_bit().
+	 * smp_mb__after_atomic().
 	 */
 	test_and_set_bit(BLOCKED, &pvcc->blocked);
 
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index b3bd4ec3fd94..f04224c32005 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1545,6 +1545,8 @@ out_neigh:
 	if ((orig_neigh_node) && (!is_single_hop_neigh))
 		batadv_orig_node_free_ref(orig_neigh_node);
 out:
+	if (router_ifinfo)
+		batadv_neigh_ifinfo_free_ref(router_ifinfo);
 	if (router)
 		batadv_neigh_node_free_ref(router);
 	if (router_router)
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index b25fd64d727b..aa5d4946d0d7 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 		 * additional DAT answer may trigger kernel warnings about
 		 * a packet coming from the wrong port.
 		 */
-		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
-					BATADV_NO_FLAGS)) {
+		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
 			ret = true;
 			goto out;
 		}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index bcc4bea632fa..f14e54a05691 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
 			    struct batadv_neigh_node *neigh_node)
 {
 	struct batadv_priv *bat_priv;
-	struct batadv_hard_iface *primary_if;
+	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_frag_packet frag_header;
 	struct sk_buff *skb_fragment;
 	unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
 	unsigned header_size = sizeof(frag_header);
 	unsigned max_fragment_size, max_packet_size;
+	bool ret = false;
 
 	/* To avoid merge and refragmentation at next-hops we never send
 	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
 			   skb->len + ETH_HLEN);
 	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 
-	return true;
+	ret = true;
+
 out_err:
-	return false;
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+
+	return ret;
 }
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index c835e137423b..90cff585b37d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -42,8 +42,10 @@
 
 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
 {
-	if (atomic_dec_and_test(&gw_node->refcount))
+	if (atomic_dec_and_test(&gw_node->refcount)) {
+		batadv_orig_node_free_ref(gw_node->orig_node);
 		kfree_rcu(gw_node, rcu);
+	}
 }
 
 static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 	if (gateway->bandwidth_down == 0)
 		return;
 
+	if (!atomic_inc_not_zero(&orig_node->refcount))
+		return;
+
 	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-	if (!gw_node)
+	if (!gw_node) {
+		batadv_orig_node_free_ref(orig_node);
 		return;
+	}
 
 	INIT_HLIST_NODE(&gw_node->list);
 	gw_node->orig_node = orig_node;
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b851cc580853..fbda6b54baff 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
 		return true;
 
 	/* no more parents..stop recursion */
-	if (net_dev->iflink == net_dev->ifindex)
+	if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
 		return false;
 
 	/* recurse over the parent device */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ffd9dfbd9b0e..6a484514cd3e 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
 {
 	struct batadv_orig_ifinfo *orig_ifinfo;
+	struct batadv_neigh_node *router;
 
 	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
 
 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
 		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
 
+	/* this is the last reference to this object */
+	router = rcu_dereference_protected(orig_ifinfo->router, true);
+	if (router)
+		batadv_neigh_node_free_ref_now(router);
 	kfree(orig_ifinfo);
 }
 
@@ -702,6 +707,47 @@ free_orig_node:
 }
 
 /**
+ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: orig node which is to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+			  struct batadv_neigh_node *neigh)
+{
+	struct batadv_neigh_ifinfo *neigh_ifinfo;
+	struct batadv_hard_iface *if_outgoing;
+	struct hlist_node *node_tmp;
+
+	spin_lock_bh(&neigh->ifinfo_lock);
+
+	/* for all ifinfo objects for this neighinator */
+	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+				  &neigh->ifinfo_list, list) {
+		if_outgoing = neigh_ifinfo->if_outgoing;
+
+		/* always keep the default interface */
+		if (if_outgoing == BATADV_IF_DEFAULT)
+			continue;
+
+		/* don't purge if the interface is not (going) down */
+		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
+		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
+		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
+			continue;
+
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+			   neigh->addr, if_outgoing->net_dev->name);
+
+		hlist_del_rcu(&neigh_ifinfo->list);
+		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+	}
+
+	spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
+/**
  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 
 			hlist_del_rcu(&neigh_node->list);
 			batadv_neigh_node_free_ref(neigh_node);
+		} else {
+			/* only necessary if not the whole neighbor is to be
+			 * deleted, but some interface has been removed.
+			 */
+			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
 		}
 	}
 
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 {
 	struct batadv_neigh_node *best_neigh_node;
 	struct batadv_hard_iface *hard_iface;
-	bool changed;
+	bool changed_ifinfo, changed_neigh;
 
 	if (batadv_has_timed_out(orig_node->last_seen,
 				 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 			   jiffies_to_msecs(orig_node->last_seen));
 		return true;
 	}
-	changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
-	changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
+	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
 
-	if (!changed)
+	if (!changed_ifinfo && !changed_neigh)
 		return false;
 
 	/* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
 		bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
 
 out:
-	batadv_hardif_free_ref(hard_iface);
+	if (hard_iface)
+		batadv_hardif_free_ref(hard_iface);
 	return 0;
 }
 
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 15010a230b6d..682f33a38366 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,7 +45,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 		return;
 
 	clear_bit(HCI_INQUIRY, &hdev->flags);
-	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
 	hci_conn_check_pending(hdev);
@@ -1768,7 +1768,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
 		return;
 
-	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 80e1b0f60a30..2acf7fa1fec6 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
 	return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	int ret;
 
-	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+	if (skb->protocol == htons(ETH_P_IP) &&
 	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
 	    !skb_is_gso(skb)) {
 		if (br_parse_ip_options(skb))
diff --git a/net/core/dev.c b/net/core/dev.c
index d2c8a06b3a98..8b07db37dc10 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1326,7 +1326,7 @@ static int __dev_close_many(struct list_head *head)
 		 * dev->stop() will invoke napi_disable() on all of it's
 		 * napi_struct instances on this device.
 		 */
-		smp_mb__after_clear_bit(); /* Commit netif_running(). */
+		smp_mb__after_atomic(); /* Commit netif_running(). */
 	}
 
 	dev_deactivate_many(head);
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  *	2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
 	int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-					    const struct net_device *dev,
 					    netdev_features_t features)
 {
 	int tmp;
 
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
 		features &= ~NETIF_F_ALL_CSUM;
-	} else if (illegal_highdma(dev, skb)) {
+	} else if (illegal_highdma(skb->dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
 
 	return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-					 const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
-	netdev_features_t features = dev->features;
+	netdev_features_t features = skb->dev->features;
 
-	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, dev, features);
+		return harmonize_features(skb, features);
 	}
 
-	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
 		     NETIF_F_HW_VLAN_STAG_TX);
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
 				 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 				 NETIF_F_HW_VLAN_STAG_TX;
 
-	return harmonize_features(skb, dev, features);
+	return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
@@ -3343,7 +3341,7 @@ static void net_tx_action(struct softirq_action *h)
 
 			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
-				smp_mb__before_clear_bit();
+				smp_mb__before_atomic();
 				clear_bit(__QDISC_STATE_SCHED,
 					  &q->state);
 				qdisc_run(q);
@@ -3353,7 +3351,7 @@ static void net_tx_action(struct softirq_action *h)
 					      &q->state)) {
 					__netif_reschedule(q);
 				} else {
-					smp_mb__before_clear_bit();
+					smp_mb__before_atomic();
 					clear_bit(__QDISC_STATE_SCHED,
 						  &q->state);
 				}
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	}
 	NAPI_GRO_CB(skb)->count = 1;
 	NAPI_GRO_CB(skb)->age = jiffies;
+	NAPI_GRO_CB(skb)->last = skb;
 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	skb->next = napi->gro_list;
 	napi->gro_list = skb;
@@ -4244,7 +4243,7 @@ void __napi_complete(struct napi_struct *n)
 	BUG_ON(n->gro_list);
 
 	list_del(&n->poll_list);
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(NAPI_STATE_SCHED, &n->state);
 }
 EXPORT_SYMBOL(__napi_complete);
@@ -4543,6 +4542,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
 /**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+						 struct list_head **iter)
+{
+	struct netdev_adjacent *upper;
+
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+	if (&upper->list == &dev->adj_list.upper)
+		return NULL;
+
+	*iter = &upper->list;
+
+	return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
+/**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
  * @iter: list_head ** of the current position
@@ -4624,6 +4649,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
 /**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ * list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchainged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->adj_list.lower)
+		return NULL;
+
+	*iter = &lower->list;
+
+	return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
+/**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *				       lower neighbour list, RCU
  *				       variant
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+		       bool (*type_check)(struct net_device *dev))
+{
+	struct net_device *lower = NULL;
+	struct list_head *iter;
+	int max_nest = -1;
+	int nest;
+
+	ASSERT_RTNL();
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		nest = dev_get_nest_level(lower, type_check);
+		if (max_nest < nest)
+			max_nest = nest;
+	}
+
+	if (type_check(dev))
+		max_nest++;
+
+	return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
 	if (ops->ndo_set_rx_mode)
 		ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 9c3a839322ba..bd0767e6b2b3 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -147,7 +147,7 @@ static void linkwatch_do_dev(struct net_device *dev)
 	 * Make sure the above read is complete since it can be
 	 * rewritten as soon as we clear the bit below.
 	 */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 
 	/* We are about to handle this device,
 	 * so new events can be accepted
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8f8a96ef9f3f..32d872eec7f5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
 	neigh->updated = jiffies;
 	if (!(neigh->nud_state & NUD_FAILED))
 		return;
-	neigh->nud_state = NUD_PROBE;
-	atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+	neigh->nud_state = NUD_INCOMPLETE;
+	atomic_set(&neigh->probes, neigh_max_probes(neigh));
 	neigh_add_timer(neigh,
 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 81d3a9a08453..7c8ffd974961 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9837bebf93ce..2d8d8fcfa060 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+	struct net *net;
+	bool unregistering;
+	DEFINE_WAIT(wait);
+
+	for (;;) {
+		prepare_to_wait(&netdev_unregistering_wq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		unregistering = false;
+		rtnl_lock();
+		for_each_net(net) {
+			if (net->dev_unreg_count > 0) {
+				unregistering = true;
+				break;
+			}
+		}
+		if (!unregistering)
+			break;
+		__rtnl_unlock();
+		schedule();
+	}
+	finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-	rtnl_lock();
+	/* Close the race with cleanup_net() */
+	mutex_lock(&net_mutex);
+	rtnl_lock_unregistering_all();
 	__rtnl_link_unregister(ops);
 	rtnl_unlock();
+	mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1b62343f5837..8383b2bddeb9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	if (unlikely(p->len + len >= 65536))
 		return -E2BIG;
 
-	lp = NAPI_GRO_CB(p)->last ?: p;
+	lp = NAPI_GRO_CB(p)->last;
 	pinfo = skb_shinfo(lp);
 
 	if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
 	__skb_pull(skb, offset);
 
-	if (!NAPI_GRO_CB(p)->last)
+	if (NAPI_GRO_CB(p)->last == p)
 		skb_shinfo(p)->frag_list = skb;
 	else
 		NAPI_GRO_CB(p)->last->next = skb;
diff --git a/net/core/utils.c b/net/core/utils.c
index 2f737bf90b3f..eed34338736c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
 	struct __net_random_once_work *work =
 		container_of(w, struct __net_random_once_work, work);
-	if (!static_key_enabled(work->key))
-		static_key_slow_inc(work->key);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
 	kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *done_key)
+			   struct static_key *once_key)
 {
 	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
 	*done = true;
 	spin_unlock_irqrestore(&lock, flags);
 
-	__net_random_once_disable_jump(done_key);
+	__net_random_once_disable_jump(once_key);
 
 	return true;
 }
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0eb5d5e76dfb..5db37cef50a9 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
 		goto out_free;
 	}
 
-	chip_index = 0;
+	chip_index = -1;
 	for_each_available_child_of_node(np, child) {
+		chip_index++;
 		cd = &pd->chip[chip_index];
 
 		cd->mii_bus = &mdio_bus->dev;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8c54870db792..6d6dd345bc4d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1650,6 +1650,39 @@ static int __init init_ipv4_mibs(void)
 	return register_pernet_subsys(&ipv4_mib_ops);
 }
 
+static __net_init int inet_init_net(struct net *net)
+{
+	/*
+	 * Set defaults for local port range
+	 */
+	seqlock_init(&net->ipv4.ip_local_ports.lock);
+	net->ipv4.ip_local_ports.range[0] = 32768;
+	net->ipv4.ip_local_ports.range[1] = 61000;
+
+	seqlock_init(&net->ipv4.ping_group_range.lock);
+	/*
+	 * Sane defaults - nobody may create ping sockets.
+	 * Boot scripts should set this to distro-specific group.
+	 */
+	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
+	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+	return 0;
+}
+
+static __net_exit void inet_exit_net(struct net *net)
+{
+}
+
+static __net_initdata struct pernet_operations af_inet_ops = {
+	.init = inet_init_net,
+	.exit = inet_exit_net,
+};
+
+static int __init init_inet_pernet_ops(void)
+{
+	return register_pernet_subsys(&af_inet_ops);
+}
+
 static int ipv4_proc_init(void);
 
 /*
@@ -1794,6 +1827,9 @@ static int __init inet_init(void)
 	if (ip_mr_init())
 		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
 #endif
+
+	if (init_inet_pernet_ops())
+		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
 	/*
 	 *	Initialise per-cpu ipv4 mibs
 	 */
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 8a043f03c88e..b10cd43a4722 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
 	if (fi == NULL)
 		goto failure;
+	fib_info_cnt++;
 	if (cfg->fc_mx) {
 		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
 		if (!fi->fib_metrics)
 			goto failure;
 	} else
 		fi->fib_metrics = (u32 *) dst_default_metrics;
-	fib_info_cnt++;
 
 	fi->fib_net = hold_net(net);
 	fi->fib_protocol = cfg->fc_protocol;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 0d1e2cb877ec..a56b8e6e866a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
 	unsigned int seq;
 
 	do {
-		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
-		*low = net->ipv4.sysctl_local_ports.range[0];
-		*high = net->ipv4.sysctl_local_ports.range[1];
-	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+		*low = net->ipv4.ip_local_ports.range[0];
+		*high = net->ipv4.ip_local_ports.range[1];
+	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 48f424465112..56cd458a1b8c 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -522,7 +522,7 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
 void inet_putpeer(struct inet_peer *p)
 {
 	p->dtime = (__u32)jiffies;
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&p->refcnt);
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index be8abe73bb9f..6f111e48e11c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -42,12 +42,12 @@
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
 	return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-	       !skb->local_df;
+	       skb->local_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
-	if (skb->len <= mtu || skb->local_df)
+	if (skb->len <= mtu)
 		return false;
 
 	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	return true;
 }
 
-static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
-{
-	unsigned int mtu;
-
-	if (skb->local_df || !skb_is_gso(skb))
-		return false;
-
-	mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
-
-	/* if seglen > mtu, do software segmentation for IP fragmentation on
-	 * output. DF bit cannot be set since ip_forward would have sent
-	 * icmp error.
-	 */
-	return skb_gso_network_seglen(skb) > mtu;
-}
-
-/* called if GSO skb needs to be fragmented on forward */
-static int ip_forward_finish_gso(struct sk_buff *skb)
-{
-	struct dst_entry *dst = skb_dst(skb);
-	netdev_features_t features;
-	struct sk_buff *segs;
-	int ret = 0;
-
-	features = netif_skb_dev_features(skb, dst->dev);
-	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-	if (IS_ERR(segs)) {
-		kfree_skb(skb);
-		return -ENOMEM;
-	}
-
-	consume_skb(skb);
-
-	do {
-		struct sk_buff *nskb = segs->next;
-		int err;
-
-		segs->next = NULL;
-		err = dst_output(segs);
-
-		if (err && ret == 0)
-			ret = err;
-		segs = nskb;
-	} while (segs);
-
-	return ret;
-}
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
-	if (ip_gso_exceeds_dst_mtu(skb))
-		return ip_forward_finish_gso(skb);
-
 	return dst_output(skb);
 }
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index c10a3ce5cbff..ed32313e307c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
232 * "Fragment Reassembly Timeout" message, per RFC792. 232 * "Fragment Reassembly Timeout" message, per RFC792.
233 */ 233 */
234 if (qp->user == IP_DEFRAG_AF_PACKET || 234 if (qp->user == IP_DEFRAG_AF_PACKET ||
235 (qp->user == IP_DEFRAG_CONNTRACK_IN && 235 ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
236 skb_rtable(head)->rt_type != RTN_LOCAL)) 236 (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
237 (skb_rtable(head)->rt_type != RTN_LOCAL)))
237 goto out_rcu_unlock; 238 goto out_rcu_unlock;
238 239
239 240
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1cbeba5edff9..a52f50187b54 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	return -EINVAL;
 }
 
+static int ip_finish_output_gso(struct sk_buff *skb)
+{
+	netdev_features_t features;
+	struct sk_buff *segs;
+	int ret = 0;
+
+	/* common case: locally created skb or seglen is <= mtu */
+	if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
+	      skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+		return ip_finish_output2(skb);
+
+	/* Slowpath - GSO segment length is exceeding the dst MTU.
+	 *
+	 * This can happen in two cases:
+	 * 1) TCP GRO packet, DF bit not set
+	 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
+	 * from host network stack.
+	 */
+	features = netif_skb_features(skb);
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+	if (IS_ERR(segs)) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	consume_skb(skb);
+
+	do {
+		struct sk_buff *nskb = segs->next;
+		int err;
+
+		segs->next = NULL;
+		err = ip_fragment(segs, ip_finish_output2);
+
+		if (err && ret == 0)
+			ret = err;
+		segs = nskb;
+	} while (segs);
+
+	return ret;
+}
+
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
 		return dst_output(skb);
 	}
 #endif
-	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
+	if (skb_is_gso(skb))
+		return ip_finish_output_gso(skb);
+
+	if (skb->len > ip_skb_dst_mtu(skb))
 		return ip_fragment(skb, ip_finish_output2);
-	else
+
 	return ip_finish_output2(skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b3f859731c60..2acc2337d38b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst;
 	int err;
-	bool connected = true;
+	bool connected;
 
 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+	connected = (tunnel->parms.iph.daddr != 0);
 
 	dst = tnl_params->daddr;
 	if (dst == 0) {
@@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
 	 */
 	if (!IS_ERR(itn->fb_tunnel_dev)) {
 		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
 		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
 	}
 	rtnl_unlock();
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index afcee51b90ed..13ef00f1e17b 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vti4_err(struct sk_buff *skb, u32 info)
 {
 	__be32 spi;
+	__u32 mark;
 	struct xfrm_state *x;
 	struct ip_tunnel *tunnel;
 	struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
 	if (!tunnel)
 		return -1;
 
+	mark = be32_to_cpu(tunnel->parms.o_key);
+
 	switch (protocol) {
 	case IPPROTO_ESP:
 		esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
 		return 0;
 	}
 
-	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
 			      spi, protocol, AF_INET);
 	if (!x)
 		return 0;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 12e13bd82b5b..f40f321b41fc 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
 	int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 	err = ip_defrag(skb, user);
 	local_bh_enable();
 
-	if (!err)
+	if (!err) {
 		ip_send_check(ip_hdr(skb));
+		skb->local_df = 1;
+	}
 
 	return err;
 }
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8210964a9f19..044a0ddf6a79 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -236,15 +236,15 @@ exit:
 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
 					  kgid_t *high)
 {
-	kgid_t *data = net->ipv4.sysctl_ping_group_range;
+	kgid_t *data = net->ipv4.ping_group_range.range;
 	unsigned int seq;
 
 	do {
-		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index db1e0da871f4..5e676be3daeb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb,
 	struct in_device *out_dev;
 	unsigned int flags = 0;
 	bool do_cache;
-	u32 itag;
+	u32 itag = 0;
 
 	/* get a working reference to the output device */
 	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 44eba052b43d..5cde8f263d40 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
-	write_seqlock(&net->ipv4.sysctl_local_ports.lock);
-	net->ipv4.sysctl_local_ports.range[0] = range[0];
-	net->ipv4.sysctl_local_ports.range[1] = range[1];
-	write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+	write_seqlock(&net->ipv4.ip_local_ports.lock);
+	net->ipv4.ip_local_ports.range[0] = range[0];
+	net->ipv4.ip_local_ports.range[1] = range[1];
+	write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
 				 size_t *lenp, loff_t *ppos)
 {
 	struct net *net =
-		container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
+		container_of(table->data, struct net, ipv4.ip_local_ports.range);
 	int ret;
 	int range[2];
 	struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 {
 	kgid_t *data = table->data;
 	struct net *net =
-		container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+		container_of(table->data, struct net, ipv4.ping_group_range.range);
 	unsigned int seq;
 	do {
-		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 {
 	kgid_t *data = table->data;
 	struct net *net =
-		container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
-	write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+		container_of(table->data, struct net, ipv4.ping_group_range.range);
+	write_seqlock(&net->ipv4.ip_local_ports.lock);
 	data[0] = low;
 	data[1] = high;
-	write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+	write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = {
 	},
 	{
 		.procname	= "ping_group_range",
-		.data		= &init_net.ipv4.sysctl_ping_group_range,
+		.data		= &init_net.ipv4.ping_group_range.range,
 		.maxlen		= sizeof(gid_t)*2,
 		.mode		= 0644,
 		.proc_handler	= ipv4_ping_group_range,
@@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = {
 	},
 	{
 		.procname	= "ip_local_port_range",
-		.maxlen		= sizeof(init_net.ipv4.sysctl_local_ports.range),
-		.data		= &init_net.ipv4.sysctl_local_ports.range,
+		.maxlen		= sizeof(init_net.ipv4.ip_local_ports.range),
+		.data		= &init_net.ipv4.ip_local_ports.range,
 		.mode		= 0644,
 		.proc_handler	= ipv4_local_port_range,
 	},
@@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 			table[i].data += (void *)net - (void *)&init_net;
 	}
 
-	/*
-	 * Sane defaults - nobody may create ping sockets.
-	 * Boot scripts should set this to distro-specific group.
-	 */
-	net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
-	net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
-
-	/*
-	 * Set defaults for local port range
-	 */
-	seqlock_init(&net->ipv4.sysctl_local_ports.lock);
-	net->ipv4.sysctl_local_ports.range[0] = 32768;
-	net->ipv4.sysctl_local_ports.range[1] = 61000;
-
 	net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
 	if (net->ipv4.ipv4_hdr == NULL)
 		goto err_reg;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 12d6016bdd9a..2d340bd2cd3d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1930,10 +1930,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1930 /* It is possible TX completion already happened 1930 /* It is possible TX completion already happened
1931 * before we set TSQ_THROTTLED, so we must 1931 * before we set TSQ_THROTTLED, so we must
1932 * test again the condition. 1932 * test again the condition.
1933 * We abuse smp_mb__after_clear_bit() because
1934 * there is no smp_mb__after_set_bit() yet
1935 */ 1933 */
1936 smp_mb__after_clear_bit(); 1934 smp_mb__after_atomic();
1937 if (atomic_read(&sk->sk_wmem_alloc) > limit) 1935 if (atomic_read(&sk->sk_wmem_alloc) > limit)
1938 break; 1936 break;
1939 } 1937 }
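
The rename above comes from the tree-wide switch to the generic smp_mb__before_atomic()/smp_mb__after_atomic() barriers, which pair with any non-value-returning atomic or bitop. A minimal sketch of the pairing, assuming the new names (illustrative only, not taken verbatim from this patch):

	static bool tsq_throttled_and_still_over_limit(struct sock *sk,
						       struct tcp_sock *tp,
						       unsigned int limit)
	{
		set_bit(TSQ_THROTTLED, &tp->tsq_flags);
		/* set_bit() implies no barrier; order it before the re-read */
		smp_mb__after_atomic();
		return atomic_read(&sk->sk_wmem_alloc) > limit;
	}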
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 40e701f2e1e0..186a8ecf92fa 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
62 if (err) 62 if (err)
63 return err; 63 return err;
64 64
65 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 65 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
66 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
67
68 skb->protocol = htons(ETH_P_IP);
69 66
70 return x->outer_mode->output2(x, skb); 67 return x->outer_mode->output2(x, skb);
71} 68}
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
73 70
74int xfrm4_output_finish(struct sk_buff *skb) 71int xfrm4_output_finish(struct sk_buff *skb)
75{ 72{
73 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
74 skb->protocol = htons(ETH_P_IP);
75
76#ifdef CONFIG_NETFILTER
77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
78#endif
79
80 return xfrm_output(skb);
81}
82
83static int __xfrm4_output(struct sk_buff *skb)
84{
85 struct xfrm_state *x = skb_dst(skb)->xfrm;
86
76#ifdef CONFIG_NETFILTER 87#ifdef CONFIG_NETFILTER
77 if (!skb_dst(skb)->xfrm) { 88 if (!x) {
78 IPCB(skb)->flags |= IPSKB_REROUTED; 89 IPCB(skb)->flags |= IPSKB_REROUTED;
79 return dst_output(skb); 90 return dst_output(skb);
80 } 91 }
81
82 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
83#endif 92#endif
84 93
85 skb->protocol = htons(ETH_P_IP); 94 return x->outer_mode->afinfo->output_finish(skb);
86 return xfrm_output(skb);
87} 95}
88 96
89int xfrm4_output(struct sock *sk, struct sk_buff *skb) 97int xfrm4_output(struct sock *sk, struct sk_buff *skb)
90{ 98{
91 struct dst_entry *dst = skb_dst(skb);
92 struct xfrm_state *x = dst->xfrm;
93
94 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, 99 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
95 NULL, dst->dev, 100 NULL, skb_dst(skb)->dev, __xfrm4_output,
96 x->outer_mode->afinfo->output_finish,
97 !(IPCB(skb)->flags & IPSKB_REROUTED)); 101 !(IPCB(skb)->flags & IPSKB_REROUTED));
98} 102}
99 103
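
For reference, NF_HOOK_COND() behaves roughly like the sketch below (a simplified model, not the real macro, which also threads the in/out devices through the hook): when the condition is false the POST_ROUTING hook is skipped and the continuation runs directly, which is how already-rerouted packets avoid traversing netfilter twice.

	static inline int nf_hook_cond_sketch(struct sk_buff *skb,
					      int (*okfn)(struct sk_buff *),
					      bool cond)
	{
		if (!cond)
			return okfn(skb);		/* bypass the hook */
		return run_hook_then(okfn, skb);	/* hypothetical helper */
	}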
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index 7f7b243e8139..a2ce0101eaac 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
50{ 50{
51 int ret; 51 int ret;
52 struct xfrm4_protocol *handler; 52 struct xfrm4_protocol *handler;
53 struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
53 54
54 for_each_protocol_rcu(*proto_handlers(protocol), handler) 55 if (!head)
56 return 0;
57
58 for_each_protocol_rcu(*head, handler)
55 if ((ret = handler->cb_handler(skb, err)) <= 0) 59 if ((ret = handler->cb_handler(skb, err)) <= 0)
56 return ret; 60 return ret;
57 61
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
64{ 68{
65 int ret; 69 int ret;
66 struct xfrm4_protocol *handler; 70 struct xfrm4_protocol *handler;
71 struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
67 72
68 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; 73 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
69 XFRM_SPI_SKB_CB(skb)->family = AF_INET; 74 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
70 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); 75 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
71 76
72 for_each_protocol_rcu(*proto_handlers(nexthdr), handler) 77 if (!head)
78 goto out;
79
80 for_each_protocol_rcu(*head, handler)
73 if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL) 81 if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
74 return ret; 82 return ret;
75 83
84out:
76 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 85 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
77 86
78 kfree_skb(skb); 87 kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
208 int ret = -EEXIST; 217 int ret = -EEXIST;
209 int priority = handler->priority; 218 int priority = handler->priority;
210 219
220 if (!proto_handlers(protocol) || !netproto(protocol))
221 return -EINVAL;
222
211 mutex_lock(&xfrm4_protocol_mutex); 223 mutex_lock(&xfrm4_protocol_mutex);
212 224
213 if (!rcu_dereference_protected(*proto_handlers(protocol), 225 if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
250 struct xfrm4_protocol *t; 262 struct xfrm4_protocol *t;
251 int ret = -ENOENT; 263 int ret = -ENOENT;
252 264
265 if (!proto_handlers(protocol) || !netproto(protocol))
266 return -EINVAL;
267
253 mutex_lock(&xfrm4_protocol_mutex); 268 mutex_lock(&xfrm4_protocol_mutex);
254 269
255 for (pprev = proto_handlers(protocol); 270 for (pprev = proto_handlers(protocol);
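
The new -EINVAL/early-return checks assume proto_handlers() (and netproto()) yield NULL for anything other than ESP, AH and IPcomp. A sketch of that assumed shape, modelled on the helpers in this file:

	static struct xfrm4_protocol __rcu **proto_handlers_sketch(u8 protocol)
	{
		switch (protocol) {
		case IPPROTO_ESP:
			return &esp4_handlers;
		case IPPROTO_AH:
			return &ah4_handlers;
		case IPPROTO_COMP:
			return &ipcomp4_handlers;
		}
		return NULL;	/* unsupported: callers must bail out */
	}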
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 59f95affceb0..b2f091566f88 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
196 unsigned int off; 196 unsigned int off;
197 u16 flush = 1; 197 u16 flush = 1;
198 int proto; 198 int proto;
199 __wsum csum;
200 199
201 off = skb_gro_offset(skb); 200 off = skb_gro_offset(skb);
202 hlen = off + sizeof(*iph); 201 hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
264 263
265 NAPI_GRO_CB(skb)->flush |= flush; 264 NAPI_GRO_CB(skb)->flush |= flush;
266 265
267 csum = skb->csum; 266 skb_gro_postpull_rcsum(skb, iph, nlen);
268 skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
269 267
270 pp = ops->callbacks.gro_receive(head, skb); 268 pp = ops->callbacks.gro_receive(head, skb);
271 269
272 skb->csum = csum;
273
274out_unlock: 270out_unlock:
275 rcu_read_unlock(); 271 rcu_read_unlock();
276 272
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 40e7581374f7..fbf11562b54c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
344 344
345static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) 345static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
346{ 346{
347 if (skb->len <= mtu || skb->local_df) 347 if (skb->len <= mtu)
348 return false; 348 return false;
349 349
350 /* ipv6 conntrack defrag sets max_frag_size + local_df */
350 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) 351 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
351 return true; 352 return true;
352 353
354 if (skb->local_df)
355 return false;
356
353 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 357 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
354 return false; 358 return false;
355 359
@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1225 unsigned int maxnonfragsize, headersize; 1229 unsigned int maxnonfragsize, headersize;
1226 1230
1227 headersize = sizeof(struct ipv6hdr) + 1231 headersize = sizeof(struct ipv6hdr) +
1228 (opt ? opt->tot_len : 0) + 1232 (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1229 (dst_allfrag(&rt->dst) ? 1233 (dst_allfrag(&rt->dst) ?
1230 sizeof(struct frag_hdr) : 0) + 1234 sizeof(struct frag_hdr) : 0) +
1231 rt->rt6i_nfheader_len; 1235 rt->rt6i_nfheader_len;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b05b609f69d1..f6a66bb4114d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
1557{ 1557{
1558 u8 proto; 1558 u8 proto;
1559 1559
1560 if (!data) 1560 if (!data || !data[IFLA_IPTUN_PROTO])
1561 return 0; 1561 return 0;
1562 1562
1563 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); 1563 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b7c0f827140b..6cc9f9371cc5 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
511 u8 type, u8 code, int offset, __be32 info) 511 u8 type, u8 code, int offset, __be32 info)
512{ 512{
513 __be32 spi; 513 __be32 spi;
514 __u32 mark;
514 struct xfrm_state *x; 515 struct xfrm_state *x;
515 struct ip6_tnl *t; 516 struct ip6_tnl *t;
516 struct ip_esp_hdr *esph; 517 struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
524 if (!t) 525 if (!t)
525 return -1; 526 return -1;
526 527
528 mark = be32_to_cpu(t->parms.o_key);
529
527 switch (protocol) { 530 switch (protocol) {
528 case IPPROTO_ESP: 531 case IPPROTO_ESP:
529 esph = (struct ip_esp_hdr *)(skb->data + offset); 532 esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
545 type != NDISC_REDIRECT) 548 type != NDISC_REDIRECT)
546 return 0; 549 return 0;
547 550
548 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, 551 x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
549 spi, protocol, AF_INET6); 552 spi, protocol, AF_INET6);
550 if (!x) 553 if (!x)
551 return 0; 554 return 0;
@@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void)
1097 1100
1098 err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP); 1101 err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
1099 if (err < 0) { 1102 if (err < 0) {
1100 unregister_pernet_device(&vti6_net_ops);
1101 pr_err("%s: can't register vti6 protocol\n", __func__); 1103 pr_err("%s: can't register vti6 protocol\n", __func__);
1102 1104
1103 goto out; 1105 goto out;
@@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void)
1106 err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH); 1108 err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
1107 if (err < 0) { 1109 if (err < 0) {
1108 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); 1110 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
1109 unregister_pernet_device(&vti6_net_ops);
1110 pr_err("%s: can't register vti6 protocol\n", __func__); 1111 pr_err("%s: can't register vti6 protocol\n", __func__);
1111 1112
1112 goto out; 1113 goto out;
@@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void)
1116 if (err < 0) { 1117 if (err < 0) {
1117 xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); 1118 xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
1118 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); 1119 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
1119 unregister_pernet_device(&vti6_net_ops);
1120 pr_err("%s: can't register vti6 protocol\n", __func__); 1120 pr_err("%s: can't register vti6 protocol\n", __func__);
1121 1121
1122 goto out; 1122 goto out;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 09a22f4f36c9..ca8d4ea48a5d 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -851,7 +851,7 @@ out:
851static void ndisc_recv_na(struct sk_buff *skb) 851static void ndisc_recv_na(struct sk_buff *skb)
852{ 852{
853 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 853 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
854 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 854 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
855 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 855 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
856 u8 *lladdr = NULL; 856 u8 *lladdr = NULL;
857 u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) + 857 u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
944 /* 944 /*
945 * Change: router to host 945 * Change: router to host
946 */ 946 */
947 struct rt6_info *rt; 947 rt6_clean_tohost(dev_net(dev), saddr);
948 rt = rt6_get_dflt_router(saddr, dev);
949 if (rt)
950 ip6_del_rt(rt);
951 } 948 }
952 949
953out: 950out:
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 95f3f1da0d7f..d38e6a8d8b9f 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
30 .daddr = iph->daddr, 30 .daddr = iph->daddr,
31 .saddr = iph->saddr, 31 .saddr = iph->saddr,
32 }; 32 };
33 int err;
33 34
34 dst = ip6_route_output(net, skb->sk, &fl6); 35 dst = ip6_route_output(net, skb->sk, &fl6);
35 if (dst->error) { 36 err = dst->error;
37 if (err) {
36 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 38 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
37 LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); 39 LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
38 dst_release(dst); 40 dst_release(dst);
39 return dst->error; 41 return err;
40 } 42 }
41 43
42 /* Drop old route. */ 44 /* Drop old route. */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 004fffb6c221..6ebdb7b6744c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2234 fib6_clean_all(net, fib6_remove_prefsrc, &adni); 2234 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2235} 2235}
2236 2236
2237#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2238#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2239
2240/* Remove routers and update dst entries when a gateway turns into a host. */

2241static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2242{
2243 struct in6_addr *gateway = (struct in6_addr *)arg;
2244
2245 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2246 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2247 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2248 return -1;
2249 }
2250 return 0;
2251}
2252
2253void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2254{
2255 fib6_clean_all(net, fib6_clean_tohost, gateway);
2256}
2257
2237struct arg_dev_net { 2258struct arg_dev_net {
2238 struct net_device *dev; 2259 struct net_device *dev;
2239 struct net *net; 2260 struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
2709 if (tb[RTA_OIF]) 2730 if (tb[RTA_OIF])
2710 oif = nla_get_u32(tb[RTA_OIF]); 2731 oif = nla_get_u32(tb[RTA_OIF]);
2711 2732
2733 if (tb[RTA_MARK])
2734 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
2735
2712 if (iif) { 2736 if (iif) {
2713 struct net_device *dev; 2737 struct net_device *dev;
2714 int flags = 0; 2738 int flags = 0;
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 0d78132ff18a..8517d3cd1aed 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
42 if (NAPI_GRO_CB(skb)->flush) 42 if (NAPI_GRO_CB(skb)->flush)
43 goto skip_csum; 43 goto skip_csum;
44 44
45 wsum = skb->csum; 45 wsum = NAPI_GRO_CB(skb)->csum;
46 46
47 switch (skb->ip_summed) { 47 switch (skb->ip_summed) {
48 case CHECKSUM_NONE: 48 case CHECKSUM_NONE:
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 19ef329bdbf8..b930d080c66f 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
114 if (err) 114 if (err)
115 return err; 115 return err;
116 116
117 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
118#ifdef CONFIG_NETFILTER
119 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
120#endif
121
122 skb->protocol = htons(ETH_P_IPV6);
123 skb->local_df = 1; 117 skb->local_df = 1;
124 118
125 return x->outer_mode->output2(x, skb); 119 return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
128 122
129int xfrm6_output_finish(struct sk_buff *skb) 123int xfrm6_output_finish(struct sk_buff *skb)
130{ 124{
125 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
126 skb->protocol = htons(ETH_P_IPV6);
127
131#ifdef CONFIG_NETFILTER 128#ifdef CONFIG_NETFILTER
132 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; 129 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
133#endif 130#endif
134 131
135 skb->protocol = htons(ETH_P_IPV6);
136 return xfrm_output(skb); 132 return xfrm_output(skb);
137} 133}
138 134
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
142 struct xfrm_state *x = dst->xfrm; 138 struct xfrm_state *x = dst->xfrm;
143 int mtu; 139 int mtu;
144 140
141#ifdef CONFIG_NETFILTER
142 if (!x) {
143 IP6CB(skb)->flags |= IP6SKB_REROUTED;
144 return dst_output(skb);
145 }
146#endif
147
145 if (skb->protocol == htons(ETH_P_IPV6)) 148 if (skb->protocol == htons(ETH_P_IPV6))
146 mtu = ip6_skb_dst_mtu(skb); 149 mtu = ip6_skb_dst_mtu(skb);
147 else 150 else
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
165 168
166int xfrm6_output(struct sock *sk, struct sk_buff *skb) 169int xfrm6_output(struct sock *sk, struct sk_buff *skb)
167{ 170{
168 return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, 171 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
169 skb_dst(skb)->dev, __xfrm6_output); 172 NULL, skb_dst(skb)->dev, __xfrm6_output,
173 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
170} 174}
diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c
index 6ab989c486f7..54d13f8dbbae 100644
--- a/net/ipv6/xfrm6_protocol.c
+++ b/net/ipv6/xfrm6_protocol.c
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
50{ 50{
51 int ret; 51 int ret;
52 struct xfrm6_protocol *handler; 52 struct xfrm6_protocol *handler;
53 struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
54
55 if (!head)
56 return 0;
53 57
54 for_each_protocol_rcu(*proto_handlers(protocol), handler) 58 for_each_protocol_rcu(*proto_handlers(protocol), handler)
55 if ((ret = handler->cb_handler(skb, err)) <= 0) 59 if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
184 struct xfrm6_protocol __rcu **pprev; 188 struct xfrm6_protocol __rcu **pprev;
185 struct xfrm6_protocol *t; 189 struct xfrm6_protocol *t;
186 bool add_netproto = false; 190 bool add_netproto = false;
187
188 int ret = -EEXIST; 191 int ret = -EEXIST;
189 int priority = handler->priority; 192 int priority = handler->priority;
190 193
194 if (!proto_handlers(protocol) || !netproto(protocol))
195 return -EINVAL;
196
191 mutex_lock(&xfrm6_protocol_mutex); 197 mutex_lock(&xfrm6_protocol_mutex);
192 198
193 if (!rcu_dereference_protected(*proto_handlers(protocol), 199 if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
230 struct xfrm6_protocol *t; 236 struct xfrm6_protocol *t;
231 int ret = -ENOENT; 237 int ret = -ENOENT;
232 238
239 if (!proto_handlers(protocol) || !netproto(protocol))
240 return -EINVAL;
241
233 mutex_lock(&xfrm6_protocol_mutex); 242 mutex_lock(&xfrm6_protocol_mutex);
234 243
235 for (pprev = proto_handlers(protocol); 244 for (pprev = proto_handlers(protocol);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 01e77b0ae075..8c9d7302c846 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1830 spin_lock_irqsave(&list->lock, flags); 1830 spin_lock_irqsave(&list->lock, flags);
1831 1831
1832 while (list_skb != (struct sk_buff *)list) { 1832 while (list_skb != (struct sk_buff *)list) {
1833 if (msg->tag != IUCV_SKB_CB(list_skb)->tag) { 1833 if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
1834 this = list_skb; 1834 this = list_skb;
1835 break; 1835 break;
1836 } 1836 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 222c28b75315..f169b6ee94ee 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
317 317
318 bool started, abort, hw_begun, notified; 318 bool started, abort, hw_begun, notified;
319 bool to_be_freed; 319 bool to_be_freed;
320 bool on_channel;
320 321
321 unsigned long hw_start_time; 322 unsigned long hw_start_time;
322 323
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index dee50aefd6e8..27600a9808ba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
3598 3598
3599 sdata_lock(sdata); 3599 sdata_lock(sdata);
3600 3600
3601 if (ifmgd->auth_data) { 3601 if (ifmgd->auth_data || ifmgd->assoc_data) {
3602 const u8 *bssid = ifmgd->auth_data ?
3603 ifmgd->auth_data->bss->bssid :
3604 ifmgd->assoc_data->bss->bssid;
3605
3602 /* 3606 /*
3603 * If we are trying to authenticate while suspending, cfg80211 3607 * If we are trying to authenticate / associate while suspending,
3604 * won't know and won't actually abort those attempts, thus we 3608 * cfg80211 won't know and won't actually abort those attempts,
3605 * need to do that ourselves. 3609 * thus we need to do that ourselves.
3606 */ 3610 */
3607 ieee80211_send_deauth_disassoc(sdata, 3611 ieee80211_send_deauth_disassoc(sdata, bssid,
3608 ifmgd->auth_data->bss->bssid,
3609 IEEE80211_STYPE_DEAUTH, 3612 IEEE80211_STYPE_DEAUTH,
3610 WLAN_REASON_DEAUTH_LEAVING, 3613 WLAN_REASON_DEAUTH_LEAVING,
3611 false, frame_buf); 3614 false, frame_buf);
3612 ieee80211_destroy_auth_data(sdata, false); 3615 if (ifmgd->assoc_data)
3616 ieee80211_destroy_assoc_data(sdata, false);
3617 if (ifmgd->auth_data)
3618 ieee80211_destroy_auth_data(sdata, false);
3613 cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, 3619 cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
3614 IEEE80211_DEAUTH_FRAME_LEN); 3620 IEEE80211_DEAUTH_FRAME_LEN);
3615 } 3621 }
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 6fb38558a5e6..7a17decd27f9 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
333 container_of(work, struct ieee80211_roc_work, work.work); 333 container_of(work, struct ieee80211_roc_work, work.work);
334 struct ieee80211_sub_if_data *sdata = roc->sdata; 334 struct ieee80211_sub_if_data *sdata = roc->sdata;
335 struct ieee80211_local *local = sdata->local; 335 struct ieee80211_local *local = sdata->local;
336 bool started; 336 bool started, on_channel;
337 337
338 mutex_lock(&local->mtx); 338 mutex_lock(&local->mtx);
339 339
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
354 if (!roc->started) { 354 if (!roc->started) {
355 struct ieee80211_roc_work *dep; 355 struct ieee80211_roc_work *dep;
356 356
357 /* start this ROC */ 357 WARN_ON(local->use_chanctx);
358 ieee80211_offchannel_stop_vifs(local); 358
359 /* If actually operating on the desired channel (with at least
360 * 20 MHz channel width) don't stop all the operations but still
361 * treat it as though the ROC operation started properly, so
362 * other ROC operations won't interfere with this one.
363 */
364 roc->on_channel = roc->chan == local->_oper_chandef.chan &&
365 local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
366 local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
359 367
360 /* switch channel etc */ 368 /* start this ROC */
361 ieee80211_recalc_idle(local); 369 ieee80211_recalc_idle(local);
362 370
363 local->tmp_channel = roc->chan; 371 if (!roc->on_channel) {
364 ieee80211_hw_config(local, 0); 372 ieee80211_offchannel_stop_vifs(local);
373
374 local->tmp_channel = roc->chan;
375 ieee80211_hw_config(local, 0);
376 }
365 377
366 /* tell userspace or send frame */ 378 /* tell userspace or send frame */
367 ieee80211_handle_roc_started(roc); 379 ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
380 finish: 392 finish:
381 list_del(&roc->list); 393 list_del(&roc->list);
382 started = roc->started; 394 started = roc->started;
395 on_channel = roc->on_channel;
383 ieee80211_roc_notify_destroy(roc, !roc->abort); 396 ieee80211_roc_notify_destroy(roc, !roc->abort);
384 397
385 if (started) { 398 if (started && !on_channel) {
386 ieee80211_flush_queues(local, NULL); 399 ieee80211_flush_queues(local, NULL);
387 400
388 local->tmp_channel = NULL; 401 local->tmp_channel = NULL;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 216c45b949e5..2b608b2b70ec 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1231 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && 1231 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1232 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { 1232 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1233 sta->last_rx = jiffies; 1233 sta->last_rx = jiffies;
1234 if (ieee80211_is_data(hdr->frame_control)) { 1234 if (ieee80211_is_data(hdr->frame_control) &&
1235 !is_multicast_ether_addr(hdr->addr1)) {
1235 sta->last_rx_rate_idx = status->rate_idx; 1236 sta->last_rx_rate_idx = status->rate_idx;
1236 sta->last_rx_rate_flag = status->flag; 1237 sta->last_rx_rate_flag = status->flag;
1237 sta->last_rx_rate_vht_flag = status->vht_flag; 1238 sta->last_rx_rate_vht_flag = status->vht_flag;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 137a192e64bc..847d92f6bef6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
1148 atomic_dec(&ps->num_sta_ps); 1148 atomic_dec(&ps->num_sta_ps);
1149 1149
1150 /* This station just woke up and isn't aware of our SMPS state */ 1150 /* This station just woke up and isn't aware of our SMPS state */
1151 if (!ieee80211_smps_is_restrictive(sta->known_smps_mode, 1151 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
1152 !ieee80211_smps_is_restrictive(sta->known_smps_mode,
1152 sdata->smps_mode) && 1153 sdata->smps_mode) &&
1153 sta->known_smps_mode != sdata->bss->req_smps && 1154 sta->known_smps_mode != sdata->bss->req_smps &&
1154 sta_info_tx_streams(sta) != 1) { 1155 sta_info_tx_streams(sta) != 1) {
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 00ba90b02ab2..60cb7a665976 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
314 !is_multicast_ether_addr(hdr->addr1)) 314 !is_multicast_ether_addr(hdr->addr1))
315 txflags |= IEEE80211_RADIOTAP_F_TX_FAIL; 315 txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
316 316
317 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || 317 if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
318 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
319 txflags |= IEEE80211_RADIOTAP_F_TX_CTS; 318 txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
320 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) 319 if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
321 txflags |= IEEE80211_RADIOTAP_F_TX_RTS; 320 txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
322 321
323 put_unaligned_le16(txflags, pos); 322 put_unaligned_le16(txflags, pos);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index a0b0aea76525..cec5b60487a4 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -21,10 +21,10 @@
21 21
22#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \ 22#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
23 __field(bool, p2p) \ 23 __field(bool, p2p) \
24 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") 24 __string(vif_name, sdata->name)
25#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ 25#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
26 __entry->p2p = sdata->vif.p2p; \ 26 __entry->p2p = sdata->vif.p2p; \
27 __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name) 27 __assign_str(vif_name, sdata->name)
28#define VIF_PR_FMT " vif:%s(%d%s)" 28#define VIF_PR_FMT " vif:%s(%d%s)"
29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" 29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
30 30
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 275c94f995f7..3c365837e910 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1780 mutex_unlock(&local->mtx); 1780 mutex_unlock(&local->mtx);
1781 1781
1782 if (sched_scan_stopped) 1782 if (sched_scan_stopped)
1783 cfg80211_sched_scan_stopped(local->hw.wiphy); 1783 cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
1784 1784
1785 /* 1785 /*
1786 * If this is for hw restart things are still running. 1786 * If this is for hw restart things are still running.
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index e9e36a256165..9265adfdabfc 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
129 if (!vht_cap_ie || !sband->vht_cap.vht_supported) 129 if (!vht_cap_ie || !sband->vht_cap.vht_supported)
130 return; 130 return;
131 131
132 /* A VHT STA must support 40 MHz */ 132 /*
133 if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 133 * A VHT STA must support 40 MHz, but if we verify that here
134 return; 134 * then we break a few things - some APs (e.g. Netgear R6300v2
135 * and others based on the BCM4360 chipset) will unset this
136 * capability bit when operating in 20 MHz.
137 */
135 138
136 vht_cap->vht_supported = true; 139 vht_cap->vht_supported = true;
137 140
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 75421f2ba8be..1f4f954c4b47 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -914,7 +914,7 @@ void nf_conntrack_free(struct nf_conn *ct)
914 nf_ct_ext_destroy(ct); 914 nf_ct_ext_destroy(ct);
915 nf_ct_ext_free(ct); 915 nf_ct_ext_free(ct);
916 kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 916 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
917 smp_mb__before_atomic_dec(); 917 smp_mb__before_atomic();
918 atomic_dec(&net->ct.count); 918 atomic_dec(&net->ct.count);
919} 919}
920EXPORT_SYMBOL_GPL(nf_conntrack_free); 920EXPORT_SYMBOL_GPL(nf_conntrack_free);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ccc46fa5edbc..58579634427d 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1336#ifdef CONFIG_NF_NAT_NEEDED 1336#ifdef CONFIG_NF_NAT_NEEDED
1337 int ret; 1337 int ret;
1338 1338
1339 if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1340 return 0;
1341
1339 ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST, 1342 ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
1340 cda[CTA_NAT_DST]); 1343 cda[CTA_NAT_DST]);
1341 if (ret < 0) 1344 if (ret < 0)
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 804105391b9a..345acfb1720b 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -66,20 +66,6 @@ struct nft_jumpstack {
66 int rulenum; 66 int rulenum;
67}; 67};
68 68
69static inline void
70nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
71 struct nft_jumpstack *jumpstack, unsigned int stackptr)
72{
73 struct nft_stats __percpu *stats;
74 const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
75
76 rcu_read_lock_bh();
77 stats = rcu_dereference(nft_base_chain(chain)->stats);
78 __this_cpu_inc(stats->pkts);
79 __this_cpu_add(stats->bytes, pkt->skb->len);
80 rcu_read_unlock_bh();
81}
82
83enum nft_trace { 69enum nft_trace {
84 NFT_TRACE_RULE, 70 NFT_TRACE_RULE,
85 NFT_TRACE_RETURN, 71 NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
117unsigned int 103unsigned int
118nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops) 104nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
119{ 105{
120 const struct nft_chain *chain = ops->priv; 106 const struct nft_chain *chain = ops->priv, *basechain = chain;
121 const struct nft_rule *rule; 107 const struct nft_rule *rule;
122 const struct nft_expr *expr, *last; 108 const struct nft_expr *expr, *last;
123 struct nft_data data[NFT_REG_MAX + 1]; 109 struct nft_data data[NFT_REG_MAX + 1];
124 unsigned int stackptr = 0; 110 unsigned int stackptr = 0;
125 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; 111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
126 int rulenum = 0; 112 struct nft_stats __percpu *stats;
113 int rulenum;
127 /* 114 /*
128 * Cache cursor to avoid problems in case that the cursor is updated 115 * Cache cursor to avoid problems in case that the cursor is updated
129 * while traversing the ruleset. 116 * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
131 unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor); 118 unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
132 119
133do_chain: 120do_chain:
121 rulenum = 0;
134 rule = list_entry(&chain->rules, struct nft_rule, list); 122 rule = list_entry(&chain->rules, struct nft_rule, list);
135next_rule: 123next_rule:
136 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; 124 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
156 switch (data[NFT_REG_VERDICT].verdict) { 144 switch (data[NFT_REG_VERDICT].verdict) {
157 case NFT_BREAK: 145 case NFT_BREAK:
158 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; 146 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
159 /* fall through */ 147 continue;
160 case NFT_CONTINUE: 148 case NFT_CONTINUE:
149 if (unlikely(pkt->skb->nf_trace))
150 nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
161 continue; 151 continue;
162 } 152 }
163 break; 153 break;
@@ -183,37 +173,44 @@ next_rule:
183 jumpstack[stackptr].rule = rule; 173 jumpstack[stackptr].rule = rule;
184 jumpstack[stackptr].rulenum = rulenum; 174 jumpstack[stackptr].rulenum = rulenum;
185 stackptr++; 175 stackptr++;
186 /* fall through */ 176 chain = data[NFT_REG_VERDICT].chain;
177 goto do_chain;
187 case NFT_GOTO: 178 case NFT_GOTO:
179 if (unlikely(pkt->skb->nf_trace))
180 nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
181
188 chain = data[NFT_REG_VERDICT].chain; 182 chain = data[NFT_REG_VERDICT].chain;
189 goto do_chain; 183 goto do_chain;
190 case NFT_RETURN: 184 case NFT_RETURN:
191 if (unlikely(pkt->skb->nf_trace)) 185 if (unlikely(pkt->skb->nf_trace))
192 nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN); 186 nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
193 187 break;
194 /* fall through */
195 case NFT_CONTINUE: 188 case NFT_CONTINUE:
189 if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
190 nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
196 break; 191 break;
197 default: 192 default:
198 WARN_ON(1); 193 WARN_ON(1);
199 } 194 }
200 195
201 if (stackptr > 0) { 196 if (stackptr > 0) {
202 if (unlikely(pkt->skb->nf_trace))
203 nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
204
205 stackptr--; 197 stackptr--;
206 chain = jumpstack[stackptr].chain; 198 chain = jumpstack[stackptr].chain;
207 rule = jumpstack[stackptr].rule; 199 rule = jumpstack[stackptr].rule;
208 rulenum = jumpstack[stackptr].rulenum; 200 rulenum = jumpstack[stackptr].rulenum;
209 goto next_rule; 201 goto next_rule;
210 } 202 }
211 nft_chain_stats(chain, pkt, jumpstack, stackptr);
212 203
213 if (unlikely(pkt->skb->nf_trace)) 204 if (unlikely(pkt->skb->nf_trace))
214 nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY); 205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
206
207 rcu_read_lock_bh();
208 stats = rcu_dereference(nft_base_chain(basechain)->stats);
209 __this_cpu_inc(stats->pkts);
210 __this_cpu_add(stats->bytes, pkt->skb->len);
211 rcu_read_unlock_bh();
215 212
216 return nft_base_chain(chain)->policy; 213 return nft_base_chain(basechain)->policy;
217} 214}
218EXPORT_SYMBOL_GPL(nft_do_chain); 215EXPORT_SYMBOL_GPL(nft_do_chain);
219 216
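
The key to the rewrite above is that `chain` is reassigned on every NFT_JUMP and NFT_GOTO, so the entry point must be cached as `basechain` up front; the stats and the fallback policy are then taken from the real base chain once per packet, as the hunk shows:

	const struct nft_chain *chain = ops->priv, *basechain = chain;
	/* ... jumps and gotos may reassign `chain` ... */
	rcu_read_lock_bh();
	stats = rcu_dereference(nft_base_chain(basechain)->stats);
	__this_cpu_inc(stats->pkts);
	__this_cpu_add(stats->bytes, pkt->skb->len);
	rcu_read_unlock_bh();
	return nft_base_chain(basechain)->policy;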
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e009087620e3..23ef77c60fff 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -256,15 +256,15 @@ replay:
256#endif 256#endif
257 { 257 {
258 nfnl_unlock(subsys_id); 258 nfnl_unlock(subsys_id);
259 kfree_skb(nskb); 259 netlink_ack(skb, nlh, -EOPNOTSUPP);
260 return netlink_ack(skb, nlh, -EOPNOTSUPP); 260 return kfree_skb(nskb);
261 } 261 }
262 } 262 }
263 263
264 if (!ss->commit || !ss->abort) { 264 if (!ss->commit || !ss->abort) {
265 nfnl_unlock(subsys_id); 265 nfnl_unlock(subsys_id);
266 kfree_skb(nskb); 266 netlink_ack(skb, nlh, -EOPNOTSUPP);
267 return netlink_ack(skb, nlh, -EOPNOTSUPP); 267 return kfree_skb(skb);
268 } 268 }
269 269
270 while (skb->len >= nlmsg_total_size(0)) { 270 while (skb->len >= nlmsg_total_size(0)) {
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index b7ebe23cdedf..d67de453c35a 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -598,7 +598,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
598{ 598{
599 atomic64_set(&ic->i_ack_next, seq); 599 atomic64_set(&ic->i_ack_next, seq);
600 if (ack_required) { 600 if (ack_required) {
601 smp_mb__before_clear_bit(); 601 smp_mb__before_atomic();
602 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 602 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
603 } 603 }
604} 604}
@@ -606,7 +606,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
606static u64 rds_ib_get_ack(struct rds_ib_connection *ic) 606static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
607{ 607{
608 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 608 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
609 smp_mb__after_clear_bit(); 609 smp_mb__after_atomic();
610 610
611 return atomic64_read(&ic->i_ack_next); 611 return atomic64_read(&ic->i_ack_next);
612} 612}
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 45033358358e..aa8bf6786008 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -429,7 +429,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
429{ 429{
430 atomic64_set(&ic->i_ack_next, seq); 430 atomic64_set(&ic->i_ack_next, seq);
431 if (ack_required) { 431 if (ack_required) {
432 smp_mb__before_clear_bit(); 432 smp_mb__before_atomic();
433 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 433 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
434 } 434 }
435} 435}
@@ -437,7 +437,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
437static u64 rds_iw_get_ack(struct rds_iw_connection *ic) 437static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
438{ 438{
439 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 439 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
440 smp_mb__after_clear_bit(); 440 smp_mb__after_atomic();
441 441
442 return atomic64_read(&ic->i_ack_next); 442 return atomic64_read(&ic->i_ack_next);
443} 443}
diff --git a/net/rds/send.c b/net/rds/send.c
index a82fb660ec00..23718160d71e 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -107,7 +107,7 @@ static int acquire_in_xmit(struct rds_connection *conn)
107static void release_in_xmit(struct rds_connection *conn) 107static void release_in_xmit(struct rds_connection *conn)
108{ 108{
109 clear_bit(RDS_IN_XMIT, &conn->c_flags); 109 clear_bit(RDS_IN_XMIT, &conn->c_flags);
110 smp_mb__after_clear_bit(); 110 smp_mb__after_atomic();
111 /* 111 /*
112 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a 112 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
113 * hot path and finding waiters is very rare. We don't want to walk 113 * hot path and finding waiters is very rare. We don't want to walk
@@ -661,7 +661,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
661 661
662 /* order flag updates with spin locks */ 662 /* order flag updates with spin locks */
663 if (!list_empty(&list)) 663 if (!list_empty(&list))
664 smp_mb__after_clear_bit(); 664 smp_mb__after_atomic();
665 665
666 spin_unlock_irqrestore(&conn->c_lock, flags); 666 spin_unlock_irqrestore(&conn->c_lock, flags);
667 667
@@ -691,7 +691,7 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
691 } 691 }
692 692
693 /* order flag updates with the rs lock */ 693 /* order flag updates with the rs lock */
694 smp_mb__after_clear_bit(); 694 smp_mb__after_atomic();
695 695
696 spin_unlock_irqrestore(&rs->rs_lock, flags); 696 spin_unlock_irqrestore(&rs->rs_lock, flags);
697 697
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 81cf5a4c5e40..53b17ca0dff5 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -93,7 +93,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
93 rm->m_ack_seq = tc->t_last_sent_nxt + 93 rm->m_ack_seq = tc->t_last_sent_nxt +
94 sizeof(struct rds_header) + 94 sizeof(struct rds_header) +
95 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; 95 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
96 smp_mb__before_clear_bit(); 96 smp_mb__before_atomic();
97 set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags); 97 set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
98 tc->t_last_expected_una = rm->m_ack_seq + 1; 98 tc->t_last_expected_una = rm->m_ack_seq + 1;
99 99
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 7633a752c65e..0ad080790a32 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
99 _debug("tktlen: %x", tktlen); 99 _debug("tktlen: %x", tktlen);
100 if (tktlen > AFSTOKEN_RK_TIX_MAX) 100 if (tktlen > AFSTOKEN_RK_TIX_MAX)
101 return -EKEYREJECTED; 101 return -EKEYREJECTED;
102 if (8 * 4 + tktlen != toklen) 102 if (toklen < 8 * 4 + tktlen)
103 return -EKEYREJECTED; 103 return -EKEYREJECTED;
104 104
105 plen = sizeof(*token) + sizeof(*token->kad) + tktlen; 105 plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index eed8404443d8..f435a88d899a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
188 [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 }, 188 [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
189}; 189};
190 190
191static void tcindex_filter_result_init(struct tcindex_filter_result *r)
192{
193 memset(r, 0, sizeof(*r));
194 tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
195}
196
191static int 197static int
192tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, 198tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
193 u32 handle, struct tcindex_data *p, 199 u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
207 return err; 213 return err;
208 214
209 memcpy(&cp, p, sizeof(cp)); 215 memcpy(&cp, p, sizeof(cp));
210 memset(&new_filter_result, 0, sizeof(new_filter_result)); 216 tcindex_filter_result_init(&new_filter_result);
211 tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
212 217
218 tcindex_filter_result_init(&cr);
213 if (old_r) 219 if (old_r)
214 memcpy(&cr, r, sizeof(cr)); 220 cr.res = r->res;
215 else {
216 memset(&cr, 0, sizeof(cr));
217 tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
218 }
219 221
220 if (tb[TCA_TCINDEX_HASH]) 222 if (tb[TCA_TCINDEX_HASH])
221 cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); 223 cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
267 err = -ENOMEM; 269 err = -ENOMEM;
268 if (!cp.perfect && !cp.h) { 270 if (!cp.perfect && !cp.h) {
269 if (valid_perfect_hash(&cp)) { 271 if (valid_perfect_hash(&cp)) {
272 int i;
273
270 cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); 274 cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
271 if (!cp.perfect) 275 if (!cp.perfect)
272 goto errout; 276 goto errout;
277 for (i = 0; i < cp.hash; i++)
278 tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
279 TCA_TCINDEX_POLICE);
273 balloc = 1; 280 balloc = 1;
274 } else { 281 } else {
275 cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); 282 cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
295 tcf_bind_filter(tp, &cr.res, base); 302 tcf_bind_filter(tp, &cr.res, base);
296 } 303 }
297 304
298 tcf_exts_change(tp, &cr.exts, &e); 305 if (old_r)
306 tcf_exts_change(tp, &r->exts, &e);
307 else
308 tcf_exts_change(tp, &cr.exts, &e);
299 309
300 tcf_tree_lock(tp); 310 tcf_tree_lock(tp);
301 if (old_r && old_r != r) 311 if (old_r && old_r != r)
302 memset(old_r, 0, sizeof(*old_r)); 312 tcindex_filter_result_init(old_r);
303 313
304 memcpy(p, &cp, sizeof(cp)); 314 memcpy(p, &cp, sizeof(cp));
305 memcpy(r, &cr, sizeof(cr)); 315 r->res = cr.res;
306 316
307 if (r == &new_filter_result) { 317 if (r == &new_filter_result) {
308 struct tcindex_filter **fp; 318 struct tcindex_filter **fp;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 5285ead196c0..247e973544bf 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -296,7 +296,7 @@ static void
296rpcauth_unhash_cred_locked(struct rpc_cred *cred) 296rpcauth_unhash_cred_locked(struct rpc_cred *cred)
297{ 297{
298 hlist_del_rcu(&cred->cr_hash); 298 hlist_del_rcu(&cred->cr_hash);
299 smp_mb__before_clear_bit(); 299 smp_mb__before_atomic();
300 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); 300 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
301} 301}
302 302
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 36e431ee1c90..b6e440baccc3 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -143,7 +143,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
143 gss_get_ctx(ctx); 143 gss_get_ctx(ctx);
144 rcu_assign_pointer(gss_cred->gc_ctx, ctx); 144 rcu_assign_pointer(gss_cred->gc_ctx, ctx);
145 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 145 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
146 smp_mb__before_clear_bit(); 146 smp_mb__before_atomic();
147 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); 147 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
148} 148}
149 149
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 3513d559bc45..9761a0da964d 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -244,10 +244,10 @@ void xprt_free_bc_request(struct rpc_rqst *req)
244 dprintk("RPC: free backchannel req=%p\n", req); 244 dprintk("RPC: free backchannel req=%p\n", req);
245 245
246 req->rq_connect_cookie = xprt->connect_cookie - 1; 246 req->rq_connect_cookie = xprt->connect_cookie - 1;
247 smp_mb__before_clear_bit(); 247 smp_mb__before_atomic();
248 WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); 248 WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
249 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); 249 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
250 smp_mb__after_clear_bit(); 250 smp_mb__after_atomic();
251 251
252 if (!xprt_need_to_requeue(xprt)) { 252 if (!xprt_need_to_requeue(xprt)) {
253 /* 253 /*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index d173f79947c6..89d051de6b3e 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -230,9 +230,9 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
230{ 230{
231 xprt->snd_task = NULL; 231 xprt->snd_task = NULL;
232 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { 232 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
233 smp_mb__before_clear_bit(); 233 smp_mb__before_atomic();
234 clear_bit(XPRT_LOCKED, &xprt->state); 234 clear_bit(XPRT_LOCKED, &xprt->state);
235 smp_mb__after_clear_bit(); 235 smp_mb__after_atomic();
236 } else 236 } else
237 queue_work(rpciod_workqueue, &xprt->task_cleanup); 237 queue_work(rpciod_workqueue, &xprt->task_cleanup);
238} 238}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 25a3dcf15cae..402a7e9a16b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -893,11 +893,11 @@ static void xs_close(struct rpc_xprt *xprt)
893 xs_reset_transport(transport); 893 xs_reset_transport(transport);
894 xprt->reestablish_timeout = 0; 894 xprt->reestablish_timeout = 0;
895 895
896 smp_mb__before_clear_bit(); 896 smp_mb__before_atomic();
897 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 897 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
898 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 898 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
899 clear_bit(XPRT_CLOSING, &xprt->state); 899 clear_bit(XPRT_CLOSING, &xprt->state);
900 smp_mb__after_clear_bit(); 900 smp_mb__after_atomic();
901 xprt_disconnect_done(xprt); 901 xprt_disconnect_done(xprt);
902} 902}
903 903
@@ -1497,12 +1497,12 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
1497 1497
1498static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) 1498static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1499{ 1499{
1500 smp_mb__before_clear_bit(); 1500 smp_mb__before_atomic();
1501 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 1501 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1502 clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); 1502 clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1503 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1503 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1504 clear_bit(XPRT_CLOSING, &xprt->state); 1504 clear_bit(XPRT_CLOSING, &xprt->state);
1505 smp_mb__after_clear_bit(); 1505 smp_mb__after_atomic();
1506} 1506}
1507 1507
1508static void xs_sock_mark_closed(struct rpc_xprt *xprt) 1508static void xs_sock_mark_closed(struct rpc_xprt *xprt)
@@ -1556,10 +1556,10 @@ static void xs_tcp_state_change(struct sock *sk)
1556 xprt->connect_cookie++; 1556 xprt->connect_cookie++;
1557 xprt->reestablish_timeout = 0; 1557 xprt->reestablish_timeout = 0;
1558 set_bit(XPRT_CLOSING, &xprt->state); 1558 set_bit(XPRT_CLOSING, &xprt->state);
1559 smp_mb__before_clear_bit(); 1559 smp_mb__before_atomic();
1560 clear_bit(XPRT_CONNECTED, &xprt->state); 1560 clear_bit(XPRT_CONNECTED, &xprt->state);
1561 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1561 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1562 smp_mb__after_clear_bit(); 1562 smp_mb__after_atomic();
1563 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); 1563 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1564 break; 1564 break;
1565 case TCP_CLOSE_WAIT: 1565 case TCP_CLOSE_WAIT:
@@ -1578,9 +1578,9 @@ static void xs_tcp_state_change(struct sock *sk)
1578 case TCP_LAST_ACK: 1578 case TCP_LAST_ACK:
1579 set_bit(XPRT_CLOSING, &xprt->state); 1579 set_bit(XPRT_CLOSING, &xprt->state);
1580 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); 1580 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1581 smp_mb__before_clear_bit(); 1581 smp_mb__before_atomic();
1582 clear_bit(XPRT_CONNECTED, &xprt->state); 1582 clear_bit(XPRT_CONNECTED, &xprt->state);
1583 smp_mb__after_clear_bit(); 1583 smp_mb__after_atomic();
1584 break; 1584 break;
1585 case TCP_CLOSE: 1585 case TCP_CLOSE:
1586 xs_tcp_cancel_linger_timeout(xprt); 1586 xs_tcp_cancel_linger_timeout(xprt);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index bb7e8ba821f4..749f80c21e22 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1207,7 +1207,7 @@ restart:
1207 sk->sk_state = TCP_ESTABLISHED; 1207 sk->sk_state = TCP_ESTABLISHED;
1208 sock_hold(newsk); 1208 sock_hold(newsk);
1209 1209
1210 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */ 1210 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1211 unix_peer(sk) = newsk; 1211 unix_peer(sk) = newsk;
1212 1212
1213 unix_state_unlock(sk); 1213 unix_state_unlock(sk);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 7d09a712cb1f..88f108edfb58 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
284} 284}
285EXPORT_SYMBOL(cfg80211_sched_scan_results); 285EXPORT_SYMBOL(cfg80211_sched_scan_results);
286 286
287void cfg80211_sched_scan_stopped(struct wiphy *wiphy) 287void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
288{ 288{
289 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 289 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
290 290
291 ASSERT_RTNL();
292
291 trace_cfg80211_sched_scan_stopped(wiphy); 293 trace_cfg80211_sched_scan_stopped(wiphy);
292 294
293 rtnl_lock();
294 __cfg80211_stop_sched_scan(rdev, true); 295 __cfg80211_stop_sched_scan(rdev, true);
296}
297EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
298
299void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
300{
301 rtnl_lock();
302 cfg80211_sched_scan_stopped_rtnl(wiphy);
295 rtnl_unlock(); 303 rtnl_unlock();
296} 304}
297EXPORT_SYMBOL(cfg80211_sched_scan_stopped); 305EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
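
The split follows the usual locked/unlocked-variant convention, in sketch form (do_thing is a placeholder name, not cfg80211 API): the _rtnl flavour asserts the lock so callers already under the RTNL, such as the ieee80211_reconfig() path changed above, can use it without deadlocking, while the plain flavour takes the lock itself.

	void do_thing_rtnl(struct wiphy *wiphy)
	{
		ASSERT_RTNL();
		/* ... work that must run under the RTNL ... */
	}

	void do_thing(struct wiphy *wiphy)
	{
		rtnl_lock();
		do_thing_rtnl(wiphy);
		rtnl_unlock();
	}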
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index acdcb4a81817..3546a77033de 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
234 NULL, 0, NULL, 0, 234 NULL, 0, NULL, 0,
235 WLAN_STATUS_UNSPECIFIED_FAILURE, 235 WLAN_STATUS_UNSPECIFIED_FAILURE,
236 false, NULL); 236 false, NULL);
237 cfg80211_sme_free(wdev);
238 } 237 }
239 wdev_unlock(wdev); 238 wdev_unlock(wdev);
240 } 239 }
@@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
648 cfg80211_unhold_bss(bss_from_pub(bss)); 647 cfg80211_unhold_bss(bss_from_pub(bss));
649 cfg80211_put_bss(wdev->wiphy, bss); 648 cfg80211_put_bss(wdev->wiphy, bss);
650 } 649 }
650 cfg80211_sme_free(wdev);
651 return; 651 return;
652 } 652 }
653 653