author	Thomas Gleixner <tglx@linutronix.de>	2019-11-04 05:32:04 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2019-11-04 05:32:04 -0500
commit	ca8888d7ae6fa18454c9e4f192c56bc6c8ca9b33 (patch)
tree	c2b86b162f8fc2adf8a98f7e074500bee155d603 /net
parent	db616173d787395787ecc93eef075fa975227b10 (diff)
parent	a99d8080aaf358d5d23581244e5da23b35e340b9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
to pick up the KVM fix which is required for the NX series.
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan.c	1
-rw-r--r--	net/8021q/vlan_dev.c	33
-rw-r--r--	net/atm/common.c	2
-rw-r--r--	net/batman-adv/bat_iv_ogm.c	61
-rw-r--r--	net/batman-adv/bat_v_ogm.c	41
-rw-r--r--	net/batman-adv/hard-interface.c	2
-rw-r--r--	net/batman-adv/soft-interface.c	32
-rw-r--r--	net/batman-adv/types.h	7
-rw-r--r--	net/bluetooth/6lowpan.c	8
-rw-r--r--	net/bluetooth/af_bluetooth.c	4
-rw-r--r--	net/bridge/br_device.c	8
-rw-r--r--	net/bridge/netfilter/nf_conntrack_bridge.c	2
-rw-r--r--	net/caif/caif_socket.c	2
-rw-r--r--	net/core/datagram.c	8
-rw-r--r--	net/core/dev.c	623
-rw-r--r--	net/core/dev_addr_lists.c	12
-rw-r--r--	net/core/ethtool.c	4
-rw-r--r--	net/core/flow_dissector.c	38
-rw-r--r--	net/core/lwt_bpf.c	7
-rw-r--r--	net/core/net_namespace.c	18
-rw-r--r--	net/core/rtnetlink.c	17
-rw-r--r--	net/core/sock.c	6
-rw-r--r--	net/dccp/ipv4.c	2
-rw-r--r--	net/decnet/af_decnet.c	2
-rw-r--r--	net/dsa/master.c	5
-rw-r--r--	net/dsa/slave.c	12
-rw-r--r--	net/ieee802154/6lowpan/core.c	8
-rw-r--r--	net/ipv4/datagram.c	2
-rw-r--r--	net/ipv4/fib_frontend.c	2
-rw-r--r--	net/ipv4/inet_hashtables.c	2
-rw-r--r--	net/ipv4/ip_gre.c	4
-rw-r--r--	net/ipv4/ip_output.c	11
-rw-r--r--	net/ipv4/tcp.c	4
-rw-r--r--	net/ipv4/tcp_ipv4.c	6
-rw-r--r--	net/ipv4/udp.c	29
-rw-r--r--	net/ipv6/addrconf_core.c	1
-rw-r--r--	net/ipv6/inet6_hashtables.c	2
-rw-r--r--	net/ipv6/ip6_gre.c	4
-rw-r--r--	net/ipv6/udp.c	2
-rw-r--r--	net/l2tp/l2tp_eth.c	1
-rw-r--r--	net/netfilter/ipvs/ip_vs_app.c	12
-rw-r--r--	net/netfilter/ipvs/ip_vs_ctl.c	29
-rw-r--r--	net/netfilter/ipvs/ip_vs_pe.c	3
-rw-r--r--	net/netfilter/ipvs/ip_vs_sched.c	3
-rw-r--r--	net/netfilter/ipvs/ip_vs_sync.c	13
-rw-r--r--	net/netfilter/nf_flow_table_core.c	3
-rw-r--r--	net/netfilter/nf_tables_offload.c	2
-rw-r--r--	net/netfilter/nft_payload.c	38
-rw-r--r--	net/netrom/af_netrom.c	23
-rw-r--r--	net/nfc/llcp_sock.c	4
-rw-r--r--	net/openvswitch/datapath.c	20
-rw-r--r--	net/openvswitch/vport-internal_dev.c	11
-rw-r--r--	net/phonet/socket.c	4
-rw-r--r--	net/rose/af_rose.c	23
-rw-r--r--	net/rxrpc/ar-internal.h	1
-rw-r--r--	net/rxrpc/recvmsg.c	18
-rw-r--r--	net/sched/cls_bpf.c	8
-rw-r--r--	net/sched/sch_generic.c	19
-rw-r--r--	net/sched/sch_hhf.c	8
-rw-r--r--	net/sched/sch_sfb.c	13
-rw-r--r--	net/sched/sch_sfq.c	14
-rw-r--r--	net/sched/sch_taprio.c	2
-rw-r--r--	net/sctp/socket.c	8
-rw-r--r--	net/smc/af_smc.c	13
-rw-r--r--	net/smc/smc_core.c	2
-rw-r--r--	net/smc/smc_pnet.c	2
-rw-r--r--	net/sunrpc/backchannel_rqst.c	7
-rw-r--r--	net/sunrpc/xprt.c	5
-rw-r--r--	net/sunrpc/xprtrdma/backchannel.c	2
-rw-r--r--	net/tipc/socket.c	4
-rw-r--r--	net/unix/af_unix.c	6
-rw-r--r--	net/vmw_vsock/af_vsock.c	2
-rw-r--r--	net/wireless/chan.c	5
-rw-r--r--	net/wireless/nl80211.c	2
-rw-r--r--	net/wireless/util.c	3
-rw-r--r--	net/xdp/xdp_umem.c	6
76 files changed, 856 insertions, 517 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 54728d2eda18..d4bcfd8f95bf 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -172,7 +172,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
 	if (err < 0)
 		goto out_uninit_mvrp;
 
-	vlan->nest_level = dev_get_nest_level(real_dev) + 1;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out_uninit_mvrp;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 93eadf179123..e5bff5cc6f97 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -489,36 +489,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
-				     struct netdev_queue *txq,
-				     void *_subclass)
-{
-	lockdep_set_class_and_subclass(&txq->_xmit_lock,
-				       &vlan_netdev_xmit_lock_key,
-				       *(int *)_subclass);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
-{
-	lockdep_set_class_and_subclass(&dev->addr_list_lock,
-				       &vlan_netdev_addr_lock_key,
-				       subclass);
-	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
-}
-
-static int vlan_dev_get_lock_subclass(struct net_device *dev)
-{
-	return vlan_dev_priv(dev)->nest_level;
-}
-
 static const struct header_ops vlan_header_ops = {
 	.create	 = vlan_dev_hard_header,
 	.parse	 = eth_header_parse,
@@ -609,8 +579,6 @@ static int vlan_dev_init(struct net_device *dev)
 
 	SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-	vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
-
 	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
 	if (!vlan->vlan_pcpu_stats)
 		return -ENOMEM;
@@ -812,7 +780,6 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
 #endif
 	.ndo_fix_features	= vlan_dev_fix_features,
-	.ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 	.ndo_get_iflink		= vlan_dev_get_iflink,
 };
 
diff --git a/net/atm/common.c b/net/atm/common.c
index b7528e77997c..0ce530af534d 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -668,7 +668,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 		mask |= EPOLLHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* writable? */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index d78938e3e008..5b0b20e6da95 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -22,6 +22,8 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/pkt_sched.h>
@@ -193,14 +195,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 	unsigned char *ogm_buff;
 	u32 random_seqno;
 
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
 	atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
 	hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
 	ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
-	if (!ogm_buff)
+	if (!ogm_buff) {
+		mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 		return -ENOMEM;
+	}
 
 	hard_iface->bat_iv.ogm_buff = ogm_buff;
 
@@ -212,35 +218,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 	batadv_ogm_packet->reserved = 0;
 	batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
 
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+
 	return 0;
 }
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
 {
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
 	kfree(hard_iface->bat_iv.ogm_buff);
 	hard_iface->bat_iv.ogm_buff = NULL;
+
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
-	unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+	void *ogm_buff;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+	ogm_buff = hard_iface->bat_iv.ogm_buff;
+	if (!ogm_buff)
+		goto unlock;
+
+	batadv_ogm_packet = ogm_buff;
 	ether_addr_copy(batadv_ogm_packet->orig,
 			hard_iface->net_dev->dev_addr);
 	ether_addr_copy(batadv_ogm_packet->prev_sender,
 			hard_iface->net_dev->dev_addr);
+
+unlock:
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void
 batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
-	unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+	void *ogm_buff;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+	ogm_buff = hard_iface->bat_iv.ogm_buff;
+	if (!ogm_buff)
+		goto unlock;
+
+	batadv_ogm_packet = ogm_buff;
 	batadv_ogm_packet->ttl = BATADV_TTL;
+
+unlock:
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 /* when do we schedule our own ogm to be sent */
@@ -742,7 +772,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 	}
 }
 
-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+/**
+ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
+ * @hard_iface: interface whose ogm buffer should be transmitted
+ */
+static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
@@ -753,9 +787,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 	u16 tvlv_len = 0;
 	unsigned long send_time;
 
-	if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
-	    hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
-		return;
+	lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
 
 	/* the interface gets activated here to avoid race conditions between
 	 * the moment of activating the interface in
@@ -823,6 +855,17 @@ out:
 		batadv_hardif_put(primary_if);
 }
 
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+	if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+	    hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+		return;
+
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+	batadv_iv_ogm_schedule_buff(hard_iface);
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
 /**
  * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface
  * @orig_node: originator which reproadcasted the OGMs directly
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index dc4f7430cb5a..8033f24f506c 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
@@ -256,14 +257,12 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
- * @work: work queue item
+ * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
+ * @bat_priv: the bat priv with all the soft interface information
  */
-static void batadv_v_ogm_send(struct work_struct *work)
+static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
 {
 	struct batadv_hard_iface *hard_iface;
-	struct batadv_priv_bat_v *bat_v;
-	struct batadv_priv *bat_priv;
 	struct batadv_ogm2_packet *ogm_packet;
 	struct sk_buff *skb, *skb_tmp;
 	unsigned char *ogm_buff;
@@ -271,8 +270,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
 	u16 tvlv_len = 0;
 	int ret;
 
-	bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
-	bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+	lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
 
 	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
 		goto out;
@@ -364,6 +362,23 @@ out:
 }
 
 /**
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+	struct batadv_priv_bat_v *bat_v;
+	struct batadv_priv *bat_priv;
+
+	bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+	bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+	batadv_v_ogm_send_softif(bat_priv);
+	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
+/**
  * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
  * @work: work queue item
  *
@@ -424,11 +439,15 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
 	struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
 	struct batadv_ogm2_packet *ogm_packet;
 
+	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
 	if (!bat_priv->bat_v.ogm_buff)
-		return;
+		goto unlock;
 
 	ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
 	ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+
+unlock:
+	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
 
 /**
@@ -1050,6 +1069,8 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
 	atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
 	INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
 
+	mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
+
 	return 0;
 }
 
@@ -1061,7 +1082,11 @@
 {
 	cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
 
+	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+
 	kfree(bat_priv->bat_v.ogm_buff);
 	bat_priv->bat_v.ogm_buff = NULL;
 	bat_priv->bat_v.ogm_buff_len = 0;
+
+	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index c90e47342bb0..afb52282d5bd 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/limits.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/rculist.h>
@@ -929,6 +930,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 	INIT_LIST_HEAD(&hard_iface->list);
 	INIT_HLIST_HEAD(&hard_iface->neigh_list);
 
+	mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
 	spin_lock_init(&hard_iface->neigh_list_lock);
 	kref_init(&hard_iface->refcount);
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9cbed6f5a85a..5ee8e9a100f9 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -740,36 +740,6 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
 	return 0;
 }
 
-/* batman-adv network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key batadv_netdev_xmit_lock_key;
-static struct lock_class_key batadv_netdev_addr_lock_key;
-
-/**
- * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
- * @dev: device which owns the tx queue
- * @txq: tx queue to modify
- * @_unused: always NULL
- */
-static void batadv_set_lockdep_class_one(struct net_device *dev,
-					 struct netdev_queue *txq,
-					 void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
-}
-
-/**
- * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
- * @dev: network device to modify
- */
-static void batadv_set_lockdep_class(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
-	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
-}
-
 /**
  * batadv_softif_init_late() - late stage initialization of soft interface
  * @dev: registered network device to modify
@@ -783,8 +753,6 @@ static int batadv_softif_init_late(struct net_device *dev)
 	int ret;
 	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
 
-	batadv_set_lockdep_class(dev);
-
 	bat_priv = netdev_priv(dev);
 	bat_priv->soft_iface = dev;
 
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index be7c02aa91e2..4d7f1baee7b7 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -17,6 +17,7 @@
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/kref.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/sched.h> /* for linux/wait.h */
@@ -81,6 +82,9 @@ struct batadv_hard_iface_bat_iv {
 
 	/** @ogm_seqno: OGM sequence number - used to identify each OGM */
 	atomic_t ogm_seqno;
+
+	/** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+	struct mutex ogm_buff_mutex;
 };
 
 /**
@@ -1539,6 +1543,9 @@ struct batadv_priv_bat_v {
 	/** @ogm_seqno: OGM sequence number - used to identify each OGM */
 	atomic_t ogm_seqno;
 
+	/** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+	struct mutex ogm_buff_mutex;
+
 	/** @ogm_wq: workqueue used to schedule OGM transmissions */
 	struct delayed_work ogm_wq;
 };
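
The batman-adv hunks above all follow one pattern: every reader and writer of ogm_buff/ogm_buff_len now takes the new ogm_buff_mutex and re-checks the pointer under the lock, because the disable/free path can free and NULL the buffer concurrently. A minimal userspace sketch of the same pattern using pthreads (the toy struct iface and function names are illustrative, not the batman-adv API):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct iface {
	pthread_mutex_t ogm_buff_mutex;	/* protects ogm_buff + ogm_buff_len */
	unsigned char *ogm_buff;
	size_t ogm_buff_len;
};

/* Teardown path: free the buffer and NULL the pointer under the lock. */
static void iface_disable(struct iface *hi)
{
	pthread_mutex_lock(&hi->ogm_buff_mutex);
	free(hi->ogm_buff);
	hi->ogm_buff = NULL;
	hi->ogm_buff_len = 0;
	pthread_mutex_unlock(&hi->ogm_buff_mutex);
}

/* Update path: re-check the pointer after taking the lock, since
 * iface_disable() may have freed the buffer in the meantime.
 */
static void iface_update_mac(struct iface *hi, const unsigned char *addr)
{
	pthread_mutex_lock(&hi->ogm_buff_mutex);
	if (!hi->ogm_buff)
		goto unlock;
	memcpy(hi->ogm_buff, addr, 6);	/* write into the live buffer */
unlock:
	pthread_mutex_unlock(&hi->ogm_buff_mutex);
}

int main(void)
{
	struct iface hi = { .ogm_buff_mutex = PTHREAD_MUTEX_INITIALIZER };
	unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	hi.ogm_buff = malloc(64);
	hi.ogm_buff_len = hi.ogm_buff ? 64 : 0;
	iface_update_mac(&hi, mac);	/* writes under the lock */
	iface_disable(&hi);		/* frees and NULLs the buffer */
	iface_update_mac(&hi, mac);	/* safe no-op: pointer re-checked */
	return 0;
}
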
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index bb55d92691b0..4febc82a7c76 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -571,15 +571,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	return err < 0 ? NET_XMIT_DROP : err;
 }
 
-static int bt_dev_init(struct net_device *dev)
-{
-	netdev_lockdep_set_classes(dev);
-
-	return 0;
-}
-
 static const struct net_device_ops netdev_ops = {
-	.ndo_init		= bt_dev_init,
 	.ndo_start_xmit		= bt_xmit,
 };
 
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 94ddf19998c7..5f508c50649d 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -460,7 +460,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_state == BT_LISTEN)
 		return bt_accept_poll(sk);
 
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -470,7 +470,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= EPOLLHUP;
 
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_state == BT_CLOSED)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 681b72862c16..e804a3016902 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -24,8 +24,6 @@
 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_br_ops);
 
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -108,11 +106,6 @@ out:
 	return NETDEV_TX_OK;
 }
 
-static void br_set_lockdep_class(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
 static int br_dev_init(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -150,7 +143,6 @@ static int br_dev_init(struct net_device *dev)
 		br_mdb_hash_fini(br);
 		br_fdb_hash_fini(br);
 	}
-	br_set_lockdep_class(dev);
 
 	return err;
 }
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 506d6141e44e..809673222382 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -95,7 +95,7 @@ slow_path:
 	 * This may also be a clone skbuff, we could preserve the geometry for
 	 * the copies but probably not worth the effort.
 	 */
-	ip_frag_init(skb, hlen, ll_rs, frag_max_size, &state);
+	ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
 
 	while (state.left > 0) {
 		struct sk_buff *skb2;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 13ea920600ae..ef14da50a981 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -953,7 +953,7 @@ static __poll_t caif_poll(struct file *file,
 		mask |= EPOLLRDHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
 	    (sk->sk_shutdown & RCV_SHUTDOWN))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index c210fc116103..da3c24ed129c 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -97,7 +97,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
 	if (error)
 		goto out_err;
 
-	if (sk->sk_receive_queue.prev != skb)
+	if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
 		goto out;
 
 	/* Socket shut down? */
@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
 			break;
 
 		sk_busy_loop(sk, flags & MSG_DONTWAIT);
-	} while (sk->sk_receive_queue.prev != *last);
+	} while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
 	error = -EAGAIN;
 
@@ -767,7 +767,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
 	mask = 0;
 
 	/* exceptional events? */
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -777,7 +777,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
 		mask |= EPOLLHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
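
The skb_queue_empty() to skb_queue_empty_lockless() and sk_receive_queue.prev to READ_ONCE(...) conversions above (and in the atm, bluetooth and caif poll handlers earlier in this series) share one motivation: these readers run without holding the queue spinlock, so the loads must be marked to keep the compiler from tearing or caching the value. A userspace sketch of the idea; the READ_ONCE definition mirrors the kernel's volatile-cast approach, and the list type is a stand-in, not the kernel's sk_buff_head:

#include <stdio.h>

/* Stand-in for the kernel's READ_ONCE(): a volatile access forces the
 * compiler to emit exactly one untorn load of the current value.
 * (__typeof__ is a GCC/Clang extension, as in the kernel proper.)
 */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct list_head {
	struct list_head *next, *prev;
};

/* Lockless emptiness test in the style of skb_queue_empty_lockless():
 * an empty circular queue's head points to itself, and one racy but
 * untorn read of ->next is a best-effort answer good enough for poll().
 */
static int queue_empty_lockless(struct list_head *head)
{
	return READ_ONCE(head->next) == head;
}

int main(void)
{
	struct list_head q = { .next = &q, .prev = &q };

	printf("empty: %d\n", queue_empty_lockless(&q));	/* prints 1 */
	return 0;
}
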
diff --git a/net/core/dev.c b/net/core/dev.c
index bf3ed413abaf..99ac84ff398f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -146,6 +146,7 @@
 #include "net-sysfs.h"
 
 #define MAX_GRO_SKBS 8
+#define MAX_NEST_DEV 8
 
 /* This should be increased if a protocol with a bigger head is added. */
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
@@ -276,88 +277,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 EXPORT_PER_CPU_SYMBOL(softnet_data);
 
-#ifdef CONFIG_LOCKDEP
-/*
- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
- * according to dev->type
- */
-static const unsigned short netdev_lock_type[] = {
-	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
-	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
-	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
-	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
-	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
-	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
-	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
-	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
-	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
-	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
-	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
-	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
-	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
-	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
-
-static const char *const netdev_lock_name[] = {
-	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
-	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
-	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
-
-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
-
-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
-		if (netdev_lock_type[i] == dev_type)
-			return i;
-	/* the last key is used by default */
-	return ARRAY_SIZE(netdev_lock_type) - 1;
-}
-
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-						 unsigned short dev_type)
-{
-	int i;
-
-	i = netdev_lock_pos(dev_type);
-	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
-				   netdev_lock_name[i]);
-}
-
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-	int i;
-
-	i = netdev_lock_pos(dev->type);
-	lockdep_set_class_and_name(&dev->addr_list_lock,
-				   &netdev_addr_lock_key[i],
-				   netdev_lock_name[i]);
-}
-#else
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-						 unsigned short dev_type)
-{
-}
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-}
-#endif
-
 /*******************************************************************************
  *
  *		Protocol management and registration routines
@@ -6489,6 +6408,9 @@ struct netdev_adjacent {
 	/* upper master flag, there can only be one master device per list */
 	bool master;
 
+	/* lookup ignore flag */
+	bool ignore;
+
 	/* counter for the number of times this device was added to us */
 	u16 ref_nr;
 
@@ -6511,7 +6433,7 @@ static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
 	return NULL;
 }
 
-static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
+static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
 {
 	struct net_device *dev = data;
 
@@ -6532,7 +6454,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
 	ASSERT_RTNL();
 
-	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
 					     upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
@@ -6550,7 +6472,7 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
 				  struct net_device *upper_dev)
 {
-	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
 					       upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
@@ -6594,6 +6516,22 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
+static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
+{
+	struct netdev_adjacent *upper;
+
+	ASSERT_RTNL();
+
+	if (list_empty(&dev->adj_list.upper))
+		return NULL;
+
+	upper = list_first_entry(&dev->adj_list.upper,
+				 struct netdev_adjacent, list);
+	if (likely(upper->master) && !upper->ignore)
+		return upper->dev;
+	return NULL;
+}
+
 /**
  * netdev_has_any_lower_dev - Check if device is linked to some device
  * @dev: device
@@ -6644,6 +6582,23 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
 
+static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
+						  struct list_head **iter,
+						  bool *ignore)
+{
+	struct netdev_adjacent *upper;
+
+	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+	if (&upper->list == &dev->adj_list.upper)
+		return NULL;
+
+	*iter = &upper->list;
+	*ignore = upper->ignore;
+
+	return upper->dev;
+}
+
 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
 						     struct list_head **iter)
 {
@@ -6661,34 +6616,111 @@ static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
 	return upper->dev;
 }
 
+static int __netdev_walk_all_upper_dev(struct net_device *dev,
+				       int (*fn)(struct net_device *dev,
+						 void *data),
+				       void *data)
+{
+	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
+	bool ignore;
+
+	now = dev;
+	iter = &dev->adj_list.upper;
+
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			udev = __netdev_next_upper_dev(now, &iter, &ignore);
+			if (!udev)
+				break;
+			if (ignore)
+				continue;
+
+			next = udev;
+			niter = &udev->adj_list.upper;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
+
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
+	}
+
+	return 0;
+}
+
 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
 				  int (*fn)(struct net_device *dev,
 					    void *data),
 				  void *data)
 {
-	struct net_device *udev;
-	struct list_head *iter;
-	int ret;
+	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
+
+	now = dev;
+	iter = &dev->adj_list.upper;
 
-	for (iter = &dev->adj_list.upper,
-	     udev = netdev_next_upper_dev_rcu(dev, &iter);
-	     udev;
-	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
-		/* first is the upper device itself */
-		ret = fn(udev, data);
-		if (ret)
-			return ret;
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			udev = netdev_next_upper_dev_rcu(now, &iter);
+			if (!udev)
+				break;
+
+			next = udev;
+			niter = &udev->adj_list.upper;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
 
-		/* then look at all of its upper devices */
-		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
-		if (ret)
-			return ret;
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
 
+static bool __netdev_has_upper_dev(struct net_device *dev,
+				   struct net_device *upper_dev)
+{
+	ASSERT_RTNL();
+
+	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
+					   upper_dev);
+}
+
 /**
  * netdev_lower_get_next_private - Get the next ->private from the
 * lower neighbour list
@@ -6785,34 +6817,119 @@ static struct net_device *netdev_next_lower_dev(struct net_device *dev,
 	return lower->dev;
 }
 
+static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
+						  struct list_head **iter,
+						  bool *ignore)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->adj_list.lower)
+		return NULL;
+
+	*iter = &lower->list;
+	*ignore = lower->ignore;
+
+	return lower->dev;
+}
+
 int netdev_walk_all_lower_dev(struct net_device *dev,
 			      int (*fn)(struct net_device *dev,
 					void *data),
 			      void *data)
 {
-	struct net_device *ldev;
-	struct list_head *iter;
-	int ret;
+	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
+
+	now = dev;
+	iter = &dev->adj_list.lower;
 
-	for (iter = &dev->adj_list.lower,
-	     ldev = netdev_next_lower_dev(dev, &iter);
-	     ldev;
-	     ldev = netdev_next_lower_dev(dev, &iter)) {
-		/* first is the lower device itself */
-		ret = fn(ldev, data);
-		if (ret)
-			return ret;
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			ldev = netdev_next_lower_dev(now, &iter);
+			if (!ldev)
+				break;
+
+			next = ldev;
+			niter = &ldev->adj_list.lower;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
 
-		/* then look at all of its lower devices */
-		ret = netdev_walk_all_lower_dev(ldev, fn, data);
-		if (ret)
-			return ret;
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
 
+static int __netdev_walk_all_lower_dev(struct net_device *dev,
+				       int (*fn)(struct net_device *dev,
+						 void *data),
+				       void *data)
+{
+	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
+	bool ignore;
+
+	now = dev;
+	iter = &dev->adj_list.lower;
+
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
+			if (!ldev)
+				break;
+			if (ignore)
+				continue;
+
+			next = ldev;
+			niter = &ldev->adj_list.lower;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
+
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
+	}
+
+	return 0;
+}
+
 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
 						    struct list_head **iter)
 {
@@ -6827,28 +6944,99 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
 	return lower->dev;
 }
 
-int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
-				  int (*fn)(struct net_device *dev,
-					    void *data),
-				  void *data)
+static u8 __netdev_upper_depth(struct net_device *dev)
+{
+	struct net_device *udev;
+	struct list_head *iter;
+	u8 max_depth = 0;
+	bool ignore;
+
+	for (iter = &dev->adj_list.upper,
+	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
+	     udev;
+	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
+		if (ignore)
+			continue;
+		if (max_depth < udev->upper_level)
+			max_depth = udev->upper_level;
+	}
+
+	return max_depth;
+}
+
+static u8 __netdev_lower_depth(struct net_device *dev)
 {
 	struct net_device *ldev;
 	struct list_head *iter;
-	int ret;
+	u8 max_depth = 0;
+	bool ignore;
 
 	for (iter = &dev->adj_list.lower,
-	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
+	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
 	     ldev;
-	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
-		/* first is the lower device itself */
-		ret = fn(ldev, data);
-		if (ret)
-			return ret;
+	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
+		if (ignore)
+			continue;
+		if (max_depth < ldev->lower_level)
+			max_depth = ldev->lower_level;
+	}
 
-		/* then look at all of its lower devices */
-		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
-		if (ret)
-			return ret;
+	return max_depth;
+}
+
+static int __netdev_update_upper_level(struct net_device *dev, void *data)
+{
+	dev->upper_level = __netdev_upper_depth(dev) + 1;
+	return 0;
+}
+
+static int __netdev_update_lower_level(struct net_device *dev, void *data)
+{
+	dev->lower_level = __netdev_lower_depth(dev) + 1;
+	return 0;
+}
+
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+				  int (*fn)(struct net_device *dev,
+					    void *data),
+				  void *data)
+{
+	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
+
+	now = dev;
+	iter = &dev->adj_list.lower;
+
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			ldev = netdev_next_lower_dev_rcu(now, &iter);
+			if (!ldev)
+				break;
+
+			next = ldev;
+			niter = &ldev->adj_list.lower;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
+
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
 	}
 
 	return 0;
@@ -6952,6 +7140,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 	adj->master = master;
 	adj->ref_nr = 1;
 	adj->private = private;
+	adj->ignore = false;
 	dev_hold(adj_dev);
 
 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
@@ -7102,14 +7291,17 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 		return -EBUSY;
 
 	/* To prevent loops, check if dev is not upper device to upper_dev. */
-	if (netdev_has_upper_dev(upper_dev, dev))
+	if (__netdev_has_upper_dev(upper_dev, dev))
 		return -EBUSY;
 
+	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
+		return -EMLINK;
+
 	if (!master) {
-		if (netdev_has_upper_dev(dev, upper_dev))
+		if (__netdev_has_upper_dev(dev, upper_dev))
 			return -EEXIST;
 	} else {
-		master_dev = netdev_master_upper_dev_get(dev);
+		master_dev = __netdev_master_upper_dev_get(dev);
 		if (master_dev)
 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
 	}
@@ -7131,6 +7323,13 @@
 	if (ret)
 		goto rollback;
 
+	__netdev_update_upper_level(dev, NULL);
+	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+	__netdev_update_lower_level(upper_dev, NULL);
+	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+				    NULL);
+
 	return 0;
 
 rollback:
@@ -7213,9 +7412,96 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
 				      &changeupper_info.info);
+
+	__netdev_update_upper_level(dev, NULL);
+	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+	__netdev_update_lower_level(upper_dev, NULL);
+	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+				    NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
+				      struct net_device *lower_dev,
+				      bool val)
+{
+	struct netdev_adjacent *adj;
+
+	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
+	if (adj)
+		adj->ignore = val;
+
+	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
+	if (adj)
+		adj->ignore = val;
+}
+
+static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
+					struct net_device *lower_dev)
+{
+	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
+}
+
+static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
+				       struct net_device *lower_dev)
+{
+	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
+}
+
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+				   struct net_device *new_dev,
+				   struct net_device *dev,
+				   struct netlink_ext_ack *extack)
+{
+	int err;
+
+	if (!new_dev)
+		return 0;
+
+	if (old_dev && new_dev != old_dev)
+		netdev_adjacent_dev_disable(dev, old_dev);
+
+	err = netdev_upper_dev_link(new_dev, dev, extack);
+	if (err) {
+		if (old_dev && new_dev != old_dev)
+			netdev_adjacent_dev_enable(dev, old_dev);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(netdev_adjacent_change_prepare);
+
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+				   struct net_device *new_dev,
+				   struct net_device *dev)
+{
+	if (!new_dev || !old_dev)
+		return;
+
+	if (new_dev == old_dev)
+		return;
+
+	netdev_adjacent_dev_enable(dev, old_dev);
+	netdev_upper_dev_unlink(old_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_commit);
+
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+				  struct net_device *new_dev,
+				  struct net_device *dev)
+{
+	if (!new_dev)
+		return;
+
+	if (old_dev && new_dev != old_dev)
+		netdev_adjacent_dev_enable(dev, old_dev);
+
+	netdev_upper_dev_unlink(new_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_abort);
+
 /**
  * netdev_bonding_info_change - Dispatch event about slave change
  * @dev: device
@@ -7329,25 +7615,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev)
-{
-	struct net_device *lower = NULL;
-	struct list_head *iter;
-	int max_nest = -1;
-	int nest;
-
-	ASSERT_RTNL();
-
-	netdev_for_each_lower_dev(dev, lower, iter) {
-		nest = dev_get_nest_level(lower);
-		if (max_nest < nest)
-			max_nest = nest;
-	}
-
-	return max_nest + 1;
-}
-EXPORT_SYMBOL(dev_get_nest_level);
-
 /**
  * netdev_lower_change - Dispatch event about lower device state change
  * @lower_dev: device
@@ -8154,7 +8421,8 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		return -EINVAL;
 	}
 
-	if (prog->aux->id == prog_id) {
+	/* prog->aux->id may be 0 for orphaned device-bound progs */
+	if (prog->aux->id && prog->aux->id == prog_id) {
 		bpf_prog_put(prog);
 		return 0;
 	}
@@ -8619,7 +8887,7 @@ static void netdev_init_one_queue(struct net_device *dev,
 {
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
-	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+	lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
 	queue->xmit_lock_owner = -1;
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
@@ -8666,6 +8934,43 @@ void netif_tx_stop_all_queues(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_tx_stop_all_queues);
 
+static void netdev_register_lockdep_key(struct net_device *dev)
+{
+	lockdep_register_key(&dev->qdisc_tx_busylock_key);
+	lockdep_register_key(&dev->qdisc_running_key);
+	lockdep_register_key(&dev->qdisc_xmit_lock_key);
+	lockdep_register_key(&dev->addr_list_lock_key);
+}
+
+static void netdev_unregister_lockdep_key(struct net_device *dev)
+{
+	lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
+	lockdep_unregister_key(&dev->qdisc_running_key);
+	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+	lockdep_unregister_key(&dev->addr_list_lock_key);
+}
+
+void netdev_update_lockdep_key(struct net_device *dev)
+{
+	struct netdev_queue *queue;
+	int i;
+
+	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+	lockdep_unregister_key(&dev->addr_list_lock_key);
+
+	lockdep_register_key(&dev->qdisc_xmit_lock_key);
+	lockdep_register_key(&dev->addr_list_lock_key);
+
+	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		queue = netdev_get_tx_queue(dev, i);
+
+		lockdep_set_class(&queue->_xmit_lock,
+				  &dev->qdisc_xmit_lock_key);
+	}
+}
+EXPORT_SYMBOL(netdev_update_lockdep_key);
+
 /**
  * register_netdevice - register a network device
  * @dev: device to register
@@ -8700,7 +9005,7 @@ int register_netdevice(struct net_device *dev)
 	BUG_ON(!net);
 
 	spin_lock_init(&dev->addr_list_lock);
-	netdev_set_addr_lockdep_class(dev);
+	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
 
 	ret = dev_get_valid_name(net, dev, dev->name);
 	if (ret < 0)
@@ -9210,8 +9515,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	dev_net_set(dev, &init_net);
 
+	netdev_register_lockdep_key(dev);
+
 	dev->gso_max_size = GSO_MAX_SIZE;
 	dev->gso_max_segs = GSO_MAX_SEGS;
+	dev->upper_level = 1;
+	dev->lower_level = 1;
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
@@ -9292,6 +9601,8 @@ void free_netdev(struct net_device *dev)
 	free_percpu(dev->pcpu_refcnt);
 	dev->pcpu_refcnt = NULL;
 
+	netdev_unregister_lockdep_key(dev);
+
 	/* Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		netdev_freemem(dev);
@@ -9460,7 +9771,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
9460 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 9771 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
9461 rcu_barrier(); 9772 rcu_barrier();
9462 9773
9463 new_nsid = peernet2id_alloc(dev_net(dev), net); 9774 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
9464 /* If there is an ifindex conflict assign a new one */ 9775 /* If there is an ifindex conflict assign a new one */
9465 if (__dev_get_by_index(net, dev->ifindex)) 9776 if (__dev_get_by_index(net, dev->ifindex))
9466 new_ifindex = dev_new_index(net); 9777 new_ifindex = dev_new_index(net);
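
The dev.c hunks above replace the old static, per-device-type lockdep classes with keys registered per device: netdev_register_lockdep_key() at alloc time, netdev_unregister_lockdep_key() at free, and netdev_update_lockdep_key() when a device leaves a master so its class reflects the new nesting. A minimal kernel-style sketch of that lifecycle, with hypothetical names, assuming a build with CONFIG_LOCKDEP:

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    struct example_dev {
            spinlock_t lock;
            struct lock_class_key lock_key;  /* one dynamic key per instance */
    };

    static void example_dev_init(struct example_dev *ed)
    {
            /* With a per-instance class, taking a->lock while holding
             * b->lock is two different classes to lockdep, so device
             * stacking no longer needs *_nested() annotations.
             */
            lockdep_register_key(&ed->lock_key);
            spin_lock_init(&ed->lock);
            lockdep_set_class(&ed->lock, &ed->lock_key);
    }

    static void example_dev_destroy(struct example_dev *ed)
    {
            /* The key must stay registered as long as any lock uses it. */
            lockdep_unregister_key(&ed->lock_key);
    }

This is also why the dsa, 6lowpan, l2tp and netrom hunks further down can delete their private lock classes, and why dev_addr_lists.c just below drops netif_addr_lock_nested() in favour of plain netif_addr_lock().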
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 6393ba930097..2f949b5a1eb9 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -637,7 +637,7 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
637 if (to->addr_len != from->addr_len) 637 if (to->addr_len != from->addr_len)
638 return -EINVAL; 638 return -EINVAL;
639 639
640 netif_addr_lock_nested(to); 640 netif_addr_lock(to);
641 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); 641 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
642 if (!err) 642 if (!err)
643 __dev_set_rx_mode(to); 643 __dev_set_rx_mode(to);
@@ -667,7 +667,7 @@ int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
667 if (to->addr_len != from->addr_len) 667 if (to->addr_len != from->addr_len)
668 return -EINVAL; 668 return -EINVAL;
669 669
670 netif_addr_lock_nested(to); 670 netif_addr_lock(to);
671 err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len); 671 err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
672 if (!err) 672 if (!err)
673 __dev_set_rx_mode(to); 673 __dev_set_rx_mode(to);
@@ -691,7 +691,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
691 return; 691 return;
692 692
693 netif_addr_lock_bh(from); 693 netif_addr_lock_bh(from);
694 netif_addr_lock_nested(to); 694 netif_addr_lock(to);
695 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); 695 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
696 __dev_set_rx_mode(to); 696 __dev_set_rx_mode(to);
697 netif_addr_unlock(to); 697 netif_addr_unlock(to);
@@ -858,7 +858,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
858 if (to->addr_len != from->addr_len) 858 if (to->addr_len != from->addr_len)
859 return -EINVAL; 859 return -EINVAL;
860 860
861 netif_addr_lock_nested(to); 861 netif_addr_lock(to);
862 err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); 862 err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
863 if (!err) 863 if (!err)
864 __dev_set_rx_mode(to); 864 __dev_set_rx_mode(to);
@@ -888,7 +888,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
888 if (to->addr_len != from->addr_len) 888 if (to->addr_len != from->addr_len)
889 return -EINVAL; 889 return -EINVAL;
890 890
891 netif_addr_lock_nested(to); 891 netif_addr_lock(to);
892 err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len); 892 err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
893 if (!err) 893 if (!err)
894 __dev_set_rx_mode(to); 894 __dev_set_rx_mode(to);
@@ -912,7 +912,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
912 return; 912 return;
913 913
914 netif_addr_lock_bh(from); 914 netif_addr_lock_bh(from);
915 netif_addr_lock_nested(to); 915 netif_addr_lock(to);
916 __hw_addr_unsync(&to->mc, &from->mc, to->addr_len); 916 __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
917 __dev_set_rx_mode(to); 917 __dev_set_rx_mode(to);
918 netif_addr_unlock(to); 918 netif_addr_unlock(to);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index c763106c73fc..cd9bc67381b2 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1396,11 +1396,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
1396 1396
1397static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) 1397static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
1398{ 1398{
1399 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 1399 struct ethtool_wolinfo wol;
1400 1400
1401 if (!dev->ethtool_ops->get_wol) 1401 if (!dev->ethtool_ops->get_wol)
1402 return -EOPNOTSUPP; 1402 return -EOPNOTSUPP;
1403 1403
1404 memset(&wol, 0, sizeof(struct ethtool_wolinfo));
1405 wol.cmd = ETHTOOL_GWOL;
1404 dev->ethtool_ops->get_wol(dev, &wol); 1406 dev->ethtool_ops->get_wol(dev, &wol);
1405 1407
1406 if (copy_to_user(useraddr, &wol, sizeof(wol))) 1408 if (copy_to_user(useraddr, &wol, sizeof(wol)))
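
The ethtool_get_wol() change looks cosmetic but is an infoleak fix: a designated initializer zeroes the remaining named members yet leaves padding bytes unspecified, and the whole struct is later copied to userspace; memset() clears every byte, padding included. A small standalone C illustration (wol_like is a hypothetical stand-in for ethtool_wolinfo):

    #include <stdio.h>
    #include <string.h>

    struct wol_like {
            char cmd;       /* compilers typically insert padding after this */
            int  wolopts;
    };

    int main(void)
    {
            struct wol_like a = { .cmd = 1 };  /* members zeroed; padding unspecified */
            struct wol_like b;

            memset(&b, 0, sizeof(b));          /* every byte cleared, padding included */
            b.cmd = 1;

            /* Copying 'a' wholesale to an untrusted destination may leak
             * whatever the padding bytes happened to contain; 'b' cannot.
             */
            printf("struct size: %zu bytes\n", sizeof(struct wol_like));
            return 0;
    }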
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 7c09d87d3269..68eda10d0680 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1350,30 +1350,21 @@ out_bad:
1350} 1350}
1351EXPORT_SYMBOL(__skb_flow_dissect); 1351EXPORT_SYMBOL(__skb_flow_dissect);
1352 1352
1353static u32 hashrnd __read_mostly; 1353static siphash_key_t hashrnd __read_mostly;
1354static __always_inline void __flow_hash_secret_init(void) 1354static __always_inline void __flow_hash_secret_init(void)
1355{ 1355{
1356 net_get_random_once(&hashrnd, sizeof(hashrnd)); 1356 net_get_random_once(&hashrnd, sizeof(hashrnd));
1357} 1357}
1358 1358
1359static __always_inline u32 __flow_hash_words(const u32 *words, u32 length, 1359static const void *flow_keys_hash_start(const struct flow_keys *flow)
1360 u32 keyval)
1361{ 1360{
1362 return jhash2(words, length, keyval); 1361 BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
1363} 1362 return &flow->FLOW_KEYS_HASH_START_FIELD;
1364
1365static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
1366{
1367 const void *p = flow;
1368
1369 BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
1370 return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
1371} 1363}
1372 1364
1373static inline size_t flow_keys_hash_length(const struct flow_keys *flow) 1365static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
1374{ 1366{
1375 size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs); 1367 size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
1376 BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
1377 BUILD_BUG_ON(offsetof(typeof(*flow), addrs) != 1368 BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
1378 sizeof(*flow) - sizeof(flow->addrs)); 1369 sizeof(*flow) - sizeof(flow->addrs));
1379 1370
@@ -1388,7 +1379,7 @@ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
1388 diff -= sizeof(flow->addrs.tipckey); 1379 diff -= sizeof(flow->addrs.tipckey);
1389 break; 1380 break;
1390 } 1381 }
1391 return (sizeof(*flow) - diff) / sizeof(u32); 1382 return sizeof(*flow) - diff;
1392} 1383}
1393 1384
1394__be32 flow_get_u32_src(const struct flow_keys *flow) 1385__be32 flow_get_u32_src(const struct flow_keys *flow)
@@ -1454,14 +1445,15 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
1454 } 1445 }
1455} 1446}
1456 1447
1457static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) 1448static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
1449 const siphash_key_t *keyval)
1458{ 1450{
1459 u32 hash; 1451 u32 hash;
1460 1452
1461 __flow_hash_consistentify(keys); 1453 __flow_hash_consistentify(keys);
1462 1454
1463 hash = __flow_hash_words(flow_keys_hash_start(keys), 1455 hash = siphash(flow_keys_hash_start(keys),
1464 flow_keys_hash_length(keys), keyval); 1456 flow_keys_hash_length(keys), keyval);
1465 if (!hash) 1457 if (!hash)
1466 hash = 1; 1458 hash = 1;
1467 1459
@@ -1471,12 +1463,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
1471u32 flow_hash_from_keys(struct flow_keys *keys) 1463u32 flow_hash_from_keys(struct flow_keys *keys)
1472{ 1464{
1473 __flow_hash_secret_init(); 1465 __flow_hash_secret_init();
1474 return __flow_hash_from_keys(keys, hashrnd); 1466 return __flow_hash_from_keys(keys, &hashrnd);
1475} 1467}
1476EXPORT_SYMBOL(flow_hash_from_keys); 1468EXPORT_SYMBOL(flow_hash_from_keys);
1477 1469
1478static inline u32 ___skb_get_hash(const struct sk_buff *skb, 1470static inline u32 ___skb_get_hash(const struct sk_buff *skb,
1479 struct flow_keys *keys, u32 keyval) 1471 struct flow_keys *keys,
1472 const siphash_key_t *keyval)
1480{ 1473{
1481 skb_flow_dissect_flow_keys(skb, keys, 1474 skb_flow_dissect_flow_keys(skb, keys,
1482 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); 1475 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -1524,7 +1517,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
1524 &keys, NULL, 0, 0, 0, 1517 &keys, NULL, 0, 0, 0,
1525 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); 1518 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1526 1519
1527 return __flow_hash_from_keys(&keys, hashrnd); 1520 return __flow_hash_from_keys(&keys, &hashrnd);
1528} 1521}
1529EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); 1522EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
1530 1523
@@ -1544,13 +1537,14 @@ void __skb_get_hash(struct sk_buff *skb)
1544 1537
1545 __flow_hash_secret_init(); 1538 __flow_hash_secret_init();
1546 1539
1547 hash = ___skb_get_hash(skb, &keys, hashrnd); 1540 hash = ___skb_get_hash(skb, &keys, &hashrnd);
1548 1541
1549 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); 1542 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1550} 1543}
1551EXPORT_SYMBOL(__skb_get_hash); 1544EXPORT_SYMBOL(__skb_get_hash);
1552 1545
1553__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb) 1546__u32 skb_get_hash_perturb(const struct sk_buff *skb,
1547 const siphash_key_t *perturb)
1554{ 1548{
1555 struct flow_keys keys; 1549 struct flow_keys keys;
1556 1550
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index f93785e5833c..74cfb8b5ab33 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -88,11 +88,16 @@ static int bpf_lwt_input_reroute(struct sk_buff *skb)
88 int err = -EINVAL; 88 int err = -EINVAL;
89 89
90 if (skb->protocol == htons(ETH_P_IP)) { 90 if (skb->protocol == htons(ETH_P_IP)) {
91 struct net_device *dev = skb_dst(skb)->dev;
91 struct iphdr *iph = ip_hdr(skb); 92 struct iphdr *iph = ip_hdr(skb);
92 93
94 dev_hold(dev);
95 skb_dst_drop(skb);
93 err = ip_route_input_noref(skb, iph->daddr, iph->saddr, 96 err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
94 iph->tos, skb_dst(skb)->dev); 97 iph->tos, dev);
98 dev_put(dev);
95 } else if (skb->protocol == htons(ETH_P_IPV6)) { 99 } else if (skb->protocol == htons(ETH_P_IPV6)) {
100 skb_dst_drop(skb);
96 err = ipv6_stub->ipv6_route_input(skb); 101 err = ipv6_stub->ipv6_route_input(skb);
97 } else { 102 } else {
98 err = -EAFNOSUPPORT; 103 err = -EAFNOSUPPORT;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6d3e4821b02d..39402840025e 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -246,11 +246,11 @@ static int __peernet2id(struct net *net, struct net *peer)
246} 246}
247 247
248static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid, 248static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
249 struct nlmsghdr *nlh); 249 struct nlmsghdr *nlh, gfp_t gfp);
250/* This function returns the id of a peer netns. If no id is assigned, one will 250/* This function returns the id of a peer netns. If no id is assigned, one will
251 * be allocated and returned. 251 * be allocated and returned.
252 */ 252 */
253int peernet2id_alloc(struct net *net, struct net *peer) 253int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
254{ 254{
255 bool alloc = false, alive = false; 255 bool alloc = false, alive = false;
256 int id; 256 int id;
@@ -269,7 +269,7 @@ int peernet2id_alloc(struct net *net, struct net *peer)
269 id = __peernet2id_alloc(net, peer, &alloc); 269 id = __peernet2id_alloc(net, peer, &alloc);
270 spin_unlock_bh(&net->nsid_lock); 270 spin_unlock_bh(&net->nsid_lock);
271 if (alloc && id >= 0) 271 if (alloc && id >= 0)
272 rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL); 272 rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
273 if (alive) 273 if (alive)
274 put_net(peer); 274 put_net(peer);
275 return id; 275 return id;
@@ -479,6 +479,7 @@ struct net *copy_net_ns(unsigned long flags,
479 479
480 if (rv < 0) { 480 if (rv < 0) {
481put_userns: 481put_userns:
482 key_remove_domain(net->key_domain);
482 put_user_ns(user_ns); 483 put_user_ns(user_ns);
483 net_drop_ns(net); 484 net_drop_ns(net);
484dec_ucounts: 485dec_ucounts:
@@ -533,7 +534,8 @@ static void unhash_nsid(struct net *net, struct net *last)
533 idr_remove(&tmp->netns_ids, id); 534 idr_remove(&tmp->netns_ids, id);
534 spin_unlock_bh(&tmp->nsid_lock); 535 spin_unlock_bh(&tmp->nsid_lock);
535 if (id >= 0) 536 if (id >= 0)
536 rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL); 537 rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
538 GFP_KERNEL);
537 if (tmp == last) 539 if (tmp == last)
538 break; 540 break;
539 } 541 }
@@ -766,7 +768,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
766 spin_unlock_bh(&net->nsid_lock); 768 spin_unlock_bh(&net->nsid_lock);
767 if (err >= 0) { 769 if (err >= 0) {
768 rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid, 770 rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
769 nlh); 771 nlh, GFP_KERNEL);
770 err = 0; 772 err = 0;
771 } else if (err == -ENOSPC && nsid >= 0) { 773 } else if (err == -ENOSPC && nsid >= 0) {
772 err = -EEXIST; 774 err = -EEXIST;
@@ -1054,7 +1056,7 @@ end:
1054} 1056}
1055 1057
1056static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid, 1058static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
1057 struct nlmsghdr *nlh) 1059 struct nlmsghdr *nlh, gfp_t gfp)
1058{ 1060{
1059 struct net_fill_args fillargs = { 1061 struct net_fill_args fillargs = {
1060 .portid = portid, 1062 .portid = portid,
@@ -1065,7 +1067,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
1065 struct sk_buff *msg; 1067 struct sk_buff *msg;
1066 int err = -ENOMEM; 1068 int err = -ENOMEM;
1067 1069
1068 msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL); 1070 msg = nlmsg_new(rtnl_net_get_size(), gfp);
1069 if (!msg) 1071 if (!msg)
1070 goto out; 1072 goto out;
1071 1073
@@ -1073,7 +1075,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
1073 if (err < 0) 1075 if (err < 0)
1074 goto err_out; 1076 goto err_out;
1075 1077
1076 rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0); 1078 rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
1077 return; 1079 return;
1078 1080
1079err_out: 1081err_out:
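
The common thread in this file (and in the rtnetlink.c and openvswitch hunks later) is plumbing a gfp_t argument down to the allocation: peernet2id_alloc() can now be called from contexts that must not sleep, where the caller passes GFP_ATOMIC, while process-context callers keep GFP_KERNEL. The pattern, with hypothetical names:

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* Let the outermost caller decide the allocation context. */
    static int notify_event(int id, gfp_t gfp)
    {
            void *msg = kmalloc(128, gfp);  /* GFP_KERNEL may sleep; GFP_ATOMIC may not */

            if (!msg)
                    return -ENOMEM;
            /* ... build and send the notification ... */
            kfree(msg);
            return 0;
    }

    /* Process context:            notify_event(id, GFP_KERNEL); */
    /* Under a lock / in softirq:  notify_event(id, GFP_ATOMIC); */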
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1ee6460f8275..c81cd80114d9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1523,7 +1523,7 @@ static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1523 1523
1524static int rtnl_fill_link_netnsid(struct sk_buff *skb, 1524static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1525 const struct net_device *dev, 1525 const struct net_device *dev,
1526 struct net *src_net) 1526 struct net *src_net, gfp_t gfp)
1527{ 1527{
1528 bool put_iflink = false; 1528 bool put_iflink = false;
1529 1529
@@ -1531,7 +1531,7 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1531 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev); 1531 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1532 1532
1533 if (!net_eq(dev_net(dev), link_net)) { 1533 if (!net_eq(dev_net(dev), link_net)) {
1534 int id = peernet2id_alloc(src_net, link_net); 1534 int id = peernet2id_alloc(src_net, link_net, gfp);
1535 1535
1536 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id)) 1536 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1537 return -EMSGSIZE; 1537 return -EMSGSIZE;
@@ -1589,7 +1589,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
1589 int type, u32 pid, u32 seq, u32 change, 1589 int type, u32 pid, u32 seq, u32 change,
1590 unsigned int flags, u32 ext_filter_mask, 1590 unsigned int flags, u32 ext_filter_mask,
1591 u32 event, int *new_nsid, int new_ifindex, 1591 u32 event, int *new_nsid, int new_ifindex,
1592 int tgt_netnsid) 1592 int tgt_netnsid, gfp_t gfp)
1593{ 1593{
1594 struct ifinfomsg *ifm; 1594 struct ifinfomsg *ifm;
1595 struct nlmsghdr *nlh; 1595 struct nlmsghdr *nlh;
@@ -1681,7 +1681,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
1681 goto nla_put_failure; 1681 goto nla_put_failure;
1682 } 1682 }
1683 1683
1684 if (rtnl_fill_link_netnsid(skb, dev, src_net)) 1684 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1685 goto nla_put_failure; 1685 goto nla_put_failure;
1686 1686
1687 if (new_nsid && 1687 if (new_nsid &&
@@ -2001,7 +2001,7 @@ walk_entries:
2001 NETLINK_CB(cb->skb).portid, 2001 NETLINK_CB(cb->skb).portid,
2002 nlh->nlmsg_seq, 0, flags, 2002 nlh->nlmsg_seq, 0, flags,
2003 ext_filter_mask, 0, NULL, 0, 2003 ext_filter_mask, 0, NULL, 0,
2004 netnsid); 2004 netnsid, GFP_KERNEL);
2005 2005
2006 if (err < 0) { 2006 if (err < 0) {
2007 if (likely(skb->len)) 2007 if (likely(skb->len))
@@ -2355,6 +2355,7 @@ static int do_set_master(struct net_device *dev, int ifindex,
2355 err = ops->ndo_del_slave(upper_dev, dev); 2355 err = ops->ndo_del_slave(upper_dev, dev);
2356 if (err) 2356 if (err)
2357 return err; 2357 return err;
2358 netdev_update_lockdep_key(dev);
2358 } else { 2359 } else {
2359 return -EOPNOTSUPP; 2360 return -EOPNOTSUPP;
2360 } 2361 }
@@ -3359,7 +3360,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3359 err = rtnl_fill_ifinfo(nskb, dev, net, 3360 err = rtnl_fill_ifinfo(nskb, dev, net,
3360 RTM_NEWLINK, NETLINK_CB(skb).portid, 3361 RTM_NEWLINK, NETLINK_CB(skb).portid,
3361 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 3362 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3362 0, NULL, 0, netnsid); 3363 0, NULL, 0, netnsid, GFP_KERNEL);
3363 if (err < 0) { 3364 if (err < 0) {
3364 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 3365 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3365 WARN_ON(err == -EMSGSIZE); 3366 WARN_ON(err == -EMSGSIZE);
@@ -3471,7 +3472,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3471 3472
3472 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev), 3473 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3473 type, 0, 0, change, 0, 0, event, 3474 type, 0, 0, change, 0, 0, event,
3474 new_nsid, new_ifindex, -1); 3475 new_nsid, new_ifindex, -1, flags);
3475 if (err < 0) { 3476 if (err < 0) {
3476 /* -EMSGSIZE implies BUG in if_nlmsg_size() */ 3477 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3477 WARN_ON(err == -EMSGSIZE); 3478 WARN_ON(err == -EMSGSIZE);
@@ -3916,7 +3917,7 @@ static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
3916 ndm = nlmsg_data(nlh); 3917 ndm = nlmsg_data(nlh);
3917 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 3918 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
3918 ndm->ndm_flags || ndm->ndm_type) { 3919 ndm->ndm_flags || ndm->ndm_type) {
3919 NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request"); 3920 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
3920 return -EINVAL; 3921 return -EINVAL;
3921 } 3922 }
3922 3923
diff --git a/net/core/sock.c b/net/core/sock.c
index a515392ba84b..ac78a570e43a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1127,7 +1127,7 @@ set_rcvbuf:
1127 break; 1127 break;
1128 } 1128 }
1129 case SO_INCOMING_CPU: 1129 case SO_INCOMING_CPU:
1130 sk->sk_incoming_cpu = val; 1130 WRITE_ONCE(sk->sk_incoming_cpu, val);
1131 break; 1131 break;
1132 1132
1133 case SO_CNX_ADVICE: 1133 case SO_CNX_ADVICE:
@@ -1476,7 +1476,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1476 break; 1476 break;
1477 1477
1478 case SO_INCOMING_CPU: 1478 case SO_INCOMING_CPU:
1479 v.val = sk->sk_incoming_cpu; 1479 v.val = READ_ONCE(sk->sk_incoming_cpu);
1480 break; 1480 break;
1481 1481
1482 case SO_MEMINFO: 1482 case SO_MEMINFO:
@@ -3600,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
3600{ 3600{
3601 struct sock *sk = p; 3601 struct sock *sk = p;
3602 3602
3603 return !skb_queue_empty(&sk->sk_receive_queue) || 3603 return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
3604 sk_busy_loop_timeout(sk, start_time); 3604 sk_busy_loop_timeout(sk, start_time);
3605} 3605}
3606EXPORT_SYMBOL(sk_busy_loop_end); 3606EXPORT_SYMBOL(sk_busy_loop_end);
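
sk_incoming_cpu is stored from setsockopt() or the RX path and read locklessly by compute_score() on other CPUs (see the inet, inet6 and udp hunks below), so the accesses gain WRITE_ONCE()/READ_ONCE() annotations: these tell the compiler not to tear, fuse or re-read the access, and document the intentional data race for KCSAN. The idiom, with hypothetical names:

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct peer {
            int incoming_cpu;       /* written by one CPU, read by many */
    };

    static void peer_set_cpu(struct peer *p, int cpu)
    {
            WRITE_ONCE(p->incoming_cpu, cpu);       /* single, untorn store */
    }

    static bool peer_on_this_cpu(const struct peer *p, int this_cpu)
    {
            return READ_ONCE(p->incoming_cpu) == this_cpu; /* untorn load */
    }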
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d9b4200ed12d..0d8f782c25cc 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -117,7 +117,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
117 inet->inet_daddr, 117 inet->inet_daddr,
118 inet->inet_sport, 118 inet->inet_sport,
119 inet->inet_dport); 119 inet->inet_dport);
120 inet->inet_id = dp->dccps_iss ^ jiffies; 120 inet->inet_id = prandom_u32();
121 121
122 err = dccp_connect(sk); 122 err = dccp_connect(sk);
123 rt = NULL; 123 rt = NULL;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 0ea75286abf4..3349ea81f901 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1205,7 +1205,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wai
1205 struct dn_scp *scp = DN_SK(sk); 1205 struct dn_scp *scp = DN_SK(sk);
1206 __poll_t mask = datagram_poll(file, sock, wait); 1206 __poll_t mask = datagram_poll(file, sock, wait);
1207 1207
1208 if (!skb_queue_empty(&scp->other_receive_queue)) 1208 if (!skb_queue_empty_lockless(&scp->other_receive_queue))
1209 mask |= EPOLLRDBAND; 1209 mask |= EPOLLRDBAND;
1210 1210
1211 return mask; 1211 return mask;
diff --git a/net/dsa/master.c b/net/dsa/master.c
index a8e52c9967f4..3255dfc97f86 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -310,8 +310,6 @@ static void dsa_master_reset_mtu(struct net_device *dev)
310 rtnl_unlock(); 310 rtnl_unlock();
311} 311}
312 312
313static struct lock_class_key dsa_master_addr_list_lock_key;
314
315int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) 313int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
316{ 314{
317 int ret; 315 int ret;
@@ -325,9 +323,6 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
325 wmb(); 323 wmb();
326 324
327 dev->dsa_ptr = cpu_dp; 325 dev->dsa_ptr = cpu_dp;
328 lockdep_set_class(&dev->addr_list_lock,
329 &dsa_master_addr_list_lock_key);
330
331 ret = dsa_master_ethtool_setup(dev); 326 ret = dsa_master_ethtool_setup(dev);
332 if (ret) 327 if (ret)
333 return ret; 328 return ret;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 75d58229a4bd..028e65f4b5ba 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1341,15 +1341,6 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
1341 return ret; 1341 return ret;
1342} 1342}
1343 1343
1344static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
1345static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
1346 struct netdev_queue *txq,
1347 void *_unused)
1348{
1349 lockdep_set_class(&txq->_xmit_lock,
1350 &dsa_slave_netdev_xmit_lock_key);
1351}
1352
1353int dsa_slave_suspend(struct net_device *slave_dev) 1344int dsa_slave_suspend(struct net_device *slave_dev)
1354{ 1345{
1355 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 1346 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
@@ -1433,9 +1424,6 @@ int dsa_slave_create(struct dsa_port *port)
1433 slave_dev->max_mtu = ETH_MAX_MTU; 1424 slave_dev->max_mtu = ETH_MAX_MTU;
1434 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type); 1425 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1435 1426
1436 netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
1437 NULL);
1438
1439 SET_NETDEV_DEV(slave_dev, port->ds->dev); 1427 SET_NETDEV_DEV(slave_dev, port->ds->dev);
1440 slave_dev->dev.of_node = port->dn; 1428 slave_dev->dev.of_node = port->dn;
1441 slave_dev->vlan_features = master->vlan_features; 1429 slave_dev->vlan_features = master->vlan_features;
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 3297e7fa9945..c0b107cdd715 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -58,13 +58,6 @@ static const struct header_ops lowpan_header_ops = {
58 .create = lowpan_header_create, 58 .create = lowpan_header_create,
59}; 59};
60 60
61static int lowpan_dev_init(struct net_device *ldev)
62{
63 netdev_lockdep_set_classes(ldev);
64
65 return 0;
66}
67
68static int lowpan_open(struct net_device *dev) 61static int lowpan_open(struct net_device *dev)
69{ 62{
70 if (!open_count) 63 if (!open_count)
@@ -96,7 +89,6 @@ static int lowpan_get_iflink(const struct net_device *dev)
96} 89}
97 90
98static const struct net_device_ops lowpan_netdev_ops = { 91static const struct net_device_ops lowpan_netdev_ops = {
99 .ndo_init = lowpan_dev_init,
100 .ndo_start_xmit = lowpan_xmit, 92 .ndo_start_xmit = lowpan_xmit,
101 .ndo_open = lowpan_open, 93 .ndo_open = lowpan_open,
102 .ndo_stop = lowpan_stop, 94 .ndo_stop = lowpan_stop,
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 9a0fe0c2fa02..4a8550c49202 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
73 reuseport_has_conns(sk, true); 73 reuseport_has_conns(sk, true);
74 sk->sk_state = TCP_ESTABLISHED; 74 sk->sk_state = TCP_ESTABLISHED;
75 sk_set_txhash(sk); 75 sk_set_txhash(sk);
76 inet->inet_id = jiffies; 76 inet->inet_id = prandom_u32();
77 77
78 sk_dst_set(sk, &rt->dst); 78 sk_dst_set(sk, &rt->dst);
79 err = 0; 79 err = 0;
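
Here and in the DCCP and TCP hunks, the per-socket IP ID seed stops being derived from jiffies and sequence numbers, which an off-path attacker can estimate well enough to guess IDs, and becomes prandom_u32(); inet_id is 16 bits wide, so the value is simply truncated on assignment. A sketch with a hypothetical reduced socket:

    #include <linux/random.h>
    #include <linux/types.h>

    /* inet_id roughly seeds the per-socket IP ID counter that later
     * gets incremented per transmitted packet.
     */
    struct conn {
            u16 inet_id;
    };

    static void conn_established(struct conn *c)
    {
            /* Old style: c->inet_id = seq ^ jiffies;  -- predictable off-path */
            c->inet_id = prandom_u32();     /* truncated to 16 bits */
    }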
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index dde77f72e03e..71c78d223dfd 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1148,7 +1148,7 @@ void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric)
1148 if (!(dev->flags & IFF_UP) || 1148 if (!(dev->flags & IFF_UP) ||
1149 ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) || 1149 ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
1150 ipv4_is_zeronet(prefix) || 1150 ipv4_is_zeronet(prefix) ||
1151 prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32) 1151 (prefix == ifa->ifa_local && ifa->ifa_prefixlen == 32))
1152 return; 1152 return;
1153 1153
1154 /* add the new */ 1154 /* add the new */
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 97824864e40d..83fb00153018 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -240,7 +240,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
240 return -1; 240 return -1;
241 241
242 score = sk->sk_family == PF_INET ? 2 : 1; 242 score = sk->sk_family == PF_INET ? 2 : 1;
243 if (sk->sk_incoming_cpu == raw_smp_processor_id()) 243 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
244 score++; 244 score++;
245 } 245 }
246 return score; 246 return score;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 52690bb3e40f..10636fb6093e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -509,9 +509,9 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
509 key = &tun_info->key; 509 key = &tun_info->key;
510 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) 510 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
511 goto err_free_skb; 511 goto err_free_skb;
512 md = ip_tunnel_info_opts(tun_info); 512 if (tun_info->options_len < sizeof(*md))
513 if (!md)
514 goto err_free_skb; 513 goto err_free_skb;
514 md = ip_tunnel_info_opts(tun_info);
515 515
516 /* ERSPAN has fixed 8 byte GRE header */ 516 /* ERSPAN has fixed 8 byte GRE header */
517 version = md->version; 517 version = md->version;
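
In this erspan path (and its IPv6 twin in ip6_gre.c below), the removed "if (!md)" was dead code, since ip_tunnel_info_opts() only computes a pointer into tun_info and never returns NULL. The real precondition, that the collected metadata is large enough to hold struct erspan_metadata, is what the new options_len check enforces; restated with comments:

    if (tun_info->options_len < sizeof(*md))
            goto err_free_skb;              /* metadata shorter than erspan_metadata */
    md = ip_tunnel_info_opts(tun_info);     /* pointer into tun_info; never NULL */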
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 814b9b8882a0..3d8baaaf7086 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -645,11 +645,12 @@ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
645EXPORT_SYMBOL(ip_fraglist_prepare); 645EXPORT_SYMBOL(ip_fraglist_prepare);
646 646
647void ip_frag_init(struct sk_buff *skb, unsigned int hlen, 647void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
648 unsigned int ll_rs, unsigned int mtu, 648 unsigned int ll_rs, unsigned int mtu, bool DF,
649 struct ip_frag_state *state) 649 struct ip_frag_state *state)
650{ 650{
651 struct iphdr *iph = ip_hdr(skb); 651 struct iphdr *iph = ip_hdr(skb);
652 652
653 state->DF = DF;
653 state->hlen = hlen; 654 state->hlen = hlen;
654 state->ll_rs = ll_rs; 655 state->ll_rs = ll_rs;
655 state->mtu = mtu; 656 state->mtu = mtu;
@@ -668,9 +669,6 @@ static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
668 /* Copy the flags to each fragment. */ 669 /* Copy the flags to each fragment. */
669 IPCB(to)->flags = IPCB(from)->flags; 670 IPCB(to)->flags = IPCB(from)->flags;
670 671
671 if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
672 state->iph->frag_off |= htons(IP_DF);
673
674 /* ANK: dirty, but effective trick. Upgrade options only if 672 /* ANK: dirty, but effective trick. Upgrade options only if
675 * the segment to be fragmented was THE FIRST (otherwise, 673 * the segment to be fragmented was THE FIRST (otherwise,
676 * options are already fixed) and make it ONCE 674 * options are already fixed) and make it ONCE
@@ -738,6 +736,8 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
738 */ 736 */
739 iph = ip_hdr(skb2); 737 iph = ip_hdr(skb2);
740 iph->frag_off = htons((state->offset >> 3)); 738 iph->frag_off = htons((state->offset >> 3));
739 if (state->DF)
740 iph->frag_off |= htons(IP_DF);
741 741
742 /* 742 /*
743 * Added AC : If we are fragmenting a fragment that's not the 743 * Added AC : If we are fragmenting a fragment that's not the
@@ -883,7 +883,8 @@ slow_path:
883 * Fragment the datagram. 883 * Fragment the datagram.
884 */ 884 */
885 885
886 ip_frag_init(skb, hlen, ll_rs, mtu, &state); 886 ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
887 &state);
887 888
888 /* 889 /*
889 * Keep copying data until we run out. 890 * Keep copying data until we run out.
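
Previously the DF bit was restored only in ip_frag_ipcb(), which runs on the fast (frag-list) path, so fragments built by the slow path's ip_frag_next() could leave without IP_DF even when PMTU discovery demanded it. The fix samples the flag once into ip_frag_state and applies it to every fragment header. A reduced, hypothetical sketch of that state-carrying shape:

    #include <linux/ip.h>
    #include <linux/types.h>

    struct frag_state {
            bool         df;        /* captured once when fragmentation starts */
            unsigned int offset;
    };

    static void frag_init(struct frag_state *st, bool df)
    {
            st->df = df;
            st->offset = 0;
    }

    static __be16 frag_off_field(const struct frag_state *st)
    {
            __be16 v = htons(st->offset >> 3);

            if (st->df)
                    v |= htons(IP_DF);      /* every fragment inherits DF */
            return v;
    }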
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 42187a3b82f4..d8876f0e9672 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -584,7 +584,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
584 } 584 }
585 /* This barrier is coupled with smp_wmb() in tcp_reset() */ 585 /* This barrier is coupled with smp_wmb() in tcp_reset() */
586 smp_rmb(); 586 smp_rmb();
587 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 587 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
588 mask |= EPOLLERR; 588 mask |= EPOLLERR;
589 589
590 return mask; 590 return mask;
@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1964 if (unlikely(flags & MSG_ERRQUEUE)) 1964 if (unlikely(flags & MSG_ERRQUEUE))
1965 return inet_recv_error(sk, msg, len, addr_len); 1965 return inet_recv_error(sk, msg, len, addr_len);
1966 1966
1967 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && 1967 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
1968 (sk->sk_state == TCP_ESTABLISHED)) 1968 (sk->sk_state == TCP_ESTABLISHED))
1969 sk_busy_loop(sk, nonblock); 1969 sk_busy_loop(sk, nonblock);
1970 1970
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6be568334848..67b2dc7a1727 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -303,7 +303,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
303 inet->inet_daddr); 303 inet->inet_daddr);
304 } 304 }
305 305
306 inet->inet_id = tp->write_seq ^ jiffies; 306 inet->inet_id = prandom_u32();
307 307
308 if (tcp_fastopen_defer_connect(sk, &err)) 308 if (tcp_fastopen_defer_connect(sk, &err))
309 return err; 309 return err;
@@ -1450,7 +1450,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1450 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1450 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1451 if (inet_opt) 1451 if (inet_opt)
1452 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 1452 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1453 newinet->inet_id = newtp->write_seq ^ jiffies; 1453 newinet->inet_id = prandom_u32();
1454 1454
1455 if (!dst) { 1455 if (!dst) {
1456 dst = inet_csk_route_child_sock(sk, newsk, req); 1456 dst = inet_csk_route_child_sock(sk, newsk, req);
@@ -2681,7 +2681,7 @@ static int __net_init tcp_sk_init(struct net *net)
2681 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2; 2681 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
2682 net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo; 2682 net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2683 2683
2684 net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256); 2684 net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
2685 net->ipv4.sysctl_tcp_sack = 1; 2685 net->ipv4.sysctl_tcp_sack = 1;
2686 net->ipv4.sysctl_tcp_window_scaling = 1; 2686 net->ipv4.sysctl_tcp_window_scaling = 1;
2687 net->ipv4.sysctl_tcp_timestamps = 1; 2687 net->ipv4.sysctl_tcp_timestamps = 1;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 14bc654b6842..1d58ce829dca 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -388,7 +388,7 @@ static int compute_score(struct sock *sk, struct net *net,
388 return -1; 388 return -1;
389 score += 4; 389 score += 4;
390 390
391 if (sk->sk_incoming_cpu == raw_smp_processor_id()) 391 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
392 score++; 392 score++;
393 return score; 393 return score;
394} 394}
@@ -1316,6 +1316,20 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
1316 scratch->_tsize_state |= UDP_SKB_IS_STATELESS; 1316 scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
1317} 1317}
1318 1318
1319static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
1320{
1321 /* We come here after udp_lib_checksum_complete() returned 0.
1322 * This means that __skb_checksum_complete() might have
1323 * set skb->csum_valid to 1.
1324 * On 64bit platforms, we can set csum_unnecessary
1325 * to true, but only if the skb is not shared.
1326 */
1327#if BITS_PER_LONG == 64
1328 if (!skb_shared(skb))
1329 udp_skb_scratch(skb)->csum_unnecessary = true;
1330#endif
1331}
1332
1319static int udp_skb_truesize(struct sk_buff *skb) 1333static int udp_skb_truesize(struct sk_buff *skb)
1320{ 1334{
1321 return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS; 1335 return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
@@ -1550,10 +1564,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
1550 *total += skb->truesize; 1564 *total += skb->truesize;
1551 kfree_skb(skb); 1565 kfree_skb(skb);
1552 } else { 1566 } else {
1553 /* the csum related bits could be changed, refresh 1567 udp_skb_csum_unnecessary_set(skb);
1554 * the scratch area
1555 */
1556 udp_set_dev_scratch(skb);
1557 break; 1568 break;
1558 } 1569 }
1559 } 1570 }
@@ -1577,7 +1588,7 @@ static int first_packet_length(struct sock *sk)
1577 1588
1578 spin_lock_bh(&rcvq->lock); 1589 spin_lock_bh(&rcvq->lock);
1579 skb = __first_packet_length(sk, rcvq, &total); 1590 skb = __first_packet_length(sk, rcvq, &total);
1580 if (!skb && !skb_queue_empty(sk_queue)) { 1591 if (!skb && !skb_queue_empty_lockless(sk_queue)) {
1581 spin_lock(&sk_queue->lock); 1592 spin_lock(&sk_queue->lock);
1582 skb_queue_splice_tail_init(sk_queue, rcvq); 1593 skb_queue_splice_tail_init(sk_queue, rcvq);
1583 spin_unlock(&sk_queue->lock); 1594 spin_unlock(&sk_queue->lock);
@@ -1650,7 +1661,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
1650 return skb; 1661 return skb;
1651 } 1662 }
1652 1663
1653 if (skb_queue_empty(sk_queue)) { 1664 if (skb_queue_empty_lockless(sk_queue)) {
1654 spin_unlock_bh(&queue->lock); 1665 spin_unlock_bh(&queue->lock);
1655 goto busy_check; 1666 goto busy_check;
1656 } 1667 }
@@ -1676,7 +1687,7 @@ busy_check:
1676 break; 1687 break;
1677 1688
1678 sk_busy_loop(sk, flags & MSG_DONTWAIT); 1689 sk_busy_loop(sk, flags & MSG_DONTWAIT);
1679 } while (!skb_queue_empty(sk_queue)); 1690 } while (!skb_queue_empty_lockless(sk_queue));
1680 1691
1681 /* sk_queue is empty, reader_queue may contain peeked packets */ 1692 /* sk_queue is empty, reader_queue may contain peeked packets */
1682 } while (timeo && 1693 } while (timeo &&
@@ -2712,7 +2723,7 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
2712 __poll_t mask = datagram_poll(file, sock, wait); 2723 __poll_t mask = datagram_poll(file, sock, wait);
2713 struct sock *sk = sock->sk; 2724 struct sock *sk = sock->sk;
2714 2725
2715 if (!skb_queue_empty(&udp_sk(sk)->reader_queue)) 2726 if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
2716 mask |= EPOLLIN | EPOLLRDNORM; 2727 mask |= EPOLLIN | EPOLLRDNORM;
2717 2728
2718 /* Check for false positives due to checksum errors */ 2729 /* Check for false positives due to checksum errors */
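
The udp hunks switch every unlocked emptiness test to skb_queue_empty_lockless(), whose core is essentially one READ_ONCE() comparison, and shrink the scratch refresh in __first_packet_length() to the new udp_skb_csum_unnecessary_set() helper: rebuilding the whole scratch area on a possibly shared skb raced with readers, while flipping csum_unnecessary on an unshared skb (64-bit only, where the flag lives in scratch space) is safe. A sketch of the lockless-empty idiom:

    #include <linux/skbuff.h>

    static inline bool queue_empty_lockless(const struct sk_buff_head *list)
    {
            /* A circular list is empty iff the head points at itself; the
             * READ_ONCE() makes the unlocked load tear-free.  The answer
             * can be stale, so "non-empty" must be revalidated under the
             * queue lock before actually dequeueing.
             */
            return READ_ONCE(list->next) == (const struct sk_buff *)list;
    }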
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 783f3c1466da..2fc079284ca4 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -7,6 +7,7 @@
7#include <linux/export.h> 7#include <linux/export.h>
8#include <net/ipv6.h> 8#include <net/ipv6.h>
9#include <net/ipv6_stubs.h> 9#include <net/ipv6_stubs.h>
10#include <net/addrconf.h>
10#include <net/ip.h> 11#include <net/ip.h>
11 12
12/* if ipv6 module registers this function is used by xfrm to force all 13/* if ipv6 module registers this function is used by xfrm to force all
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index cf60fae9533b..fbe9d4295eac 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -105,7 +105,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
105 return -1; 105 return -1;
106 106
107 score = 1; 107 score = 1;
108 if (sk->sk_incoming_cpu == raw_smp_processor_id()) 108 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
109 score++; 109 score++;
110 } 110 }
111 return score; 111 return score;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 787d9f2a6e99..923034c52ce4 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -980,9 +980,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
980 dsfield = key->tos; 980 dsfield = key->tos;
981 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) 981 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
982 goto tx_err; 982 goto tx_err;
983 md = ip_tunnel_info_opts(tun_info); 983 if (tun_info->options_len < sizeof(*md))
984 if (!md)
985 goto tx_err; 984 goto tx_err;
985 md = ip_tunnel_info_opts(tun_info);
986 986
987 tun_id = tunnel_id_to_key32(key->tun_id); 987 tun_id = tunnel_id_to_key32(key->tun_id);
988 if (md->version == 1) { 988 if (md->version == 1) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6324d3a8cb53..9fec580c968e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -135,7 +135,7 @@ static int compute_score(struct sock *sk, struct net *net,
135 return -1; 135 return -1;
136 score++; 136 score++;
137 137
138 if (sk->sk_incoming_cpu == raw_smp_processor_id()) 138 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
139 score++; 139 score++;
140 140
141 return score; 141 return score;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index fd5ac2788e45..d3b520b9b2c9 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -56,7 +56,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
56{ 56{
57 eth_hw_addr_random(dev); 57 eth_hw_addr_random(dev);
58 eth_broadcast_addr(dev->broadcast); 58 eth_broadcast_addr(dev->broadcast);
59 netdev_lockdep_set_classes(dev);
60 59
61 return 0; 60 return 0;
62} 61}
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 4515056ef1c2..f9b16f2b2219 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -193,21 +193,29 @@ struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *
193 193
194 mutex_lock(&__ip_vs_app_mutex); 194 mutex_lock(&__ip_vs_app_mutex);
195 195
196 /* increase the module use count */
197 if (!ip_vs_use_count_inc()) {
198 err = -ENOENT;
199 goto out_unlock;
200 }
201
196 list_for_each_entry(a, &ipvs->app_list, a_list) { 202 list_for_each_entry(a, &ipvs->app_list, a_list) {
197 if (!strcmp(app->name, a->name)) { 203 if (!strcmp(app->name, a->name)) {
198 err = -EEXIST; 204 err = -EEXIST;
205 /* decrease the module use count */
206 ip_vs_use_count_dec();
199 goto out_unlock; 207 goto out_unlock;
200 } 208 }
201 } 209 }
202 a = kmemdup(app, sizeof(*app), GFP_KERNEL); 210 a = kmemdup(app, sizeof(*app), GFP_KERNEL);
203 if (!a) { 211 if (!a) {
204 err = -ENOMEM; 212 err = -ENOMEM;
213 /* decrease the module use count */
214 ip_vs_use_count_dec();
205 goto out_unlock; 215 goto out_unlock;
206 } 216 }
207 INIT_LIST_HEAD(&a->incs_list); 217 INIT_LIST_HEAD(&a->incs_list);
208 list_add(&a->a_list, &ipvs->app_list); 218 list_add(&a->a_list, &ipvs->app_list);
209 /* increase the module use count */
210 ip_vs_use_count_inc();
211 219
212out_unlock: 220out_unlock:
213 mutex_unlock(&__ip_vs_app_mutex); 221 mutex_unlock(&__ip_vs_app_mutex);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 8b48e7ce1c2c..3cccc88ef817 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -93,7 +93,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net,
93static void update_defense_level(struct netns_ipvs *ipvs) 93static void update_defense_level(struct netns_ipvs *ipvs)
94{ 94{
95 struct sysinfo i; 95 struct sysinfo i;
96 static int old_secure_tcp = 0;
97 int availmem; 96 int availmem;
98 int nomem; 97 int nomem;
99 int to_change = -1; 98 int to_change = -1;
@@ -174,35 +173,35 @@ static void update_defense_level(struct netns_ipvs *ipvs)
174 spin_lock(&ipvs->securetcp_lock); 173 spin_lock(&ipvs->securetcp_lock);
175 switch (ipvs->sysctl_secure_tcp) { 174 switch (ipvs->sysctl_secure_tcp) {
176 case 0: 175 case 0:
177 if (old_secure_tcp >= 2) 176 if (ipvs->old_secure_tcp >= 2)
178 to_change = 0; 177 to_change = 0;
179 break; 178 break;
180 case 1: 179 case 1:
181 if (nomem) { 180 if (nomem) {
182 if (old_secure_tcp < 2) 181 if (ipvs->old_secure_tcp < 2)
183 to_change = 1; 182 to_change = 1;
184 ipvs->sysctl_secure_tcp = 2; 183 ipvs->sysctl_secure_tcp = 2;
185 } else { 184 } else {
186 if (old_secure_tcp >= 2) 185 if (ipvs->old_secure_tcp >= 2)
187 to_change = 0; 186 to_change = 0;
188 } 187 }
189 break; 188 break;
190 case 2: 189 case 2:
191 if (nomem) { 190 if (nomem) {
192 if (old_secure_tcp < 2) 191 if (ipvs->old_secure_tcp < 2)
193 to_change = 1; 192 to_change = 1;
194 } else { 193 } else {
195 if (old_secure_tcp >= 2) 194 if (ipvs->old_secure_tcp >= 2)
196 to_change = 0; 195 to_change = 0;
197 ipvs->sysctl_secure_tcp = 1; 196 ipvs->sysctl_secure_tcp = 1;
198 } 197 }
199 break; 198 break;
200 case 3: 199 case 3:
201 if (old_secure_tcp < 2) 200 if (ipvs->old_secure_tcp < 2)
202 to_change = 1; 201 to_change = 1;
203 break; 202 break;
204 } 203 }
205 old_secure_tcp = ipvs->sysctl_secure_tcp; 204 ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
206 if (to_change >= 0) 205 if (to_change >= 0)
207 ip_vs_protocol_timeout_change(ipvs, 206 ip_vs_protocol_timeout_change(ipvs,
208 ipvs->sysctl_secure_tcp > 1); 207 ipvs->sysctl_secure_tcp > 1);
@@ -1275,7 +1274,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
1275 struct ip_vs_service *svc = NULL; 1274 struct ip_vs_service *svc = NULL;
1276 1275
1277 /* increase the module use count */ 1276 /* increase the module use count */
1278 ip_vs_use_count_inc(); 1277 if (!ip_vs_use_count_inc())
1278 return -ENOPROTOOPT;
1279 1279
1280 /* Lookup the scheduler by 'u->sched_name' */ 1280 /* Lookup the scheduler by 'u->sched_name' */
1281 if (strcmp(u->sched_name, "none")) { 1281 if (strcmp(u->sched_name, "none")) {
@@ -2435,9 +2435,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2435 if (copy_from_user(arg, user, len) != 0) 2435 if (copy_from_user(arg, user, len) != 0)
2436 return -EFAULT; 2436 return -EFAULT;
2437 2437
2438 /* increase the module use count */
2439 ip_vs_use_count_inc();
2440
2441 /* Handle daemons since they have another lock */ 2438 /* Handle daemons since they have another lock */
2442 if (cmd == IP_VS_SO_SET_STARTDAEMON || 2439 if (cmd == IP_VS_SO_SET_STARTDAEMON ||
2443 cmd == IP_VS_SO_SET_STOPDAEMON) { 2440 cmd == IP_VS_SO_SET_STOPDAEMON) {
@@ -2450,13 +2447,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2450 ret = -EINVAL; 2447 ret = -EINVAL;
2451 if (strscpy(cfg.mcast_ifn, dm->mcast_ifn, 2448 if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
2452 sizeof(cfg.mcast_ifn)) <= 0) 2449 sizeof(cfg.mcast_ifn)) <= 0)
2453 goto out_dec; 2450 return ret;
2454 cfg.syncid = dm->syncid; 2451 cfg.syncid = dm->syncid;
2455 ret = start_sync_thread(ipvs, &cfg, dm->state); 2452 ret = start_sync_thread(ipvs, &cfg, dm->state);
2456 } else { 2453 } else {
2457 ret = stop_sync_thread(ipvs, dm->state); 2454 ret = stop_sync_thread(ipvs, dm->state);
2458 } 2455 }
2459 goto out_dec; 2456 return ret;
2460 } 2457 }
2461 2458
2462 mutex_lock(&__ip_vs_mutex); 2459 mutex_lock(&__ip_vs_mutex);
@@ -2551,10 +2548,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2551 2548
2552 out_unlock: 2549 out_unlock:
2553 mutex_unlock(&__ip_vs_mutex); 2550 mutex_unlock(&__ip_vs_mutex);
2554 out_dec:
2555 /* decrease the module use count */
2556 ip_vs_use_count_dec();
2557
2558 return ret; 2551 return ret;
2559} 2552}
2560 2553
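
Two independent ipvs fixes meet here. First, old_secure_tcp moves from a function-static into struct netns_ipvs, so per-namespace defense levels stop sharing, and corrupting, one global state. Second, ip_vs_use_count_inc() is a wrapper around try_module_get(), which can fail once module unload has begun; every caller (here and in ip_vs_app.c, ip_vs_pe.c, ip_vs_sched.c and ip_vs_sync.c) now checks the result and unwinds instead of silently pinning a dying module. The refcount pattern, sketched:

    #include <linux/module.h>

    static int register_thing(void)
    {
            /* try_module_get() returns false once unload has started,
             * so a racing register path cannot revive a dying module.
             */
            if (!try_module_get(THIS_MODULE))
                    return -ENOENT;

            /* ... registration work; every failure path must balance
             * the reference with module_put(THIS_MODULE) ...
             */
            return 0;
    }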
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 8e104dff7abc..166c669f0763 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -68,7 +68,8 @@ int register_ip_vs_pe(struct ip_vs_pe *pe)
68 struct ip_vs_pe *tmp; 68 struct ip_vs_pe *tmp;
69 69
70 /* increase the module use count */ 70 /* increase the module use count */
71 ip_vs_use_count_inc(); 71 if (!ip_vs_use_count_inc())
72 return -ENOENT;
72 73
73 mutex_lock(&ip_vs_pe_mutex); 74 mutex_lock(&ip_vs_pe_mutex);
74 /* Make sure that the pe with this name doesn't exist 75 /* Make sure that the pe with this name doesn't exist
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 2f9d5cd5daee..d4903723be7e 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -179,7 +179,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
179 } 179 }
180 180
181 /* increase the module use count */ 181 /* increase the module use count */
182 ip_vs_use_count_inc(); 182 if (!ip_vs_use_count_inc())
183 return -ENOENT;
183 184
184 mutex_lock(&ip_vs_sched_mutex); 185 mutex_lock(&ip_vs_sched_mutex);
185 186
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index a4a78c4b06de..8dc892a9dc91 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1762,6 +1762,10 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1762 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n", 1762 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
1763 sizeof(struct ip_vs_sync_conn_v0)); 1763 sizeof(struct ip_vs_sync_conn_v0));
1764 1764
1765 /* increase the module use count */
1766 if (!ip_vs_use_count_inc())
1767 return -ENOPROTOOPT;
1768
1765 /* Do not hold one mutex and then to block on another */ 1769 /* Do not hold one mutex and then to block on another */
1766 for (;;) { 1770 for (;;) {
1767 rtnl_lock(); 1771 rtnl_lock();
@@ -1892,9 +1896,6 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
1892 mutex_unlock(&ipvs->sync_mutex); 1896 mutex_unlock(&ipvs->sync_mutex);
1893 rtnl_unlock(); 1897 rtnl_unlock();
1894 1898
1895 /* increase the module use count */
1896 ip_vs_use_count_inc();
1897
1898 return 0; 1899 return 0;
1899 1900
1900out: 1901out:
@@ -1924,11 +1925,17 @@ out:
1924 } 1925 }
1925 kfree(ti); 1926 kfree(ti);
1926 } 1927 }
1928
1929 /* decrease the module use count */
1930 ip_vs_use_count_dec();
1927 return result; 1931 return result;
1928 1932
1929out_early: 1933out_early:
1930 mutex_unlock(&ipvs->sync_mutex); 1934 mutex_unlock(&ipvs->sync_mutex);
1931 rtnl_unlock(); 1935 rtnl_unlock();
1936
1937 /* decrease the module use count */
1938 ip_vs_use_count_dec();
1932 return result; 1939 return result;
1933} 1940}
1934 1941
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 132f5228b431..128245efe84a 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -202,6 +202,8 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
202{ 202{
203 int err; 203 int err;
204 204
205 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
206
205 err = rhashtable_insert_fast(&flow_table->rhashtable, 207 err = rhashtable_insert_fast(&flow_table->rhashtable,
206 &flow->tuplehash[0].node, 208 &flow->tuplehash[0].node,
207 nf_flow_offload_rhash_params); 209 nf_flow_offload_rhash_params);
@@ -218,7 +220,6 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
218 return err; 220 return err;
219 } 221 }
220 222
221 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
222 return 0; 223 return 0;
223} 224}
224EXPORT_SYMBOL_GPL(flow_offload_add); 225EXPORT_SYMBOL_GPL(flow_offload_add);
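
flow_offload_add() used to stamp the timeout only after rhashtable_insert_fast() had made the flow visible, so the garbage collector could observe a zero timeout and tear down a flow that was still being set up. The general rule is initialize-then-publish: every field a concurrent reader may inspect must be written before the object becomes reachable, as in the hunk above:

    /* Fill in every field a concurrent reader may look at ... */
    flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;

    /* ... and only then make the object reachable.  After this call
     * the GC can find the flow and compare its timeout.
     */
    err = rhashtable_insert_fast(&flow_table->rhashtable,
                                 &flow->tuplehash[0].node,
                                 nf_flow_offload_rhash_params);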
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index e546f759b7a7..ad783f4840ef 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -347,7 +347,7 @@ int nft_flow_rule_offload_commit(struct net *net)
347 347
348 policy = nft_trans_chain_policy(trans); 348 policy = nft_trans_chain_policy(trans);
349 err = nft_flow_offload_chain(trans->ctx.chain, &policy, 349 err = nft_flow_offload_chain(trans->ctx.chain, &policy,
350 FLOW_BLOCK_BIND); 350 FLOW_BLOCK_UNBIND);
351 break; 351 break;
352 case NFT_MSG_NEWRULE: 352 case NFT_MSG_NEWRULE:
353 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) 353 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 22a80eb60222..5cb2d8908d2a 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -161,13 +161,21 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
161 161
162 switch (priv->offset) { 162 switch (priv->offset) {
163 case offsetof(struct ethhdr, h_source): 163 case offsetof(struct ethhdr, h_source):
164 if (priv->len != ETH_ALEN)
165 return -EOPNOTSUPP;
166
164 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs, 167 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
165 src, ETH_ALEN, reg); 168 src, ETH_ALEN, reg);
166 break; 169 break;
167 case offsetof(struct ethhdr, h_dest): 170 case offsetof(struct ethhdr, h_dest):
171 if (priv->len != ETH_ALEN)
172 return -EOPNOTSUPP;
173
168 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs, 174 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
169 dst, ETH_ALEN, reg); 175 dst, ETH_ALEN, reg);
170 break; 176 break;
177 default:
178 return -EOPNOTSUPP;
171 } 179 }
172 180
173 return 0; 181 return 0;
@@ -181,14 +189,23 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
181 189
182 switch (priv->offset) { 190 switch (priv->offset) {
183 case offsetof(struct iphdr, saddr): 191 case offsetof(struct iphdr, saddr):
192 if (priv->len != sizeof(struct in_addr))
193 return -EOPNOTSUPP;
194
184 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src, 195 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
185 sizeof(struct in_addr), reg); 196 sizeof(struct in_addr), reg);
186 break; 197 break;
187 case offsetof(struct iphdr, daddr): 198 case offsetof(struct iphdr, daddr):
199 if (priv->len != sizeof(struct in_addr))
200 return -EOPNOTSUPP;
201
188 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst, 202 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
189 sizeof(struct in_addr), reg); 203 sizeof(struct in_addr), reg);
190 break; 204 break;
191 case offsetof(struct iphdr, protocol): 205 case offsetof(struct iphdr, protocol):
206 if (priv->len != sizeof(__u8))
207 return -EOPNOTSUPP;
208
192 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto, 209 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
193 sizeof(__u8), reg); 210 sizeof(__u8), reg);
194 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT); 211 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -208,14 +225,23 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
208 225
209 switch (priv->offset) { 226 switch (priv->offset) {
210 case offsetof(struct ipv6hdr, saddr): 227 case offsetof(struct ipv6hdr, saddr):
228 if (priv->len != sizeof(struct in6_addr))
229 return -EOPNOTSUPP;
230
211 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src, 231 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
212 sizeof(struct in6_addr), reg); 232 sizeof(struct in6_addr), reg);
213 break; 233 break;
214 case offsetof(struct ipv6hdr, daddr): 234 case offsetof(struct ipv6hdr, daddr):
235 if (priv->len != sizeof(struct in6_addr))
236 return -EOPNOTSUPP;
237
215 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst, 238 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
216 sizeof(struct in6_addr), reg); 239 sizeof(struct in6_addr), reg);
217 break; 240 break;
218 case offsetof(struct ipv6hdr, nexthdr): 241 case offsetof(struct ipv6hdr, nexthdr):
242 if (priv->len != sizeof(__u8))
243 return -EOPNOTSUPP;
244
219 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto, 245 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
220 sizeof(__u8), reg); 246 sizeof(__u8), reg);
221 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT); 247 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -255,10 +281,16 @@ static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
255 281
256 switch (priv->offset) { 282 switch (priv->offset) {
257 case offsetof(struct tcphdr, source): 283 case offsetof(struct tcphdr, source):
284 if (priv->len != sizeof(__be16))
285 return -EOPNOTSUPP;
286
258 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src, 287 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
259 sizeof(__be16), reg); 288 sizeof(__be16), reg);
260 break; 289 break;
261 case offsetof(struct tcphdr, dest): 290 case offsetof(struct tcphdr, dest):
291 if (priv->len != sizeof(__be16))
292 return -EOPNOTSUPP;
293
262 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst, 294 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
263 sizeof(__be16), reg); 295 sizeof(__be16), reg);
264 break; 296 break;
@@ -277,10 +309,16 @@ static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
277 309
278 switch (priv->offset) { 310 switch (priv->offset) {
279 case offsetof(struct udphdr, source): 311 case offsetof(struct udphdr, source):
312 if (priv->len != sizeof(__be16))
313 return -EOPNOTSUPP;
314
280 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src, 315 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
281 sizeof(__be16), reg); 316 sizeof(__be16), reg);
282 break; 317 break;
283 case offsetof(struct udphdr, dest): 318 case offsetof(struct udphdr, dest):
319 if (priv->len != sizeof(__be16))
320 return -EOPNOTSUPP;
321
284 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst, 322 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
285 sizeof(__be16), reg); 323 sizeof(__be16), reg);
286 break; 324 break;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index c4f54ad2b98a..58d5373c513c 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -64,28 +64,6 @@ static DEFINE_SPINLOCK(nr_list_lock);
 static const struct proto_ops nr_proto_ops;
 
 /*
- * NETROM network devices are virtual network devices encapsulating NETROM
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key nr_netdev_xmit_lock_key;
-static struct lock_class_key nr_netdev_addr_lock_key;
-
-static void nr_set_lockdep_one(struct net_device *dev,
-			       struct netdev_queue *txq,
-			       void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
-}
-
-static void nr_set_lockdep_key(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
-	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
-}
-
-/*
  * Socket removal during an interrupt is now safe.
  */
 static void nr_remove_socket(struct sock *sk)
@@ -1414,7 +1392,6 @@ static int __init nr_proto_init(void)
 			free_netdev(dev);
 			goto fail;
 		}
-		nr_set_lockdep_key(dev);
 		dev_nr[i] = dev;
 	}
 
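Note: this deletion (and the identical one in net/rose/af_rose.c below) removes per-driver lockdep boilerplate that nearly every stacking driver carried: a file-scope key plus a walker that reclassified each TX queue lock so nested transmits (virtual device on top of a real one) would not trip lockdep. A sketch of the retired pattern, with illustrative names:

    /* one static class per driver, applied to every TX queue */
    static struct lock_class_key xmit_lock_key;

    static void set_lockdep_one(struct net_device *dev,
                                struct netdev_queue *txq, void *unused)
    {
            lockdep_set_class(&txq->_xmit_lock, &xmit_lock_key);
    }

With this series the core assigns per-device keys at registration (see the net/core/dev.c entry in the diffstat and the dev->qdisc_tx_busylock_key use in the net/sched/sch_generic.c hunk below), so drivers need no such code.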
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ccdd790e163a..28604414dec1 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -554,11 +554,11 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_state == LLCP_LISTEN)
 		return llcp_accept_poll(sk);
 
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_state == LLCP_CLOSED)
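Note: this and the similar poll() conversions below (phonet, sctp, tipc, af_unix, vsock) address the same data race: poll() reads the queue without holding its lock, so a plain skb_queue_empty() pointer load races with concurrent enqueue/dequeue. The lockless variant introduced alongside these conversions is, essentially (reproduced for context, from include/linux/skbuff.h):

    static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
    {
            /* READ_ONCE() keeps the compiler from tearing or caching the
             * load; the race with producers/consumers is intentional */
            return READ_ONCE(list->next) == (const struct sk_buff *)list;
    }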
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f30e406fbec5..d8c364d637b1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1881,7 +1881,7 @@ static struct genl_family dp_datapath_genl_family __ro_after_init = {
 /* Called with ovs_mutex or RCU read lock. */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 				   struct net *net, u32 portid, u32 seq,
-				   u32 flags, u8 cmd)
+				   u32 flags, u8 cmd, gfp_t gfp)
 {
 	struct ovs_header *ovs_header;
 	struct ovs_vport_stats vport_stats;
@@ -1902,7 +1902,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 		goto nla_put_failure;
 
 	if (!net_eq(net, dev_net(vport->dev))) {
-		int id = peernet2id_alloc(net, dev_net(vport->dev));
+		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
 
 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
 			goto nla_put_failure;
@@ -1943,11 +1943,12 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
 	struct sk_buff *skb;
 	int retval;
 
-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
+	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
+					 GFP_KERNEL);
 	BUG_ON(retval < 0);
 
 	return skb;
@@ -2089,7 +2090,7 @@ restart:
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_NEW);
+				      OVS_VPORT_CMD_NEW, GFP_KERNEL);
 
 	new_headroom = netdev_get_fwd_headroom(vport->dev);
 
@@ -2150,7 +2151,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_SET);
+				      OVS_VPORT_CMD_SET, GFP_KERNEL);
 	BUG_ON(err < 0);
 
 	ovs_unlock();
@@ -2190,7 +2191,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_DEL);
+				      OVS_VPORT_CMD_DEL, GFP_KERNEL);
 	BUG_ON(err < 0);
 
 	/* the vport deletion may trigger dp headroom update */
@@ -2237,7 +2238,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto exit_unlock_free;
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_GET);
+				      OVS_VPORT_CMD_GET, GFP_ATOMIC);
 	BUG_ON(err < 0);
 	rcu_read_unlock();
 
@@ -2273,7 +2274,8 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 					    NETLINK_CB(cb->skb).portid,
 					    cb->nlh->nlmsg_seq,
 					    NLM_F_MULTI,
-					    OVS_VPORT_CMD_GET) < 0)
+					    OVS_VPORT_CMD_GET,
+					    GFP_ATOMIC) < 0)
 			goto out;
 
 		j++;
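Note: ovs_vport_cmd_fill_info() runs either under ovs_mutex (NEW/SET/DEL, where sleeping is fine) or under rcu_read_lock() (GET and dump, where it is not), and peernet2id_alloc() may allocate a netns id. Threading a gfp_t from each caller applies the usual convention, sketched with an illustrative helper (kernel context assumed):

    /* allocation flags follow the caller's locking context, not the
     * callee's convenience */
    static void *reply_alloc(size_t len, gfp_t gfp)
    {
            /* GFP_KERNEL under a mutex; GFP_ATOMIC under RCU/spinlock */
            return kzalloc(len, gfp);
    }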
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 21c90d3a7ebf..58a7b8312c28 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -137,7 +137,7 @@ static void do_setup(struct net_device *netdev)
 	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
 			      IFF_NO_QUEUE;
 	netdev->needs_free_netdev = true;
-	netdev->priv_destructor = internal_dev_destructor;
+	netdev->priv_destructor = NULL;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
 	netdev->rtnl_link_ops = &internal_dev_link_ops;
 
@@ -159,7 +159,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 	struct internal_dev *internal_dev;
 	struct net_device *dev;
 	int err;
-	bool free_vport = true;
 
 	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
 	if (IS_ERR(vport)) {
@@ -190,10 +189,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
 	rtnl_lock();
 	err = register_netdevice(vport->dev);
-	if (err) {
-		free_vport = false;
+	if (err)
 		goto error_unlock;
-	}
+	vport->dev->priv_destructor = internal_dev_destructor;
 
 	dev_set_promiscuity(vport->dev, 1);
 	rtnl_unlock();
@@ -207,8 +205,7 @@ error_unlock:
 error_free_netdev:
 	free_netdev(dev);
 error_free_vport:
-	if (free_vport)
-		ovs_vport_free(vport);
+	ovs_vport_free(vport);
 error:
 	return ERR_PTR(err);
 }
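Note: this fix is about ownership on the error path. priv_destructor used to be set in do_setup(), so a failed register_netdevice() could free the vport via the destructor and then again via the error path; the free_vport flag papered over that. Setting the destructor only after registration succeeds leaves exactly one owner at every point (illustrative names, kernel context assumed):

    static void my_destructor(struct net_device *dev);  /* frees priv */

    static int my_register(struct net_device *dev)
    {
            int err = register_netdevice(dev);

            if (err)
                    return err;     /* caller still owns its private data */
            dev->priv_destructor = my_destructor;   /* core owns it now */
            return 0;
    }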
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 96ea9f254ae9..76d499f6af9a 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -338,9 +338,9 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
 
 	if (sk->sk_state == TCP_CLOSE)
 		return EPOLLERR;
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	if (!skb_queue_empty(&pn->ctrlreq_queue))
+	if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
 		mask |= EPOLLPRI;
 	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
 		return EPOLLHUP;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index f0e9ccf472a9..6a0df7c8a939 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -65,28 +65,6 @@ static const struct proto_ops rose_proto_ops;
 ax25_address rose_callsign;
 
 /*
- * ROSE network devices are virtual network devices encapsulating ROSE
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key rose_netdev_xmit_lock_key;
-static struct lock_class_key rose_netdev_addr_lock_key;
-
-static void rose_set_lockdep_one(struct net_device *dev,
-				 struct netdev_queue *txq,
-				 void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
-}
-
-static void rose_set_lockdep_key(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
-	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
-}
-
-/*
  * Convert a ROSE address into text.
  */
 char *rose2asc(char *buf, const rose_address *addr)
@@ -1533,7 +1511,6 @@ static int __init rose_proto_init(void)
 			free_netdev(dev);
 			goto fail;
 		}
-		rose_set_lockdep_key(dev);
 		dev_rose[i] = dev;
 	}
 
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index ecc17dabec8f..7c7d10f2e0c1 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -601,6 +601,7 @@ struct rxrpc_call {
 	int			debug_id;	/* debug ID for printks */
 	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
 	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */
+	bool			rx_pkt_last;	/* Current recvmsg packet is last */
 
 	/* Rx/Tx circular buffer, depending on phase.
 	 *
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index a4090797c9b2..8578c39ec839 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -267,11 +267,13 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
  */
 static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 			     u8 *_annotation,
-			     unsigned int *_offset, unsigned int *_len)
+			     unsigned int *_offset, unsigned int *_len,
+			     bool *_last)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	unsigned int offset = sizeof(struct rxrpc_wire_header);
 	unsigned int len;
+	bool last = false;
 	int ret;
 	u8 annotation = *_annotation;
 	u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
@@ -281,6 +283,8 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 	len = skb->len - offset;
 	if (subpacket < sp->nr_subpackets - 1)
 		len = RXRPC_JUMBO_DATALEN;
+	else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
+		last = true;
 
 	if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
 		ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -291,6 +295,7 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 
 	*_offset = offset;
 	*_len = len;
+	*_last = last;
 	call->security->locate_data(call, skb, _offset, _len);
 	return 0;
 }
@@ -309,7 +314,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 	rxrpc_serial_t serial;
 	rxrpc_seq_t hard_ack, top, seq;
 	size_t remain;
-	bool last;
+	bool rx_pkt_last;
 	unsigned int rx_pkt_offset, rx_pkt_len;
 	int ix, copy, ret = -EAGAIN, ret2;
 
@@ -319,6 +324,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 
 	rx_pkt_offset = call->rx_pkt_offset;
 	rx_pkt_len = call->rx_pkt_len;
+	rx_pkt_last = call->rx_pkt_last;
 
 	if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
 		seq = call->rx_hard_ack;
@@ -329,6 +335,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 	/* Barriers against rxrpc_input_data(). */
 	hard_ack = call->rx_hard_ack;
 	seq = hard_ack + 1;
+
 	while (top = smp_load_acquire(&call->rx_top),
 	       before_eq(seq, top)
 	       ) {
@@ -356,7 +363,8 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 		if (rx_pkt_offset == 0) {
 			ret2 = rxrpc_locate_data(call, skb,
 						 &call->rxtx_annotations[ix],
-						 &rx_pkt_offset, &rx_pkt_len);
+						 &rx_pkt_offset, &rx_pkt_len,
+						 &rx_pkt_last);
 			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
 					    rx_pkt_offset, rx_pkt_len, ret2);
 			if (ret2 < 0) {
@@ -396,13 +404,12 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 		}
 
 		/* The whole packet has been transferred. */
-		last = sp->hdr.flags & RXRPC_LAST_PACKET;
 		if (!(flags & MSG_PEEK))
 			rxrpc_rotate_rx_window(call);
 		rx_pkt_offset = 0;
 		rx_pkt_len = 0;
 
-		if (last) {
+		if (rx_pkt_last) {
 			ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
 			ret = 1;
 			goto out;
@@ -415,6 +422,7 @@ out:
 	if (!(flags & MSG_PEEK)) {
 		call->rx_pkt_offset = rx_pkt_offset;
 		call->rx_pkt_len = rx_pkt_len;
+		call->rx_pkt_last = rx_pkt_last;
 	}
 done:
 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
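Note: recvmsg() here is resumable. A partially consumed packet's offset and length were already persisted in the call between syscalls, but whether that packet was the last one was recomputed from sp->hdr.flags, which goes wrong once jumbo packets are split into subpackets (only the final subpacket of the final packet is last). The fix makes "last" part of the saved parse state. The invariant, modeled as a self-contained sketch (not rxrpc code):

    #include <stdbool.h>

    /* a resumable reader must persist every per-packet property it
     * consumed, or the next call sees stale state */
    struct reader_state {
            unsigned int off;   /* resume offset into current packet */
            unsigned int len;   /* bytes left in current packet */
            bool last;          /* current packet carries the LAST flag */
    };

    static void save_state(struct reader_state *st, unsigned int off,
                           unsigned int len, bool last)
    {
            st->off = off;
            st->len = len;
            st->last = last;    /* the field this patch adds */
    }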
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index bf10bdaf5012..8229ed4a67be 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -162,16 +162,20 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	cls_bpf.name = obj->bpf_name;
 	cls_bpf.exts_integrated = obj->exts_integrated;
 
-	if (oldprog)
+	if (oldprog && prog)
 		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
 					  skip_sw, &oldprog->gen_flags,
 					  &oldprog->in_hw_count,
 					  &prog->gen_flags, &prog->in_hw_count,
 					  true);
-	else
+	else if (prog)
 		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
 				      skip_sw, &prog->gen_flags,
 				      &prog->in_hw_count, true);
+	else
+		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
+					  skip_sw, &oldprog->gen_flags,
+					  &oldprog->in_hw_count, true);
 
 	if (prog && err) {
 		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
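Note: cls_bpf_offload_cmd() is called in three shapes: add (prog only), replace (both prog and oldprog), and destroy (oldprog only). The old two-way test routed the destroy case into the replace/add paths with a NULL prog. Reduced to a dispatch function (illustrative):

    enum cmd { CMD_ADD, CMD_REPLACE, CMD_DESTROY };

    static enum cmd classify(const void *prog, const void *oldprog)
    {
            if (prog && oldprog)
                    return CMD_REPLACE;
            if (prog)
                    return CMD_ADD;
            return CMD_DESTROY;     /* the case that was mishandled */
    }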
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 17bd8f539bc7..8769b4b8807d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -799,9 +799,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
-static struct lock_class_key qdisc_tx_busylock;
-static struct lock_class_key qdisc_running_key;
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack)
@@ -854,17 +851,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	}
 
 	spin_lock_init(&sch->busylock);
-	lockdep_set_class(&sch->busylock,
-			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
 	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
 	spin_lock_init(&sch->seqlock);
-	lockdep_set_class(&sch->busylock,
-			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
 	seqcount_init(&sch->running);
-	lockdep_set_class(&sch->running,
-			  dev->qdisc_running_key ?: &qdisc_running_key);
 
 	sch->ops = ops;
 	sch->flags = ops->static_flags;
@@ -875,6 +864,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	dev_hold(dev);
 	refcount_set(&sch->refcnt, 1);
 
+	if (sch != &noop_qdisc) {
+		lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
+		lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
+		lockdep_set_class(&sch->running, &dev->qdisc_running_key);
+	}
+
 	return sch;
 errout1:
 	kfree(p);
@@ -1043,6 +1038,8 @@ static void attach_one_default_qdisc(struct net_device *dev,
 
 	if (dev->priv_flags & IFF_NO_QUEUE)
 		ops = &noqueue_qdisc_ops;
+	else if (dev->type == ARPHRD_CAN)
+		ops = &pfifo_fast_ops;
 
 	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
 	if (!qdisc) {
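Note: two independent changes here. First, the lockdep keys move from file-scope statics into struct net_device, so every device gets its own class and the old dev->qdisc_tx_busylock override hook disappears; noop_qdisc is skipped because it is a static singleton not tied to a real device. The implied core-side fields (added in the net/core part of this merge; names taken from the hunk above, layout approximate) are:

    struct net_device {
            /* ... */
            struct lock_class_key   qdisc_tx_busylock_key;
            struct lock_class_key   qdisc_running_key;
            /* ... */
    };

Second, ARPHRD_CAN devices are pinned to pfifo_fast rather than the configurable default qdisc, presumably because defaults such as fq_codel can reorder frames, which CAN traffic cannot tolerate.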
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 23cd1c873a2c..be35f03b657b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -5,11 +5,11 @@
  * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
 */
 
-#include <linux/jhash.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
+#include <linux/siphash.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 
@@ -126,7 +126,7 @@ struct wdrr_bucket {
 
 struct hhf_sched_data {
 	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
-	u32		   perturbation;   /* hash perturbation */
+	siphash_key_t	   perturbation;   /* hash perturbation */
 	u32		   quantum;	   /* psched_mtu(qdisc_dev(sch)); */
 	u32		   drop_overlimit; /* number of times max qdisc packet
 					    * limit was hit
@@ -264,7 +264,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	/* Get hashed flow-id of the skb. */
-	hash = skb_get_hash_perturb(skb, q->perturbation);
+	hash = skb_get_hash_perturb(skb, &q->perturbation);
 
 	/* Check if this packet belongs to an already established HH flow. */
 	flow_pos = hash & HHF_BIT_MASK;
@@ -582,7 +582,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
 
 	sch->limit = 1000;
 	q->quantum = psched_mtu(qdisc_dev(sch));
-	q->perturbation = prandom_u32();
+	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 	INIT_LIST_HEAD(&q->new_buckets);
 	INIT_LIST_HEAD(&q->old_buckets);
 
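Note: this and the sch_sfb.c/sch_sfq.c conversions below replace jhash seeded with a 32-bit prandom value by siphash with a 128-bit random key. jhash is not a PRF; with a guessable 32-bit seed an off-path sender can construct packets that collide into one bucket and degrade the scheduler. The keyed pattern in isolation (kernel context assumed):

    #include <linux/siphash.h>
    #include <linux/random.h>

    static siphash_key_t flow_key;

    static void flow_hash_init(void)
    {
            get_random_bytes(&flow_key, sizeof(flow_key));  /* 128-bit key */
    }

    static u32 flow_hash(u32 salt)
    {
            /* siphash_1u32() returns u64; fold to the 32 bits callers use */
            return (u32)siphash_1u32(salt, &flow_key);
    }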
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index d448fe3068e5..4074c50ac3d7 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -18,7 +18,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -45,7 +45,7 @@ struct sfb_bucket {
  * (Section 4.4 of SFB reference : moving hash functions)
 */
 struct sfb_bins {
-	u32		  perturbation; /* jhash perturbation */
+	siphash_key_t	  perturbation; /* siphash key */
 	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
 };
 
@@ -217,7 +217,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
 
 static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
 {
-	q->bins[slot].perturbation = prandom_u32();
+	get_random_bytes(&q->bins[slot].perturbation,
+			 sizeof(q->bins[slot].perturbation));
 }
 
 static void sfb_swap_slot(struct sfb_sched_data *q)
@@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		/* If using external classifiers, get result and record it. */
 		if (!sfb_classify(skb, fl, &ret, &salt))
 			goto other_drop;
-		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
 	} else {
-		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
+		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
 	}
 
 
@@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		/* Inelastic flow */
 		if (q->double_buffering) {
 			sfbhash = skb_get_hash_perturb(skb,
-						       q->bins[slot].perturbation);
+						       &q->bins[slot].perturbation);
 			if (!sfbhash)
 				sfbhash = 1;
 			sfb_skb_cb(skb)->hashes[slot] = sfbhash;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 68404a9d2ce4..c787d4d46017 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -14,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
@@ -117,7 +117,7 @@ struct sfq_sched_data {
 	u8		headdrop;
 	u8		maxdepth;	/* limit of packets per flow */
 
-	u32		perturbation;
+	siphash_key_t	perturbation;
 	u8		cur_depth;	/* depth of longest slot */
 	u8		flags;
 	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
@@ -157,7 +157,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
 			     const struct sk_buff *skb)
 {
-	return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
+	return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -607,9 +607,11 @@ static void sfq_perturbation(struct timer_list *t)
 	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
 	struct Qdisc *sch = q->sch;
 	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+	siphash_key_t nkey;
 
+	get_random_bytes(&nkey, sizeof(nkey));
 	spin_lock(root_lock);
-	q->perturbation = prandom_u32();
+	q->perturbation = nkey;
 	if (!q->filter_list && q->tail)
 		sfq_rehash(sch);
 	spin_unlock(root_lock);
@@ -688,7 +690,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 	del_timer(&q->perturb_timer);
 	if (q->perturb_period) {
 		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
-		q->perturbation = prandom_u32();
+		get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 	}
 	sch_tree_unlock(sch);
 	kfree(p);
@@ -745,7 +747,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
 	q->quantum = psched_mtu(qdisc_dev(sch));
 	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
 	q->perturb_period = 0;
-	q->perturbation = prandom_u32();
+	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 
 	if (opt) {
 		int err = sfq_change(sch, opt);
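Note the ordering detail in the sfq_perturbation() hunk: the new key is generated into a local before the root lock is taken, so the qdisc tree is only held for a struct copy rather than for however long get_random_bytes() takes. As a standalone pattern (illustrative function name, kernel context assumed):

    static void rekey(struct sfq_sched_data *q, spinlock_t *root_lock)
    {
            siphash_key_t nkey;

            get_random_bytes(&nkey, sizeof(nkey)); /* unlocked: may be slow */
            spin_lock(root_lock);
            q->perturbation = nkey;                /* cheap copy while held */
            spin_unlock(root_lock);
    }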
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 6719a65169d4..2121187229cd 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1152,7 +1152,7 @@ EXPORT_SYMBOL_GPL(taprio_offload_free);
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
-void taprio_offload_config_changed(struct taprio_sched *q)
+static void taprio_offload_config_changed(struct taprio_sched *q)
 {
 	struct sched_gate_list *oper, *admin;
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5ca0ec0e823c..ffd3262b7a41 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8476,7 +8476,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	mask = 0;
 
 	/* Is there any exceptional events? */
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -8485,7 +8485,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		mask |= EPOLLHUP;
 
 	/* Is it readable?  Reconsider this code with TCP-style support. */
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* The association is either gone or not ready. */
@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 		if (sk_can_busy_loop(sk)) {
 			sk_busy_loop(sk, noblock);
 
-			if (!skb_queue_empty(&sk->sk_receive_queue))
+			if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 				continue;
 		}
 
@@ -9306,7 +9306,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
 	newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
 	newinet->inet_dport = htons(asoc->peer.port);
 	newinet->pmtudisc = inet->pmtudisc;
-	newinet->inet_id = asoc->next_tsn ^ jiffies;
+	newinet->inet_id = prandom_u32();
 
 	newinet->uc_ttl = inet->uc_ttl;
 	newinet->mc_loop = 1;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5b932583e407..47946f489fd4 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -123,6 +123,12 @@ struct proto smc_proto6 = {
 };
 EXPORT_SYMBOL_GPL(smc_proto6);
 
+static void smc_restore_fallback_changes(struct smc_sock *smc)
+{
+	smc->clcsock->file->private_data = smc->sk.sk_socket;
+	smc->clcsock->file = NULL;
+}
+
 static int __smc_release(struct smc_sock *smc)
 {
 	struct sock *sk = &smc->sk;
@@ -141,6 +147,7 @@ static int __smc_release(struct smc_sock *smc)
 		}
 		sk->sk_state = SMC_CLOSED;
 		sk->sk_state_change(sk);
+		smc_restore_fallback_changes(smc);
 	}
 
 	sk->sk_prot->unhash(sk);
@@ -700,8 +707,6 @@ static int __smc_connect(struct smc_sock *smc)
 	int smc_type;
 	int rc = 0;
 
-	sock_hold(&smc->sk); /* sock put in passive closing */
-
 	if (smc->use_fallback)
 		return smc_connect_fallback(smc, smc->fallback_rsn);
 
@@ -846,6 +851,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 	rc = kernel_connect(smc->clcsock, addr, alen, flags);
 	if (rc && rc != -EINPROGRESS)
 		goto out;
+
+	sock_hold(&smc->sk); /* sock put in passive closing */
 	if (flags & O_NONBLOCK) {
 		if (schedule_work(&smc->connect_work))
 			smc->connect_nonblock = 1;
@@ -1291,8 +1298,8 @@ static void smc_listen_work(struct work_struct *work)
 	/* check if RDMA is available */
 	if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
 		/* prepare RDMA check */
-		memset(&ini, 0, sizeof(ini));
 		ini.is_smcd = false;
+		ini.ism_dev = NULL;
 		ini.ib_lcl = &pclc->lcl;
 		rc = smc_find_rdma_device(new_smc, &ini);
 		if (rc) {
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 88556f0251ab..2ba97ff325a5 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -561,7 +561,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
 	}
 
 	rtnl_lock();
-	nest_lvl = dev_get_nest_level(ndev);
+	nest_lvl = ndev->lower_level;
 	for (i = 0; i < nest_lvl; i++) {
 		struct list_head *lower = &ndev->adj_list.lower;
 
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index bab2da8cf17a..2920b006f65c 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -718,7 +718,7 @@ static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
 	int i, nest_lvl;
 
 	rtnl_lock();
-	nest_lvl = dev_get_nest_level(ndev);
+	nest_lvl = ndev->lower_level;
 	for (i = 0; i < nest_lvl; i++) {
 		struct list_head *lower = &ndev->adj_list.lower;
 
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 339e8c077c2d..195b40c5dae4 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -220,7 +220,7 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
 		goto out;
 
 	spin_lock_bh(&xprt->bc_pa_lock);
-	xprt->bc_alloc_max -= max_reqs;
+	xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
 	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
 		dprintk("RPC:        req=%p\n", req);
 		list_del(&req->rq_bc_pa_list);
@@ -307,8 +307,8 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
 		 */
 		dprintk("RPC:       Last session removed req=%p\n", req);
 		xprt_free_allocation(req);
-		return;
 	}
+	xprt_put(xprt);
 }
 
 /*
@@ -339,7 +339,7 @@ found:
 	spin_unlock(&xprt->bc_pa_lock);
 	if (new) {
 		if (req != new)
-			xprt_free_bc_rqst(new);
+			xprt_free_allocation(new);
 		break;
 	} else if (req)
 		break;
@@ -368,6 +368,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
 	dprintk("RPC:       add callback request to list\n");
+	xprt_get(xprt);
 	spin_lock(&bc_serv->sv_cb_lock);
 	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
 	wake_up(&bc_serv->sv_cb_waitq);
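Note: together with the net/sunrpc/xprt.c and xprtrdma hunks below, this establishes a reference pairing for backchannel requests: the transport is pinned (xprt_get()) when a request is queued for the callback service and released (xprt_put()) when the request is freed back to the pool, so the xprt cannot be destroyed under an in-flight callback. The pairing in miniature (kernel context assumed; list name illustrative):

    static LIST_HEAD(cb_list);

    static void queue_req(struct rpc_xprt *xprt, struct rpc_rqst *req)
    {
            xprt_get(xprt);                 /* ref held by the queued req */
            list_add(&req->rq_bc_list, &cb_list);
    }

    static void free_req(struct rpc_xprt *xprt, struct rpc_rqst *req)
    {
            /* ... return req to the pool ... */
            xprt_put(xprt);                 /* drop the queued-req ref */
    }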
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 8a45b3ccc313..41df4c507193 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1943,6 +1943,11 @@ static void xprt_destroy_cb(struct work_struct *work)
 	rpc_destroy_wait_queue(&xprt->backlog);
 	kfree(xprt->servername);
 	/*
+	 * Destroy any existing back channel
+	 */
+	xprt_destroy_backchannel(xprt, UINT_MAX);
+
+	/*
 	 * Tear down transport state and free the rpc_xprt
 	 */
 	xprt->ops->destroy(xprt);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 50e075fcdd8f..b458bf53ca69 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -163,6 +163,7 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
 	spin_lock(&xprt->bc_pa_lock);
 	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
 	spin_unlock(&xprt->bc_pa_lock);
+	xprt_put(xprt);
 }
 
 static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
@@ -259,6 +260,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
 
 	/* Queue rqst for ULP's callback service */
 	bc_serv = xprt->bc_serv;
+	xprt_get(xprt);
 	spin_lock(&bc_serv->sv_cb_lock);
 	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
 	spin_unlock(&bc_serv->sv_cb_lock);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f8bbc4aab213..4b92b196cfa6 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -740,7 +740,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
 		/* fall through */
 	case TIPC_LISTEN:
 	case TIPC_CONNECTING:
-		if (!skb_queue_empty(&sk->sk_receive_queue))
+		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 			revents |= EPOLLIN | EPOLLRDNORM;
 		break;
 	case TIPC_OPEN:
@@ -748,7 +748,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
 			revents |= EPOLLOUT;
 		if (!tipc_sk_type_connectionless(sk))
 			break;
-		if (skb_queue_empty(&sk->sk_receive_queue))
+		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
 			break;
 		revents |= EPOLLIN | EPOLLRDNORM;
 		break;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 67e87db5877f..0d8da809bea2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2599,7 +2599,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
@@ -2628,7 +2628,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 	mask = 0;
 
 	/* exceptional events? */
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -2638,7 +2638,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 		mask |= EPOLLHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 2ab43b2bba31..582a3e4dfce2 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -870,7 +870,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
 		 * the queue and write as long as the socket isn't shutdown for
 		 * sending.
 		 */
-		if (!skb_queue_empty(&sk->sk_receive_queue) ||
+		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
 		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
 			mask |= EPOLLIN | EPOLLRDNORM;
 		}
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index e851cafd8e2f..fcac5c6366e1 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -204,6 +204,11 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
 			return false;
 	}
 
+	/* channel 14 is only for IEEE 802.11b */
+	if (chandef->center_freq1 == 2484 &&
+	    chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
+		return false;
+
 	if (cfg80211_chandef_is_edmg(chandef) &&
 	    !cfg80211_edmg_chandef_valid(chandef))
 		return false;
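Note: 2484 MHz is 2.4 GHz channel 14, which IEEE 802.11 permits only for 802.11b-style 20 MHz no-HT operation. This hunk and the net/wireless/util.c hunk below enforce the same predicate at chandef validation and at operating-class mapping; as a helper it would read (hypothetical, not in the patch):

    static bool cfg80211_chan14_width_ok(enum nl80211_chan_width width)
    {
            /* channel 14 is IEEE 802.11b only: no HT/VHT widths */
            return width == NL80211_CHAN_WIDTH_20_NOHT;
    }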
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4453dd375de9..7b72286922f7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -393,7 +393,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 	[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
 	[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
 				   .len = IEEE80211_MAX_MESH_ID_LEN },
-	[NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
+	[NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
 
 	[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
 	[NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 419eb12c1e93..5b4ed5bbc542 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1559,7 +1559,8 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
 	}
 
 	if (freq == 2484) {
-		if (chandef->width > NL80211_CHAN_WIDTH_40)
+		/* channel 14 is only for IEEE 802.11b */
+		if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
 			return false;
 
 		*op_class = 82; /* channel 14 */
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 16d5f353163a..3049af269fbf 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -27,6 +27,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
 	unsigned long flags;
 
+	if (!xs->tx)
+		return;
+
 	spin_lock_irqsave(&umem->xsk_list_lock, flags);
 	list_add_rcu(&xs->list, &umem->xsk_list);
 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
@@ -36,6 +39,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
 	unsigned long flags;
 
+	if (!xs->tx)
+		return;
+
 	spin_lock_irqsave(&umem->xsk_list_lock, flags);
 	list_del_rcu(&xs->list);
 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
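Note: umem->xsk_list appears to be consulted only by the TX path, so RX-only XDP sockets no longer join it; the matching guard in xdp_del_sk_umem() keeps add and delete symmetric for sockets that were never listed. The membership rule as a predicate (hypothetical helper; xs->tx is the socket's TX ring):

    static bool xsk_on_tx_list(const struct xdp_sock *xs)
    {
            return xs->tx != NULL;  /* only TX-capable sockets are listed */
    }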