author	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-01 20:48:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-01 20:48:11 -0400
commit	1204c70d9dcba31164f78ad5d8c88c42335d51f8 (patch)
tree	acb4728a02f13a6c547518f22f0d60a5ef7eaeb1
parent	372bf6c1c8f9712e7765acad568a6d7ed4e8d6c0 (diff)
parent	aeb1b85c340c54dc1d68ff96b02d439d6a4f7150 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) Fix free/alloc races in batmanadv, from Sven Eckelmann.

 2) Several leaks and other fixes in kTLS support of mlx5 driver, from
    Tariq Toukan.

 3) BPF devmap_hash cost calculation can overflow on 32-bit, from Toke
    Høiland-Jørgensen.

 4) Add an r8152 device ID, from Kazutoshi Noguchi.

 5) Missing include in ipv6's addrconf.c, from Ben Dooks.

 6) Use siphash in flow dissector, from Eric Dumazet. Attackers can
    easily infer the 32-bit secret otherwise etc.

 7) Several netdevice nesting depth fixes from Taehee Yoo.

 8) Fix several KCSAN reported errors, from Eric Dumazet. For example,
    when doing lockless skb_queue_empty() checks, and accessing
    sk_napi_id/sk_incoming_cpu lockless as well.

 9) Fix jumbo packet handling in RXRPC, from David Howells.

10) Bump SOMAXCONN and tcp_max_syn_backlog values, from Eric Dumazet.

11) Fix DMA synchronization in gve driver, from Yangchun Fu.

12) Several bpf offload fixes, from Jakub Kicinski.

13) Fix sk_page_frag() recursion during memory reclaim, from Tejun Heo.

14) Fix ping latency during high traffic rates in hisilicon driver, from
    Jiangfent Xiao.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (146 commits)
  net: fix installing orphaned programs
  net: cls_bpf: fix NULL deref on offload filter removal
  selftests: bpf: Skip write only files in debugfs
  selftests: net: reuseport_dualstack: fix uninitalized parameter
  r8169: fix wrong PHY ID issue with RTL8168dp
  net: dsa: bcm_sf2: Fix IMP setup for port different than 8
  net: phylink: Fix phylink_dbg() macro
  gve: Fixes DMA synchronization.
  inet: stop leaking jiffies on the wire
  ixgbe: Remove duplicate clear_bit() call
  Documentation: networking: device drivers: Remove stray asterisks
  e1000: fix memory leaks
  i40e: Fix receive buffer starvation for AF_XDP
  igb: Fix constant media auto sense switching when no cable is connected
  net: ethernet: arc: add the missed clk_disable_unprepare
  igb: Enable media autosense for the i350.
  igb/igc: Don't warn on fatal read failures when the device is removed
  tcp: increase tcp_max_syn_backlog max value
  net: increase SOMAXCONN to 4096
  netdevsim: Fix use-after-free during device dismantle
  ...
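Item 8 above refers to the skb_queue_empty_lockless() conversions that appear
throughout this diff (see the chtls and capi hunks below). A minimal sketch of
the pattern, as an illustration rather than the exact kernel source:

	/* A lockless reader must not let the compiler reload or tear the
	 * queue head pointer while writers link and unlink skbs; READ_ONCE()
	 * forces a single, untorn load. An empty circular list points back
	 * at its own head.
	 */
	static inline bool queue_empty_lockless(const struct sk_buff_head *list)
	{
		return READ_ONCE(list->next) == (const struct sk_buff *)list;
	}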
-rw-r--r--	Documentation/networking/device_drivers/intel/e100.rst	14
-rw-r--r--	Documentation/networking/device_drivers/intel/e1000.rst	12
-rw-r--r--	Documentation/networking/device_drivers/intel/e1000e.rst	14
-rw-r--r--	Documentation/networking/device_drivers/intel/fm10k.rst	10
-rw-r--r--	Documentation/networking/device_drivers/intel/i40e.rst	8
-rw-r--r--	Documentation/networking/device_drivers/intel/iavf.rst	8
-rw-r--r--	Documentation/networking/device_drivers/intel/ice.rst	6
-rw-r--r--	Documentation/networking/device_drivers/intel/igb.rst	12
-rw-r--r--	Documentation/networking/device_drivers/intel/igbvf.rst	6
-rw-r--r--	Documentation/networking/device_drivers/intel/ixgbe.rst	10
-rw-r--r--	Documentation/networking/device_drivers/intel/ixgbevf.rst	6
-rw-r--r--	Documentation/networking/device_drivers/pensando/ionic.rst	6
-rw-r--r--	Documentation/networking/ip-sysctl.txt	11
-rw-r--r--	MAINTAINERS	1
-rw-r--r--	drivers/crypto/chelsio/chtls/chtls_cm.c	2
-rw-r--r--	drivers/crypto/chelsio/chtls/chtls_io.c	2
-rw-r--r--	drivers/isdn/capi/capi.c	2
-rw-r--r--	drivers/net/bonding/bond_alb.c	2
-rw-r--r--	drivers/net/bonding/bond_main.c	28
-rw-r--r--	drivers/net/dsa/bcm_sf2.c	36
-rw-r--r--	drivers/net/dsa/sja1105/Kconfig	4
-rw-r--r--	drivers/net/ethernet/arc/emac_rockchip.c	3
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt.c	10
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c	112
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h	3
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c	28
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/sge.c	8
-rw-r--r--	drivers/net/ethernet/cortina/gemini.h	2
-rw-r--r--	drivers/net/ethernet/faraday/ftgmac100.c	25
-rw-r--r--	drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h	2
-rw-r--r--	drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h	2
-rw-r--r--	drivers/net/ethernet/freescale/dpaa2/dprtc.h	2
-rw-r--r--	drivers/net/ethernet/freescale/fec_main.c	2
-rw-r--r--	drivers/net/ethernet/freescale/fec_ptp.c	4
-rw-r--r--	drivers/net/ethernet/google/gve/gve_rx.c	2
-rw-r--r--	drivers/net/ethernet/google/gve/gve_tx.c	24
-rw-r--r--	drivers/net/ethernet/hisilicon/hip04_eth.c	16
-rw-r--r--	drivers/net/ethernet/intel/e1000/e1000_ethtool.c	7
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_xsk.c	5
-rw-r--r--	drivers/net/ethernet/intel/igb/e1000_82575.c	2
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	8
-rw-r--r--	drivers/net/ethernet/intel/igc/igc_main.c	3
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	1
-rw-r--r--	drivers/net/ethernet/marvell/mvneta_bm.h	32
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/resource_tracker.c	42
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en.h	3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c	4
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c	12
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h	13
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h	29
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c	190
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c	13
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rep.c	4
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c	5
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c	15
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_stats.c	16
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_stats.h	10
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_tc.c	36
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_tx.c	35
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c	1
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c	22
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c	4
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c	3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/health.c	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlxsw/core.c	4
-rw-r--r--	drivers/net/ethernet/mscc/ocelot.c	11
-rw-r--r--	drivers/net/ethernet/netronome/nfp/nfp_net_repr.c	18
-rw-r--r--	drivers/net/ethernet/pensando/ionic/ionic_lif.c	2
-rw-r--r--	drivers/net/ethernet/pensando/ionic/ionic_main.c	2
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_main.c	27
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_sriov.c	2
-rw-r--r--	drivers/net/ethernet/realtek/r8169_main.c	4
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	1
-rw-r--r--	drivers/net/fjes/fjes_main.c	15
-rw-r--r--	drivers/net/hamradio/bpqether.c	22
-rw-r--r--	drivers/net/hyperv/netvsc_drv.c	15
-rw-r--r--	drivers/net/ipvlan/ipvlan_main.c	2
-rw-r--r--	drivers/net/macsec.c	18
-rw-r--r--	drivers/net/macvlan.c	19
-rw-r--r--	drivers/net/netdevsim/dev.c	5
-rw-r--r--	drivers/net/phy/phylink.c	16
-rw-r--r--	drivers/net/phy/smsc.c	1
-rw-r--r--	drivers/net/ppp/ppp_generic.c	2
-rw-r--r--	drivers/net/team/team.c	16
-rw-r--r--	drivers/net/usb/cdc_ether.c	7
-rw-r--r--	drivers/net/usb/lan78xx.c	5
-rw-r--r--	drivers/net/usb/r8152.c	1
-rw-r--r--	drivers/net/vrf.c	1
-rw-r--r--	drivers/net/vxlan.c	62
-rw-r--r--	drivers/net/wimax/i2400m/op-rfkill.c	2
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/fw/api/scan.h	22
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/fw/file.h	3
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/iwl-csr.h	1
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/iwl-prph.h	5
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/mvm.h	6
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/scan.c	40
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.c	140
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/drv.c	131
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c	25
-rw-r--r--	drivers/net/wireless/intersil/hostap/hostap_hw.c	25
-rw-r--r--	drivers/net/wireless/mediatek/mt76/Makefile	2
-rw-r--r--	drivers/net/wireless/mediatek/mt76/dma.c	6
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76.h	6
-rw-r--r--	drivers/net/wireless/mediatek/mt76/mt76x2/pci.c	2
-rw-r--r--	drivers/net/wireless/mediatek/mt76/pci.c	46
-rw-r--r--	drivers/net/wireless/realtek/rtlwifi/pci.c	3
-rw-r--r--	drivers/net/wireless/realtek/rtlwifi/ps.c	6
-rw-r--r--	drivers/net/wireless/virt_wifi.c	54
-rw-r--r--	drivers/nvme/host/tcp.c	2
-rw-r--r--	include/linux/dynamic_debug.h	6
-rw-r--r--	include/linux/filter.h	1
-rw-r--r--	include/linux/gfp.h	23
-rw-r--r--	include/linux/if_macvlan.h	1
-rw-r--r--	include/linux/if_team.h	1
-rw-r--r--	include/linux/if_vlan.h	11
-rw-r--r--	include/linux/mlx5/mlx5_ifc.h	3
-rw-r--r--	include/linux/netdevice.h	61
-rw-r--r--	include/linux/skbuff.h	36
-rw-r--r--	include/linux/socket.h	2
-rw-r--r--	include/net/bonding.h	2
-rw-r--r--	include/net/busy_poll.h	6
-rw-r--r--	include/net/flow_dissector.h	3
-rw-r--r--	include/net/fq.h	2
-rw-r--r--	include/net/fq_impl.h	4
-rw-r--r--	include/net/hwbm.h	10
-rw-r--r--	include/net/ip.h	4
-rw-r--r--	include/net/ip_vs.h	1
-rw-r--r--	include/net/net_namespace.h	2
-rw-r--r--	include/net/sock.h	15
-rw-r--r--	include/net/vxlan.h	1
-rw-r--r--	kernel/bpf/core.c	2
-rw-r--r--	kernel/bpf/devmap.c	33
-rw-r--r--	kernel/bpf/syscall.c	31
-rw-r--r--	net/8021q/vlan.c	1
-rw-r--r--	net/8021q/vlan_dev.c	33
-rw-r--r--	net/atm/common.c	2
-rw-r--r--	net/batman-adv/bat_iv_ogm.c	61
-rw-r--r--	net/batman-adv/bat_v_ogm.c	41
-rw-r--r--	net/batman-adv/hard-interface.c	2
-rw-r--r--	net/batman-adv/soft-interface.c	32
-rw-r--r--	net/batman-adv/types.h	7
-rw-r--r--	net/bluetooth/6lowpan.c	8
-rw-r--r--	net/bluetooth/af_bluetooth.c	4
-rw-r--r--	net/bridge/br_device.c	8
-rw-r--r--	net/bridge/netfilter/nf_conntrack_bridge.c	2
-rw-r--r--	net/caif/caif_socket.c	2
-rw-r--r--	net/core/datagram.c	8
-rw-r--r--	net/core/dev.c	623
-rw-r--r--	net/core/dev_addr_lists.c	12
-rw-r--r--	net/core/ethtool.c	4
-rw-r--r--	net/core/flow_dissector.c	38
-rw-r--r--	net/core/lwt_bpf.c	7
-rw-r--r--	net/core/net_namespace.c	18
-rw-r--r--	net/core/rtnetlink.c	17
-rw-r--r--	net/core/sock.c	6
-rw-r--r--	net/dccp/ipv4.c	2
-rw-r--r--	net/decnet/af_decnet.c	2
-rw-r--r--	net/dsa/master.c	5
-rw-r--r--	net/dsa/slave.c	12
-rw-r--r--	net/ieee802154/6lowpan/core.c	8
-rw-r--r--	net/ipv4/datagram.c	2
-rw-r--r--	net/ipv4/fib_frontend.c	2
-rw-r--r--	net/ipv4/inet_hashtables.c	2
-rw-r--r--	net/ipv4/ip_gre.c	4
-rw-r--r--	net/ipv4/ip_output.c	11
-rw-r--r--	net/ipv4/tcp.c	4
-rw-r--r--	net/ipv4/tcp_ipv4.c	6
-rw-r--r--	net/ipv4/udp.c	29
-rw-r--r--	net/ipv6/addrconf_core.c	1
-rw-r--r--	net/ipv6/inet6_hashtables.c	2
-rw-r--r--	net/ipv6/ip6_gre.c	4
-rw-r--r--	net/ipv6/udp.c	2
-rw-r--r--	net/l2tp/l2tp_eth.c	1
-rw-r--r--	net/netfilter/ipvs/ip_vs_app.c	12
-rw-r--r--	net/netfilter/ipvs/ip_vs_ctl.c	29
-rw-r--r--	net/netfilter/ipvs/ip_vs_pe.c	3
-rw-r--r--	net/netfilter/ipvs/ip_vs_sched.c	3
-rw-r--r--	net/netfilter/ipvs/ip_vs_sync.c	13
-rw-r--r--	net/netfilter/nf_flow_table_core.c	3
-rw-r--r--	net/netfilter/nf_tables_offload.c	2
-rw-r--r--	net/netfilter/nft_payload.c	38
-rw-r--r--	net/netrom/af_netrom.c	23
-rw-r--r--	net/nfc/llcp_sock.c	4
-rw-r--r--	net/openvswitch/datapath.c	20
-rw-r--r--	net/openvswitch/vport-internal_dev.c	11
-rw-r--r--	net/phonet/socket.c	4
-rw-r--r--	net/rose/af_rose.c	23
-rw-r--r--	net/rxrpc/ar-internal.h	1
-rw-r--r--	net/rxrpc/recvmsg.c	18
-rw-r--r--	net/sched/cls_bpf.c	8
-rw-r--r--	net/sched/sch_generic.c	19
-rw-r--r--	net/sched/sch_hhf.c	8
-rw-r--r--	net/sched/sch_sfb.c	13
-rw-r--r--	net/sched/sch_sfq.c	14
-rw-r--r--	net/sched/sch_taprio.c	2
-rw-r--r--	net/sctp/socket.c	8
-rw-r--r--	net/smc/af_smc.c	13
-rw-r--r--	net/smc/smc_core.c	2
-rw-r--r--	net/smc/smc_pnet.c	2
-rw-r--r--	net/tipc/socket.c	4
-rw-r--r--	net/unix/af_unix.c	6
-rw-r--r--	net/vmw_vsock/af_vsock.c	2
-rw-r--r--	net/wireless/chan.c	5
-rw-r--r--	net/wireless/nl80211.c	2
-rw-r--r--	net/wireless/util.c	3
-rw-r--r--	net/xdp/xdp_umem.c	6
-rwxr-xr-x	tools/testing/selftests/bpf/test_offload.py	5
-rwxr-xr-x	tools/testing/selftests/bpf/test_tc_edt.sh	2
-rwxr-xr-x	tools/testing/selftests/net/fib_tests.sh	21
-rwxr-xr-x [-rw-r--r--]	tools/testing/selftests/net/l2tp.sh	0
-rw-r--r--	tools/testing/selftests/net/reuseport_dualstack.c	3
213 files changed, 2133 insertions, 1289 deletions
diff --git a/Documentation/networking/device_drivers/intel/e100.rst b/Documentation/networking/device_drivers/intel/e100.rst
index 2b9f4887beda..caf023cc88de 100644
--- a/Documentation/networking/device_drivers/intel/e100.rst
+++ b/Documentation/networking/device_drivers/intel/e100.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==============================================================
-Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
-==============================================================
+=============================================================
+Linux Base Driver for the Intel(R) PRO/100 Family of Adapters
+=============================================================
 
 June 1, 2018
 
@@ -21,7 +21,7 @@ Contents
 In This Release
 ===============
 
-This file describes the Linux* Base Driver for the Intel(R) PRO/100 Family of
+This file describes the Linux Base Driver for the Intel(R) PRO/100 Family of
 Adapters. This driver includes support for Itanium(R)2-based systems.
 
 For questions related to hardware requirements, refer to the documentation
@@ -138,9 +138,9 @@ version 1.6 or later is required for this functionality.
 The latest release of ethtool can be found from
 https://www.kernel.org/pub/software/network/ethtool/
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is provided through the ethtool* utility. For instructions on
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is provided through the ethtool utility. For instructions on
 enabling WoL with ethtool, refer to the ethtool man page. WoL will be
 enabled on the system during the next shut down or reboot. For this
 driver version, in order to enable WoL, the e100 driver must be loaded
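The WoL text above describes configuration via the ethtool utility; under the
hood that is the ETHTOOL_SWOL ioctl. A minimal userspace sketch (the interface
name is hypothetical), roughly what `ethtool -s eth0 wol g` does:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <unistd.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		/* Request magic-packet wakeup on the device */
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL,
					       .wolopts = WAKE_MAGIC };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* hypothetical NIC */
		ifr.ifr_data = (void *)&wol;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SWOL");
		close(fd);
		return 0;
	}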
diff --git a/Documentation/networking/device_drivers/intel/e1000.rst b/Documentation/networking/device_drivers/intel/e1000.rst
index 956560b6e745..4aaae0f7d6ba 100644
--- a/Documentation/networking/device_drivers/intel/e1000.rst
+++ b/Documentation/networking/device_drivers/intel/e1000.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999 - 2013 Intel Corporation.
@@ -438,10 +438,10 @@ ethtool
 The latest release of ethtool can be found from
 https://www.kernel.org/pub/software/network/ethtool/
 
-Enabling Wake on LAN* (WoL)
----------------------------
+Enabling Wake on LAN (WoL)
+--------------------------
 
-WoL is configured through the ethtool* utility.
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot.
 For this driver version, in order to enable WoL, the e1000 driver must be
diff --git a/Documentation/networking/device_drivers/intel/e1000e.rst b/Documentation/networking/device_drivers/intel/e1000e.rst
index 01999f05509c..f49cd370e7bf 100644
--- a/Documentation/networking/device_drivers/intel/e1000e.rst
+++ b/Documentation/networking/device_drivers/intel/e1000e.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-======================================================
-Linux* Driver for Intel(R) Ethernet Network Connection
-======================================================
+=====================================================
+Linux Driver for Intel(R) Ethernet Network Connection
+=====================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 2008-2018 Intel Corporation.
@@ -338,7 +338,7 @@ and higher cannot be forced. Use the autonegotiation advertising setting to
 manually set devices for 1 Gbps and higher.
 
 Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
 
 Caution: Only experienced network administrators should force speed and duplex
 or change autonegotiation advertising manually. The settings at the switch must
@@ -351,9 +351,9 @@ will not attempt to auto-negotiate with its link partner since those adapters
 operate only in full duplex and only at their native speed.
 
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot. For
 this driver version, in order to enable WoL, the e1000e driver must be loaded
diff --git a/Documentation/networking/device_drivers/intel/fm10k.rst b/Documentation/networking/device_drivers/intel/fm10k.rst
index ac3269e34f55..4d279e64e221 100644
--- a/Documentation/networking/device_drivers/intel/fm10k.rst
+++ b/Documentation/networking/device_drivers/intel/fm10k.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==============================================================
-Linux* Base Driver for Intel(R) Ethernet Multi-host Controller
-==============================================================
+=============================================================
+Linux Base Driver for Intel(R) Ethernet Multi-host Controller
+=============================================================
 
 August 20, 2018
 Copyright(c) 2015-2018 Intel Corporation.
@@ -120,8 +120,8 @@ rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r
 Known Issues/Troubleshooting
 ============================
 
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS under Linux KVM
----------------------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS under Linux KVM
+-------------------------------------------------------------------------------------
 KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
 includes traditional PCIe devices, as well as SR-IOV-capable devices based on
 the Intel Ethernet Controller XL710.
diff --git a/Documentation/networking/device_drivers/intel/i40e.rst b/Documentation/networking/device_drivers/intel/i40e.rst
index 848fd388fa6e..8a9b18573688 100644
--- a/Documentation/networking/device_drivers/intel/i40e.rst
+++ b/Documentation/networking/device_drivers/intel/i40e.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==================================================================
-Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series
-==================================================================
+=================================================================
+Linux Base Driver for the Intel(R) Ethernet Controller 700 Series
+=================================================================
 
 Intel 40 Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -384,7 +384,7 @@ NOTE: You cannot set the speed for devices based on the Intel(R) Ethernet
 Network Adapter XXV710 based devices.
 
 Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
 
 Caution: Only experienced network administrators should force speed and duplex
 or change autonegotiation advertising manually. The settings at the switch must
diff --git a/Documentation/networking/device_drivers/intel/iavf.rst b/Documentation/networking/device_drivers/intel/iavf.rst
index cfc08842e32c..84ac7e75f363 100644
--- a/Documentation/networking/device_drivers/intel/iavf.rst
+++ b/Documentation/networking/device_drivers/intel/iavf.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==================================================================
-Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function
-==================================================================
+=================================================================
+Linux Base Driver for Intel(R) Ethernet Adaptive Virtual Function
+=================================================================
 
 Intel Ethernet Adaptive Virtual Function Linux driver.
 Copyright(c) 2013-2018 Intel Corporation.
@@ -19,7 +19,7 @@ Contents
 Overview
 ========
 
-This file describes the iavf Linux* Base Driver. This driver was formerly
+This file describes the iavf Linux Base Driver. This driver was formerly
 called i40evf.
 
 The iavf driver supports the below mentioned virtual function devices and
diff --git a/Documentation/networking/device_drivers/intel/ice.rst b/Documentation/networking/device_drivers/intel/ice.rst
index c220aa2711c6..ee43ea57d443 100644
--- a/Documentation/networking/device_drivers/intel/ice.rst
+++ b/Documentation/networking/device_drivers/intel/ice.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===================================================================
-Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series
-===================================================================
+==================================================================
+Linux Base Driver for the Intel(R) Ethernet Connection E800 Series
+==================================================================
 
 Intel ice Linux driver.
 Copyright(c) 2018 Intel Corporation.
diff --git a/Documentation/networking/device_drivers/intel/igb.rst b/Documentation/networking/device_drivers/intel/igb.rst
index fc8cfaa5dcfa..87e560fe5eaa 100644
--- a/Documentation/networking/device_drivers/intel/igb.rst
+++ b/Documentation/networking/device_drivers/intel/igb.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -129,9 +129,9 @@ version is required for this functionality. Download it at:
 https://www.kernel.org/pub/software/network/ethtool/
 
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot. For
 this driver version, in order to enable WoL, the igb driver must be loaded
diff --git a/Documentation/networking/device_drivers/intel/igbvf.rst b/Documentation/networking/device_drivers/intel/igbvf.rst
index 9cddabe8108e..557fc020ef31 100644
--- a/Documentation/networking/device_drivers/intel/igbvf.rst
+++ b/Documentation/networking/device_drivers/intel/igbvf.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-============================================================
-Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet
-============================================================
+===========================================================
+Linux Base Virtual Function Driver for Intel(R) 1G Ethernet
+===========================================================
 
 Intel Gigabit Virtual Function Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
diff --git a/Documentation/networking/device_drivers/intel/ixgbe.rst b/Documentation/networking/device_drivers/intel/ixgbe.rst
index c7d25483fedb..f1d5233e5e51 100644
--- a/Documentation/networking/device_drivers/intel/ixgbe.rst
+++ b/Documentation/networking/device_drivers/intel/ixgbe.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-=============================================================================
-Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
-=============================================================================
+===========================================================================
+Linux Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
+===========================================================================
 
 Intel 10 Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -519,8 +519,8 @@ The offload is also supported for ixgbe's VFs, but the VF must be set as
 Known Issues/Troubleshooting
 ============================
 
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS
------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS
+---------------------------------------------------------------------
 Linux KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM.
 This includes traditional PCIe devices, as well as SR-IOV-capable devices based
 on the Intel Ethernet Controller XL710.
diff --git a/Documentation/networking/device_drivers/intel/ixgbevf.rst b/Documentation/networking/device_drivers/intel/ixgbevf.rst
index 5d4977360157..76bbde736f21 100644
--- a/Documentation/networking/device_drivers/intel/ixgbevf.rst
+++ b/Documentation/networking/device_drivers/intel/ixgbevf.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-=============================================================
-Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet
-=============================================================
+============================================================
+Linux Base Virtual Function Driver for Intel(R) 10G Ethernet
+============================================================
 
 Intel 10 Gigabit Virtual Function Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
diff --git a/Documentation/networking/device_drivers/pensando/ionic.rst b/Documentation/networking/device_drivers/pensando/ionic.rst
index 13935896bee6..c17d680cf334 100644
--- a/Documentation/networking/device_drivers/pensando/ionic.rst
+++ b/Documentation/networking/device_drivers/pensando/ionic.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==========================================================
-Linux* Driver for the Pensando(R) Ethernet adapter family
-==========================================================
+========================================================
+Linux Driver for the Pensando(R) Ethernet adapter family
+========================================================
 
 Pensando Linux Ethernet driver.
 Copyright(c) 2019 Pensando Systems, Inc
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 49e95f438ed7..8d4ad1d1ae26 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -207,8 +207,8 @@ TCP variables:
 
 somaxconn - INTEGER
 	Limit of socket listen() backlog, known in userspace as SOMAXCONN.
-	Defaults to 128. See also tcp_max_syn_backlog for additional tuning
-	for TCP sockets.
+	Defaults to 4096. (Was 128 before linux-5.4)
+	See also tcp_max_syn_backlog for additional tuning for TCP sockets.
 
 tcp_abort_on_overflow - BOOLEAN
 	If listening service is too slow to accept new connections,
@@ -408,11 +408,14 @@ tcp_max_orphans - INTEGER
 	up to ~64K of unswappable memory.
 
 tcp_max_syn_backlog - INTEGER
-	Maximal number of remembered connection requests, which have not
-	received an acknowledgment from connecting client.
+	Maximal number of remembered connection requests (SYN_RECV),
+	which have not received an acknowledgment from connecting client.
+	This is a per-listener limit.
 	The minimal value is 128 for low memory machines, and it will
 	increase in proportion to the memory of machine.
 	If server suffers from overload, try increasing this number.
+	Remember to also check /proc/sys/net/core/somaxconn
+	A SYN_RECV request socket consumes about 304 bytes of memory.
 
 tcp_max_tw_buckets - INTEGER
 	Maximal number of timewait sockets held by system simultaneously.
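The two knobs documented above interact: the backlog a server passes to
listen() is clamped to net.core.somaxconn (4096 after this change, 128
before), while tcp_max_syn_backlog bounds half-open SYN_RECV entries. A
minimal userspace sketch (the port number is hypothetical):

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		struct sockaddr_in addr;

		memset(&addr, 0, sizeof(addr));
		addr.sin_family = AF_INET;
		addr.sin_addr.s_addr = htonl(INADDR_ANY);
		addr.sin_port = htons(8080);	/* hypothetical port */

		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			perror("bind");
			return 1;
		}
		/* SOMAXCONN is the libc constant; the kernel clamps the
		 * effective backlog to the somaxconn sysctl at listen() time.
		 */
		if (listen(fd, SOMAXCONN) < 0) {
			perror("listen");
			return 1;
		}
		close(fd);
		return 0;
	}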
diff --git a/MAINTAINERS b/MAINTAINERS
index f97f35163033..cba1095547fd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11408,7 +11408,6 @@ F: include/trace/events/tcp.h
 NETWORKING [TLS]
 M:	Boris Pismenny <borisp@mellanox.com>
 M:	Aviad Yehezkel <aviadye@mellanox.com>
-M:	Dave Watson <davejwatson@fb.com>
 M:	John Fastabend <john.fastabend@gmail.com>
 M:	Daniel Borkmann <daniel@iogearbox.net>
 M:	Jakub Kicinski <jakub.kicinski@netronome.com>
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 774d991d7cca..aca75237bbcf 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1297,7 +1297,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
 	tp->write_seq = snd_isn;
 	tp->snd_nxt = snd_isn;
 	tp->snd_una = snd_isn;
-	inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+	inet_sk(sk)->inet_id = prandom_u32();
 	assign_rxopt(sk, opt);
 
 	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 0891ab829b1b..98bc5a4cd5e7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		return peekmsg(sk, msg, len, nonblock, flags);
 
 	if (sk_can_busy_loop(sk) &&
-	    skb_queue_empty(&sk->sk_receive_queue) &&
+	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
 	    sk->sk_state == TCP_ESTABLISHED)
 		sk_busy_loop(sk, nonblock);
 
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index c92b405b7646..ba8619524231 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &(cdev->recvwait), wait);
 	mask = EPOLLOUT | EPOLLWRNORM;
-	if (!skb_queue_empty(&cdev->recvqueue))
+	if (!skb_queue_empty_lockless(&cdev->recvqueue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 	return mask;
 }
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 8c79bad2a9a5..4f2e6910c623 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -952,7 +952,7 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
 	struct bond_vlan_tag *tags;
 
 	if (is_vlan_dev(upper) &&
-	    bond->nest_level == vlan_get_encap_level(upper) - 1) {
+	    bond->dev->lower_level == upper->lower_level - 1) {
 		if (upper->addr_assign_type == NET_ADDR_STOLEN) {
 			alb_send_lp_vid(slave, mac_addr,
 					vlan_dev_vlan_proto(upper),
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 21d8fcc83c9c..480f9459b402 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1733,8 +1733,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 		goto err_upper_unlink;
 	}
 
-	bond->nest_level = dev_get_nest_level(bond_dev) + 1;
-
 	/* If the mode uses primary, then the following is handled by
 	 * bond_change_active_slave().
 	 */
@@ -1816,7 +1814,8 @@ err_detach:
 	slave_disable_netpoll(new_slave);
 
 err_close:
-	slave_dev->priv_flags &= ~IFF_BONDING;
+	if (!netif_is_bond_master(slave_dev))
+		slave_dev->priv_flags &= ~IFF_BONDING;
 	dev_close(slave_dev);
 
 err_restore_mac:
@@ -1956,9 +1955,6 @@ static int __bond_release_one(struct net_device *bond_dev,
 	if (!bond_has_slaves(bond)) {
 		bond_set_carrier(bond);
 		eth_hw_addr_random(bond_dev);
-		bond->nest_level = SINGLE_DEPTH_NESTING;
-	} else {
-		bond->nest_level = dev_get_nest_level(bond_dev) + 1;
 	}
 
 	unblock_netpoll_tx();
@@ -2017,7 +2013,8 @@ static int __bond_release_one(struct net_device *bond_dev,
 	else
 		dev_set_mtu(slave_dev, slave->original_mtu);
 
-	slave_dev->priv_flags &= ~IFF_BONDING;
+	if (!netif_is_bond_master(slave_dev))
+		slave_dev->priv_flags &= ~IFF_BONDING;
 
 	bond_free_slave(slave);
 
@@ -3442,13 +3439,6 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
 	}
 }
 
-static int bond_get_nest_level(struct net_device *bond_dev)
-{
-	struct bonding *bond = netdev_priv(bond_dev);
-
-	return bond->nest_level;
-}
-
 static void bond_get_stats(struct net_device *bond_dev,
 			   struct rtnl_link_stats64 *stats)
 {
@@ -3457,7 +3447,7 @@ static void bond_get_stats(struct net_device *bond_dev,
 	struct list_head *iter;
 	struct slave *slave;
 
-	spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
+	spin_lock(&bond->stats_lock);
 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
 
 	rcu_read_lock();
@@ -4268,7 +4258,6 @@ static const struct net_device_ops bond_netdev_ops = {
 	.ndo_neigh_setup	= bond_neigh_setup,
 	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
-	.ndo_get_lock_subclass	= bond_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_netpoll_setup	= bond_netpoll_setup,
 	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
@@ -4296,7 +4285,6 @@ void bond_setup(struct net_device *bond_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 
 	spin_lock_init(&bond->mode_lock);
-	spin_lock_init(&bond->stats_lock);
 	bond->params = bonding_defaults;
 
 	/* Initialize pointers */
@@ -4365,6 +4353,7 @@ static void bond_uninit(struct net_device *bond_dev)
 
 	list_del(&bond->bond_list);
 
+	lockdep_unregister_key(&bond->stats_lock_key);
 	bond_debug_unregister(bond);
 }
 
@@ -4768,8 +4757,9 @@ static int bond_init(struct net_device *bond_dev)
 	if (!bond->wq)
 		return -ENOMEM;
 
-	bond->nest_level = SINGLE_DEPTH_NESTING;
-	netdev_lockdep_set_classes(bond_dev);
+	spin_lock_init(&bond->stats_lock);
+	lockdep_register_key(&bond->stats_lock_key);
+	lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
 
 	list_add_tail(&bond->bond_list, &bn->dev_list);
 
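The bonding hunks above drop the nest_level/subclass scheme in favor of a
dynamically registered lockdep key, one per bond device, so the lock class no
longer depends on nesting depth. A minimal sketch of that pattern with a
hypothetical device type (the lockdep calls are the same ones the diff uses):

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct my_dev {				/* hypothetical driver context */
		spinlock_t stats_lock;
		struct lock_class_key stats_lock_key;	/* one class per instance */
	};

	static void my_dev_init(struct my_dev *d)
	{
		spin_lock_init(&d->stats_lock);
		lockdep_register_key(&d->stats_lock_key);
		lockdep_set_class(&d->stats_lock, &d->stats_lock_key);
	}

	static void my_dev_uninit(struct my_dev *d)
	{
		/* The key must outlive the lock; unregister at teardown. */
		lockdep_unregister_key(&d->stats_lock_key);
	}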
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 26509fa37a50..d44651ad520c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -37,22 +37,11 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 	unsigned int i;
 	u32 reg, offset;
 
-	if (priv->type == BCM7445_DEVICE_ID)
-		offset = CORE_STS_OVERRIDE_IMP;
-	else
-		offset = CORE_STS_OVERRIDE_IMP2;
-
 	/* Enable the port memories */
 	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
 	reg &= ~P_TXQ_PSM_VDD(port);
 	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 
-	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
-	reg = core_readl(priv, CORE_IMP_CTL);
-	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
-	reg &= ~(RX_DIS | TX_DIS);
-	core_writel(priv, reg, CORE_IMP_CTL);
-
 	/* Enable forwarding */
 	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
 
@@ -71,10 +60,27 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
 	b53_brcm_hdr_setup(ds, port);
 
-	/* Force link status for IMP port */
-	reg = core_readl(priv, offset);
-	reg |= (MII_SW_OR | LINK_STS);
-	core_writel(priv, reg, offset);
+	if (port == 8) {
+		if (priv->type == BCM7445_DEVICE_ID)
+			offset = CORE_STS_OVERRIDE_IMP;
+		else
+			offset = CORE_STS_OVERRIDE_IMP2;
+
+		/* Force link status for IMP port */
+		reg = core_readl(priv, offset);
+		reg |= (MII_SW_OR | LINK_STS);
+		core_writel(priv, reg, offset);
+
+		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+		reg = core_readl(priv, CORE_IMP_CTL);
+		reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+		reg &= ~(RX_DIS | TX_DIS);
+		core_writel(priv, reg, CORE_IMP_CTL);
+	} else {
+		reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+		reg &= ~(RX_DIS | TX_DIS);
+		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+	}
 }
 
 static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index f40b248f0b23..ffac0ea4e8d5 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -26,8 +26,8 @@ config NET_DSA_SJA1105_PTP
 
 config NET_DSA_SJA1105_TAS
 	bool "Support for the Time-Aware Scheduler on NXP SJA1105"
-	depends on NET_DSA_SJA1105
-	depends on NET_SCH_TAPRIO
+	depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+	depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
 	help
 	  This enables support for the TTEthernet-based egress scheduling
 	  engine in the SJA1105 DSA driver, which is controlled using a
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 42d2e1b02c44..664d664e0925 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -256,6 +256,9 @@ static int emac_rockchip_remove(struct platform_device *pdev)
 	if (priv->regulator)
 		regulator_disable(priv->regulator);
 
+	if (priv->soc_data->need_div_macclk)
+		clk_disable_unprepare(priv->macclk);
+
 	free_netdev(ndev);
 	return err;
 }
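The rockchip fix above restores the usual clock balance rule: a clock enabled
with clk_prepare_enable() during probe must be released with
clk_disable_unprepare() on every teardown path. A minimal sketch (hypothetical
driver functions):

	#include <linux/clk.h>

	/* Every successful clk_prepare_enable() must be paired with
	 * clk_disable_unprepare() on remove and error paths, or the clock
	 * (and its parents) stay referenced forever.
	 */
	static int hypothetical_probe(struct clk *macclk)
	{
		return clk_prepare_enable(macclk);	/* prepare + enable */
	}

	static void hypothetical_remove(struct clk *macclk)
	{
		clk_disable_unprepare(macclk);		/* disable + unprepare */
	}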
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b4a8cf620a0c..04ec909e06df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -10382,7 +10382,8 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
 {
 	bnxt_unmap_bars(bp, bp->pdev);
 	pci_release_regions(bp->pdev);
-	pci_disable_device(bp->pdev);
+	if (pci_is_enabled(bp->pdev))
+		pci_disable_device(bp->pdev);
 }
 
 static void bnxt_init_dflt_coal(struct bnxt *bp)
@@ -10669,14 +10670,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
 	}
 	/* fall through */
-	case BNXT_FW_RESET_STATE_RESET_FW: {
-		u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
-
+	case BNXT_FW_RESET_STATE_RESET_FW:
 		bnxt_reset_all(bp);
 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
-		bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
 		return;
-	}
 	case BNXT_FW_RESET_STATE_ENABLE_DEV:
 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
 		    bp->fw_health) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index e664392dccc0..7151244f8c7d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -29,25 +29,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
 	health_status = val & 0xffff;
 
-	if (health_status == BNXT_FW_STATUS_HEALTHY) {
-		rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-						  "Healthy;");
-		if (rc)
-			return rc;
-	} else if (health_status < BNXT_FW_STATUS_HEALTHY) {
-		rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-						  "Not yet completed initialization;");
+	if (health_status < BNXT_FW_STATUS_HEALTHY) {
+		rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+						  "Not yet completed initialization");
 		if (rc)
 			return rc;
 	} else if (health_status > BNXT_FW_STATUS_HEALTHY) {
-		rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-						  "Encountered fatal error and cannot recover;");
+		rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+						  "Encountered fatal error and cannot recover");
 		if (rc)
 			return rc;
 	}
 
 	if (val >> 16) {
-		rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+		rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
 		if (rc)
 			return rc;
 	}
@@ -215,25 +210,68 @@ enum bnxt_dl_param_id {
 
 static const struct bnxt_dl_nvm_param nvm_params[] = {
 	{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
-	 BNXT_NVM_SHARED_CFG, 1},
+	 BNXT_NVM_SHARED_CFG, 1, 1},
 	{DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
-	 BNXT_NVM_SHARED_CFG, 1},
+	 BNXT_NVM_SHARED_CFG, 1, 1},
 	{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
-	 NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+	 NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
 	{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
-	 NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+	 NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
 	{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
-	 BNXT_NVM_SHARED_CFG, 1},
+	 BNXT_NVM_SHARED_CFG, 1, 1},
 };
 
+union bnxt_nvm_data {
+	u8	val8;
+	__le32	val32;
+};
+
+static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
+				  union devlink_param_value *src,
+				  int nvm_num_bits, int dl_num_bytes)
+{
+	u32 val32 = 0;
+
+	if (nvm_num_bits == 1) {
+		dst->val8 = src->vbool;
+		return;
+	}
+	if (dl_num_bytes == 4)
+		val32 = src->vu32;
+	else if (dl_num_bytes == 2)
+		val32 = (u32)src->vu16;
+	else if (dl_num_bytes == 1)
+		val32 = (u32)src->vu8;
+	dst->val32 = cpu_to_le32(val32);
+}
+
+static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
+				    union bnxt_nvm_data *src,
+				    int nvm_num_bits, int dl_num_bytes)
+{
+	u32 val32;
+
+	if (nvm_num_bits == 1) {
+		dst->vbool = src->val8;
+		return;
+	}
+	val32 = le32_to_cpu(src->val32);
+	if (dl_num_bytes == 4)
+		dst->vu32 = val32;
+	else if (dl_num_bytes == 2)
+		dst->vu16 = (u16)val32;
+	else if (dl_num_bytes == 1)
+		dst->vu8 = (u8)val32;
+}
+
 static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 			     int msg_len, union devlink_param_value *val)
 {
 	struct hwrm_nvm_get_variable_input *req = msg;
-	void *data_addr = NULL, *buf = NULL;
 	struct bnxt_dl_nvm_param nvm_param;
-	int bytesize, idx = 0, rc, i;
+	union bnxt_nvm_data *data;
 	dma_addr_t data_dma_addr;
+	int idx = 0, rc, i;
 
 	/* Get/Set NVM CFG parameter is supported only on PFs */
 	if (BNXT_VF(bp))
@@ -254,47 +292,31 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 	else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
 		idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
 
-	bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
-	switch (bytesize) {
-	case 1:
-		if (nvm_param.num_bits == 1)
-			buf = &val->vbool;
-		else
-			buf = &val->vu8;
-		break;
-	case 2:
-		buf = &val->vu16;
-		break;
-	case 4:
-		buf = &val->vu32;
-		break;
-	default:
-		return -EFAULT;
-	}
-
-	data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
-				       &data_dma_addr, GFP_KERNEL);
-	if (!data_addr)
+	data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+				  &data_dma_addr, GFP_KERNEL);
+	if (!data)
 		return -ENOMEM;
 
 	req->dest_data_addr = cpu_to_le64(data_dma_addr);
-	req->data_len = cpu_to_le16(nvm_param.num_bits);
+	req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
 	req->option_num = cpu_to_le16(nvm_param.offset);
 	req->index_0 = cpu_to_le16(idx);
 	if (idx)
 		req->dimensions = cpu_to_le16(1);
 
 	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
-		memcpy(data_addr, buf, bytesize);
+		bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
+				      nvm_param.dl_num_bytes);
 		rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
 	} else {
 		rc = hwrm_send_message_silent(bp, msg, msg_len,
 					      HWRM_CMD_TIMEOUT);
+		if (!rc)
+			bnxt_copy_from_nvm_data(val, data,
+						nvm_param.nvm_num_bits,
+						nvm_param.dl_num_bytes);
 	}
-	if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
-		memcpy(buf, data_addr, bytesize);
-
-	dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+	dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
 	if (rc == -EACCES)
 		netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
 	return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b97e0baeb42d..2f4fd0a7d04b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -52,7 +52,8 @@ struct bnxt_dl_nvm_param {
 	u16 id;
 	u16 offset;
 	u16 dir_type;
-	u16 num_bits;
+	u16 nvm_num_bits;
+	u8 dl_num_bytes;
 };
 
 void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index a4dead4ab0ed..86b528d8364c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -695,10 +695,10 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
 	lld->write_cmpl_support = adap->params.write_cmpl_support;
 }
 
-static void uld_attach(struct adapter *adap, unsigned int uld)
+static int uld_attach(struct adapter *adap, unsigned int uld)
 {
-	void *handle;
 	struct cxgb4_lld_info lli;
+	void *handle;
 
 	uld_init(adap, &lli);
 	uld_queue_init(adap, uld, &lli);
@@ -708,7 +708,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 		dev_warn(adap->pdev_dev,
 			 "could not attach to the %s driver, error %ld\n",
 			 adap->uld[uld].name, PTR_ERR(handle));
-		return;
+		return PTR_ERR(handle);
 	}
 
 	adap->uld[uld].handle = handle;
@@ -716,22 +716,22 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 
 	if (adap->flags & CXGB4_FULL_INIT_DONE)
 		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+
+	return 0;
 }
 
-/**
- * cxgb4_register_uld - register an upper-layer driver
- * @type: the ULD type
- * @p: the ULD methods
+/* cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
  *
  * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
+ * about any presently available devices that support its type.
  */
 void cxgb4_register_uld(enum cxgb4_uld type,
 			const struct cxgb4_uld_info *p)
 {
-	int ret = 0;
 	struct adapter *adap;
+	int ret = 0;
 
 	if (type >= CXGB4_ULD_MAX)
 		return;
@@ -763,8 +763,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
763 if (ret) 763 if (ret)
764 goto free_irq; 764 goto free_irq;
765 adap->uld[type] = *p; 765 adap->uld[type] = *p;
766 uld_attach(adap, type); 766 ret = uld_attach(adap, type);
767 if (ret)
768 goto free_txq;
767 continue; 769 continue;
770free_txq:
771 release_sge_txq_uld(adap, type);
768free_irq: 772free_irq:
769 if (adap->flags & CXGB4_FULL_INIT_DONE) 773 if (adap->flags & CXGB4_FULL_INIT_DONE)
770 quiesce_rx_uld(adap, type); 774 quiesce_rx_uld(adap, type);
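
The uld_attach() change above turns a silently ignored attach failure into a propagated error, and the new free_txq label releases the SGE TX queues that were set up just before the attach attempt. A minimal user-space sketch of the same goto-unwind ordering follows; every function in it is a hypothetical stand-in, not a cxgb4 symbol.

#include <stdio.h>

/* Hypothetical setup/teardown pairs standing in for the driver's
 * IRQ / TX-queue / attach sequence. Each label undoes exactly the
 * steps that had succeeded before the failure point.
 */
static int setup_irqs(void)    { puts("irqs ready");    return 0; }
static int setup_txq(void)     { puts("txq ready");     return 0; }
static int do_attach(void)     { puts("attach fails");  return -1; }
static void release_txq(void)  { puts("txq released");  }
static void release_irqs(void) { puts("irqs released"); }

int main(void)
{
	int ret;

	ret = setup_irqs();
	if (ret)
		goto out;
	ret = setup_txq();
	if (ret)
		goto free_irq;
	ret = do_attach();
	if (ret)
		goto free_txq;	/* the new edge: unwind the TX queues too */
	return 0;

free_txq:
	release_txq();
free_irq:
	release_irqs();
out:
	return -ret;
}

Running it prints the teardown in exact reverse order of setup, which is the invariant the fix restores for the attach-failure path.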
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b3da81e90132..928bfea5457b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -3791,15 +3791,11 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 	 * write the CIDX Updates into the Status Page at the end of the
 	 * TX Queue.
 	 */
-	c.autoequiqe_to_viid = htonl((dbqt
-				      ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
-				      : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+	c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
 				     FW_EQ_ETH_CMD_VIID_V(pi->viid));
 
 	c.fetchszm_to_iqid =
-		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
-						 ? HOSTFCMODE_INGRESS_QUEUE_X
-						 : HOSTFCMODE_STATUS_PAGE_X) |
+		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
 		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
 		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
 
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
index 0b12f89bf89a..9fdf77d5eb37 100644
--- a/drivers/net/ethernet/cortina/gemini.h
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Register definitions for Gemini GMAC Ethernet device driver
  *
  * Copyright (C) 2006 Storlink, Corp.
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 9b7af94a40bb..96e9565f1e08 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -727,6 +727,18 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 	 */
 	nfrags = skb_shinfo(skb)->nr_frags;
 
+	/* Setup HW checksumming */
+	csum_vlan = 0;
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+		goto drop;
+
+	/* Add VLAN tag */
+	if (skb_vlan_tag_present(skb)) {
+		csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+	}
+
 	/* Get header len */
 	len = skb_headlen(skb);
 
@@ -753,19 +765,6 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 	if (nfrags == 0)
 		f_ctl_stat |= FTGMAC100_TXDES0_LTS;
 	txdes->txdes3 = cpu_to_le32(map);
-
-	/* Setup HW checksumming */
-	csum_vlan = 0;
-	if (skb->ip_summed == CHECKSUM_PARTIAL &&
-	    !ftgmac100_prep_tx_csum(skb, &csum_vlan))
-		goto drop;
-
-	/* Add VLAN tag */
-	if (skb_vlan_tag_present(skb)) {
-		csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
-		csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
-	}
-
 	txdes->txdes1 = cpu_to_le32(csum_vlan);
 
 	/* Next descriptor */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
index ff2e177395d4..df2458a5e9ef 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2018 NXP
  */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
index 720cd50f5895..4ac05bfef338 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2018 NXP
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
index be7914c1634d..311c184e1aef 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2018 NXP
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4d4c72adf49..22c01b224baa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3558,7 +3558,7 @@ fec_probe(struct platform_device *pdev)
 
 	for (i = 0; i < irq_cnt; i++) {
 		snprintf(irq_name, sizeof(irq_name), "int%d", i);
-		irq = platform_get_irq_byname(pdev, irq_name);
+		irq = platform_get_irq_byname_optional(pdev, irq_name);
 		if (irq < 0)
 			irq = platform_get_irq(pdev, i);
 		if (irq < 0) {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 19e2365be7d8..945643c02615 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -600,9 +600,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
 
 	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
 
-	irq = platform_get_irq_byname(pdev, "pps");
+	irq = platform_get_irq_byname_optional(pdev, "pps");
 	if (irq < 0)
-		irq = platform_get_irq(pdev, irq_idx);
+		irq = platform_get_irq_optional(pdev, irq_idx);
 	/* Failure to get an irq is not fatal,
 	 * only the PTP_CLOCK_PPS clock events should stop
 	 */
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 59564ac99d2a..edec61dfc868 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
 	len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
 	page_info = &rx->data.page_info[idx];
+	dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+				PAGE_SIZE, DMA_FROM_DEVICE);
 
 	/* gvnic can only receive into registered segments. If the buffer
 	 * can't be recycled, our only choice is to copy the data out of
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 778b87b5a06c..0a9a7ee2a866 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -390,7 +390,21 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
 	seg_desc->seg.seg_addr = cpu_to_be64(addr);
 }
 
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
+				    u64 iov_offset, u64 iov_len)
+{
+	dma_addr_t dma;
+	u64 addr;
+
+	for (addr = iov_offset; addr < iov_offset + iov_len;
+	     addr += PAGE_SIZE) {
+		dma = page_buses[addr / PAGE_SIZE];
+		dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
+	}
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+			  struct device *dev)
 {
 	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
 	union gve_tx_desc *pkt_desc, *seg_desc;
@@ -432,6 +446,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
 	skb_copy_bits(skb, 0,
 		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
 		      hlen);
+	gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+				info->iov[hdr_nfrags - 1].iov_offset,
+				info->iov[hdr_nfrags - 1].iov_len);
 	copy_offset = hlen;
 
 	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
@@ -445,6 +462,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
 		skb_copy_bits(skb, copy_offset,
 			      tx->tx_fifo.base + info->iov[i].iov_offset,
 			      info->iov[i].iov_len);
+		gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+					info->iov[i].iov_offset,
+					info->iov[i].iov_len);
 		copy_offset += info->iov[i].iov_len;
 	}
 
@@ -473,7 +493,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
 		return NETDEV_TX_BUSY;
 	}
-	nsegs = gve_tx_add_skb(tx, skb);
+	nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
 
 	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
 	skb_tx_timestamp(skb);
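
gve_dma_sync_for_device() above walks a page-sized cursor from iov_offset and syncs the QPL page under the cursor at each step, so the bytes copied into the TX FIFO are visible to the device before the doorbell rings. The cursor arithmetic in isolation, as a runnable sketch (the PAGE_SIZE value and names are illustrative, not gve's):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* Mirror the loop in gve_dma_sync_for_device(): start at the byte
 * offset of the FIFO region, advance by PAGE_SIZE, and at each stop
 * "sync" the page index that holds the cursor.
 */
static void sync_range(uint64_t off, uint64_t len)
{
	uint64_t addr;

	for (addr = off; addr < off + len; addr += PAGE_SIZE)
		printf("  sync page %llu\n",
		       (unsigned long long)(addr / PAGE_SIZE));
}

int main(void)
{
	puts("range [0, 8192):");	/* two full pages -> pages 0 and 1 */
	sync_range(0, 8192);
	puts("range [100, 300):");	/* stays within page 0 */
	sync_range(100, 200);
	return 0;
}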
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index c84167447abe..4606a7e4a6d1 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -237,6 +237,7 @@ struct hip04_priv {
 	dma_addr_t rx_phys[RX_DESC_NUM];
 	unsigned int rx_head;
 	unsigned int rx_buf_size;
+	unsigned int rx_cnt_remaining;
 
 	struct device_node *phy_node;
 	struct phy_device *phy;
@@ -575,7 +576,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
 	struct net_device *ndev = priv->ndev;
 	struct net_device_stats *stats = &ndev->stats;
-	unsigned int cnt = hip04_recv_cnt(priv);
 	struct rx_desc *desc;
 	struct sk_buff *skb;
 	unsigned char *buf;
@@ -588,8 +588,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 
 	/* clean up tx descriptors */
 	tx_remaining = hip04_tx_reclaim(ndev, false);
-
-	while (cnt && !last) {
+	priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+	while (priv->rx_cnt_remaining && !last) {
 		buf = priv->rx_buf[priv->rx_head];
 		skb = build_skb(buf, priv->rx_buf_size);
 		if (unlikely(!skb)) {
@@ -635,11 +635,13 @@ refill:
 		hip04_set_recv_desc(priv, phys);
 
 		priv->rx_head = RX_NEXT(priv->rx_head);
-		if (rx >= budget)
+		if (rx >= budget) {
+			--priv->rx_cnt_remaining;
 			goto done;
+		}
 
-		if (--cnt == 0)
-			cnt = hip04_recv_cnt(priv);
+		if (--priv->rx_cnt_remaining == 0)
+			priv->rx_cnt_remaining += hip04_recv_cnt(priv);
 	}
 
 	if (!(priv->reg_inten & RCV_INT)) {
@@ -724,6 +726,7 @@ static int hip04_mac_open(struct net_device *ndev)
 	int i;
 
 	priv->rx_head = 0;
+	priv->rx_cnt_remaining = 0;
 	priv->tx_head = 0;
 	priv->tx_tail = 0;
 	hip04_reset_ppe(priv);
@@ -1038,7 +1041,6 @@ static int hip04_remove(struct platform_device *pdev)
 
 	hip04_free_ring(ndev, d);
 	unregister_netdev(ndev);
-	free_irq(ndev->irq, ndev);
 	of_node_put(priv->phy_node);
 	cancel_work_sync(&priv->tx_timeout_task);
 	free_netdev(ndev);
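
The hip04 fix above replaces a per-poll local counter with priv->rx_cnt_remaining, which persists across NAPI polls, so completions observed while the budget is exhausted are carried over instead of lost; that loss is what caused the high ping latency under load noted in the merge summary. A toy model of the accounting, runnable in user space with invented numbers:

#include <stdio.h>

/* hw_pending stands in for hip04_recv_cnt() (reading it clears it),
 * and remaining for priv->rx_cnt_remaining, which now survives across
 * poll() calls instead of being discarded with a local variable.
 */
static unsigned int hw_pending = 10;	/* completions the NIC reported */
static unsigned int remaining;		/* persists across polls */

static unsigned int take_hw_count(void)
{
	unsigned int n = hw_pending;

	hw_pending = 0;
	return n;
}

static unsigned int poll(unsigned int budget)
{
	unsigned int rx = 0;

	remaining += take_hw_count();
	while (remaining && rx < budget) {
		rx++;			/* "process one packet" */
		remaining--;
	}
	return rx;
}

int main(void)
{
	for (int i = 1; i <= 3; i++) {
		unsigned int rx = poll(4);

		printf("poll #%d: %u rx, %u carried over\n", i, rx, remaining);
	}
	return 0;
}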
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 71d3d8854d8f..be56e631d693 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		rxdr[i].count = rxdr->count;
 
+	err = 0;
 	if (netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
 		err = e1000_setup_all_rx_resources(adapter);
@@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
 		adapter->rx_ring = rxdr;
 		adapter->tx_ring = txdr;
 		err = e1000_up(adapter);
-		if (err)
-			goto err_setup;
 	}
 	kfree(tx_old);
 	kfree(rx_old);
 
 	clear_bit(__E1000_RESETTING, &adapter->flags);
-	return 0;
+	return err;
+
 err_setup_tx:
 	e1000_free_all_rx_resources(adapter);
 err_setup_rx:
@@ -646,7 +646,6 @@ err_alloc_rx:
 err_alloc_tx:
 	if (netif_running(adapter->netdev))
 		e1000_up(adapter);
-err_setup:
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return err;
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b1c3227ae4ab..a05dfecdd9b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -157,11 +157,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
-
-		/* Kick start the NAPI context so that receiving will start */
-		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
-		if (err)
-			return err;
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 3ec2ce0725d5..8a6ef3514129 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -466,7 +466,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
 		? igb_setup_copper_link_82575
 		: igb_setup_serdes_link_82575;
 
-	if (mac->type == e1000_82580) {
+	if (mac->type == e1000_82580 || mac->type == e1000_i350) {
 		switch (hw->device_id) {
 		/* feature not supported on these id's */
 		case E1000_DEV_ID_DH89XXCC_SGMII:
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 105b0624081a..9148c62d9ac5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -753,7 +753,8 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg)
 		struct net_device *netdev = igb->netdev;
 		hw->hw_addr = NULL;
 		netdev_err(netdev, "PCIe link lost\n");
-		WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
+		WARN(pci_device_is_present(igb->pdev),
+		     "igb: Failed to read reg 0x%x!\n", reg);
 	}
 
 	return value;
@@ -2064,7 +2065,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
 	if ((hw->phy.media_type == e1000_media_type_copper) &&
 	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
 		swap_now = true;
-	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
+	} else if ((hw->phy.media_type != e1000_media_type_copper) &&
+		   !(connsw & E1000_CONNSW_SERDESD)) {
 		/* copper signal takes time to appear */
 		if (adapter->copper_tries < 4) {
 			adapter->copper_tries++;
@@ -2370,7 +2372,7 @@ void igb_reset(struct igb_adapter *adapter)
 		adapter->ei.get_invariants(hw);
 		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
 	}
-	if ((mac->type == e1000_82575) &&
+	if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
 	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
 		igb_enable_mas(adapter);
 	}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 63b62d74f961..8e424dfab12e 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -4047,7 +4047,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
 		hw->hw_addr = NULL;
 		netif_device_detach(netdev);
 		netdev_err(netdev, "PCIe link lost, device now detached\n");
-		WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
+		WARN(pci_device_is_present(igc->pdev),
+		     "igc: Failed to read reg 0x%x!\n", reg);
 	}
 
 	return value;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1ce2397306b9..91b3780ddb04 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4310,7 +4310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
 			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
 
-		clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
 		if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
 			continue;
 
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index c8425d35c049..e47783ce77e0 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -160,16 +160,23 @@ static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
 				     (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
 }
 #else
-void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
-			    struct mvneta_bm_pool *bm_pool, u8 port_map) {}
-void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-			 u8 port_map) {}
-int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-			  struct mvneta_bm_pool *bm_pool) {return 0; }
-struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
-					  enum mvneta_bm_type type, u8 port_id,
-					  int pkt_size) { return NULL; }
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+					  struct mvneta_bm_pool *bm_pool,
+					  u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+				       struct mvneta_bm_pool *bm_pool,
+				       u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+					struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
+							u8 pool_id,
+							enum mvneta_bm_type type,
+							u8 port_id,
+							int pkt_size)
+{ return NULL; }
 
 static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
 					 struct mvneta_bm_pool *bm_pool,
@@ -178,7 +185,8 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
 static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
 					struct mvneta_bm_pool *bm_pool)
 { return 0; }
-struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
-void mvneta_bm_put(struct mvneta_bm *priv) {}
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
 #endif /* CONFIG_MVNETA_BM */
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 4356f3a58002..1187ef1375e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
 		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
 }
 
-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+static int
+mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
+				 struct resource_allocator *res_alloc,
+				 int vf)
 {
-	/* reduce the sink counter */
-	return (dev->caps.max_counters - 1 -
-		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
-		/ MLX4_MAX_PORTS;
+	struct mlx4_active_ports actv_ports;
+	int ports, counters_guaranteed;
+
+	/* For master, only allocate according to the number of phys ports */
+	if (vf == mlx4_master_func_num(dev))
+		return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
+
+	/* calculate real number of ports for the VF */
+	actv_ports = mlx4_get_active_ports(dev, vf);
+	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+	counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+
+	/* If we do not have enough counters for this VF, do not
+	 * allocate any for it. '-1' to reduce the sink counter.
+	 */
+	if ((res_alloc->res_reserved + counters_guaranteed) >
+	    (dev->caps.max_counters - 1))
+		return 0;
+
+	return counters_guaranteed;
 }
 
 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
@@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i, j;
 	int t;
-	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
 
 	priv->mfunc.master.res_tracker.slave_list =
 		kcalloc(dev->num_slaves, sizeof(struct slave_list),
@@ -603,16 +621,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
 			break;
 		case RES_COUNTER:
 			res_alloc->quota[t] = dev->caps.max_counters;
-			if (t == mlx4_master_func_num(dev))
-				res_alloc->guaranteed[t] =
-					MLX4_PF_COUNTERS_PER_PORT *
-					MLX4_MAX_PORTS;
-			else if (t <= max_vfs_guarantee_counter)
-				res_alloc->guaranteed[t] =
-					MLX4_VF_COUNTERS_PER_PORT *
-					MLX4_MAX_PORTS;
-			else
-				res_alloc->guaranteed[t] = 0;
+			res_alloc->guaranteed[t] =
+				mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
 			break;
 		default:
 			break;
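
mlx4_calc_res_counter_guaranteed() above sizes each function's guarantee from its real port count and grants nothing when the pool, minus the one reserved sink counter, cannot cover the request. The same arithmetic as a standalone sketch; the capacities and the per-port constant below are invented for the example, not device values:

#include <stdio.h>

#define VF_COUNTERS_PER_PORT 2

/* Mirror of the guarantee logic: max_counters is the device pool,
 * reserved what earlier allocations already claimed, ports the VF's
 * active port count. One counter is held back as the sink counter,
 * hence the "- 1".
 */
static int guaranteed(int max_counters, int reserved, int ports)
{
	int want = ports * VF_COUNTERS_PER_PORT;

	if (reserved + want > max_counters - 1)
		return 0;	/* not enough left: guarantee nothing */
	return want;
}

int main(void)
{
	printf("%d\n", guaranteed(128, 10, 2));	/* -> 4 */
	printf("%d\n", guaranteed(15, 12, 2));	/* -> 0, pool exhausted */
	return 0;
}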
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8d76452cacdc..f1a7bc46f1c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -345,7 +345,7 @@ struct mlx5e_tx_wqe_info {
 	u8 num_wqebbs;
 	u8 num_dma;
 #ifdef CONFIG_MLX5_EN_TLS
-	skb_frag_t *resync_dump_frag;
+	struct page *resync_dump_frag_page;
 #endif
 };
 
@@ -410,6 +410,7 @@ struct mlx5e_txqsq {
 	struct device *pdev;
 	__be32 mkey_be;
 	unsigned long state;
+	unsigned int hw_mtu;
 	struct hwtstamp_config *tstamp;
 	struct mlx5_clock *clock;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index b3a249b2a482..ac44bbe95c5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -141,7 +141,7 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
141 "Failed to create hv vhca stats agent, err = %ld\n", 141 "Failed to create hv vhca stats agent, err = %ld\n",
142 PTR_ERR(agent)); 142 PTR_ERR(agent));
143 143
144 kfree(priv->stats_agent.buf); 144 kvfree(priv->stats_agent.buf);
145 return IS_ERR_OR_NULL(agent); 145 return IS_ERR_OR_NULL(agent);
146 } 146 }
147 147
@@ -157,5 +157,5 @@ void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
157 return; 157 return;
158 158
159 mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent); 159 mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
160 kfree(priv->stats_agent.buf); 160 kvfree(priv->stats_agent.buf);
161} 161}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index f8ee18b4da6f..13af72556987 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -97,15 +97,19 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 	if (ret)
 		return ret;
 
-	if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
+	if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
+		ip_rt_put(rt);
 		return -ENETUNREACH;
+	}
 #else
 	return -EOPNOTSUPP;
 #endif
 
 	ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
-	if (ret < 0)
+	if (ret < 0) {
+		ip_rt_put(rt);
 		return ret;
+	}
 
 	if (!(*out_ttl))
 		*out_ttl = ip4_dst_hoplimit(&rt->dst);
@@ -149,8 +153,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	*out_ttl = ip6_dst_hoplimit(dst);
 
 	ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
-	if (ret < 0)
+	if (ret < 0) {
+		dst_release(dst);
 		return ret;
+	}
 #else
 	return -EOPNOTSUPP;
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 87be96747902..7c8796d9743f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -15,15 +15,14 @@
 #else
 /* TLS offload requires additional stop_room for:
  * - a resync SKB.
- * kTLS offload requires additional stop_room for:
- * - static params WQE,
- * - progress params WQE, and
- * - resync DUMP per frag.
+ * kTLS offload requires fixed additional stop_room for:
+ * - a static params WQE, and a progress params WQE.
+ * The additional MTU-depending room for the resync DUMP WQEs
+ * will be calculated and added in runtime.
  */
 #define MLX5E_SQ_TLS_ROOM \
 	(MLX5_SEND_WQE_MAX_WQEBBS + \
-	 MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
-	 MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
+	 MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
 #endif
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
@@ -92,7 +91,7 @@ mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
 
 	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
 	for (; wi < edge_wi; wi++) {
-		wi->skb = NULL;
+		memset(wi, 0, sizeof(*wi));
 		wi->num_wqebbs = 1;
 		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index d2ff74d52720..46725cd743a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -38,7 +38,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
 		return -ENOMEM;
 
 	tx_priv->expected_seq = start_offload_tcp_sn;
-	tx_priv->crypto_info = crypto_info;
+	tx_priv->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
 	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
 
 	/* tc and underlay_qpn values are not in use for tls tis */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index b7298f9ee3d3..a3efa29a4629 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -21,7 +21,14 @@
 	 MLX5_ST_SZ_BYTES(tls_progress_params))
 #define MLX5E_KTLS_PROGRESS_WQEBBS \
 	(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
+
+struct mlx5e_dump_wqe {
+	struct mlx5_wqe_ctrl_seg ctrl;
+	struct mlx5_wqe_data_seg data;
+};
+
+#define MLX5E_KTLS_DUMP_WQEBBS \
+	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
 
 enum {
 	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
@@ -37,7 +44,7 @@ enum {
 
 struct mlx5e_ktls_offload_context_tx {
 	struct tls_offload_context_tx *tx_ctx;
-	struct tls_crypto_info *crypto_info;
+	struct tls12_crypto_info_aes_gcm_128 crypto_info;
 	u32 expected_seq;
 	u32 tisn;
 	u32 key_id;
@@ -86,14 +93,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 					 struct mlx5e_tx_wqe **wqe, u16 *pi);
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
 					   struct mlx5e_tx_wqe_info *wi,
-					   struct mlx5e_sq_dma *dma);
-
+					   u32 *dma_fifo_cc);
+static inline u8
+mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
+			    unsigned int sync_len)
+{
+	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
+	 */
+	return MLX5E_KTLS_DUMP_WQEBBS *
+		(nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
+}
 #else
 
 static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
 }
 
+static inline void
+mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+				      struct mlx5e_tx_wqe_info *wi,
+				      u32 *dma_fifo_cc) {}
+
 #endif
 
 #endif /* __MLX5E_TLS_H__ */
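
mlx5e_ktls_dumps_num_wqebbs() above gives an upper bound on resync DUMP WQEBBs: splitting on hw_mtu boundaries can add at most one extra DUMP WQE per started MTU of sync_len, on top of one per fragment. A worked version of the bound with invented sizes; KTLS_DUMP_WQEBBS is taken as 1 here, on the assumption that the two-segment DUMP WQE fits one 64-byte basic block:

#include <stdio.h>

#define KTLS_DUMP_WQEBBS 1	/* assumed: one WQEBB per DUMP WQE */

/* Upper bound mirroring mlx5e_ktls_dumps_num_wqebbs(): one DUMP WQE
 * per fragment plus one per started hw_mtu of the sync length.
 */
static unsigned int dumps_wqebbs(unsigned int nfrags,
				 unsigned int sync_len,
				 unsigned int hw_mtu)
{
	unsigned int per_mtu = (sync_len + hw_mtu - 1) / hw_mtu;

	return KTLS_DUMP_WQEBBS * (nfrags + per_mtu);
}

int main(void)
{
	/* 3 frags, 4000B to resync, 1500B MTU: 3 + ceil(4000/1500) = 6 */
	printf("%u WQEBBs reserved\n", dumps_wqebbs(3, 4000, 1500));
	return 0;
}

This is the same bound mlx5e_alloc_txqsq() charges to stop_room (with MAX_SKB_FRAGS and TLS_MAX_PAYLOAD_SIZE) in the en_main.c hunk later in this diff.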
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index d195366461c9..778dab1af8fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -24,17 +24,12 @@ enum {
 static void
 fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
-	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
-	struct tls12_crypto_info_aes_gcm_128 *info;
+	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
 	char *initial_rn, *gcm_iv;
 	u16 salt_sz, rec_seq_sz;
 	char *salt, *rec_seq;
 	u8 tls_version;
 
-	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
-		return;
-
-	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
 	EXTRACT_INFO_FIELDS;
 
 	gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
@@ -108,16 +103,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
-		       u16 pi, u8 num_wqebbs,
-		       skb_frag_t *resync_dump_frag,
-		       u32 num_bytes)
+		       u16 pi, u8 num_wqebbs, u32 num_bytes,
+		       struct page *page)
 {
 	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
 
-	wi->skb = NULL;
+	memset(wi, 0, sizeof(*wi));
 	wi->num_wqebbs = num_wqebbs;
-	wi->resync_dump_frag = resync_dump_frag;
 	wi->num_bytes = num_bytes;
+	wi->resync_dump_frag_page = page;
 }
 
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -145,7 +139,7 @@ post_static_params(struct mlx5e_txqsq *sq,
 
 	umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
 	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
-	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
+	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
 	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
 }
 
@@ -159,7 +153,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
 
 	wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
 	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
-	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
+	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
 	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
 }
 
@@ -169,6 +163,14 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
 			      bool skip_static_post, bool fence_first_post)
 {
 	bool progress_fence = skip_static_post || !fence_first_post;
+	struct mlx5_wq_cyc *wq = &sq->wq;
+	u16 contig_wqebbs_room, pi;
+
+	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+	if (unlikely(contig_wqebbs_room <
+		     MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
+		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
 	if (!skip_static_post)
 		post_static_params(sq, priv_tx, fence_first_post);
@@ -180,29 +182,36 @@ struct tx_sync_info {
 	u64 rcd_sn;
 	s32 sync_len;
 	int nr_frags;
-	skb_frag_t *frags[MAX_SKB_FRAGS];
+	skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+enum mlx5e_ktls_sync_retval {
+	MLX5E_KTLS_SYNC_DONE,
+	MLX5E_KTLS_SYNC_FAIL,
+	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
 };
 
-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-			     u32 tcp_seq, struct tx_sync_info *info)
+static enum mlx5e_ktls_sync_retval
+tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, struct tx_sync_info *info)
 {
 	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
 	struct tls_record_info *record;
 	int remaining, i = 0;
 	unsigned long flags;
-	bool ret = true;
 
 	spin_lock_irqsave(&tx_ctx->lock, flags);
 	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
 
 	if (unlikely(!record)) {
-		ret = false;
+		ret = MLX5E_KTLS_SYNC_FAIL;
 		goto out;
 	}
 
 	if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-		if (!tls_record_is_start_marker(record))
-			ret = false;
+		ret = tls_record_is_start_marker(record) ?
+		      MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
 		goto out;
 	}
 
@@ -211,13 +220,13 @@ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	while (remaining > 0) {
 		skb_frag_t *frag = &record->frags[i];
 
-		__skb_frag_ref(frag);
+		get_page(skb_frag_page(frag));
 		remaining -= skb_frag_size(frag);
-		info->frags[i++] = frag;
+		info->frags[i++] = *frag;
 	}
 	/* reduce the part which will be sent with the original SKB */
 	if (remaining < 0)
-		skb_frag_size_add(info->frags[i - 1], remaining);
+		skb_frag_size_add(&info->frags[i - 1], remaining);
 	info->nr_frags = i;
 out:
 	spin_unlock_irqrestore(&tx_ctx->lock, flags);
@@ -229,17 +238,12 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
 		      struct mlx5e_ktls_offload_context_tx *priv_tx,
 		      u64 rcd_sn)
 {
-	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
-	struct tls12_crypto_info_aes_gcm_128 *info;
+	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
 	__be64 rn_be = cpu_to_be64(rcd_sn);
 	bool skip_static_post;
 	u16 rec_seq_sz;
 	char *rec_seq;
 
-	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
-		return;
-
-	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
 	rec_seq = info->rec_seq;
 	rec_seq_sz = sizeof(info->rec_seq);
 
@@ -250,11 +254,6 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
 	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
 }
 
-struct mlx5e_dump_wqe {
-	struct mlx5_wqe_ctrl_seg ctrl;
-	struct mlx5_wqe_data_seg data;
-};
-
 static int
 tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
 {
@@ -262,7 +261,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
 	struct mlx5_wqe_data_seg *dseg;
 	struct mlx5e_dump_wqe *wqe;
 	dma_addr_t dma_addr = 0;
-	u8 num_wqebbs;
 	u16 ds_cnt;
 	int fsz;
 	u16 pi;
@@ -270,7 +268,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
 	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
 
 	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 
 	cseg = &wqe->ctrl;
 	dseg = &wqe->data;
@@ -291,24 +288,27 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
 	dseg->byte_count = cpu_to_be32(fsz);
 	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 
-	tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
-	sq->pc += num_wqebbs;
-
-	WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
-	     "unexpected DUMP num_wqebbs, %d > %d",
-	     num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
+	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
+	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
 
 	return 0;
 }
 
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
 					   struct mlx5e_tx_wqe_info *wi,
-					   struct mlx5e_sq_dma *dma)
+					   u32 *dma_fifo_cc)
 {
-	struct mlx5e_sq_stats *stats = sq->stats;
+	struct mlx5e_sq_stats *stats;
+	struct mlx5e_sq_dma *dma;
+
+	if (!wi->resync_dump_frag_page)
+		return;
+
+	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+	stats = sq->stats;
 
 	mlx5e_tx_dma_unmap(sq->pdev, dma);
-	__skb_frag_unref(wi->resync_dump_frag);
+	put_page(wi->resync_dump_frag_page);
 	stats->tls_dump_packets++;
 	stats->tls_dump_bytes += wi->num_bytes;
 }
@@ -318,25 +318,31 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
-	tx_fill_wi(sq, pi, 1, NULL, 0);
+	tx_fill_wi(sq, pi, 1, 0, NULL);
 
 	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
 }
 
-static struct sk_buff *
+static enum mlx5e_ktls_sync_retval
 mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 			 struct mlx5e_txqsq *sq,
-			 struct sk_buff *skb,
+			 int datalen,
 			 u32 seq)
 {
 	struct mlx5e_sq_stats *stats = sq->stats;
 	struct mlx5_wq_cyc *wq = &sq->wq;
+	enum mlx5e_ktls_sync_retval ret;
 	struct tx_sync_info info = {};
 	u16 contig_wqebbs_room, pi;
 	u8 num_wqebbs;
-	int i;
+	int i = 0;
 
-	if (!tx_sync_info_get(priv_tx, seq, &info)) {
+	ret = tx_sync_info_get(priv_tx, seq, &info);
+	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
+		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
+			stats->tls_skip_no_sync_data++;
+			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
+		}
 		/* We might get here if a retransmission reaches the driver
 		 * after the relevant record is acked.
 		 * It should be safe to drop the packet in this case
@@ -346,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	}
 
 	if (unlikely(info.sync_len < 0)) {
-		u32 payload;
-		int headln;
-
-		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		payload = skb->len - headln;
-		if (likely(payload <= -info.sync_len))
-			return skb;
+		if (likely(datalen <= -info.sync_len))
+			return MLX5E_KTLS_SYNC_DONE;
 
 		stats->tls_drop_bypass_req++;
 		goto err_out;
@@ -360,30 +361,62 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 
 	stats->tls_ooo++;
 
-	num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
-		(info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
+	tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
+	 * actual data xmit.
+	 */
+	if (!info.nr_frags) {
+		tx_post_fence_nop(sq);
+		return MLX5E_KTLS_SYNC_DONE;
+	}
+
+	num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
 	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+
 	if (unlikely(contig_wqebbs_room < num_wqebbs))
 		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
 	tx_post_resync_params(sq, priv_tx, info.rcd_sn);
 
-	for (i = 0; i < info.nr_frags; i++)
-		if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i))
-			goto err_out;
+	for (; i < info.nr_frags; i++) {
+		unsigned int orig_fsz, frag_offset = 0, n = 0;
+		skb_frag_t *f = &info.frags[i];
 
-	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
-	 * actual data xmit.
-	 */
-	if (!info.nr_frags)
-		tx_post_fence_nop(sq);
+		orig_fsz = skb_frag_size(f);
 
-	return skb;
+		do {
+			bool fence = !(i || frag_offset);
+			unsigned int fsz;
+
+			n++;
+			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
+			skb_frag_size_set(f, fsz);
+			if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+				page_ref_add(skb_frag_page(f), n - 1);
+				goto err_out;
+			}
+
+			skb_frag_off_add(f, fsz);
+			frag_offset += fsz;
+		} while (frag_offset < orig_fsz);
+
+		page_ref_add(skb_frag_page(f), n - 1);
+	}
+
+	return MLX5E_KTLS_SYNC_DONE;
 
 err_out:
-	dev_kfree_skb_any(skb);
-	return NULL;
+	for (; i < info.nr_frags; i++)
+		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
+		 */
+		put_page(skb_frag_page(&info.frags[i]));
+
+	return MLX5E_KTLS_SYNC_FAIL;
 }
 
 struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
@@ -419,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 
 	seq = ntohl(tcp_hdr(skb)->seq);
 	if (unlikely(priv_tx->expected_seq != seq)) {
-		skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
-		if (unlikely(!skb))
+		enum mlx5e_ktls_sync_retval ret =
+			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+
+		if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+			*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+		else if (ret == MLX5E_KTLS_SYNC_FAIL)
+			goto err_out;
+		else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
 			goto out;
-		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
 	}
 
 	priv_tx->expected_seq = seq + datalen;
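
In the resync path above, tx_sync_info_get() takes one page reference per record fragment; the DUMP loop then splits each fragment into hw_mtu-sized chunks and posts one DUMP WQE per chunk, and each chunk's completion drops one reference, so the loop tops the count up with page_ref_add(page, n - 1). A sketch of the chunk and refcount arithmetic, assuming this reading of the flow; the numbers are invented:

#include <stdio.h>

/* Count the hw_mtu-sized chunks a fragment of fsz bytes is split into,
 * mirroring the do/while in mlx5e_ktls_tx_handle_ooo(). With one page
 * ref already held per fragment, n chunks need page_ref_add(n - 1) so
 * each DUMP completion can drop exactly one ref.
 */
static unsigned int split_chunks(unsigned int fsz, unsigned int hw_mtu)
{
	unsigned int off = 0, n = 0;

	do {
		unsigned int len = fsz - off < hw_mtu ? fsz - off : hw_mtu;

		n++;			/* one DUMP WQE for this chunk */
		off += len;
	} while (off < fsz);
	return n;
}

int main(void)
{
	unsigned int n = split_chunks(4000, 1500);	/* -> 3 chunks */

	printf("%u chunks, page_ref_add(page, %u)\n", n, n - 1);
	return 0;
}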
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c5a9c20d7f00..327c93a7bd55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1021,7 +1021,7 @@ static bool ext_link_mode_requested(const unsigned long *adver)
 {
 #define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
 	int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
-	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,};
 
 	bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
 	return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7569287f8f3c..772bfdbdeb9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1128,6 +1128,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 	sq->txq_ix    = txq_ix;
 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
+	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
 	sq->stop_room = MLX5E_SQ_STOP_ROOM;
 	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
@@ -1135,10 +1136,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
 	if (MLX5_IPSEC_DEV(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
+#ifdef CONFIG_MLX5_EN_TLS
 	if (mlx5_accel_is_tls_device(c->priv->mdev)) {
 		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
-		sq->stop_room += MLX5E_SQ_TLS_ROOM;
+		sq->stop_room += MLX5E_SQ_TLS_ROOM +
+			mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
+						    TLS_MAX_PAYLOAD_SIZE);
 	}
+#endif
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1349,9 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 	/* last doorbell out, godspeed .. */
 	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
 		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+		struct mlx5e_tx_wqe_info *wi;
 		struct mlx5e_tx_wqe *nop;
 
-		sq->db.wqe_info[pi].skb = NULL;
+		wi = &sq->db.wqe_info[pi];
+
+		memset(wi, 0, sizeof(*wi));
+		wi->num_wqebbs = 1;
 		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 95892a3b63a1..cd9bb7c7b341 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -611,8 +611,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 
 	mutex_lock(&esw->offloads.encap_tbl_lock);
 	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
-	if (e->compl_result || (encap_connected == neigh_connected &&
-				ether_addr_equal(e->h_dest, ha)))
+	if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+				    ether_addr_equal(e->h_dest, ha)))
 		goto unlock;
 
 	mlx5e_take_all_encap_flows(e, &flow_list);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d6a547238de0..82cffb3a9964 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1386,8 +1386,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1386 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) 1386 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1387 return 0; 1387 return 0;
1388 1388
1389 if (rq->cqd.left) 1389 if (rq->cqd.left) {
1390 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget); 1390 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
1391 if (rq->cqd.left || work_done >= budget)
1392 goto out;
1393 }
1391 1394
1392 cqe = mlx5_cqwq_get_cqe(cqwq); 1395 cqe = mlx5_cqwq_get_cqe(cqwq);
1393 if (!cqe) { 1396 if (!cqe) {
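
[note] The en_rx.c change returns early when leftover compressed CQEs remain, or when the NAPI budget is already spent, instead of going on to fetch fresh CQEs. A minimal sketch of that budget discipline, with hypothetical drain_leftover() and poll() helpers:

#include <stdio.h>

/* Drain carried-over work first, never exceeding the budget. */
static int drain_leftover(int *leftover, int budget)
{
    int done = 0;

    while (*leftover && done < budget) {
        (*leftover)--;
        done++;
    }
    return done;
}

static int poll(int *leftover, int fresh, int budget)
{
    int work_done = 0;

    if (*leftover) {
        work_done += drain_leftover(leftover, budget);
        /* Still backlogged or budget spent: report what was done and
         * let the scheduler call us again; do not touch new entries. */
        if (*leftover || work_done >= budget)
            return work_done;
    }
    while (fresh-- && work_done < budget)
        work_done++;
    return work_done;
}

int main(void)
{
    int leftover = 5;

    printf("polled %d\n", poll(&leftover, 10, 4)); /* 4: budget hit in drain */
    return 0;
}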
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 840ec945ccba..bbff8d8ded76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -35,6 +35,7 @@
35#include <linux/udp.h> 35#include <linux/udp.h>
36#include <net/udp.h> 36#include <net/udp.h>
37#include "en.h" 37#include "en.h"
38#include "en/port.h"
38 39
39enum { 40enum {
40 MLX5E_ST_LINK_STATE, 41 MLX5E_ST_LINK_STATE,
@@ -80,22 +81,12 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv)
80 81
81static int mlx5e_test_link_speed(struct mlx5e_priv *priv) 82static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
82{ 83{
83 u32 out[MLX5_ST_SZ_DW(ptys_reg)]; 84 u32 speed;
84 u32 eth_proto_oper;
85 int i;
86 85
87 if (!netif_carrier_ok(priv->netdev)) 86 if (!netif_carrier_ok(priv->netdev))
88 return 1; 87 return 1;
89 88
90 if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) 89 return mlx5e_port_linkspeed(priv->mdev, &speed);
91 return 1;
92
93 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
94 for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
95 if (eth_proto_oper & MLX5E_PROT_MASK(i))
96 return 0;
97 }
98 return 1;
99} 90}
100 91
101struct mlx5ehdr { 92struct mlx5ehdr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index ac6fdcda7019..7e6ebd0505cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -52,11 +52,12 @@ static const struct counter_desc sw_stats_desc[] = {
52 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) }, 52 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
53 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) }, 53 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
54 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, 54 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
55 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
56 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
55 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) }, 57 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
58 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
56 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) }, 59 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
57 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) }, 60 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
58 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
59 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
60#endif 61#endif
61 62
62 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, 63 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
288 s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; 289 s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
289 s->tx_tls_ctx += sq_stats->tls_ctx; 290 s->tx_tls_ctx += sq_stats->tls_ctx;
290 s->tx_tls_ooo += sq_stats->tls_ooo; 291 s->tx_tls_ooo += sq_stats->tls_ooo;
292 s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
293 s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
291 s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; 294 s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
295 s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
292 s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; 296 s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
293 s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; 297 s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
294 s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
295 s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
296#endif 298#endif
297 s->tx_cqes += sq_stats->cqes; 299 s->tx_cqes += sq_stats->cqes;
298 } 300 }
@@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stats_desc[] = {
1472 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, 1474 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
1473 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) }, 1475 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
1474 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) }, 1476 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
1475 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
1476 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
1477 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) }, 1477 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
1478 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) }, 1478 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
1479 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
1480 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
1481 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
1482 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
1479#endif 1483#endif
1480 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, 1484 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
1481 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, 1485 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 79f261bf86ac..869f3502f631 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -129,11 +129,12 @@ struct mlx5e_sw_stats {
129 u64 tx_tls_encrypted_bytes; 129 u64 tx_tls_encrypted_bytes;
130 u64 tx_tls_ctx; 130 u64 tx_tls_ctx;
131 u64 tx_tls_ooo; 131 u64 tx_tls_ooo;
132 u64 tx_tls_dump_packets;
133 u64 tx_tls_dump_bytes;
132 u64 tx_tls_resync_bytes; 134 u64 tx_tls_resync_bytes;
135 u64 tx_tls_skip_no_sync_data;
133 u64 tx_tls_drop_no_sync_data; 136 u64 tx_tls_drop_no_sync_data;
134 u64 tx_tls_drop_bypass_req; 137 u64 tx_tls_drop_bypass_req;
135 u64 tx_tls_dump_packets;
136 u64 tx_tls_dump_bytes;
137#endif 138#endif
138 139
139 u64 rx_xsk_packets; 140 u64 rx_xsk_packets;
@@ -273,11 +274,12 @@ struct mlx5e_sq_stats {
273 u64 tls_encrypted_bytes; 274 u64 tls_encrypted_bytes;
274 u64 tls_ctx; 275 u64 tls_ctx;
275 u64 tls_ooo; 276 u64 tls_ooo;
277 u64 tls_dump_packets;
278 u64 tls_dump_bytes;
276 u64 tls_resync_bytes; 279 u64 tls_resync_bytes;
280 u64 tls_skip_no_sync_data;
277 u64 tls_drop_no_sync_data; 281 u64 tls_drop_no_sync_data;
278 u64 tls_drop_bypass_req; 282 u64 tls_drop_bypass_req;
279 u64 tls_dump_packets;
280 u64 tls_dump_bytes;
281#endif 283#endif
282 /* less likely accessed in data path */ 284 /* less likely accessed in data path */
283 u64 csum_none; 285 u64 csum_none;
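
[note] The stats hunks keep three places in step: the descriptor tables, the per-channel accumulation loop, and the struct layouts. The descriptor trick itself pairs a display name with a field offset, so the dump loop cannot mismatch strings and values no matter how the entries are ordered. A small self-contained analogue of the MLX5E_DECLARE_STAT pattern (the names here are hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sw_stats {
    uint64_t tls_encrypted_bytes;
    uint64_t tls_dump_packets;
    uint64_t tls_dump_bytes;
};

/* Keep the display name and the field offset together, so the dump
 * loop below reads each value through the recorded offset. */
struct counter_desc {
    const char *name;
    size_t offset;
};

#define DECLARE_STAT(type, field) { #field, offsetof(type, field) }

static const struct counter_desc sw_desc[] = {
    DECLARE_STAT(struct sw_stats, tls_encrypted_bytes),
    DECLARE_STAT(struct sw_stats, tls_dump_packets),
    DECLARE_STAT(struct sw_stats, tls_dump_bytes),
};

int main(void)
{
    struct sw_stats s = { 4096, 7, 3584 };

    for (size_t i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++)
        printf("%s: %llu\n", sw_desc[i].name,
               (unsigned long long)*(const uint64_t *)
                       ((const char *)&s + sw_desc[i].offset));
    return 0;
}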
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3e78a727f3e6..fda0b37075e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1278,8 +1278,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1278 mlx5_eswitch_del_vlan_action(esw, attr); 1278 mlx5_eswitch_del_vlan_action(esw, attr);
1279 1279
1280 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) 1280 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
1281 if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) 1281 if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
1282 mlx5e_detach_encap(priv, flow, out_index); 1282 mlx5e_detach_encap(priv, flow, out_index);
1283 kfree(attr->parse_attr->tun_info[out_index]);
1284 }
1283 kvfree(attr->parse_attr); 1285 kvfree(attr->parse_attr);
1284 1286
1285 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 1287 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -1559,6 +1561,7 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
1559 mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); 1561 mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
1560 } 1562 }
1561 1563
1564 kfree(e->tun_info);
1562 kfree(e->encap_header); 1565 kfree(e->encap_header);
1563 kfree_rcu(e, rcu); 1566 kfree_rcu(e, rcu);
1564} 1567}
@@ -2972,6 +2975,13 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
2972 return NULL; 2975 return NULL;
2973} 2976}
2974 2977
2978static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
2979{
2980 size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
2981
2982 return kmemdup(tun_info, tun_size, GFP_KERNEL);
2983}
2984
2975static int mlx5e_attach_encap(struct mlx5e_priv *priv, 2985static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2976 struct mlx5e_tc_flow *flow, 2986 struct mlx5e_tc_flow *flow,
2977 struct net_device *mirred_dev, 2987 struct net_device *mirred_dev,
@@ -3028,13 +3038,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3028 refcount_set(&e->refcnt, 1); 3038 refcount_set(&e->refcnt, 1);
3029 init_completion(&e->res_ready); 3039 init_completion(&e->res_ready);
3030 3040
3041 tun_info = dup_tun_info(tun_info);
3042 if (!tun_info) {
3043 err = -ENOMEM;
3044 goto out_err_init;
3045 }
3031 e->tun_info = tun_info; 3046 e->tun_info = tun_info;
3032 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); 3047 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
3033 if (err) { 3048 if (err)
3034 kfree(e); 3049 goto out_err_init;
3035 e = NULL;
3036 goto out_err;
3037 }
3038 3050
3039 INIT_LIST_HEAD(&e->flows); 3051 INIT_LIST_HEAD(&e->flows);
3040 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); 3052 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
@@ -3075,6 +3087,12 @@ out_err:
3075 if (e) 3087 if (e)
3076 mlx5e_encap_put(priv, e); 3088 mlx5e_encap_put(priv, e);
3077 return err; 3089 return err;
3090
3091out_err_init:
3092 mutex_unlock(&esw->offloads.encap_tbl_lock);
3093 kfree(tun_info);
3094 kfree(e);
3095 return err;
3078} 3096}
3079 3097
3080static int parse_tc_vlan_action(struct mlx5e_priv *priv, 3098static int parse_tc_vlan_action(struct mlx5e_priv *priv,
@@ -3160,7 +3178,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
3160 struct mlx5_esw_flow_attr *attr, 3178 struct mlx5_esw_flow_attr *attr,
3161 u32 *action) 3179 u32 *action)
3162{ 3180{
3163 int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev); 3181 int nest_level = attr->parse_attr->filter_dev->lower_level;
3164 struct flow_action_entry vlan_act = { 3182 struct flow_action_entry vlan_act = {
3165 .id = FLOW_ACTION_VLAN_POP, 3183 .id = FLOW_ACTION_VLAN_POP,
3166 }; 3184 };
@@ -3295,7 +3313,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
3295 } else if (encap) { 3313 } else if (encap) {
3296 parse_attr->mirred_ifindex[attr->out_count] = 3314 parse_attr->mirred_ifindex[attr->out_count] =
3297 out_dev->ifindex; 3315 out_dev->ifindex;
3298 parse_attr->tun_info[attr->out_count] = info; 3316 parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
3317 if (!parse_attr->tun_info[attr->out_count])
3318 return -ENOMEM;
3299 encap = false; 3319 encap = false;
3300 attr->dests[attr->out_count].flags |= 3320 attr->dests[attr->out_count].flags |=
3301 MLX5_ESW_DEST_ENCAP; 3321 MLX5_ESW_DEST_ENCAP;
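
[note] en_tc.c stops caching caller-owned tun_info pointers and instead deep-copies the variable-length structure, header plus options_len trailing bytes, so teardown and the new out_err_init path can free unconditionally. A userspace analogue of that kmemdup() duplication, using a hypothetical struct tun_info with a flexible array member:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for ip_tunnel_info: a fixed header followed
 * by options_len bytes of trailing option data. */
struct tun_info {
    unsigned options_len;
    unsigned char options[]; /* flexible array member */
};

/* Userspace analogue of kmemdup(): the size must cover the header
 * and the trailing options, or the copy silently truncates. */
static struct tun_info *dup_tun_info(const struct tun_info *src)
{
    size_t sz = sizeof(*src) + src->options_len;
    struct tun_info *copy = malloc(sz);

    if (copy)
        memcpy(copy, src, sz);
    return copy;
}

int main(void)
{
    struct tun_info *orig = malloc(sizeof(*orig) + 4);
    struct tun_info *copy;

    if (!orig)
        return 1;
    orig->options_len = 4;
    memcpy(orig->options, "\x01\x02\x03\x04", 4);

    copy = dup_tun_info(orig);
    free(orig);              /* caller's buffer may now go away */
    if (!copy)
        return 1;
    printf("copy opt[2] = %d\n", copy->options[2]); /* 3 */
    free(copy);              /* the owner frees on every exit path */
    return 0;
}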
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index d3a67a9b4eba..67dc4f0921b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -403,7 +403,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
403static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, 403static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
404 struct mlx5_err_cqe *err_cqe) 404 struct mlx5_err_cqe *err_cqe)
405{ 405{
406 u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq); 406 struct mlx5_cqwq *wq = &sq->cq.wq;
407 u32 ci;
408
409 ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
407 410
408 netdev_err(sq->channel->netdev, 411 netdev_err(sq->channel->netdev,
409 "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", 412 "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
@@ -479,14 +482,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
479 skb = wi->skb; 482 skb = wi->skb;
480 483
481 if (unlikely(!skb)) { 484 if (unlikely(!skb)) {
482#ifdef CONFIG_MLX5_EN_TLS 485 mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
483 if (wi->resync_dump_frag) {
484 struct mlx5e_sq_dma *dma =
485 mlx5e_dma_get(sq, dma_fifo_cc++);
486
487 mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
488 }
489#endif
490 sqcc += wi->num_wqebbs; 486 sqcc += wi->num_wqebbs;
491 continue; 487 continue;
492 } 488 }
@@ -542,29 +538,38 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
542{ 538{
543 struct mlx5e_tx_wqe_info *wi; 539 struct mlx5e_tx_wqe_info *wi;
544 struct sk_buff *skb; 540 struct sk_buff *skb;
541 u32 dma_fifo_cc;
542 u16 sqcc;
545 u16 ci; 543 u16 ci;
546 int i; 544 int i;
547 545
548 while (sq->cc != sq->pc) { 546 sqcc = sq->cc;
549 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); 547 dma_fifo_cc = sq->dma_fifo_cc;
548
549 while (sqcc != sq->pc) {
550 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
550 wi = &sq->db.wqe_info[ci]; 551 wi = &sq->db.wqe_info[ci];
551 skb = wi->skb; 552 skb = wi->skb;
552 553
553 if (!skb) { /* nop */ 554 if (!skb) {
554 sq->cc++; 555 mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
556 sqcc += wi->num_wqebbs;
555 continue; 557 continue;
556 } 558 }
557 559
558 for (i = 0; i < wi->num_dma; i++) { 560 for (i = 0; i < wi->num_dma; i++) {
559 struct mlx5e_sq_dma *dma = 561 struct mlx5e_sq_dma *dma =
560 mlx5e_dma_get(sq, sq->dma_fifo_cc++); 562 mlx5e_dma_get(sq, dma_fifo_cc++);
561 563
562 mlx5e_tx_dma_unmap(sq->pdev, dma); 564 mlx5e_tx_dma_unmap(sq->pdev, dma);
563 } 565 }
564 566
565 dev_kfree_skb_any(skb); 567 dev_kfree_skb_any(skb);
566 sq->cc += wi->num_wqebbs; 568 sqcc += wi->num_wqebbs;
567 } 569 }
570
571 sq->dma_fifo_cc = dma_fifo_cc;
572 sq->cc = sqcc;
568} 573}
569 574
570#ifdef CONFIG_MLX5_CORE_IPOIB 575#ifdef CONFIG_MLX5_CORE_IPOIB
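
[note] mlx5e_free_txqsq_descs() now advances local copies of the consumer counters and writes both back once at the end, which also lets the skb-less kTLS dump entries release their DMA fifo slots. The snapshot-then-commit shape, reduced to a runnable sketch with hypothetical per-entry size tables:

#include <stdio.h>

struct sq {
    unsigned cc;          /* consumed work entries */
    unsigned dma_fifo_cc; /* consumed DMA fifo entries */
    unsigned pc;          /* produced work entries */
};

/* Walk local cursors while unwinding, then publish both with single
 * stores so no reader observes a half-advanced pair. */
static void free_descs(struct sq *sq, const unsigned *wqebbs_per_entry,
                       const unsigned *dmas_per_entry)
{
    unsigned sqcc = sq->cc;
    unsigned dma_cc = sq->dma_fifo_cc;
    unsigned i = 0;

    while (sqcc != sq->pc) {
        dma_cc += dmas_per_entry[i];  /* release the entry's DMA slots */
        sqcc += wqebbs_per_entry[i];  /* skip past the entry's WQEBBs */
        i++;
    }
    sq->dma_fifo_cc = dma_cc;
    sq->cc = sqcc;
}

int main(void)
{
    unsigned wqebbs[] = { 1, 2, 1 };
    unsigned dmas[]   = { 0, 3, 1 };
    struct sq sq = { .cc = 0, .dma_fifo_cc = 0, .pc = 4 };

    free_descs(&sq, wqebbs, dmas);
    printf("cc=%u dma_fifo_cc=%u\n", sq.cc, sq.dma_fifo_cc); /* 4 and 4 */
    return 0;
}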
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 00d71db15f22..369499e88fe8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -285,7 +285,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
285 285
286 mlx5_eswitch_set_rule_source_port(esw, spec, attr); 286 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
287 287
288 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
289 if (attr->outer_match_level != MLX5_MATCH_NONE) 288 if (attr->outer_match_level != MLX5_MATCH_NONE)
290 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 289 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
291 290
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 1d55a324a17e..7879e1746297 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -177,22 +177,32 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
177 memset(&src->vlan[1], 0, sizeof(src->vlan[1])); 177 memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
178} 178}
179 179
180static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
181 const struct mlx5_flow_spec *spec)
182{
183 u32 port_mask, port_value;
184
185 if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
186 return spec->flow_context.flow_source == MLX5_VPORT_UPLINK;
187
188 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
189 misc_parameters.source_port);
190 port_value = MLX5_GET(fte_match_param, spec->match_value,
191 misc_parameters.source_port);
192 return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
193}
194
180bool 195bool
181mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw, 196mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
182 struct mlx5_flow_act *flow_act, 197 struct mlx5_flow_act *flow_act,
183 struct mlx5_flow_spec *spec) 198 struct mlx5_flow_spec *spec)
184{ 199{
185 u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
186 misc_parameters.source_port);
187 u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
188 misc_parameters.source_port);
189
190 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table)) 200 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
191 return false; 201 return false;
192 202
193 /* push vlan on RX */ 203 /* push vlan on RX */
194 return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) && 204 return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
195 ((port_mask & port_value) == MLX5_VPORT_UPLINK); 205 mlx5_eswitch_offload_is_uplink_port(esw, spec);
196} 206}
197 207
198struct mlx5_flow_handle * 208struct mlx5_flow_handle *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 4c50efe4e7f1..61021133029e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -464,8 +464,10 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
464 } 464 }
465 465
466 err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn); 466 err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
467 if (err) 467 if (err) {
468 kvfree(in);
468 goto err_cqwq; 469 goto err_cqwq;
470 }
469 471
470 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); 472 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
471 MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); 473 MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 579c306caa7b..3c816e81f8d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -507,7 +507,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
507 MLX5_SET(dest_format_struct, in_dests, 507 MLX5_SET(dest_format_struct, in_dests,
508 destination_eswitch_owner_vhca_id, 508 destination_eswitch_owner_vhca_id,
509 dst->dest_attr.vport.vhca_id); 509 dst->dest_attr.vport.vhca_id);
510 if (extended_dest) { 510 if (extended_dest &&
511 dst->dest_attr.vport.pkt_reformat) {
511 MLX5_SET(dest_format_struct, in_dests, 512 MLX5_SET(dest_format_struct, in_dests,
512 packet_reformat, 513 packet_reformat,
513 !!(dst->dest_attr.vport.flags & 514 !!(dst->dest_attr.vport.flags &
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d685122d9ff7..c07f3154437c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -572,7 +572,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
572 return -ENOMEM; 572 return -ENOMEM;
573 err = mlx5_crdump_collect(dev, cr_data); 573 err = mlx5_crdump_collect(dev, cr_data);
574 if (err) 574 if (err)
575 return err; 575 goto free_data;
576 576
577 if (priv_ctx) { 577 if (priv_ctx) {
578 struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; 578 struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 14dcc786926d..4421ab22182f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1186,7 +1186,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
1186 if (err) 1186 if (err)
1187 goto err_thermal_init; 1187 goto err_thermal_init;
1188 1188
1189 if (mlxsw_driver->params_register && !reload) 1189 if (mlxsw_driver->params_register)
1190 devlink_params_publish(devlink); 1190 devlink_params_publish(devlink);
1191 1191
1192 return 0; 1192 return 0;
@@ -1259,7 +1259,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1259 return; 1259 return;
1260 } 1260 }
1261 1261
1262 if (mlxsw_core->driver->params_unregister && !reload) 1262 if (mlxsw_core->driver->params_unregister)
1263 devlink_params_unpublish(devlink); 1263 devlink_params_unpublish(devlink);
1264 mlxsw_thermal_fini(mlxsw_core->thermal); 1264 mlxsw_thermal_fini(mlxsw_core->thermal);
1265 mlxsw_hwmon_fini(mlxsw_core->hwmon); 1265 mlxsw_hwmon_fini(mlxsw_core->hwmon);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 4d1bce4389c7..344539c0d3aa 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -261,8 +261,15 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
261 port->pvid = vid; 261 port->pvid = vid;
262 262
263 /* Untagged egress vlan clasification */ 263 /* Untagged egress vlan clasification */
264 if (untagged) 264 if (untagged && port->vid != vid) {
265 if (port->vid) {
266 dev_err(ocelot->dev,
267 "Port already has a native VLAN: %d\n",
268 port->vid);
269 return -EBUSY;
270 }
265 port->vid = vid; 271 port->vid = vid;
272 }
266 273
267 ocelot_vlan_port_apply(ocelot, port); 274 ocelot_vlan_port_apply(ocelot, port);
268 275
@@ -934,7 +941,7 @@ end:
934static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, 941static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
935 u16 vid) 942 u16 vid)
936{ 943{
937 return ocelot_vlan_vid_add(dev, vid, false, true); 944 return ocelot_vlan_vid_add(dev, vid, false, false);
938} 945}
939 946
940static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, 947static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 1eef446036d6..79d72c88bbef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -299,22 +299,6 @@ static void nfp_repr_clean(struct nfp_repr *repr)
299 nfp_port_free(repr->port); 299 nfp_port_free(repr->port);
300} 300}
301 301
302static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
303static struct lock_class_key nfp_repr_netdev_addr_lock_key;
304
305static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
306 struct netdev_queue *txq,
307 void *_unused)
308{
309 lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
310}
311
312static void nfp_repr_set_lockdep_class(struct net_device *dev)
313{
314 lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
315 netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
316}
317
318int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, 302int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
319 u32 cmsg_port_id, struct nfp_port *port, 303 u32 cmsg_port_id, struct nfp_port *port,
320 struct net_device *pf_netdev) 304 struct net_device *pf_netdev)
@@ -324,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
324 u32 repr_cap = nn->tlv_caps.repr_cap; 308 u32 repr_cap = nn->tlv_caps.repr_cap;
325 int err; 309 int err;
326 310
327 nfp_repr_set_lockdep_class(netdev);
328
329 repr->port = port; 311 repr->port = port;
330 repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); 312 repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
331 if (!repr->dst) 313 if (!repr->dst)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 72107a0627a9..20faa8d24c9f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1,6 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 3
4#include <linux/printk.h>
5#include <linux/dynamic_debug.h>
4#include <linux/netdevice.h> 6#include <linux/netdevice.h>
5#include <linux/etherdevice.h> 7#include <linux/etherdevice.h>
6#include <linux/rtnetlink.h> 8#include <linux/rtnetlink.h>
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 15e432386b35..aab311413412 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -1,6 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 3
4#include <linux/printk.h>
5#include <linux/dynamic_debug.h>
4#include <linux/module.h> 6#include <linux/module.h>
5#include <linux/netdevice.h> 7#include <linux/netdevice.h>
6#include <linux/utsname.h> 8#include <linux/utsname.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 2ce70097d018..38f7f40b3a4d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -67,10 +67,9 @@
67#define QED_ROCE_QPS (8192) 67#define QED_ROCE_QPS (8192)
68#define QED_ROCE_DPIS (8) 68#define QED_ROCE_DPIS (8)
69#define QED_RDMA_SRQS QED_ROCE_QPS 69#define QED_RDMA_SRQS QED_ROCE_QPS
70#define QED_NVM_CFG_SET_FLAGS 0xE
71#define QED_NVM_CFG_SET_PF_FLAGS 0x1E
72#define QED_NVM_CFG_GET_FLAGS 0xA 70#define QED_NVM_CFG_GET_FLAGS 0xA
73#define QED_NVM_CFG_GET_PF_FLAGS 0x1A 71#define QED_NVM_CFG_GET_PF_FLAGS 0x1A
72#define QED_NVM_CFG_MAX_ATTRS 50
74 73
75static char version[] = 74static char version[] =
76 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 75 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -2255,6 +2254,7 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2255{ 2254{
2256 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2255 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2257 u8 entity_id, len, buf[32]; 2256 u8 entity_id, len, buf[32];
2257 bool need_nvm_init = true;
2258 struct qed_ptt *ptt; 2258 struct qed_ptt *ptt;
2259 u16 cfg_id, count; 2259 u16 cfg_id, count;
2260 int rc = 0, i; 2260 int rc = 0, i;
@@ -2271,8 +2271,10 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2271 2271
2272 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2272 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2273 "Read config ids: num_attrs = %0d\n", count); 2273 "Read config ids: num_attrs = %0d\n", count);
2274 /* NVM CFG ID attributes */ 2274 /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2275 for (i = 0; i < count; i++) { 2275 * arithmetic operations in the implementation.
2276 */
2277 for (i = 1; i <= count; i++) {
2276 cfg_id = *((u16 *)*data); 2278 cfg_id = *((u16 *)*data);
2277 *data += 2; 2279 *data += 2;
2278 entity_id = **data; 2280 entity_id = **data;
@@ -2282,8 +2284,21 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2282 memcpy(buf, *data, len); 2284 memcpy(buf, *data, len);
2283 *data += len; 2285 *data += len;
2284 2286
2285 flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS : 2287 flags = 0;
2286 QED_NVM_CFG_SET_FLAGS; 2288 if (need_nvm_init) {
2289 flags |= QED_NVM_CFG_OPTION_INIT;
2290 need_nvm_init = false;
2291 }
2292
2293 /* Commit to flash and free the resources */
2294 if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2295 flags |= QED_NVM_CFG_OPTION_COMMIT |
2296 QED_NVM_CFG_OPTION_FREE;
2297 need_nvm_init = true;
2298 }
2299
2300 if (entity_id)
2301 flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2287 2302
2288 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2303 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2289 "cfg_id = %d entity = %d len = %d\n", cfg_id, 2304 "cfg_id = %d entity = %d len = %d\n", cfg_id,
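
[note] The qed rewrite makes the loop index 1-based so a plain modulo decides when to commit the flash transaction: every QED_NVM_CFG_MAX_ATTRS attributes and again on the final one, with the INIT flag re-armed after each commit. The arithmetic in isolation (BATCH is a stand-in constant):

#include <stdbool.h>
#include <stdio.h>

#define BATCH 50 /* stand-in for QED_NVM_CFG_MAX_ATTRS */

int main(void)
{
    int count = 120;       /* attributes to write */
    bool need_init = true;

    /* The 1-based index keeps the commit test a plain modulo. */
    for (int i = 1; i <= count; i++) {
        bool init = need_init;
        bool commit = (i % BATCH == 0) || i == count;

        if (init)
            need_init = false;
        if (commit)
            need_init = true; /* next attribute opens a new transaction */

        if (init || commit)
            printf("attr %3d: init=%d commit=%d\n", i, init, commit);
    }
    return 0;
}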
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 78f77b712b10..dcb5c917f373 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2005,7 +2005,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
2005 (qed_iov_validate_active_txq(p_hwfn, vf))) { 2005 (qed_iov_validate_active_txq(p_hwfn, vf))) {
2006 vf->b_malicious = true; 2006 vf->b_malicious = true;
2007 DP_NOTICE(p_hwfn, 2007 DP_NOTICE(p_hwfn,
2008 "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n", 2008 "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
2009 vf->abs_vf_id); 2009 vf->abs_vf_id);
2010 status = PFVF_STATUS_MALICIOUS; 2010 status = PFVF_STATUS_MALICIOUS;
2011 goto out; 2011 goto out;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 350b0d949611..5064c292b873 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1029,6 +1029,10 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1029{ 1029{
1030 int value; 1030 int value;
1031 1031
1032 /* Work around issue with chip reporting wrong PHY ID */
1033 if (reg == MII_PHYSID2)
1034 return 0xc912;
1035
1032 r8168dp_2_mdio_start(tp); 1036 r8168dp_2_mdio_start(tp);
1033 1037
1034 value = r8169_mdio_read(tp, reg); 1038 value = r8169_mdio_read(tp, reg);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3dfd04e0506a..4e9c848c67cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2995,6 +2995,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2995 } else { 2995 } else {
2996 stmmac_set_desc_addr(priv, first, des); 2996 stmmac_set_desc_addr(priv, first, des);
2997 tmp_pay_len = pay_len; 2997 tmp_pay_len = pay_len;
2998 des += proto_hdr_len;
2998 } 2999 }
2999 3000
3000 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 3001 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index bbbc1dcb6ab5..b517c1af9de0 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1237,8 +1237,17 @@ static int fjes_probe(struct platform_device *plat_dev)
1237 adapter->open_guard = false; 1237 adapter->open_guard = false;
1238 1238
1239 adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0); 1239 adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1240 if (unlikely(!adapter->txrx_wq)) {
1241 err = -ENOMEM;
1242 goto err_free_netdev;
1243 }
1244
1240 adapter->control_wq = alloc_workqueue(DRV_NAME "/control", 1245 adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1241 WQ_MEM_RECLAIM, 0); 1246 WQ_MEM_RECLAIM, 0);
1247 if (unlikely(!adapter->control_wq)) {
1248 err = -ENOMEM;
1249 goto err_free_txrx_wq;
1250 }
1242 1251
1243 INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task); 1252 INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1244 INIT_WORK(&adapter->raise_intr_rxdata_task, 1253 INIT_WORK(&adapter->raise_intr_rxdata_task,
@@ -1255,7 +1264,7 @@ static int fjes_probe(struct platform_device *plat_dev)
1255 hw->hw_res.irq = platform_get_irq(plat_dev, 0); 1264 hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1256 err = fjes_hw_init(&adapter->hw); 1265 err = fjes_hw_init(&adapter->hw);
1257 if (err) 1266 if (err)
1258 goto err_free_netdev; 1267 goto err_free_control_wq;
1259 1268
1260 /* setup MAC address (02:00:00:00:00:[epid])*/ 1269 /* setup MAC address (02:00:00:00:00:[epid])*/
1261 netdev->dev_addr[0] = 2; 1270 netdev->dev_addr[0] = 2;
@@ -1277,6 +1286,10 @@ static int fjes_probe(struct platform_device *plat_dev)
1277 1286
1278err_hw_exit: 1287err_hw_exit:
1279 fjes_hw_exit(&adapter->hw); 1288 fjes_hw_exit(&adapter->hw);
1289err_free_control_wq:
1290 destroy_workqueue(adapter->control_wq);
1291err_free_txrx_wq:
1292 destroy_workqueue(adapter->txrx_wq);
1280err_free_netdev: 1293err_free_netdev:
1281 free_netdev(netdev); 1294 free_netdev(netdev);
1282err_out: 1295err_out:
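
[note] The fjes fix is the standard probe-unwind pattern: every successful allocation adds an error label, and a failure jumps to the label that releases exactly what was acquired so far, in reverse order. A compact standalone illustration with hypothetical resources a, b, c:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical probe(): acquire a, then b, then c; any failure jumps
 * to the label that frees only what was already acquired. */
static int probe(void)
{
    void *a, *b, *c;
    int err = -1;

    a = malloc(16);
    if (!a)
        goto err_out;
    b = malloc(16);
    if (!b)
        goto err_free_a;
    c = malloc(16);
    if (!c)
        goto err_free_b;

    free(c);
    free(b);
    free(a);
    return 0;

err_free_b:             /* labels unwind in reverse acquisition order */
    free(b);
err_free_a:
    free(a);
err_out:
    return err;
}

int main(void)
{
    printf("probe: %d\n", probe());
    return 0;
}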
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index fbec711ff514..fbea6f232819 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -107,27 +107,6 @@ struct bpqdev {
107 107
108static LIST_HEAD(bpq_devices); 108static LIST_HEAD(bpq_devices);
109 109
110/*
111 * bpqether network devices are paired with ethernet devices below them, so
112 * form a special "super class" of normal ethernet devices; split their locks
113 * off into a separate class since they always nest.
114 */
115static struct lock_class_key bpq_netdev_xmit_lock_key;
116static struct lock_class_key bpq_netdev_addr_lock_key;
117
118static void bpq_set_lockdep_class_one(struct net_device *dev,
119 struct netdev_queue *txq,
120 void *_unused)
121{
122 lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
123}
124
125static void bpq_set_lockdep_class(struct net_device *dev)
126{
127 lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
128 netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
129}
130
131/* ------------------------------------------------------------------------ */ 110/* ------------------------------------------------------------------------ */
132 111
133 112
@@ -498,7 +477,6 @@ static int bpq_new_device(struct net_device *edev)
498 err = register_netdevice(ndev); 477 err = register_netdevice(ndev);
499 if (err) 478 if (err)
500 goto error; 479 goto error;
501 bpq_set_lockdep_class(ndev);
502 480
503 /* List protected by RTNL */ 481 /* List protected by RTNL */
504 list_add_rcu(&bpq->bpq_list, &bpq_devices); 482 list_add_rcu(&bpq->bpq_list, &bpq_devices);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 39dddcd8b3cb..963509add611 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -982,7 +982,7 @@ static int netvsc_attach(struct net_device *ndev,
982 if (netif_running(ndev)) { 982 if (netif_running(ndev)) {
983 ret = rndis_filter_open(nvdev); 983 ret = rndis_filter_open(nvdev);
984 if (ret) 984 if (ret)
985 return ret; 985 goto err;
986 986
987 rdev = nvdev->extension; 987 rdev = nvdev->extension;
988 if (!rdev->link_state) 988 if (!rdev->link_state)
@@ -990,6 +990,13 @@ static int netvsc_attach(struct net_device *ndev,
990 } 990 }
991 991
992 return 0; 992 return 0;
993
994err:
995 netif_device_detach(ndev);
996
997 rndis_filter_device_remove(hdev, nvdev);
998
999 return ret;
993} 1000}
994 1001
995static int netvsc_set_channels(struct net_device *net, 1002static int netvsc_set_channels(struct net_device *net,
@@ -1807,8 +1814,10 @@ static int netvsc_set_features(struct net_device *ndev,
1807 1814
1808 ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads); 1815 ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
1809 1816
1810 if (ret) 1817 if (ret) {
1811 features ^= NETIF_F_LRO; 1818 features ^= NETIF_F_LRO;
1819 ndev->features = features;
1820 }
1812 1821
1813syncvf: 1822syncvf:
1814 if (!vf_netdev) 1823 if (!vf_netdev)
@@ -2335,8 +2344,6 @@ static int netvsc_probe(struct hv_device *dev,
2335 NETIF_F_HW_VLAN_CTAG_RX; 2344 NETIF_F_HW_VLAN_CTAG_RX;
2336 net->vlan_features = net->features; 2345 net->vlan_features = net->features;
2337 2346
2338 netdev_lockdep_set_classes(net);
2339
2340 /* MTU range: 68 - 1500 or 65521 */ 2347 /* MTU range: 68 - 1500 or 65521 */
2341 net->min_mtu = NETVSC_MTU_MIN; 2348 net->min_mtu = NETVSC_MTU_MIN;
2342 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) 2349 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 887bbba4631e..ba3dfac1d904 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -131,8 +131,6 @@ static int ipvlan_init(struct net_device *dev)
131 dev->gso_max_segs = phy_dev->gso_max_segs; 131 dev->gso_max_segs = phy_dev->gso_max_segs;
132 dev->hard_header_len = phy_dev->hard_header_len; 132 dev->hard_header_len = phy_dev->hard_header_len;
133 133
134 netdev_lockdep_set_classes(dev);
135
136 ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); 134 ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
137 if (!ipvlan->pcpu_stats) 135 if (!ipvlan->pcpu_stats)
138 return -ENOMEM; 136 return -ENOMEM;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index cb7637364b40..afd8b2a08245 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -267,7 +267,6 @@ struct macsec_dev {
267 struct pcpu_secy_stats __percpu *stats; 267 struct pcpu_secy_stats __percpu *stats;
268 struct list_head secys; 268 struct list_head secys;
269 struct gro_cells gro_cells; 269 struct gro_cells gro_cells;
270 unsigned int nest_level;
271}; 270};
272 271
273/** 272/**
@@ -2750,7 +2749,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2750 2749
2751#define MACSEC_FEATURES \ 2750#define MACSEC_FEATURES \
2752 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 2751 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
2753static struct lock_class_key macsec_netdev_addr_lock_key;
2754 2752
2755static int macsec_dev_init(struct net_device *dev) 2753static int macsec_dev_init(struct net_device *dev)
2756{ 2754{
@@ -2958,11 +2956,6 @@ static int macsec_get_iflink(const struct net_device *dev)
2958 return macsec_priv(dev)->real_dev->ifindex; 2956 return macsec_priv(dev)->real_dev->ifindex;
2959} 2957}
2960 2958
2961static int macsec_get_nest_level(struct net_device *dev)
2962{
2963 return macsec_priv(dev)->nest_level;
2964}
2965
2966static const struct net_device_ops macsec_netdev_ops = { 2959static const struct net_device_ops macsec_netdev_ops = {
2967 .ndo_init = macsec_dev_init, 2960 .ndo_init = macsec_dev_init,
2968 .ndo_uninit = macsec_dev_uninit, 2961 .ndo_uninit = macsec_dev_uninit,
@@ -2976,7 +2969,6 @@ static const struct net_device_ops macsec_netdev_ops = {
2976 .ndo_start_xmit = macsec_start_xmit, 2969 .ndo_start_xmit = macsec_start_xmit,
2977 .ndo_get_stats64 = macsec_get_stats64, 2970 .ndo_get_stats64 = macsec_get_stats64,
2978 .ndo_get_iflink = macsec_get_iflink, 2971 .ndo_get_iflink = macsec_get_iflink,
2979 .ndo_get_lock_subclass = macsec_get_nest_level,
2980}; 2972};
2981 2973
2982static const struct device_type macsec_type = { 2974static const struct device_type macsec_type = {
@@ -3001,12 +2993,10 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3001static void macsec_free_netdev(struct net_device *dev) 2993static void macsec_free_netdev(struct net_device *dev)
3002{ 2994{
3003 struct macsec_dev *macsec = macsec_priv(dev); 2995 struct macsec_dev *macsec = macsec_priv(dev);
3004 struct net_device *real_dev = macsec->real_dev;
3005 2996
3006 free_percpu(macsec->stats); 2997 free_percpu(macsec->stats);
3007 free_percpu(macsec->secy.tx_sc.stats); 2998 free_percpu(macsec->secy.tx_sc.stats);
3008 2999
3009 dev_put(real_dev);
3010} 3000}
3011 3001
3012static void macsec_setup(struct net_device *dev) 3002static void macsec_setup(struct net_device *dev)
@@ -3261,14 +3251,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3261 if (err < 0) 3251 if (err < 0)
3262 return err; 3252 return err;
3263 3253
3264 dev_hold(real_dev);
3265
3266 macsec->nest_level = dev_get_nest_level(real_dev) + 1;
3267 netdev_lockdep_set_classes(dev);
3268 lockdep_set_class_and_subclass(&dev->addr_list_lock,
3269 &macsec_netdev_addr_lock_key,
3270 macsec_get_nest_level(dev));
3271
3272 err = netdev_upper_dev_link(real_dev, dev, extack); 3254 err = netdev_upper_dev_link(real_dev, dev, extack);
3273 if (err < 0) 3255 if (err < 0)
3274 goto unregister; 3256 goto unregister;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 940192c057b6..34fc59bd1e20 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -852,8 +852,6 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
852 * "super class" of normal network devices; split their locks off into a 852 * "super class" of normal network devices; split their locks off into a
853 * separate class since they always nest. 853 * separate class since they always nest.
854 */ 854 */
855static struct lock_class_key macvlan_netdev_addr_lock_key;
856
857#define ALWAYS_ON_OFFLOADS \ 855#define ALWAYS_ON_OFFLOADS \
858 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ 856 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
859 NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) 857 NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
@@ -869,19 +867,6 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
869#define MACVLAN_STATE_MASK \ 867#define MACVLAN_STATE_MASK \
870 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 868 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
871 869
872static int macvlan_get_nest_level(struct net_device *dev)
873{
874 return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
875}
876
877static void macvlan_set_lockdep_class(struct net_device *dev)
878{
879 netdev_lockdep_set_classes(dev);
880 lockdep_set_class_and_subclass(&dev->addr_list_lock,
881 &macvlan_netdev_addr_lock_key,
882 macvlan_get_nest_level(dev));
883}
884
885static int macvlan_init(struct net_device *dev) 870static int macvlan_init(struct net_device *dev)
886{ 871{
887 struct macvlan_dev *vlan = netdev_priv(dev); 872 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -900,8 +885,6 @@ static int macvlan_init(struct net_device *dev)
900 dev->gso_max_segs = lowerdev->gso_max_segs; 885 dev->gso_max_segs = lowerdev->gso_max_segs;
901 dev->hard_header_len = lowerdev->hard_header_len; 886 dev->hard_header_len = lowerdev->hard_header_len;
902 887
903 macvlan_set_lockdep_class(dev);
904
905 vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 888 vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
906 if (!vlan->pcpu_stats) 889 if (!vlan->pcpu_stats)
907 return -ENOMEM; 890 return -ENOMEM;
@@ -1161,7 +1144,6 @@ static const struct net_device_ops macvlan_netdev_ops = {
1161 .ndo_fdb_add = macvlan_fdb_add, 1144 .ndo_fdb_add = macvlan_fdb_add,
1162 .ndo_fdb_del = macvlan_fdb_del, 1145 .ndo_fdb_del = macvlan_fdb_del,
1163 .ndo_fdb_dump = ndo_dflt_fdb_dump, 1146 .ndo_fdb_dump = ndo_dflt_fdb_dump,
1164 .ndo_get_lock_subclass = macvlan_get_nest_level,
1165#ifdef CONFIG_NET_POLL_CONTROLLER 1147#ifdef CONFIG_NET_POLL_CONTROLLER
1166 .ndo_poll_controller = macvlan_dev_poll_controller, 1148 .ndo_poll_controller = macvlan_dev_poll_controller,
1167 .ndo_netpoll_setup = macvlan_dev_netpoll_setup, 1149 .ndo_netpoll_setup = macvlan_dev_netpoll_setup,
@@ -1445,7 +1427,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1445 vlan->dev = dev; 1427 vlan->dev = dev;
1446 vlan->port = port; 1428 vlan->port = port;
1447 vlan->set_features = MACVLAN_FEATURES; 1429 vlan->set_features = MACVLAN_FEATURES;
1448 vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
1449 1430
1450 vlan->mode = MACVLAN_MODE_VEPA; 1431 vlan->mode = MACVLAN_MODE_VEPA;
1451 if (data && data[IFLA_MACVLAN_MODE]) 1432 if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 56576d4f34a5..54ca6681ba31 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -806,9 +806,11 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
806{ 806{
807 struct nsim_dev_port *nsim_dev_port, *tmp; 807 struct nsim_dev_port *nsim_dev_port, *tmp;
808 808
809 mutex_lock(&nsim_dev->port_list_lock);
809 list_for_each_entry_safe(nsim_dev_port, tmp, 810 list_for_each_entry_safe(nsim_dev_port, tmp,
810 &nsim_dev->port_list, list) 811 &nsim_dev->port_list, list)
811 __nsim_dev_port_del(nsim_dev_port); 812 __nsim_dev_port_del(nsim_dev_port);
813 mutex_unlock(&nsim_dev->port_list_lock);
812} 814}
813 815
814int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) 816int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
@@ -822,14 +824,17 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
822 return PTR_ERR(nsim_dev); 824 return PTR_ERR(nsim_dev);
823 dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev); 825 dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
824 826
827 mutex_lock(&nsim_dev->port_list_lock);
825 for (i = 0; i < nsim_bus_dev->port_count; i++) { 828 for (i = 0; i < nsim_bus_dev->port_count; i++) {
826 err = __nsim_dev_port_add(nsim_dev, i); 829 err = __nsim_dev_port_add(nsim_dev, i);
827 if (err) 830 if (err)
828 goto err_port_del_all; 831 goto err_port_del_all;
829 } 832 }
833 mutex_unlock(&nsim_dev->port_list_lock);
830 return 0; 834 return 0;
831 835
832err_port_del_all: 836err_port_del_all:
837 mutex_unlock(&nsim_dev->port_list_lock);
833 nsim_dev_port_del_all(nsim_dev); 838 nsim_dev_port_del_all(nsim_dev);
834 nsim_dev_destroy(nsim_dev); 839 nsim_dev_destroy(nsim_dev);
835 return err; 840 return err;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 20e2ebe458f2..a578f7ebf715 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -87,8 +87,24 @@ struct phylink {
87 phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__) 87 phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
88#define phylink_info(pl, fmt, ...) \ 88#define phylink_info(pl, fmt, ...) \
89 phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__) 89 phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
90#if defined(CONFIG_DYNAMIC_DEBUG)
90#define phylink_dbg(pl, fmt, ...) \ 91#define phylink_dbg(pl, fmt, ...) \
92do { \
93 if ((pl)->config->type == PHYLINK_NETDEV) \
94 netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__); \
95 else if ((pl)->config->type == PHYLINK_DEV) \
96 dev_dbg((pl)->dev, fmt, ##__VA_ARGS__); \
97} while (0)
98#elif defined(DEBUG)
99#define phylink_dbg(pl, fmt, ...) \
91 phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__) 100 phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
101#else
102#define phylink_dbg(pl, fmt, ...) \
103({ \
104 if (0) \
105 phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__); \
106})
107#endif
92 108
93/** 109/**
94 * phylink_set_port_modes() - set the port type modes in the ethtool mask 110 * phylink_set_port_modes() - set the port type modes in the ethtool mask
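
[note] The phylink_dbg() fallback wraps the call in if (0) inside a statement expression, so the compiler still type-checks the format string and arguments even when debug output is compiled out. The same idiom in a standalone program; ##__VA_ARGS__ is the GNU/clang extension the kernel itself relies on:

#include <stdio.h>

/* With DEBUG off, keep the call inside 'if (0)' so printf-style
 * format/argument checking still happens, but no code is emitted. */
#ifdef DEBUG
#define dbg(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dbg(fmt, ...)                       \
    do {                                    \
        if (0)                              \
            printf(fmt, ##__VA_ARGS__);     \
    } while (0)
#endif

int main(void)
{
    int speed = 1000;

    dbg("link up at %d Mb/s\n", speed); /* silent unless built with -DDEBUG */
    printf("done\n");
    return 0;
}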
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index dc3d92d340c4..b73298250793 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -327,6 +327,7 @@ static struct phy_driver smsc_phy_driver[] = {
327 .name = "SMSC LAN8740", 327 .name = "SMSC LAN8740",
328 328
329 /* PHY_BASIC_FEATURES */ 329 /* PHY_BASIC_FEATURES */
330 .flags = PHY_RST_AFTER_CLK_EN,
330 331
331 .probe = smsc_phy_probe, 332 .probe = smsc_phy_probe,
332 333
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9a1b006904a7..61824bbb5588 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1324,8 +1324,6 @@ static int ppp_dev_init(struct net_device *dev)
1324{ 1324{
1325 struct ppp *ppp; 1325 struct ppp *ppp;
1326 1326
1327 netdev_lockdep_set_classes(dev);
1328
1329 ppp = netdev_priv(dev); 1327 ppp = netdev_priv(dev);
1330 /* Let the netdevice take a reference on the ppp file. This ensures 1328 /* Let the netdevice take a reference on the ppp file. This ensures
1331 * that ppp_destroy_interface() won't run before the device gets 1329 * that ppp_destroy_interface() won't run before the device gets
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e8089def5a46..8156b33ee3e7 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1615,7 +1615,6 @@ static int team_init(struct net_device *dev)
1615 int err; 1615 int err;
1616 1616
1617 team->dev = dev; 1617 team->dev = dev;
1618 mutex_init(&team->lock);
1619 team_set_no_mode(team); 1618 team_set_no_mode(team);
1620 1619
1621 team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats); 1620 team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
@@ -1642,7 +1641,8 @@ static int team_init(struct net_device *dev)
1642 goto err_options_register; 1641 goto err_options_register;
1643 netif_carrier_off(dev); 1642 netif_carrier_off(dev);
1644 1643
1645 netdev_lockdep_set_classes(dev); 1644 lockdep_register_key(&team->team_lock_key);
1645 __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
1646 1646
1647 return 0; 1647 return 0;
1648 1648
@@ -1673,6 +1673,7 @@ static void team_uninit(struct net_device *dev)
1673 team_queue_override_fini(team); 1673 team_queue_override_fini(team);
1674 mutex_unlock(&team->lock); 1674 mutex_unlock(&team->lock);
1675 netdev_change_features(dev); 1675 netdev_change_features(dev);
1676 lockdep_unregister_key(&team->team_lock_key);
1676} 1677}
1677 1678
1678static void team_destructor(struct net_device *dev) 1679static void team_destructor(struct net_device *dev)
@@ -1976,8 +1977,15 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1976 err = team_port_del(team, port_dev); 1977 err = team_port_del(team, port_dev);
1977 mutex_unlock(&team->lock); 1978 mutex_unlock(&team->lock);
1978 1979
1979 if (!err) 1980 if (err)
1980 netdev_change_features(dev); 1981 return err;
1982
1983 if (netif_is_team_master(port_dev)) {
1984 lockdep_unregister_key(&team->team_lock_key);
1985 lockdep_register_key(&team->team_lock_key);
1986 lockdep_set_class(&team->lock, &team->team_lock_key);
1987 }
1988 netdev_change_features(dev);
1981 1989
1982 return err; 1990 return err;
1983} 1991}
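
[note] team replaces the shared static lockdep class with a per-instance key, registered at init and cycled when the last nested team port leaves, so stacking team over team no longer looks like self-deadlock to lockdep. A kernel-side fragment of the idiom, not a standalone program; struct team_like and its helpers are hypothetical, while lockdep_register_key(), __mutex_init() and lockdep_unregister_key() are the real kernel APIs used above:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct team_like {
    struct mutex lock;
    struct lock_class_key lock_key; /* one lockdep class per instance */
};

/* Registering a dynamic key gives each device's mutex its own lock
 * class, so nesting one instance's lock under another's is legal. */
static void team_like_init(struct team_like *t)
{
    lockdep_register_key(&t->lock_key);
    __mutex_init(&t->lock, "team_like->lock", &t->lock_key);
}

static void team_like_uninit(struct team_like *t)
{
    mutex_destroy(&t->lock);
    lockdep_unregister_key(&t->lock_key);
}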
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 32f53de5b1fe..fe630438f67b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -787,6 +787,13 @@ static const struct usb_device_id products[] = {
787 .driver_info = 0, 787 .driver_info = 0,
788}, 788},
789 789
790/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
791{
792 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
793 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
794 .driver_info = 0,
795},
796
790/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ 797/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
791{ 798{
792 USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM, 799 USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 62948098191f..f24a1b0b801f 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1264,8 +1264,11 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1264 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); 1264 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1265 lan78xx_defer_kevent(dev, EVENT_LINK_RESET); 1265 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1266 1266
1267 if (dev->domain_data.phyirq > 0) 1267 if (dev->domain_data.phyirq > 0) {
1268 local_irq_disable();
1268 generic_handle_irq(dev->domain_data.phyirq); 1269 generic_handle_irq(dev->domain_data.phyirq);
1270 local_irq_enable();
1271 }
1269 } else 1272 } else
1270 netdev_warn(dev->net, 1273 netdev_warn(dev->net,
1271 "unexpected interrupt: 0x%08x\n", intdata); 1274 "unexpected interrupt: 0x%08x\n", intdata);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index cee9fef925cd..d4a95b50bda6 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5755,6 +5755,7 @@ static const struct usb_device_id rtl8152_table[] = {
5755 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, 5755 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
5756 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, 5756 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
5757 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, 5757 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
5758 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
5758 {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, 5759 {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
5759 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, 5760 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
5760 {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)}, 5761 {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ee52bde058df..b8228f50bc94 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -865,7 +865,6 @@ static int vrf_dev_init(struct net_device *dev)
865 865
866 /* similarly, oper state is irrelevant; set to up to avoid confusion */ 866 /* similarly, oper state is irrelevant; set to up to avoid confusion */
867 dev->operstate = IF_OPER_UP; 867 dev->operstate = IF_OPER_UP;
868 netdev_lockdep_set_classes(dev);
869 return 0; 868 return 0;
870 869
871out_rth: 870out_rth:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3d9bcc957f7d..8869154fad88 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2487,9 +2487,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2487 vni = tunnel_id_to_key32(info->key.tun_id); 2487 vni = tunnel_id_to_key32(info->key.tun_id);
2488 ifindex = 0; 2488 ifindex = 0;
2489 dst_cache = &info->dst_cache; 2489 dst_cache = &info->dst_cache;
2490 if (info->options_len && 2490 if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
2491 info->key.tun_flags & TUNNEL_VXLAN_OPT) 2491 if (info->options_len < sizeof(*md))
2492 goto drop;
2492 md = ip_tunnel_info_opts(info); 2493 md = ip_tunnel_info_opts(info);
2494 }
2493 ttl = info->key.ttl; 2495 ttl = info->key.ttl;
2494 tos = info->key.tos; 2496 tos = info->key.tos;
2495 label = info->key.label; 2497 label = info->key.label;
@@ -3566,10 +3568,13 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3566{ 3568{
3567 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3569 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3568 struct vxlan_dev *vxlan = netdev_priv(dev); 3570 struct vxlan_dev *vxlan = netdev_priv(dev);
3571 struct net_device *remote_dev = NULL;
3569 struct vxlan_fdb *f = NULL; 3572 struct vxlan_fdb *f = NULL;
3570 bool unregister = false; 3573 bool unregister = false;
3574 struct vxlan_rdst *dst;
3571 int err; 3575 int err;
3572 3576
3577 dst = &vxlan->default_dst;
3573 err = vxlan_dev_configure(net, dev, conf, false, extack); 3578 err = vxlan_dev_configure(net, dev, conf, false, extack);
3574 if (err) 3579 if (err)
3575 return err; 3580 return err;
@@ -3577,14 +3582,14 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 	dev->ethtool_ops = &vxlan_ethtool_ops;
 
 	/* create an fdb entry for a valid default destination */
-	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+	if (!vxlan_addr_any(&dst->remote_ip)) {
 		err = vxlan_fdb_create(vxlan, all_zeros_mac,
-				       &vxlan->default_dst.remote_ip,
+				       &dst->remote_ip,
 				       NUD_REACHABLE | NUD_PERMANENT,
 				       vxlan->cfg.dst_port,
-				       vxlan->default_dst.remote_vni,
-				       vxlan->default_dst.remote_vni,
-				       vxlan->default_dst.remote_ifindex,
+				       dst->remote_vni,
+				       dst->remote_vni,
+				       dst->remote_ifindex,
 				       NTF_SELF, &f);
 		if (err)
 			return err;
@@ -3595,26 +3600,41 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 		goto errout;
 	unregister = true;
 
+	if (dst->remote_ifindex) {
+		remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
+		if (!remote_dev)
+			goto errout;
+
+		err = netdev_upper_dev_link(remote_dev, dev, extack);
+		if (err)
+			goto errout;
+	}
+
 	err = rtnl_configure_link(dev, NULL);
 	if (err)
-		goto errout;
+		goto unlink;
 
 	if (f) {
-		vxlan_fdb_insert(vxlan, all_zeros_mac,
-				 vxlan->default_dst.remote_vni, f);
+		vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
 
 		/* notify default fdb entry */
 		err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
 				       RTM_NEWNEIGH, true, extack);
 		if (err) {
 			vxlan_fdb_destroy(vxlan, f, false, false);
+			if (remote_dev)
+				netdev_upper_dev_unlink(remote_dev, dev);
 			goto unregister;
 		}
 	}
 
 	list_add(&vxlan->next, &vn->vxlan_list);
+	if (remote_dev)
+		dst->remote_dev = remote_dev;
 	return 0;
-
+unlink:
+	if (remote_dev)
+		netdev_upper_dev_unlink(remote_dev, dev);
 errout:
 	/* unregister_netdevice() destroys the default FDB entry with deletion
 	 * notification. But the addition notification was not sent yet, so
@@ -3932,11 +3952,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 			    struct netlink_ext_ack *extack)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct vxlan_rdst *dst = &vxlan->default_dst;
 	struct net_device *lowerdev;
 	struct vxlan_config conf;
+	struct vxlan_rdst *dst;
 	int err;
 
+	dst = &vxlan->default_dst;
 	err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
 	if (err)
 		return err;
@@ -3946,6 +3967,14 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 	if (err)
 		return err;
 
+	if (dst->remote_dev == lowerdev)
+		lowerdev = NULL;
+
+	err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
+					     extack);
+	if (err)
+		return err;
+
 	/* handle default dst entry */
 	if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
 		u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
@@ -3962,6 +3991,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 				    NTF_SELF, true, extack);
 		if (err) {
 			spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+			netdev_adjacent_change_abort(dst->remote_dev,
+						     lowerdev, dev);
 			return err;
 		}
 	}
@@ -3979,6 +4010,11 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 	if (conf.age_interval != vxlan->cfg.age_interval)
 		mod_timer(&vxlan->age_timer, jiffies);
 
+	netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
+	if (lowerdev && lowerdev != dst->remote_dev) {
+		dst->remote_dev = lowerdev;
+		netdev_update_lockdep_key(lowerdev);
+	}
 	vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
 	return 0;
 }
@@ -3991,6 +4027,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
+	if (vxlan->default_dst.remote_dev)
+		netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
 }
 
 static size_t vxlan_get_size(const struct net_device *dev)
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 8efb493ceec2..5c79f052cad2 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -127,12 +127,12 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
127 "%d\n", result); 127 "%d\n", result);
128 result = 0; 128 result = 0;
129error_cmd: 129error_cmd:
130 kfree(cmd);
131 kfree_skb(ack_skb); 130 kfree_skb(ack_skb);
132error_msg_to_dev: 131error_msg_to_dev:
133error_alloc: 132error_alloc:
134 d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n", 133 d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
135 wimax_dev, state, result); 134 wimax_dev, state, result);
135 kfree(cmd);
136 return result; 136 return result;
137} 137}
138 138
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 39c64850cb6f..c0750ced5ac2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -520,7 +520,7 @@ struct iwl_scan_dwell {
 } __packed;
 
 /**
- * struct iwl_scan_config
+ * struct iwl_scan_config_v1
  * @flags: enum scan_config_flags
  * @tx_chains: valid_tx antenna - ANT_* definitions
  * @rx_chains: valid_rx antenna - ANT_* definitions
@@ -552,7 +552,7 @@ struct iwl_scan_config_v1 {
 #define SCAN_LB_LMAC_IDX 0
 #define SCAN_HB_LMAC_IDX 1
 
-struct iwl_scan_config {
+struct iwl_scan_config_v2 {
 	__le32 flags;
 	__le32 tx_chains;
 	__le32 rx_chains;
@@ -564,6 +564,24 @@ struct iwl_scan_config {
 	u8 bcast_sta_id;
 	u8 channel_flags;
 	u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * struct iwl_scan_config
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscouos_mode: whether to enable promiscouos mode
+ * @bcast_sta_id: the index of the station in the fw
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwl_scan_config {
+	u8 enable_cam_mode;
+	u8 enable_promiscouos_mode;
+	u8 bcast_sta_id;
+	u8 reserved;
+	__le32 tx_chains;
+	__le32 rx_chains;
 } __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
 
 /**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 423cc0cf8e78..0d5bc4ce5c07 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -288,6 +288,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  *	STA_CONTEXT_DOT11AX_API_S
  * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar
  *	version tables.
+ * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ *	SCAN_CONFIG_DB_CMD_API_S.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -321,6 +323,7 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE	= (__force iwl_ucode_tlv_api_t)53,
 	IWL_UCODE_TLV_API_FTM_RTT_ACCURACY	= (__force iwl_ucode_tlv_api_t)54,
 	IWL_UCODE_TLV_API_SAR_TABLE_VER		= (__force iwl_ucode_tlv_api_t)55,
+	IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG	= (__force iwl_ucode_tlv_api_t)56,
 	IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP	= (__force iwl_ucode_tlv_api_t)57,
 	IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER	= (__force iwl_ucode_tlv_api_t)58,
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index cb4c5514a556..695bbaa86273 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -279,6 +279,7 @@
  *     Indicates MAC is entering a power-saving sleep power-down.
  *     Not a good time to access device-internal resources.
  */
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE		     (0x00000004)
 #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP	     (0x00000010)
 #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON		     (0x00000400)
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index f47e0f97acf8..23c25a7665f2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -449,6 +449,11 @@ enum {
 #define PERSISTENCE_BIT			BIT(12)
 #define PREG_WFPM_ACCESS		BIT(12)
 
+#define HPM_HIPM_GEN_CFG			0xA03458
+#define HPM_HIPM_GEN_CFG_CR_PG_EN		BIT(0)
+#define HPM_HIPM_GEN_CFG_CR_SLP_EN		BIT(1)
+#define HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE	BIT(10)
+
 #define UREG_DOORBELL_TO_ISR6		0xA05C04
 #define UREG_DOORBELL_TO_ISR6_NMI_BIT	BIT(0)
 #define UREG_DOORBELL_TO_ISR6_SUSPEND	BIT(18)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 843d00bf2bd5..5ca50f39a023 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1405,6 +1405,12 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
 			  IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
 }
 
+static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_api(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
+}
+
 static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
 {
 	return fw_has_api(&mvm->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f6b3045badbd..fcafa22ec6ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1137,11 +1137,11 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
 	iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
 }
 
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
-				     u32 flags, u8 channel_flags,
-				     u32 max_channels)
+static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
+					u32 flags, u8 channel_flags,
+					u32 max_channels)
 {
-	struct iwl_scan_config *cfg = config;
+	struct iwl_scan_config_v2 *cfg = config;
 
 	cfg->flags = cpu_to_le32(flags);
 	cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1185,7 +1185,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
 	iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
 }
 
-int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
 {
 	void *cfg;
 	int ret, cmd_size;
@@ -1217,7 +1217,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 	}
 
 	if (iwl_mvm_cdb_scan_api(mvm))
-		cmd_size = sizeof(struct iwl_scan_config);
+		cmd_size = sizeof(struct iwl_scan_config_v2);
 	else
 		cmd_size = sizeof(struct iwl_scan_config_v1);
 	cmd_size += num_channels;
@@ -1254,8 +1254,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 		flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
 			 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
 			 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
-		iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
-					 num_channels);
+		iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
+					    num_channels);
 	} else {
 		iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
 					    num_channels);
@@ -1277,6 +1277,30 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 	return ret;
 }
 
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+	struct iwl_scan_config cfg;
+	struct iwl_host_cmd cmd = {
+		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.len[0] = sizeof(cfg),
+		.data[0] = &cfg,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
+
+	if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
+		return iwl_mvm_legacy_config_scan(mvm);
+
+	memset(&cfg, 0, sizeof(cfg));
+
+	cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+	cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+	cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+
+	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+	return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
 static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
 {
 	int i;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 0bedba4c61f2..b3768d5d852a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1482,6 +1482,13 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
 					   mvm_sta->sta_id, i);
 			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
 							 i, wdg);
+			/*
+			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
+			 * to try again later, we have no other good way of
+			 * failing here
+			 */
+			if (txq_id < 0)
+				txq_id = IWL_MVM_INVALID_QUEUE;
 			tid_data->txq_id = txq_id;
 
 			/*
@@ -1950,30 +1957,73 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
 	sta->sta_id = IWL_MVM_INVALID_STA;
 }
 
-static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
 					  u8 sta_id, u8 fifo)
 {
 	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
 		mvm->trans->trans_cfg->base_params->wd_timeout :
 		IWL_WATCHDOG_DISABLED;
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = fifo,
+		.sta_id = sta_id,
+		.tid = IWL_MAX_TID_COUNT,
+		.aggregate = false,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+
+	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+}
+
+static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
+{
+	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+		mvm->trans->trans_cfg->base_params->wd_timeout :
+		IWL_WATCHDOG_DISABLED;
+
+	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+	return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
+				       wdg_timeout);
+}
 
+static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
+					  int maccolor,
+					  struct iwl_mvm_int_sta *sta,
+					  u16 *queue, int fifo)
+{
+	int ret;
+
+	/* Map queue to fifo - needs to happen before adding station */
+	if (!iwl_mvm_has_new_tx_api(mvm))
+		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
+
+	ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
+	if (ret) {
+		if (!iwl_mvm_has_new_tx_api(mvm))
+			iwl_mvm_disable_txq(mvm, NULL, *queue,
+					    IWL_MAX_TID_COUNT, 0);
+		return ret;
+	}
+
+	/*
+	 * For 22000 firmware and on we cannot add queue to a station unknown
+	 * to firmware so enable queue here - after the station was added
+	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
-		int tvqm_queue =
-			iwl_mvm_tvqm_enable_txq(mvm, sta_id,
-						IWL_MAX_TID_COUNT,
-						wdg_timeout);
-		*queue = tvqm_queue;
-	} else {
-		struct iwl_trans_txq_scd_cfg cfg = {
-			.fifo = fifo,
-			.sta_id = sta_id,
-			.tid = IWL_MAX_TID_COUNT,
-			.aggregate = false,
-			.frame_limit = IWL_FRAME_LIMIT,
-		};
+		int txq;
 
-		iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
+		if (txq < 0) {
+			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
+			return txq;
+		}
+
+		*queue = txq;
 	}
+
+	return 0;
 }
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
@@ -1989,59 +2039,26 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 	if (ret)
 		return ret;
 
-	/* Map Aux queue to fifo - needs to happen before adding Aux station */
-	if (!iwl_mvm_has_new_tx_api(mvm))
-		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
-					      mvm->aux_sta.sta_id,
-					      IWL_MVM_TX_FIFO_MCAST);
-
-	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
-					 MAC_INDEX_AUX, 0);
+	ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
+					     &mvm->aux_sta, &mvm->aux_queue,
+					     IWL_MVM_TX_FIFO_MCAST);
 	if (ret) {
 		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
 		return ret;
 	}
 
-	/*
-	 * For 22000 firmware and on we cannot add queue to a station unknown
-	 * to firmware so enable queue here - after the station was added
-	 */
-	if (iwl_mvm_has_new_tx_api(mvm))
-		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
-					      mvm->aux_sta.sta_id,
-					      IWL_MVM_TX_FIFO_MCAST);
-
 	return 0;
 }
 
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	int ret;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	/* Map snif queue to fifo - must happen before adding snif station */
-	if (!iwl_mvm_has_new_tx_api(mvm))
-		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
-					      mvm->snif_sta.sta_id,
-					      IWL_MVM_TX_FIFO_BE);
-
-	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
-					 mvmvif->id, 0);
-	if (ret)
-		return ret;
-
-	/*
-	 * For 22000 firmware and on we cannot add queue to a station unknown
-	 * to firmware so enable queue here - after the station was added
-	 */
-	if (iwl_mvm_has_new_tx_api(mvm))
-		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
-					      mvm->snif_sta.sta_id,
-					      IWL_MVM_TX_FIFO_BE);
-
-	return 0;
+	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+					      &mvm->snif_sta, &mvm->snif_queue,
+					      IWL_MVM_TX_FIFO_BE);
 }
 
 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2133,6 +2150,10 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
 						IWL_MAX_TID_COUNT,
 						wdg_timeout);
+		if (queue < 0) {
+			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+			return queue;
+		}
 
 		if (vif->type == NL80211_IFTYPE_AP ||
 		    vif->type == NL80211_IFTYPE_ADHOC)
@@ -2307,10 +2328,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	}
 	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
 					 mvmvif->id, mvmvif->color);
-	if (ret) {
-		iwl_mvm_dealloc_int_sta(mvm, msta);
-		return ret;
-	}
+	if (ret)
+		goto err;
 
 	/*
 	 * Enable cab queue after the ADD_STA command is sent.
@@ -2323,6 +2342,10 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
 						    0,
 						    timeout);
+		if (queue < 0) {
+			ret = queue;
+			goto err;
+		}
 		mvmvif->cab_queue = queue;
 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
 			       IWL_UCODE_TLV_API_STA_TYPE))
@@ -2330,6 +2353,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 					   timeout);
 
 	return 0;
+err:
+	iwl_mvm_dealloc_int_sta(mvm, msta);
+	return ret;
 }
 
 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 6f4bb7ce71a5..040cec17d3ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -573,20 +573,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
-	{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
@@ -603,7 +603,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
@@ -618,60 +618,61 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
-	{IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
-	{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+	{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
 
-	{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
-	{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
 
 	{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
@@ -1067,11 +1068,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
 		   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-		   ((cfg != &iwl_ax200_cfg_cc &&
-		     cfg != &killer1650x_2ax_cfg &&
-		     cfg != &killer1650w_2ax_cfg &&
-		     cfg != &iwl_ax201_cfg_quz_hr) ||
-		    iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+		   iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
 		u32 hw_status;
 
 		hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index df8455f14e4d..ca3bb4d65b00 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -57,6 +57,24 @@
 #include "internal.h"
 #include "fw/dbg.h"
 
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+	udelay(20);
+	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+			  HPM_HIPM_GEN_CFG_CR_PG_EN |
+			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
+	udelay(20);
+	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+	iwl_trans_sw_reset(trans);
+	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	return 0;
+}
+
 /*
  * Start up NIC's basic functionality after it has been reset
  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -92,6 +110,13 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
 
 	iwl_pcie_apm_config(trans);
 
+	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+	    trans->cfg->integrated) {
+		ret = iwl_pcie_gen2_force_power_gating(trans);
+		if (ret)
+			return ret;
+	}
+
 	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
 	if (ret)
 		return ret;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 158a3d762e55..e323e9a5999f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -3041,30 +3041,6 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
 	}
 }
 
-
-/*
- * HostAP uses two layers of net devices, where the inner
- * layer gets called all the time from the outer layer.
- * This is a natural nesting, which needs a split lock type.
- */
-static struct lock_class_key hostap_netdev_xmit_lock_key;
-static struct lock_class_key hostap_netdev_addr_lock_key;
-
-static void prism2_set_lockdep_class_one(struct net_device *dev,
-					 struct netdev_queue *txq,
-					 void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock,
-			  &hostap_netdev_xmit_lock_key);
-}
-
-static void prism2_set_lockdep_class(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock,
-			  &hostap_netdev_addr_lock_key);
-	netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
-}
-
 static struct net_device *
 prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
 		       struct device *sdev)
@@ -3223,7 +3199,6 @@ while (0)
 	if (ret >= 0)
 		ret = register_netdevice(dev);
 
-	prism2_set_lockdep_class(dev);
 	rtnl_unlock();
 	if (ret < 0) {
 		printk(KERN_WARNING "%s: register netdevice failed!\n",
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 4d03596e891f..d7a1ddc9e407 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -8,6 +8,8 @@ mt76-y := \
 	mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
 	tx.o agg-rx.o mcu.o
 
+mt76-$(CONFIG_PCI) += pci.o
+
 mt76-usb-y := usb.o usb_trace.o
 
 CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index c747eb24581c..8f69d00bd940 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -53,8 +53,10 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	u32 ctrl;
 	int i, idx = -1;
 
-	if (txwi)
+	if (txwi) {
 		q->entry[q->head].txwi = DMA_DUMMY_DATA;
+		q->entry[q->head].skip_buf0 = true;
+	}
 
 	for (i = 0; i < nbufs; i += 2, buf += 2) {
 		u32 buf0 = buf[0].addr, buf1 = 0;
@@ -97,7 +99,7 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
 	u32 ctrl = le32_to_cpu(__ctrl);
 
-	if (!e->txwi || !e->skb) {
+	if (!e->skip_buf0) {
 		__le32 addr = READ_ONCE(q->desc[idx].buf0);
 		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
 
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 570c159515a0..8aec7ccf2d79 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -93,8 +93,9 @@ struct mt76_queue_entry {
 		struct urb *urb;
 	};
 	enum mt76_txq_id qid;
-	bool schedule;
-	bool done;
+	bool skip_buf0:1;
+	bool schedule:1;
+	bool done:1;
 };
 
 struct mt76_queue_regs {
@@ -578,6 +579,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 #define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
 
 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+void mt76_pci_disable_aspm(struct pci_dev *pdev);
 
 static inline u16 mt76_chip(struct mt76_dev *dev)
 {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 73c3104f8858..cf611d1b817c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -81,6 +81,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
 	mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
 
+	mt76_pci_disable_aspm(pdev);
+
 	return 0;
 
 error:
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
new file mode 100644
index 000000000000..04c5a692bc85
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/pci.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/pci.h>
+
+void mt76_pci_disable_aspm(struct pci_dev *pdev)
+{
+	struct pci_dev *parent = pdev->bus->self;
+	u16 aspm_conf, parent_aspm_conf = 0;
+
+	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+	aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+	if (parent) {
+		pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+					  &parent_aspm_conf);
+		parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+	}
+
+	if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+		/* aspm already disabled */
+		return;
+	}
+
+	dev_info(&pdev->dev, "disabling ASPM %s %s\n",
+		 (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+		 (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+	if (IS_ENABLED(CONFIG_PCIEASPM)) {
+		int err;
+
+		err = pci_disable_link_state(pdev, aspm_conf);
+		if (!err)
+			return;
+	}
+
+	/* both device and parent should have the same ASPM setting.
+	 * disable ASPM in downstream component first and then upstream.
+	 */
+	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf);
+	if (parent)
+		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+					   aspm_conf);
+}
+EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
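
The exported helper above is meant to be called from a driver's probe path before the device starts DMA. A minimal sketch of that call site (illustrative only; the probe function and its error handling are made up for the example, and the mt76x2 hunk earlier in this diff shows the real usage):

	static int example_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		int ret;

		ret = pcim_enable_device(pdev);
		if (ret)
			return ret;

		/* some chips misbehave with ASPM enabled; turn it off early */
		mt76_pci_disable_aspm(pdev);

		return 0;
	}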
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 6087ec7a90a6..f88d26535978 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -822,7 +822,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 		hdr = rtl_get_hdr(skb);
 		fc = rtl_get_fc(skb);
 
-		if (!stats.crc && !stats.hwerror) {
+		if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) {
 			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
 			       sizeof(rx_status));
 
@@ -859,6 +859,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 			_rtl_pci_rx_to_mac80211(hw, skb, rx_status);
 		}
 	} else {
+		/* drop packets with errors or those too short */
 		dev_kfree_skb_any(skb);
 	}
 new_trx_end:
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 70f04c2f5b17..fff8dda14023 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -754,6 +754,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
 		return;
 	} else {
 		noa_num = (noa_len - 2) / 13;
+		if (noa_num > P2P_MAX_NOA_NUM)
+			noa_num = P2P_MAX_NOA_NUM;
+
 	}
 	noa_index = ie[3];
 	if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
@@ -848,6 +851,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
 		return;
 	} else {
 		noa_num = (noa_len - 2) / 13;
+		if (noa_num > P2P_MAX_NOA_NUM)
+			noa_num = P2P_MAX_NOA_NUM;
+
 	}
 	noa_index = ie[3];
 	if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index be92e1220284..7997cc6de334 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -548,6 +548,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
 	priv->is_connected = false;
 	priv->is_up = false;
 	INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete);
+	__module_get(THIS_MODULE);
 
 	return 0;
 unregister_netdev:
@@ -578,6 +579,7 @@ static void virt_wifi_dellink(struct net_device *dev,
 	netdev_upper_dev_unlink(priv->lowerdev, dev);
 
 	unregister_netdevice_queue(dev, head);
+	module_put(THIS_MODULE);
 
 	/* Deleting the wiphy is handled in the module destructor. */
 }
@@ -590,6 +592,42 @@ static struct rtnl_link_ops virt_wifi_link_ops = {
 	.priv_size	= sizeof(struct virt_wifi_netdev_priv),
 };
 
+static bool netif_is_virt_wifi_dev(const struct net_device *dev)
+{
+	return rcu_access_pointer(dev->rx_handler) == virt_wifi_rx_handler;
+}
+
+static int virt_wifi_event(struct notifier_block *this, unsigned long event,
+			   void *ptr)
+{
+	struct net_device *lower_dev = netdev_notifier_info_to_dev(ptr);
+	struct virt_wifi_netdev_priv *priv;
+	struct net_device *upper_dev;
+	LIST_HEAD(list_kill);
+
+	if (!netif_is_virt_wifi_dev(lower_dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		priv = rtnl_dereference(lower_dev->rx_handler_data);
+		if (!priv)
+			return NOTIFY_DONE;
+
+		upper_dev = priv->upperdev;
+
+		upper_dev->rtnl_link_ops->dellink(upper_dev, &list_kill);
+		unregister_netdevice_many(&list_kill);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block virt_wifi_notifier = {
+	.notifier_call = virt_wifi_event,
+};
+
 /* Acquires and releases the rtnl lock. */
 static int __init virt_wifi_init_module(void)
 {
@@ -598,14 +636,25 @@ static int __init virt_wifi_init_module(void)
 	/* Guaranteed to be locallly-administered and not multicast. */
 	eth_random_addr(fake_router_bssid);
 
+	err = register_netdevice_notifier(&virt_wifi_notifier);
+	if (err)
+		return err;
+
+	err = -ENOMEM;
 	common_wiphy = virt_wifi_make_wiphy();
 	if (!common_wiphy)
-		return -ENOMEM;
+		goto notifier;
 
 	err = rtnl_link_register(&virt_wifi_link_ops);
 	if (err)
-		virt_wifi_destroy_wiphy(common_wiphy);
+		goto destroy_wiphy;
 
+	return 0;
+
+destroy_wiphy:
+	virt_wifi_destroy_wiphy(common_wiphy);
+notifier:
+	unregister_netdevice_notifier(&virt_wifi_notifier);
 	return err;
 }
 
@@ -615,6 +664,7 @@ static void __exit virt_wifi_cleanup_module(void)
 	/* Will delete any devices that depend on the wiphy. */
 	rtnl_link_unregister(&virt_wifi_link_ops);
 	virt_wifi_destroy_wiphy(common_wiphy);
+	unregister_netdevice_notifier(&virt_wifi_notifier);
 }
 
 module_init(virt_wifi_init_module);
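
The virt_wifi_event handler added above follows the stock netdevice-notifier pattern: register once in module init, react to NETDEV_UNREGISTER on the lower device, and tear the upper device down under rtnl. A stand-alone sketch of the same shape (illustrative; not part of this patch):

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_UNREGISTER)
			netdev_info(dev, "device is being unregistered\n");

		return NOTIFY_DONE;
	}

	static struct notifier_block example_notifier = {
		.notifier_call = example_netdev_event,
	};

	/* register_netdevice_notifier(&example_notifier) in module init,
	 * unregister_netdevice_notifier(&example_notifier) in module exit.
	 */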
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 770dbcbc999e..7544be84ab35 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2219,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
 	struct nvme_tcp_queue *queue = hctx->driver_data;
 	struct sock *sk = queue->sock->sk;
 
-	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
 		sk_busy_loop(sk, true);
 	nvme_tcp_try_recv(queue);
 	return queue->nr_cqe;
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 6c809440f319..4cf02ecd67de 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -204,6 +204,12 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
 	do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
 #define dynamic_dev_dbg(dev, fmt, ...)					\
 	do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize,	\
+			 groupsize, buf, len, ascii)		\
+	do { if (0)						\
+		print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
+				rowsize, groupsize, buf, len, ascii); \
+	} while (0)
 #endif
 
 #endif
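
The stub added above keeps dynamic_hex_dump() callers compiling when CONFIG_DYNAMIC_DEBUG is off while generating no code, mirroring the print_hex_dump() argument list. A usage sketch (assumes an skb in scope; the prefix string is illustrative):

	/* 16 bytes per row, one-byte groups, with an ASCII column */
	dynamic_hex_dump("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
			 skb->data, skb->len, true);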
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 2ce57645f3cd..0367a75f873b 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1099,7 +1099,6 @@ static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 
 #endif /* CONFIG_BPF_JIT */
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
 
 #define BPF_ANC		BIT(15)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..61f2f6ff9467 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -325,6 +325,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
 }
 
+/**
+ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
+ * @gfp_flags: gfp_flags to test
+ *
+ * Test whether @gfp_flags indicates that the allocation is from the
+ * %current context and allowed to sleep.
+ *
+ * An allocation being allowed to block doesn't mean it owns the %current
+ * context.  When direct reclaim path tries to allocate memory, the
+ * allocation context is nested inside whatever %current was doing at the
+ * time of the original allocation.  The nested allocation may be allowed
+ * to block but modifying anything %current owns can corrupt the outer
+ * context's expectations.
+ *
+ * %true result from this function indicates that the allocation context
+ * can sleep and use anything that's associated with %current.
+ */
+static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+{
+	return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+		__GFP_DIRECT_RECLAIM;
+}
+
 #ifdef CONFIG_HIGHMEM
 #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
 #else
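
A sketch of the intended use of the new helper (illustrative, not from this patch; the function name is made up): only touch state owned by %current when the flags say the allocation really runs in the task's own sleepable context, and fall back to caller-private storage otherwise:

	static struct page_frag *pick_frag(struct sock *sk, gfp_t gfp)
	{
		/* borrow current->task_frag only in a normal context */
		if (gfpflags_normal_context(gfp))
			return &current->task_frag;

		/* reclaim/atomic path: stay within the socket's own cache */
		return &sk->sk_frag;
	}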
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 2e55e4cdbd8a..a367ead4bf4b 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -29,7 +29,6 @@ struct macvlan_dev {
 	netdev_features_t	set_features;
 	enum macvlan_mode	mode;
 	u16			flags;
-	int			nest_level;
 	unsigned int		macaddr_count;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	struct netpoll		*netpoll;
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 06faa066496f..ec7e4bd07f82 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -223,6 +223,7 @@ struct team {
 		atomic_t count_pending;
 		struct delayed_work dw;
 	} mcast_rejoin;
+	struct lock_class_key team_lock_key;
 	long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 244278d5c222..b05e855f1ddd 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -182,7 +182,6 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	struct netpoll				*netpoll;
 #endif
-	unsigned int				nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -221,11 +220,6 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
 
 extern bool vlan_uses_dev(const struct net_device *dev);
 
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
-	BUG_ON(!is_vlan_dev(dev));
-	return vlan_dev_priv(dev)->nest_level;
-}
 #else
 static inline struct net_device *
 __vlan_find_dev_deep_rcu(struct net_device *real_dev,
@@ -295,11 +289,6 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
295{ 289{
296 return false; 290 return false;
297} 291}
298static inline int vlan_get_encap_level(struct net_device *dev)
299{
300 BUG();
301 return 0;
302}
303#endif 292#endif
304 293
305/** 294/**
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 138c50d5a353..0836fe232f97 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1545,9 +1545,8 @@ struct mlx5_ifc_extended_dest_format_bits {
 };
 
 union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
-	struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+	struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
 	struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
-	u8 reserved_at_0[0x40];
 };
 
 struct mlx5_ifc_fte_match_param_bits {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9eda1c31d1f7..c20f190b4c18 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -925,6 +925,7 @@ struct dev_ifalias {
 struct devlink;
 struct tlsdev_ops;
 
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -1421,7 +1422,6 @@ struct net_device_ops {
 	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
 							void *priv);
 
-	int			(*ndo_get_lock_subclass)(struct net_device *dev);
 	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
 						      int queue_index,
 						      u32 maxrate);
@@ -1649,6 +1649,8 @@ enum netdev_priv_flags {
 *	@perm_addr:		Permanent hw address
 *	@addr_assign_type:	Hw address assignment type
 *	@addr_len:		Hardware address length
+*	@upper_level:		Maximum depth level of upper devices.
+*	@lower_level:		Maximum depth level of lower devices.
 *	@neigh_priv_len:	Used in neigh_alloc()
 *	@dev_id:		Used to differentiate devices that share
 *				the same link layer address
@@ -1758,9 +1760,13 @@ enum netdev_priv_flags {
 *	@phydev:	Physical device may attach itself
 *			for hardware timestamping
 *	@sfp_bus:	attached &struct sfp_bus structure.
-*
-*	@qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
+*	@qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
+*				spinlock
 *	@qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+*	@qdisc_xmit_lock_key: lockdep class annotating
+*			      netdev_queue->_xmit_lock spinlock
+*	@addr_list_lock_key: lockdep class annotating
+*			     net_device->addr_list_lock spinlock
 *
 *	@proto_down: protocol port state information can be sent to the
 *		     switch driver and used to set the phys state of the
@@ -1875,6 +1881,8 @@ struct net_device {
 	unsigned char		perm_addr[MAX_ADDR_LEN];
 	unsigned char		addr_assign_type;
 	unsigned char		addr_len;
+	unsigned char		upper_level;
+	unsigned char		lower_level;
 	unsigned short		neigh_priv_len;
 	unsigned short		dev_id;
 	unsigned short		dev_port;
@@ -2045,8 +2053,10 @@ struct net_device {
 #endif
 	struct phy_device	*phydev;
 	struct sfp_bus		*sfp_bus;
-	struct lock_class_key	*qdisc_tx_busylock;
-	struct lock_class_key	*qdisc_running_key;
+	struct lock_class_key	qdisc_tx_busylock_key;
+	struct lock_class_key	qdisc_running_key;
+	struct lock_class_key	qdisc_xmit_lock_key;
+	struct lock_class_key	addr_list_lock_key;
 	bool			proto_down;
 	unsigned		wol_enabled:1;
 };
@@ -2124,23 +2134,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 		f(dev, &dev->_tx[i], arg);
 }
 
-#define netdev_lockdep_set_classes(dev)				\
-{								\
-	static struct lock_class_key qdisc_tx_busylock_key;	\
-	static struct lock_class_key qdisc_running_key;		\
-	static struct lock_class_key qdisc_xmit_lock_key;	\
-	static struct lock_class_key dev_addr_list_lock_key;	\
-	unsigned int i;						\
-								\
-	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
-	(dev)->qdisc_running_key = &qdisc_running_key;		\
-	lockdep_set_class(&(dev)->addr_list_lock,		\
-			  &dev_addr_list_lock_key);		\
-	for (i = 0; i < (dev)->num_tx_queues; i++)		\
-		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
-				  &qdisc_xmit_lock_key);	\
-}
-
 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 		   struct net_device *sb_dev);
 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
@@ -3139,6 +3132,7 @@ static inline void netif_stop_queue(struct net_device *dev)
 }
 
 void netif_tx_stop_all_queues(struct net_device *dev);
+void netdev_update_lockdep_key(struct net_device *dev);
 
 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
@@ -4056,16 +4050,6 @@ static inline void netif_addr_lock(struct net_device *dev)
 	spin_lock(&dev->addr_list_lock);
 }
 
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
-	int subclass = SINGLE_DEPTH_NESTING;
-
-	if (dev->netdev_ops->ndo_get_lock_subclass)
-		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
-
-	spin_lock_nested(&dev->addr_list_lock, subclass);
-}
-
 static inline void netif_addr_lock_bh(struct net_device *dev)
 {
 	spin_lock_bh(&dev->addr_list_lock);
@@ -4329,6 +4313,16 @@ int netdev_master_upper_dev_link(struct net_device *dev,
 				 struct netlink_ext_ack *extack);
 void netdev_upper_dev_unlink(struct net_device *dev,
 			     struct net_device *upper_dev);
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+				   struct net_device *new_dev,
+				   struct net_device *dev,
+				   struct netlink_ext_ack *extack);
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+				   struct net_device *new_dev,
+				   struct net_device *dev);
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+				  struct net_device *new_dev,
+				  struct net_device *dev);
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
 				   struct net_device *lower_dev);
@@ -4340,7 +4334,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 void netdev_rss_key_fill(void *buffer, size_t len);
 
-int dev_get_nest_level(struct net_device *dev);
 int skb_checksum_help(struct sk_buff *skb);
 int skb_crc32c_csum_help(struct sk_buff *skb);
 int skb_csum_hwoffload_help(struct sk_buff *skb,
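
Taken together, these netdevice.h changes replace per-type static lockdep keys (which forced every stacked device to track a nest_level and implement ndo_get_lock_subclass) with keys embedded in each net_device, so each instance becomes its own lock class. A toy illustration of why per-instance keys make the subclass bookkeeping unnecessary; lock_class_key here is a bare stand-in, leaning on the fact that lockdep keys are essentially unique addresses used as class identities:

#include <stdio.h>

struct lock_class_key { char dummy; };	/* identity == address, as in lockdep */

struct toy_netdev {
	const char *name;
	/* Embedded key: every device instance is a distinct lock class,
	 * so vlan-on-bond-on-eth stacks need no manual subclass levels. */
	struct lock_class_key addr_list_lock_key;
};

int main(void)
{
	struct toy_netdev eth0  = { .name = "eth0" };
	struct toy_netdev vlan0 = { .name = "vlan0" };

	printf("%-5s class id %p\n", eth0.name, (void *)&eth0.addr_list_lock_key);
	printf("%-5s class id %p\n", vlan0.name, (void *)&vlan0.addr_list_lock_key);
	return 0;
}
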
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7914fdaf4226..64a395c7f689 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1354,7 +1354,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
 	return skb->hash;
 }
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+			   const siphash_key_t *perturb);
 
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
@@ -1495,6 +1496,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 }
 
 /**
+ *	skb_queue_empty_lockless - check if a queue is empty
+ *	@list: queue head
+ *
+ *	Returns true if the queue is empty, false otherwise.
+ *	This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+	return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
 **	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
@@ -1847,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
 			      struct sk_buff *prev, struct sk_buff *next,
 			      struct sk_buff_head *list)
 {
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
+	/* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+	WRITE_ONCE(newsk->next, next);
+	WRITE_ONCE(newsk->prev, prev);
+	WRITE_ONCE(next->prev, newsk);
+	WRITE_ONCE(prev->next, newsk);
 	list->qlen++;
 }
 
@@ -1860,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
 	struct sk_buff *first = list->next;
 	struct sk_buff *last = list->prev;
 
-	first->prev = prev;
-	prev->next = first;
+	WRITE_ONCE(first->prev, prev);
+	WRITE_ONCE(prev->next, first);
 
-	last->next = next;
-	next->prev = last;
+	WRITE_ONCE(last->next, next);
+	WRITE_ONCE(next->prev, last);
 }
 
 /**
@@ -2005,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	next	   = skb->next;
 	prev	   = skb->prev;
 	skb->next  = skb->prev = NULL;
-	next->prev = prev;
-	prev->next = next;
+	WRITE_ONCE(next->prev, prev);
+	WRITE_ONCE(prev->next, next);
 }
 
 /**
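
The WRITE_ONCE()/READ_ONCE() pairing is the whole trick behind skb_queue_empty_lockless(): writers publish each list pointer with a single store and the lockless reader does a single load, so it observes either the old or the new pointer, never a torn or compiler-refetched value. A user-space analogue with C11 relaxed atomics standing in for READ_ONCE()/WRITE_ONCE():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	_Atomic(struct node *) next;
	_Atomic(struct node *) prev;
};

/* Empty list: head->next points back at the head itself. */
static void list_init(struct node *head)
{
	atomic_store_explicit(&head->next, head, memory_order_relaxed);
	atomic_store_explicit(&head->prev, head, memory_order_relaxed);
}

/* Writer side, mirroring __skb_insert(): each pointer update is one
 * atomic store, so a concurrent reader never sees a torn word. */
static void list_insert(struct node *n, struct node *prev, struct node *next)
{
	atomic_store_explicit(&n->next, next, memory_order_relaxed);
	atomic_store_explicit(&n->prev, prev, memory_order_relaxed);
	atomic_store_explicit(&next->prev, n, memory_order_relaxed);
	atomic_store_explicit(&prev->next, n, memory_order_relaxed);
}

/* Reader side, mirroring skb_queue_empty_lockless(): one explicit load. */
static bool list_empty_lockless(struct node *head)
{
	return atomic_load_explicit(&head->next, memory_order_relaxed) == head;
}

int main(void)
{
	struct node head, a;

	list_init(&head);
	printf("empty: %d\n", list_empty_lockless(&head)); /* 1 */
	list_insert(&a, &head, &head);
	printf("empty: %d\n", list_empty_lockless(&head)); /* 0 */
	return 0;
}
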
diff --git a/include/linux/socket.h b/include/linux/socket.h
index fc0bed59fc84..4049d9755cf1 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -263,7 +263,7 @@ struct ucred {
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen. */
-#define SOMAXCONN	128
+#define SOMAXCONN	4096
 
 /* Flags we can use with send/ and recv.
    Added those for 1003.1g not all are supported yet
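
Applications rarely see SOMAXCONN directly: listen() silently clamps the requested backlog to the net.core.somaxconn sysctl, whose default this raises from 128 to 4096. A small POSIX demo of that clamping:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = 0,				/* any free port */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* The kernel silently clamps this request to net.core.somaxconn;
	 * with the new default an application asking for a large accept
	 * queue can get up to 4096 entries without any sysctl tuning. */
	if (listen(fd, 65535) < 0)
		return 1;

	puts("listening; effective backlog = min(65535, somaxconn)");
	close(fd);
	return 0;
}
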
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f7fe45689142..1afc125014da 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -203,7 +203,6 @@ struct bonding {
 	struct	 slave __rcu *primary_slave;
 	struct	 bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
 	bool	 force_primary;
-	u32	 nest_level;
 	s32	 slave_cnt; /* never change this value outside the attach/detach wrappers */
 	int	(*recv_probe)(const struct sk_buff *, struct bonding *,
			      struct slave *);
@@ -239,6 +238,7 @@ struct bonding {
 	struct	 dentry *debug_dir;
 #endif /* CONFIG_DEBUG_FS */
 	struct rtnl_link_stats64 bond_stats;
+	struct lock_class_key stats_lock_key;
 };
 
 #define bond_slave_get_rcu(dev) \
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 127a5c4e3699..86e028388bad 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -122,7 +122,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
 static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	sk->sk_napi_id = skb->napi_id;
+	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
 	sk_rx_queue_set(sk, skb);
 }
@@ -132,8 +132,8 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
 					const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	if (!sk->sk_napi_id)
-		sk->sk_napi_id = skb->napi_id;
+	if (!READ_ONCE(sk->sk_napi_id))
+		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
 }
 
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 90bd210be060..5cd12276ae21 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -4,6 +4,7 @@
 
 #include <linux/types.h>
 #include <linux/in6.h>
+#include <linux/siphash.h>
 #include <uapi/linux/if_ether.h>
 
 /**
@@ -276,7 +277,7 @@ struct flow_keys_basic {
 struct flow_keys {
 	struct flow_dissector_key_control control;
 #define FLOW_KEYS_HASH_START_FIELD basic
-	struct flow_dissector_key_basic basic;
+	struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
 	struct flow_dissector_key_tags tags;
 	struct flow_dissector_key_vlan vlan;
 	struct flow_dissector_key_vlan cvlan;
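
The new alignment annotation exists because the dissector hashes struct flow_keys as one contiguous blob starting at FLOW_KEYS_HASH_START_FIELD, and siphash performs word-sized loads that want an aligned, padded buffer. A self-contained sketch of the hash-a-span idea, with an illustrative struct layout and a trivial FNV-1a standing in for the keyed siphash:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy layout mirroring the idea: everything from "basic" to the end of
 * the struct is hashed as one contiguous blob. */
struct toy_flow_keys {
	uint32_t control;
	uint32_t basic;		/* hash starts here, like FLOW_KEYS_HASH_START_FIELD */
	uint32_t addrs[8];
	uint16_t sport, dport;
};

#define HASH_START offsetof(struct toy_flow_keys, basic)

/* FNV-1a stands in for siphash purely so the sketch is self-contained;
 * the point is the keyed, spanned usage, not the hash function. */
static uint64_t toy_hash(const void *data, size_t len, uint64_t key)
{
	const unsigned char *p = data;
	uint64_t h = 1469598103934665603ull ^ key;

	while (len--)
		h = (h ^ *p++) * 1099511628211ull;
	return h;
}

int main(void)
{
	struct toy_flow_keys keys;
	uint64_t secret = 0x243f6a8885a308d3ull;	/* per-boot random key */

	memset(&keys, 0, sizeof(keys));
	keys.basic = 0x0800;
	keys.sport = 12345;
	keys.dport = 80;

	printf("flow hash: %016llx\n",
	       (unsigned long long)toy_hash((char *)&keys + HASH_START,
					    sizeof(keys) - HASH_START,
					    secret));
	return 0;
}
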
diff --git a/include/net/fq.h b/include/net/fq.h
index d126b5d20261..2ad85e683041 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -69,7 +69,7 @@ struct fq {
 	struct list_head backlogs;
 	spinlock_t lock;
 	u32 flows_cnt;
-	u32 perturbation;
+	siphash_key_t	perturbation;
 	u32 limit;
 	u32 memory_limit;
 	u32 memory_usage;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index be40a4b327e3..107c0d700ed6 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -108,7 +108,7 @@ begin:
 
 static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
 {
-	u32 hash = skb_get_hash_perturb(skb, fq->perturbation);
+	u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
 
 	return reciprocal_scale(hash, fq->flows_cnt);
 }
@@ -308,7 +308,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
 	INIT_LIST_HEAD(&fq->backlogs);
 	spin_lock_init(&fq->lock);
 	fq->flows_cnt = max_t(u32, flows_cnt, 1);
-	fq->perturbation = prandom_u32();
+	get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
 	fq->quantum = 300;
 	fq->limit = 8192;
 	fq->memory_limit = 16 << 20; /* 16 MBytes */
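
With the hash now randomized by a proper siphash key, fq_flow_idx() still maps it into [0, flows_cnt) using the kernel's usual reciprocal_scale() trick: a widening multiply and a shift instead of a modulo. A short demo; srandom()/random() stand in for get_random_bytes():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Same trick as the kernel's reciprocal_scale(): map a 32-bit hash
 * onto [0, n) with a multiply+shift instead of a modulo. */
static uint32_t reciprocal_scale(uint32_t hash, uint32_t n)
{
	return (uint32_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	uint32_t flows_cnt = 4096;
	uint32_t perturbation;

	/* Userspace stand-in for seeding the key once at init time,
	 * as fq_init() now does for the whole siphash key. */
	srandom((unsigned)time(NULL));
	perturbation = (uint32_t)random();

	for (int i = 0; i < 4; i++) {
		uint32_t hash = (uint32_t)random() ^ perturbation;

		printf("hash %08x -> flow %u\n", hash,
		       reciprocal_scale(hash, flows_cnt));
	}
	return 0;
}
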
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
index 81643cf8a1c4..c81444611a22 100644
--- a/include/net/hwbm.h
+++ b/include/net/hwbm.h
@@ -21,9 +21,13 @@ void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
 int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
 #else
-void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
-int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
+static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
+
+static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
+{ return 0; }
+
+static inline int hwbm_pool_add(struct hwbm_pool *bm_pool,
+				unsigned int buf_num)
 { return 0; }
 #endif /* CONFIG_HWBM */
 #endif /* _HWBM_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 95bb77f95bcc..a2c61c36dc4a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -185,7 +185,7 @@ static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
 }
 
 struct ip_frag_state {
-	struct iphdr	*iph;
+	bool		DF;
 	unsigned int	hlen;
 	unsigned int	ll_rs;
 	unsigned int	mtu;
@@ -196,7 +196,7 @@ struct ip_frag_state {
 };
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
-		  unsigned int mtu, struct ip_frag_state *state);
+		  unsigned int mtu, bool DF, struct ip_frag_state *state);
 struct sk_buff *ip_frag_next(struct sk_buff *skb,
 			     struct ip_frag_state *state);
 
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 3759167f91f5..078887c8c586 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -889,6 +889,7 @@ struct netns_ipvs {
 	struct delayed_work	defense_work;   /* Work handler */
 	int			drop_rate;
 	int			drop_counter;
+	int			old_secure_tcp;
 	atomic_t		dropentry;
 	/* locks in ctl.c */
 	spinlock_t		dropentry_lock;  /* drop entry handling */
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 4c2cd9378699..c7e15a213ef2 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -342,7 +342,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
 #define __net_initconst	__initconst
 #endif
 
-int peernet2id_alloc(struct net *net, struct net *peer);
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
 int peernet2id(struct net *net, struct net *peer);
 bool peernet_has_id(struct net *net, struct net *peer);
 struct net *get_net_ns_by_id(struct net *net, int id);
diff --git a/include/net/sock.h b/include/net/sock.h
index f69b58bff7e5..8f9adcfac41b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -954,8 +954,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
 {
 	int cpu = raw_smp_processor_id();
 
-	if (unlikely(sk->sk_incoming_cpu != cpu))
-		sk->sk_incoming_cpu = cpu;
+	if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+		WRITE_ONCE(sk->sk_incoming_cpu, cpu);
 }
 
 static inline void sock_rps_record_flow_hash(__u32 hash)
@@ -2242,12 +2242,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 * sk_page_frag - return an appropriate page_frag
 * @sk: socket
 *
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and owns
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
 */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-	if (gfpflags_allow_blocking(sk->sk_allocation))
+	if (gfpflags_normal_context(sk->sk_allocation))
 		return &current->task_frag;
 
 	return &sk->sk_frag;
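
A user-space miniature of the fixed selection logic above; the structs, gfp bits, and helper are hand-rolled stand-ins that mirror, rather than reproduce, the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types and gfp bits. */
#define __GFP_DIRECT_RECLAIM	0x400u
#define __GFP_MEMALLOC		0x20000u

struct page_frag { unsigned int offset; };
struct toy_sock {
	unsigned int sk_allocation;
	struct page_frag sk_frag;
};

static _Thread_local struct page_frag task_frag; /* like current->task_frag */

static bool normal_context(unsigned int flags)
{
	return (flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
		__GFP_DIRECT_RECLAIM;
}

/* The fix in miniature: only hand out the shared per-task frag when the
 * caller both may sleep and is not a nested reclaim-side allocation;
 * otherwise fall back to the per-socket frag, avoiding the recursion. */
static struct page_frag *sk_page_frag(struct toy_sock *sk)
{
	if (normal_context(sk->sk_allocation))
		return &task_frag;
	return &sk->sk_frag;
}

int main(void)
{
	struct toy_sock tcp  = { .sk_allocation = __GFP_DIRECT_RECLAIM };
	struct toy_sock swap = { .sk_allocation = __GFP_DIRECT_RECLAIM |
						  __GFP_MEMALLOC };

	printf("tcp  uses %s frag\n",
	       sk_page_frag(&tcp) == &task_frag ? "task" : "socket");
	printf("swap uses %s frag\n",
	       sk_page_frag(&swap) == &task_frag ? "task" : "socket");
	return 0;
}
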
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 335283dbe9b3..373aadcfea21 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -197,6 +197,7 @@ struct vxlan_rdst {
 	u8			 offloaded:1;
 	__be32			 remote_vni;
 	u32			 remote_ifindex;
+	struct net_device	 *remote_dev;
 	struct list_head	 list;
 	struct rcu_head		 rcu;
 	struct dst_cache	 dst_cache;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 66088a9e9b9e..ef0e1e3e66f4 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -502,7 +502,7 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
 	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 }
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 {
 	int i;
 
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d27f3b60ff6d..3867864cdc2f 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -128,7 +128,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 
 		if (!dtab->n_buckets) /* Overflow check */
 			return -EINVAL;
-		cost += sizeof(struct hlist_head) * dtab->n_buckets;
+		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
 	}
 
 	/* if map size is larger than memlock limit, reject it */
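
The cast matters on 32-bit kernels, where sizeof() yields a 32-bit size_t and the multiply can wrap before it ever reaches the u64 cost accumulator. A compact demonstration (the 8-byte hlist_head size is assumed for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* On a 32-bit target both operands are 32-bit, so the multiply
	 * wraps before it is added to the 64-bit cost. */
	uint32_t n_buckets = 0x20000000;		/* 2^29 buckets */
	uint32_t head_size = 8;				/* sizeof(struct hlist_head) */
	uint64_t cost = 0;

	cost += (uint32_t)(head_size * n_buckets);	/* 8 * 2^29 wraps to 0 */
	printf("unchecked: %llu\n", (unsigned long long)cost);

	cost = 0;
	cost += (uint64_t)head_size * n_buckets;	/* the fix: widen first */
	printf("fixed:     %llu\n", (unsigned long long)cost);
	return 0;
}
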
@@ -719,6 +719,32 @@ const struct bpf_map_ops dev_map_hash_ops = {
 	.map_check_btf = map_check_no_btf,
 };
 
+static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
+				       struct net_device *netdev)
+{
+	unsigned long flags;
+	u32 i;
+
+	spin_lock_irqsave(&dtab->index_lock, flags);
+	for (i = 0; i < dtab->n_buckets; i++) {
+		struct bpf_dtab_netdev *dev;
+		struct hlist_head *head;
+		struct hlist_node *next;
+
+		head = dev_map_index_hash(dtab, i);
+
+		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+			if (netdev != dev->dev)
+				continue;
+
+			dtab->items--;
+			hlist_del_rcu(&dev->index_hlist);
+			call_rcu(&dev->rcu, __dev_map_entry_free);
+		}
+	}
+	spin_unlock_irqrestore(&dtab->index_lock, flags);
+}
+
 static int dev_map_notification(struct notifier_block *notifier,
 				ulong event, void *ptr)
 {
@@ -735,6 +761,11 @@ static int dev_map_notification(struct notifier_block *notifier,
 		 */
 		rcu_read_lock();
 		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
+			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+				dev_map_hash_remove_netdev(dtab, netdev);
+				continue;
+			}
+
 			for (i = 0; i < dtab->map.max_entries; i++) {
 				struct bpf_dtab_netdev *dev, *odev;
 
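
The walk above must use hlist_for_each_entry_safe() because it unlinks entries mid-iteration: the cursor's next pointer has to be cached before the current node is handed to call_rcu(). The same idea on a plain user-space list, with free() standing in for the RCU-deferred free:

#include <stdio.h>
#include <stdlib.h>

struct entry { int ifindex; struct entry *next; };

/* Safe removal during iteration: save e->next before possibly freeing
 * e, so deletion cannot derail the walk. */
static void remove_matching(struct entry **head, int ifindex)
{
	struct entry *e, *next;
	struct entry **prev = head;

	for (e = *head; e; e = next) {
		next = e->next;		/* cache before possibly freeing e */
		if (e->ifindex == ifindex) {
			*prev = next;	/* unlink */
			free(e);	/* call_rcu() in the real code */
		} else {
			prev = &e->next;
		}
	}
}

static struct entry *push(struct entry *head, int ifindex)
{
	struct entry *e = malloc(sizeof(*e));

	e->ifindex = ifindex;
	e->next = head;
	return e;
}

int main(void)
{
	struct entry *head = push(push(push(NULL, 3), 7), 3);

	remove_matching(&head, 3);
	for (struct entry *e = head; e; e = e->next)
		printf("left: %d\n", e->ifindex);	/* prints: left: 7 */
	return 0;
}
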
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 82eabd4e38ad..0937719b87e2 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1326,24 +1326,32 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
+	kvfree(aux->func_info);
 	free_used_maps(aux);
 	bpf_prog_uncharge_memlock(aux->prog);
 	security_bpf_prog_free(aux);
 	bpf_prog_free(aux->prog);
 }
 
+static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
+{
+	bpf_prog_kallsyms_del_all(prog);
+	btf_put(prog->aux->btf);
+	bpf_prog_free_linfo(prog);
+
+	if (deferred)
+		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+	else
+		__bpf_prog_put_rcu(&prog->aux->rcu);
+}
+
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
 		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
-		bpf_prog_kallsyms_del_all(prog);
-		btf_put(prog->aux->btf);
-		kvfree(prog->aux->func_info);
-		bpf_prog_free_linfo(prog);
-
-		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+		__bpf_prog_put_noref(prog, true);
 	}
 }
 
@@ -1741,11 +1749,12 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 	return err;
 
 free_used_maps:
-	bpf_prog_free_linfo(prog);
-	kvfree(prog->aux->func_info);
-	btf_put(prog->aux->btf);
-	bpf_prog_kallsyms_del_subprogs(prog);
-	free_used_maps(prog->aux);
+	/* In case we have subprogs, we need to wait for a grace
+	 * period before we can tear down JIT memory since symbols
+	 * are already exposed under kallsyms.
+	 */
+	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
+	return err;
 free_prog:
 	bpf_prog_uncharge_memlock(prog);
 free_prog_sec:
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 54728d2eda18..d4bcfd8f95bf 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -172,7 +172,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
 	if (err < 0)
 		goto out_uninit_mvrp;
 
-	vlan->nest_level = dev_get_nest_level(real_dev) + 1;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out_uninit_mvrp;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 93eadf179123..e5bff5cc6f97 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -489,36 +489,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
-				     struct netdev_queue *txq,
-				     void *_subclass)
-{
-	lockdep_set_class_and_subclass(&txq->_xmit_lock,
-				       &vlan_netdev_xmit_lock_key,
-				       *(int *)_subclass);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
-{
-	lockdep_set_class_and_subclass(&dev->addr_list_lock,
-				       &vlan_netdev_addr_lock_key,
-				       subclass);
-	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
-}
-
-static int vlan_dev_get_lock_subclass(struct net_device *dev)
-{
-	return vlan_dev_priv(dev)->nest_level;
-}
-
 static const struct header_ops vlan_header_ops = {
 	.create	 = vlan_dev_hard_header,
 	.parse	 = eth_header_parse,
@@ -609,8 +579,6 @@ static int vlan_dev_init(struct net_device *dev)
 
 	SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-	vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
-
 	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
 	if (!vlan->vlan_pcpu_stats)
 		return -ENOMEM;
@@ -812,7 +780,6 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
 #endif
 	.ndo_fix_features	= vlan_dev_fix_features,
-	.ndo_get_lock_subclass	= vlan_dev_get_lock_subclass,
 	.ndo_get_iflink		= vlan_dev_get_iflink,
 };
 
diff --git a/net/atm/common.c b/net/atm/common.c
index b7528e77997c..0ce530af534d 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -668,7 +668,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 		mask |= EPOLLHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* writable? */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index d78938e3e008..5b0b20e6da95 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -22,6 +22,8 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/pkt_sched.h>
@@ -193,14 +195,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 	unsigned char *ogm_buff;
 	u32 random_seqno;
 
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
 	atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
 	hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
 	ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
-	if (!ogm_buff)
+	if (!ogm_buff) {
+		mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 		return -ENOMEM;
+	}
 
 	hard_iface->bat_iv.ogm_buff = ogm_buff;
 
@@ -212,35 +218,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 	batadv_ogm_packet->reserved = 0;
 	batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
 
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+
 	return 0;
 }
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
 {
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
 	kfree(hard_iface->bat_iv.ogm_buff);
 	hard_iface->bat_iv.ogm_buff = NULL;
+
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
-	unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+	void *ogm_buff;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+	ogm_buff = hard_iface->bat_iv.ogm_buff;
+	if (!ogm_buff)
+		goto unlock;
+
+	batadv_ogm_packet = ogm_buff;
 	ether_addr_copy(batadv_ogm_packet->orig,
 			hard_iface->net_dev->dev_addr);
 	ether_addr_copy(batadv_ogm_packet->prev_sender,
 			hard_iface->net_dev->dev_addr);
+
+unlock:
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void
 batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
-	unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+	void *ogm_buff;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+	ogm_buff = hard_iface->bat_iv.ogm_buff;
+	if (!ogm_buff)
+		goto unlock;
+
+	batadv_ogm_packet = ogm_buff;
 	batadv_ogm_packet->ttl = BATADV_TTL;
+
+unlock:
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 /* when do we schedule our own ogm to be sent */
@@ -742,7 +772,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 	}
 }
 
-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+/**
+ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
+ * @hard_iface: interface whose ogm buffer should be transmitted
+ */
+static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
@@ -753,9 +787,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 	u16 tvlv_len = 0;
 	unsigned long send_time;
 
-	if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
-	    hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
-		return;
+	lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
 
 	/* the interface gets activated here to avoid race conditions between
 	 * the moment of activating the interface in
@@ -823,6 +855,17 @@ out:
 		batadv_hardif_put(primary_if);
 }
 
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+	if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+	    hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+		return;
+
+	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+	batadv_iv_ogm_schedule_buff(hard_iface);
+	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
 /**
 * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface
 * @orig_node: originator which reproadcasted the OGMs directly
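
The shape of the batman-adv fix, reduced to user space: a single mutex serializes every producer and consumer of the OGM buffer, and late users re-check the pointer under the lock because a concurrent disable may already have freed it. pthreads stand in for the kernel mutex API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t ogm_buff_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned char *ogm_buff;
static size_t ogm_buff_len;

static int iface_enable(size_t len)
{
	pthread_mutex_lock(&ogm_buff_mutex);
	ogm_buff = malloc(len);
	if (!ogm_buff) {
		pthread_mutex_unlock(&ogm_buff_mutex);
		return -1;
	}
	ogm_buff_len = len;
	memset(ogm_buff, 0, len);
	pthread_mutex_unlock(&ogm_buff_mutex);
	return 0;
}

static void iface_update_mac(const unsigned char *mac)
{
	pthread_mutex_lock(&ogm_buff_mutex);
	if (!ogm_buff)		/* may already be disabled: re-check under lock */
		goto unlock;
	memcpy(ogm_buff, mac, 6);
unlock:
	pthread_mutex_unlock(&ogm_buff_mutex);
}

static void iface_disable(void)
{
	pthread_mutex_lock(&ogm_buff_mutex);
	free(ogm_buff);
	ogm_buff = NULL;
	ogm_buff_len = 0;
	pthread_mutex_unlock(&ogm_buff_mutex);
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	if (iface_enable(52))
		return 1;
	iface_update_mac(mac);
	iface_disable();
	iface_update_mac(mac);	/* safe no-op instead of a use-after-free */
	puts("done");
	return 0;
}
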
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index dc4f7430cb5a..8033f24f506c 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
@@ -256,14 +257,12 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
- * @work: work queue item
+ * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
+ * @bat_priv: the bat priv with all the soft interface information
 */
-static void batadv_v_ogm_send(struct work_struct *work)
+static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
 {
 	struct batadv_hard_iface *hard_iface;
-	struct batadv_priv_bat_v *bat_v;
-	struct batadv_priv *bat_priv;
 	struct batadv_ogm2_packet *ogm_packet;
 	struct sk_buff *skb, *skb_tmp;
 	unsigned char *ogm_buff;
@@ -271,8 +270,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
 	u16 tvlv_len = 0;
 	int ret;
 
-	bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
-	bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+	lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
 
 	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
 		goto out;
@@ -364,6 +362,23 @@ out:
 }
 
 /**
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+	struct batadv_priv_bat_v *bat_v;
+	struct batadv_priv *bat_priv;
+
+	bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+	bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+	batadv_v_ogm_send_softif(bat_priv);
+	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
+/**
 * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
 * @work: work queue item
 *
@@ -424,11 +439,15 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
 	struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
 	struct batadv_ogm2_packet *ogm_packet;
 
+	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
 	if (!bat_priv->bat_v.ogm_buff)
-		return;
+		goto unlock;
 
 	ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
 	ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+
+unlock:
+	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
 
 /**
@@ -1050,6 +1069,8 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
 	atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
 	INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
 
+	mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
+
 	return 0;
 }
 
@@ -1061,7 +1082,11 @@ void batadv_v_ogm_free(struct batadv_priv *bat_priv)
 {
 	cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
 
+	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+
 	kfree(bat_priv->bat_v.ogm_buff);
 	bat_priv->bat_v.ogm_buff = NULL;
 	bat_priv->bat_v.ogm_buff_len = 0;
+
+	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index c90e47342bb0..afb52282d5bd 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/limits.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/rculist.h>
@@ -929,6 +930,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 	INIT_LIST_HEAD(&hard_iface->list);
 	INIT_HLIST_HEAD(&hard_iface->neigh_list);
 
+	mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
 	spin_lock_init(&hard_iface->neigh_list_lock);
 	kref_init(&hard_iface->refcount);
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9cbed6f5a85a..5ee8e9a100f9 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -740,36 +740,6 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
 	return 0;
 }
 
-/* batman-adv network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key batadv_netdev_xmit_lock_key;
-static struct lock_class_key batadv_netdev_addr_lock_key;
-
-/**
- * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
- * @dev: device which owns the tx queue
- * @txq: tx queue to modify
- * @_unused: always NULL
- */
-static void batadv_set_lockdep_class_one(struct net_device *dev,
-					 struct netdev_queue *txq,
-					 void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
-}
-
-/**
- * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
- * @dev: network device to modify
- */
-static void batadv_set_lockdep_class(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
-	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
-}
-
 /**
 * batadv_softif_init_late() - late stage initialization of soft interface
 * @dev: registered network device to modify
@@ -783,8 +753,6 @@ static int batadv_softif_init_late(struct net_device *dev)
 	int ret;
 	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
 
-	batadv_set_lockdep_class(dev);
-
 	bat_priv = netdev_priv(dev);
 	bat_priv->soft_iface = dev;
 
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index be7c02aa91e2..4d7f1baee7b7 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -17,6 +17,7 @@
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/kref.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/sched.h> /* for linux/wait.h */
@@ -81,6 +82,9 @@ struct batadv_hard_iface_bat_iv {
 
 	/** @ogm_seqno: OGM sequence number - used to identify each OGM */
 	atomic_t ogm_seqno;
+
+	/** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+	struct mutex ogm_buff_mutex;
 };
 
 /**
@@ -1539,6 +1543,9 @@ struct batadv_priv_bat_v {
 	/** @ogm_seqno: OGM sequence number - used to identify each OGM */
 	atomic_t ogm_seqno;
 
+	/** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+	struct mutex ogm_buff_mutex;
+
 	/** @ogm_wq: workqueue used to schedule OGM transmissions */
 	struct delayed_work ogm_wq;
 };
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index bb55d92691b0..4febc82a7c76 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -571,15 +571,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	return err < 0 ? NET_XMIT_DROP : err;
 }
 
-static int bt_dev_init(struct net_device *dev)
-{
-	netdev_lockdep_set_classes(dev);
-
-	return 0;
-}
-
 static const struct net_device_ops netdev_ops = {
-	.ndo_init		= bt_dev_init,
 	.ndo_start_xmit		= bt_xmit,
 };
 
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 94ddf19998c7..5f508c50649d 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -460,7 +460,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_state == BT_LISTEN)
 		return bt_accept_poll(sk);
 
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -470,7 +470,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= EPOLLHUP;
 
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_state == BT_CLOSED)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 681b72862c16..e804a3016902 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -24,8 +24,6 @@
 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_br_ops);
 
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -108,11 +106,6 @@ out:
 	return NETDEV_TX_OK;
 }
 
-static void br_set_lockdep_class(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
 static int br_dev_init(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -150,7 +143,6 @@ static int br_dev_init(struct net_device *dev)
 		br_mdb_hash_fini(br);
 		br_fdb_hash_fini(br);
 	}
-	br_set_lockdep_class(dev);
 
 	return err;
 }
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 506d6141e44e..809673222382 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -95,7 +95,7 @@ slow_path:
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
-	ip_frag_init(skb, hlen, ll_rs, frag_max_size, &state);
+	ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
 
 	while (state.left > 0) {
 		struct sk_buff *skb2;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 13ea920600ae..ef14da50a981 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -953,7 +953,7 @@ static __poll_t caif_poll(struct file *file,
 		mask |= EPOLLRDHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
 	    (sk->sk_shutdown & RCV_SHUTDOWN))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index c210fc116103..da3c24ed129c 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -97,7 +97,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
 	if (error)
 		goto out_err;
 
-	if (sk->sk_receive_queue.prev != skb)
+	if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
 		goto out;
 
 	/* Socket shut down? */
@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
 			break;
 
 		sk_busy_loop(sk, flags & MSG_DONTWAIT);
-	} while (sk->sk_receive_queue.prev != *last);
+	} while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
 	error = -EAGAIN;
 
@@ -767,7 +767,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
 	mask = 0;
 
 	/* exceptional events? */
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -777,7 +777,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
 		mask |= EPOLLHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
diff --git a/net/core/dev.c b/net/core/dev.c
index bf3ed413abaf..99ac84ff398f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -146,6 +146,7 @@
 #include "net-sysfs.h"
 
 #define MAX_GRO_SKBS 8
+#define MAX_NEST_DEV 8
 
 /* This should be increased if a protocol with a bigger head is added. */
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
@@ -276,88 +277,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 EXPORT_PER_CPU_SYMBOL(softnet_data);
 
-#ifdef CONFIG_LOCKDEP
-/*
- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
- * according to dev->type
- */
-static const unsigned short netdev_lock_type[] = {
-	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
-	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
-	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
-	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
-	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
-	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
-	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
-	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
-	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
-	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
-	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
-	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
-	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
-	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
-
-static const char *const netdev_lock_name[] = {
-	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
-	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
-	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
-
-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
-
-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
-		if (netdev_lock_type[i] == dev_type)
-			return i;
-	/* the last key is used by default */
-	return ARRAY_SIZE(netdev_lock_type) - 1;
-}
-
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-						 unsigned short dev_type)
-{
-	int i;
-
-	i = netdev_lock_pos(dev_type);
-	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
-				   netdev_lock_name[i]);
-}
-
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-	int i;
-
-	i = netdev_lock_pos(dev->type);
-	lockdep_set_class_and_name(&dev->addr_list_lock,
-				   &netdev_addr_lock_key[i],
-				   netdev_lock_name[i]);
-}
-#else
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-						 unsigned short dev_type)
-{
-}
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-}
-#endif
-
 /*******************************************************************************
  *
  *	Protocol management and registration routines
@@ -6489,6 +6408,9 @@ struct netdev_adjacent {
 	/* upper master flag, there can only be one master device per list */
 	bool master;
 
+	/* lookup ignore flag */
+	bool ignore;
+
 	/* counter for the number of times this device was added to us */
 	u16 ref_nr;
 
@@ -6511,7 +6433,7 @@ static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
 	return NULL;
 }
 
-static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
+static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
 {
 	struct net_device *dev = data;
 
@@ -6532,7 +6454,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
 	ASSERT_RTNL();
 
-	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
 					     upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
@@ -6550,7 +6472,7 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
 				  struct net_device *upper_dev)
 {
-	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
 					       upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
@@ -6594,6 +6516,22 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
+static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
+{
+	struct netdev_adjacent *upper;
+
+	ASSERT_RTNL();
+
+	if (list_empty(&dev->adj_list.upper))
+		return NULL;
+
+	upper = list_first_entry(&dev->adj_list.upper,
+				 struct netdev_adjacent, list);
+	if (likely(upper->master) && !upper->ignore)
+		return upper->dev;
+	return NULL;
+}
+
 /**
  * netdev_has_any_lower_dev - Check if device is linked to some device
  * @dev: device
@@ -6644,6 +6582,23 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
 
+static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
+						  struct list_head **iter,
+						  bool *ignore)
+{
+	struct netdev_adjacent *upper;
+
+	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+	if (&upper->list == &dev->adj_list.upper)
+		return NULL;
+
+	*iter = &upper->list;
+	*ignore = upper->ignore;
+
+	return upper->dev;
+}
+
 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
 						    struct list_head **iter)
 {
@@ -6661,34 +6616,111 @@ static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
 	return upper->dev;
 }
 
+static int __netdev_walk_all_upper_dev(struct net_device *dev,
+				       int (*fn)(struct net_device *dev,
+						 void *data),
+				       void *data)
+{
+	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
+	bool ignore;
+
+	now = dev;
+	iter = &dev->adj_list.upper;
+
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			udev = __netdev_next_upper_dev(now, &iter, &ignore);
+			if (!udev)
+				break;
+			if (ignore)
+				continue;
+
+			next = udev;
+			niter = &udev->adj_list.upper;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
+
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
+	}
+
+	return 0;
+}
+
 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
 				  int (*fn)(struct net_device *dev,
 					    void *data),
 				  void *data)
 {
-	struct net_device *udev;
-	struct list_head *iter;
-	int ret;
+	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
 
-	for (iter = &dev->adj_list.upper,
-	     udev = netdev_next_upper_dev_rcu(dev, &iter);
-	     udev;
-	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
-		/* first is the upper device itself */
-		ret = fn(udev, data);
-		if (ret)
-			return ret;
+	now = dev;
+	iter = &dev->adj_list.upper;
 
-		/* then look at all of its upper devices */
-		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
-		if (ret)
-			return ret;
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			udev = netdev_next_upper_dev_rcu(now, &iter);
+			if (!udev)
+				break;
+
+			next = udev;
+			niter = &udev->adj_list.upper;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
+
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
 
+static bool __netdev_has_upper_dev(struct net_device *dev,
+				   struct net_device *upper_dev)
+{
+	ASSERT_RTNL();
+
+	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
+					   upper_dev);
+}
+
 /**
  * netdev_lower_get_next_private - Get the next ->private from the
  * lower neighbour list
@@ -6785,34 +6817,119 @@ static struct net_device *netdev_next_lower_dev(struct net_device *dev,
 	return lower->dev;
 }
 
+static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
+						  struct list_head **iter,
+						  bool *ignore)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->adj_list.lower)
+		return NULL;
+
+	*iter = &lower->list;
+	*ignore = lower->ignore;
+
+	return lower->dev;
+}
+
 int netdev_walk_all_lower_dev(struct net_device *dev,
 			      int (*fn)(struct net_device *dev,
 					void *data),
 			      void *data)
 {
-	struct net_device *ldev;
-	struct list_head *iter;
-	int ret;
+	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
 
-	for (iter = &dev->adj_list.lower,
-	     ldev = netdev_next_lower_dev(dev, &iter);
-	     ldev;
-	     ldev = netdev_next_lower_dev(dev, &iter)) {
-		/* first is the lower device itself */
-		ret = fn(ldev, data);
-		if (ret)
-			return ret;
+	now = dev;
+	iter = &dev->adj_list.lower;
 
-		/* then look at all of its lower devices */
-		ret = netdev_walk_all_lower_dev(ldev, fn, data);
-		if (ret)
-			return ret;
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			ldev = netdev_next_lower_dev(now, &iter);
+			if (!ldev)
+				break;
+
+			next = ldev;
+			niter = &ldev->adj_list.lower;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
+
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
 
+static int __netdev_walk_all_lower_dev(struct net_device *dev,
+				       int (*fn)(struct net_device *dev,
+						 void *data),
+				       void *data)
+{
+	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
+	bool ignore;
+
+	now = dev;
+	iter = &dev->adj_list.lower;
+
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
+			if (!ldev)
+				break;
+			if (ignore)
+				continue;
+
+			next = ldev;
+			niter = &ldev->adj_list.lower;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
+
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
+	}
+
+	return 0;
+}
+
 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
 						    struct list_head **iter)
 {
@@ -6827,28 +6944,99 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
 	return lower->dev;
 }
 
-int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
-				  int (*fn)(struct net_device *dev,
-					    void *data),
-				  void *data)
+static u8 __netdev_upper_depth(struct net_device *dev)
+{
+	struct net_device *udev;
+	struct list_head *iter;
+	u8 max_depth = 0;
+	bool ignore;
+
+	for (iter = &dev->adj_list.upper,
+	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
+	     udev;
+	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
+		if (ignore)
+			continue;
+		if (max_depth < udev->upper_level)
+			max_depth = udev->upper_level;
+	}
+
+	return max_depth;
+}
+
+static u8 __netdev_lower_depth(struct net_device *dev)
 {
 	struct net_device *ldev;
 	struct list_head *iter;
-	int ret;
+	u8 max_depth = 0;
+	bool ignore;
 
 	for (iter = &dev->adj_list.lower,
-	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
+	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
 	     ldev;
-	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
-		/* first is the lower device itself */
-		ret = fn(ldev, data);
-		if (ret)
-			return ret;
+	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
+		if (ignore)
+			continue;
+		if (max_depth < ldev->lower_level)
+			max_depth = ldev->lower_level;
+	}
 
-		/* then look at all of its lower devices */
-		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
-		if (ret)
-			return ret;
+	return max_depth;
+}
+
+static int __netdev_update_upper_level(struct net_device *dev, void *data)
+{
+	dev->upper_level = __netdev_upper_depth(dev) + 1;
+	return 0;
+}
+
+static int __netdev_update_lower_level(struct net_device *dev, void *data)
+{
+	dev->lower_level = __netdev_lower_depth(dev) + 1;
+	return 0;
+}
+
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+				  int (*fn)(struct net_device *dev,
+					    void *data),
+				  void *data)
+{
+	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+	int ret, cur = 0;
+
+	now = dev;
+	iter = &dev->adj_list.lower;
+
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);
+			if (ret)
+				return ret;
+		}
+
+		next = NULL;
+		while (1) {
+			ldev = netdev_next_lower_dev_rcu(now, &iter);
+			if (!ldev)
+				break;
+
+			next = ldev;
+			niter = &ldev->adj_list.lower;
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			break;
+		}
+
+		if (!next) {
+			if (!cur)
+				return 0;
+			next = dev_stack[--cur];
+			niter = iter_stack[cur];
+		}
+
+		now = next;
+		iter = niter;
 	}
 
 	return 0;
@@ -6952,6 +7140,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 	adj->master = master;
 	adj->ref_nr = 1;
 	adj->private = private;
+	adj->ignore = false;
 	dev_hold(adj_dev);
 
 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
@@ -7102,14 +7291,17 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 		return -EBUSY;
 
 	/* To prevent loops, check if dev is not upper device to upper_dev. */
-	if (netdev_has_upper_dev(upper_dev, dev))
+	if (__netdev_has_upper_dev(upper_dev, dev))
 		return -EBUSY;
 
+	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
+		return -EMLINK;
+
 	if (!master) {
-		if (netdev_has_upper_dev(dev, upper_dev))
+		if (__netdev_has_upper_dev(dev, upper_dev))
 			return -EEXIST;
 	} else {
-		master_dev = netdev_master_upper_dev_get(dev);
+		master_dev = __netdev_master_upper_dev_get(dev);
 		if (master_dev)
 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
 	}
@@ -7131,6 +7323,13 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	if (ret)
 		goto rollback;
 
+	__netdev_update_upper_level(dev, NULL);
+	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+	__netdev_update_lower_level(upper_dev, NULL);
+	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+				    NULL);
+
 	return 0;
 
 rollback:
@@ -7213,9 +7412,96 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
 				      &changeupper_info.info);
+
+	__netdev_update_upper_level(dev, NULL);
+	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+	__netdev_update_lower_level(upper_dev, NULL);
+	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+				    NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
+				      struct net_device *lower_dev,
+				      bool val)
+{
+	struct netdev_adjacent *adj;
+
+	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
+	if (adj)
+		adj->ignore = val;
+
+	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
+	if (adj)
+		adj->ignore = val;
+}
+
+static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
+					struct net_device *lower_dev)
+{
+	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
+}
+
+static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
+				       struct net_device *lower_dev)
+{
+	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
+}
+
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+				   struct net_device *new_dev,
+				   struct net_device *dev,
+				   struct netlink_ext_ack *extack)
+{
+	int err;
+
+	if (!new_dev)
+		return 0;
+
+	if (old_dev && new_dev != old_dev)
+		netdev_adjacent_dev_disable(dev, old_dev);
+
+	err = netdev_upper_dev_link(new_dev, dev, extack);
+	if (err) {
+		if (old_dev && new_dev != old_dev)
+			netdev_adjacent_dev_enable(dev, old_dev);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(netdev_adjacent_change_prepare);
+
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+				   struct net_device *new_dev,
+				   struct net_device *dev)
+{
+	if (!new_dev || !old_dev)
+		return;
+
+	if (new_dev == old_dev)
+		return;
+
+	netdev_adjacent_dev_enable(dev, old_dev);
+	netdev_upper_dev_unlink(old_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_commit);
+
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+				  struct net_device *new_dev,
+				  struct net_device *dev)
+{
+	if (!new_dev)
+		return;
+
+	if (old_dev && new_dev != old_dev)
+		netdev_adjacent_dev_enable(dev, old_dev);
+
+	netdev_upper_dev_unlink(new_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_abort);
+
 /**
  * netdev_bonding_info_change - Dispatch event about slave change
  * @dev: device
@@ -7329,25 +7615,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev)
-{
-	struct net_device *lower = NULL;
-	struct list_head *iter;
-	int max_nest = -1;
-	int nest;
-
-	ASSERT_RTNL();
-
-	netdev_for_each_lower_dev(dev, lower, iter) {
-		nest = dev_get_nest_level(lower);
-		if (max_nest < nest)
-			max_nest = nest;
-	}
-
-	return max_nest + 1;
-}
-EXPORT_SYMBOL(dev_get_nest_level);
-
 /**
  * netdev_lower_change - Dispatch event about lower device state change
  * @lower_dev: device
@@ -8154,7 +8421,8 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		return -EINVAL;
 	}
 
-	if (prog->aux->id == prog_id) {
+	/* prog->aux->id may be 0 for orphaned device-bound progs */
+	if (prog->aux->id && prog->aux->id == prog_id) {
 		bpf_prog_put(prog);
 		return 0;
 	}
@@ -8619,7 +8887,7 @@ static void netdev_init_one_queue(struct net_device *dev,
 {
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
-	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+	lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
 	queue->xmit_lock_owner = -1;
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
@@ -8666,6 +8934,43 @@ void netif_tx_stop_all_queues(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_tx_stop_all_queues);
 
+static void netdev_register_lockdep_key(struct net_device *dev)
+{
+	lockdep_register_key(&dev->qdisc_tx_busylock_key);
+	lockdep_register_key(&dev->qdisc_running_key);
+	lockdep_register_key(&dev->qdisc_xmit_lock_key);
+	lockdep_register_key(&dev->addr_list_lock_key);
+}
+
+static void netdev_unregister_lockdep_key(struct net_device *dev)
+{
+	lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
+	lockdep_unregister_key(&dev->qdisc_running_key);
+	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+	lockdep_unregister_key(&dev->addr_list_lock_key);
+}
+
+void netdev_update_lockdep_key(struct net_device *dev)
+{
+	struct netdev_queue *queue;
+	int i;
+
+	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+	lockdep_unregister_key(&dev->addr_list_lock_key);
+
+	lockdep_register_key(&dev->qdisc_xmit_lock_key);
+	lockdep_register_key(&dev->addr_list_lock_key);
+
+	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		queue = netdev_get_tx_queue(dev, i);
+
+		lockdep_set_class(&queue->_xmit_lock,
+				  &dev->qdisc_xmit_lock_key);
+	}
+}
+EXPORT_SYMBOL(netdev_update_lockdep_key);
+
 /**
  * register_netdevice - register a network device
  * @dev: device to register
@@ -8700,7 +9005,7 @@ int register_netdevice(struct net_device *dev)
 	BUG_ON(!net);
 
 	spin_lock_init(&dev->addr_list_lock);
-	netdev_set_addr_lockdep_class(dev);
+	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
 
 	ret = dev_get_valid_name(net, dev, dev->name);
 	if (ret < 0)
@@ -9210,8 +9515,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	dev_net_set(dev, &init_net);
 
+	netdev_register_lockdep_key(dev);
+
 	dev->gso_max_size = GSO_MAX_SIZE;
 	dev->gso_max_segs = GSO_MAX_SEGS;
+	dev->upper_level = 1;
+	dev->lower_level = 1;
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
@@ -9292,6 +9601,8 @@ void free_netdev(struct net_device *dev)
 	free_percpu(dev->pcpu_refcnt);
 	dev->pcpu_refcnt = NULL;
 
+	netdev_unregister_lockdep_key(dev);
+
 	/* Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		netdev_freemem(dev);
@@ -9460,7 +9771,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 	rcu_barrier();
 
-	new_nsid = peernet2id_alloc(dev_net(dev), net);
+	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
 	/* If there is an ifindex conflict assign a new one */
 	if (__dev_get_by_index(net, dev->ifindex))
 		new_ifindex = dev_new_index(net);
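Two things happen in dev.c. First, the recursive upper/lower walkers become an iterative depth-first traversal with a fixed frame stack, and __netdev_upper_dev_link() refuses with -EMLINK once dev->lower_level + upper_dev->upper_level would exceed MAX_NEST_DEV, so the stacks can never overflow. Second, the per-ARPHRD-type static lockdep key tables are replaced by dynamic per-device keys registered in alloc_netdev_mqs(). A self-contained userspace sketch of the traversal shape (the node type, names, and the four-child limit are illustrative, not the kernel's):

	#include <stdio.h>

	#define MAX_NEST 8			/* mirrors MAX_NEST_DEV */
	#define MAX_CHILD 4

	struct node {
		const char *name;
		struct node *child[MAX_CHILD];	/* NULL-terminated "upper" devices */
	};

	/* Depth-first visit of everything above 'root' without recursion:
	 * an explicit stack of (node, next-child-index) frames, the same
	 * shape netdev_walk_all_upper_dev_rcu() now uses.
	 */
	static int walk(struct node *root, int (*fn)(struct node *))
	{
		struct node *stack[MAX_NEST + 1];
		int idx[MAX_NEST + 1];
		int cur = 0, ret;

		stack[0] = root;
		idx[0] = 0;

		while (cur >= 0) {
			struct node *now = stack[cur];
			int i = idx[cur]++;

			if (i == 0 && now != root) {
				ret = fn(now);	/* first visit: run the callback */
				if (ret)
					return ret;
			}
			if (i < MAX_CHILD && now->child[i]) {
				if (cur == MAX_NEST)
					return -1;	/* too deep, cf. -EMLINK */
				stack[++cur] = now->child[i];
				idx[cur] = 0;		/* push a new frame */
			} else {
				cur--;			/* children exhausted: pop */
			}
		}
		return 0;
	}

	static int print_name(struct node *n)
	{
		printf("%s\n", n->name);
		return 0;
	}

	int main(void)
	{
		struct node team = { "team0", { NULL } };
		struct node vlan = { "vlan0", { &team, NULL } };
		struct node eth  = { "eth0",  { &vlan, NULL } };

		return walk(&eth, print_name);	/* prints vlan0, then team0 */
	}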
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 6393ba930097..2f949b5a1eb9 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -637,7 +637,7 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -667,7 +667,7 @@ int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -691,7 +691,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
 		return;
 
 	netif_addr_lock_bh(from);
-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
 	__dev_set_rx_mode(to);
 	netif_addr_unlock(to);
@@ -858,7 +858,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -888,7 +888,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -912,7 +912,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
 		return;
 
 	netif_addr_lock_bh(from);
-	netif_addr_lock_nested(to);
+	netif_addr_lock(to);
 	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
 	__dev_set_rx_mode(to);
 	netif_addr_unlock(to);
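The _nested annotation can go precisely because of the dev.c change above: each device's addr_list_lock now carries its own dynamically registered lockdep key, so taking the lower device's lock while already holding the upper's involves two distinct lock classes and a plain netif_addr_lock() suffices. The per-instance key idiom, as an illustrative kernel-style sketch (demo_* names are not kernel API):

	struct demo_dev {
		spinlock_t		lock;
		struct lock_class_key	lock_key;	/* one class per instance */
	};

	static void demo_dev_init(struct demo_dev *d)
	{
		spin_lock_init(&d->lock);
		lockdep_register_key(&d->lock_key);
		lockdep_set_class(&d->lock, &d->lock_key);
	}

	static void demo_dev_free(struct demo_dev *d)
	{
		lockdep_unregister_key(&d->lock_key);	/* before freeing 'd' */
	}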
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index c763106c73fc..cd9bc67381b2 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1396,11 +1396,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
 
 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 {
-	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+	struct ethtool_wolinfo wol;
 
 	if (!dev->ethtool_ops->get_wol)
 		return -EOPNOTSUPP;
 
+	memset(&wol, 0, sizeof(struct ethtool_wolinfo));
+	wol.cmd = ETHTOOL_GWOL;
 	dev->ethtool_ops->get_wol(dev, &wol);
 
 	if (copy_to_user(useraddr, &wol, sizeof(wol)))
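A designated initializer only guarantees that the named members are zeroed; any padding bytes inside the struct are left unspecified, and since the whole struct is later copied to userspace byte-for-byte, that padding could leak kernel stack contents. memset() clears padding as well. The same defensive shape, in an illustrative standalone form (struct wol_like is a stand-in, not the real ethtool layout):

	#include <string.h>

	struct wol_like {
		unsigned int	cmd;
		unsigned int	supported;
		unsigned int	wolopts;
		unsigned char	sopass[6];	/* tail padding may follow */
	};

	static void prepare_for_copy_out(struct wol_like *wol)
	{
		/* memset() zeroes padding bytes too; "= { .cmd = ... }"
		 * need not, so a byte-wise copy to userspace could expose
		 * whatever was previously on the stack.
		 */
		memset(wol, 0, sizeof(*wol));
		wol->cmd = 0x5;			/* ETHTOOL_GWOL */
	}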
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 7c09d87d3269..68eda10d0680 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1350,30 +1350,21 @@ out_bad:
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
 
-static u32 hashrnd __read_mostly;
+static siphash_key_t hashrnd __read_mostly;
 static __always_inline void __flow_hash_secret_init(void)
 {
 	net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
-					     u32 keyval)
+static const void *flow_keys_hash_start(const struct flow_keys *flow)
 {
-	return jhash2(words, length, keyval);
-}
-
-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
-{
-	const void *p = flow;
-
-	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
-	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
+	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
+	return &flow->FLOW_KEYS_HASH_START_FIELD;
 }
 
 static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
 {
 	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
-	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
 	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
 		     sizeof(*flow) - sizeof(flow->addrs));
 
@@ -1388,7 +1379,7 @@ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
 		diff -= sizeof(flow->addrs.tipckey);
 		break;
 	}
-	return (sizeof(*flow) - diff) / sizeof(u32);
+	return sizeof(*flow) - diff;
 }
 
 __be32 flow_get_u32_src(const struct flow_keys *flow)
@@ -1454,14 +1445,15 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
 	}
 }
 
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
+					const siphash_key_t *keyval)
 {
 	u32 hash;
 
 	__flow_hash_consistentify(keys);
 
-	hash = __flow_hash_words(flow_keys_hash_start(keys),
-				 flow_keys_hash_length(keys), keyval);
+	hash = siphash(flow_keys_hash_start(keys),
+		       flow_keys_hash_length(keys), keyval);
 	if (!hash)
 		hash = 1;
 
@@ -1471,12 +1463,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
 u32 flow_hash_from_keys(struct flow_keys *keys)
 {
 	__flow_hash_secret_init();
-	return __flow_hash_from_keys(keys, hashrnd);
+	return __flow_hash_from_keys(keys, &hashrnd);
 }
 EXPORT_SYMBOL(flow_hash_from_keys);
 
 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
-				  struct flow_keys *keys, u32 keyval)
+				  struct flow_keys *keys,
+				  const siphash_key_t *keyval)
 {
 	skb_flow_dissect_flow_keys(skb, keys,
 				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -1524,7 +1517,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
 			   &keys, NULL, 0, 0, 0,
 			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
-	return __flow_hash_from_keys(&keys, hashrnd);
+	return __flow_hash_from_keys(&keys, &hashrnd);
 }
 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
 
@@ -1544,13 +1537,14 @@ void __skb_get_hash(struct sk_buff *skb)
 
 	__flow_hash_secret_init();
 
-	hash = ___skb_get_hash(skb, &keys, hashrnd);
+	hash = ___skb_get_hash(skb, &keys, &hashrnd);
 
 	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+			   const siphash_key_t *perturb)
 {
 	struct flow_keys keys;
 
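jhash keyed by a 32-bit secret is weak twice over: the key space is small and the mixing function is not a pseudorandom function, so an attacker who can observe hash-dependent behaviour (RPS steering, reassembly buckets, and so on) can recover the secret and then force collisions at will. siphash is a keyed PRF with a 128-bit key; the BUILD_BUG_ON above enforces the SIPHASH_ALIGNMENT requirement on the hashed region. The keyed-hash idiom, as a kernel-style sketch (the demo_* names are illustrative):

	#include <linux/siphash.h>

	static siphash_key_t demo_key __read_mostly;

	static u32 demo_flow_hash(const void *start, size_t len)
	{
		u32 hash;

		/* one-time lazy init of the 128-bit secret, as
		 * __flow_hash_secret_init() does for hashrnd
		 */
		net_get_random_once(&demo_key, sizeof(demo_key));

		/* siphash() returns u64; the flow dissector truncates
		 * to u32 and reserves 0 to mean "no hash computed"
		 */
		hash = siphash(start, len, &demo_key);
		return hash ?: 1;
	}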
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index f93785e5833c..74cfb8b5ab33 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -88,11 +88,16 @@ static int bpf_lwt_input_reroute(struct sk_buff *skb)
 	int err = -EINVAL;
 
 	if (skb->protocol == htons(ETH_P_IP)) {
+		struct net_device *dev = skb_dst(skb)->dev;
 		struct iphdr *iph = ip_hdr(skb);
 
+		dev_hold(dev);
+		skb_dst_drop(skb);
 		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
-					   iph->tos, skb_dst(skb)->dev);
+					   iph->tos, dev);
+		dev_put(dev);
 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		skb_dst_drop(skb);
 		err = ipv6_stub->ipv6_route_input(skb);
 	} else {
 		err = -EAFNOSUPPORT;
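The ordering is the whole fix here: the skb's old dst may hold the only reference keeping the input device alive, so the device must be pinned before the dst is dropped, and the reroute must run against the pinned pointer rather than re-reading skb_dst(). Annotated, the sequence in the IPv4 branch is:

	struct net_device *dev = skb_dst(skb)->dev;

	dev_hold(dev);		/* 1. pin the device ...             */
	skb_dst_drop(skb);	/* 2. ... then release the old route */
	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
				   iph->tos, dev);	/* 3. attach a fresh dst */
	dev_put(dev);		/* 4. drop the temporary pin         */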
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6d3e4821b02d..39402840025e 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -246,11 +246,11 @@ static int __peernet2id(struct net *net, struct net *peer)
 }
 
 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
-			      struct nlmsghdr *nlh);
+			      struct nlmsghdr *nlh, gfp_t gfp);
 /* This function returns the id of a peer netns. If no id is assigned, one will
  * be allocated and returned.
  */
-int peernet2id_alloc(struct net *net, struct net *peer)
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 {
 	bool alloc = false, alive = false;
 	int id;
@@ -269,7 +269,7 @@ int peernet2id_alloc(struct net *net, struct net *peer)
 	id = __peernet2id_alloc(net, peer, &alloc);
 	spin_unlock_bh(&net->nsid_lock);
 	if (alloc && id >= 0)
-		rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
+		rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
 	if (alive)
 		put_net(peer);
 	return id;
@@ -479,6 +479,7 @@ struct net *copy_net_ns(unsigned long flags,
 
 	if (rv < 0) {
 put_userns:
+		key_remove_domain(net->key_domain);
 		put_user_ns(user_ns);
 		net_drop_ns(net);
 dec_ucounts:
@@ -533,7 +534,8 @@ static void unhash_nsid(struct net *net, struct net *last)
 		idr_remove(&tmp->netns_ids, id);
 		spin_unlock_bh(&tmp->nsid_lock);
 		if (id >= 0)
-			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
+			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
+					  GFP_KERNEL);
 		if (tmp == last)
 			break;
 	}
@@ -766,7 +768,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 	spin_unlock_bh(&net->nsid_lock);
 	if (err >= 0) {
 		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
-				  nlh);
+				  nlh, GFP_KERNEL);
 		err = 0;
 	} else if (err == -ENOSPC && nsid >= 0) {
 		err = -EEXIST;
@@ -1054,7 +1056,7 @@ end:
 }
 
 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
-			      struct nlmsghdr *nlh)
+			      struct nlmsghdr *nlh, gfp_t gfp)
 {
 	struct net_fill_args fillargs = {
 		.portid = portid,
@@ -1065,7 +1067,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
 	struct sk_buff *msg;
 	int err = -ENOMEM;
 
-	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+	msg = nlmsg_new(rtnl_net_get_size(), gfp);
 	if (!msg)
 		goto out;
 
@@ -1073,7 +1075,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
 	if (err < 0)
 		goto err_out;
 
-	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
+	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
 	return;
 
 err_out:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1ee6460f8275..c81cd80114d9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1523,7 +1523,7 @@ static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
 
 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
 				  const struct net_device *dev,
-				  struct net *src_net)
+				  struct net *src_net, gfp_t gfp)
 {
 	bool put_iflink = false;
 
@@ -1531,7 +1531,7 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
 		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
 
 		if (!net_eq(dev_net(dev), link_net)) {
-			int id = peernet2id_alloc(src_net, link_net);
+			int id = peernet2id_alloc(src_net, link_net, gfp);
 
 			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 				return -EMSGSIZE;
@@ -1589,7 +1589,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 			    int type, u32 pid, u32 seq, u32 change,
 			    unsigned int flags, u32 ext_filter_mask,
 			    u32 event, int *new_nsid, int new_ifindex,
-			    int tgt_netnsid)
+			    int tgt_netnsid, gfp_t gfp)
 {
 	struct ifinfomsg *ifm;
 	struct nlmsghdr *nlh;
@@ -1681,7 +1681,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 			goto nla_put_failure;
 	}
 
-	if (rtnl_fill_link_netnsid(skb, dev, src_net))
+	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
 		goto nla_put_failure;
 
 	if (new_nsid &&
@@ -2001,7 +2001,7 @@ walk_entries:
 					       NETLINK_CB(cb->skb).portid,
 					       nlh->nlmsg_seq, 0, flags,
 					       ext_filter_mask, 0, NULL, 0,
-					       netnsid);
+					       netnsid, GFP_KERNEL);
 
 			if (err < 0) {
 				if (likely(skb->len))
@@ -2355,6 +2355,7 @@ static int do_set_master(struct net_device *dev, int ifindex,
 			err = ops->ndo_del_slave(upper_dev, dev);
 			if (err)
 				return err;
+			netdev_update_lockdep_key(dev);
 		} else {
 			return -EOPNOTSUPP;
 		}
@@ -3359,7 +3360,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	err = rtnl_fill_ifinfo(nskb, dev, net,
 			       RTM_NEWLINK, NETLINK_CB(skb).portid,
 			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
-			       0, NULL, 0, netnsid);
+			       0, NULL, 0, netnsid, GFP_KERNEL);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
 		WARN_ON(err == -EMSGSIZE);
@@ -3471,7 +3472,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
 
 	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
 			       type, 0, 0, change, 0, 0, event,
			       new_nsid, new_ifindex, -1, flags);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -3916,7 +3917,7 @@ static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
 	ndm = nlmsg_data(nlh);
 	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
 	    ndm->ndm_flags || ndm->ndm_type) {
-		NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request");
+		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
 		return -EINVAL;
 	}
 
diff --git a/net/core/sock.c b/net/core/sock.c
index a515392ba84b..ac78a570e43a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1127,7 +1127,7 @@ set_rcvbuf:
 		break;
 		}
 	case SO_INCOMING_CPU:
-		sk->sk_incoming_cpu = val;
+		WRITE_ONCE(sk->sk_incoming_cpu, val);
 		break;
 
 	case SO_CNX_ADVICE:
@@ -1476,7 +1476,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 	case SO_INCOMING_CPU:
-		v.val = sk->sk_incoming_cpu;
+		v.val = READ_ONCE(sk->sk_incoming_cpu);
 		break;
 
 	case SO_MEMINFO:
@@ -3600,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
 	struct sock *sk = p;
 
-	return !skb_queue_empty(&sk->sk_receive_queue) ||
+	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
 	       sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d9b4200ed12d..0d8f782c25cc 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -117,7 +117,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 							    inet->inet_daddr,
 							    inet->inet_sport,
 							    inet->inet_dport);
-	inet->inet_id = dp->dccps_iss ^ jiffies;
+	inet->inet_id = prandom_u32();
 
 	err = dccp_connect(sk);
 	rt = NULL;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 0ea75286abf4..3349ea81f901 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1205,7 +1205,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct dn_scp *scp = DN_SK(sk);
 	__poll_t mask = datagram_poll(file, sock, wait);
 
-	if (!skb_queue_empty(&scp->other_receive_queue))
+	if (!skb_queue_empty_lockless(&scp->other_receive_queue))
 		mask |= EPOLLRDBAND;
 
 	return mask;
diff --git a/net/dsa/master.c b/net/dsa/master.c
index a8e52c9967f4..3255dfc97f86 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -310,8 +310,6 @@ static void dsa_master_reset_mtu(struct net_device *dev)
 	rtnl_unlock();
 }
 
-static struct lock_class_key dsa_master_addr_list_lock_key;
-
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
 	int ret;
@@ -325,9 +323,6 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 	wmb();
 
 	dev->dsa_ptr = cpu_dp;
-	lockdep_set_class(&dev->addr_list_lock,
-			  &dsa_master_addr_list_lock_key);
-
 	ret = dsa_master_ethtool_setup(dev);
 	if (ret)
 		return ret;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 75d58229a4bd..028e65f4b5ba 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1341,15 +1341,6 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
 	return ret;
 }
 
-static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
-static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
-					    struct netdev_queue *txq,
-					    void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock,
-			  &dsa_slave_netdev_xmit_lock_key);
-}
-
 int dsa_slave_suspend(struct net_device *slave_dev)
 {
 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
@@ -1433,9 +1424,6 @@ int dsa_slave_create(struct dsa_port *port)
 	slave_dev->max_mtu = ETH_MAX_MTU;
 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
 
-	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
-				 NULL);
-
 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
 	slave_dev->dev.of_node = port->dn;
 	slave_dev->vlan_features = master->vlan_features;
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 3297e7fa9945..c0b107cdd715 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -58,13 +58,6 @@ static const struct header_ops lowpan_header_ops = {
 	.create	= lowpan_header_create,
 };
 
-static int lowpan_dev_init(struct net_device *ldev)
-{
-	netdev_lockdep_set_classes(ldev);
-
-	return 0;
-}
-
 static int lowpan_open(struct net_device *dev)
 {
 	if (!open_count)
@@ -96,7 +89,6 @@ static int lowpan_get_iflink(const struct net_device *dev)
 }
 
 static const struct net_device_ops lowpan_netdev_ops = {
-	.ndo_init		= lowpan_dev_init,
 	.ndo_start_xmit		= lowpan_xmit,
 	.ndo_open		= lowpan_open,
 	.ndo_stop		= lowpan_stop,
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 9a0fe0c2fa02..4a8550c49202 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
-	inet->inet_id = jiffies;
+	inet->inet_id = prandom_u32();
 
 	sk_dst_set(sk, &rt->dst);
 	err = 0;
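This is the "stop leaking jiffies on the wire" part of the pull: seeding the IP ID counter from jiffies here (the dccp hunk above and the tcp_ipv4 hunks below made the same mistake with write_seq ^ jiffies) puts low bits of the machine's clock into the first packets of every connection, letting an off-path observer estimate uptime and correlate flows from the same host. prandom_u32() keeps the per-socket increment behaviour but makes the starting point unpredictable. The contrast, annotated:

	/* before: low jiffies bits are guessable and encode the clock */
	inet->inet_id = jiffies;		/* leaky */

	/* after: unpredictable base; IDs still increment per packet */
	inet->inet_id = prandom_u32();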
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index dde77f72e03e..71c78d223dfd 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1148,7 +1148,7 @@ void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric)
 	if (!(dev->flags & IFF_UP) ||
 	    ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
 	    ipv4_is_zeronet(prefix) ||
-	    prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32)
+	    (prefix == ifa->ifa_local && ifa->ifa_prefixlen == 32))
 		return;
 
 	/* add the new */
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 97824864e40d..83fb00153018 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -240,7 +240,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
 			return -1;
 
 		score = sk->sk_family == PF_INET ? 2 : 1;
-		if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 			score++;
 	}
 	return score;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 52690bb3e40f..10636fb6093e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -509,9 +509,9 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 	key = &tun_info->key;
 	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
 		goto err_free_skb;
-	md = ip_tunnel_info_opts(tun_info);
-	if (!md)
+	if (tun_info->options_len < sizeof(*md))
 		goto err_free_skb;
+	md = ip_tunnel_info_opts(tun_info);
 
 	/* ERSPAN has fixed 8 byte GRE header */
 	version = md->version;
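ip_tunnel_info_opts() merely returns a pointer just past struct ip_tunnel_info, so it can never be NULL and the old check was dead code; what actually needs validating is options_len, before the pointer is dereferenced as ERSPAN metadata. The general shape, validate the length before trusting the cast, as an illustrative sketch (the blob/md types are stand-ins):

	#include <stddef.h>

	struct blob { size_t options_len; /* option bytes follow */ };
	struct md   { unsigned char version; /* ... */ };

	/* A cast can never fail, so test the length, not the pointer. */
	static const struct md *get_md(const struct blob *b)
	{
		if (b->options_len < sizeof(struct md))
			return NULL;
		return (const struct md *)(b + 1);
	}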
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 814b9b8882a0..3d8baaaf7086 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -645,11 +645,12 @@ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
 EXPORT_SYMBOL(ip_fraglist_prepare);
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
-		  unsigned int ll_rs, unsigned int mtu,
+		  unsigned int ll_rs, unsigned int mtu, bool DF,
 		  struct ip_frag_state *state)
 {
 	struct iphdr *iph = ip_hdr(skb);
 
+	state->DF = DF;
 	state->hlen = hlen;
 	state->ll_rs = ll_rs;
 	state->mtu = mtu;
@@ -668,9 +669,6 @@ static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
 	/* Copy the flags to each fragment. */
 	IPCB(to)->flags = IPCB(from)->flags;
 
-	if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
-		state->iph->frag_off |= htons(IP_DF);
-
 	/* ANK: dirty, but effective trick. Upgrade options only if
 	 * the segment to be fragmented was THE FIRST (otherwise,
 	 * options are already fixed) and make it ONCE
@@ -738,6 +736,8 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
 	 */
 	iph = ip_hdr(skb2);
 	iph->frag_off = htons((state->offset >> 3));
+	if (state->DF)
+		iph->frag_off |= htons(IP_DF);
 
 	/*
 	 * Added AC : If we are fragmenting a fragment that's not the
 	 * last fragment then keep MF on each fragment
@@ -883,7 +883,8 @@ slow_path:
 	 * Fragment the datagram.
 	 */
 
-	ip_frag_init(skb, hlen, ll_rs, mtu, &state);
+	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
+		     &state);
 
 	/*
 	 * Keep copying data until we run out.
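Previously the DF bit forced by IPSKB_FRAG_PMTU was OR'd into the first fragment's header only, via ip_frag_ipcb() touching state->iph; fragments built later by ip_frag_next() went out with DF clear. Recording the decision once in ip_frag_state and applying it per fragment makes every fragment consistent. The shape of the fix, as an illustrative sketch (frag_state_like is a stand-in for struct ip_frag_state):

	struct frag_state_like { bool DF; /* plus hlen, mtu, offset, ... */ };

	static void emit_fragment(const struct frag_state_like *state,
				  struct iphdr *iph, unsigned int offset)
	{
		iph->frag_off = htons(offset >> 3);
		if (state->DF)		/* every fragment, not just the first */
			iph->frag_off |= htons(IP_DF);
	}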
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 42187a3b82f4..d8876f0e9672 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -584,7 +584,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
584 } 584 }
585 /* This barrier is coupled with smp_wmb() in tcp_reset() */ 585 /* This barrier is coupled with smp_wmb() in tcp_reset() */
586 smp_rmb(); 586 smp_rmb();
587 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 587 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
588 mask |= EPOLLERR; 588 mask |= EPOLLERR;
589 589
590 return mask; 590 return mask;
@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 	if (unlikely(flags & MSG_ERRQUEUE))
 		return inet_recv_error(sk, msg, len, addr_len);
 
-	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
 	    (sk->sk_state == TCP_ESTABLISHED))
 		sk_busy_loop(sk, nonblock);
 
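The skb_queue_empty() to skb_queue_empty_lockless() conversions throughout this pull address KCSAN reports: poll() and busy-poll paths peek at queues without holding the queue lock. The lockless variant differs only in how the head pointer is loaded; a sketch, assuming the list layout from include/linux/skbuff.h:

	static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
	{
		/* READ_ONCE() forbids load tearing and refetching; an empty
		 * sk_buff_head points back at itself */
		return READ_ONCE(list->next) == (const struct sk_buff *) list;
	}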
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6be568334848..67b2dc7a1727 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -303,7 +303,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 						 inet->inet_daddr);
 	}
 
-	inet->inet_id = tp->write_seq ^ jiffies;
+	inet->inet_id = prandom_u32();
 
 	if (tcp_fastopen_defer_connect(sk, &err))
 		return err;
@@ -1450,7 +1450,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 	if (inet_opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
-	newinet->inet_id = newtp->write_seq ^ jiffies;
+	newinet->inet_id = prandom_u32();
 
 	if (!dst) {
 		dst = inet_csk_route_child_sock(sk, newsk, req);
@@ -2681,7 +2681,7 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
 	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
 
-	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
 	net->ipv4.sysctl_tcp_sack = 1;
 	net->ipv4.sysctl_tcp_window_scaling = 1;
 	net->ipv4.sysctl_tcp_timestamps = 1;
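The inet_id changes ("inet: stop leaking jiffies on the wire") matter because inet_id seeds the per-socket IP ID counter, so xor-ing jiffies into the seed put low bits of the system clock into every datagram. Roughly how the counter is consumed (a sketch of the ip_select_ident() fast path for connected sockets, not the exact code):

	/* sketch: iph->id starts from inet_id and increments per packet,
	 * so a clock-derived seed is directly observable on the wire */
	iph->id = htons(inet->inet_id);
	inet->inet_id += segs;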
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 14bc654b6842..1d58ce829dca 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -388,7 +388,7 @@ static int compute_score(struct sock *sk, struct net *net,
 		return -1;
 	score += 4;
 
-	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 		score++;
 	return score;
 }
@@ -1316,6 +1316,20 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
 	scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
 
+static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
+{
+	/* We come here after udp_lib_checksum_complete() returned 0.
+	 * This means that __skb_checksum_complete() might have
+	 * set skb->csum_valid to 1.
+	 * On 64bit platforms, we can set csum_unnecessary
+	 * to true, but only if the skb is not shared.
+	 */
+#if BITS_PER_LONG == 64
+	if (!skb_shared(skb))
+		udp_skb_scratch(skb)->csum_unnecessary = true;
+#endif
+}
+
 static int udp_skb_truesize(struct sk_buff *skb)
 {
 	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
@@ -1550,10 +1564,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
 			*total += skb->truesize;
 			kfree_skb(skb);
 		} else {
-			/* the csum related bits could be changed, refresh
-			 * the scratch area
-			 */
-			udp_set_dev_scratch(skb);
+			udp_skb_csum_unnecessary_set(skb);
 			break;
 		}
 	}
@@ -1577,7 +1588,7 @@ static int first_packet_length(struct sock *sk)
 
 	spin_lock_bh(&rcvq->lock);
 	skb = __first_packet_length(sk, rcvq, &total);
-	if (!skb && !skb_queue_empty(sk_queue)) {
+	if (!skb && !skb_queue_empty_lockless(sk_queue)) {
 		spin_lock(&sk_queue->lock);
 		skb_queue_splice_tail_init(sk_queue, rcvq);
 		spin_unlock(&sk_queue->lock);
@@ -1650,7 +1661,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
 			return skb;
 		}
 
-		if (skb_queue_empty(sk_queue)) {
+		if (skb_queue_empty_lockless(sk_queue)) {
 			spin_unlock_bh(&queue->lock);
 			goto busy_check;
 		}
@@ -1676,7 +1687,7 @@ busy_check:
 				break;
 
 			sk_busy_loop(sk, flags & MSG_DONTWAIT);
-		} while (!skb_queue_empty(sk_queue));
+		} while (!skb_queue_empty_lockless(sk_queue));
 
 		/* sk_queue is empty, reader_queue may contain peeked packets */
 	} while (timeo &&
@@ -2712,7 +2723,7 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	__poll_t mask = datagram_poll(file, sock, wait);
 	struct sock *sk = sock->sk;
 
-	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+	if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Check for false positives due to checksum errors */
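The READ_ONCE() on sk_incoming_cpu in the compute_score() helpers pairs with a lockless writer: the field is rewritten on every received packet without the lookup lock held. The writer side presumably mirrors it with WRITE_ONCE(), along these lines:

	static inline void sk_incoming_cpu_update(struct sock *sk)
	{
		int cpu = raw_smp_processor_id();

		/* avoid dirtying the cache line when nothing changed;
		 * WRITE_ONCE() pairs with the READ_ONCE() in the
		 * compute_score() helpers above */
		if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
			WRITE_ONCE(sk->sk_incoming_cpu, cpu);
	}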
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 783f3c1466da..2fc079284ca4 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <net/ipv6.h>
 #include <net/ipv6_stubs.h>
+#include <net/addrconf.h>
 #include <net/ip.h>
 
 /* if ipv6 module registers this function is used by xfrm to force all
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index cf60fae9533b..fbe9d4295eac 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -105,7 +105,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
 			return -1;
 
 		score = 1;
-		if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 			score++;
 	}
 	return score;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 787d9f2a6e99..923034c52ce4 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -980,9 +980,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 		dsfield = key->tos;
 		if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
 			goto tx_err;
-		md = ip_tunnel_info_opts(tun_info);
-		if (!md)
+		if (tun_info->options_len < sizeof(*md))
 			goto tx_err;
+		md = ip_tunnel_info_opts(tun_info);
 
 		tun_id = tunnel_id_to_key32(key->tun_id);
 		if (md->version == 1) {
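In both the ip_gre.c and ip6_gre.c hunks the deleted NULL test was dead code: ip_tunnel_info_opts() just returns a pointer past the fixed part of the tun_info, so it cannot be NULL. The real hazard is an options area shorter than struct erspan_metadata, which the options_len comparison now rejects before md is dereferenced. The safe pattern, sketched:

	struct erspan_metadata *md;

	if (tun_info->options_len < sizeof(*md))
		goto tx_err;			/* opts too short: reject  */
	md = ip_tunnel_info_opts(tun_info);	/* points into tun_info tail */
	version = md->version;			/* now known to be in bounds */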
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6324d3a8cb53..9fec580c968e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -135,7 +135,7 @@ static int compute_score(struct sock *sk, struct net *net,
 		return -1;
 	score++;
 
-	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
 		score++;
 
 	return score;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index fd5ac2788e45..d3b520b9b2c9 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -56,7 +56,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
 {
 	eth_hw_addr_random(dev);
 	eth_broadcast_addr(dev->broadcast);
-	netdev_lockdep_set_classes(dev);
 
 	return 0;
 }
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 4515056ef1c2..f9b16f2b2219 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -193,21 +193,29 @@ struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *
 
 	mutex_lock(&__ip_vs_app_mutex);
 
+	/* increase the module use count */
+	if (!ip_vs_use_count_inc()) {
+		err = -ENOENT;
+		goto out_unlock;
+	}
+
 	list_for_each_entry(a, &ipvs->app_list, a_list) {
 		if (!strcmp(app->name, a->name)) {
 			err = -EEXIST;
+			/* decrease the module use count */
+			ip_vs_use_count_dec();
 			goto out_unlock;
 		}
 	}
 	a = kmemdup(app, sizeof(*app), GFP_KERNEL);
 	if (!a) {
 		err = -ENOMEM;
+		/* decrease the module use count */
+		ip_vs_use_count_dec();
 		goto out_unlock;
 	}
 	INIT_LIST_HEAD(&a->incs_list);
 	list_add(&a->a_list, &ipvs->app_list);
-	/* increase the module use count */
-	ip_vs_use_count_inc();
 
 out_unlock:
 	mutex_unlock(&__ip_vs_app_mutex);
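All the ip_vs_use_count_inc() call sites in this series now treat the helper as fallible and take the reference before any state is published. That only makes sense if it is a try-get on the module refcount; a plausible definition, assumed for illustration rather than quoted from ip_vs.h:

	static inline bool ip_vs_use_count_inc(void)
	{
		/* fails once the module has started unloading, so callers
		 * must check the result and unwind */
		return try_module_get(THIS_MODULE);
	}

	static inline void ip_vs_use_count_dec(void)
	{
		module_put(THIS_MODULE);
	}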
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 8b48e7ce1c2c..3cccc88ef817 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -93,7 +93,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net,
 static void update_defense_level(struct netns_ipvs *ipvs)
 {
 	struct sysinfo i;
-	static int old_secure_tcp = 0;
 	int availmem;
 	int nomem;
 	int to_change = -1;
@@ -174,35 +173,35 @@ static void update_defense_level(struct netns_ipvs *ipvs)
 	spin_lock(&ipvs->securetcp_lock);
 	switch (ipvs->sysctl_secure_tcp) {
 	case 0:
-		if (old_secure_tcp >= 2)
+		if (ipvs->old_secure_tcp >= 2)
 			to_change = 0;
 		break;
 	case 1:
 		if (nomem) {
-			if (old_secure_tcp < 2)
+			if (ipvs->old_secure_tcp < 2)
 				to_change = 1;
 			ipvs->sysctl_secure_tcp = 2;
 		} else {
-			if (old_secure_tcp >= 2)
+			if (ipvs->old_secure_tcp >= 2)
 				to_change = 0;
 		}
 		break;
 	case 2:
 		if (nomem) {
-			if (old_secure_tcp < 2)
+			if (ipvs->old_secure_tcp < 2)
 				to_change = 1;
 		} else {
-			if (old_secure_tcp >= 2)
+			if (ipvs->old_secure_tcp >= 2)
 				to_change = 0;
 			ipvs->sysctl_secure_tcp = 1;
 		}
 		break;
 	case 3:
-		if (old_secure_tcp < 2)
+		if (ipvs->old_secure_tcp < 2)
 			to_change = 1;
 		break;
 	}
-	old_secure_tcp = ipvs->sysctl_secure_tcp;
+	ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
 	if (to_change >= 0)
 		ip_vs_protocol_timeout_change(ipvs,
 					      ipvs->sysctl_secure_tcp > 1);
@@ -1275,7 +1274,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
 	struct ip_vs_service *svc = NULL;
 
 	/* increase the module use count */
-	ip_vs_use_count_inc();
+	if (!ip_vs_use_count_inc())
+		return -ENOPROTOOPT;
 
 	/* Lookup the scheduler by 'u->sched_name' */
 	if (strcmp(u->sched_name, "none")) {
@@ -2435,9 +2435,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 	if (copy_from_user(arg, user, len) != 0)
 		return -EFAULT;
 
-	/* increase the module use count */
-	ip_vs_use_count_inc();
-
 	/* Handle daemons since they have another lock */
 	if (cmd == IP_VS_SO_SET_STARTDAEMON ||
 	    cmd == IP_VS_SO_SET_STOPDAEMON) {
@@ -2450,13 +2447,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 			ret = -EINVAL;
 			if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
 				    sizeof(cfg.mcast_ifn)) <= 0)
-				goto out_dec;
+				return ret;
 			cfg.syncid = dm->syncid;
 			ret = start_sync_thread(ipvs, &cfg, dm->state);
 		} else {
 			ret = stop_sync_thread(ipvs, dm->state);
 		}
-		goto out_dec;
+		return ret;
 	}
 
 	mutex_lock(&__ip_vs_mutex);
@@ -2551,10 +2548,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
 out_unlock:
 	mutex_unlock(&__ip_vs_mutex);
-out_dec:
-	/* decrease the module use count */
-	ip_vs_use_count_dec();
-
 	return ret;
 }
 
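The old_secure_tcp change fixes namespace bleed: a single function-local static held the previous secure_tcp mode for every network namespace, so containers could toggle each other's defense level. The cure is the standard one of moving such state next to the sysctl it shadows; the assumed placement (sketch of the companion ip_vs.h change, shown for orientation only):

	struct netns_ipvs {
		/* existing fields elided */
		int	sysctl_secure_tcp;
		int	old_secure_tcp;	/* last mode acted on, now per netns */
		/* existing fields elided */
	};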
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 8e104dff7abc..166c669f0763 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -68,7 +68,8 @@ int register_ip_vs_pe(struct ip_vs_pe *pe)
 	struct ip_vs_pe *tmp;
 
 	/* increase the module use count */
-	ip_vs_use_count_inc();
+	if (!ip_vs_use_count_inc())
+		return -ENOENT;
 
 	mutex_lock(&ip_vs_pe_mutex);
 	/* Make sure that the pe with this name doesn't exist
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 2f9d5cd5daee..d4903723be7e 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -179,7 +179,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	}
 
 	/* increase the module use count */
-	ip_vs_use_count_inc();
+	if (!ip_vs_use_count_inc())
+		return -ENOENT;
 
 	mutex_lock(&ip_vs_sched_mutex);
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index a4a78c4b06de..8dc892a9dc91 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1762,6 +1762,10 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
 		  sizeof(struct ip_vs_sync_conn_v0));
 
+	/* increase the module use count */
+	if (!ip_vs_use_count_inc())
+		return -ENOPROTOOPT;
+
 	/* Do not hold one mutex and then to block on another */
 	for (;;) {
 		rtnl_lock();
@@ -1892,9 +1896,6 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 	mutex_unlock(&ipvs->sync_mutex);
 	rtnl_unlock();
 
-	/* increase the module use count */
-	ip_vs_use_count_inc();
-
 	return 0;
 
 out:
@@ -1924,11 +1925,17 @@ out:
 		}
 		kfree(ti);
 	}
+
+	/* decrease the module use count */
+	ip_vs_use_count_dec();
 	return result;
 
 out_early:
 	mutex_unlock(&ipvs->sync_mutex);
 	rtnl_unlock();
+
+	/* decrease the module use count */
+	ip_vs_use_count_dec();
 	return result;
 }
 
1934 1941
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 132f5228b431..128245efe84a 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -202,6 +202,8 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
 	int err;
 
+	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+
 	err = rhashtable_insert_fast(&flow_table->rhashtable,
 				     &flow->tuplehash[0].node,
 				     nf_flow_offload_rhash_params);
@@ -218,7 +220,6 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 		return err;
 	}
 
-	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
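Moving the timeout assignment above rhashtable_insert_fast() is the usual initialize-before-publish rule: the moment the first tuplehash node lands in the table, the GC and datapath can see the flow, and an uninitialized timeout of zero looks already expired. The bug pattern, reduced to a sketch with hypothetical names:

	/* must finish writing the object... */
	obj->deadline = (u32)jiffies + TIMEOUT;
	/* ...before making it reachable, or a concurrent reaper can
	 * observe deadline == 0 and tear the object down under us */
	err = publish(obj);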
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index e546f759b7a7..ad783f4840ef 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -347,7 +347,7 @@ int nft_flow_rule_offload_commit(struct net *net)
 
 			policy = nft_trans_chain_policy(trans);
 			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
-						     FLOW_BLOCK_BIND);
+						     FLOW_BLOCK_UNBIND);
 			break;
 		case NFT_MSG_NEWRULE:
 			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 22a80eb60222..5cb2d8908d2a 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -161,13 +161,21 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 
 	switch (priv->offset) {
 	case offsetof(struct ethhdr, h_source):
+		if (priv->len != ETH_ALEN)
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
 				  src, ETH_ALEN, reg);
 		break;
 	case offsetof(struct ethhdr, h_dest):
+		if (priv->len != ETH_ALEN)
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
 				  dst, ETH_ALEN, reg);
 		break;
+	default:
+		return -EOPNOTSUPP;
 	}
 
 	return 0;
@@ -181,14 +189,23 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
 
 	switch (priv->offset) {
 	case offsetof(struct iphdr, saddr):
+		if (priv->len != sizeof(struct in_addr))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
 				  sizeof(struct in_addr), reg);
 		break;
 	case offsetof(struct iphdr, daddr):
+		if (priv->len != sizeof(struct in_addr))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
 				  sizeof(struct in_addr), reg);
 		break;
 	case offsetof(struct iphdr, protocol):
+		if (priv->len != sizeof(__u8))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
 				  sizeof(__u8), reg);
 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -208,14 +225,23 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
 
 	switch (priv->offset) {
 	case offsetof(struct ipv6hdr, saddr):
+		if (priv->len != sizeof(struct in6_addr))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
 				  sizeof(struct in6_addr), reg);
 		break;
 	case offsetof(struct ipv6hdr, daddr):
+		if (priv->len != sizeof(struct in6_addr))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
 				  sizeof(struct in6_addr), reg);
 		break;
 	case offsetof(struct ipv6hdr, nexthdr):
+		if (priv->len != sizeof(__u8))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
 				  sizeof(__u8), reg);
 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -255,10 +281,16 @@ static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
 
 	switch (priv->offset) {
 	case offsetof(struct tcphdr, source):
+		if (priv->len != sizeof(__be16))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
 				  sizeof(__be16), reg);
 		break;
 	case offsetof(struct tcphdr, dest):
+		if (priv->len != sizeof(__be16))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
 				  sizeof(__be16), reg);
 		break;
@@ -277,10 +309,16 @@ static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
 
 	switch (priv->offset) {
 	case offsetof(struct udphdr, source):
+		if (priv->len != sizeof(__be16))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
 				  sizeof(__be16), reg);
 		break;
 	case offsetof(struct udphdr, dest):
+		if (priv->len != sizeof(__be16))
+			return -EOPNOTSUPP;
+
 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
 				  sizeof(__be16), reg);
 		break;
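Every new guard in nft_payload.c has the same shape: hardware can only be asked to match a whole field, so an expression whose length is not exactly the field size must stay in software instead of being silently truncated into a wrong offload. A hypothetical helper capturing the repeated idiom:

	/* hypothetical, for illustration: the diff open-codes this test */
	static int nft_payload_offload_len_ok(u32 match_len, u32 field_len)
	{
		return match_len == field_len ? 0 : -EOPNOTSUPP;
	}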
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index c4f54ad2b98a..58d5373c513c 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -64,28 +64,6 @@ static DEFINE_SPINLOCK(nr_list_lock);
 static const struct proto_ops nr_proto_ops;
 
 /*
- * NETROM network devices are virtual network devices encapsulating NETROM
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key nr_netdev_xmit_lock_key;
-static struct lock_class_key nr_netdev_addr_lock_key;
-
-static void nr_set_lockdep_one(struct net_device *dev,
-			       struct netdev_queue *txq,
-			       void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
-}
-
-static void nr_set_lockdep_key(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
-	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
-}
-
-/*
  * Socket removal during an interrupt is now safe.
  */
 static void nr_remove_socket(struct sock *sk)
@@ -1414,7 +1392,6 @@ static int __init nr_proto_init(void)
 			free_netdev(dev);
 			goto fail;
 		}
-		nr_set_lockdep_key(dev);
 		dev_nr[i] = dev;
 	}
 
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ccdd790e163a..28604414dec1 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -554,11 +554,11 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_state == LLCP_LISTEN)
 		return llcp_accept_poll(sk);
 
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	if (sk->sk_state == LLCP_CLOSED)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f30e406fbec5..d8c364d637b1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1881,7 +1881,7 @@ static struct genl_family dp_datapath_genl_family __ro_after_init = {
 /* Called with ovs_mutex or RCU read lock. */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 				   struct net *net, u32 portid, u32 seq,
-				   u32 flags, u8 cmd)
+				   u32 flags, u8 cmd, gfp_t gfp)
 {
 	struct ovs_header *ovs_header;
 	struct ovs_vport_stats vport_stats;
@@ -1902,7 +1902,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 		goto nla_put_failure;
 
 	if (!net_eq(net, dev_net(vport->dev))) {
-		int id = peernet2id_alloc(net, dev_net(vport->dev));
+		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
 
 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
 			goto nla_put_failure;
@@ -1943,11 +1943,12 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
 	struct sk_buff *skb;
 	int retval;
 
-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
+	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
+					 GFP_KERNEL);
 	BUG_ON(retval < 0);
 
 	return skb;
@@ -2089,7 +2090,7 @@ restart:
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_NEW);
+				      OVS_VPORT_CMD_NEW, GFP_KERNEL);
 
 	new_headroom = netdev_get_fwd_headroom(vport->dev);
 
@@ -2150,7 +2151,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_SET);
+				      OVS_VPORT_CMD_SET, GFP_KERNEL);
 	BUG_ON(err < 0);
 
 	ovs_unlock();
@@ -2190,7 +2191,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_DEL);
+				      OVS_VPORT_CMD_DEL, GFP_KERNEL);
 	BUG_ON(err < 0);
 
 	/* the vport deletion may trigger dp headroom update */
@@ -2237,7 +2238,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto exit_unlock_free;
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_GET);
+				      OVS_VPORT_CMD_GET, GFP_ATOMIC);
 	BUG_ON(err < 0);
 	rcu_read_unlock();
 
@@ -2273,7 +2274,8 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 					    NETLINK_CB(cb->skb).portid,
 					    cb->nlh->nlmsg_seq,
 					    NLM_F_MULTI,
-					    OVS_VPORT_CMD_GET) < 0)
+					    OVS_VPORT_CMD_GET,
+					    GFP_ATOMIC) < 0)
 			goto out;
 
 		j++;
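The gfp_t parameter threaded through ovs_vport_cmd_fill_info() exists because the function is reachable from two locking contexts: command handlers that hold ovs_mutex (may sleep, GFP_KERNEL) and the get/dump paths under rcu_read_lock() (must not sleep, GFP_ATOMIC). peernet2id_alloc() can allocate a netns id, so the caller's context has to reach it. Illustrative only; the patch passes the flag explicitly rather than probing lock state:

	/* sleeping allowed: holding ovs_mutex */
	err = ovs_vport_cmd_fill_info(vport, reply, net, portid, seq, 0,
				      cmd, GFP_KERNEL);
	/* atomic context: inside rcu_read_lock() */
	err = ovs_vport_cmd_fill_info(vport, reply, net, portid, seq, 0,
				      cmd, GFP_ATOMIC);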
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 21c90d3a7ebf..58a7b8312c28 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -137,7 +137,7 @@ static void do_setup(struct net_device *netdev)
 	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
 			      IFF_NO_QUEUE;
 	netdev->needs_free_netdev = true;
-	netdev->priv_destructor = internal_dev_destructor;
+	netdev->priv_destructor = NULL;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
 	netdev->rtnl_link_ops = &internal_dev_link_ops;
 
@@ -159,7 +159,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 	struct internal_dev *internal_dev;
 	struct net_device *dev;
 	int err;
-	bool free_vport = true;
 
 	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
 	if (IS_ERR(vport)) {
@@ -190,10 +189,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
 	rtnl_lock();
 	err = register_netdevice(vport->dev);
-	if (err) {
-		free_vport = false;
+	if (err)
 		goto error_unlock;
-	}
+	vport->dev->priv_destructor = internal_dev_destructor;
 
 	dev_set_promiscuity(vport->dev, 1);
 	rtnl_unlock();
@@ -207,8 +205,7 @@ error_unlock:
 error_free_netdev:
 	free_netdev(dev);
 error_free_vport:
-	if (free_vport)
-		ovs_vport_free(vport);
+	ovs_vport_free(vport);
 error:
 	return ERR_PTR(err);
 }
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 96ea9f254ae9..76d499f6af9a 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -338,9 +338,9 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
 
 	if (sk->sk_state == TCP_CLOSE)
 		return EPOLLERR;
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	if (!skb_queue_empty(&pn->ctrlreq_queue))
+	if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
 		mask |= EPOLLPRI;
 	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
 		return EPOLLHUP;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index f0e9ccf472a9..6a0df7c8a939 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -65,28 +65,6 @@ static const struct proto_ops rose_proto_ops;
 ax25_address rose_callsign;
 
 /*
- * ROSE network devices are virtual network devices encapsulating ROSE
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key rose_netdev_xmit_lock_key;
-static struct lock_class_key rose_netdev_addr_lock_key;
-
-static void rose_set_lockdep_one(struct net_device *dev,
-				 struct netdev_queue *txq,
-				 void *_unused)
-{
-	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
-}
-
-static void rose_set_lockdep_key(struct net_device *dev)
-{
-	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
-	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
-}
-
-/*
  * Convert a ROSE address into text.
  */
 char *rose2asc(char *buf, const rose_address *addr)
@@ -1533,7 +1511,6 @@ static int __init rose_proto_init(void)
 			free_netdev(dev);
 			goto fail;
 		}
-		rose_set_lockdep_key(dev);
 		dev_rose[i] = dev;
 	}
 
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index ecc17dabec8f..7c7d10f2e0c1 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -601,6 +601,7 @@ struct rxrpc_call {
 	int			debug_id;	/* debug ID for printks */
 	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
 	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */
+	bool			rx_pkt_last;	/* Current recvmsg packet is last */
 
 	/* Rx/Tx circular buffer, depending on phase.
 	 *
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index a4090797c9b2..8578c39ec839 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -267,11 +267,13 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
  */
 static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 			     u8 *_annotation,
-			     unsigned int *_offset, unsigned int *_len)
+			     unsigned int *_offset, unsigned int *_len,
+			     bool *_last)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	unsigned int offset = sizeof(struct rxrpc_wire_header);
 	unsigned int len;
+	bool last = false;
 	int ret;
 	u8 annotation = *_annotation;
 	u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
@@ -281,6 +283,8 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 	len = skb->len - offset;
 	if (subpacket < sp->nr_subpackets - 1)
 		len = RXRPC_JUMBO_DATALEN;
+	else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
+		last = true;
 
 	if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
 		ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -291,6 +295,7 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 
 	*_offset = offset;
 	*_len = len;
+	*_last = last;
 	call->security->locate_data(call, skb, _offset, _len);
 	return 0;
 }
@@ -309,7 +314,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 	rxrpc_serial_t serial;
 	rxrpc_seq_t hard_ack, top, seq;
 	size_t remain;
-	bool last;
+	bool rx_pkt_last;
 	unsigned int rx_pkt_offset, rx_pkt_len;
 	int ix, copy, ret = -EAGAIN, ret2;
 
@@ -319,6 +324,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 
 	rx_pkt_offset = call->rx_pkt_offset;
 	rx_pkt_len = call->rx_pkt_len;
+	rx_pkt_last = call->rx_pkt_last;
 
 	if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
 		seq = call->rx_hard_ack;
@@ -329,6 +335,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 	/* Barriers against rxrpc_input_data(). */
 	hard_ack = call->rx_hard_ack;
 	seq = hard_ack + 1;
+
 	while (top = smp_load_acquire(&call->rx_top),
 	       before_eq(seq, top)
 	      ) {
@@ -356,7 +363,8 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 		if (rx_pkt_offset == 0) {
 			ret2 = rxrpc_locate_data(call, skb,
 						 &call->rxtx_annotations[ix],
-						 &rx_pkt_offset, &rx_pkt_len);
+						 &rx_pkt_offset, &rx_pkt_len,
+						 &rx_pkt_last);
 			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
 					    rx_pkt_offset, rx_pkt_len, ret2);
 			if (ret2 < 0) {
@@ -396,13 +404,12 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 		}
 
 		/* The whole packet has been transferred. */
-		last = sp->hdr.flags & RXRPC_LAST_PACKET;
 		if (!(flags & MSG_PEEK))
 			rxrpc_rotate_rx_window(call);
 		rx_pkt_offset = 0;
 		rx_pkt_len = 0;
 
-		if (last) {
+		if (rx_pkt_last) {
 			ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
 			ret = 1;
 			goto out;
@@ -415,6 +422,7 @@ out:
 	if (!(flags & MSG_PEEK)) {
 		call->rx_pkt_offset = rx_pkt_offset;
 		call->rx_pkt_len = rx_pkt_len;
+		call->rx_pkt_last = rx_pkt_last;
 	}
 done:
 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
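This is the jumbo-packet fix from the merge summary: one skb can carry several subpackets, and the old code decided "last packet" from the skb's wire-header flags while consuming an individual subpacket, so recvmsg could declare the call complete before the final subpacket was delivered. The per-subpacket rule is now, in effect:

	/* sketch of the logic threaded through rxrpc_locate_data(): only
	 * the final subpacket of an skb flagged as containing the last
	 * packet terminates the call's receive phase */
	bool last = subpacket == sp->nr_subpackets - 1 &&
		    (sp->rx_flags & RXRPC_SKB_INCL_LAST);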
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index bf10bdaf5012..8229ed4a67be 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -162,16 +162,20 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	cls_bpf.name = obj->bpf_name;
 	cls_bpf.exts_integrated = obj->exts_integrated;
 
-	if (oldprog)
+	if (oldprog && prog)
 		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
 					  skip_sw, &oldprog->gen_flags,
 					  &oldprog->in_hw_count,
 					  &prog->gen_flags, &prog->in_hw_count,
 					  true);
-	else
+	else if (prog)
 		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
 				      skip_sw, &prog->gen_flags,
 				      &prog->in_hw_count, true);
+	else
+		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
+					  skip_sw, &oldprog->gen_flags,
+					  &oldprog->in_hw_count, true);
 
 	if (prog && err) {
 		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 17bd8f539bc7..8769b4b8807d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -799,9 +799,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
-static struct lock_class_key qdisc_tx_busylock;
-static struct lock_class_key qdisc_running_key;
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack)
@@ -854,17 +851,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	}
 
 	spin_lock_init(&sch->busylock);
-	lockdep_set_class(&sch->busylock,
-			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
 	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
 	spin_lock_init(&sch->seqlock);
-	lockdep_set_class(&sch->busylock,
-			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
 	seqcount_init(&sch->running);
-	lockdep_set_class(&sch->running,
-			  dev->qdisc_running_key ?: &qdisc_running_key);
 
 	sch->ops = ops;
 	sch->flags = ops->static_flags;
@@ -875,6 +864,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	dev_hold(dev);
 	refcount_set(&sch->refcnt, 1);
 
+	if (sch != &noop_qdisc) {
+		lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
+		lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
+		lockdep_set_class(&sch->running, &dev->qdisc_running_key);
+	}
+
 	return sch;
 errout1:
 	kfree(p);
@@ -1043,6 +1038,8 @@ static void attach_one_default_qdisc(struct net_device *dev,
 
 	if (dev->priv_flags & IFF_NO_QUEUE)
 		ops = &noqueue_qdisc_ops;
+	else if (dev->type == ARPHRD_CAN)
+		ops = &pfifo_fast_ops;
 
 	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
 	if (!qdisc) {
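The sch_generic.c lockdep hunks belong to Taehee Yoo's netdevice-nesting series: two file-scope lock_class_keys made every qdisc in the system share one class, which falsely trips lockdep on stacked devices (vlan over bond over ethernet, and so on). The per-device keys referenced above are assumed to come from a companion change embedding them in struct net_device, e.g.:

	/* assumed shape of the include/linux/netdevice.h side (sketch) */
	struct net_device {
		/* existing fields elided */
		struct lock_class_key	qdisc_tx_busylock_key;
		struct lock_class_key	qdisc_running_key;
	};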
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 23cd1c873a2c..be35f03b657b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -5,11 +5,11 @@
  * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
  */
 
-#include <linux/jhash.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
+#include <linux/siphash.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 
@@ -126,7 +126,7 @@ struct wdrr_bucket {
 
 struct hhf_sched_data {
 	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
-	u32		   perturbation;   /* hash perturbation */
+	siphash_key_t	   perturbation;   /* hash perturbation */
 	u32		   quantum;	   /* psched_mtu(qdisc_dev(sch)); */
 	u32		   drop_overlimit; /* number of times max qdisc packet
 					    * limit was hit
@@ -264,7 +264,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	/* Get hashed flow-id of the skb. */
-	hash = skb_get_hash_perturb(skb, q->perturbation);
+	hash = skb_get_hash_perturb(skb, &q->perturbation);
 
 	/* Check if this packet belongs to an already established HH flow. */
 	flow_pos = hash & HHF_BIT_MASK;
@@ -582,7 +582,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
 
 	sch->limit = 1000;
 	q->quantum = psched_mtu(qdisc_dev(sch));
-	q->perturbation = prandom_u32();
+	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 	INIT_LIST_HEAD(&q->new_buckets);
 	INIT_LIST_HEAD(&q->old_buckets);
 
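The jhash-to-siphash conversions in sch_hhf, sch_sfb and sch_sfq follow the flow-dissector change called out in the merge summary: a 32-bit jhash seed can be inferred by an attacker who can observe hash-dependent behavior, after which colliding flows can be constructed at will, whereas siphash is a keyed PRF taking a 128-bit secret. The usage shape common to these qdiscs:

	siphash_key_t key;
	u32 hash;

	get_random_bytes(&key, sizeof(key));	/* 128-bit secret key     */
	hash = siphash_1u32(salt, &key);	/* keyed hash of one u32;
						 * truncated to 32 bits
						 * on assignment          */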
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index d448fe3068e5..4074c50ac3d7 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -18,7 +18,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -45,7 +45,7 @@ struct sfb_bucket {
  * (Section 4.4 of SFB reference : moving hash functions)
  */
 struct sfb_bins {
-	u32		  perturbation; /* jhash perturbation */
+	siphash_key_t	  perturbation; /* siphash key */
 	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
 };
 
@@ -217,7 +217,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da
 
 static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
 {
-	q->bins[slot].perturbation = prandom_u32();
+	get_random_bytes(&q->bins[slot].perturbation,
+			 sizeof(q->bins[slot].perturbation));
 }
 
 static void sfb_swap_slot(struct sfb_sched_data *q)
@@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		/* If using external classifiers, get result and record it. */
 		if (!sfb_classify(skb, fl, &ret, &salt))
 			goto other_drop;
-		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
 	} else {
-		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
+		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
 	}
 
 
@@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		/* Inelastic flow */
 		if (q->double_buffering) {
 			sfbhash = skb_get_hash_perturb(skb,
-						       q->bins[slot].perturbation);
+						       &q->bins[slot].perturbation);
 			if (!sfbhash)
 				sfbhash = 1;
 			sfb_skb_cb(skb)->hashes[slot] = sfbhash;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 68404a9d2ce4..c787d4d46017 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -14,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
@@ -117,7 +117,7 @@ struct sfq_sched_data {
 	u8		headdrop;
 	u8		maxdepth;	/* limit of packets per flow */
 
-	u32		perturbation;
+	siphash_key_t	perturbation;
 	u8		cur_depth;	/* depth of longest slot */
 	u8		flags;
 	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
@@ -157,7 +157,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
 			     const struct sk_buff *skb)
 {
-	return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
+	return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -607,9 +607,11 @@ static void sfq_perturbation(struct timer_list *t)
 	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
 	struct Qdisc *sch = q->sch;
 	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+	siphash_key_t nkey;
 
+	get_random_bytes(&nkey, sizeof(nkey));
 	spin_lock(root_lock);
-	q->perturbation = prandom_u32();
+	q->perturbation = nkey;
 	if (!q->filter_list && q->tail)
 		sfq_rehash(sch);
 	spin_unlock(root_lock);
@@ -688,7 +690,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 	del_timer(&q->perturb_timer);
 	if (q->perturb_period) {
 		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
-		q->perturbation = prandom_u32();
+		get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 	}
 	sch_tree_unlock(sch);
 	kfree(p);
@@ -745,7 +747,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
 	q->quantum = psched_mtu(qdisc_dev(sch));
 	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
 	q->perturb_period = 0;
-	q->perturbation = prandom_u32();
+	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 
 	if (opt) {
 		int err = sfq_change(sch, opt);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 6719a65169d4..2121187229cd 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1152,7 +1152,7 @@ EXPORT_SYMBOL_GPL(taprio_offload_free);
  * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
  * This is left as TODO.
  */
-void taprio_offload_config_changed(struct taprio_sched *q)
+static void taprio_offload_config_changed(struct taprio_sched *q)
 {
 	struct sched_gate_list *oper, *admin;
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5ca0ec0e823c..ffd3262b7a41 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8476,7 +8476,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	mask = 0;
 
 	/* Is there any exceptional events? */
-	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -8485,7 +8485,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		mask |= EPOLLHUP;
 
 	/* Is it readable? Reconsider this code with TCP-style support. */
-	if (!skb_queue_empty(&sk->sk_receive_queue))
+	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* The association is either gone or not ready. */
@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 		if (sk_can_busy_loop(sk)) {
 			sk_busy_loop(sk, noblock);
 
-			if (!skb_queue_empty(&sk->sk_receive_queue))
+			if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 				continue;
 		}
 
@@ -9306,7 +9306,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
         newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
         newinet->inet_dport = htons(asoc->peer.port);
         newinet->pmtudisc = inet->pmtudisc;
-        newinet->inet_id = asoc->next_tsn ^ jiffies;
+        newinet->inet_id = prandom_u32();
 
         newinet->uc_ttl = inet->uc_ttl;
         newinet->mc_loop = 1;
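The sctp_poll() hunks above, and the tipc, af_unix and vsock hunks below, convert unlocked empty checks to skb_queue_empty_lockless(), which reads the queue head with READ_ONCE() so the compiler can neither tear the load nor re-read it while writers run concurrently. A minimal userspace analogue of that single-load check, sketched with C11 atomics (the node type and names are illustrative, not the kernel's):

/* Sketch: lockless "is the queue empty?" for a circular list, mirroring
 * skb_queue_empty_lockless(). The reader holds no lock, so the head's
 * next pointer must be loaded exactly once, atomically (~ READ_ONCE()). */
#include <stdatomic.h>
#include <stdbool.h>

struct node {
        _Atomic(struct node *) next;
        _Atomic(struct node *) prev;
};

static bool queue_empty_lockless(struct node *head)
{
        /* one relaxed atomic load: no tearing, no compiler re-reads */
        return atomic_load_explicit(&head->next, memory_order_relaxed) == head;
}

int main(void)
{
        struct node head = { &head, &head }; /* empty circular list */

        return queue_empty_lockless(&head) ? 0 : 1;
}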
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5b932583e407..47946f489fd4 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -123,6 +123,12 @@ struct proto smc_proto6 = {
 };
 EXPORT_SYMBOL_GPL(smc_proto6);
 
+static void smc_restore_fallback_changes(struct smc_sock *smc)
+{
+        smc->clcsock->file->private_data = smc->sk.sk_socket;
+        smc->clcsock->file = NULL;
+}
+
 static int __smc_release(struct smc_sock *smc)
 {
         struct sock *sk = &smc->sk;
@@ -141,6 +147,7 @@ static int __smc_release(struct smc_sock *smc)
                 }
                 sk->sk_state = SMC_CLOSED;
                 sk->sk_state_change(sk);
+                smc_restore_fallback_changes(smc);
         }
 
         sk->sk_prot->unhash(sk);
@@ -700,8 +707,6 @@ static int __smc_connect(struct smc_sock *smc)
         int smc_type;
         int rc = 0;
 
-        sock_hold(&smc->sk); /* sock put in passive closing */
-
         if (smc->use_fallback)
                 return smc_connect_fallback(smc, smc->fallback_rsn);
 
@@ -846,6 +851,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
         rc = kernel_connect(smc->clcsock, addr, alen, flags);
         if (rc && rc != -EINPROGRESS)
                 goto out;
+
+        sock_hold(&smc->sk); /* sock put in passive closing */
         if (flags & O_NONBLOCK) {
                 if (schedule_work(&smc->connect_work))
                         smc->connect_nonblock = 1;
@@ -1291,8 +1298,8 @@ static void smc_listen_work(struct work_struct *work)
         /* check if RDMA is available */
         if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
                 /* prepare RDMA check */
-                memset(&ini, 0, sizeof(ini));
                 ini.is_smcd = false;
+                ini.ism_dev = NULL;
                 ini.ib_lcl = &pclc->lcl;
                 rc = smc_find_rdma_device(new_smc, &ini);
                 if (rc) {
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 88556f0251ab..2ba97ff325a5 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -561,7 +561,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
         }
 
         rtnl_lock();
-        nest_lvl = dev_get_nest_level(ndev);
+        nest_lvl = ndev->lower_level;
         for (i = 0; i < nest_lvl; i++) {
                 struct list_head *lower = &ndev->adj_list.lower;
 
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index bab2da8cf17a..2920b006f65c 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -718,7 +718,7 @@ static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
         int i, nest_lvl;
 
         rtnl_lock();
-        nest_lvl = dev_get_nest_level(ndev);
+        nest_lvl = ndev->lower_level;
         for (i = 0; i < nest_lvl; i++) {
                 struct list_head *lower = &ndev->adj_list.lower;
 
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f8bbc4aab213..4b92b196cfa6 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -740,7 +740,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                 /* fall through */
         case TIPC_LISTEN:
         case TIPC_CONNECTING:
-                if (!skb_queue_empty(&sk->sk_receive_queue))
+                if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                         revents |= EPOLLIN | EPOLLRDNORM;
                 break;
         case TIPC_OPEN:
@@ -748,7 +748,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                 revents |= EPOLLOUT;
                 if (!tipc_sk_type_connectionless(sk))
                         break;
-                if (skb_queue_empty(&sk->sk_receive_queue))
+                if (skb_queue_empty_lockless(&sk->sk_receive_queue))
                         break;
                 revents |= EPOLLIN | EPOLLRDNORM;
                 break;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 67e87db5877f..0d8da809bea2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2599,7 +2599,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
                 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
         /* readable? */
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         /* Connection-based need to check for termination and startup */
@@ -2628,7 +2628,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
         mask = 0;
 
         /* exceptional events? */
-        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+        if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                 mask |= EPOLLERR |
                         (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -2638,7 +2638,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
                 mask |= EPOLLHUP;
 
         /* readable? */
-        if (!skb_queue_empty(&sk->sk_receive_queue))
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                 mask |= EPOLLIN | EPOLLRDNORM;
 
         /* Connection-based need to check for termination and startup */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 2ab43b2bba31..582a3e4dfce2 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -870,7 +870,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
          * the queue and write as long as the socket isn't shutdown for
          * sending.
          */
-        if (!skb_queue_empty(&sk->sk_receive_queue) ||
+        if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
             (sk->sk_shutdown & RCV_SHUTDOWN)) {
                 mask |= EPOLLIN | EPOLLRDNORM;
         }
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index e851cafd8e2f..fcac5c6366e1 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -204,6 +204,11 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
                 return false;
         }
 
+        /* channel 14 is only for IEEE 802.11b */
+        if (chandef->center_freq1 == 2484 &&
+            chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
+                return false;
+
         if (cfg80211_chandef_is_edmg(chandef) &&
             !cfg80211_edmg_chandef_valid(chandef))
                 return false;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4453dd375de9..7b72286922f7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -393,7 +393,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
         [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
         [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
                                    .len = IEEE80211_MAX_MESH_ID_LEN },
-        [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
+        [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
 
         [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
         [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 419eb12c1e93..5b4ed5bbc542 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1559,7 +1559,8 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
         }
 
         if (freq == 2484) {
-                if (chandef->width > NL80211_CHAN_WIDTH_40)
+                /* channel 14 is only for IEEE 802.11b */
+                if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
                         return false;
 
                 *op_class = 82; /* channel 14 */
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 16d5f353163a..3049af269fbf 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -27,6 +27,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
         unsigned long flags;
 
+        if (!xs->tx)
+                return;
+
         spin_lock_irqsave(&umem->xsk_list_lock, flags);
         list_add_rcu(&xs->list, &umem->xsk_list);
         spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
@@ -36,6 +39,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
         unsigned long flags;
 
+        if (!xs->tx)
+                return;
+
         spin_lock_irqsave(&umem->xsk_list_lock, flags);
         list_del_rcu(&xs->list);
         spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index 15a666329a34..1afa22c88e42 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -22,6 +22,7 @@ import os
 import pprint
 import random
 import re
+import stat
 import string
 import struct
 import subprocess
@@ -311,7 +312,11 @@ class DebugfsDir:
         for f in out.split():
             if f == "ports":
                 continue
+
             p = os.path.join(path, f)
+            if not os.stat(p).st_mode & stat.S_IRUSR:
+                continue
+
             if os.path.isfile(p):
                 _, out = cmd('cat %s/%s' % (path, f))
                 dfs[f] = out.strip()
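The test_offload.py hunk above skips debugfs entries whose mode lacks the owner-read bit, since cat-ing a write-only file would make the selftest fail spuriously. The same readability probe, sketched in C under POSIX assumptions (the path is a placeholder):

/* Sketch: skip files not readable by their owner, the C equivalent of
 * the stat.S_IRUSR test added above. */
#include <stdio.h>
#include <sys/stat.h>

static int owner_readable(const char *path)
{
        struct stat st;

        if (stat(path, &st) != 0)
                return 0; /* treat stat failure as "skip" */
        return (st.st_mode & S_IRUSR) != 0;
}

int main(void)
{
        const char *path = "/sys/kernel/debug/example"; /* placeholder */

        printf("%s: %s\n", path, owner_readable(path) ? "read" : "skip");
        return 0;
}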
diff --git a/tools/testing/selftests/bpf/test_tc_edt.sh b/tools/testing/selftests/bpf/test_tc_edt.sh
index f38567ef694b..daa7d1b8d309 100755
--- a/tools/testing/selftests/bpf/test_tc_edt.sh
+++ b/tools/testing/selftests/bpf/test_tc_edt.sh
@@ -59,7 +59,7 @@ ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
 
 # start the listener
 ip netns exec ${NS_DST} bash -c \
-        "nc -4 -l -s ${IP_DST} -p 9000 >/dev/null &"
+        "nc -4 -l -p 9000 >/dev/null &"
 declare -i NC_PID=$!
 sleep 1
 
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index c4ba0ff4a53f..76c1897e6352 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -1438,6 +1438,27 @@ ipv4_addr_metric_test()
         fi
         log_test $rc 0 "Prefix route with metric on link up"
 
+        # explicitly check for metric changes on edge scenarios
+        run_cmd "$IP addr flush dev dummy2"
+        run_cmd "$IP addr add dev dummy2 172.16.104.0/24 metric 259"
+        run_cmd "$IP addr change dev dummy2 172.16.104.0/24 metric 260"
+        rc=$?
+        if [ $rc -eq 0 ]; then
+                check_route "172.16.104.0/24 dev dummy2 proto kernel scope link src 172.16.104.0 metric 260"
+                rc=$?
+        fi
+        log_test $rc 0 "Modify metric of .0/24 address"
+
+        run_cmd "$IP addr flush dev dummy2"
+        run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
+        run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
+        rc=$?
+        if [ $rc -eq 0 ]; then
+                check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+                rc=$?
+        fi
+        log_test $rc 0 "Modify metric of address with peer route"
+
         $IP li del dummy1
         $IP li del dummy2
         cleanup
diff --git a/tools/testing/selftests/net/l2tp.sh b/tools/testing/selftests/net/l2tp.sh
index 5782433886fc..5782433886fc 100644..100755
--- a/tools/testing/selftests/net/l2tp.sh
+++ b/tools/testing/selftests/net/l2tp.sh
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
index fe3230c55986..fb7a59ed759e 100644
--- a/tools/testing/selftests/net/reuseport_dualstack.c
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -129,7 +129,7 @@ static void test(int *rcv_fds, int count, int proto)
 {
         struct epoll_event ev;
         int epfd, i, test_fd;
-        uint16_t test_family;
+        int test_family;
         socklen_t len;
 
         epfd = epoll_create(1);
@@ -146,6 +146,7 @@ static void test(int *rcv_fds, int count, int proto)
         send_from_v4(proto);
 
         test_fd = receive_once(epfd, proto);
+        len = sizeof(test_family);
         if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
                 error(1, errno, "failed to read socket domain");
         if (test_family != AF_INET)
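The reuseport_dualstack hunks above fix the classic getsockopt() pitfall: optlen is a value-result argument that must be initialized to the buffer size before the call, and SO_DOMAIN yields a full int, so a uint16_t buffer invites a short or corrupted read. A minimal sketch of the corrected query on a standalone socket, assuming Linux (SO_DOMAIN is Linux-specific):

/* Sketch: correctly query SO_DOMAIN. 'len' must hold the buffer size
 * on entry; the kernel writes back how many bytes it filled. */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int domain;                     /* SO_DOMAIN is an int, not u16 */
        socklen_t len = sizeof(domain); /* initialized before the call */

        if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len))
                return 1;
        printf("domain = %d (AF_INET = %d)\n", domain, AF_INET);
        return 0;
}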