aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-10-29 23:33:20 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-10-29 23:33:20 -0400
commit2a26d99b251b8625d27aed14e97fc10707a3a81f (patch)
tree69eb8aa0476294236ceb8a864be9a697e2303ace
parenta909d3e636995ba7c349e2ca5dbb528154d4ac30 (diff)
parentfceb9c3e38252992bbf1a3028cc2f7b871211533 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: "Lots of fixes, mostly drivers as is usually the case. 1) Don't treat zero DMA address as invalid in vmxnet3, from Alexey Khoroshilov. 2) Fix element timeouts in netfilter's nft_dynset, from Anders K. Pedersen. 3) Don't put aead_req crypto struct on the stack in mac80211, from Ard Biesheuvel. 4) Several uninitialized variable warning fixes from Arnd Bergmann. 5) Fix memory leak in cxgb4, from Colin Ian King. 6) Fix bpf handling of VLAN header push/pop, from Daniel Borkmann. 7) Several VRF semantic fixes from David Ahern. 8) Set skb->protocol properly in ip6_tnl_xmit(), from Eli Cooper. 9) Socket needs to be locked in udp_disconnect(), from Eric Dumazet. 10) Div-by-zero on 32-bit fix in mlx4 driver, from Eugenia Emantayev. 11) Fix stale link state during failover in NCSI driver, from Gavin Shan. 12) Fix netdev lower adjacency list traversal, from Ido Schimmel. 13) Provide proper handle when emitting notifications of filter deletes, from Jamal Hadi Salim. 14) Memory leaks and big-endian issues in rtl8xxxu, from Jes Sorensen. 15) Fix DESYNC_FACTOR handling in ipv6, from Jiri Bohac. 16) Several routing offload fixes in mlxsw driver, from Jiri Pirko. 17) Fix broadcast sync problem in TIPC, from Jon Paul Maloy. 18) Validate chunk len before using it in SCTP, from Marcelo Ricardo Leitner. 19) Revert a netns locking change that causes regressions, from Paul Moore. 20) Add recursion limit to GRO handling, from Sabrina Dubroca. 21) GFP_KERNEL in irq context fix in ibmvnic, from Thomas Falcon. 22) Avoid accessing stale vxlan/geneve socket in data path, from Pravin Shelar" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (189 commits) geneve: avoid using stale geneve socket. vxlan: avoid using stale vxlan socket. 
qede: Fix out-of-bound fastpath memory access net: phy: dp83848: add dp83822 PHY support enic: fix rq disable tipc: fix broadcast link synchronization problem ibmvnic: Fix missing brackets in init_sub_crq_irqs ibmvnic: Fix releasing of sub-CRQ IRQs in interrupt context Revert "ibmvnic: Fix releasing of sub-CRQ IRQs in interrupt context" arch/powerpc: Update parameters for csum_tcpudp_magic & csum_tcpudp_nofold net/mlx4_en: Save slave ethtool stats command net/mlx4_en: Fix potential deadlock in port statistics flow net/mlx4: Fix firmware command timeout during interrupt test net/mlx4_core: Do not access comm channel if it has not yet been initialized net/mlx4_en: Fix panic during reboot net/mlx4_en: Process all completions in RX rings after port goes up net/mlx4_en: Resolve dividing by zero in 32-bit system net/mlx4_core: Change the default value of enable_qos net/mlx4_core: Avoid setting ports to auto when only one port type is supported net/mlx4_core: Fix the resource-type enum in res tracker to conform to FW spec ...
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-net.txt1
-rw-r--r--Documentation/networking/netdev-FAQ.txt8
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.txt18
-rw-r--r--MAINTAINERS41
-rw-r--r--arch/powerpc/include/asm/checksum.h12
-rw-r--r--drivers/bluetooth/btwilink.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c8
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c1
-rw-r--r--drivers/infiniband/hw/qedr/Kconfig1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c54
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c6
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c16
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c32
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c45
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig3
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c216
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c99
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c15
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c13
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c4
-rw-r--r--drivers/net/geneve.c47
-rw-r--r--drivers/net/hyperv/netvsc_drv.c25
-rw-r--r--drivers/net/macsec.c26
-rw-r--r--drivers/net/phy/at803x.c65
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c17
-rw-r--r--drivers/net/vrf.c2
-rw-r--r--drivers/net/vxlan.c82
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/slic_ds26522.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c75
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c8
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--fs/afs/cmservice.c6
-rw-r--r--fs/afs/fsclient.c4
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/rxrpc.c3
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/ipv6.h17
-rw-r--r--include/linux/mlx4/device.h3
-rw-r--r--include/linux/mlx5/driver.h16
-rw-r--r--include/linux/netdevice.h41
-rw-r--r--include/linux/qed/qed_if.h1
-rw-r--r--include/linux/qed/qede_roce.h2
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/cfg80211.h32
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/mac80211.h21
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tcp.h13
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vxlan.h4
-rw-r--r--include/uapi/linux/ethtool.h3
-rw-r--r--include/uapi/linux/rtnetlink.h2
-rw-r--r--lib/test_bpf.c2
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/batman-adv/hard-interface.c1
-rw-r--r--net/batman-adv/log.h2
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/bluetooth/hci_request.c49
-rw-r--r--net/bluetooth/hci_request.h2
-rw-r--r--net/bluetooth/mgmt.c26
-rw-r--r--net/bridge/br_multicast.c23
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/flow_dissector.c12
-rw-r--r--net/core/net_namespace.c35
-rw-r--r--net/core/pktgen.c17
-rw-r--r--net/core/sock_reuseport.c1
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/hsr/hsr_forward.c4
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/fou.c4
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/inet_hashtables.c8
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_sockglue.c11
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c8
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/udp.c15
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv6/addrconf.c101
-rw-r--r--net/ipv6/inet6_hashtables.c13
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_tunnel.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c17
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/reassembly.c3
-rw-r--r--net/ipv6/route.c74
-rw-r--r--net/ipv6/udp.c3
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/mac80211/aes_ccm.c46
-rw-r--r--net/mac80211/aes_ccm.h8
-rw-r--r--net/mac80211/aes_gcm.c43
-rw-r--r--net/mac80211/aes_gcm.h6
-rw-r--r--net/mac80211/aes_gmac.c26
-rw-r--r--net/mac80211/aes_gmac.h4
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rx.c51
-rw-r--r--net/mac80211/wpa.c22
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-aen.c18
-rw-r--r--net/ncsi/ncsi-manage.c126
-rw-r--r--net/netfilter/core.c13
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_queue.c48
-rw-r--r--net/netfilter/nf_tables_api.c2
-rw-r--r--net/netfilter/nft_dynset.c6
-rw-r--r--net/netfilter/nft_exthdr.c3
-rw-r--r--net/netfilter/nft_hash.c1
-rw-r--r--net/netfilter/nft_range.c26
-rw-r--r--net/netfilter/x_tables.c2
-rw-r--r--net/netfilter/xt_NFLOG.c1
-rw-r--r--net/netfilter/xt_hashlimit.c4
-rw-r--r--net/netfilter/xt_ipcomp.c2
-rw-r--r--net/packet/af_packet.c9
-rw-r--r--net/rds/Makefile2
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/peer_object.c4
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/act_mirred.c5
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sctp/output.c8
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/switchdev/switchdev.c9
-rw-r--r--net/tipc/bcast.c14
-rw-r--r--net/tipc/bcast.h3
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/msg.h17
-rw-r--r--net/tipc/name_distr.c1
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/wireless/sysfs.c5
-rw-r--r--net/wireless/util.c34
-rw-r--r--samples/bpf/parse_ldabs.c1
-rw-r--r--samples/bpf/parse_simple.c1
-rw-r--r--samples/bpf/parse_varlen.c1
-rw-r--r--samples/bpf/tcbpf1_kern.c1
-rw-r--r--samples/bpf/tcbpf2_kern.c1
-rw-r--r--samples/bpf/test_cgrp2_tc_kern.c1
240 files changed, 1988 insertions, 1387 deletions
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
index bce52b2ec55e..6fd988c84c4f 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
@@ -49,6 +49,7 @@ Optional port properties:
49and 49and
50 50
51 - phy-handle: See ethernet.txt file in the same directory. 51 - phy-handle: See ethernet.txt file in the same directory.
52 - phy-mode: See ethernet.txt file in the same directory.
52 53
53or 54or
54 55
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 0fe1c6e0dbcd..a20b2fae942b 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -29,8 +29,8 @@ A: There are always two trees (git repositories) in play. Both are driven
29 Linus, and net-next is where the new code goes for the future release. 29 Linus, and net-next is where the new code goes for the future release.
30 You can find the trees here: 30 You can find the trees here:
31 31
32 http://git.kernel.org/?p=linux/kernel/git/davem/net.git 32 https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
33 http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git 33 https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
34 34
35Q: How often do changes from these trees make it to the mainline Linus tree? 35Q: How often do changes from these trees make it to the mainline Linus tree?
36 36
@@ -76,7 +76,7 @@ Q: So where are we now in this cycle?
76 76
77A: Load the mainline (Linus) page here: 77A: Load the mainline (Linus) page here:
78 78
79 http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git 79 https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
80 80
81 and note the top of the "tags" section. If it is rc1, it is early 81 and note the top of the "tags" section. If it is rc1, it is early
82 in the dev cycle. If it was tagged rc7 a week ago, then a release 82 in the dev cycle. If it was tagged rc7 a week ago, then a release
@@ -123,7 +123,7 @@ A: Normally Greg Kroah-Hartman collects stable commits himself, but
123 123
124 It contains the patches which Dave has selected, but not yet handed 124 It contains the patches which Dave has selected, but not yet handed
125 off to Greg. If Greg already has the patch, then it will be here: 125 off to Greg. If Greg already has the patch, then it will be here:
126 http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git 126 https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
127 127
128 A quick way to find whether the patch is in this stable-queue is 128 A quick way to find whether the patch is in this stable-queue is
129 to simply clone the repo, and then git grep the mainline commit ID, e.g. 129 to simply clone the repo, and then git grep the mainline commit ID, e.g.
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 4fb51d32fccc..399e4e866a9c 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -33,24 +33,6 @@ nf_conntrack_events - BOOLEAN
33 If this option is enabled, the connection tracking code will 33 If this option is enabled, the connection tracking code will
34 provide userspace with connection tracking events via ctnetlink. 34 provide userspace with connection tracking events via ctnetlink.
35 35
36nf_conntrack_events_retry_timeout - INTEGER (seconds)
37 default 15
38
39 This option is only relevant when "reliable connection tracking
40 events" are used. Normally, ctnetlink is "lossy", that is,
41 events are normally dropped when userspace listeners can't keep up.
42
43 Userspace can request "reliable event mode". When this mode is
44 active, the conntrack will only be destroyed after the event was
45 delivered. If event delivery fails, the kernel periodically
46 re-tries to send the event to userspace.
47
48 This is the maximum interval the kernel should use when re-trying
49 to deliver the destroy event.
50
51 A higher number means there will be fewer delivery retries and it
52 will take longer for a backlog to be processed.
53
54nf_conntrack_expect_max - INTEGER 36nf_conntrack_expect_max - INTEGER
55 Maximum size of expectation table. Default value is 37 Maximum size of expectation table. Default value is
56 nf_conntrack_buckets / 256. Minimum is 1. 38 nf_conntrack_buckets / 256. Minimum is 1.
diff --git a/MAINTAINERS b/MAINTAINERS
index 3d838cf49f81..4012c2f98617 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2552,15 +2552,18 @@ S: Supported
2552F: drivers/net/ethernet/broadcom/genet/ 2552F: drivers/net/ethernet/broadcom/genet/
2553 2553
2554BROADCOM BNX2 GIGABIT ETHERNET DRIVER 2554BROADCOM BNX2 GIGABIT ETHERNET DRIVER
2555M: Sony Chacko <sony.chacko@qlogic.com> 2555M: Rasesh Mody <rasesh.mody@cavium.com>
2556M: Dept-HSGLinuxNICDev@qlogic.com 2556M: Harish Patil <harish.patil@cavium.com>
2557M: Dept-GELinuxNICDev@cavium.com
2557L: netdev@vger.kernel.org 2558L: netdev@vger.kernel.org
2558S: Supported 2559S: Supported
2559F: drivers/net/ethernet/broadcom/bnx2.* 2560F: drivers/net/ethernet/broadcom/bnx2.*
2560F: drivers/net/ethernet/broadcom/bnx2_* 2561F: drivers/net/ethernet/broadcom/bnx2_*
2561 2562
2562BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER 2563BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
2563M: Ariel Elior <ariel.elior@qlogic.com> 2564M: Yuval Mintz <Yuval.Mintz@cavium.com>
2565M: Ariel Elior <ariel.elior@cavium.com>
2566M: everest-linux-l2@cavium.com
2564L: netdev@vger.kernel.org 2567L: netdev@vger.kernel.org
2565S: Supported 2568S: Supported
2566F: drivers/net/ethernet/broadcom/bnx2x/ 2569F: drivers/net/ethernet/broadcom/bnx2x/
@@ -2767,7 +2770,9 @@ S: Supported
2767F: drivers/scsi/bfa/ 2770F: drivers/scsi/bfa/
2768 2771
2769BROCADE BNA 10 GIGABIT ETHERNET DRIVER 2772BROCADE BNA 10 GIGABIT ETHERNET DRIVER
2770M: Rasesh Mody <rasesh.mody@qlogic.com> 2773M: Rasesh Mody <rasesh.mody@cavium.com>
2774M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
2775M: Dept-GELinuxNICDev@cavium.com
2771L: netdev@vger.kernel.org 2776L: netdev@vger.kernel.org
2772S: Supported 2777S: Supported
2773F: drivers/net/ethernet/brocade/bna/ 2778F: drivers/net/ethernet/brocade/bna/
@@ -8517,11 +8522,10 @@ F: Documentation/devicetree/bindings/net/wireless/
8517F: drivers/net/wireless/ 8522F: drivers/net/wireless/
8518 8523
8519NETXEN (1/10) GbE SUPPORT 8524NETXEN (1/10) GbE SUPPORT
8520M: Manish Chopra <manish.chopra@qlogic.com> 8525M: Manish Chopra <manish.chopra@cavium.com>
8521M: Sony Chacko <sony.chacko@qlogic.com> 8526M: Rahul Verma <rahul.verma@cavium.com>
8522M: Rajesh Borundia <rajesh.borundia@qlogic.com> 8527M: Dept-GELinuxNICDev@cavium.com
8523L: netdev@vger.kernel.org 8528L: netdev@vger.kernel.org
8524W: http://www.qlogic.com
8525S: Supported 8529S: Supported
8526F: drivers/net/ethernet/qlogic/netxen/ 8530F: drivers/net/ethernet/qlogic/netxen/
8527 8531
@@ -9897,33 +9901,32 @@ F: Documentation/scsi/LICENSE.qla4xxx
9897F: drivers/scsi/qla4xxx/ 9901F: drivers/scsi/qla4xxx/
9898 9902
9899QLOGIC QLA3XXX NETWORK DRIVER 9903QLOGIC QLA3XXX NETWORK DRIVER
9900M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com> 9904M: Dept-GELinuxNICDev@cavium.com
9901M: Ron Mercer <ron.mercer@qlogic.com>
9902M: linux-driver@qlogic.com
9903L: netdev@vger.kernel.org 9905L: netdev@vger.kernel.org
9904S: Supported 9906S: Supported
9905F: Documentation/networking/LICENSE.qla3xxx 9907F: Documentation/networking/LICENSE.qla3xxx
9906F: drivers/net/ethernet/qlogic/qla3xxx.* 9908F: drivers/net/ethernet/qlogic/qla3xxx.*
9907 9909
9908QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER 9910QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
9909M: Dept-GELinuxNICDev@qlogic.com 9911M: Harish Patil <harish.patil@cavium.com>
9912M: Manish Chopra <manish.chopra@cavium.com>
9913M: Dept-GELinuxNICDev@cavium.com
9910L: netdev@vger.kernel.org 9914L: netdev@vger.kernel.org
9911S: Supported 9915S: Supported
9912F: drivers/net/ethernet/qlogic/qlcnic/ 9916F: drivers/net/ethernet/qlogic/qlcnic/
9913 9917
9914QLOGIC QLGE 10Gb ETHERNET DRIVER 9918QLOGIC QLGE 10Gb ETHERNET DRIVER
9915M: Harish Patil <harish.patil@qlogic.com> 9919M: Harish Patil <harish.patil@cavium.com>
9916M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com> 9920M: Manish Chopra <manish.chopra@cavium.com>
9917M: Dept-GELinuxNICDev@qlogic.com 9921M: Dept-GELinuxNICDev@cavium.com
9918M: linux-driver@qlogic.com
9919L: netdev@vger.kernel.org 9922L: netdev@vger.kernel.org
9920S: Supported 9923S: Supported
9921F: drivers/net/ethernet/qlogic/qlge/ 9924F: drivers/net/ethernet/qlogic/qlge/
9922 9925
9923QLOGIC QL4xxx ETHERNET DRIVER 9926QLOGIC QL4xxx ETHERNET DRIVER
9924M: Yuval Mintz <Yuval.Mintz@qlogic.com> 9927M: Yuval Mintz <Yuval.Mintz@cavium.com>
9925M: Ariel Elior <Ariel.Elior@qlogic.com> 9928M: Ariel Elior <Ariel.Elior@cavium.com>
9926M: everest-linux-l2@qlogic.com 9929M: everest-linux-l2@cavium.com
9927L: netdev@vger.kernel.org 9930L: netdev@vger.kernel.org
9928S: Supported 9931S: Supported
9929F: drivers/net/ethernet/qlogic/qed/ 9932F: drivers/net/ethernet/qlogic/qed/
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ee655ed1ff1b..1e8fceb308a5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
53 return (__force __sum16)(~((__force u32)sum + tmp) >> 16); 53 return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
54} 54}
55 55
56static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 56static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
57 unsigned short len, 57 __u8 proto, __wsum sum)
58 unsigned short proto,
59 __wsum sum)
60{ 58{
61#ifdef __powerpc64__ 59#ifdef __powerpc64__
62 unsigned long s = (__force u32)sum; 60 unsigned long s = (__force u32)sum;
@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
83 * computes the checksum of the TCP/UDP pseudo-header 81 * computes the checksum of the TCP/UDP pseudo-header
84 * returns a 16-bit checksum, already complemented 82 * returns a 16-bit checksum, already complemented
85 */ 83 */
86static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, 84static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
87 unsigned short len, 85 __u8 proto, __wsum sum)
88 unsigned short proto,
89 __wsum sum)
90{ 86{
91 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); 87 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
92} 88}
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index ef51c9c864c5..b6bb58c41df5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev)
310 BT_DBG("HCI device registered (hdev %p)", hdev); 310 BT_DBG("HCI device registered (hdev %p)", hdev);
311 311
312 dev_set_drvdata(&pdev->dev, hst); 312 dev_set_drvdata(&pdev->dev, hst);
313 return err; 313 return 0;
314} 314}
315 315
316static int bt_ti_remove(struct platform_device *pdev) 316static int bt_ti_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5ccb90ef0146..8f6c23c20c52 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
643 }, 643 },
644 .driver_data = &acpi_active_low, 644 .driver_data = &acpi_active_low,
645 }, 645 },
646 { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
647 .ident = "Lenovo ThinkPad 8",
648 .matches = {
649 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
650 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
651 },
652 .driver_data = &acpi_active_low,
653 },
646 { } 654 { }
647}; 655};
648 656
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 22174774dbb8..63036c731626 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1019 resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); 1019 resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1020 if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) 1020 if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
1021 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); 1021 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1022 resp.cache_line_size = L1_CACHE_BYTES; 1022 resp.cache_line_size = cache_line_size();
1023 resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); 1023 resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1024 resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); 1024 resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1025 resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); 1025 resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 41f4c2afbcdd..7ce97daf26c6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -52,7 +52,6 @@ enum {
52 52
53enum { 53enum {
54 MLX5_IB_SQ_STRIDE = 6, 54 MLX5_IB_SQ_STRIDE = 6,
55 MLX5_IB_CACHE_LINE_SIZE = 64,
56}; 55};
57 56
58static const u32 mlx5_ib_opcode[] = { 57static const u32 mlx5_ib_opcode[] = {
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
index 7c06d85568d4..6c9f3923e838 100644
--- a/drivers/infiniband/hw/qedr/Kconfig
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_QEDR
2 tristate "QLogic RoCE driver" 2 tristate "QLogic RoCE driver"
3 depends on 64BIT && QEDE 3 depends on 64BIT && QEDE
4 select QED_LL2 4 select QED_LL2
5 select QED_RDMA
5 ---help--- 6 ---help---
6 This driver provides low-level InfiniBand over Ethernet 7 This driver provides low-level InfiniBand over Ethernet
7 support for QLogic QED host channel adapters (HCAs). 8 support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b8d2d9e2263..da12717a3eb7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
63 63
64enum { 64enum {
65 IPOIB_ENCAP_LEN = 4, 65 IPOIB_ENCAP_LEN = 4,
66 IPOIB_PSEUDO_LEN = 20,
67 IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
66 68
67 IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN, 69 IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
68 IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */ 70 IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -134,15 +136,21 @@ struct ipoib_header {
134 u16 reserved; 136 u16 reserved;
135}; 137};
136 138
137struct ipoib_cb { 139struct ipoib_pseudo_header {
138 struct qdisc_skb_cb qdisc_cb; 140 u8 hwaddr[INFINIBAND_ALEN];
139 u8 hwaddr[INFINIBAND_ALEN];
140}; 141};
141 142
142static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb) 143static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
143{ 144{
144 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb)); 145 char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
145 return (struct ipoib_cb *)skb->cb; 146
147 /*
148 * only the ipoib header is present now, make room for a dummy
149 * pseudo header and set skb field accordingly
150 */
151 memset(data, 0, IPOIB_PSEUDO_LEN);
152 skb_reset_mac_header(skb);
153 skb_pull(skb, IPOIB_HARD_LEN);
146} 154}
147 155
148/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ 156/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4ad297d3de89..339a1eecdfe3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
63#define IPOIB_CM_RX_DELAY (3 * 256 * HZ) 63#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
64#define IPOIB_CM_RX_UPDATE_MASK (0x3) 64#define IPOIB_CM_RX_UPDATE_MASK (0x3)
65 65
66#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
67
66static struct ib_qp_attr ipoib_cm_err_attr = { 68static struct ib_qp_attr ipoib_cm_err_attr = {
67 .qp_state = IB_QPS_ERR 69 .qp_state = IB_QPS_ERR
68}; 70};
@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
146 struct sk_buff *skb; 148 struct sk_buff *skb;
147 int i; 149 int i;
148 150
149 skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12); 151 skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
150 if (unlikely(!skb)) 152 if (unlikely(!skb))
151 return NULL; 153 return NULL;
152 154
153 /* 155 /*
154 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the 156 * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the
155 * IP header to a multiple of 16. 157 * IP header to a multiple of 16.
156 */ 158 */
157 skb_reserve(skb, 12); 159 skb_reserve(skb, IPOIB_CM_RX_RESERVE);
158 160
159 mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, 161 mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
160 DMA_FROM_DEVICE); 162 DMA_FROM_DEVICE);
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
624 if (wc->byte_len < IPOIB_CM_COPYBREAK) { 626 if (wc->byte_len < IPOIB_CM_COPYBREAK) {
625 int dlen = wc->byte_len; 627 int dlen = wc->byte_len;
626 628
627 small_skb = dev_alloc_skb(dlen + 12); 629 small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
628 if (small_skb) { 630 if (small_skb) {
629 skb_reserve(small_skb, 12); 631 skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
630 ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], 632 ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
631 dlen, DMA_FROM_DEVICE); 633 dlen, DMA_FROM_DEVICE);
632 skb_copy_from_linear_data(skb, small_skb->data, dlen); 634 skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
663 665
664copied: 666copied:
665 skb->protocol = ((struct ipoib_header *) skb->data)->proto; 667 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
666 skb_reset_mac_header(skb); 668 skb_add_pseudo_hdr(skb);
667 skb_pull(skb, IPOIB_ENCAP_LEN);
668 669
669 ++dev->stats.rx_packets; 670 ++dev->stats.rx_packets;
670 dev->stats.rx_bytes += skb->len; 671 dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index be11d5d5b8c1..830fecb6934c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
128 128
129 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); 129 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
130 130
131 skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN); 131 skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
132 if (unlikely(!skb)) 132 if (unlikely(!skb))
133 return NULL; 133 return NULL;
134 134
135 /* 135 /*
136 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte 136 * the IP header will be at IPOIP_HARD_LEN + IB_GRH_BYTES, that is
137 * header. So we need 4 more bytes to get to 48 and align the 137 * 64 bytes aligned
138 * IP header to a multiple of 16.
139 */ 138 */
140 skb_reserve(skb, 4); 139 skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
141 140
142 mapping = priv->rx_ring[id].mapping; 141 mapping = priv->rx_ring[id].mapping;
143 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, 142 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
253 skb_pull(skb, IB_GRH_BYTES); 252 skb_pull(skb, IB_GRH_BYTES);
254 253
255 skb->protocol = ((struct ipoib_header *) skb->data)->proto; 254 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
256 skb_reset_mac_header(skb); 255 skb_add_pseudo_hdr(skb);
257 skb_pull(skb, IPOIB_ENCAP_LEN);
258 256
259 ++dev->stats.rx_packets; 257 ++dev->stats.rx_packets;
260 dev->stats.rx_bytes += skb->len; 258 dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5636fc3da6b8..b58d9dca5c93 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
925 ipoib_neigh_free(neigh); 925 ipoib_neigh_free(neigh);
926 goto err_drop; 926 goto err_drop;
927 } 927 }
928 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) 928 if (skb_queue_len(&neigh->queue) <
929 IPOIB_MAX_PATH_REC_QUEUE) {
930 /* put pseudoheader back on for next time */
931 skb_push(skb, IPOIB_PSEUDO_LEN);
929 __skb_queue_tail(&neigh->queue, skb); 932 __skb_queue_tail(&neigh->queue, skb);
930 else { 933 } else {
931 ipoib_warn(priv, "queue length limit %d. Packet drop.\n", 934 ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
932 skb_queue_len(&neigh->queue)); 935 skb_queue_len(&neigh->queue));
933 goto err_drop; 936 goto err_drop;
@@ -964,7 +967,7 @@ err_drop:
964} 967}
965 968
966static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 969static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
967 struct ipoib_cb *cb) 970 struct ipoib_pseudo_header *phdr)
968{ 971{
969 struct ipoib_dev_priv *priv = netdev_priv(dev); 972 struct ipoib_dev_priv *priv = netdev_priv(dev);
970 struct ipoib_path *path; 973 struct ipoib_path *path;
@@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
972 975
973 spin_lock_irqsave(&priv->lock, flags); 976 spin_lock_irqsave(&priv->lock, flags);
974 977
975 path = __path_find(dev, cb->hwaddr + 4); 978 path = __path_find(dev, phdr->hwaddr + 4);
976 if (!path || !path->valid) { 979 if (!path || !path->valid) {
977 int new_path = 0; 980 int new_path = 0;
978 981
979 if (!path) { 982 if (!path) {
980 path = path_rec_create(dev, cb->hwaddr + 4); 983 path = path_rec_create(dev, phdr->hwaddr + 4);
981 new_path = 1; 984 new_path = 1;
982 } 985 }
983 if (path) { 986 if (path) {
984 if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 987 if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
988 /* put pseudoheader back on for next time */
989 skb_push(skb, IPOIB_PSEUDO_LEN);
985 __skb_queue_tail(&path->queue, skb); 990 __skb_queue_tail(&path->queue, skb);
986 } else { 991 } else {
987 ++dev->stats.tx_dropped; 992 ++dev->stats.tx_dropped;
@@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1009 be16_to_cpu(path->pathrec.dlid)); 1014 be16_to_cpu(path->pathrec.dlid));
1010 1015
1011 spin_unlock_irqrestore(&priv->lock, flags); 1016 spin_unlock_irqrestore(&priv->lock, flags);
1012 ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); 1017 ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
1013 return; 1018 return;
1014 } else if ((path->query || !path_rec_start(dev, path)) && 1019 } else if ((path->query || !path_rec_start(dev, path)) &&
1015 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 1020 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1021 /* put pseudoheader back on for next time */
1022 skb_push(skb, IPOIB_PSEUDO_LEN);
1016 __skb_queue_tail(&path->queue, skb); 1023 __skb_queue_tail(&path->queue, skb);
1017 } else { 1024 } else {
1018 ++dev->stats.tx_dropped; 1025 ++dev->stats.tx_dropped;
@@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1026{ 1033{
1027 struct ipoib_dev_priv *priv = netdev_priv(dev); 1034 struct ipoib_dev_priv *priv = netdev_priv(dev);
1028 struct ipoib_neigh *neigh; 1035 struct ipoib_neigh *neigh;
1029 struct ipoib_cb *cb = ipoib_skb_cb(skb); 1036 struct ipoib_pseudo_header *phdr;
1030 struct ipoib_header *header; 1037 struct ipoib_header *header;
1031 unsigned long flags; 1038 unsigned long flags;
1032 1039
1040 phdr = (struct ipoib_pseudo_header *) skb->data;
1041 skb_pull(skb, sizeof(*phdr));
1033 header = (struct ipoib_header *) skb->data; 1042 header = (struct ipoib_header *) skb->data;
1034 1043
1035 if (unlikely(cb->hwaddr[4] == 0xff)) { 1044 if (unlikely(phdr->hwaddr[4] == 0xff)) {
1036 /* multicast, arrange "if" according to probability */ 1045 /* multicast, arrange "if" according to probability */
1037 if ((header->proto != htons(ETH_P_IP)) && 1046 if ((header->proto != htons(ETH_P_IP)) &&
1038 (header->proto != htons(ETH_P_IPV6)) && 1047 (header->proto != htons(ETH_P_IPV6)) &&
@@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1045 return NETDEV_TX_OK; 1054 return NETDEV_TX_OK;
1046 } 1055 }
1047 /* Add in the P_Key for multicast*/ 1056 /* Add in the P_Key for multicast*/
1048 cb->hwaddr[8] = (priv->pkey >> 8) & 0xff; 1057 phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1049 cb->hwaddr[9] = priv->pkey & 0xff; 1058 phdr->hwaddr[9] = priv->pkey & 0xff;
1050 1059
1051 neigh = ipoib_neigh_get(dev, cb->hwaddr); 1060 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1052 if (likely(neigh)) 1061 if (likely(neigh))
1053 goto send_using_neigh; 1062 goto send_using_neigh;
1054 ipoib_mcast_send(dev, cb->hwaddr, skb); 1063 ipoib_mcast_send(dev, phdr->hwaddr, skb);
1055 return NETDEV_TX_OK; 1064 return NETDEV_TX_OK;
1056 } 1065 }
1057 1066
@@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1060 case htons(ETH_P_IP): 1069 case htons(ETH_P_IP):
1061 case htons(ETH_P_IPV6): 1070 case htons(ETH_P_IPV6):
1062 case htons(ETH_P_TIPC): 1071 case htons(ETH_P_TIPC):
1063 neigh = ipoib_neigh_get(dev, cb->hwaddr); 1072 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1064 if (unlikely(!neigh)) { 1073 if (unlikely(!neigh)) {
1065 neigh_add_path(skb, cb->hwaddr, dev); 1074 neigh_add_path(skb, phdr->hwaddr, dev);
1066 return NETDEV_TX_OK; 1075 return NETDEV_TX_OK;
1067 } 1076 }
1068 break; 1077 break;
1069 case htons(ETH_P_ARP): 1078 case htons(ETH_P_ARP):
1070 case htons(ETH_P_RARP): 1079 case htons(ETH_P_RARP):
1071 /* for unicast ARP and RARP should always perform path find */ 1080 /* for unicast ARP and RARP should always perform path find */
1072 unicast_arp_send(skb, dev, cb); 1081 unicast_arp_send(skb, dev, phdr);
1073 return NETDEV_TX_OK; 1082 return NETDEV_TX_OK;
1074 default: 1083 default:
1075 /* ethertype not supported by IPoIB */ 1084 /* ethertype not supported by IPoIB */
@@ -1086,11 +1095,13 @@ send_using_neigh:
1086 goto unref; 1095 goto unref;
1087 } 1096 }
1088 } else if (neigh->ah) { 1097 } else if (neigh->ah) {
1089 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr)); 1098 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
1090 goto unref; 1099 goto unref;
1091 } 1100 }
1092 1101
1093 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 1102 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1103 /* put pseudoheader back on for next time */
1104 skb_push(skb, sizeof(*phdr));
1094 spin_lock_irqsave(&priv->lock, flags); 1105 spin_lock_irqsave(&priv->lock, flags);
1095 __skb_queue_tail(&neigh->queue, skb); 1106 __skb_queue_tail(&neigh->queue, skb);
1096 spin_unlock_irqrestore(&priv->lock, flags); 1107 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
1122 unsigned short type, 1133 unsigned short type,
1123 const void *daddr, const void *saddr, unsigned len) 1134 const void *daddr, const void *saddr, unsigned len)
1124{ 1135{
1136 struct ipoib_pseudo_header *phdr;
1125 struct ipoib_header *header; 1137 struct ipoib_header *header;
1126 struct ipoib_cb *cb = ipoib_skb_cb(skb);
1127 1138
1128 header = (struct ipoib_header *) skb_push(skb, sizeof *header); 1139 header = (struct ipoib_header *) skb_push(skb, sizeof *header);
1129 1140
@@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
1132 1143
1133 /* 1144 /*
1134 * we don't rely on dst_entry structure, always stuff the 1145 * we don't rely on dst_entry structure, always stuff the
1135 * destination address into skb->cb so we can figure out where 1146 * destination address into skb hard header so we can figure out where
1136 * to send the packet later. 1147 * to send the packet later.
1137 */ 1148 */
1138 memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); 1149 phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
1150 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
1139 1151
1140 return sizeof *header; 1152 return IPOIB_HARD_LEN;
1141} 1153}
1142 1154
1143static void ipoib_set_mcast_list(struct net_device *dev) 1155static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev)
1759 1771
1760 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 1772 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1761 1773
1762 dev->hard_header_len = IPOIB_ENCAP_LEN; 1774 dev->hard_header_len = IPOIB_HARD_LEN;
1763 dev->addr_len = INFINIBAND_ALEN; 1775 dev->addr_len = INFINIBAND_ALEN;
1764 dev->type = ARPHRD_INFINIBAND; 1776 dev->type = ARPHRD_INFINIBAND;
1765 dev->tx_queue_len = ipoib_sendq_size * 2; 1777 dev->tx_queue_len = ipoib_sendq_size * 2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d3394b6add24..1909dd252c94 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
796 __ipoib_mcast_add(dev, mcast); 796 __ipoib_mcast_add(dev, mcast);
797 list_add_tail(&mcast->list, &priv->multicast_list); 797 list_add_tail(&mcast->list, &priv->multicast_list);
798 } 798 }
799 if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) 799 if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
800 /* put pseudoheader back on for next time */
801 skb_push(skb, sizeof(struct ipoib_pseudo_header));
800 skb_queue_tail(&mcast->pkt_queue, skb); 802 skb_queue_tail(&mcast->pkt_queue, skb);
801 else { 803 } else {
802 ++dev->stats.tx_dropped; 804 ++dev->stats.tx_dropped;
803 dev_kfree_skb_any(skb); 805 dev_kfree_skb_any(skb);
804 } 806 }
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 76fb8552c9d9..ef63d24fef81 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -256,6 +256,7 @@ static const struct of_device_id b53_mmap_of_table[] = {
256 { .compatible = "brcm,bcm63xx-switch" }, 256 { .compatible = "brcm,bcm63xx-switch" },
257 { /* sentinel */ }, 257 { /* sentinel */ },
258}; 258};
259MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
259 260
260static struct platform_driver b53_mmap_driver = { 261static struct platform_driver b53_mmap_driver = {
261 .probe = b53_mmap_probe, 262 .probe = b53_mmap_probe,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e218887f18b7..e3ee27ce13dd 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1133,6 +1133,20 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
1133 return 0; 1133 return 0;
1134} 1134}
1135 1135
1136static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
1137{
1138 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1139
1140 /* For a kernel about to be kexec'd we want to keep the GPHY on for a
1141 * successful MDIO bus scan to occur. If we did turn off the GPHY
1142 * before (e.g: port_disable), this will also power it back on.
1143 *
1144 * Do not rely on kexec_in_progress, just power the PHY on.
1145 */
1146 if (priv->hw_params.num_gphy == 1)
1147 bcm_sf2_gphy_enable_set(priv->dev->ds, true);
1148}
1149
1136#ifdef CONFIG_PM_SLEEP 1150#ifdef CONFIG_PM_SLEEP
1137static int bcm_sf2_suspend(struct device *dev) 1151static int bcm_sf2_suspend(struct device *dev)
1138{ 1152{
@@ -1158,10 +1172,12 @@ static const struct of_device_id bcm_sf2_of_match[] = {
1158 { .compatible = "brcm,bcm7445-switch-v4.0" }, 1172 { .compatible = "brcm,bcm7445-switch-v4.0" },
1159 { /* sentinel */ }, 1173 { /* sentinel */ },
1160}; 1174};
1175MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
1161 1176
1162static struct platform_driver bcm_sf2_driver = { 1177static struct platform_driver bcm_sf2_driver = {
1163 .probe = bcm_sf2_sw_probe, 1178 .probe = bcm_sf2_sw_probe,
1164 .remove = bcm_sf2_sw_remove, 1179 .remove = bcm_sf2_sw_remove,
1180 .shutdown = bcm_sf2_sw_shutdown,
1165 .driver = { 1181 .driver = {
1166 .name = "brcm-sf2", 1182 .name = "brcm-sf2",
1167 .of_match_table = bcm_sf2_of_match, 1183 .of_match_table = bcm_sf2_of_match,
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index b047fd607b83..00c38bf151e6 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1358,6 +1358,7 @@ static const struct of_device_id nb8800_dt_ids[] = {
1358 }, 1358 },
1359 { } 1359 { }
1360}; 1360};
1361MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
1361 1362
1362static int nb8800_probe(struct platform_device *pdev) 1363static int nb8800_probe(struct platform_device *pdev)
1363{ 1364{
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index ae364c74baf3..537090952c45 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1126,7 +1126,8 @@ out_freeirq:
1126 free_irq(dev->irq, dev); 1126 free_irq(dev->irq, dev);
1127 1127
1128out_phy_disconnect: 1128out_phy_disconnect:
1129 phy_disconnect(phydev); 1129 if (priv->has_phy)
1130 phy_disconnect(phydev);
1130 1131
1131 return ret; 1132 return ret;
1132} 1133}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 856379cbb402..31ca204b38d2 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1449,7 +1449,7 @@ static int bgmac_phy_connect(struct bgmac *bgmac)
1449 phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, 1449 phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
1450 PHY_INTERFACE_MODE_MII); 1450 PHY_INTERFACE_MODE_MII);
1451 if (IS_ERR(phy_dev)) { 1451 if (IS_ERR(phy_dev)) {
1452 dev_err(bgmac->dev, "PHY connecton failed\n"); 1452 dev_err(bgmac->dev, "PHY connection failed\n");
1453 return PTR_ERR(phy_dev); 1453 return PTR_ERR(phy_dev);
1454 } 1454 }
1455 1455
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 27f11a5d5fe2..b3791b394715 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -271,22 +271,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
271static u32 271static u32
272bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) 272bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
273{ 273{
274 unsigned long flags;
274 u32 val; 275 u32 val;
275 276
276 spin_lock_bh(&bp->indirect_lock); 277 spin_lock_irqsave(&bp->indirect_lock, flags);
277 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); 278 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
278 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); 279 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
279 spin_unlock_bh(&bp->indirect_lock); 280 spin_unlock_irqrestore(&bp->indirect_lock, flags);
280 return val; 281 return val;
281} 282}
282 283
283static void 284static void
284bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) 285bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
285{ 286{
286 spin_lock_bh(&bp->indirect_lock); 287 unsigned long flags;
288
289 spin_lock_irqsave(&bp->indirect_lock, flags);
287 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); 290 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
288 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); 291 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
289 spin_unlock_bh(&bp->indirect_lock); 292 spin_unlock_irqrestore(&bp->indirect_lock, flags);
290} 293}
291 294
292static void 295static void
@@ -304,8 +307,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
304static void 307static void
305bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) 308bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
306{ 309{
310 unsigned long flags;
311
307 offset += cid_addr; 312 offset += cid_addr;
308 spin_lock_bh(&bp->indirect_lock); 313 spin_lock_irqsave(&bp->indirect_lock, flags);
309 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { 314 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
310 int i; 315 int i;
311 316
@@ -322,7 +327,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
322 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); 327 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
323 BNX2_WR(bp, BNX2_CTX_DATA, val); 328 BNX2_WR(bp, BNX2_CTX_DATA, val);
324 } 329 }
325 spin_unlock_bh(&bp->indirect_lock); 330 spin_unlock_irqrestore(&bp->indirect_lock, flags);
326} 331}
327 332
328#ifdef BCM_CNIC 333#ifdef BCM_CNIC
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 20fe6a8c35c1..0cee4c0283f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15241,7 +15241,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
15241 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); 15241 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
15242 bp->cyclecounter.read = bnx2x_cyclecounter_read; 15242 bp->cyclecounter.read = bnx2x_cyclecounter_read;
15243 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); 15243 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
15244 bp->cyclecounter.shift = 1; 15244 bp->cyclecounter.shift = 0;
15245 bp->cyclecounter.mult = 1; 15245 bp->cyclecounter.mult = 1;
15246} 15246}
15247 15247
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f320497368f4..57eb4e1345cb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4057,7 +4057,7 @@ static void cfg_queues(struct adapter *adap)
4057 * capped by the number of available cores. 4057 * capped by the number of available cores.
4058 */ 4058 */
4059 if (n10g) { 4059 if (n10g) {
4060 i = num_online_cpus(); 4060 i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
4061 s->ofldqsets = roundup(i, adap->params.nports); 4061 s->ofldqsets = roundup(i, adap->params.nports);
4062 } else { 4062 } else {
4063 s->ofldqsets = adap->params.nports; 4063 s->ofldqsets = adap->params.nports;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 0945fa49a5dd..2471ff465d5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
135} 135}
136 136
137static int alloc_uld_rxqs(struct adapter *adap, 137static int alloc_uld_rxqs(struct adapter *adap,
138 struct sge_uld_rxq_info *rxq_info, 138 struct sge_uld_rxq_info *rxq_info, bool lro)
139 unsigned int nq, unsigned int offset, bool lro)
140{ 139{
141 struct sge *s = &adap->sge; 140 struct sge *s = &adap->sge;
142 struct sge_ofld_rxq *q = rxq_info->uldrxq + offset; 141 unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
143 unsigned short *ids = rxq_info->rspq_id + offset; 142 struct sge_ofld_rxq *q = rxq_info->uldrxq;
144 unsigned int per_chan = nq / adap->params.nports; 143 unsigned short *ids = rxq_info->rspq_id;
145 unsigned int bmap_idx = 0; 144 unsigned int bmap_idx = 0;
146 int i, err, msi_idx; 145 unsigned int per_chan;
146 int i, err, msi_idx, que_idx = 0;
147
148 per_chan = rxq_info->nrxq / adap->params.nports;
147 149
148 if (adap->flags & USING_MSIX) 150 if (adap->flags & USING_MSIX)
149 msi_idx = 1; 151 msi_idx = 1;
@@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
151 msi_idx = -((int)s->intrq.abs_id + 1); 153 msi_idx = -((int)s->intrq.abs_id + 1);
152 154
153 for (i = 0; i < nq; i++, q++) { 155 for (i = 0; i < nq; i++, q++) {
156 if (i == rxq_info->nrxq) {
157 /* start allocation of concentrator queues */
158 per_chan = rxq_info->nciq / adap->params.nports;
159 que_idx = 0;
160 }
161
154 if (msi_idx >= 0) { 162 if (msi_idx >= 0) {
155 bmap_idx = get_msix_idx_from_bmap(adap); 163 bmap_idx = get_msix_idx_from_bmap(adap);
156 msi_idx = adap->msix_info_ulds[bmap_idx].idx; 164 msi_idx = adap->msix_info_ulds[bmap_idx].idx;
157 } 165 }
158 err = t4_sge_alloc_rxq(adap, &q->rspq, false, 166 err = t4_sge_alloc_rxq(adap, &q->rspq, false,
159 adap->port[i / per_chan], 167 adap->port[que_idx++ / per_chan],
160 msi_idx, 168 msi_idx,
161 q->fl.size ? &q->fl : NULL, 169 q->fl.size ? &q->fl : NULL,
162 uldrx_handler, 170 uldrx_handler,
@@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap,
165 if (err) 173 if (err)
166 goto freeout; 174 goto freeout;
167 if (msi_idx >= 0) 175 if (msi_idx >= 0)
168 rxq_info->msix_tbl[i + offset] = bmap_idx; 176 rxq_info->msix_tbl[i] = bmap_idx;
169 memset(&q->stats, 0, sizeof(q->stats)); 177 memset(&q->stats, 0, sizeof(q->stats));
170 if (ids) 178 if (ids)
171 ids[i] = q->rspq.abs_id; 179 ids[i] = q->rspq.abs_id;
172 } 180 }
173 return 0; 181 return 0;
174freeout: 182freeout:
175 q = rxq_info->uldrxq + offset; 183 q = rxq_info->uldrxq;
176 for ( ; i; i--, q++) { 184 for ( ; i; i--, q++) {
177 if (q->rspq.desc) 185 if (q->rspq.desc)
178 free_rspq_fl(adap, &q->rspq, 186 free_rspq_fl(adap, &q->rspq,
179 q->fl.size ? &q->fl : NULL); 187 q->fl.size ? &q->fl : NULL);
180 } 188 }
181
182 /* We need to free rxq also in case of ciq allocation failure */
183 if (offset) {
184 q = rxq_info->uldrxq + offset;
185 for ( ; i; i--, q++) {
186 if (q->rspq.desc)
187 free_rspq_fl(adap, &q->rspq,
188 q->fl.size ? &q->fl : NULL);
189 }
190 }
191 return err; 189 return err;
192} 190}
193 191
@@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
205 return -ENOMEM; 203 return -ENOMEM;
206 } 204 }
207 205
208 ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) && 206 ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
209 !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
210 rxq_info->nrxq, lro));
211 207
212 /* Tell uP to route control queue completions to rdma rspq */ 208 /* Tell uP to route control queue completions to rdma rspq */
213 if (adap->flags & FULL_INIT_DONE && 209 if (adap->flags & FULL_INIT_DONE &&
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 539de764bbd3..cbd68a8fe2e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
210 210
211 /* Unbind queue from any existing class */ 211 /* Unbind queue from any existing class */
212 err = t4_sched_queue_unbind(pi, p); 212 err = t4_sched_queue_unbind(pi, p);
213 if (err) 213 if (err) {
214 t4_free_mem(qe);
214 goto out; 215 goto out;
216 }
215 217
216 /* Bind queue to specified class */ 218 /* Bind queue to specified class */
217 memset(qe, 0, sizeof(*qe)); 219 memset(qe, 0, sizeof(*qe));
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index e572a527b18d..36bc2c71fba9 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq)
169{ 169{
170 unsigned int wait; 170 unsigned int wait;
171 struct vnic_dev *vdev = rq->vdev; 171 struct vnic_dev *vdev = rq->vdev;
172 int i;
172 173
173 iowrite32(0, &rq->ctrl->enable); 174 /* Due to a race condition with clearing RQ "mini-cache" in hw, we need
175 * to disable the RQ twice to guarantee that stale descriptors are not
176 * used when this RQ is re-enabled.
177 */
178 for (i = 0; i < 2; i++) {
179 iowrite32(0, &rq->ctrl->enable);
174 180
175 /* Wait for HW to ACK disable request */ 181 /* Wait for HW to ACK disable request */
176 for (wait = 0; wait < 1000; wait++) { 182 for (wait = 20000; wait > 0; wait--)
177 if (!(ioread32(&rq->ctrl->running))) 183 if (!ioread32(&rq->ctrl->running))
178 return 0; 184 break;
179 udelay(10); 185 if (!wait) {
180 } 186 vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
187 rq->index);
181 188
182 vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index); 189 return -ETIMEDOUT;
190 }
191 }
183 192
184 return -ETIMEDOUT; 193 return 0;
185} 194}
186 195
187void vnic_rq_clean(struct vnic_rq *rq, 196void vnic_rq_clean(struct vnic_rq *rq,
@@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq,
212 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; 221 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
213 iowrite32(fetch_index, &rq->ctrl->posted_index); 222 iowrite32(fetch_index, &rq->ctrl->posted_index);
214 223
224 /* Anytime we write fetch_index, we need to re-write 0 to rq->enable
225 * to re-sync internal VIC state.
226 */
227 iowrite32(0, &rq->ctrl->enable);
228
215 vnic_dev_clear_desc_ring(&rq->ring); 229 vnic_dev_clear_desc_ring(&rq->ring);
216} 230}
217 231
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f928e6f79c89..223f35cc034c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -669,6 +669,7 @@ static const struct of_device_id nps_enet_dt_ids[] = {
669 { .compatible = "ezchip,nps-mgt-enet" }, 669 { .compatible = "ezchip,nps-mgt-enet" },
670 { /* Sentinel */ } 670 { /* Sentinel */ }
671}; 671};
672MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
672 673
673static struct platform_driver nps_enet_driver = { 674static struct platform_driver nps_enet_driver = {
674 .probe = nps_enet_probe, 675 .probe = nps_enet_probe,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 48a033e64423..5aa9d4ded214 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1430,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1430 skb_put(skb, pkt_len - 4); 1430 skb_put(skb, pkt_len - 4);
1431 data = skb->data; 1431 data = skb->data;
1432 1432
1433 if (!is_copybreak && need_swap)
1434 swap_buffer(data, pkt_len);
1435
1433#if !defined(CONFIG_M5272) 1436#if !defined(CONFIG_M5272)
1434 if (fep->quirks & FEC_QUIRK_HAS_RACC) 1437 if (fep->quirks & FEC_QUIRK_HAS_RACC)
1435 data = skb_pull_inline(skb, 2); 1438 data = skb_pull_inline(skb, 2);
1436#endif 1439#endif
1437 1440
1438 if (!is_copybreak && need_swap)
1439 swap_buffer(data, pkt_len);
1440
1441 /* Extract the enhanced buffer descriptor */ 1441 /* Extract the enhanced buffer descriptor */
1442 ebdp = NULL; 1442 ebdp = NULL;
1443 if (fep->bufdesc_ex) 1443 if (fep->bufdesc_ex)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 8d70377f6624..8ea3d95fa483 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2751,6 +2751,7 @@ static const struct of_device_id g_dsaf_match[] = {
2751 {.compatible = "hisilicon,hns-dsaf-v2"}, 2751 {.compatible = "hisilicon,hns-dsaf-v2"},
2752 {} 2752 {}
2753}; 2753};
2754MODULE_DEVICE_TABLE(of, g_dsaf_match);
2754 2755
2755static struct platform_driver g_dsaf_driver = { 2756static struct platform_driver g_dsaf_driver = {
2756 .probe = hns_dsaf_probe, 2757 .probe = hns_dsaf_probe,
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 33f4c483af0f..501eb2090ca6 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -563,6 +563,7 @@ static const struct of_device_id hns_mdio_match[] = {
563 {.compatible = "hisilicon,hns-mdio"}, 563 {.compatible = "hisilicon,hns-mdio"},
564 {} 564 {}
565}; 565};
566MODULE_DEVICE_TABLE(of, hns_mdio_match);
566 567
567static const struct acpi_device_id hns_mdio_acpi_match[] = { 568static const struct acpi_device_id hns_mdio_acpi_match[] = {
568 { "HISI0141", 0 }, 569 { "HISI0141", 0 },
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index bfe17d9c022d..5f44c5520fbc 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1190,7 +1190,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1190 if (!scrq) 1190 if (!scrq)
1191 return NULL; 1191 return NULL;
1192 1192
1193 scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2); 1193 scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
1194 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 1194 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1195 if (!scrq->msgs) { 1195 if (!scrq->msgs) {
1196 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 1196 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
@@ -1461,14 +1461,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1461 return rc; 1461 return rc;
1462 1462
1463req_rx_irq_failed: 1463req_rx_irq_failed:
1464 for (j = 0; j < i; j++) 1464 for (j = 0; j < i; j++) {
1465 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); 1465 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1466 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 1466 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
1467 }
1467 i = adapter->req_tx_queues; 1468 i = adapter->req_tx_queues;
1468req_tx_irq_failed: 1469req_tx_irq_failed:
1469 for (j = 0; j < i; j++) 1470 for (j = 0; j < i; j++) {
1470 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); 1471 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1471 irq_dispose_mapping(adapter->rx_scrq[j]->irq); 1472 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
1473 }
1472 release_sub_crqs_no_irqs(adapter); 1474 release_sub_crqs_no_irqs(adapter);
1473 return rc; 1475 return rc;
1474} 1476}
@@ -3232,6 +3234,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3232 spin_unlock_irqrestore(&adapter->inflight_lock, flags); 3234 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3233} 3235}
3234 3236
3237static void ibmvnic_xport_event(struct work_struct *work)
3238{
3239 struct ibmvnic_adapter *adapter = container_of(work,
3240 struct ibmvnic_adapter,
3241 ibmvnic_xport);
3242 struct device *dev = &adapter->vdev->dev;
3243 long rc;
3244
3245 ibmvnic_free_inflight(adapter);
3246 release_sub_crqs(adapter);
3247 if (adapter->migrated) {
3248 rc = ibmvnic_reenable_crq_queue(adapter);
3249 if (rc)
3250 dev_err(dev, "Error after enable rc=%ld\n", rc);
3251 adapter->migrated = false;
3252 rc = ibmvnic_send_crq_init(adapter);
3253 if (rc)
3254 dev_err(dev, "Error sending init rc=%ld\n", rc);
3255 }
3256}
3257
3235static void ibmvnic_handle_crq(union ibmvnic_crq *crq, 3258static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3236 struct ibmvnic_adapter *adapter) 3259 struct ibmvnic_adapter *adapter)
3237{ 3260{
@@ -3267,15 +3290,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3267 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { 3290 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3268 dev_info(dev, "Re-enabling adapter\n"); 3291 dev_info(dev, "Re-enabling adapter\n");
3269 adapter->migrated = true; 3292 adapter->migrated = true;
3270 ibmvnic_free_inflight(adapter); 3293 schedule_work(&adapter->ibmvnic_xport);
3271 release_sub_crqs(adapter);
3272 rc = ibmvnic_reenable_crq_queue(adapter);
3273 if (rc)
3274 dev_err(dev, "Error after enable rc=%ld\n", rc);
3275 adapter->migrated = false;
3276 rc = ibmvnic_send_crq_init(adapter);
3277 if (rc)
3278 dev_err(dev, "Error sending init rc=%ld\n", rc);
3279 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 3294 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3280 dev_info(dev, "Backing device failover detected\n"); 3295 dev_info(dev, "Backing device failover detected\n");
3281 netif_carrier_off(netdev); 3296 netif_carrier_off(netdev);
@@ -3284,8 +3299,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3284 /* The adapter lost the connection */ 3299 /* The adapter lost the connection */
3285 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", 3300 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3286 gen_crq->cmd); 3301 gen_crq->cmd);
3287 ibmvnic_free_inflight(adapter); 3302 schedule_work(&adapter->ibmvnic_xport);
3288 release_sub_crqs(adapter);
3289 } 3303 }
3290 return; 3304 return;
3291 case IBMVNIC_CRQ_CMD_RSP: 3305 case IBMVNIC_CRQ_CMD_RSP:
@@ -3654,6 +3668,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
3654 goto task_failed; 3668 goto task_failed;
3655 3669
3656 netdev->real_num_tx_queues = adapter->req_tx_queues; 3670 netdev->real_num_tx_queues = adapter->req_tx_queues;
3671 netdev->mtu = adapter->req_mtu;
3657 3672
3658 if (adapter->failover) { 3673 if (adapter->failover) {
3659 adapter->failover = false; 3674 adapter->failover = false;
@@ -3725,6 +3740,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3725 SET_NETDEV_DEV(netdev, &dev->dev); 3740 SET_NETDEV_DEV(netdev, &dev->dev);
3726 3741
3727 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp); 3742 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
3743 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
3728 3744
3729 spin_lock_init(&adapter->stats_lock); 3745 spin_lock_init(&adapter->stats_lock);
3730 3746
@@ -3792,6 +3808,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3792 } 3808 }
3793 3809
3794 netdev->real_num_tx_queues = adapter->req_tx_queues; 3810 netdev->real_num_tx_queues = adapter->req_tx_queues;
3811 netdev->mtu = adapter->req_mtu;
3795 3812
3796 rc = register_netdev(netdev); 3813 rc = register_netdev(netdev);
3797 if (rc) { 3814 if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index bfc84c7d0e11..dd775d951b73 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -27,7 +27,7 @@
27/**************************************************************************/ 27/**************************************************************************/
28 28
29#define IBMVNIC_NAME "ibmvnic" 29#define IBMVNIC_NAME "ibmvnic"
30#define IBMVNIC_DRIVER_VERSION "1.0" 30#define IBMVNIC_DRIVER_VERSION "1.0.1"
31#define IBMVNIC_INVALID_MAP -1 31#define IBMVNIC_INVALID_MAP -1
32#define IBMVNIC_STATS_TIMEOUT 1 32#define IBMVNIC_STATS_TIMEOUT 1
33/* basic structures plus 100 2k buffers */ 33/* basic structures plus 100 2k buffers */
@@ -1048,5 +1048,6 @@ struct ibmvnic_adapter {
1048 u8 map_id; 1048 u8 map_id;
1049 1049
1050 struct work_struct vnic_crq_init; 1050 struct work_struct vnic_crq_init;
1051 struct work_struct ibmvnic_xport;
1051 bool failover; 1052 bool failover;
1052}; 1053};
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2030d7c1dc94..6d61e443bdf8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -92,6 +92,7 @@
92#define I40E_AQ_LEN 256 92#define I40E_AQ_LEN 256
93#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ 93#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
94#define I40E_MAX_USER_PRIORITY 8 94#define I40E_MAX_USER_PRIORITY 8
95#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
95#define I40E_DEFAULT_MSG_ENABLE 4 96#define I40E_DEFAULT_MSG_ENABLE 4
96#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 97#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
97#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) 98#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ac1faee2a5b8..31c97e3937a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4641,29 +4641,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4641} 4641}
4642 4642
4643/** 4643/**
4644 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4645 * @pf: PF being queried
4646 *
4647 * Return a bitmap for first enabled traffic class for this PF.
4648 **/
4649static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4650{
4651 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4652 u8 i = 0;
4653
4654 if (!enabled_tc)
4655 return 0x1; /* TC0 */
4656
4657 /* Find the first enabled TC */
4658 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4659 if (enabled_tc & BIT(i))
4660 break;
4661 }
4662
4663 return BIT(i);
4664}
4665
4666/**
4667 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes 4644 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4668 * @pf: PF being queried 4645 * @pf: PF being queried
4669 * 4646 *
@@ -4673,7 +4650,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4673{ 4650{
4674 /* If DCB is not enabled for this PF then just return default TC */ 4651 /* If DCB is not enabled for this PF then just return default TC */
4675 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4652 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4676 return i40e_pf_get_default_tc(pf); 4653 return I40E_DEFAULT_TRAFFIC_CLASS;
4677 4654
4678 /* SFP mode we want PF to be enabled for all TCs */ 4655 /* SFP mode we want PF to be enabled for all TCs */
4679 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 4656 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4683,7 +4660,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4683 if (pf->hw.func_caps.iscsi) 4660 if (pf->hw.func_caps.iscsi)
4684 return i40e_get_iscsi_tc_map(pf); 4661 return i40e_get_iscsi_tc_map(pf);
4685 else 4662 else
4686 return i40e_pf_get_default_tc(pf); 4663 return I40E_DEFAULT_TRAFFIC_CLASS;
4687} 4664}
4688 4665
4689/** 4666/**
@@ -5029,7 +5006,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
5029 if (v == pf->lan_vsi) 5006 if (v == pf->lan_vsi)
5030 tc_map = i40e_pf_get_tc_map(pf); 5007 tc_map = i40e_pf_get_tc_map(pf);
5031 else 5008 else
5032 tc_map = i40e_pf_get_default_tc(pf); 5009 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
5033#ifdef I40E_FCOE 5010#ifdef I40E_FCOE
5034 if (pf->vsi[v]->type == I40E_VSI_FCOE) 5011 if (pf->vsi[v]->type == I40E_VSI_FCOE)
5035 tc_map = i40e_get_fcoe_tc_map(pf); 5012 tc_map = i40e_get_fcoe_tc_map(pf);
@@ -5717,7 +5694,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5717 u8 type; 5694 u8 type;
5718 5695
5719 /* Not DCB capable or capability disabled */ 5696 /* Not DCB capable or capability disabled */
5720 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 5697 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5721 return ret; 5698 return ret;
5722 5699
5723 /* Ignore if event is not for Nearest Bridge */ 5700 /* Ignore if event is not for Nearest Bridge */
@@ -7707,6 +7684,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
7707 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 7684 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7708 kfree(pf->msix_entries); 7685 kfree(pf->msix_entries);
7709 pf->msix_entries = NULL; 7686 pf->msix_entries = NULL;
7687 pci_disable_msix(pf->pdev);
7710 return -ENODEV; 7688 return -ENODEV;
7711 7689
7712 } else if (v_actual == I40E_MIN_MSIX) { 7690 } else if (v_actual == I40E_MIN_MSIX) {
@@ -9056,7 +9034,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9056 return 0; 9034 return 0;
9057 9035
9058 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, 9036 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9059 nlflags, 0, 0, filter_mask, NULL); 9037 0, 0, nlflags, filter_mask, NULL);
9060} 9038}
9061 9039
9062/* Hardware supports L4 tunnel length of 128B (=2^7) which includes 9040/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a244d9a67264..bd93d823cc25 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9135,10 +9135,14 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9135 goto fwd_add_err; 9135 goto fwd_add_err;
9136 fwd_adapter->pool = pool; 9136 fwd_adapter->pool = pool;
9137 fwd_adapter->real_adapter = adapter; 9137 fwd_adapter->real_adapter = adapter;
9138 err = ixgbe_fwd_ring_up(vdev, fwd_adapter); 9138
9139 if (err) 9139 if (netif_running(pdev)) {
9140 goto fwd_add_err; 9140 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
9141 netif_tx_start_all_queues(vdev); 9141 if (err)
9142 goto fwd_add_err;
9143 netif_tx_start_all_queues(vdev);
9144 }
9145
9142 return fwd_adapter; 9146 return fwd_adapter;
9143fwd_add_err: 9147fwd_add_err:
9144 /* unwind counter and free adapter struct */ 9148 /* unwind counter and free adapter struct */
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 55831188bc32..bf5cc55ba24c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2968,6 +2968,22 @@ static void set_params(struct mv643xx_eth_private *mp,
2968 mp->txq_count = pd->tx_queue_count ? : 1; 2968 mp->txq_count = pd->tx_queue_count ? : 1;
2969} 2969}
2970 2970
2971static int get_phy_mode(struct mv643xx_eth_private *mp)
2972{
2973 struct device *dev = mp->dev->dev.parent;
2974 int iface = -1;
2975
2976 if (dev->of_node)
2977 iface = of_get_phy_mode(dev->of_node);
2978
2979 /* Historical default if unspecified. We could also read/write
2980 * the interface state in the PSC1
2981 */
2982 if (iface < 0)
2983 iface = PHY_INTERFACE_MODE_GMII;
2984 return iface;
2985}
2986
2971static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2987static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2972 int phy_addr) 2988 int phy_addr)
2973{ 2989{
@@ -2994,7 +3010,7 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2994 "orion-mdio-mii", addr); 3010 "orion-mdio-mii", addr);
2995 3011
2996 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, 3012 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
2997 PHY_INTERFACE_MODE_GMII); 3013 get_phy_mode(mp));
2998 if (!IS_ERR(phydev)) { 3014 if (!IS_ERR(phydev)) {
2999 phy_addr_set(mp, addr); 3015 phy_addr_set(mp, addr);
3000 break; 3016 break;
@@ -3090,6 +3106,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
3090 if (!dev) 3106 if (!dev)
3091 return -ENOMEM; 3107 return -ENOMEM;
3092 3108
3109 SET_NETDEV_DEV(dev, &pdev->dev);
3093 mp = netdev_priv(dev); 3110 mp = netdev_priv(dev);
3094 platform_set_drvdata(pdev, mp); 3111 platform_set_drvdata(pdev, mp);
3095 3112
@@ -3129,7 +3146,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
3129 if (pd->phy_node) { 3146 if (pd->phy_node) {
3130 mp->phy = of_phy_connect(mp->dev, pd->phy_node, 3147 mp->phy = of_phy_connect(mp->dev, pd->phy_node,
3131 mv643xx_eth_adjust_link, 0, 3148 mv643xx_eth_adjust_link, 0,
3132 PHY_INTERFACE_MODE_GMII); 3149 get_phy_mode(mp));
3133 if (!mp->phy) 3150 if (!mp->phy)
3134 err = -ENODEV; 3151 err = -ENODEV;
3135 else 3152 else
@@ -3187,8 +3204,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
3187 dev->priv_flags |= IFF_UNICAST_FLT; 3204 dev->priv_flags |= IFF_UNICAST_FLT;
3188 dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; 3205 dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
3189 3206
3190 SET_NETDEV_DEV(dev, &pdev->dev);
3191
3192 if (mp->shared->win_protect) 3207 if (mp->shared->win_protect)
3193 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); 3208 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
3194 3209
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b1cef7a0f7ca..e36bebcab3f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2469,6 +2469,7 @@ err_comm_admin:
2469 kfree(priv->mfunc.master.slave_state); 2469 kfree(priv->mfunc.master.slave_state);
2470err_comm: 2470err_comm:
2471 iounmap(priv->mfunc.comm); 2471 iounmap(priv->mfunc.comm);
2472 priv->mfunc.comm = NULL;
2472err_vhcr: 2473err_vhcr:
2473 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, 2474 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2474 priv->mfunc.vhcr, 2475 priv->mfunc.vhcr,
@@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2537 int slave; 2538 int slave;
2538 u32 slave_read; 2539 u32 slave_read;
2539 2540
2541 /* If the comm channel has not yet been initialized,
2542 * skip reporting the internal error event to all
2543 * the communication channels.
2544 */
2545 if (!priv->mfunc.comm)
2546 return;
2547
2540 /* Report an internal error event to all 2548 /* Report an internal error event to all
2541 * communication channels. 2549 * communication channels.
2542 */ 2550 */
@@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2571 } 2579 }
2572 2580
2573 iounmap(priv->mfunc.comm); 2581 iounmap(priv->mfunc.comm);
2582 priv->mfunc.comm = NULL;
2574} 2583}
2575 2584
2576void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) 2585void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 08fc5fc56d43..a5fc46bbcbe2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq)
245{ 245{
246 u32 freq_khz = freq * 1000; 246 u32 freq_khz = freq * 1000;
247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; 247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
248 u64 tmp_rounded =
249 roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
250 roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
248 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? 251 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
249 max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1; 252 max_val_cycles : tmp_rounded;
250 /* calculate max possible multiplier in order to fit in 64bit */ 253 /* calculate max possible multiplier in order to fit in 64bit */
251 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); 254 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
252 255
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 132cea655920..e3be7e44ff51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
127 /* For TX we use the same irq per 127 /* For TX we use the same irq per
128 ring we assigned for the RX */ 128 ring we assigned for the RX */
129 struct mlx4_en_cq *rx_cq; 129 struct mlx4_en_cq *rx_cq;
130 130 int xdp_index;
131
132 /* The xdp tx irq must align with the rx ring that forwards to
133 * it, so reindex these from 0. This should only happen when
134 * tx_ring_num is not a multiple of rx_ring_num.
135 */
136 xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
137 if (xdp_index >= 0)
138 cq_idx = xdp_index;
131 cq_idx = cq_idx % priv->rx_ring_num; 139 cq_idx = cq_idx % priv->rx_ring_num;
132 rx_cq = priv->rx_cq[cq_idx]; 140 rx_cq = priv->rx_cq[cq_idx];
133 cq->vector = rx_cq->vector; 141 cq->vector = rx_cq->vector;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e703bed7b82..12c99a2655f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1733,6 +1733,13 @@ int mlx4_en_start_port(struct net_device *dev)
1733 udp_tunnel_get_rx_info(dev); 1733 udp_tunnel_get_rx_info(dev);
1734 1734
1735 priv->port_up = true; 1735 priv->port_up = true;
1736
1737 /* Process all completions if exist to prevent
1738 * the queues freezing if they are full
1739 */
1740 for (i = 0; i < priv->rx_ring_num; i++)
1741 napi_schedule(&priv->rx_cq[i]->napi);
1742
1736 netif_tx_start_all_queues(dev); 1743 netif_tx_start_all_queues(dev);
1737 netif_device_attach(dev); 1744 netif_device_attach(dev);
1738 1745
@@ -1910,8 +1917,9 @@ static void mlx4_en_clear_stats(struct net_device *dev)
1910 struct mlx4_en_dev *mdev = priv->mdev; 1917 struct mlx4_en_dev *mdev = priv->mdev;
1911 int i; 1918 int i;
1912 1919
1913 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1920 if (!mlx4_is_slave(mdev->dev))
1914 en_dbg(HW, priv, "Failed dumping statistics\n"); 1921 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1922 en_dbg(HW, priv, "Failed dumping statistics\n");
1915 1923
1916 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1924 memset(&priv->pstats, 0, sizeof(priv->pstats));
1917 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1925 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
@@ -2194,6 +2202,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2194 2202
2195 if (!shutdown) 2203 if (!shutdown)
2196 free_netdev(dev); 2204 free_netdev(dev);
2205 dev->ethtool_ops = NULL;
2197} 2206}
2198 2207
2199static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2208static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5aa8b751f417..59473a0ebcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
166 return PTR_ERR(mailbox); 166 return PTR_ERR(mailbox);
167 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, 167 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
168 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, 168 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
169 MLX4_CMD_WRAPPED); 169 MLX4_CMD_NATIVE);
170 if (err) 170 if (err)
171 goto out; 171 goto out;
172 172
@@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
322 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, 322 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
323 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, 323 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
324 0, MLX4_CMD_DUMP_ETH_STATS, 324 0, MLX4_CMD_DUMP_ETH_STATS,
325 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 325 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
326 if (err) 326 if (err)
327 goto out; 327 goto out;
328 } 328 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index b66e03d9711f..c06346a82496 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -118,6 +118,29 @@ mlx4_en_test_loopback_exit:
118 return !loopback_ok; 118 return !loopback_ok;
119} 119}
120 120
121static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
122{
123 struct mlx4_en_dev *mdev = priv->mdev;
124 int err = 0;
125 int i = 0;
126
127 err = mlx4_test_async(mdev->dev);
128 /* When not in MSI_X or slave, test only async */
129 if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
130 return err;
131
132 /* A loop over all completion vectors of current port,
133 * for each vector check whether it works by mapping command
134 * completions to that vector and performing a NOP command
135 */
136 for (i = 0; i < priv->rx_ring_num; i++) {
137 err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
138 if (err)
139 break;
140 }
141
142 return err;
143}
121 144
122static int mlx4_en_test_link(struct mlx4_en_priv *priv) 145static int mlx4_en_test_link(struct mlx4_en_priv *priv)
123{ 146{
@@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
151void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) 174void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
152{ 175{
153 struct mlx4_en_priv *priv = netdev_priv(dev); 176 struct mlx4_en_priv *priv = netdev_priv(dev);
154 struct mlx4_en_dev *mdev = priv->mdev;
155 int i, carrier_ok; 177 int i, carrier_ok;
156 178
157 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); 179 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
177 netif_carrier_on(dev); 199 netif_carrier_on(dev);
178 200
179 } 201 }
180 buf[0] = mlx4_test_interrupts(mdev->dev); 202 buf[0] = mlx4_en_test_interrupts(priv);
181 buf[1] = mlx4_en_test_link(priv); 203 buf[1] = mlx4_en_test_link(priv);
182 buf[2] = mlx4_en_test_speed(priv); 204 buf[2] = mlx4_en_test_speed(priv);
183 205
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cf8f8a72a801..cd3638e6fe25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
1361 kfree(priv->eq_table.uar_map); 1361 kfree(priv->eq_table.uar_map);
1362} 1362}
1363 1363
1364/* A test that verifies that we can accept interrupts on all 1364/* A test that verifies that we can accept interrupts
1365 * the irq vectors of the device. 1365 * on the vector allocated for asynchronous events
1366 */
1367int mlx4_test_async(struct mlx4_dev *dev)
1368{
1369 return mlx4_NOP(dev);
1370}
1371EXPORT_SYMBOL(mlx4_test_async);
1372
1373/* A test that verifies that we can accept interrupts
1374 * on the given irq vector of the tested port.
1366 * Interrupts are checked using the NOP command. 1375 * Interrupts are checked using the NOP command.
1367 */ 1376 */
1368int mlx4_test_interrupts(struct mlx4_dev *dev) 1377int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
1369{ 1378{
1370 struct mlx4_priv *priv = mlx4_priv(dev); 1379 struct mlx4_priv *priv = mlx4_priv(dev);
1371 int i;
1372 int err; 1380 int err;
1373 1381
1374 err = mlx4_NOP(dev); 1382 /* Temporary use polling for command completions */
1375 /* When not in MSI_X, there is only one irq to check */ 1383 mlx4_cmd_use_polling(dev);
1376 if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
1377 return err;
1378
1379 /* A loop over all completion vectors, for each vector we will check
1380 * whether it works by mapping command completions to that vector
1381 * and performing a NOP command
1382 */
1383 for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
1384 /* Make sure request_irq was called */
1385 if (!priv->eq_table.eq[i].have_irq)
1386 continue;
1387
1388 /* Temporary use polling for command completions */
1389 mlx4_cmd_use_polling(dev);
1390
1391 /* Map the new eq to handle all asynchronous events */
1392 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1393 priv->eq_table.eq[i].eqn);
1394 if (err) {
1395 mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
1396 mlx4_cmd_use_events(dev);
1397 break;
1398 }
1399 1384
1400 /* Go back to using events */ 1385 /* Map the new eq to handle all asynchronous events */
1401 mlx4_cmd_use_events(dev); 1386 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1402 err = mlx4_NOP(dev); 1387 priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
1388 if (err) {
1389 mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
1390 goto out;
1403 } 1391 }
1404 1392
1393 /* Go back to using events */
1394 mlx4_cmd_use_events(dev);
1395 err = mlx4_NOP(dev);
1396
1405 /* Return to default */ 1397 /* Return to default */
1398 mlx4_cmd_use_polling(dev);
1399out:
1406 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, 1400 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1407 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); 1401 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
1402 mlx4_cmd_use_events(dev);
1403
1408 return err; 1404 return err;
1409} 1405}
1410EXPORT_SYMBOL(mlx4_test_interrupts); 1406EXPORT_SYMBOL(mlx4_test_interrupt);
1411 1407
1412bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) 1408bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
1413{ 1409{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c41ab31a39f8..84bab9f0732e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,9 +49,9 @@ enum {
49extern void __buggy_use_of_MLX4_GET(void); 49extern void __buggy_use_of_MLX4_GET(void);
50extern void __buggy_use_of_MLX4_PUT(void); 50extern void __buggy_use_of_MLX4_PUT(void);
51 51
52static bool enable_qos = true; 52static bool enable_qos;
53module_param(enable_qos, bool, 0444); 53module_param(enable_qos, bool, 0444);
54MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); 54MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
55 55
56#define MLX4_GET(dest, source, offset) \ 56#define MLX4_GET(dest, source, offset) \
57 do { \ 57 do { \
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7183ac4135d2..6f4e67bc3538 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info,
1102 int i; 1102 int i;
1103 int err = 0; 1103 int err = 0;
1104 1104
1105 if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
1106 mlx4_err(mdev,
1107 "Requested port type for port %d is not supported on this HCA\n",
1108 info->port);
1109 err = -EINVAL;
1110 goto err_sup;
1111 }
1112
1105 mlx4_stop_sense(mdev); 1113 mlx4_stop_sense(mdev);
1106 mutex_lock(&priv->port_mutex); 1114 mutex_lock(&priv->port_mutex);
1107 info->tmp_type = port_type; 1115 info->tmp_type = port_type;
@@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info,
1147out: 1155out:
1148 mlx4_start_sense(mdev); 1156 mlx4_start_sense(mdev);
1149 mutex_unlock(&priv->port_mutex); 1157 mutex_unlock(&priv->port_mutex);
1150 1158err_sup:
1151 return err; 1159 return err;
1152} 1160}
1153 1161
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e4878f31e45d..88ee7d8a5923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -145,9 +145,10 @@ enum mlx4_resource {
145 RES_MTT, 145 RES_MTT,
146 RES_MAC, 146 RES_MAC,
147 RES_VLAN, 147 RES_VLAN,
148 RES_EQ, 148 RES_NPORT_ID,
149 RES_COUNTER, 149 RES_COUNTER,
150 RES_FS_RULE, 150 RES_FS_RULE,
151 RES_EQ,
151 MLX4_NUM_OF_RESOURCE_TYPE 152 MLX4_NUM_OF_RESOURCE_TYPE
152}; 153};
153 154
@@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1329 struct mlx4_cmd_info *cmd); 1330 struct mlx4_cmd_info *cmd);
1330int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, 1331int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
1331 int port, void *buf); 1332 int port, void *buf);
1332int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
1333 struct mlx4_cmd_mailbox *outbox);
1334int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, 1333int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
1335 struct mlx4_vhcr *vhcr, 1334 struct mlx4_vhcr *vhcr,
1336 struct mlx4_cmd_mailbox *inbox, 1335 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index c5b2064297a1..b656dd5772e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1728 return err; 1728 return err;
1729} 1729}
1730 1730
1731int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
1732 u32 in_mod, struct mlx4_cmd_mailbox *outbox)
1733{
1734 return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
1735 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
1736 MLX4_CMD_NATIVE);
1737}
1738
1739int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, 1731int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
1740 struct mlx4_vhcr *vhcr, 1732 struct mlx4_vhcr *vhcr,
1741 struct mlx4_cmd_mailbox *inbox, 1733 struct mlx4_cmd_mailbox *inbox,
1742 struct mlx4_cmd_mailbox *outbox, 1734 struct mlx4_cmd_mailbox *outbox,
1743 struct mlx4_cmd_info *cmd) 1735 struct mlx4_cmd_info *cmd)
1744{ 1736{
1745 if (slave != dev->caps.function) 1737 return 0;
1746 return 0;
1747 return mlx4_common_dump_eth_stats(dev, slave,
1748 vhcr->in_modifier, outbox);
1749} 1738}
1750 1739
1751int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, 1740int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 84d7857ccc27..c548beaaf910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1605 r->com.from_state = r->com.state; 1605 r->com.from_state = r->com.state;
1606 r->com.to_state = state; 1606 r->com.to_state = state;
1607 r->com.state = RES_EQ_BUSY; 1607 r->com.state = RES_EQ_BUSY;
1608 if (eq)
1609 *eq = r;
1610 } 1608 }
1611 } 1609 }
1612 1610
1613 spin_unlock_irq(mlx4_tlock(dev)); 1611 spin_unlock_irq(mlx4_tlock(dev));
1614 1612
1613 if (!err && eq)
1614 *eq = r;
1615
1615 return err; 1616 return err;
1616} 1617}
1617 1618
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 6cb38304669f..2c6e3c7b7417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -41,6 +41,13 @@
41 41
42#include "mlx5_core.h" 42#include "mlx5_core.h"
43 43
44struct mlx5_db_pgdir {
45 struct list_head list;
46 unsigned long *bitmap;
47 __be32 *db_page;
48 dma_addr_t db_dma;
49};
50
44/* Handling for queue buffers -- we allocate a bunch of memory and 51/* Handling for queue buffers -- we allocate a bunch of memory and
45 * register it in a memory region at HCA virtual address 0. 52 * register it in a memory region at HCA virtual address 0.
46 */ 53 */
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
102static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, 109static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
103 int node) 110 int node)
104{ 111{
112 u32 db_per_page = PAGE_SIZE / cache_line_size();
105 struct mlx5_db_pgdir *pgdir; 113 struct mlx5_db_pgdir *pgdir;
106 114
107 pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); 115 pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
108 if (!pgdir) 116 if (!pgdir)
109 return NULL; 117 return NULL;
110 118
111 bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); 119 pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
120 sizeof(unsigned long),
121 GFP_KERNEL);
122
123 if (!pgdir->bitmap) {
124 kfree(pgdir);
125 return NULL;
126 }
127
128 bitmap_fill(pgdir->bitmap, db_per_page);
112 129
113 pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, 130 pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
114 &pgdir->db_dma, node); 131 &pgdir->db_dma, node);
115 if (!pgdir->db_page) { 132 if (!pgdir->db_page) {
133 kfree(pgdir->bitmap);
116 kfree(pgdir); 134 kfree(pgdir);
117 return NULL; 135 return NULL;
118 } 136 }
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
123static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, 141static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
124 struct mlx5_db *db) 142 struct mlx5_db *db)
125{ 143{
144 u32 db_per_page = PAGE_SIZE / cache_line_size();
126 int offset; 145 int offset;
127 int i; 146 int i;
128 147
129 i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); 148 i = find_first_bit(pgdir->bitmap, db_per_page);
130 if (i >= MLX5_DB_PER_PAGE) 149 if (i >= db_per_page)
131 return -ENOMEM; 150 return -ENOMEM;
132 151
133 __clear_bit(i, pgdir->bitmap); 152 __clear_bit(i, pgdir->bitmap);
134 153
135 db->u.pgdir = pgdir; 154 db->u.pgdir = pgdir;
136 db->index = i; 155 db->index = i;
137 offset = db->index * L1_CACHE_BYTES; 156 offset = db->index * cache_line_size();
138 db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); 157 db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
139 db->dma = pgdir->db_dma + offset; 158 db->dma = pgdir->db_dma + offset;
140 159
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
181 200
182void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) 201void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
183{ 202{
203 u32 db_per_page = PAGE_SIZE / cache_line_size();
184 mutex_lock(&dev->priv.pgdir_mutex); 204 mutex_lock(&dev->priv.pgdir_mutex);
185 205
186 __set_bit(db->index, db->u.pgdir->bitmap); 206 __set_bit(db->index, db->u.pgdir->bitmap);
187 207
188 if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { 208 if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
189 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 209 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
190 db->u.pgdir->db_page, db->u.pgdir->db_dma); 210 db->u.pgdir->db_page, db->u.pgdir->db_dma);
191 list_del(&db->u.pgdir->list); 211 list_del(&db->u.pgdir->list);
212 kfree(db->u.pgdir->bitmap);
192 kfree(db->u.pgdir); 213 kfree(db->u.pgdir);
193 } 214 }
194 215
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 460363b66cb1..7a43502a89cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -85,6 +85,9 @@
85#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) 85#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
86 86
87#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) 87#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
88#define MLX5E_DEFAULT_LRO_TIMEOUT 32
89#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
90
88#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 91#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
89#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 92#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
90#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 93#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
@@ -221,6 +224,7 @@ struct mlx5e_params {
221 struct ieee_ets ets; 224 struct ieee_ets ets;
222#endif 225#endif
223 bool rx_am_enabled; 226 bool rx_am_enabled;
227 u32 lro_timeout;
224}; 228};
225 229
226struct mlx5e_tstamp { 230struct mlx5e_tstamp {
@@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
888void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); 892void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
889struct rtnl_link_stats64 * 893struct rtnl_link_stats64 *
890mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); 894mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
895u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
891 896
892#endif /* __MLX5_EN_H__ */ 897#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7eaf38020a8f..f4c687ce4c59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1971,9 +1971,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
1971 MLX5_SET(tirc, tirc, lro_max_ip_payload_size, 1971 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
1972 (priv->params.lro_wqe_sz - 1972 (priv->params.lro_wqe_sz -
1973 ROUGH_MAX_L2_L3_HDR_SZ) >> 8); 1973 ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
1974 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 1974 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
1975 MLX5_CAP_ETH(priv->mdev,
1976 lro_timer_supported_periods[2]));
1977} 1975}
1978 1976
1979void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) 1977void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
@@ -3401,6 +3399,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
3401 } 3399 }
3402} 3400}
3403 3401
3402u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
3403{
3404 int i;
3405
3406 /* The supported periods are organized in ascending order */
3407 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
3408 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
3409 break;
3410
3411 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
3412}
3413
3404static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, 3414static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
3405 struct net_device *netdev, 3415 struct net_device *netdev,
3406 const struct mlx5e_profile *profile, 3416 const struct mlx5e_profile *profile,
@@ -3419,6 +3429,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
3419 priv->profile = profile; 3429 priv->profile = profile;
3420 priv->ppriv = ppriv; 3430 priv->ppriv = ppriv;
3421 3431
3432 priv->params.lro_timeout =
3433 mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
3434
3422 priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 3435 priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
3423 3436
3424 /* set CQE compression */ 3437 /* set CQE compression */
@@ -4035,7 +4048,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
4035 const struct mlx5e_profile *profile = priv->profile; 4048 const struct mlx5e_profile *profile = priv->profile;
4036 struct net_device *netdev = priv->netdev; 4049 struct net_device *netdev = priv->netdev;
4037 4050
4038 unregister_netdev(netdev);
4039 destroy_workqueue(priv->wq); 4051 destroy_workqueue(priv->wq);
4040 if (profile->cleanup) 4052 if (profile->cleanup)
4041 profile->cleanup(priv); 4053 profile->cleanup(priv);
@@ -4052,6 +4064,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
4052 for (vport = 1; vport < total_vfs; vport++) 4064 for (vport = 1; vport < total_vfs; vport++)
4053 mlx5_eswitch_unregister_vport_rep(esw, vport); 4065 mlx5_eswitch_unregister_vport_rep(esw, vport);
4054 4066
4067 unregister_netdev(priv->netdev);
4055 mlx5e_detach(mdev, vpriv); 4068 mlx5e_detach(mdev, vpriv);
4056 mlx5e_destroy_netdev(mdev, priv); 4069 mlx5e_destroy_netdev(mdev, priv);
4057} 4070}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3c97da103d30..7fe6559e4ab3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
457 struct mlx5e_priv *priv = rep->priv_data; 457 struct mlx5e_priv *priv = rep->priv_data;
458 struct net_device *netdev = priv->netdev; 458 struct net_device *netdev = priv->netdev;
459 459
460 unregister_netdev(netdev);
460 mlx5e_detach_netdev(esw->dev, netdev); 461 mlx5e_detach_netdev(esw->dev, netdev);
461 mlx5e_destroy_netdev(esw->dev, priv); 462 mlx5e_destroy_netdev(esw->dev, priv);
462} 463}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index abbf2c369923..be1f7333ab7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work)
931 mutex_unlock(&esw->state_lock); 931 mutex_unlock(&esw->state_lock);
932} 932}
933 933
934static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, 934static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
935 struct mlx5_vport *vport) 935 struct mlx5_vport *vport)
936{ 936{
937 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 937 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
938 struct mlx5_flow_group *vlan_grp = NULL; 938 struct mlx5_flow_group *vlan_grp = NULL;
@@ -949,9 +949,11 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
949 int table_size = 2; 949 int table_size = 2;
950 int err = 0; 950 int err = 0;
951 951
952 if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) || 952 if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
953 !IS_ERR_OR_NULL(vport->egress.acl)) 953 return -EOPNOTSUPP;
954 return; 954
955 if (!IS_ERR_OR_NULL(vport->egress.acl))
956 return 0;
955 957
956 esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", 958 esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
957 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); 959 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
@@ -959,12 +961,12 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
959 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); 961 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
960 if (!root_ns) { 962 if (!root_ns) {
961 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); 963 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
962 return; 964 return -EIO;
963 } 965 }
964 966
965 flow_group_in = mlx5_vzalloc(inlen); 967 flow_group_in = mlx5_vzalloc(inlen);
966 if (!flow_group_in) 968 if (!flow_group_in)
967 return; 969 return -ENOMEM;
968 970
969 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); 971 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
970 if (IS_ERR(acl)) { 972 if (IS_ERR(acl)) {
@@ -1009,6 +1011,7 @@ out:
1009 mlx5_destroy_flow_group(vlan_grp); 1011 mlx5_destroy_flow_group(vlan_grp);
1010 if (err && !IS_ERR_OR_NULL(acl)) 1012 if (err && !IS_ERR_OR_NULL(acl))
1011 mlx5_destroy_flow_table(acl); 1013 mlx5_destroy_flow_table(acl);
1014 return err;
1012} 1015}
1013 1016
1014static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, 1017static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
@@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1041 vport->egress.acl = NULL; 1044 vport->egress.acl = NULL;
1042} 1045}
1043 1046
1044static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, 1047static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1045 struct mlx5_vport *vport) 1048 struct mlx5_vport *vport)
1046{ 1049{
1047 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1050 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1048 struct mlx5_core_dev *dev = esw->dev; 1051 struct mlx5_core_dev *dev = esw->dev;
@@ -1063,9 +1066,11 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1063 int table_size = 4; 1066 int table_size = 4;
1064 int err = 0; 1067 int err = 0;
1065 1068
1066 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) || 1069 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1067 !IS_ERR_OR_NULL(vport->ingress.acl)) 1070 return -EOPNOTSUPP;
1068 return; 1071
1072 if (!IS_ERR_OR_NULL(vport->ingress.acl))
1073 return 0;
1069 1074
1070 esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", 1075 esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
1071 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); 1076 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
@@ -1073,12 +1078,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1073 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); 1078 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
1074 if (!root_ns) { 1079 if (!root_ns) {
1075 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); 1080 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
1076 return; 1081 return -EIO;
1077 } 1082 }
1078 1083
1079 flow_group_in = mlx5_vzalloc(inlen); 1084 flow_group_in = mlx5_vzalloc(inlen);
1080 if (!flow_group_in) 1085 if (!flow_group_in)
1081 return; 1086 return -ENOMEM;
1082 1087
1083 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); 1088 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1084 if (IS_ERR(acl)) { 1089 if (IS_ERR(acl)) {
@@ -1167,6 +1172,7 @@ out:
1167 } 1172 }
1168 1173
1169 kvfree(flow_group_in); 1174 kvfree(flow_group_in);
1175 return err;
1170} 1176}
1171 1177
1172static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, 1178static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1225 return 0; 1231 return 0;
1226 } 1232 }
1227 1233
1228 esw_vport_enable_ingress_acl(esw, vport); 1234 err = esw_vport_enable_ingress_acl(esw, vport);
1235 if (err) {
1236 mlx5_core_warn(esw->dev,
1237 "failed to enable ingress acl (%d) on vport[%d]\n",
1238 err, vport->vport);
1239 return err;
1240 }
1229 1241
1230 esw_debug(esw->dev, 1242 esw_debug(esw->dev,
1231 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", 1243 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
@@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1299 return 0; 1311 return 0;
1300 } 1312 }
1301 1313
1302 esw_vport_enable_egress_acl(esw, vport); 1314 err = esw_vport_enable_egress_acl(esw, vport);
1315 if (err) {
1316 mlx5_core_warn(esw->dev,
1317 "failed to enable egress acl (%d) on vport[%d]\n",
1318 err, vport->vport);
1319 return err;
1320 }
1303 1321
1304 esw_debug(esw->dev, 1322 esw_debug(esw->dev,
1305 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", 1323 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5da2cc878582..89696048b045 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node)
436 fs_get_obj(ft, fg->node.parent); 436 fs_get_obj(ft, fg->node.parent);
437 dev = get_dev(&ft->node); 437 dev = get_dev(&ft->node);
438 438
439 if (ft->autogroup.active)
440 ft->autogroup.num_groups--;
441
439 if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) 442 if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
440 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", 443 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
441 fg->id, ft->id); 444 fg->id, ft->id);
@@ -879,7 +882,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *
879 tree_init_node(&fg->node, !is_auto_fg, del_flow_group); 882 tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
880 tree_add_node(&fg->node, &ft->node); 883 tree_add_node(&fg->node, &ft->node);
881 /* Add node to group list */ 884 /* Add node to group list */
882 list_add(&fg->node.list, ft->node.children.prev); 885 list_add(&fg->node.list, prev_fg);
883 886
884 return fg; 887 return fg;
885} 888}
@@ -893,7 +896,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
893 return ERR_PTR(-EPERM); 896 return ERR_PTR(-EPERM);
894 897
895 lock_ref_node(&ft->node); 898 lock_ref_node(&ft->node);
896 fg = create_flow_group_common(ft, fg_in, &ft->node.children, false); 899 fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
897 unlock_ref_node(&ft->node); 900 unlock_ref_node(&ft->node);
898 901
899 return fg; 902 return fg;
@@ -1012,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
1012 u32 *match_criteria) 1015 u32 *match_criteria)
1013{ 1016{
1014 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1017 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1015 struct list_head *prev = &ft->node.children; 1018 struct list_head *prev = ft->node.children.prev;
1016 unsigned int candidate_index = 0; 1019 unsigned int candidate_index = 0;
1017 struct mlx5_flow_group *fg; 1020 struct mlx5_flow_group *fg;
1018 void *match_criteria_addr; 1021 void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3a9195b4169d..3b026c151cf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
218 goto err_out; 218 goto err_out;
219 219
220 if (aging) { 220 if (aging) {
221 counter->cache.lastuse = jiffies;
221 counter->aging = true; 222 counter->aging = true;
222 223
223 spin_lock(&fc_stats->addlist_lock); 224 spin_lock(&fc_stats->addlist_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a05fb965c8d..5bcf93422ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -61,10 +61,15 @@ enum {
61enum { 61enum {
62 MLX5_NIC_IFC_FULL = 0, 62 MLX5_NIC_IFC_FULL = 0,
63 MLX5_NIC_IFC_DISABLED = 1, 63 MLX5_NIC_IFC_DISABLED = 1,
64 MLX5_NIC_IFC_NO_DRAM_NIC = 2 64 MLX5_NIC_IFC_NO_DRAM_NIC = 2,
65 MLX5_NIC_IFC_INVALID = 3
65}; 66};
66 67
67static u8 get_nic_interface(struct mlx5_core_dev *dev) 68enum {
69 MLX5_DROP_NEW_HEALTH_WORK,
70};
71
72static u8 get_nic_state(struct mlx5_core_dev *dev)
68{ 73{
69 return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; 74 return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
70} 75}
@@ -97,7 +102,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
97 struct mlx5_core_health *health = &dev->priv.health; 102 struct mlx5_core_health *health = &dev->priv.health;
98 struct health_buffer __iomem *h = health->health; 103 struct health_buffer __iomem *h = health->health;
99 104
100 if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED) 105 if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
101 return 1; 106 return 1;
102 107
103 if (ioread32be(&h->fw_ver) == 0xffffffff) 108 if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -127,7 +132,7 @@ unlock:
127 132
128static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) 133static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
129{ 134{
130 u8 nic_interface = get_nic_interface(dev); 135 u8 nic_interface = get_nic_state(dev);
131 136
132 switch (nic_interface) { 137 switch (nic_interface) {
133 case MLX5_NIC_IFC_FULL: 138 case MLX5_NIC_IFC_FULL:
@@ -149,8 +154,34 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
149 mlx5_disable_device(dev); 154 mlx5_disable_device(dev);
150} 155}
151 156
157static void health_recover(struct work_struct *work)
158{
159 struct mlx5_core_health *health;
160 struct delayed_work *dwork;
161 struct mlx5_core_dev *dev;
162 struct mlx5_priv *priv;
163 u8 nic_state;
164
165 dwork = container_of(work, struct delayed_work, work);
166 health = container_of(dwork, struct mlx5_core_health, recover_work);
167 priv = container_of(health, struct mlx5_priv, health);
168 dev = container_of(priv, struct mlx5_core_dev, priv);
169
170 nic_state = get_nic_state(dev);
171 if (nic_state == MLX5_NIC_IFC_INVALID) {
172 dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
173 return;
174 }
175
176 dev_err(&dev->pdev->dev, "starting health recovery flow\n");
177 mlx5_recover_device(dev);
178}
179
180/* How much time to wait until health resetting the driver (in msecs) */
181#define MLX5_RECOVERY_DELAY_MSECS 60000
152static void health_care(struct work_struct *work) 182static void health_care(struct work_struct *work)
153{ 183{
184 unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
154 struct mlx5_core_health *health; 185 struct mlx5_core_health *health;
155 struct mlx5_core_dev *dev; 186 struct mlx5_core_dev *dev;
156 struct mlx5_priv *priv; 187 struct mlx5_priv *priv;
@@ -160,6 +191,14 @@ static void health_care(struct work_struct *work)
160 dev = container_of(priv, struct mlx5_core_dev, priv); 191 dev = container_of(priv, struct mlx5_core_dev, priv);
161 mlx5_core_warn(dev, "handling bad device here\n"); 192 mlx5_core_warn(dev, "handling bad device here\n");
162 mlx5_handle_bad_state(dev); 193 mlx5_handle_bad_state(dev);
194
195 spin_lock(&health->wq_lock);
196 if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
197 schedule_delayed_work(&health->recover_work, recover_delay);
198 else
199 dev_err(&dev->pdev->dev,
200 "new health works are not permitted at this stage\n");
201 spin_unlock(&health->wq_lock);
163} 202}
164 203
165static const char *hsynd_str(u8 synd) 204static const char *hsynd_str(u8 synd)
@@ -272,7 +311,13 @@ static void poll_health(unsigned long data)
272 if (in_fatal(dev) && !health->sick) { 311 if (in_fatal(dev) && !health->sick) {
273 health->sick = true; 312 health->sick = true;
274 print_health_info(dev); 313 print_health_info(dev);
275 schedule_work(&health->work); 314 spin_lock(&health->wq_lock);
315 if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
316 queue_work(health->wq, &health->work);
317 else
318 dev_err(&dev->pdev->dev,
319 "new health works are not permitted at this stage\n");
320 spin_unlock(&health->wq_lock);
276 } 321 }
277} 322}
278 323
@@ -281,6 +326,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
281 struct mlx5_core_health *health = &dev->priv.health; 326 struct mlx5_core_health *health = &dev->priv.health;
282 327
283 init_timer(&health->timer); 328 init_timer(&health->timer);
329 health->sick = 0;
330 clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
284 health->health = &dev->iseg->health; 331 health->health = &dev->iseg->health;
285 health->health_counter = &dev->iseg->health_counter; 332 health->health_counter = &dev->iseg->health_counter;
286 333
@@ -297,11 +344,22 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
297 del_timer_sync(&health->timer); 344 del_timer_sync(&health->timer);
298} 345}
299 346
347void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
348{
349 struct mlx5_core_health *health = &dev->priv.health;
350
351 spin_lock(&health->wq_lock);
352 set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
353 spin_unlock(&health->wq_lock);
354 cancel_delayed_work_sync(&health->recover_work);
355 cancel_work_sync(&health->work);
356}
357
300void mlx5_health_cleanup(struct mlx5_core_dev *dev) 358void mlx5_health_cleanup(struct mlx5_core_dev *dev)
301{ 359{
302 struct mlx5_core_health *health = &dev->priv.health; 360 struct mlx5_core_health *health = &dev->priv.health;
303 361
304 flush_work(&health->work); 362 destroy_workqueue(health->wq);
305} 363}
306 364
307int mlx5_health_init(struct mlx5_core_dev *dev) 365int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -316,9 +374,13 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
316 374
317 strcpy(name, "mlx5_health"); 375 strcpy(name, "mlx5_health");
318 strcat(name, dev_name(&dev->pdev->dev)); 376 strcat(name, dev_name(&dev->pdev->dev));
377 health->wq = create_singlethread_workqueue(name);
319 kfree(name); 378 kfree(name);
320 379 if (!health->wq)
380 return -ENOMEM;
381 spin_lock_init(&health->wq_lock);
321 INIT_WORK(&health->work, health_care); 382 INIT_WORK(&health->work, health_care);
383 INIT_DELAYED_WORK(&health->recover_work, health_recover);
322 384
323 return 0; 385 return 0;
324} 386}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d9c3c70b29e4..d5433c49b2b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -844,12 +844,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
844 struct pci_dev *pdev = dev->pdev; 844 struct pci_dev *pdev = dev->pdev;
845 int err; 845 int err;
846 846
847 err = mlx5_query_hca_caps(dev);
848 if (err) {
849 dev_err(&pdev->dev, "query hca failed\n");
850 goto out;
851 }
852
853 err = mlx5_query_board_id(dev); 847 err = mlx5_query_board_id(dev);
854 if (err) { 848 if (err) {
855 dev_err(&pdev->dev, "query board id failed\n"); 849 dev_err(&pdev->dev, "query board id failed\n");
@@ -1023,6 +1017,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1023 1017
1024 mlx5_start_health_poll(dev); 1018 mlx5_start_health_poll(dev);
1025 1019
1020 err = mlx5_query_hca_caps(dev);
1021 if (err) {
1022 dev_err(&pdev->dev, "query hca failed\n");
1023 goto err_stop_poll;
1024 }
1025
1026 if (boot && mlx5_init_once(dev, priv)) { 1026 if (boot && mlx5_init_once(dev, priv)) {
1027 dev_err(&pdev->dev, "sw objs init failed\n"); 1027 dev_err(&pdev->dev, "sw objs init failed\n");
1028 goto err_stop_poll; 1028 goto err_stop_poll;
@@ -1313,10 +1313,16 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
1313 struct mlx5_priv *priv = &dev->priv; 1313 struct mlx5_priv *priv = &dev->priv;
1314 1314
1315 dev_info(&pdev->dev, "%s was called\n", __func__); 1315 dev_info(&pdev->dev, "%s was called\n", __func__);
1316
1316 mlx5_enter_error_state(dev); 1317 mlx5_enter_error_state(dev);
1317 mlx5_unload_one(dev, priv, false); 1318 mlx5_unload_one(dev, priv, false);
1318 pci_save_state(pdev); 1319 /* In case of kernel call save the pci state and drain health wq */
1319 mlx5_pci_disable_device(dev); 1320 if (state) {
1321 pci_save_state(pdev);
1322 mlx5_drain_health_wq(dev);
1323 mlx5_pci_disable_device(dev);
1324 }
1325
1320 return state == pci_channel_io_perm_failure ? 1326 return state == pci_channel_io_perm_failure ?
1321 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 1327 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
1322} 1328}
@@ -1373,11 +1379,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
1373 return PCI_ERS_RESULT_RECOVERED; 1379 return PCI_ERS_RESULT_RECOVERED;
1374} 1380}
1375 1381
1376void mlx5_disable_device(struct mlx5_core_dev *dev)
1377{
1378 mlx5_pci_err_detected(dev->pdev, 0);
1379}
1380
1381static void mlx5_pci_resume(struct pci_dev *pdev) 1382static void mlx5_pci_resume(struct pci_dev *pdev)
1382{ 1383{
1383 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1384 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -1427,6 +1428,18 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
1427 1428
1428MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); 1429MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
1429 1430
1431void mlx5_disable_device(struct mlx5_core_dev *dev)
1432{
1433 mlx5_pci_err_detected(dev->pdev, 0);
1434}
1435
1436void mlx5_recover_device(struct mlx5_core_dev *dev)
1437{
1438 mlx5_pci_disable_device(dev);
1439 if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
1440 mlx5_pci_resume(dev->pdev);
1441}
1442
1430static struct pci_driver mlx5_core_driver = { 1443static struct pci_driver mlx5_core_driver = {
1431 .name = DRIVER_NAME, 1444 .name = DRIVER_NAME,
1432 .id_table = mlx5_core_pci_table, 1445 .id_table = mlx5_core_pci_table,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 3d0cfb9f18f9..187662c8ea96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -83,6 +83,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
83 unsigned long param); 83 unsigned long param);
84void mlx5_enter_error_state(struct mlx5_core_dev *dev); 84void mlx5_enter_error_state(struct mlx5_core_dev *dev);
85void mlx5_disable_device(struct mlx5_core_dev *dev); 85void mlx5_disable_device(struct mlx5_core_dev *dev);
86void mlx5_recover_device(struct mlx5_core_dev *dev);
86int mlx5_sriov_init(struct mlx5_core_dev *dev); 87int mlx5_sriov_init(struct mlx5_core_dev *dev);
87void mlx5_sriov_cleanup(struct mlx5_core_dev *dev); 88void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
88int mlx5_sriov_attach(struct mlx5_core_dev *dev); 89int mlx5_sriov_attach(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index cc4fd61914d3..a57d5a81eb05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -209,6 +209,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
209static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) 209static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
210{ 210{
211 struct page *page; 211 struct page *page;
212 u64 zero_addr = 1;
212 u64 addr; 213 u64 addr;
213 int err; 214 int err;
214 int nid = dev_to_node(&dev->pdev->dev); 215 int nid = dev_to_node(&dev->pdev->dev);
@@ -218,26 +219,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
218 mlx5_core_warn(dev, "failed to allocate page\n"); 219 mlx5_core_warn(dev, "failed to allocate page\n");
219 return -ENOMEM; 220 return -ENOMEM;
220 } 221 }
222map:
221 addr = dma_map_page(&dev->pdev->dev, page, 0, 223 addr = dma_map_page(&dev->pdev->dev, page, 0,
222 PAGE_SIZE, DMA_BIDIRECTIONAL); 224 PAGE_SIZE, DMA_BIDIRECTIONAL);
223 if (dma_mapping_error(&dev->pdev->dev, addr)) { 225 if (dma_mapping_error(&dev->pdev->dev, addr)) {
224 mlx5_core_warn(dev, "failed dma mapping page\n"); 226 mlx5_core_warn(dev, "failed dma mapping page\n");
225 err = -ENOMEM; 227 err = -ENOMEM;
226 goto out_alloc; 228 goto err_mapping;
227 } 229 }
230
231 /* Firmware doesn't support page with physical address 0 */
232 if (addr == 0) {
233 zero_addr = addr;
234 goto map;
235 }
236
228 err = insert_page(dev, addr, page, func_id); 237 err = insert_page(dev, addr, page, func_id);
229 if (err) { 238 if (err) {
230 mlx5_core_err(dev, "failed to track allocated page\n"); 239 mlx5_core_err(dev, "failed to track allocated page\n");
231 goto out_mapping; 240 dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
241 DMA_BIDIRECTIONAL);
232 } 242 }
233 243
234 return 0; 244err_mapping:
235 245 if (err)
236out_mapping: 246 __free_page(page);
237 dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
238 247
239out_alloc: 248 if (zero_addr == 0)
240 __free_page(page); 249 dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
250 DMA_BIDIRECTIONAL);
241 251
242 return err; 252 return err;
243} 253}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index e742bd4e8894..912f71f84209 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1838,11 +1838,17 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
1838 .cmd_exec = mlxsw_pci_cmd_exec, 1838 .cmd_exec = mlxsw_pci_cmd_exec,
1839}; 1839};
1840 1840
1841static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci) 1841static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1842 const struct pci_device_id *id)
1842{ 1843{
1843 unsigned long end; 1844 unsigned long end;
1844 1845
1845 mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); 1846 mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
1847 if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
1848 msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1849 return 0;
1850 }
1851
1846 wmb(); /* reset needs to be written before we read control register */ 1852 wmb(); /* reset needs to be written before we read control register */
1847 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); 1853 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1848 do { 1854 do {
@@ -1909,7 +1915,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1909 mlxsw_pci->pdev = pdev; 1915 mlxsw_pci->pdev = pdev;
1910 pci_set_drvdata(pdev, mlxsw_pci); 1916 pci_set_drvdata(pdev, mlxsw_pci);
1911 1917
1912 err = mlxsw_pci_sw_reset(mlxsw_pci); 1918 err = mlxsw_pci_sw_reset(mlxsw_pci, id);
1913 if (err) { 1919 if (err) {
1914 dev_err(&pdev->dev, "Software reset failed\n"); 1920 dev_err(&pdev->dev, "Software reset failed\n");
1915 goto err_sw_reset; 1921 goto err_sw_reset;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 78fc557d6dd7..4573da2c5560 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -320,6 +320,8 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
320 lpm_tree); 320 lpm_tree);
321 if (err) 321 if (err)
322 goto err_left_struct_set; 322 goto err_left_struct_set;
323 memcpy(&lpm_tree->prefix_usage, prefix_usage,
324 sizeof(lpm_tree->prefix_usage));
323 return lpm_tree; 325 return lpm_tree;
324 326
325err_left_struct_set: 327err_left_struct_set:
@@ -343,7 +345,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
343 345
344 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { 346 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
345 lpm_tree = &mlxsw_sp->router.lpm_trees[i]; 347 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
346 if (lpm_tree->proto == proto && 348 if (lpm_tree->ref_count != 0 &&
349 lpm_tree->proto == proto &&
347 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, 350 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
348 prefix_usage)) 351 prefix_usage))
349 goto inc_ref_count; 352 goto inc_ref_count;
@@ -1820,19 +1823,17 @@ err_fib_entry_insert:
1820 return err; 1823 return err;
1821} 1824}
1822 1825
1823static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, 1826static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
1824 struct fib_entry_notifier_info *fen_info) 1827 struct fib_entry_notifier_info *fen_info)
1825{ 1828{
1826 struct mlxsw_sp_fib_entry *fib_entry; 1829 struct mlxsw_sp_fib_entry *fib_entry;
1827 1830
1828 if (mlxsw_sp->router.aborted) 1831 if (mlxsw_sp->router.aborted)
1829 return 0; 1832 return;
1830 1833
1831 fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info); 1834 fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
1832 if (!fib_entry) { 1835 if (!fib_entry)
1833 dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); 1836 return;
1834 return -ENOENT;
1835 }
1836 1837
1837 if (fib_entry->ref_count == 1) { 1838 if (fib_entry->ref_count == 1) {
1838 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); 1839 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
@@ -1840,7 +1841,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
1840 } 1841 }
1841 1842
1842 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); 1843 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
1843 return 0;
1844} 1844}
1845 1845
1846static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) 1846static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -1862,7 +1862,8 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
1862 if (err) 1862 if (err)
1863 return err; 1863 return err;
1864 1864
1865 mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0); 1865 mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
1866 MLXSW_SP_LPM_TREE_MIN);
1866 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); 1867 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
1867 if (err) 1868 if (err)
1868 return err; 1869 return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index c0c23e2f3275..92bda8703f87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1088,6 +1088,7 @@ err_port_stp_state_set:
1088err_port_admin_status_set: 1088err_port_admin_status_set:
1089err_port_mtu_set: 1089err_port_mtu_set:
1090err_port_speed_set: 1090err_port_speed_set:
1091 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1091err_port_swid_set: 1092err_port_swid_set:
1092err_port_system_port_mapping_set: 1093err_port_system_port_mapping_set:
1093port_not_usable: 1094port_not_usable:
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 1e8339a67f6e..32f2a45f4ab2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,4 +107,7 @@ config QEDE
107 ---help--- 107 ---help---
108 This enables the support for ... 108 This enables the support for ...
109 109
110config QED_RDMA
111 bool
112
110endif # NET_VENDOR_QLOGIC 113endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index cda0af7fbc20..967acf322c09 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
5 qed_selftest.o qed_dcbx.o qed_debug.o 5 qed_selftest.o qed_dcbx.o qed_debug.o
6qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o 6qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
7qed-$(CONFIG_QED_LL2) += qed_ll2.o 7qed-$(CONFIG_QED_LL2) += qed_ll2.o
8qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o 8qed-$(CONFIG_QED_RDMA) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 82370a1a59ad..0c42c240b5cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,13 +47,8 @@
47#define TM_ALIGN BIT(TM_SHIFT) 47#define TM_ALIGN BIT(TM_SHIFT)
48#define TM_ELEM_SIZE 4 48#define TM_ELEM_SIZE 4
49 49
50/* ILT constants */
51#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
52/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ 50/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
53#define ILT_DEFAULT_HW_P_SIZE 4 51#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
54#else
55#define ILT_DEFAULT_HW_P_SIZE 3
56#endif
57 52
58#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) 53#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
59#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET 54#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
349 return NULL; 344 return NULL;
350} 345}
351 346
352void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) 347static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
353{ 348{
354 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; 349 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
355 350
356 p_mgr->srq_count = num_srqs; 351 p_mgr->srq_count = num_srqs;
357} 352}
358 353
359u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) 354static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
360{ 355{
361 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; 356 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
362 357
@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
1804 return 0; 1799 return 0;
1805} 1800}
1806 1801
1807void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, 1802static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
1808 struct qed_rdma_pf_params *p_params) 1803 struct qed_rdma_pf_params *p_params)
1809{ 1804{
1810 u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs; 1805 u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
1811 enum protocol_type proto; 1806 enum protocol_type proto;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 130da1c0490b..a4789a93b692 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
1190 if (!dcbx_info) 1190 if (!dcbx_info)
1191 return -ENOMEM; 1191 return -ENOMEM;
1192 1192
1193 memset(dcbx_info, 0, sizeof(*dcbx_info));
1193 rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); 1194 rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
1194 if (rc) { 1195 if (rc) {
1195 kfree(dcbx_info); 1196 kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
1225 if (!dcbx_info) 1226 if (!dcbx_info)
1226 return NULL; 1227 return NULL;
1227 1228
1229 memset(dcbx_info, 0, sizeof(*dcbx_info));
1228 if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { 1230 if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
1229 kfree(dcbx_info); 1231 kfree(dcbx_info);
1230 return NULL; 1232 return NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 88e7d5bef909..68f19ca57f96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -405,7 +405,7 @@ struct phy_defs {
405/***************************** Constant Arrays *******************************/ 405/***************************** Constant Arrays *******************************/
406 406
407/* Debug arrays */ 407/* Debug arrays */
408static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} }; 408static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
409 409
410/* Chip constant definitions array */ 410/* Chip constant definitions array */
411static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { 411static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4028} 4028}
4029 4029
4030/* Dump MCP Trace */ 4030/* Dump MCP Trace */
4031enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, 4031static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4032 struct qed_ptt *p_ptt, 4032 struct qed_ptt *p_ptt,
4033 u32 *dump_buf, 4033 u32 *dump_buf,
4034 bool dump, u32 *num_dumped_dwords) 4034 bool dump, u32 *num_dumped_dwords)
4035{ 4035{
4036 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; 4036 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4037 u32 trace_meta_size_dwords, running_bundle_id, offset = 0; 4037 u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4130} 4130}
4131 4131
4132/* Dump GRC FIFO */ 4132/* Dump GRC FIFO */
4133enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, 4133static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4134 struct qed_ptt *p_ptt, 4134 struct qed_ptt *p_ptt,
4135 u32 *dump_buf, 4135 u32 *dump_buf,
4136 bool dump, u32 *num_dumped_dwords) 4136 bool dump, u32 *num_dumped_dwords)
4137{ 4137{
4138 u32 offset = 0, dwords_read, size_param_offset; 4138 u32 offset = 0, dwords_read, size_param_offset;
4139 bool fifo_has_data; 4139 bool fifo_has_data;
@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4192} 4192}
4193 4193
4194/* Dump IGU FIFO */ 4194/* Dump IGU FIFO */
4195enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, 4195static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4196 struct qed_ptt *p_ptt, 4196 struct qed_ptt *p_ptt,
4197 u32 *dump_buf, 4197 u32 *dump_buf,
4198 bool dump, u32 *num_dumped_dwords) 4198 bool dump, u32 *num_dumped_dwords)
4199{ 4199{
4200 u32 offset = 0, dwords_read, size_param_offset; 4200 u32 offset = 0, dwords_read, size_param_offset;
4201 bool fifo_has_data; 4201 bool fifo_has_data;
@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4255} 4255}
4256 4256
4257/* Protection Override dump */ 4257/* Protection Override dump */
4258enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, 4258static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4259 struct qed_ptt *p_ptt, 4259 struct qed_ptt *p_ptt,
4260 u32 *dump_buf, 4260 u32 *dump_buf,
4261 bool dump, u32 *num_dumped_dwords) 4261 bool dump,
4262 u32 *num_dumped_dwords)
4262{ 4263{
4263 u32 offset = 0, size_param_offset, override_window_dwords; 4264 u32 offset = 0, size_param_offset, override_window_dwords;
4264 4265
@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
6339} 6340}
6340 6341
6341/* Wrapper for unifying the idle_chk and mcp_trace api */ 6342/* Wrapper for unifying the idle_chk and mcp_trace api */
6342enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, 6343static enum dbg_status
6343 u32 *dump_buf, 6344qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
6344 u32 num_dumped_dwords, 6345 u32 *dump_buf,
6345 char *results_buf) 6346 u32 num_dumped_dwords,
6347 char *results_buf)
6346{ 6348{
6347 u32 num_errors, num_warnnings; 6349 u32 num_errors, num_warnnings;
6348 6350
@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
6413 6415
6414#define QED_RESULTS_BUF_MIN_SIZE 16 6416#define QED_RESULTS_BUF_MIN_SIZE 16
6415/* Generic function for decoding debug feature info */ 6417/* Generic function for decoding debug feature info */
6416enum dbg_status format_feature(struct qed_hwfn *p_hwfn, 6418static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
6417 enum qed_dbg_features feature_idx) 6419 enum qed_dbg_features feature_idx)
6418{ 6420{
6419 struct qed_dbg_feature *feature = 6421 struct qed_dbg_feature *feature =
6420 &p_hwfn->cdev->dbg_params.features[feature_idx]; 6422 &p_hwfn->cdev->dbg_params.features[feature_idx];
@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
6480} 6482}
6481 6483
6482/* Generic function for performing the dump of a debug feature. */ 6484/* Generic function for performing the dump of a debug feature. */
6483enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 6485static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
6484 enum qed_dbg_features feature_idx) 6486 struct qed_ptt *p_ptt,
6487 enum qed_dbg_features feature_idx)
6485{ 6488{
6486 struct qed_dbg_feature *feature = 6489 struct qed_dbg_feature *feature =
6487 &p_hwfn->cdev->dbg_params.features[feature_idx]; 6490 &p_hwfn->cdev->dbg_params.features[feature_idx];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 754f6a908858..edae5fc5fccd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
497 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { 497 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
498 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, 498 num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
499 PROTOCOLID_ROCE, 499 PROTOCOLID_ROCE,
500 0) * 2; 500 NULL) * 2;
501 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; 501 n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
502 } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { 502 } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
503 num_cons = 503 num_cons =
504 qed_cxt_get_proto_cid_count(p_hwfn, 504 qed_cxt_get_proto_cid_count(p_hwfn,
505 PROTOCOLID_ISCSI, 0); 505 PROTOCOLID_ISCSI,
506 NULL);
506 n_eqes += 2 * num_cons; 507 n_eqes += 2 * num_cons;
507 } 508 }
508 509
@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
1422 u32 *feat_num = p_hwfn->hw_info.feat_num; 1423 u32 *feat_num = p_hwfn->hw_info.feat_num;
1423 int num_features = 1; 1424 int num_features = 1;
1424 1425
1425#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 1426 if (IS_ENABLED(CONFIG_QED_RDMA) &&
1426 /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the 1427 p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
1427 * status blocks equally between L2 / RoCE but with consideration as 1428 /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
1428 * to how many l2 queues / cnqs we have 1429 * the status blocks equally between L2 / RoCE but with
1429 */ 1430 * consideration as to how many l2 queues / cnqs we have.
1430 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { 1431 */
1431 num_features++; 1432 num_features++;
1432 1433
1433 feat_num[QED_RDMA_CNQ] = 1434 feat_num[QED_RDMA_CNQ] =
1434 min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, 1435 min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
1435 RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); 1436 RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
1436 } 1437 }
1437#endif 1438
1438 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 1439 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1439 num_features, 1440 num_features,
1440 RESC_NUM(p_hwfn, QED_L2_QUEUE)); 1441 RESC_NUM(p_hwfn, QED_L2_QUEUE));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a8be2faed7..63e1a1b0ef8e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,6 +38,7 @@
38#include "qed_mcp.h" 38#include "qed_mcp.h"
39#include "qed_reg_addr.h" 39#include "qed_reg_addr.h"
40#include "qed_sp.h" 40#include "qed_sp.h"
41#include "qed_roce.h"
41 42
42#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) 43#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
43#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) 44#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
140 qed_ll2_dealloc_buffer(cdev, buffer); 141 qed_ll2_dealloc_buffer(cdev, buffer);
141} 142}
142 143
143void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, 144static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
144 u8 connection_handle, 145 u8 connection_handle,
145 struct qed_ll2_rx_packet *p_pkt, 146 struct qed_ll2_rx_packet *p_pkt,
146 struct core_rx_fast_path_cqe *p_cqe, 147 struct core_rx_fast_path_cqe *p_cqe,
147 bool b_last_packet) 148 bool b_last_packet)
148{ 149{
149 u16 packet_length = le16_to_cpu(p_cqe->packet_length); 150 u16 packet_length = le16_to_cpu(p_cqe->packet_length);
150 struct qed_ll2_buffer *buffer = p_pkt->cookie; 151 struct qed_ll2_buffer *buffer = p_pkt->cookie;
@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
515 return rc; 516 return rc;
516} 517}
517 518
518void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) 519static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
519{ 520{
520 struct qed_ll2_info *p_ll2_conn = NULL; 521 struct qed_ll2_info *p_ll2_conn = NULL;
521 struct qed_ll2_rx_packet *p_pkt = NULL; 522 struct qed_ll2_rx_packet *p_pkt = NULL;
@@ -537,8 +538,7 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
537 if (!p_pkt) 538 if (!p_pkt)
538 break; 539 break;
539 540
540 list_del(&p_pkt->list_entry); 541 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
541 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
542 542
543 rx_buf_addr = p_pkt->rx_buf_addr; 543 rx_buf_addr = p_pkt->rx_buf_addr;
544 cookie = p_pkt->cookie; 544 cookie = p_pkt->cookie;
@@ -992,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
992 p_posting_packet = list_first_entry(&p_rx->posting_descq, 992 p_posting_packet = list_first_entry(&p_rx->posting_descq,
993 struct qed_ll2_rx_packet, 993 struct qed_ll2_rx_packet,
994 list_entry); 994 list_entry);
995 list_del(&p_posting_packet->list_entry); 995 list_move_tail(&p_posting_packet->list_entry,
996 list_add_tail(&p_posting_packet->list_entry, 996 &p_rx->active_descq);
997 &p_rx->active_descq);
998 b_notify_fw = true; 997 b_notify_fw = true;
999 } 998 }
1000 999
@@ -1123,9 +1122,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1123 DMA_REGPAIR_LE(start_bd->addr, first_frag); 1122 DMA_REGPAIR_LE(start_bd->addr, first_frag);
1124 start_bd->nbytes = cpu_to_le16(first_frag_len); 1123 start_bd->nbytes = cpu_to_le16(first_frag_len);
1125 1124
1126 SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
1127 type);
1128
1129 DP_VERBOSE(p_hwfn, 1125 DP_VERBOSE(p_hwfn,
1130 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), 1126 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1131 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", 1127 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
@@ -1188,8 +1184,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1188 if (!p_pkt) 1184 if (!p_pkt)
1189 break; 1185 break;
1190 1186
1191 list_del(&p_pkt->list_entry); 1187 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
1192 list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
1193 } 1188 }
1194 1189
1195 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); 1190 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2d652d..4e3d62a16cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
293 */ 293 */
294void qed_ll2_free(struct qed_hwfn *p_hwfn, 294void qed_ll2_free(struct qed_hwfn *p_hwfn,
295 struct qed_ll2_info *p_ll2_connections); 295 struct qed_ll2_info *p_ll2_connections);
296void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
297 u8 connection_handle,
298 void *cookie,
299 dma_addr_t rx_buf_addr,
300 u16 data_length,
301 u8 data_length_error,
302 u16 parse_flags,
303 u16 vlan,
304 u32 src_mac_addr_hi,
305 u16 src_mac_addr_lo, bool b_last_packet);
306void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
307 u8 connection_handle,
308 void *cookie,
309 dma_addr_t first_frag_addr,
310 bool b_last_fragment, bool b_last_packet);
311void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
312 u8 connection_handle,
313 void *cookie,
314 dma_addr_t first_frag_addr,
315 bool b_last_fragment, bool b_last_packet);
316#endif 296#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 4ee3151e80c2..c418360ba02a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -33,10 +33,8 @@
33#include "qed_hw.h" 33#include "qed_hw.h"
34#include "qed_selftest.h" 34#include "qed_selftest.h"
35 35
36#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
37#define QED_ROCE_QPS (8192) 36#define QED_ROCE_QPS (8192)
38#define QED_ROCE_DPIS (8) 37#define QED_ROCE_DPIS (8)
39#endif
40 38
41static char version[] = 39static char version[] =
42 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 40 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
682 enum qed_int_mode int_mode) 680 enum qed_int_mode int_mode)
683{ 681{
684 struct qed_sb_cnt_info sb_cnt_info; 682 struct qed_sb_cnt_info sb_cnt_info;
685#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 683 int num_l2_queues = 0;
686 int num_l2_queues;
687#endif
688 int rc; 684 int rc;
689 int i; 685 int i;
690 686
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
715 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 711 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
716 cdev->num_hwfns; 712 cdev->num_hwfns;
717 713
718#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 714 if (!IS_ENABLED(CONFIG_QED_RDMA))
719 num_l2_queues = 0; 715 return 0;
716
720 for_each_hwfn(cdev, i) 717 for_each_hwfn(cdev, i)
721 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 718 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
722 719
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
738 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 735 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
739 cdev->int_params.rdma_msix_cnt, 736 cdev->int_params.rdma_msix_cnt,
740 cdev->int_params.rdma_msix_base); 737 cdev->int_params.rdma_msix_base);
741#endif
742 738
743 return 0; 739 return 0;
744} 740}
@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
843{ 839{
844 int i; 840 int i;
845 841
846#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
847 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
848 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
849 /* divide by 3 the MRs to avoid MF ILT overflow */
850 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
851 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
852#endif
853 for (i = 0; i < cdev->num_hwfns; i++) { 842 for (i = 0; i < cdev->num_hwfns; i++) {
854 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 843 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
855 844
856 p_hwfn->pf_params = *params; 845 p_hwfn->pf_params = *params;
857 } 846 }
847
848 if (!IS_ENABLED(CONFIG_QED_RDMA))
849 return;
850
851 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
852 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
853 /* divide by 3 the MRs to avoid MF ILT overflow */
854 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
855 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
858} 856}
859 857
860static int qed_slowpath_start(struct qed_dev *cdev, 858static int qed_slowpath_start(struct qed_dev *cdev,
@@ -880,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
880 } 878 }
881 } 879 }
882 880
881 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
883 rc = qed_nic_setup(cdev); 882 rc = qed_nic_setup(cdev);
884 if (rc) 883 if (rc)
885 goto err; 884 goto err;
@@ -1432,7 +1431,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
1432 return status; 1431 return status;
1433} 1432}
1434 1433
1435struct qed_selftest_ops qed_selftest_ops_pass = { 1434static struct qed_selftest_ops qed_selftest_ops_pass = {
1436 .selftest_memory = &qed_selftest_memory, 1435 .selftest_memory = &qed_selftest_memory,
1437 .selftest_interrupt = &qed_selftest_interrupt, 1436 .selftest_interrupt = &qed_selftest_interrupt,
1438 .selftest_register = &qed_selftest_register, 1437 .selftest_register = &qed_selftest_register,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 76831a398bed..f3a825a8f8d5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
129 } 129 }
130} 130}
131 131
132u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) 132static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
133{ 133{
134 /* First sb id for RoCE is after all the l2 sb */ 134 /* First sb id for RoCE is after all the l2 sb */
135 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; 135 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
136} 136}
137 137
138u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
139{
140 return QED_CAU_DEF_RX_TIMER_RES;
141}
142
143static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, 138static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
144 struct qed_ptt *p_ptt, 139 struct qed_ptt *p_ptt,
145 struct qed_rdma_start_in_params *params) 140 struct qed_rdma_start_in_params *params)
@@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
162 p_hwfn->p_rdma_info = p_rdma_info; 157 p_hwfn->p_rdma_info = p_rdma_info;
163 p_rdma_info->proto = PROTOCOLID_ROCE; 158 p_rdma_info->proto = PROTOCOLID_ROCE;
164 159
165 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0); 160 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
161 NULL);
166 162
167 p_rdma_info->num_qps = num_cons / 2; 163 p_rdma_info->num_qps = num_cons / 2;
168 164
@@ -275,7 +271,7 @@ free_rdma_info:
275 return rc; 271 return rc;
276} 272}
277 273
278void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) 274static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
279{ 275{
280 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 276 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
281 277
@@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
527 return qed_spq_post(p_hwfn, p_ent, NULL); 523 return qed_spq_post(p_hwfn, p_ent, NULL);
528} 524}
529 525
526static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
527{
528 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
529 int rc;
530
531 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
532
533 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
534 rc = qed_rdma_bmap_alloc_id(p_hwfn,
535 &p_hwfn->p_rdma_info->tid_map, itid);
536 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
537 if (rc)
538 goto out;
539
540 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
541out:
542 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
543 return rc;
544}
545
530static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) 546static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
531{ 547{
532 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 548 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
573 return qed_rdma_start_fw(p_hwfn, params, p_ptt); 589 return qed_rdma_start_fw(p_hwfn, params, p_ptt);
574} 590}
575 591
576int qed_rdma_stop(void *rdma_cxt) 592static int qed_rdma_stop(void *rdma_cxt)
577{ 593{
578 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 594 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
579 struct rdma_close_func_ramrod_data *p_ramrod; 595 struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +645,8 @@ out:
629 return rc; 645 return rc;
630} 646}
631 647
632int qed_rdma_add_user(void *rdma_cxt, 648static int qed_rdma_add_user(void *rdma_cxt,
633 struct qed_rdma_add_user_out_params *out_params) 649 struct qed_rdma_add_user_out_params *out_params)
634{ 650{
635 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 651 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
636 u32 dpi_start_offset; 652 u32 dpi_start_offset;
@@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt,
664 return rc; 680 return rc;
665} 681}
666 682
667struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) 683static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
668{ 684{
669 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 685 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
670 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; 686 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
680 return p_port; 696 return p_port;
681} 697}
682 698
683struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) 699static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
684{ 700{
685 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 701 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
686 702
@@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
690 return p_hwfn->p_rdma_info->dev; 706 return p_hwfn->p_rdma_info->dev;
691} 707}
692 708
693void qed_rdma_free_tid(void *rdma_cxt, u32 itid) 709static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
694{ 710{
695 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 711 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
696 712
@@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
701 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 717 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
702} 718}
703 719
704int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) 720static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
705{
706 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
707 int rc;
708
709 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
710
711 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
712 rc = qed_rdma_bmap_alloc_id(p_hwfn,
713 &p_hwfn->p_rdma_info->tid_map, itid);
714 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
715 if (rc)
716 goto out;
717
718 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
719out:
720 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
721 return rc;
722}
723
724void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
725{ 721{
726 struct qed_hwfn *p_hwfn; 722 struct qed_hwfn *p_hwfn;
727 u16 qz_num; 723 u16 qz_num;
@@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
816 return 0; 812 return 0;
817} 813}
818 814
819int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) 815static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
820{ 816{
821 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 817 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
822 u32 returned_id; 818 u32 returned_id;
@@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
836 return rc; 832 return rc;
837} 833}
838 834
839void qed_rdma_free_pd(void *rdma_cxt, u16 pd) 835static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
840{ 836{
841 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 837 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
842 838
@@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
873 return toggle_bit; 869 return toggle_bit;
874} 870}
875 871
876int qed_rdma_create_cq(void *rdma_cxt, 872static int qed_rdma_create_cq(void *rdma_cxt,
877 struct qed_rdma_create_cq_in_params *params, u16 *icid) 873 struct qed_rdma_create_cq_in_params *params,
874 u16 *icid)
878{ 875{
879 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 876 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
880 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; 877 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
@@ -957,98 +954,10 @@ err:
957 return rc; 954 return rc;
958} 955}
959 956
960int qed_rdma_resize_cq(void *rdma_cxt, 957static int
961 struct qed_rdma_resize_cq_in_params *in_params, 958qed_rdma_destroy_cq(void *rdma_cxt,
962 struct qed_rdma_resize_cq_out_params *out_params) 959 struct qed_rdma_destroy_cq_in_params *in_params,
963{ 960 struct qed_rdma_destroy_cq_out_params *out_params)
964 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
965 struct rdma_resize_cq_output_params *p_ramrod_res;
966 struct rdma_resize_cq_ramrod_data *p_ramrod;
967 enum qed_rdma_toggle_bit toggle_bit;
968 struct qed_sp_init_data init_data;
969 struct qed_spq_entry *p_ent;
970 dma_addr_t ramrod_res_phys;
971 u8 fw_return_code;
972 int rc = -ENOMEM;
973
974 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
975
976 p_ramrod_res =
977 (struct rdma_resize_cq_output_params *)
978 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
979 sizeof(struct rdma_resize_cq_output_params),
980 &ramrod_res_phys, GFP_KERNEL);
981 if (!p_ramrod_res) {
982 DP_NOTICE(p_hwfn,
983 "qed resize cq failed: cannot allocate memory (ramrod)\n");
984 return rc;
985 }
986
987 /* Get SPQ entry */
988 memset(&init_data, 0, sizeof(init_data));
989 init_data.cid = in_params->icid;
990 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
991 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
992
993 rc = qed_sp_init_request(p_hwfn, &p_ent,
994 RDMA_RAMROD_RESIZE_CQ,
995 p_hwfn->p_rdma_info->proto, &init_data);
996 if (rc)
997 goto err;
998
999 p_ramrod = &p_ent->ramrod.rdma_resize_cq;
1000
1001 p_ramrod->flags = 0;
1002
1003 /* toggle the bit for every resize or create cq for a given icid */
1004 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
1005 in_params->icid);
1006
1007 SET_FIELD(p_ramrod->flags,
1008 RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
1009
1010 SET_FIELD(p_ramrod->flags,
1011 RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
1012 in_params->pbl_two_level);
1013
1014 p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
1015 p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
1016 p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
1017 DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
1018 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1019
1020 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1021 if (rc)
1022 goto err;
1023
1024 if (fw_return_code != RDMA_RETURN_OK) {
1025 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1026 rc = -EINVAL;
1027 goto err;
1028 }
1029
1030 out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
1031 out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
1032
1033 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1034 sizeof(struct rdma_resize_cq_output_params),
1035 p_ramrod_res, ramrod_res_phys);
1036
1037 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
1038
1039 return rc;
1040
1041err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1042 sizeof(struct rdma_resize_cq_output_params),
1043 p_ramrod_res, ramrod_res_phys);
1044 DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
1045
1046 return rc;
1047}
1048
1049int qed_rdma_destroy_cq(void *rdma_cxt,
1050 struct qed_rdma_destroy_cq_in_params *in_params,
1051 struct qed_rdma_destroy_cq_out_params *out_params)
1052{ 961{
1053 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 962 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1054 struct rdma_destroy_cq_output_params *p_ramrod_res; 963 struct rdma_destroy_cq_output_params *p_ramrod_res;
@@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1169 return flavor; 1078 return flavor;
1170} 1079}
1171 1080
1172int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) 1081static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
1173{ 1082{
1174 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 1083 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1175 u32 responder_icid; 1084 u32 responder_icid;
@@ -1793,9 +1702,9 @@ err:
1793 return rc; 1702 return rc;
1794} 1703}
1795 1704
1796int qed_roce_query_qp(struct qed_hwfn *p_hwfn, 1705static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
1797 struct qed_rdma_qp *qp, 1706 struct qed_rdma_qp *qp,
1798 struct qed_rdma_query_qp_out_params *out_params) 1707 struct qed_rdma_query_qp_out_params *out_params)
1799{ 1708{
1800 struct roce_query_qp_resp_output_params *p_resp_ramrod_res; 1709 struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
1801 struct roce_query_qp_req_output_params *p_req_ramrod_res; 1710 struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -1936,7 +1845,7 @@ err_resp:
1936 return rc; 1845 return rc;
1937} 1846}
1938 1847
1939int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) 1848static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1940{ 1849{
1941 u32 num_invalidated_mw = 0; 1850 u32 num_invalidated_mw = 0;
1942 u32 num_bound_mw = 0; 1851 u32 num_bound_mw = 0;
@@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1985 return 0; 1894 return 0;
1986} 1895}
1987 1896
1988int qed_rdma_query_qp(void *rdma_cxt, 1897static int qed_rdma_query_qp(void *rdma_cxt,
1989 struct qed_rdma_qp *qp, 1898 struct qed_rdma_qp *qp,
1990 struct qed_rdma_query_qp_out_params *out_params) 1899 struct qed_rdma_query_qp_out_params *out_params)
1991{ 1900{
1992 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1901 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1993 int rc; 1902 int rc;
@@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
2022 return rc; 1931 return rc;
2023} 1932}
2024 1933
2025int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) 1934static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
2026{ 1935{
2027 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1936 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2028 int rc = 0; 1937 int rc = 0;
@@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
2038 return rc; 1947 return rc;
2039} 1948}
2040 1949
2041struct qed_rdma_qp * 1950static struct qed_rdma_qp *
2042qed_rdma_create_qp(void *rdma_cxt, 1951qed_rdma_create_qp(void *rdma_cxt,
2043 struct qed_rdma_create_qp_in_params *in_params, 1952 struct qed_rdma_create_qp_in_params *in_params,
2044 struct qed_rdma_create_qp_out_params *out_params) 1953 struct qed_rdma_create_qp_out_params *out_params)
@@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
2215 return rc; 2124 return rc;
2216} 2125}
2217 2126
2218int qed_rdma_modify_qp(void *rdma_cxt, 2127static int qed_rdma_modify_qp(void *rdma_cxt,
2219 struct qed_rdma_qp *qp, 2128 struct qed_rdma_qp *qp,
2220 struct qed_rdma_modify_qp_in_params *params) 2129 struct qed_rdma_modify_qp_in_params *params)
2221{ 2130{
2222 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2131 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2223 enum qed_roce_qp_state prev_state; 2132 enum qed_roce_qp_state prev_state;
@@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
2312 return rc; 2221 return rc;
2313} 2222}
2314 2223
2315int qed_rdma_register_tid(void *rdma_cxt, 2224static int
2316 struct qed_rdma_register_tid_in_params *params) 2225qed_rdma_register_tid(void *rdma_cxt,
2226 struct qed_rdma_register_tid_in_params *params)
2317{ 2227{
2318 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2228 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2319 struct rdma_register_tid_ramrod_data *p_ramrod; 2229 struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
2450 return rc; 2360 return rc;
2451} 2361}
2452 2362
2453int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) 2363static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
2454{ 2364{
2455 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2365 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2456 struct rdma_deregister_tid_ramrod_data *p_ramrod; 2366 struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2561 qed_rdma_dpm_conf(p_hwfn, p_ptt); 2471 qed_rdma_dpm_conf(p_hwfn, p_ptt);
2562} 2472}
2563 2473
2564int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params) 2474static int qed_rdma_start(void *rdma_cxt,
2475 struct qed_rdma_start_in_params *params)
2565{ 2476{
2566 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2477 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2567 struct qed_ptt *p_ptt; 2478 struct qed_ptt *p_ptt;
@@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
2601 return qed_rdma_start(QED_LEADING_HWFN(cdev), params); 2512 return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
2602} 2513}
2603 2514
2604void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) 2515static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
2605{ 2516{
2606 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2517 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2607 2518
@@ -2809,11 +2720,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev)
2809 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; 2720 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2810 int rc; 2721 int rc;
2811 2722
2812 if (!cdev) {
2813 DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
2814 return -EINVAL;
2815 }
2816
2817 if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) { 2723 if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
2818 DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n"); 2724 DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
2819 return -EINVAL; 2725 return -EINVAL;
@@ -2850,7 +2756,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
2850 int rc; 2756 int rc;
2851 int i; 2757 int i;
2852 2758
2853 if (!cdev || !pkt || !params) { 2759 if (!pkt || !params) {
2854 DP_ERR(cdev, 2760 DP_ERR(cdev,
2855 "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n", 2761 "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
2856 cdev, pkt, params); 2762 cdev, pkt, params);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 2f091e8a0f40..279f342af8db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -95,26 +95,6 @@ struct qed_rdma_info {
95 enum protocol_type proto; 95 enum protocol_type proto;
96}; 96};
97 97
98struct qed_rdma_resize_cq_in_params {
99 u16 icid;
100 u32 cq_size;
101 bool pbl_two_level;
102 u64 pbl_ptr;
103 u16 pbl_num_pages;
104 u8 pbl_page_size_log;
105};
106
107struct qed_rdma_resize_cq_out_params {
108 u32 prod;
109 u32 cons;
110};
111
112struct qed_rdma_resize_cnq_in_params {
113 u32 cnq_id;
114 u32 pbl_page_size_log;
115 u64 pbl_ptr;
116};
117
118struct qed_rdma_qp { 98struct qed_rdma_qp {
119 struct regpair qp_handle; 99 struct regpair qp_handle;
120 struct regpair qp_handle_async; 100 struct regpair qp_handle_async;
@@ -181,36 +161,55 @@ struct qed_rdma_qp {
181 dma_addr_t shared_queue_phys_addr; 161 dma_addr_t shared_queue_phys_addr;
182}; 162};
183 163
184int 164#if IS_ENABLED(CONFIG_QED_RDMA)
185qed_rdma_add_user(void *rdma_cxt, 165void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
186 struct qed_rdma_add_user_out_params *out_params);
187int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
188int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
189int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
190void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
191struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
192struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
193int
194qed_rdma_register_tid(void *rdma_cxt,
195 struct qed_rdma_register_tid_in_params *params);
196void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
197int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
198int qed_rdma_stop(void *rdma_cxt);
199u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
200u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
201void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
202void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
203void qed_async_roce_event(struct qed_hwfn *p_hwfn, 166void qed_async_roce_event(struct qed_hwfn *p_hwfn,
204 struct event_ring_entry *p_eqe); 167 struct event_ring_entry *p_eqe);
205int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp); 168void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
206int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp, 169 u8 connection_handle,
207 struct qed_rdma_modify_qp_in_params *params); 170 void *cookie,
208int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp, 171 dma_addr_t first_frag_addr,
209 struct qed_rdma_query_qp_out_params *out_params); 172 bool b_last_fragment, bool b_last_packet);
210 173void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
211#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 174 u8 connection_handle,
212void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 175 void *cookie,
176 dma_addr_t first_frag_addr,
177 bool b_last_fragment, bool b_last_packet);
178void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
179 u8 connection_handle,
180 void *cookie,
181 dma_addr_t rx_buf_addr,
182 u16 data_length,
183 u8 data_length_error,
184 u16 parse_flags,
185 u16 vlan,
186 u32 src_mac_addr_hi,
187 u16 src_mac_addr_lo, bool b_last_packet);
213#else 188#else
214void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} 189static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
190static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
191static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
192 u8 connection_handle,
193 void *cookie,
194 dma_addr_t first_frag_addr,
195 bool b_last_fragment,
196 bool b_last_packet) {}
197static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
198 u8 connection_handle,
199 void *cookie,
200 dma_addr_t first_frag_addr,
201 bool b_last_fragment,
202 bool b_last_packet) {}
203static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
204 u8 connection_handle,
205 void *cookie,
206 dma_addr_t rx_buf_addr,
207 u16 data_length,
208 u8 data_length_error,
209 u16 parse_flags,
210 u16 vlan,
211 u32 src_mac_addr_hi,
212 u16 src_mac_addr_lo,
213 bool b_last_packet) {}
215#endif 214#endif
216#endif 215#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 652c90819758..b2c08e4d2a9b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -80,7 +80,6 @@ union ramrod_data {
80 struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; 80 struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
81 struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; 81 struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
82 struct rdma_create_cq_ramrod_data rdma_create_cq; 82 struct rdma_create_cq_ramrod_data rdma_create_cq;
83 struct rdma_resize_cq_ramrod_data rdma_resize_cq;
84 struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; 83 struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
85 struct rdma_srq_create_ramrod_data rdma_create_srq; 84 struct rdma_srq_create_ramrod_data rdma_create_srq;
86 struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; 85 struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index caff41544898..9fbaf9429fd0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,9 +28,7 @@
28#include "qed_reg_addr.h" 28#include "qed_reg_addr.h"
29#include "qed_sp.h" 29#include "qed_sp.h"
30#include "qed_sriov.h" 30#include "qed_sriov.h"
31#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
32#include "qed_roce.h" 31#include "qed_roce.h"
33#endif
34 32
35/*************************************************************************** 33/***************************************************************************
36* Structures & Definitions 34* Structures & Definitions
@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
240 struct event_ring_entry *p_eqe) 238 struct event_ring_entry *p_eqe)
241{ 239{
242 switch (p_eqe->protocol_id) { 240 switch (p_eqe->protocol_id) {
243#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
244 case PROTOCOLID_ROCE: 241 case PROTOCOLID_ROCE:
245 qed_async_roce_event(p_hwfn, p_eqe); 242 qed_async_roce_event(p_hwfn, p_eqe);
246 return 0; 243 return 0;
247#endif
248 case PROTOCOLID_COMMON: 244 case PROTOCOLID_COMMON:
249 return qed_sriov_eqe_event(p_hwfn, 245 return qed_sriov_eqe_event(p_hwfn,
250 p_eqe->opcode, 246 p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 28dc58919c85..048a230c3ce0 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
2 2
3qede-y := qede_main.o qede_ethtool.o 3qede-y := qede_main.o qede_ethtool.o
4qede-$(CONFIG_DCB) += qede_dcbnl.o 4qede-$(CONFIG_DCB) += qede_dcbnl.o
5qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o 5qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 28c0e9f42c9e..974689a13337 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -348,12 +348,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
348int qede_txq_has_work(struct qede_tx_queue *txq); 348int qede_txq_has_work(struct qede_tx_queue *txq);
349void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, 349void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
350 u8 count); 350 u8 count);
351void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
351 352
352#define RX_RING_SIZE_POW 13 353#define RX_RING_SIZE_POW 13
353#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) 354#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
354#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) 355#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
355#define NUM_RX_BDS_MIN 128 356#define NUM_RX_BDS_MIN 128
356#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX 357#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
357 358
358#define TX_RING_SIZE_POW 13 359#define TX_RING_SIZE_POW 13
359#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) 360#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 25a9b293ee8f..12251a1032d1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -756,6 +756,8 @@ static void qede_get_channels(struct net_device *dev,
756 struct qede_dev *edev = netdev_priv(dev); 756 struct qede_dev *edev = netdev_priv(dev);
757 757
758 channels->max_combined = QEDE_MAX_RSS_CNT(edev); 758 channels->max_combined = QEDE_MAX_RSS_CNT(edev);
759 channels->max_rx = QEDE_MAX_RSS_CNT(edev);
760 channels->max_tx = QEDE_MAX_RSS_CNT(edev);
759 channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - 761 channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
760 edev->fp_num_rx; 762 edev->fp_num_rx;
761 channels->tx_count = edev->fp_num_tx; 763 channels->tx_count = edev->fp_num_tx;
@@ -820,6 +822,13 @@ static int qede_set_channels(struct net_device *dev,
820 edev->req_queues = count; 822 edev->req_queues = count;
821 edev->req_num_tx = channels->tx_count; 823 edev->req_num_tx = channels->tx_count;
822 edev->req_num_rx = channels->rx_count; 824 edev->req_num_rx = channels->rx_count;
825 /* Reset the indirection table if rx queue count is updated */
826 if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
827 edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
828 memset(&edev->rss_params.rss_ind_table, 0,
829 sizeof(edev->rss_params.rss_ind_table));
830 }
831
823 if (netif_running(dev)) 832 if (netif_running(dev))
824 qede_reload(edev, NULL, NULL); 833 qede_reload(edev, NULL, NULL);
825 834
@@ -1053,6 +1062,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
1053 struct qede_dev *edev = netdev_priv(dev); 1062 struct qede_dev *edev = netdev_priv(dev);
1054 int i; 1063 int i;
1055 1064
1065 if (edev->dev_info.common.num_hwfns > 1) {
1066 DP_INFO(edev,
1067 "RSS configuration is not supported for 100G devices\n");
1068 return -EOPNOTSUPP;
1069 }
1070
1056 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 1071 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1057 return -EOPNOTSUPP; 1072 return -EOPNOTSUPP;
1058 1073
@@ -1184,8 +1199,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
1184 } 1199 }
1185 1200
1186 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); 1201 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
1187 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), 1202 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
1188 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); 1203 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
1189 txq->sw_tx_cons++; 1204 txq->sw_tx_cons++;
1190 txq->sw_tx_ring[idx].skb = NULL; 1205 txq->sw_tx_ring[idx].skb = NULL;
1191 1206
@@ -1199,8 +1214,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
1199 struct qede_rx_queue *rxq = NULL; 1214 struct qede_rx_queue *rxq = NULL;
1200 struct sw_rx_data *sw_rx_data; 1215 struct sw_rx_data *sw_rx_data;
1201 union eth_rx_cqe *cqe; 1216 union eth_rx_cqe *cqe;
1217 int i, rc = 0;
1202 u8 *data_ptr; 1218 u8 *data_ptr;
1203 int i;
1204 1219
1205 for_each_queue(i) { 1220 for_each_queue(i) {
1206 if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { 1221 if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1219,46 +1234,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
1219 * queue and that the loopback traffic is not IP. 1234 * queue and that the loopback traffic is not IP.
1220 */ 1235 */
1221 for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { 1236 for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
1222 if (qede_has_rx_work(rxq)) 1237 if (!qede_has_rx_work(rxq)) {
1238 usleep_range(100, 200);
1239 continue;
1240 }
1241
1242 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1243 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1244
1245 /* Memory barrier to prevent the CPU from doing speculative
1246 * reads of CQE/BD before reading hw_comp_cons. If the CQE is
1247 * read before it is written by FW, then FW writes CQE and SB,
1248 * and then the CPU reads the hw_comp_cons, it will use an old
1249 * CQE.
1250 */
1251 rmb();
1252
1253 /* Get the CQE from the completion ring */
1254 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1255
1256 /* Get the data from the SW ring */
1257 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1258 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1259 fp_cqe = &cqe->fast_path_regular;
1260 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1261 data_ptr = (u8 *)(page_address(sw_rx_data->data) +
1262 fp_cqe->placement_offset +
1263 sw_rx_data->page_offset);
1264 if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
1265 ether_addr_equal(data_ptr + ETH_ALEN,
1266 edev->ndev->dev_addr)) {
1267 for (i = ETH_HLEN; i < len; i++)
1268 if (data_ptr[i] != (unsigned char)(i & 0xff)) {
1269 rc = -1;
1270 break;
1271 }
1272
1273 qede_recycle_rx_bd_ring(rxq, edev, 1);
1274 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1223 break; 1275 break;
1224 usleep_range(100, 200); 1276 }
1277
1278 DP_INFO(edev, "Not the transmitted packet\n");
1279 qede_recycle_rx_bd_ring(rxq, edev, 1);
1280 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1225 } 1281 }
1226 1282
1227 if (!qede_has_rx_work(rxq)) { 1283 if (i == QEDE_SELFTEST_POLL_COUNT) {
1228 DP_NOTICE(edev, "Failed to receive the traffic\n"); 1284 DP_NOTICE(edev, "Failed to receive the traffic\n");
1229 return -1; 1285 return -1;
1230 } 1286 }
1231 1287
1232 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); 1288 qede_update_rx_prod(edev, rxq);
1233 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1234 1289
1235 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 1290 return rc;
1236 * / BD before reading hw_comp_cons. If the CQE is read before it is
1237 * written by FW, then FW writes CQE and SB, and then the CPU reads the
1238 * hw_comp_cons, it will use an old CQE.
1239 */
1240 rmb();
1241
1242 /* Get the CQE from the completion ring */
1243 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1244
1245 /* Get the data from the SW ring */
1246 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1247 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1248 fp_cqe = &cqe->fast_path_regular;
1249 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1250 data_ptr = (u8 *)(page_address(sw_rx_data->data) +
1251 fp_cqe->placement_offset + sw_rx_data->page_offset);
1252 for (i = ETH_HLEN; i < len; i++)
1253 if (data_ptr[i] != (unsigned char)(i & 0xff)) {
1254 DP_NOTICE(edev, "Loopback test failed\n");
1255 qede_recycle_rx_bd_ring(rxq, edev, 1);
1256 return -1;
1257 }
1258
1259 qede_recycle_rx_bd_ring(rxq, edev, 1);
1260
1261 return 0;
1262} 1291}
1263 1292
1264static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) 1293static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 343038ca047d..7def29aaf65c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -313,8 +313,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
313 split_bd_len = BD_UNMAP_LEN(split); 313 split_bd_len = BD_UNMAP_LEN(split);
314 bds_consumed++; 314 bds_consumed++;
315 } 315 }
316 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), 316 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
317 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); 317 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
318 318
319 /* Unmap the data of the skb frags */ 319 /* Unmap the data of the skb frags */
320 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { 320 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -359,8 +359,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
359 nbd--; 359 nbd--;
360 } 360 }
361 361
362 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), 362 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
363 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); 363 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
364 364
365 /* Unmap the data of the skb frags */ 365 /* Unmap the data of the skb frags */
366 for (i = 0; i < nbd; i++) { 366 for (i = 0; i < nbd; i++) {
@@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
943 return 0; 943 return 0;
944} 944}
945 945
946static inline void qede_update_rx_prod(struct qede_dev *edev, 946void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
947 struct qede_rx_queue *rxq)
948{ 947{
949 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); 948 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
950 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); 949 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -2941,7 +2940,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
2941 txq->num_tx_buffers = edev->q_num_tx_buffers; 2940 txq->num_tx_buffers = edev->q_num_tx_buffers;
2942 2941
2943 /* Allocate the parallel driver ring for Tx buffers */ 2942 /* Allocate the parallel driver ring for Tx buffers */
2944 size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX; 2943 size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
2945 txq->sw_tx_ring = kzalloc(size, GFP_KERNEL); 2944 txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
2946 if (!txq->sw_tx_ring) { 2945 if (!txq->sw_tx_ring) {
2947 DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); 2946 DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
@@ -2952,7 +2951,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
2952 QED_CHAIN_USE_TO_CONSUME_PRODUCE, 2951 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
2953 QED_CHAIN_MODE_PBL, 2952 QED_CHAIN_MODE_PBL,
2954 QED_CHAIN_CNT_TYPE_U16, 2953 QED_CHAIN_CNT_TYPE_U16,
2955 NUM_TX_BDS_MAX, 2954 TX_RING_SIZE,
2956 sizeof(*p_virt), &txq->tx_pbl); 2955 sizeof(*p_virt), &txq->tx_pbl);
2957 if (rc) 2956 if (rc)
2958 goto err; 2957 goto err;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index e97968ed4b8f..6fb3bee904d3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1021,14 +1021,18 @@ void emac_mac_down(struct emac_adapter *adpt)
1021 napi_disable(&adpt->rx_q.napi); 1021 napi_disable(&adpt->rx_q.napi);
1022 1022
1023 phy_stop(adpt->phydev); 1023 phy_stop(adpt->phydev);
1024 phy_disconnect(adpt->phydev);
1025 1024
1026 /* disable mac irq */ 1025 /* Interrupts must be disabled before the PHY is disconnected, to
1026 * avoid a race condition where adjust_link is null when we get
1027 * an interrupt.
1028 */
1027 writel(DIS_INT, adpt->base + EMAC_INT_STATUS); 1029 writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
1028 writel(0, adpt->base + EMAC_INT_MASK); 1030 writel(0, adpt->base + EMAC_INT_MASK);
1029 synchronize_irq(adpt->irq.irq); 1031 synchronize_irq(adpt->irq.irq);
1030 free_irq(adpt->irq.irq, &adpt->irq); 1032 free_irq(adpt->irq.irq, &adpt->irq);
1031 1033
1034 phy_disconnect(adpt->phydev);
1035
1032 emac_mac_reset(adpt); 1036 emac_mac_reset(adpt);
1033 1037
1034 emac_tx_q_descs_free(adpt); 1038 emac_tx_q_descs_free(adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 9bf3b2b82e95..4fede4b86538 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -575,6 +575,7 @@ static const struct of_device_id emac_dt_match[] = {
575 }, 575 },
576 {} 576 {}
577}; 577};
578MODULE_DEVICE_TABLE(of, emac_dt_match);
578 579
579#if IS_ENABLED(CONFIG_ACPI) 580#if IS_ENABLED(CONFIG_ACPI)
580static const struct acpi_device_id emac_acpi_match[] = { 581static const struct acpi_device_id emac_acpi_match[] = {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e55638c7505a..bf000d819a21 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8273,7 +8273,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8273 if ((sizeof(dma_addr_t) > 4) && 8273 if ((sizeof(dma_addr_t) > 4) &&
8274 (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && 8274 (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
8275 tp->mac_version >= RTL_GIGA_MAC_VER_18)) && 8275 tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
8276 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 8276 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
8277 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
8277 8278
8278 /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ 8279 /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
8279 if (!pci_is_pcie(pdev)) 8280 if (!pci_is_pcie(pdev))
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 5424fb341613..24b746406bc7 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1471,7 +1471,7 @@ static int rocker_world_check_init(struct rocker_port *rocker_port)
1471 if (rocker->wops) { 1471 if (rocker->wops) {
1472 if (rocker->wops->mode != mode) { 1472 if (rocker->wops->mode != mode) {
1473 dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); 1473 dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
1474 return err; 1474 return -EINVAL;
1475 } 1475 }
1476 return 0; 1476 return 0;
1477 } 1477 }
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 431a60804272..4ca461322d60 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1493,8 +1493,6 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
1493 spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); 1493 spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
1494 1494
1495 found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); 1495 found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
1496 if (found)
1497 *index = found->index;
1498 1496
1499 updating = found && adding; 1497 updating = found && adding;
1500 removing = found && !adding; 1498 removing = found && !adding;
@@ -1508,9 +1506,11 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
1508 resolved = false; 1506 resolved = false;
1509 } else if (removing) { 1507 } else if (removing) {
1510 ofdpa_neigh_del(trans, found); 1508 ofdpa_neigh_del(trans, found);
1509 *index = found->index;
1511 } else if (updating) { 1510 } else if (updating) {
1512 ofdpa_neigh_update(found, trans, NULL, false); 1511 ofdpa_neigh_update(found, trans, NULL, false);
1513 resolved = !is_zero_ether_addr(found->eth_dst); 1512 resolved = !is_zero_ether_addr(found->eth_dst);
1513 *index = found->index;
1514 } else { 1514 } else {
1515 err = -ENOENT; 1515 err = -ENOENT;
1516 } 1516 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4ec7397e7fb3..a1b17cd7886b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -347,10 +347,9 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
347 pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); 347 pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
348 348
349 for (i = 0; i < size; i++) { 349 for (i = 0; i < size; i++) {
350 if (p->des0) 350 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
351 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 351 i, (unsigned int)virt_to_phys(p),
352 i, (unsigned int)virt_to_phys(p), 352 p->des0, p->des1, p->des2, p->des3);
353 p->des0, p->des1, p->des2, p->des3);
354 p++; 353 p++;
355 } 354 }
356} 355}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8dc9056c1001..b15fc55f1b96 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -145,7 +145,7 @@ int stmmac_mdio_register(struct net_device *ndev);
145int stmmac_mdio_reset(struct mii_bus *mii); 145int stmmac_mdio_reset(struct mii_bus *mii);
146void stmmac_set_ethtool_ops(struct net_device *netdev); 146void stmmac_set_ethtool_ops(struct net_device *netdev);
147 147
148int stmmac_ptp_register(struct stmmac_priv *priv); 148void stmmac_ptp_register(struct stmmac_priv *priv);
149void stmmac_ptp_unregister(struct stmmac_priv *priv); 149void stmmac_ptp_unregister(struct stmmac_priv *priv);
150int stmmac_resume(struct device *dev); 150int stmmac_resume(struct device *dev);
151int stmmac_suspend(struct device *dev); 151int stmmac_suspend(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6c85b61aaa0b..48e71fad4210 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -676,7 +676,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
676 priv->hwts_tx_en = 0; 676 priv->hwts_tx_en = 0;
677 priv->hwts_rx_en = 0; 677 priv->hwts_rx_en = 0;
678 678
679 return stmmac_ptp_register(priv); 679 stmmac_ptp_register(priv);
680
681 return 0;
680} 682}
681 683
682static void stmmac_release_ptp(struct stmmac_priv *priv) 684static void stmmac_release_ptp(struct stmmac_priv *priv)
@@ -1710,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1710 if (init_ptp) { 1712 if (init_ptp) {
1711 ret = stmmac_init_ptp(priv); 1713 ret = stmmac_init_ptp(priv);
1712 if (ret) 1714 if (ret)
1713 netdev_warn(priv->dev, "PTP support cannot init.\n"); 1715 netdev_warn(priv->dev, "fail to init PTP.\n");
1714 } 1716 }
1715 1717
1716#ifdef CONFIG_DEBUG_FS 1718#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 289d52725a6c..1477471f8d44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -177,7 +177,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
177 * Description: this function will register the ptp clock driver 177 * Description: this function will register the ptp clock driver
178 * to kernel. It also does some house keeping work. 178 * to kernel. It also does some house keeping work.
179 */ 179 */
180int stmmac_ptp_register(struct stmmac_priv *priv) 180void stmmac_ptp_register(struct stmmac_priv *priv)
181{ 181{
182 spin_lock_init(&priv->ptp_lock); 182 spin_lock_init(&priv->ptp_lock);
183 priv->ptp_clock_ops = stmmac_ptp_clock_ops; 183 priv->ptp_clock_ops = stmmac_ptp_clock_ops;
@@ -185,15 +185,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
185 priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, 185 priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
186 priv->device); 186 priv->device);
187 if (IS_ERR(priv->ptp_clock)) { 187 if (IS_ERR(priv->ptp_clock)) {
188 netdev_err(priv->dev, "ptp_clock_register failed\n");
188 priv->ptp_clock = NULL; 189 priv->ptp_clock = NULL;
189 return PTR_ERR(priv->ptp_clock); 190 } else if (priv->ptp_clock)
190 } 191 netdev_info(priv->dev, "registered PTP clock\n");
191
192 spin_lock_init(&priv->ptp_lock);
193
194 netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
195
196 return 0;
197} 192}
198 193
199/** 194/**
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 0d0053128542..5eedac495077 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -982,11 +982,13 @@ static int dwceqos_mii_probe(struct net_device *ndev)
982 if (netif_msg_probe(lp)) 982 if (netif_msg_probe(lp))
983 phy_attached_info(phydev); 983 phy_attached_info(phydev);
984 984
985 phydev->supported &= PHY_GBIT_FEATURES; 985 phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
986 SUPPORTED_Asym_Pause;
986 987
987 lp->link = 0; 988 lp->link = 0;
988 lp->speed = 0; 989 lp->speed = 0;
989 lp->duplex = DUPLEX_UNKNOWN; 990 lp->duplex = DUPLEX_UNKNOWN;
991 lp->flowcontrol.autoneg = AUTONEG_ENABLE;
990 992
991 return 0; 993 return 0;
992} 994}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3c20e87bb761..42edd7b7902f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -58,9 +58,9 @@ struct geneve_dev {
58 struct hlist_node hlist; /* vni hash table */ 58 struct hlist_node hlist; /* vni hash table */
59 struct net *net; /* netns for packet i/o */ 59 struct net *net; /* netns for packet i/o */
60 struct net_device *dev; /* netdev for geneve tunnel */ 60 struct net_device *dev; /* netdev for geneve tunnel */
61 struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */ 61 struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */
62#if IS_ENABLED(CONFIG_IPV6) 62#if IS_ENABLED(CONFIG_IPV6)
63 struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */ 63 struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */
64#endif 64#endif
65 u8 vni[3]; /* virtual network ID for tunnel */ 65 u8 vni[3]; /* virtual network ID for tunnel */
66 u8 ttl; /* TTL override */ 66 u8 ttl; /* TTL override */
@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
453 453
454 skb_gro_pull(skb, gh_len); 454 skb_gro_pull(skb, gh_len);
455 skb_gro_postpull_rcsum(skb, gh, gh_len); 455 skb_gro_postpull_rcsum(skb, gh, gh_len);
456 pp = ptype->callbacks.gro_receive(head, skb); 456 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
457 flush = 0; 457 flush = 0;
458 458
459out_unlock: 459out_unlock:
@@ -543,9 +543,19 @@ static void __geneve_sock_release(struct geneve_sock *gs)
543 543
544static void geneve_sock_release(struct geneve_dev *geneve) 544static void geneve_sock_release(struct geneve_dev *geneve)
545{ 545{
546 __geneve_sock_release(geneve->sock4); 546 struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
547#if IS_ENABLED(CONFIG_IPV6) 547#if IS_ENABLED(CONFIG_IPV6)
548 __geneve_sock_release(geneve->sock6); 548 struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);
549
550 rcu_assign_pointer(geneve->sock6, NULL);
551#endif
552
553 rcu_assign_pointer(geneve->sock4, NULL);
554 synchronize_net();
555
556 __geneve_sock_release(gs4);
557#if IS_ENABLED(CONFIG_IPV6)
558 __geneve_sock_release(gs6);
549#endif 559#endif
550} 560}
551 561
@@ -586,10 +596,10 @@ out:
586 gs->flags = geneve->flags; 596 gs->flags = geneve->flags;
587#if IS_ENABLED(CONFIG_IPV6) 597#if IS_ENABLED(CONFIG_IPV6)
588 if (ipv6) 598 if (ipv6)
589 geneve->sock6 = gs; 599 rcu_assign_pointer(geneve->sock6, gs);
590 else 600 else
591#endif 601#endif
592 geneve->sock4 = gs; 602 rcu_assign_pointer(geneve->sock4, gs);
593 603
594 hash = geneve_net_vni_hash(geneve->vni); 604 hash = geneve_net_vni_hash(geneve->vni);
595 hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]); 605 hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
@@ -603,9 +613,7 @@ static int geneve_open(struct net_device *dev)
603 bool metadata = geneve->collect_md; 613 bool metadata = geneve->collect_md;
604 int ret = 0; 614 int ret = 0;
605 615
606 geneve->sock4 = NULL;
607#if IS_ENABLED(CONFIG_IPV6) 616#if IS_ENABLED(CONFIG_IPV6)
608 geneve->sock6 = NULL;
609 if (ipv6 || metadata) 617 if (ipv6 || metadata)
610 ret = geneve_sock_add(geneve, true); 618 ret = geneve_sock_add(geneve, true);
611#endif 619#endif
@@ -720,6 +728,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
720 struct rtable *rt = NULL; 728 struct rtable *rt = NULL;
721 __u8 tos; 729 __u8 tos;
722 730
731 if (!rcu_dereference(geneve->sock4))
732 return ERR_PTR(-EIO);
733
723 memset(fl4, 0, sizeof(*fl4)); 734 memset(fl4, 0, sizeof(*fl4));
724 fl4->flowi4_mark = skb->mark; 735 fl4->flowi4_mark = skb->mark;
725 fl4->flowi4_proto = IPPROTO_UDP; 736 fl4->flowi4_proto = IPPROTO_UDP;
@@ -772,11 +783,15 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
772{ 783{
773 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 784 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
774 struct geneve_dev *geneve = netdev_priv(dev); 785 struct geneve_dev *geneve = netdev_priv(dev);
775 struct geneve_sock *gs6 = geneve->sock6;
776 struct dst_entry *dst = NULL; 786 struct dst_entry *dst = NULL;
777 struct dst_cache *dst_cache; 787 struct dst_cache *dst_cache;
788 struct geneve_sock *gs6;
778 __u8 prio; 789 __u8 prio;
779 790
791 gs6 = rcu_dereference(geneve->sock6);
792 if (!gs6)
793 return ERR_PTR(-EIO);
794
780 memset(fl6, 0, sizeof(*fl6)); 795 memset(fl6, 0, sizeof(*fl6));
781 fl6->flowi6_mark = skb->mark; 796 fl6->flowi6_mark = skb->mark;
782 fl6->flowi6_proto = IPPROTO_UDP; 797 fl6->flowi6_proto = IPPROTO_UDP;
@@ -842,7 +857,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
842 struct ip_tunnel_info *info) 857 struct ip_tunnel_info *info)
843{ 858{
844 struct geneve_dev *geneve = netdev_priv(dev); 859 struct geneve_dev *geneve = netdev_priv(dev);
845 struct geneve_sock *gs4 = geneve->sock4; 860 struct geneve_sock *gs4;
846 struct rtable *rt = NULL; 861 struct rtable *rt = NULL;
847 const struct iphdr *iip; /* interior IP header */ 862 const struct iphdr *iip; /* interior IP header */
848 int err = -EINVAL; 863 int err = -EINVAL;
@@ -853,6 +868,10 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
853 bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); 868 bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
854 u32 flags = geneve->flags; 869 u32 flags = geneve->flags;
855 870
871 gs4 = rcu_dereference(geneve->sock4);
872 if (!gs4)
873 goto tx_error;
874
856 if (geneve->collect_md) { 875 if (geneve->collect_md) {
857 if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { 876 if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
858 netdev_dbg(dev, "no tunnel metadata\n"); 877 netdev_dbg(dev, "no tunnel metadata\n");
@@ -932,9 +951,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
932 struct ip_tunnel_info *info) 951 struct ip_tunnel_info *info)
933{ 952{
934 struct geneve_dev *geneve = netdev_priv(dev); 953 struct geneve_dev *geneve = netdev_priv(dev);
935 struct geneve_sock *gs6 = geneve->sock6;
936 struct dst_entry *dst = NULL; 954 struct dst_entry *dst = NULL;
937 const struct iphdr *iip; /* interior IP header */ 955 const struct iphdr *iip; /* interior IP header */
956 struct geneve_sock *gs6;
938 int err = -EINVAL; 957 int err = -EINVAL;
939 struct flowi6 fl6; 958 struct flowi6 fl6;
940 __u8 prio, ttl; 959 __u8 prio, ttl;
@@ -943,6 +962,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
943 bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); 962 bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
944 u32 flags = geneve->flags; 963 u32 flags = geneve->flags;
945 964
965 gs6 = rcu_dereference(geneve->sock6);
966 if (!gs6)
967 goto tx_error;
968
946 if (geneve->collect_md) { 969 if (geneve->collect_md) {
947 if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { 970 if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
948 netdev_dbg(dev, "no tunnel metadata\n"); 971 netdev_dbg(dev, "no tunnel metadata\n");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f0919bd3a563..f6382150b16a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -447,7 +447,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
447 * Setup the sendside checksum offload only if this is not a 447 * Setup the sendside checksum offload only if this is not a
448 * GSO packet. 448 * GSO packet.
449 */ 449 */
450 if (skb_is_gso(skb)) { 450 if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
451 struct ndis_tcp_lso_info *lso_info; 451 struct ndis_tcp_lso_info *lso_info;
452 452
453 rndis_msg_size += NDIS_LSO_PPI_SIZE; 453 rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -607,15 +607,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
607 packet->total_data_buflen); 607 packet->total_data_buflen);
608 608
609 skb->protocol = eth_type_trans(skb, net); 609 skb->protocol = eth_type_trans(skb, net);
610 if (csum_info) { 610
611 /* We only look at the IP checksum here. 611 /* skb is already created with CHECKSUM_NONE */
612 * Should we be dropping the packet if checksum 612 skb_checksum_none_assert(skb);
613 * failed? How do we deal with other checksums - TCP/UDP? 613
614 */ 614 /*
615 if (csum_info->receive.ip_checksum_succeeded) 615 * In Linux, the IP checksum is always checked.
616 * Do L4 checksum offload if enabled and present.
617 */
618 if (csum_info && (net->features & NETIF_F_RXCSUM)) {
619 if (csum_info->receive.tcp_checksum_succeeded ||
620 csum_info->receive.udp_checksum_succeeded)
616 skb->ip_summed = CHECKSUM_UNNECESSARY; 621 skb->ip_summed = CHECKSUM_UNNECESSARY;
617 else
618 skb->ip_summed = CHECKSUM_NONE;
619 } 622 }
620 623
621 if (vlan_tci & VLAN_TAG_PRESENT) 624 if (vlan_tci & VLAN_TAG_PRESENT)
@@ -696,12 +699,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
696static void netvsc_get_drvinfo(struct net_device *net, 699static void netvsc_get_drvinfo(struct net_device *net,
697 struct ethtool_drvinfo *info) 700 struct ethtool_drvinfo *info)
698{ 701{
699 struct net_device_context *net_device_ctx = netdev_priv(net);
700 struct hv_device *dev = net_device_ctx->device_ctx;
701
702 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 702 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
703 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); 703 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
704 strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
705} 704}
706 705
707static void netvsc_get_channels(struct net_device *net, 706static void netvsc_get_channels(struct net_device *net,
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3ea47f28e143..d2e61e002926 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
397#define DEFAULT_ENCRYPT false 397#define DEFAULT_ENCRYPT false
398#define DEFAULT_ENCODING_SA 0 398#define DEFAULT_ENCODING_SA 0
399 399
400static bool send_sci(const struct macsec_secy *secy)
401{
402 const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
403
404 return tx_sc->send_sci ||
405 (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
406}
407
400static sci_t make_sci(u8 *addr, __be16 port) 408static sci_t make_sci(u8 *addr, __be16 port)
401{ 409{
402 sci_t sci; 410 sci_t sci;
@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
437 445
438/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ 446/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
439static void macsec_fill_sectag(struct macsec_eth_header *h, 447static void macsec_fill_sectag(struct macsec_eth_header *h,
440 const struct macsec_secy *secy, u32 pn) 448 const struct macsec_secy *secy, u32 pn,
449 bool sci_present)
441{ 450{
442 const struct macsec_tx_sc *tx_sc = &secy->tx_sc; 451 const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
443 452
444 memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci)); 453 memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
445 h->eth.h_proto = htons(ETH_P_MACSEC); 454 h->eth.h_proto = htons(ETH_P_MACSEC);
446 455
447 if (tx_sc->send_sci || 456 if (sci_present) {
448 (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
449 h->tci_an |= MACSEC_TCI_SC; 457 h->tci_an |= MACSEC_TCI_SC;
450 memcpy(&h->secure_channel_id, &secy->sci, 458 memcpy(&h->secure_channel_id, &secy->sci,
451 sizeof(h->secure_channel_id)); 459 sizeof(h->secure_channel_id));
@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
650 struct macsec_tx_sc *tx_sc; 658 struct macsec_tx_sc *tx_sc;
651 struct macsec_tx_sa *tx_sa; 659 struct macsec_tx_sa *tx_sa;
652 struct macsec_dev *macsec = macsec_priv(dev); 660 struct macsec_dev *macsec = macsec_priv(dev);
661 bool sci_present;
653 u32 pn; 662 u32 pn;
654 663
655 secy = &macsec->secy; 664 secy = &macsec->secy;
@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
687 696
688 unprotected_len = skb->len; 697 unprotected_len = skb->len;
689 eth = eth_hdr(skb); 698 eth = eth_hdr(skb);
690 hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci)); 699 sci_present = send_sci(secy);
700 hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
691 memmove(hh, eth, 2 * ETH_ALEN); 701 memmove(hh, eth, 2 * ETH_ALEN);
692 702
693 pn = tx_sa_update_pn(tx_sa, secy); 703 pn = tx_sa_update_pn(tx_sa, secy);
@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
696 kfree_skb(skb); 706 kfree_skb(skb);
697 return ERR_PTR(-ENOLINK); 707 return ERR_PTR(-ENOLINK);
698 } 708 }
699 macsec_fill_sectag(hh, secy, pn); 709 macsec_fill_sectag(hh, secy, pn, sci_present);
700 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); 710 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
701 711
702 skb_put(skb, secy->icv_len); 712 skb_put(skb, secy->icv_len);
@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
726 skb_to_sgvec(skb, sg, 0, skb->len); 736 skb_to_sgvec(skb, sg, 0, skb->len);
727 737
728 if (tx_sc->encrypt) { 738 if (tx_sc->encrypt) {
729 int len = skb->len - macsec_hdr_len(tx_sc->send_sci) - 739 int len = skb->len - macsec_hdr_len(sci_present) -
730 secy->icv_len; 740 secy->icv_len;
731 aead_request_set_crypt(req, sg, sg, len, iv); 741 aead_request_set_crypt(req, sg, sg, len, iv);
732 aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci)); 742 aead_request_set_ad(req, macsec_hdr_len(sci_present));
733 } else { 743 } else {
734 aead_request_set_crypt(req, sg, sg, 0, iv); 744 aead_request_set_crypt(req, sg, sg, 0, iv);
735 aead_request_set_ad(req, skb->len - secy->icv_len); 745 aead_request_set_ad(req, skb->len - secy->icv_len);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f279a897a5c7..a52b560e428b 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -42,19 +42,24 @@
42#define AT803X_MMD_ACCESS_CONTROL 0x0D 42#define AT803X_MMD_ACCESS_CONTROL 0x0D
43#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E 43#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
44#define AT803X_FUNC_DATA 0x4003 44#define AT803X_FUNC_DATA 0x4003
45#define AT803X_REG_CHIP_CONFIG 0x1f
46#define AT803X_BT_BX_REG_SEL 0x8000
45 47
46#define AT803X_DEBUG_ADDR 0x1D 48#define AT803X_DEBUG_ADDR 0x1D
47#define AT803X_DEBUG_DATA 0x1E 49#define AT803X_DEBUG_DATA 0x1E
48 50
51#define AT803X_MODE_CFG_MASK 0x0F
52#define AT803X_MODE_CFG_SGMII 0x01
53
54#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
55#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
56
49#define AT803X_DEBUG_REG_0 0x00 57#define AT803X_DEBUG_REG_0 0x00
50#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) 58#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
51 59
52#define AT803X_DEBUG_REG_5 0x05 60#define AT803X_DEBUG_REG_5 0x05
53#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) 61#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
54 62
55#define AT803X_REG_CHIP_CONFIG 0x1f
56#define AT803X_BT_BX_REG_SEL 0x8000
57
58#define ATH8030_PHY_ID 0x004dd076 63#define ATH8030_PHY_ID 0x004dd076
59#define ATH8031_PHY_ID 0x004dd074 64#define ATH8031_PHY_ID 0x004dd074
60#define ATH8035_PHY_ID 0x004dd072 65#define ATH8035_PHY_ID 0x004dd072
@@ -209,7 +214,6 @@ static int at803x_suspend(struct phy_device *phydev)
209{ 214{
210 int value; 215 int value;
211 int wol_enabled; 216 int wol_enabled;
212 int ccr;
213 217
214 mutex_lock(&phydev->lock); 218 mutex_lock(&phydev->lock);
215 219
@@ -225,16 +229,6 @@ static int at803x_suspend(struct phy_device *phydev)
225 229
226 phy_write(phydev, MII_BMCR, value); 230 phy_write(phydev, MII_BMCR, value);
227 231
228 if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
229 goto done;
230
231 /* also power-down SGMII interface */
232 ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
233 phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
234 phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
235 phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
236
237done:
238 mutex_unlock(&phydev->lock); 232 mutex_unlock(&phydev->lock);
239 233
240 return 0; 234 return 0;
@@ -243,7 +237,6 @@ done:
243static int at803x_resume(struct phy_device *phydev) 237static int at803x_resume(struct phy_device *phydev)
244{ 238{
245 int value; 239 int value;
246 int ccr;
247 240
248 mutex_lock(&phydev->lock); 241 mutex_lock(&phydev->lock);
249 242
@@ -251,17 +244,6 @@ static int at803x_resume(struct phy_device *phydev)
251 value &= ~(BMCR_PDOWN | BMCR_ISOLATE); 244 value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
252 phy_write(phydev, MII_BMCR, value); 245 phy_write(phydev, MII_BMCR, value);
253 246
254 if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
255 goto done;
256
257 /* also power-up SGMII interface */
258 ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
259 phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
260 value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE);
261 phy_write(phydev, MII_BMCR, value);
262 phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
263
264done:
265 mutex_unlock(&phydev->lock); 247 mutex_unlock(&phydev->lock);
266 248
267 return 0; 249 return 0;
@@ -381,6 +363,36 @@ static void at803x_link_change_notify(struct phy_device *phydev)
381 } 363 }
382} 364}
383 365
366static int at803x_aneg_done(struct phy_device *phydev)
367{
368 int ccr;
369
370 int aneg_done = genphy_aneg_done(phydev);
371 if (aneg_done != BMSR_ANEGCOMPLETE)
372 return aneg_done;
373
374 /*
375 * in SGMII mode, if copper side autoneg is successful,
376 * also check SGMII side autoneg result
377 */
378 ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
379 if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII)
380 return aneg_done;
381
382 /* switch to SGMII/fiber page */
383 phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
384
385 /* check if the SGMII link is OK. */
386 if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
387 pr_warn("803x_aneg_done: SGMII link is not ok\n");
388 aneg_done = 0;
389 }
390 /* switch back to copper page */
391 phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
392
393 return aneg_done;
394}
395
384static struct phy_driver at803x_driver[] = { 396static struct phy_driver at803x_driver[] = {
385{ 397{
386 /* ATHEROS 8035 */ 398 /* ATHEROS 8035 */
@@ -432,6 +444,7 @@ static struct phy_driver at803x_driver[] = {
432 .flags = PHY_HAS_INTERRUPT, 444 .flags = PHY_HAS_INTERRUPT,
433 .config_aneg = genphy_config_aneg, 445 .config_aneg = genphy_config_aneg,
434 .read_status = genphy_read_status, 446 .read_status = genphy_read_status,
447 .aneg_done = at803x_aneg_done,
435 .ack_interrupt = &at803x_ack_interrupt, 448 .ack_interrupt = &at803x_ack_interrupt,
436 .config_intr = &at803x_config_intr, 449 .config_intr = &at803x_config_intr,
437} }; 450} };
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 03d54c4adc88..800b39f06279 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -19,6 +19,7 @@
19#define TI_DP83848C_PHY_ID 0x20005ca0 19#define TI_DP83848C_PHY_ID 0x20005ca0
20#define NS_DP83848C_PHY_ID 0x20005c90 20#define NS_DP83848C_PHY_ID 0x20005c90
21#define TLK10X_PHY_ID 0x2000a210 21#define TLK10X_PHY_ID 0x2000a210
22#define TI_DP83822_PHY_ID 0x2000a240
22 23
23/* Registers */ 24/* Registers */
24#define DP83848_MICR 0x11 /* MII Interrupt Control Register */ 25#define DP83848_MICR 0x11 /* MII Interrupt Control Register */
@@ -77,6 +78,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
77 { TI_DP83848C_PHY_ID, 0xfffffff0 }, 78 { TI_DP83848C_PHY_ID, 0xfffffff0 },
78 { NS_DP83848C_PHY_ID, 0xfffffff0 }, 79 { NS_DP83848C_PHY_ID, 0xfffffff0 },
79 { TLK10X_PHY_ID, 0xfffffff0 }, 80 { TLK10X_PHY_ID, 0xfffffff0 },
81 { TI_DP83822_PHY_ID, 0xfffffff0 },
80 { } 82 { }
81}; 83};
82MODULE_DEVICE_TABLE(mdio, dp83848_tbl); 84MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
@@ -105,6 +107,7 @@ static struct phy_driver dp83848_driver[] = {
105 DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), 107 DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
106 DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), 108 DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
107 DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), 109 DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
110 DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
108}; 111};
109module_phy_driver(dp83848_driver); 112module_phy_driver(dp83848_driver);
110 113
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f79eb12c326a..125cff57c759 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -433,13 +433,13 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
433 mutex_lock(&dev->phy_mutex); 433 mutex_lock(&dev->phy_mutex);
434 do { 434 do {
435 ret = asix_set_sw_mii(dev, 0); 435 ret = asix_set_sw_mii(dev, 0);
436 if (ret == -ENODEV) 436 if (ret == -ENODEV || ret == -ETIMEDOUT)
437 break; 437 break;
438 usleep_range(1000, 1100); 438 usleep_range(1000, 1100);
439 ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 439 ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
440 0, 0, 1, &smsr, 0); 440 0, 0, 1, &smsr, 0);
441 } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); 441 } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
442 if (ret == -ENODEV) { 442 if (ret == -ENODEV || ret == -ETIMEDOUT) {
443 mutex_unlock(&dev->phy_mutex); 443 mutex_unlock(&dev->phy_mutex);
444 return ret; 444 return ret;
445 } 445 }
@@ -497,13 +497,13 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
497 mutex_lock(&dev->phy_mutex); 497 mutex_lock(&dev->phy_mutex);
498 do { 498 do {
499 ret = asix_set_sw_mii(dev, 1); 499 ret = asix_set_sw_mii(dev, 1);
500 if (ret == -ENODEV) 500 if (ret == -ENODEV || ret == -ETIMEDOUT)
501 break; 501 break;
502 usleep_range(1000, 1100); 502 usleep_range(1000, 1100);
503 ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 503 ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
504 0, 0, 1, &smsr, 1); 504 0, 0, 1, &smsr, 1);
505 } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); 505 } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
506 if (ret == -ENODEV) { 506 if (ret == -ENODEV || ret == -ETIMEDOUT) {
507 mutex_unlock(&dev->phy_mutex); 507 mutex_unlock(&dev->phy_mutex);
508 return ret; 508 return ret;
509 } 509 }
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5662babf0583..3e37724d30ae 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -151,7 +151,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
151 151
152 status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr); 152 status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
153 153
154 if (status < 0) { 154 if (status) {
155 usb_set_intfdata(intf, NULL); 155 usb_set_intfdata(intf, NULL);
156 usb_driver_release_interface(driver_of(intf), intf); 156 usb_driver_release_interface(driver_of(intf), intf);
157 return status; 157 return status;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b5554f2ebee4..ef83ae3b0a44 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2279,6 +2279,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2279 &adapter->shared->devRead.rxFilterConf; 2279 &adapter->shared->devRead.rxFilterConf;
2280 u8 *new_table = NULL; 2280 u8 *new_table = NULL;
2281 dma_addr_t new_table_pa = 0; 2281 dma_addr_t new_table_pa = 0;
2282 bool new_table_pa_valid = false;
2282 u32 new_mode = VMXNET3_RXM_UCAST; 2283 u32 new_mode = VMXNET3_RXM_UCAST;
2283 2284
2284 if (netdev->flags & IFF_PROMISC) { 2285 if (netdev->flags & IFF_PROMISC) {
@@ -2307,13 +2308,15 @@ vmxnet3_set_mc(struct net_device *netdev)
2307 new_table, 2308 new_table,
2308 sz, 2309 sz,
2309 PCI_DMA_TODEVICE); 2310 PCI_DMA_TODEVICE);
2311 if (!dma_mapping_error(&adapter->pdev->dev,
2312 new_table_pa)) {
2313 new_mode |= VMXNET3_RXM_MCAST;
2314 new_table_pa_valid = true;
2315 rxConf->mfTablePA = cpu_to_le64(
2316 new_table_pa);
2317 }
2310 } 2318 }
2311 2319 if (!new_table_pa_valid) {
2312 if (!dma_mapping_error(&adapter->pdev->dev,
2313 new_table_pa)) {
2314 new_mode |= VMXNET3_RXM_MCAST;
2315 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2316 } else {
2317 netdev_info(netdev, 2320 netdev_info(netdev,
2318 "failed to copy mcast list, setting ALL_MULTI\n"); 2321 "failed to copy mcast list, setting ALL_MULTI\n");
2319 new_mode |= VMXNET3_RXM_ALL_MULTI; 2322 new_mode |= VMXNET3_RXM_ALL_MULTI;
@@ -2338,7 +2341,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2338 VMXNET3_CMD_UPDATE_MAC_FILTERS); 2341 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2339 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2342 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2340 2343
2341 if (new_table_pa) 2344 if (new_table_pa_valid)
2342 dma_unmap_single(&adapter->pdev->dev, new_table_pa, 2345 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2343 rxConf->mfTableLen, PCI_DMA_TODEVICE); 2346 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2344 kfree(new_table); 2347 kfree(new_table);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 85c271c70d42..820de6a9ddde 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -956,6 +956,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
956 if (skb->pkt_type == PACKET_LOOPBACK) { 956 if (skb->pkt_type == PACKET_LOOPBACK) {
957 skb->dev = vrf_dev; 957 skb->dev = vrf_dev;
958 skb->skb_iif = vrf_dev->ifindex; 958 skb->skb_iif = vrf_dev->ifindex;
959 IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
959 skb->pkt_type = PACKET_HOST; 960 skb->pkt_type = PACKET_HOST;
960 goto out; 961 goto out;
961 } 962 }
@@ -996,6 +997,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
996{ 997{
997 skb->dev = vrf_dev; 998 skb->dev = vrf_dev;
998 skb->skb_iif = vrf_dev->ifindex; 999 skb->skb_iif = vrf_dev->ifindex;
1000 IPCB(skb)->flags |= IPSKB_L3SLAVE;
999 1001
1000 /* loopback traffic; do not push through packet taps again. 1002 /* loopback traffic; do not push through packet taps again.
1001 * Reset pkt_type for upper layers to process skb 1003 * Reset pkt_type for upper layers to process skb
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e7d16687538b..f3c2fa3ab0d5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -583,7 +583,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
583 } 583 }
584 } 584 }
585 585
586 pp = eth_gro_receive(head, skb); 586 pp = call_gro_receive(eth_gro_receive, head, skb);
587 flush = 0; 587 flush = 0;
588 588
589out: 589out:
@@ -943,17 +943,20 @@ static bool vxlan_snoop(struct net_device *dev,
943static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) 943static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
944{ 944{
945 struct vxlan_dev *vxlan; 945 struct vxlan_dev *vxlan;
946 struct vxlan_sock *sock4;
947 struct vxlan_sock *sock6 = NULL;
946 unsigned short family = dev->default_dst.remote_ip.sa.sa_family; 948 unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
947 949
950 sock4 = rtnl_dereference(dev->vn4_sock);
951
948 /* The vxlan_sock is only used by dev, leaving group has 952 /* The vxlan_sock is only used by dev, leaving group has
949 * no effect on other vxlan devices. 953 * no effect on other vxlan devices.
950 */ 954 */
951 if (family == AF_INET && dev->vn4_sock && 955 if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
952 atomic_read(&dev->vn4_sock->refcnt) == 1)
953 return false; 956 return false;
954#if IS_ENABLED(CONFIG_IPV6) 957#if IS_ENABLED(CONFIG_IPV6)
955 if (family == AF_INET6 && dev->vn6_sock && 958 sock6 = rtnl_dereference(dev->vn6_sock);
956 atomic_read(&dev->vn6_sock->refcnt) == 1) 959 if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
957 return false; 960 return false;
958#endif 961#endif
959 962
@@ -961,10 +964,12 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
961 if (!netif_running(vxlan->dev) || vxlan == dev) 964 if (!netif_running(vxlan->dev) || vxlan == dev)
962 continue; 965 continue;
963 966
964 if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock) 967 if (family == AF_INET &&
968 rtnl_dereference(vxlan->vn4_sock) != sock4)
965 continue; 969 continue;
966#if IS_ENABLED(CONFIG_IPV6) 970#if IS_ENABLED(CONFIG_IPV6)
967 if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock) 971 if (family == AF_INET6 &&
972 rtnl_dereference(vxlan->vn6_sock) != sock6)
968 continue; 973 continue;
969#endif 974#endif
970 975
@@ -1005,22 +1010,25 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
1005 1010
1006static void vxlan_sock_release(struct vxlan_dev *vxlan) 1011static void vxlan_sock_release(struct vxlan_dev *vxlan)
1007{ 1012{
1008 bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock); 1013 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1009#if IS_ENABLED(CONFIG_IPV6) 1014#if IS_ENABLED(CONFIG_IPV6)
1010 bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock); 1015 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1016
1017 rcu_assign_pointer(vxlan->vn6_sock, NULL);
1011#endif 1018#endif
1012 1019
1020 rcu_assign_pointer(vxlan->vn4_sock, NULL);
1013 synchronize_net(); 1021 synchronize_net();
1014 1022
1015 if (ipv4) { 1023 if (__vxlan_sock_release_prep(sock4)) {
1016 udp_tunnel_sock_release(vxlan->vn4_sock->sock); 1024 udp_tunnel_sock_release(sock4->sock);
1017 kfree(vxlan->vn4_sock); 1025 kfree(sock4);
1018 } 1026 }
1019 1027
1020#if IS_ENABLED(CONFIG_IPV6) 1028#if IS_ENABLED(CONFIG_IPV6)
1021 if (ipv6) { 1029 if (__vxlan_sock_release_prep(sock6)) {
1022 udp_tunnel_sock_release(vxlan->vn6_sock->sock); 1030 udp_tunnel_sock_release(sock6->sock);
1023 kfree(vxlan->vn6_sock); 1031 kfree(sock6);
1024 } 1032 }
1025#endif 1033#endif
1026} 1034}
@@ -1036,18 +1044,21 @@ static int vxlan_igmp_join(struct vxlan_dev *vxlan)
1036 int ret = -EINVAL; 1044 int ret = -EINVAL;
1037 1045
1038 if (ip->sa.sa_family == AF_INET) { 1046 if (ip->sa.sa_family == AF_INET) {
1047 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1039 struct ip_mreqn mreq = { 1048 struct ip_mreqn mreq = {
1040 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, 1049 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1041 .imr_ifindex = ifindex, 1050 .imr_ifindex = ifindex,
1042 }; 1051 };
1043 1052
1044 sk = vxlan->vn4_sock->sock->sk; 1053 sk = sock4->sock->sk;
1045 lock_sock(sk); 1054 lock_sock(sk);
1046 ret = ip_mc_join_group(sk, &mreq); 1055 ret = ip_mc_join_group(sk, &mreq);
1047 release_sock(sk); 1056 release_sock(sk);
1048#if IS_ENABLED(CONFIG_IPV6) 1057#if IS_ENABLED(CONFIG_IPV6)
1049 } else { 1058 } else {
1050 sk = vxlan->vn6_sock->sock->sk; 1059 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1060
1061 sk = sock6->sock->sk;
1051 lock_sock(sk); 1062 lock_sock(sk);
1052 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, 1063 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
1053 &ip->sin6.sin6_addr); 1064 &ip->sin6.sin6_addr);
@@ -1067,18 +1078,21 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
1067 int ret = -EINVAL; 1078 int ret = -EINVAL;
1068 1079
1069 if (ip->sa.sa_family == AF_INET) { 1080 if (ip->sa.sa_family == AF_INET) {
1081 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1070 struct ip_mreqn mreq = { 1082 struct ip_mreqn mreq = {
1071 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, 1083 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1072 .imr_ifindex = ifindex, 1084 .imr_ifindex = ifindex,
1073 }; 1085 };
1074 1086
1075 sk = vxlan->vn4_sock->sock->sk; 1087 sk = sock4->sock->sk;
1076 lock_sock(sk); 1088 lock_sock(sk);
1077 ret = ip_mc_leave_group(sk, &mreq); 1089 ret = ip_mc_leave_group(sk, &mreq);
1078 release_sock(sk); 1090 release_sock(sk);
1079#if IS_ENABLED(CONFIG_IPV6) 1091#if IS_ENABLED(CONFIG_IPV6)
1080 } else { 1092 } else {
1081 sk = vxlan->vn6_sock->sock->sk; 1093 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1094
1095 sk = sock6->sock->sk;
1082 lock_sock(sk); 1096 lock_sock(sk);
1083 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, 1097 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1084 &ip->sin6.sin6_addr); 1098 &ip->sin6.sin6_addr);
@@ -1828,11 +1842,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1828 struct dst_cache *dst_cache, 1842 struct dst_cache *dst_cache,
1829 const struct ip_tunnel_info *info) 1843 const struct ip_tunnel_info *info)
1830{ 1844{
1845 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
1831 bool use_cache = ip_tunnel_dst_cache_usable(skb, info); 1846 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1832 struct dst_entry *ndst; 1847 struct dst_entry *ndst;
1833 struct flowi6 fl6; 1848 struct flowi6 fl6;
1834 int err; 1849 int err;
1835 1850
1851 if (!sock6)
1852 return ERR_PTR(-EIO);
1853
1836 if (tos && !info) 1854 if (tos && !info)
1837 use_cache = false; 1855 use_cache = false;
1838 if (use_cache) { 1856 if (use_cache) {
@@ -1850,7 +1868,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1850 fl6.flowi6_proto = IPPROTO_UDP; 1868 fl6.flowi6_proto = IPPROTO_UDP;
1851 1869
1852 err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 1870 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1853 vxlan->vn6_sock->sock->sk, 1871 sock6->sock->sk,
1854 &ndst, &fl6); 1872 &ndst, &fl6);
1855 if (err < 0) 1873 if (err < 0)
1856 return ERR_PTR(err); 1874 return ERR_PTR(err);
@@ -1995,9 +2013,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1995 } 2013 }
1996 2014
1997 if (dst->sa.sa_family == AF_INET) { 2015 if (dst->sa.sa_family == AF_INET) {
1998 if (!vxlan->vn4_sock) 2016 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2017
2018 if (!sock4)
1999 goto drop; 2019 goto drop;
2000 sk = vxlan->vn4_sock->sock->sk; 2020 sk = sock4->sock->sk;
2001 2021
2002 rt = vxlan_get_route(vxlan, skb, 2022 rt = vxlan_get_route(vxlan, skb,
2003 rdst ? rdst->remote_ifindex : 0, tos, 2023 rdst ? rdst->remote_ifindex : 0, tos,
@@ -2050,12 +2070,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2050 src_port, dst_port, xnet, !udp_sum); 2070 src_port, dst_port, xnet, !udp_sum);
2051#if IS_ENABLED(CONFIG_IPV6) 2071#if IS_ENABLED(CONFIG_IPV6)
2052 } else { 2072 } else {
2073 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2053 struct dst_entry *ndst; 2074 struct dst_entry *ndst;
2054 u32 rt6i_flags; 2075 u32 rt6i_flags;
2055 2076
2056 if (!vxlan->vn6_sock) 2077 if (!sock6)
2057 goto drop; 2078 goto drop;
2058 sk = vxlan->vn6_sock->sock->sk; 2079 sk = sock6->sock->sk;
2059 2080
2060 ndst = vxlan6_get_route(vxlan, skb, 2081 ndst = vxlan6_get_route(vxlan, skb,
2061 rdst ? rdst->remote_ifindex : 0, tos, 2082 rdst ? rdst->remote_ifindex : 0, tos,
@@ -2415,9 +2436,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2415 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; 2436 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2416 2437
2417 if (ip_tunnel_info_af(info) == AF_INET) { 2438 if (ip_tunnel_info_af(info) == AF_INET) {
2439 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2418 struct rtable *rt; 2440 struct rtable *rt;
2419 2441
2420 if (!vxlan->vn4_sock) 2442 if (!sock4)
2421 return -EINVAL; 2443 return -EINVAL;
2422 rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, 2444 rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
2423 info->key.u.ipv4.dst, 2445 info->key.u.ipv4.dst,
@@ -2429,8 +2451,6 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2429#if IS_ENABLED(CONFIG_IPV6) 2451#if IS_ENABLED(CONFIG_IPV6)
2430 struct dst_entry *ndst; 2452 struct dst_entry *ndst;
2431 2453
2432 if (!vxlan->vn6_sock)
2433 return -EINVAL;
2434 ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, 2454 ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
2435 info->key.label, &info->key.u.ipv6.dst, 2455 info->key.label, &info->key.u.ipv6.dst,
2436 &info->key.u.ipv6.src, NULL, info); 2456 &info->key.u.ipv6.src, NULL, info);
@@ -2740,10 +2760,10 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
2740 return PTR_ERR(vs); 2760 return PTR_ERR(vs);
2741#if IS_ENABLED(CONFIG_IPV6) 2761#if IS_ENABLED(CONFIG_IPV6)
2742 if (ipv6) 2762 if (ipv6)
2743 vxlan->vn6_sock = vs; 2763 rcu_assign_pointer(vxlan->vn6_sock, vs);
2744 else 2764 else
2745#endif 2765#endif
2746 vxlan->vn4_sock = vs; 2766 rcu_assign_pointer(vxlan->vn4_sock, vs);
2747 vxlan_vs_add_dev(vs, vxlan); 2767 vxlan_vs_add_dev(vs, vxlan);
2748 return 0; 2768 return 0;
2749} 2769}
@@ -2754,9 +2774,9 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
2754 bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; 2774 bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
2755 int ret = 0; 2775 int ret = 0;
2756 2776
2757 vxlan->vn4_sock = NULL; 2777 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
2758#if IS_ENABLED(CONFIG_IPV6) 2778#if IS_ENABLED(CONFIG_IPV6)
2759 vxlan->vn6_sock = NULL; 2779 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
2760 if (ipv6 || metadata) 2780 if (ipv6 || metadata)
2761 ret = __vxlan_sock_add(vxlan, true); 2781 ret = __vxlan_sock_add(vxlan, true);
2762#endif 2782#endif
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 33ab3345d333..4e9fe75d7067 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -294,7 +294,7 @@ config FSL_UCC_HDLC
294config SLIC_DS26522 294config SLIC_DS26522
295 tristate "Slic Maxim ds26522 card support" 295 tristate "Slic Maxim ds26522 card support"
296 depends on SPI 296 depends on SPI
297 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE 297 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
298 help 298 help
299 This module initializes and configures the slic maxim card 299 This module initializes and configures the slic maxim card
300 in T1 or E1 mode. 300 in T1 or E1 mode.
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index d06a887a2352..b776a0ab106c 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -223,12 +223,19 @@ static int slic_ds26522_probe(struct spi_device *spi)
223 return ret; 223 return ret;
224} 224}
225 225
226static const struct spi_device_id slic_ds26522_id[] = {
227 { .name = "ds26522" },
228 { /* sentinel */ },
229};
230MODULE_DEVICE_TABLE(spi, slic_ds26522_id);
231
226static const struct of_device_id slic_ds26522_match[] = { 232static const struct of_device_id slic_ds26522_match[] = {
227 { 233 {
228 .compatible = "maxim,ds26522", 234 .compatible = "maxim,ds26522",
229 }, 235 },
230 {}, 236 {},
231}; 237};
238MODULE_DEVICE_TABLE(of, slic_ds26522_match);
232 239
233static struct spi_driver slic_ds26522_driver = { 240static struct spi_driver slic_ds26522_driver = {
234 .driver = { 241 .driver = {
@@ -239,6 +246,7 @@ static struct spi_driver slic_ds26522_driver = {
239 }, 246 },
240 .probe = slic_ds26522_probe, 247 .probe = slic_ds26522_probe,
241 .remove = slic_ds26522_remove, 248 .remove = slic_ds26522_remove,
249 .id_table = slic_ds26522_id,
242}; 250};
243 251
244static int __init slic_ds26522_init(void) 252static int __init slic_ds26522_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index dda49af1eb74..521f1c55c19e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -450,6 +450,7 @@ struct ath10k_debug {
450 u32 pktlog_filter; 450 u32 pktlog_filter;
451 u32 reg_addr; 451 u32 reg_addr;
452 u32 nf_cal_period; 452 u32 nf_cal_period;
453 void *cal_data;
453 454
454 struct ath10k_fw_crash_data *fw_crash_data; 455 struct ath10k_fw_crash_data *fw_crash_data;
455}; 456};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 832da6ed9f13..82a4c67f3672 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -30,6 +30,8 @@
30/* ms */ 30/* ms */
31#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 31#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
32 32
33#define ATH10K_DEBUG_CAL_DATA_LEN 12064
34
33#define ATH10K_FW_CRASH_DUMP_VERSION 1 35#define ATH10K_FW_CRASH_DUMP_VERSION 1
34 36
35/** 37/**
@@ -1451,56 +1453,51 @@ static const struct file_operations fops_fw_dbglog = {
1451 .llseek = default_llseek, 1453 .llseek = default_llseek,
1452}; 1454};
1453 1455
1454static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) 1456static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
1455{ 1457{
1456 struct ath10k *ar = inode->i_private;
1457 void *buf;
1458 u32 hi_addr; 1458 u32 hi_addr;
1459 __le32 addr; 1459 __le32 addr;
1460 int ret; 1460 int ret;
1461 1461
1462 mutex_lock(&ar->conf_mutex); 1462 lockdep_assert_held(&ar->conf_mutex);
1463
1464 if (ar->state != ATH10K_STATE_ON &&
1465 ar->state != ATH10K_STATE_UTF) {
1466 ret = -ENETDOWN;
1467 goto err;
1468 }
1469 1463
1470 buf = vmalloc(ar->hw_params.cal_data_len); 1464 if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
1471 if (!buf) { 1465 return -EINVAL;
1472 ret = -ENOMEM;
1473 goto err;
1474 }
1475 1466
1476 hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); 1467 hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
1477 1468
1478 ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); 1469 ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
1479 if (ret) { 1470 if (ret) {
1480 ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret); 1471 ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
1481 goto err_vfree; 1472 ret);
1473 return ret;
1482 } 1474 }
1483 1475
1484 ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf, 1476 ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
1485 ar->hw_params.cal_data_len); 1477 ar->hw_params.cal_data_len);
1486 if (ret) { 1478 if (ret) {
1487 ath10k_warn(ar, "failed to read calibration data: %d\n", ret); 1479 ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
1488 goto err_vfree; 1480 return ret;
1489 } 1481 }
1490 1482
1491 file->private_data = buf; 1483 return 0;
1484}
1492 1485
1493 mutex_unlock(&ar->conf_mutex); 1486static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
1487{
1488 struct ath10k *ar = inode->i_private;
1494 1489
1495 return 0; 1490 mutex_lock(&ar->conf_mutex);
1496 1491
1497err_vfree: 1492 if (ar->state == ATH10K_STATE_ON ||
1498 vfree(buf); 1493 ar->state == ATH10K_STATE_UTF) {
1494 ath10k_debug_cal_data_fetch(ar);
1495 }
1499 1496
1500err: 1497 file->private_data = ar;
1501 mutex_unlock(&ar->conf_mutex); 1498 mutex_unlock(&ar->conf_mutex);
1502 1499
1503 return ret; 1500 return 0;
1504} 1501}
1505 1502
1506static ssize_t ath10k_debug_cal_data_read(struct file *file, 1503static ssize_t ath10k_debug_cal_data_read(struct file *file,
@@ -1508,18 +1505,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
1508 size_t count, loff_t *ppos) 1505 size_t count, loff_t *ppos)
1509{ 1506{
1510 struct ath10k *ar = file->private_data; 1507 struct ath10k *ar = file->private_data;
1511 void *buf = file->private_data;
1512 1508
1513 return simple_read_from_buffer(user_buf, count, ppos, 1509 mutex_lock(&ar->conf_mutex);
1514 buf, ar->hw_params.cal_data_len);
1515}
1516 1510
1517static int ath10k_debug_cal_data_release(struct inode *inode, 1511 count = simple_read_from_buffer(user_buf, count, ppos,
1518 struct file *file) 1512 ar->debug.cal_data,
1519{ 1513 ar->hw_params.cal_data_len);
1520 vfree(file->private_data);
1521 1514
1522 return 0; 1515 mutex_unlock(&ar->conf_mutex);
1516
1517 return count;
1523} 1518}
1524 1519
1525static ssize_t ath10k_write_ani_enable(struct file *file, 1520static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1580,7 +1575,6 @@ static const struct file_operations fops_ani_enable = {
1580static const struct file_operations fops_cal_data = { 1575static const struct file_operations fops_cal_data = {
1581 .open = ath10k_debug_cal_data_open, 1576 .open = ath10k_debug_cal_data_open,
1582 .read = ath10k_debug_cal_data_read, 1577 .read = ath10k_debug_cal_data_read,
1583 .release = ath10k_debug_cal_data_release,
1584 .owner = THIS_MODULE, 1578 .owner = THIS_MODULE,
1585 .llseek = default_llseek, 1579 .llseek = default_llseek,
1586}; 1580};
@@ -1932,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar)
1932{ 1926{
1933 lockdep_assert_held(&ar->conf_mutex); 1927 lockdep_assert_held(&ar->conf_mutex);
1934 1928
1929 ath10k_debug_cal_data_fetch(ar);
1930
1935 /* Must not use _sync to avoid deadlock, we do that in 1931 /* Must not use _sync to avoid deadlock, we do that in
1936 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid 1932 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
1937 * warning from del_timer(). */ 1933 * warning from del_timer(). */
@@ -2344,6 +2340,10 @@ int ath10k_debug_create(struct ath10k *ar)
2344 if (!ar->debug.fw_crash_data) 2340 if (!ar->debug.fw_crash_data)
2345 return -ENOMEM; 2341 return -ENOMEM;
2346 2342
2343 ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
2344 if (!ar->debug.cal_data)
2345 return -ENOMEM;
2346
2347 INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); 2347 INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
2348 INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); 2348 INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
2349 INIT_LIST_HEAD(&ar->debug.fw_stats.peers); 2349 INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
@@ -2357,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
2357 vfree(ar->debug.fw_crash_data); 2357 vfree(ar->debug.fw_crash_data);
2358 ar->debug.fw_crash_data = NULL; 2358 ar->debug.fw_crash_data = NULL;
2359 2359
2360 vfree(ar->debug.cal_data);
2361 ar->debug.cal_data = NULL;
2362
2360 ath10k_debug_fw_stats_reset(ar); 2363 ath10k_debug_fw_stats_reset(ar);
2361 2364
2362 kfree(ar->debug.tpc_stats); 2365 kfree(ar->debug.tpc_stats);
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index eab0ab976af2..76eb33679d4b 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1401,6 +1401,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
1401 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, 1401 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
1402 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, 1402 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
1403 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))}, 1403 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
1404 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
1404 {}, 1405 {},
1405}; 1406};
1406 1407
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index b6f064a8d264..7e27a06e5df1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -33,7 +33,6 @@ struct coeff {
33 33
34enum ar9003_cal_types { 34enum ar9003_cal_types {
35 IQ_MISMATCH_CAL = BIT(0), 35 IQ_MISMATCH_CAL = BIT(0),
36 TEMP_COMP_CAL = BIT(1),
37}; 36};
38 37
39static void ar9003_hw_setup_calibration(struct ath_hw *ah, 38static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
59 /* Kick-off cal */ 58 /* Kick-off cal */
60 REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL); 59 REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
61 break; 60 break;
62 case TEMP_COMP_CAL:
63 ath_dbg(common, CALIBRATE,
64 "starting Temperature Compensation Calibration\n");
65 REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
66 REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
67 break;
68 default: 61 default:
69 ath_err(common, "Invalid calibration type\n"); 62 ath_err(common, "Invalid calibration type\n");
70 break; 63 break;
@@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
93 /* 86 /*
94 * Accumulate cal measures for active chains 87 * Accumulate cal measures for active chains
95 */ 88 */
96 if (cur_caldata->calCollect) 89 cur_caldata->calCollect(ah);
97 cur_caldata->calCollect(ah);
98 ah->cal_samples++; 90 ah->cal_samples++;
99 91
100 if (ah->cal_samples >= cur_caldata->calNumSamples) { 92 if (ah->cal_samples >= cur_caldata->calNumSamples) {
@@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
107 /* 99 /*
108 * Process accumulated data 100 * Process accumulated data
109 */ 101 */
110 if (cur_caldata->calPostProc) 102 cur_caldata->calPostProc(ah, numChains);
111 cur_caldata->calPostProc(ah, numChains);
112 103
113 /* Calibration has finished. */ 104 /* Calibration has finished. */
114 caldata->CalValid |= cur_caldata->calType; 105 caldata->CalValid |= cur_caldata->calType;
@@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
323 ar9003_hw_iqcalibrate 314 ar9003_hw_iqcalibrate
324}; 315};
325 316
326static const struct ath9k_percal_data temp_cal_single_sample = {
327 TEMP_COMP_CAL,
328 MIN_CAL_SAMPLES,
329 PER_MAX_LOG_COUNT,
330};
331
332static void ar9003_hw_init_cal_settings(struct ath_hw *ah) 317static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
333{ 318{
334 ah->iq_caldata.calData = &iq_cal_single_sample; 319 ah->iq_caldata.calData = &iq_cal_single_sample;
335 ah->temp_caldata.calData = &temp_cal_single_sample;
336 320
337 if (AR_SREV_9300_20_OR_LATER(ah)) { 321 if (AR_SREV_9300_20_OR_LATER(ah)) {
338 ah->enabled_cals |= TX_IQ_CAL; 322 ah->enabled_cals |= TX_IQ_CAL;
@@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
340 ah->enabled_cals |= TX_IQ_ON_AGC_CAL; 324 ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
341 } 325 }
342 326
343 ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL; 327 ah->supp_cals = IQ_MISMATCH_CAL;
344} 328}
345 329
346#define OFF_UPPER_LT 24 330#define OFF_UPPER_LT 24
@@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
1399 INIT_CAL(&ah->iq_caldata); 1383 INIT_CAL(&ah->iq_caldata);
1400 INSERT_CAL(ah, &ah->iq_caldata); 1384 INSERT_CAL(ah, &ah->iq_caldata);
1401 1385
1402 INIT_CAL(&ah->temp_caldata);
1403 INSERT_CAL(ah, &ah->temp_caldata);
1404
1405 /* Initialize current pointer to first element in list */ 1386 /* Initialize current pointer to first element in list */
1406 ah->cal_list_curr = ah->cal_list; 1387 ah->cal_list_curr = ah->cal_list;
1407 1388
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2a5d3ad1169c..9cbca1229bac 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -830,7 +830,6 @@ struct ath_hw {
830 /* Calibration */ 830 /* Calibration */
831 u32 supp_cals; 831 u32 supp_cals;
832 struct ath9k_cal_list iq_caldata; 832 struct ath9k_cal_list iq_caldata;
833 struct ath9k_cal_list temp_caldata;
834 struct ath9k_cal_list adcgain_caldata; 833 struct ath9k_cal_list adcgain_caldata;
835 struct ath9k_cal_list adcdc_caldata; 834 struct ath9k_cal_list adcdc_caldata;
836 struct ath9k_cal_list *cal_list; 835 struct ath9k_cal_list *cal_list;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 94480123efa3..274dd5a1574a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
45 skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length)); 45 skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
46 46
47 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, 47 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
48 priv->wdev.iftype, 0, false); 48 priv->wdev.iftype, 0, NULL, NULL);
49 49
50 while (!skb_queue_empty(&list)) { 50 while (!skb_queue_empty(&list)) {
51 struct rx_packet_hdr *rx_hdr; 51 struct rx_packet_hdr *rx_hdr;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 1016628926d2..08d587a342d3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -238,7 +238,7 @@ struct rtl8xxxu_rxdesc16 {
238 u32 pattern1match:1; 238 u32 pattern1match:1;
239 u32 pattern0match:1; 239 u32 pattern0match:1;
240#endif 240#endif
241 __le32 tsfl; 241 u32 tsfl;
242#if 0 242#if 0
243 u32 bassn:12; 243 u32 bassn:12;
244 u32 bavld:1; 244 u32 bavld:1;
@@ -368,7 +368,7 @@ struct rtl8xxxu_rxdesc24 {
368 u32 ldcp:1; 368 u32 ldcp:1;
369 u32 splcp:1; 369 u32 splcp:1;
370#endif 370#endif
371 __le32 tsfl; 371 u32 tsfl;
372}; 372};
373 373
374struct rtl8xxxu_txdesc32 { 374struct rtl8xxxu_txdesc32 {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index df54d27e7851..a793fedc3654 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1461,7 +1461,9 @@ static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
1461 int count, ret = 0; 1461 int count, ret = 0;
1462 1462
1463 /* Turn off RF */ 1463 /* Turn off RF */
1464 rtl8xxxu_write8(priv, REG_RF_CTRL, 0); 1464 val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
1465 val8 &= ~RF_ENABLE;
1466 rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
1465 1467
1466 /* Switch DPDT_SEL_P output from register 0x65[2] */ 1468 /* Switch DPDT_SEL_P output from register 0x65[2] */
1467 val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); 1469 val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
@@ -1593,6 +1595,10 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
1593 u32 val32; 1595 u32 val32;
1594 u8 val8; 1596 u8 val8;
1595 1597
1598 val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
1599 val32 |= (BIT(22) | BIT(23));
1600 rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
1601
1596 val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG); 1602 val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
1597 val8 |= BIT(5); 1603 val8 |= BIT(5);
1598 rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8); 1604 rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 6c086b5657e9..02b8ddd98a95 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
1498 u32 val32; 1498 u32 val32;
1499 u8 val8; 1499 u8 val8;
1500 1500
1501 val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
1502 val32 |= (BIT(22) | BIT(23));
1503 rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
1504
1501 /* 1505 /*
1502 * No indication anywhere as to what 0x0790 does. The 2 antenna 1506 * No indication anywhere as to what 0x0790 does. The 2 antenna
1503 * vendor code preserves bits 6-7 here. 1507 * vendor code preserves bits 6-7 here.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index b2d7f6e69667..a5e6ec2152bf 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5197,7 +5197,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
5197 pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift + 5197 pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
5198 sizeof(struct rtl8xxxu_rxdesc16), 128); 5198 sizeof(struct rtl8xxxu_rxdesc16), 128);
5199 5199
5200 if (pkt_cnt > 1) 5200 /*
5201 * Only clone the skb if there's enough data at the end to
5202 * at least cover the rx descriptor
5203 */
5204 if (pkt_cnt > 1 &&
5205 urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
5201 next_skb = skb_clone(skb, GFP_ATOMIC); 5206 next_skb = skb_clone(skb, GFP_ATOMIC);
5202 5207
5203 rx_status = IEEE80211_SKB_RXCB(skb); 5208 rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5215,7 +5220,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
5215 rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, 5220 rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
5216 rx_desc->rxmcs); 5221 rx_desc->rxmcs);
5217 5222
5218 rx_status->mactime = le32_to_cpu(rx_desc->tsfl); 5223 rx_status->mactime = rx_desc->tsfl;
5219 rx_status->flag |= RX_FLAG_MACTIME_START; 5224 rx_status->flag |= RX_FLAG_MACTIME_START;
5220 5225
5221 if (!rx_desc->swdec) 5226 if (!rx_desc->swdec)
@@ -5285,7 +5290,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
5285 rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, 5290 rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
5286 rx_desc->rxmcs); 5291 rx_desc->rxmcs);
5287 5292
5288 rx_status->mactime = le32_to_cpu(rx_desc->tsfl); 5293 rx_status->mactime = rx_desc->tsfl;
5289 rx_status->flag |= RX_FLAG_MACTIME_START; 5294 rx_status->flag |= RX_FLAG_MACTIME_START;
5290 5295
5291 if (!rx_desc->swdec) 5296 if (!rx_desc->swdec)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f95760c13c56..8e7f23c11680 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -111,7 +111,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
111 if (!err) 111 if (!err)
112 goto found_alt; 112 goto found_alt;
113 } 113 }
114 pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); 114 pr_err("Selected firmware is not available\n");
115 rtlpriv->max_fw_size = 0; 115 rtlpriv->max_fw_size = 0;
116 return; 116 return;
117 } 117 }
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index e7b11b40e68d..f361808def47 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -86,6 +86,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
86 struct rtl_priv *rtlpriv = rtl_priv(hw); 86 struct rtl_priv *rtlpriv = rtl_priv(hw);
87 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 87 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
88 u8 tid; 88 u8 tid;
89 char *fw_name;
89 90
90 rtl8188ee_bt_reg_init(hw); 91 rtl8188ee_bt_reg_init(hw);
91 rtlpriv->dm.dm_initialgain_enable = 1; 92 rtlpriv->dm.dm_initialgain_enable = 1;
@@ -169,10 +170,10 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
169 return 1; 170 return 1;
170 } 171 }
171 172
172 rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin"; 173 fw_name = "rtlwifi/rtl8188efw.bin";
173 rtlpriv->max_fw_size = 0x8000; 174 rtlpriv->max_fw_size = 0x8000;
174 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); 175 pr_info("Using firmware %s\n", fw_name);
175 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 176 err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
176 rtlpriv->io.dev, GFP_KERNEL, hw, 177 rtlpriv->io.dev, GFP_KERNEL, hw,
177 rtl_fw_cb); 178 rtl_fw_cb);
178 if (err) { 179 if (err) {
@@ -284,7 +285,6 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
284 .bar_id = 2, 285 .bar_id = 2,
285 .write_readback = true, 286 .write_readback = true,
286 .name = "rtl88e_pci", 287 .name = "rtl88e_pci",
287 .fw_name = "rtlwifi/rtl8188efw.bin",
288 .ops = &rtl8188ee_hal_ops, 288 .ops = &rtl8188ee_hal_ops,
289 .mod_params = &rtl88ee_mod_params, 289 .mod_params = &rtl88ee_mod_params,
290 290
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 87aa209ae325..8b6e37ce3f66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -96,6 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
96 struct rtl_priv *rtlpriv = rtl_priv(hw); 96 struct rtl_priv *rtlpriv = rtl_priv(hw);
97 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 97 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
98 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 98 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
99 char *fw_name = "rtlwifi/rtl8192cfwU.bin";
99 100
100 rtl8192ce_bt_reg_init(hw); 101 rtl8192ce_bt_reg_init(hw);
101 102
@@ -167,15 +168,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
167 } 168 }
168 169
169 /* request fw */ 170 /* request fw */
170 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && 171 if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
171 !IS_92C_SERIAL(rtlhal->version)) 172 fw_name = "rtlwifi/rtl8192cfwU_B.bin";
172 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
173 else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
174 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
175 173
176 rtlpriv->max_fw_size = 0x4000; 174 rtlpriv->max_fw_size = 0x4000;
177 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); 175 pr_info("Using firmware %s\n", fw_name);
178 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 176 err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
179 rtlpriv->io.dev, GFP_KERNEL, hw, 177 rtlpriv->io.dev, GFP_KERNEL, hw,
180 rtl_fw_cb); 178 rtl_fw_cb);
181 if (err) { 179 if (err) {
@@ -262,7 +260,6 @@ static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
262 .bar_id = 2, 260 .bar_id = 2,
263 .write_readback = true, 261 .write_readback = true,
264 .name = "rtl92c_pci", 262 .name = "rtl92c_pci",
265 .fw_name = "rtlwifi/rtl8192cfw.bin",
266 .ops = &rtl8192ce_hal_ops, 263 .ops = &rtl8192ce_hal_ops,
267 .mod_params = &rtl92ce_mod_params, 264 .mod_params = &rtl92ce_mod_params,
268 265
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 7c6f7f0d18c6..f953320f0e23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -59,6 +59,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
59{ 59{
60 struct rtl_priv *rtlpriv = rtl_priv(hw); 60 struct rtl_priv *rtlpriv = rtl_priv(hw);
61 int err; 61 int err;
62 char *fw_name;
62 63
63 rtlpriv->dm.dm_initialgain_enable = true; 64 rtlpriv->dm.dm_initialgain_enable = true;
64 rtlpriv->dm.dm_flag = 0; 65 rtlpriv->dm.dm_flag = 0;
@@ -77,18 +78,18 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
77 } 78 }
78 if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) && 79 if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
79 !IS_92C_SERIAL(rtlpriv->rtlhal.version)) { 80 !IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
80 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin"; 81 fw_name = "rtlwifi/rtl8192cufw_A.bin";
81 } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) { 82 } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
82 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin"; 83 fw_name = "rtlwifi/rtl8192cufw_B.bin";
83 } else { 84 } else {
84 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; 85 fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
85 } 86 }
86 /* provide name of alternative file */ 87 /* provide name of alternative file */
87 rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin"; 88 rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
88 pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name); 89 pr_info("Loading firmware %s\n", fw_name);
89 rtlpriv->max_fw_size = 0x4000; 90 rtlpriv->max_fw_size = 0x4000;
90 err = request_firmware_nowait(THIS_MODULE, 1, 91 err = request_firmware_nowait(THIS_MODULE, 1,
91 rtlpriv->cfg->fw_name, rtlpriv->io.dev, 92 fw_name, rtlpriv->io.dev,
92 GFP_KERNEL, hw, rtl_fw_cb); 93 GFP_KERNEL, hw, rtl_fw_cb);
93 return err; 94 return err;
94} 95}
@@ -187,7 +188,6 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
187 188
188static struct rtl_hal_cfg rtl92cu_hal_cfg = { 189static struct rtl_hal_cfg rtl92cu_hal_cfg = {
189 .name = "rtl92c_usb", 190 .name = "rtl92c_usb",
190 .fw_name = "rtlwifi/rtl8192cufw.bin",
191 .ops = &rtl8192cu_hal_ops, 191 .ops = &rtl8192cu_hal_ops,
192 .mod_params = &rtl92cu_mod_params, 192 .mod_params = &rtl92cu_mod_params,
193 .usb_interface_cfg = &rtl92cu_interface_cfg, 193 .usb_interface_cfg = &rtl92cu_interface_cfg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 0538a4d09568..1ebfee18882f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -92,6 +92,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
92 u8 tid; 92 u8 tid;
93 struct rtl_priv *rtlpriv = rtl_priv(hw); 93 struct rtl_priv *rtlpriv = rtl_priv(hw);
94 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 94 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
95 char *fw_name = "rtlwifi/rtl8192defw.bin";
95 96
96 rtlpriv->dm.dm_initialgain_enable = true; 97 rtlpriv->dm.dm_initialgain_enable = true;
97 rtlpriv->dm.dm_flag = 0; 98 rtlpriv->dm.dm_flag = 0;
@@ -181,10 +182,10 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
181 182
182 rtlpriv->max_fw_size = 0x8000; 183 rtlpriv->max_fw_size = 0x8000;
183 pr_info("Driver for Realtek RTL8192DE WLAN interface\n"); 184 pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
184 pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); 185 pr_info("Loading firmware file %s\n", fw_name);
185 186
186 /* request fw */ 187 /* request fw */
187 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 188 err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
188 rtlpriv->io.dev, GFP_KERNEL, hw, 189 rtlpriv->io.dev, GFP_KERNEL, hw,
189 rtl_fw_cb); 190 rtl_fw_cb);
190 if (err) { 191 if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = {
266 .bar_id = 2, 267 .bar_id = 2,
267 .write_readback = true, 268 .write_readback = true,
268 .name = "rtl8192de", 269 .name = "rtl8192de",
269 .fw_name = "rtlwifi/rtl8192defw.bin",
270 .ops = &rtl8192de_hal_ops, 270 .ops = &rtl8192de_hal_ops,
271 .mod_params = &rtl92de_mod_params, 271 .mod_params = &rtl92de_mod_params,
272 272
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index ac299cbe59b0..46b605de36e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -91,6 +91,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
91 struct rtl_priv *rtlpriv = rtl_priv(hw); 91 struct rtl_priv *rtlpriv = rtl_priv(hw);
92 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 92 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
93 int err = 0; 93 int err = 0;
94 char *fw_name;
94 95
95 rtl92ee_bt_reg_init(hw); 96 rtl92ee_bt_reg_init(hw);
96 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; 97 rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
@@ -170,11 +171,11 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
170 } 171 }
171 172
172 /* request fw */ 173 /* request fw */
173 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin"; 174 fw_name = "rtlwifi/rtl8192eefw.bin";
174 175
175 rtlpriv->max_fw_size = 0x8000; 176 rtlpriv->max_fw_size = 0x8000;
176 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); 177 pr_info("Using firmware %s\n", fw_name);
177 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 178 err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
178 rtlpriv->io.dev, GFP_KERNEL, hw, 179 rtlpriv->io.dev, GFP_KERNEL, hw,
179 rtl_fw_cb); 180 rtl_fw_cb);
180 if (err) { 181 if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
266 .bar_id = 2, 267 .bar_id = 2,
267 .write_readback = true, 268 .write_readback = true,
268 .name = "rtl92ee_pci", 269 .name = "rtl92ee_pci",
269 .fw_name = "rtlwifi/rtl8192eefw.bin",
270 .ops = &rtl8192ee_hal_ops, 270 .ops = &rtl8192ee_hal_ops,
271 .mod_params = &rtl92ee_mod_params, 271 .mod_params = &rtl92ee_mod_params,
272 272
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 5e8e02d5de8a..3e1eaeac4fdc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -89,12 +89,13 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
89 struct ieee80211_hw *hw = context; 89 struct ieee80211_hw *hw = context;
90 struct rtl_priv *rtlpriv = rtl_priv(hw); 90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91 struct rt_firmware *pfirmware = NULL; 91 struct rt_firmware *pfirmware = NULL;
92 char *fw_name = "rtlwifi/rtl8192sefw.bin";
92 93
93 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, 94 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
94 "Firmware callback routine entered!\n"); 95 "Firmware callback routine entered!\n");
95 complete(&rtlpriv->firmware_loading_complete); 96 complete(&rtlpriv->firmware_loading_complete);
96 if (!firmware) { 97 if (!firmware) {
97 pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); 98 pr_err("Firmware %s not available\n", fw_name);
98 rtlpriv->max_fw_size = 0; 99 rtlpriv->max_fw_size = 0;
99 return; 100 return;
100 } 101 }
@@ -117,6 +118,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
117 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 118 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
118 int err = 0; 119 int err = 0;
119 u16 earlyrxthreshold = 7; 120 u16 earlyrxthreshold = 7;
121 char *fw_name = "rtlwifi/rtl8192sefw.bin";
120 122
121 rtlpriv->dm.dm_initialgain_enable = true; 123 rtlpriv->dm.dm_initialgain_enable = true;
122 rtlpriv->dm.dm_flag = 0; 124 rtlpriv->dm.dm_flag = 0;
@@ -214,9 +216,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
214 rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 + 216 rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
215 sizeof(struct fw_hdr); 217 sizeof(struct fw_hdr);
216 pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" 218 pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
217 "Loading firmware %s\n", rtlpriv->cfg->fw_name); 219 "Loading firmware %s\n", fw_name);
218 /* request fw */ 220 /* request fw */
219 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 221 err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
220 rtlpriv->io.dev, GFP_KERNEL, hw, 222 rtlpriv->io.dev, GFP_KERNEL, hw,
221 rtl92se_fw_cb); 223 rtl92se_fw_cb);
222 if (err) { 224 if (err) {
@@ -310,7 +312,6 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = {
310 .bar_id = 1, 312 .bar_id = 1,
311 .write_readback = false, 313 .write_readback = false,
312 .name = "rtl92s_pci", 314 .name = "rtl92s_pci",
313 .fw_name = "rtlwifi/rtl8192sefw.bin",
314 .ops = &rtl8192se_hal_ops, 315 .ops = &rtl8192se_hal_ops,
315 .mod_params = &rtl92se_mod_params, 316 .mod_params = &rtl92se_mod_params,
316 317
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 89c828ad89f4..c51a9e8234e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -94,6 +94,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
94 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 94 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
95 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 95 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
96 int err = 0; 96 int err = 0;
97 char *fw_name = "rtlwifi/rtl8723fw.bin";
97 98
98 rtl8723e_bt_reg_init(hw); 99 rtl8723e_bt_reg_init(hw);
99 100
@@ -176,14 +177,12 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
176 return 1; 177 return 1;
177 } 178 }
178 179
179 if (IS_VENDOR_8723_A_CUT(rtlhal->version)) 180 if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
180 rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin"; 181 fw_name = "rtlwifi/rtl8723fw_B.bin";
181 else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
182 rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin";
183 182
184 rtlpriv->max_fw_size = 0x6000; 183 rtlpriv->max_fw_size = 0x6000;
185 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); 184 pr_info("Using firmware %s\n", fw_name);
186 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 185 err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
187 rtlpriv->io.dev, GFP_KERNEL, hw, 186 rtlpriv->io.dev, GFP_KERNEL, hw,
188 rtl_fw_cb); 187 rtl_fw_cb);
189 if (err) { 188 if (err) {
@@ -280,7 +279,6 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
280 .bar_id = 2, 279 .bar_id = 2,
281 .write_readback = true, 280 .write_readback = true,
282 .name = "rtl8723e_pci", 281 .name = "rtl8723e_pci",
283 .fw_name = "rtlwifi/rtl8723efw.bin",
284 .ops = &rtl8723e_hal_ops, 282 .ops = &rtl8723e_hal_ops,
285 .mod_params = &rtl8723e_mod_params, 283 .mod_params = &rtl8723e_mod_params,
286 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, 284 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 20b53f035483..847644d1f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -91,6 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
91 struct rtl_priv *rtlpriv = rtl_priv(hw); 91 struct rtl_priv *rtlpriv = rtl_priv(hw);
92 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 92 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
93 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 93 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
94 char *fw_name = "rtlwifi/rtl8723befw.bin";
94 95
95 rtl8723be_bt_reg_init(hw); 96 rtl8723be_bt_reg_init(hw);
96 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); 97 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -184,8 +185,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
184 } 185 }
185 186
186 rtlpriv->max_fw_size = 0x8000; 187 rtlpriv->max_fw_size = 0x8000;
187 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); 188 pr_info("Using firmware %s\n", fw_name);
188 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 189 err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
189 rtlpriv->io.dev, GFP_KERNEL, hw, 190 rtlpriv->io.dev, GFP_KERNEL, hw,
190 rtl_fw_cb); 191 rtl_fw_cb);
191 if (err) { 192 if (err) {
@@ -280,7 +281,6 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
280 .bar_id = 2, 281 .bar_id = 2,
281 .write_readback = true, 282 .write_readback = true,
282 .name = "rtl8723be_pci", 283 .name = "rtl8723be_pci",
283 .fw_name = "rtlwifi/rtl8723befw.bin",
284 .ops = &rtl8723be_hal_ops, 284 .ops = &rtl8723be_hal_ops,
285 .mod_params = &rtl8723be_mod_params, 285 .mod_params = &rtl8723be_mod_params,
286 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, 286 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 22f687b1f133..297938e0effd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -93,6 +93,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
93 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 93 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
94 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 94 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
95 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 95 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
96 char *fw_name, *wowlan_fw_name;
96 97
97 rtl8821ae_bt_reg_init(hw); 98 rtl8821ae_bt_reg_init(hw);
98 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); 99 rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -203,17 +204,17 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
203 } 204 }
204 205
205 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { 206 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
206 rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin"; 207 fw_name = "rtlwifi/rtl8812aefw.bin";
207 rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; 208 wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
208 } else { 209 } else {
209 rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin"; 210 fw_name = "rtlwifi/rtl8821aefw.bin";
210 rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; 211 wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
211 } 212 }
212 213
213 rtlpriv->max_fw_size = 0x8000; 214 rtlpriv->max_fw_size = 0x8000;
214 /*load normal firmware*/ 215 /*load normal firmware*/
215 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); 216 pr_info("Using firmware %s\n", fw_name);
216 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 217 err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
217 rtlpriv->io.dev, GFP_KERNEL, hw, 218 rtlpriv->io.dev, GFP_KERNEL, hw,
218 rtl_fw_cb); 219 rtl_fw_cb);
219 if (err) { 220 if (err) {
@@ -222,9 +223,9 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
222 return 1; 223 return 1;
223 } 224 }
224 /*load wowlan firmware*/ 225 /*load wowlan firmware*/
225 pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name); 226 pr_info("Using firmware %s\n", wowlan_fw_name);
226 err = request_firmware_nowait(THIS_MODULE, 1, 227 err = request_firmware_nowait(THIS_MODULE, 1,
227 rtlpriv->cfg->wowlan_fw_name, 228 wowlan_fw_name,
228 rtlpriv->io.dev, GFP_KERNEL, hw, 229 rtlpriv->io.dev, GFP_KERNEL, hw,
229 rtl_wowlan_fw_cb); 230 rtl_wowlan_fw_cb);
230 if (err) { 231 if (err) {
@@ -320,7 +321,6 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
320 .bar_id = 2, 321 .bar_id = 2,
321 .write_readback = true, 322 .write_readback = true,
322 .name = "rtl8821ae_pci", 323 .name = "rtl8821ae_pci",
323 .fw_name = "rtlwifi/rtl8821aefw.bin",
324 .ops = &rtl8821ae_hal_ops, 324 .ops = &rtl8821ae_hal_ops,
325 .mod_params = &rtl8821ae_mod_params, 325 .mod_params = &rtl8821ae_mod_params,
326 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, 326 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 595f7d5d091a..dafe486f8448 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2278,9 +2278,7 @@ struct rtl_hal_cfg {
2278 u8 bar_id; 2278 u8 bar_id;
2279 bool write_readback; 2279 bool write_readback;
2280 char *name; 2280 char *name;
2281 char *fw_name;
2282 char *alt_fw_name; 2281 char *alt_fw_name;
2283 char *wowlan_fw_name;
2284 struct rtl_hal_ops *ops; 2282 struct rtl_hal_ops *ops;
2285 struct rtl_mod_params *mod_params; 2283 struct rtl_mod_params *mod_params;
2286 struct rtl_hal_usbint_cfg *usb_interface_cfg; 2284 struct rtl_hal_usbint_cfg *usb_interface_cfg;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index a6e94b1a12cb..47fe7f96a242 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -391,7 +391,6 @@ static void wl1271_remove(struct sdio_func *func)
391 pm_runtime_get_noresume(&func->dev); 391 pm_runtime_get_noresume(&func->dev);
392 392
393 platform_device_unregister(glue->core); 393 platform_device_unregister(glue->core);
394 kfree(glue);
395} 394}
396 395
397#ifdef CONFIG_PM 396#ifdef CONFIG_PM
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 2037e7a77a37..d764236072b1 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -91,11 +91,9 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
91 */ 91 */
92bool afs_cm_incoming_call(struct afs_call *call) 92bool afs_cm_incoming_call(struct afs_call *call)
93{ 93{
94 u32 operation_id = ntohl(call->operation_ID); 94 _enter("{CB.OP %u}", call->operation_ID);
95 95
96 _enter("{CB.OP %u}", operation_id); 96 switch (call->operation_ID) {
97
98 switch (operation_id) {
99 case CBCallBack: 97 case CBCallBack:
100 call->type = &afs_SRXCBCallBack; 98 call->type = &afs_SRXCBCallBack;
101 return true; 99 return true;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 96f4d764d1a6..31c616ab9b40 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -364,7 +364,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
364 buffer = kmap(page); 364 buffer = kmap(page);
365 ret = afs_extract_data(call, buffer, 365 ret = afs_extract_data(call, buffer,
366 call->count, true); 366 call->count, true);
367 kunmap(buffer); 367 kunmap(page);
368 if (ret < 0) 368 if (ret < 0)
369 return ret; 369 return ret;
370 } 370 }
@@ -397,7 +397,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
397 page = call->reply3; 397 page = call->reply3;
398 buffer = kmap(page); 398 buffer = kmap(page);
399 memset(buffer + call->count, 0, PAGE_SIZE - call->count); 399 memset(buffer + call->count, 0, PAGE_SIZE - call->count);
400 kunmap(buffer); 400 kunmap(page);
401 } 401 }
402 402
403 _leave(" = 0 [done]"); 403 _leave(" = 0 [done]");
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5497c8496055..535a38d2c1d0 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -112,7 +112,7 @@ struct afs_call {
112 bool need_attention; /* T if RxRPC poked us */ 112 bool need_attention; /* T if RxRPC poked us */
113 u16 service_id; /* RxRPC service ID to call */ 113 u16 service_id; /* RxRPC service ID to call */
114 __be16 port; /* target UDP port */ 114 __be16 port; /* target UDP port */
115 __be32 operation_ID; /* operation ID for an incoming call */ 115 u32 operation_ID; /* operation ID for an incoming call */
116 u32 count; /* count for use in unmarshalling */ 116 u32 count; /* count for use in unmarshalling */
117 __be32 tmp; /* place to extract temporary data */ 117 __be32 tmp; /* place to extract temporary data */
118 afs_dataversion_t store_version; /* updated version expected from store */ 118 afs_dataversion_t store_version; /* updated version expected from store */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 477928b25940..25f05a8d21b1 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -676,10 +676,11 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
676 ASSERTCMP(call->offset, <, 4); 676 ASSERTCMP(call->offset, <, 4);
677 677
678 /* the operation ID forms the first four bytes of the request data */ 678 /* the operation ID forms the first four bytes of the request data */
679 ret = afs_extract_data(call, &call->operation_ID, 4, true); 679 ret = afs_extract_data(call, &call->tmp, 4, true);
680 if (ret < 0) 680 if (ret < 0)
681 return ret; 681 return ret;
682 682
683 call->operation_ID = ntohl(call->tmp);
683 call->state = AFS_CALL_AWAIT_REQUEST; 684 call->state = AFS_CALL_AWAIT_REQUEST;
684 call->offset = 0; 685 call->offset = 0;
685 686
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6824556d37ed..cd184bdca58f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1169 const char *mod_name); 1169 const char *mod_name);
1170void vmbus_driver_unregister(struct hv_driver *hv_driver); 1170void vmbus_driver_unregister(struct hv_driver *hv_driver);
1171 1171
1172static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
1173{
1174 const struct kobject *kobj = &device_obj->device.kobj;
1175
1176 return kobj->name;
1177}
1178
1179void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); 1172void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1180 1173
1181int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, 1174int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7e9a789be5e0..ca1ad9ebbc92 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
123}; 123};
124 124
125#if defined(CONFIG_NET_L3_MASTER_DEV) 125#if defined(CONFIG_NET_L3_MASTER_DEV)
126static inline bool skb_l3mdev_slave(__u16 flags) 126static inline bool ipv6_l3mdev_skb(__u16 flags)
127{ 127{
128 return flags & IP6SKB_L3SLAVE; 128 return flags & IP6SKB_L3SLAVE;
129} 129}
130#else 130#else
131static inline bool skb_l3mdev_slave(__u16 flags) 131static inline bool ipv6_l3mdev_skb(__u16 flags)
132{ 132{
133 return false; 133 return false;
134} 134}
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
139 139
140static inline int inet6_iif(const struct sk_buff *skb) 140static inline int inet6_iif(const struct sk_buff *skb)
141{ 141{
142 bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags); 142 bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
143 143
144 return l3_slave ? skb->skb_iif : IP6CB(skb)->iif; 144 return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
145} 145}
146 146
147/* can not be used in TCP layer after tcp_v6_fill_cb */
148static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
149{
150#if defined(CONFIG_NET_L3_MASTER_DEV)
151 if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
152 ipv6_l3mdev_skb(IP6CB(skb)->flags))
153 return true;
154#endif
155 return false;
156}
157
147struct tcp6_request_sock { 158struct tcp6_request_sock {
148 struct tcp_request_sock tcp6rsk_tcp; 159 struct tcp_request_sock tcp6rsk_tcp;
149}; 160};
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6a164297358..3be7abd6e722 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
1399 u32 *lkey, u32 *rkey); 1399 u32 *lkey, u32 *rkey);
1400int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); 1400int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
1401int mlx4_SYNC_TPT(struct mlx4_dev *dev); 1401int mlx4_SYNC_TPT(struct mlx4_dev *dev);
1402int mlx4_test_interrupts(struct mlx4_dev *dev); 1402int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
1403int mlx4_test_async(struct mlx4_dev *dev);
1403int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, 1404int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
1404 const u32 offset[], u32 value[], 1405 const u32 offset[], u32 value[],
1405 size_t array_len, u8 port); 1406 size_t array_len, u8 port);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786427e4..ecc451d89ccd 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -418,8 +418,12 @@ struct mlx5_core_health {
418 u32 prev; 418 u32 prev;
419 int miss_counter; 419 int miss_counter;
420 bool sick; 420 bool sick;
421 /* wq spinlock to synchronize draining */
422 spinlock_t wq_lock;
421 struct workqueue_struct *wq; 423 struct workqueue_struct *wq;
424 unsigned long flags;
422 struct work_struct work; 425 struct work_struct work;
426 struct delayed_work recover_work;
423}; 427};
424 428
425struct mlx5_cq_table { 429struct mlx5_cq_table {
@@ -626,10 +630,6 @@ struct mlx5_db {
626}; 630};
627 631
628enum { 632enum {
629 MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
630};
631
632enum {
633 MLX5_COMP_EQ_SIZE = 1024, 633 MLX5_COMP_EQ_SIZE = 1024,
634}; 634};
635 635
@@ -638,13 +638,6 @@ enum {
638 MLX5_PTYS_EN = 1 << 2, 638 MLX5_PTYS_EN = 1 << 2,
639}; 639};
640 640
641struct mlx5_db_pgdir {
642 struct list_head list;
643 DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
644 __be32 *db_page;
645 dma_addr_t db_dma;
646};
647
648typedef void (*mlx5_cmd_cbk_t)(int status, void *context); 641typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
649 642
650struct mlx5_cmd_work_ent { 643struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
789int mlx5_health_init(struct mlx5_core_dev *dev); 782int mlx5_health_init(struct mlx5_core_dev *dev);
790void mlx5_start_health_poll(struct mlx5_core_dev *dev); 783void mlx5_start_health_poll(struct mlx5_core_dev *dev);
791void mlx5_stop_health_poll(struct mlx5_core_dev *dev); 784void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
785void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
792int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, 786int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
793 struct mlx5_buf *buf, int node); 787 struct mlx5_buf *buf, int node);
794int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); 788int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 136ae6bbe81e..91ee3643ccc8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2169,7 +2169,10 @@ struct napi_gro_cb {
2169 /* Used to determine if flush_id can be ignored */ 2169 /* Used to determine if flush_id can be ignored */
2170 u8 is_atomic:1; 2170 u8 is_atomic:1;
2171 2171
2172 /* 5 bit hole */ 2172 /* Number of gro_receive callbacks this packet already went through */
2173 u8 recursion_counter:4;
2174
2175 /* 1 bit hole */
2173 2176
2174 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 2177 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2175 __wsum csum; 2178 __wsum csum;
@@ -2180,6 +2183,40 @@ struct napi_gro_cb {
2180 2183
2181#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) 2184#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2182 2185
2186#define GRO_RECURSION_LIMIT 15
2187static inline int gro_recursion_inc_test(struct sk_buff *skb)
2188{
2189 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2190}
2191
2192typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
2193static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
2194 struct sk_buff **head,
2195 struct sk_buff *skb)
2196{
2197 if (unlikely(gro_recursion_inc_test(skb))) {
2198 NAPI_GRO_CB(skb)->flush |= 1;
2199 return NULL;
2200 }
2201
2202 return cb(head, skb);
2203}
2204
2205typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
2206 struct sk_buff *);
2207static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
2208 struct sock *sk,
2209 struct sk_buff **head,
2210 struct sk_buff *skb)
2211{
2212 if (unlikely(gro_recursion_inc_test(skb))) {
2213 NAPI_GRO_CB(skb)->flush |= 1;
2214 return NULL;
2215 }
2216
2217 return cb(sk, head, skb);
2218}
2219
2183struct packet_type { 2220struct packet_type {
2184 __be16 type; /* This is really htons(ether_type). */ 2221 __be16 type; /* This is really htons(ether_type). */
2185 struct net_device *dev; /* NULL is wildcarded here */ 2222 struct net_device *dev; /* NULL is wildcarded here */
@@ -3877,7 +3914,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
3877 ldev = netdev_all_lower_get_next(dev, &(iter))) 3914 ldev = netdev_all_lower_get_next(dev, &(iter)))
3878 3915
3879#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ 3916#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
3880 for (iter = (dev)->all_adj_list.lower.next, \ 3917 for (iter = &(dev)->all_adj_list.lower, \
3881 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ 3918 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
3882 ldev; \ 3919 ldev; \
3883 ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) 3920 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f9ae903bbb84..8978a60371f4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
146#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) 146#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
147 147
148#define QED_COALESCE_MAX 0xFF 148#define QED_COALESCE_MAX 0xFF
149#define QED_DEFAULT_RX_USECS 12
149 150
150/* forward */ 151/* forward */
151struct qed_dev; 152struct qed_dev;
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index 99fbe6d55acb..f48d64b0e2fb 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
68 68
69bool qede_roce_supported(struct qede_dev *dev); 69bool qede_roce_supported(struct qede_dev *dev);
70 70
71#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 71#if IS_ENABLED(CONFIG_QED_RDMA)
72int qede_roce_dev_add(struct qede_dev *dev); 72int qede_roce_dev_add(struct qede_dev *dev);
73void qede_roce_dev_event_open(struct qede_dev *dev); 73void qede_roce_dev_event_open(struct qede_dev *dev);
74void qede_roce_dev_event_close(struct qede_dev *dev); 74void qede_roce_dev_event_close(struct qede_dev *dev);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 601258f6e621..32810f279f8e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -936,6 +936,7 @@ struct sk_buff_fclones {
936 936
937/** 937/**
938 * skb_fclone_busy - check if fclone is busy 938 * skb_fclone_busy - check if fclone is busy
939 * @sk: socket
939 * @skb: buffer 940 * @skb: buffer
940 * 941 *
941 * Returns true if skb is a fast clone, and its clone is not freed. 942 * Returns true if skb is a fast clone, and its clone is not freed.
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f2d072787947..8f998afc1384 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -174,6 +174,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex,
174 const struct in6_addr *addr); 174 const struct in6_addr *addr);
175int ipv6_sock_mc_drop(struct sock *sk, int ifindex, 175int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
176 const struct in6_addr *addr); 176 const struct in6_addr *addr);
177void __ipv6_sock_mc_close(struct sock *sk);
177void ipv6_sock_mc_close(struct sock *sk); 178void ipv6_sock_mc_close(struct sock *sk);
178bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, 179bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
179 const struct in6_addr *src_addr); 180 const struct in6_addr *src_addr);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bd19faad0d96..14b51d739c3b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4047,14 +4047,29 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
4047 */ 4047 */
4048 4048
4049/** 4049/**
4050 * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3
4051 * @skb: the 802.11 data frame
4052 * @ehdr: pointer to a &struct ethhdr that will get the header, instead
4053 * of it being pushed into the SKB
4054 * @addr: the device MAC address
4055 * @iftype: the virtual interface type
4056 * Return: 0 on success. Non-zero on error.
4057 */
4058int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
4059 const u8 *addr, enum nl80211_iftype iftype);
4060
4061/**
4050 * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 4062 * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
4051 * @skb: the 802.11 data frame 4063 * @skb: the 802.11 data frame
4052 * @addr: the device MAC address 4064 * @addr: the device MAC address
4053 * @iftype: the virtual interface type 4065 * @iftype: the virtual interface type
4054 * Return: 0 on success. Non-zero on error. 4066 * Return: 0 on success. Non-zero on error.
4055 */ 4067 */
4056int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, 4068static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
4057 enum nl80211_iftype iftype); 4069 enum nl80211_iftype iftype)
4070{
4071 return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype);
4072}
4058 4073
4059/** 4074/**
4060 * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11 4075 * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11
@@ -4072,22 +4087,23 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
4072/** 4087/**
4073 * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame 4088 * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
4074 * 4089 *
4075 * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of 4090 * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
4076 * 802.3 frames. The @list will be empty if the decode fails. The 4091 * The @list will be empty if the decode fails. The @skb must be fully
4077 * @skb is consumed after the function returns. 4092 * header-less before being passed in here; it is freed in this function.
4078 * 4093 *
4079 * @skb: The input IEEE 802.11n A-MSDU frame. 4094 * @skb: The input A-MSDU frame without any headers.
4080 * @list: The output list of 802.3 frames. It must be allocated and 4095 * @list: The output list of 802.3 frames. It must be allocated and
4081 * initialized by by the caller. 4096 * initialized by by the caller.
4082 * @addr: The device MAC address. 4097 * @addr: The device MAC address.
4083 * @iftype: The device interface type. 4098 * @iftype: The device interface type.
4084 * @extra_headroom: The hardware extra headroom for SKBs in the @list. 4099 * @extra_headroom: The hardware extra headroom for SKBs in the @list.
4085 * @has_80211_header: Set it true if SKB is with IEEE 802.11 header. 4100 * @check_da: DA to check in the inner ethernet header, or NULL
4101 * @check_sa: SA to check in the inner ethernet header, or NULL
4086 */ 4102 */
4087void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, 4103void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
4088 const u8 *addr, enum nl80211_iftype iftype, 4104 const u8 *addr, enum nl80211_iftype iftype,
4089 const unsigned int extra_headroom, 4105 const unsigned int extra_headroom,
4090 bool has_80211_header); 4106 const u8 *check_da, const u8 *check_sa);
4091 4107
4092/** 4108/**
4093 * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame 4109 * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 515352c6280a..b0576cb2ab25 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -190,8 +190,8 @@ struct inet6_dev {
190 __u32 if_flags; 190 __u32 if_flags;
191 int dead; 191 int dead;
192 192
193 u32 desync_factor;
193 u8 rndid[8]; 194 u8 rndid[8];
194 struct timer_list regen_timer;
195 struct list_head tempaddr_list; 195 struct list_head tempaddr_list;
196 196
197 struct in6_addr token; 197 struct in6_addr token;
diff --git a/include/net/ip.h b/include/net/ip.h
index bc43c0fcae12..5413883ac47f 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -38,7 +38,7 @@ struct sock;
38struct inet_skb_parm { 38struct inet_skb_parm {
39 int iif; 39 int iif;
40 struct ip_options opt; /* Compiled IP options */ 40 struct ip_options opt; /* Compiled IP options */
41 unsigned char flags; 41 u16 flags;
42 42
43#define IPSKB_FORWARDED BIT(0) 43#define IPSKB_FORWARDED BIT(0)
44#define IPSKB_XFRM_TUNNEL_SIZE BIT(1) 44#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
@@ -48,10 +48,16 @@ struct inet_skb_parm {
48#define IPSKB_DOREDIRECT BIT(5) 48#define IPSKB_DOREDIRECT BIT(5)
49#define IPSKB_FRAG_PMTU BIT(6) 49#define IPSKB_FRAG_PMTU BIT(6)
50#define IPSKB_FRAG_SEGS BIT(7) 50#define IPSKB_FRAG_SEGS BIT(7)
51#define IPSKB_L3SLAVE BIT(8)
51 52
52 u16 frag_max_size; 53 u16 frag_max_size;
53}; 54};
54 55
56static inline bool ipv4_l3mdev_skb(u16 flags)
57{
58 return !!(flags & IPSKB_L3SLAVE);
59}
60
55static inline unsigned int ip_hdrlen(const struct sk_buff *skb) 61static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
56{ 62{
57 return ip_hdr(skb)->ihl * 4; 63 return ip_hdr(skb)->ihl * 4;
@@ -572,7 +578,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
572 */ 578 */
573 579
574void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); 580void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
575void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset); 581void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
576int ip_cmsg_send(struct sock *sk, struct msghdr *msg, 582int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
577 struct ipcm_cookie *ipc, bool allow_ipv6); 583 struct ipcm_cookie *ipc, bool allow_ipv6);
578int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 584int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -594,7 +600,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
594 600
595static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) 601static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
596{ 602{
597 ip_cmsg_recv_offset(msg, skb, 0); 603 ip_cmsg_recv_offset(msg, skb, 0, 0);
598} 604}
599 605
600bool icmp_global_allow(void); 606bool icmp_global_allow(void);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fb961a576abe..a74e2aa40ef4 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -230,6 +230,8 @@ struct fib6_table {
230 rwlock_t tb6_lock; 230 rwlock_t tb6_lock;
231 struct fib6_node tb6_root; 231 struct fib6_node tb6_root;
232 struct inet_peer_base tb6_peers; 232 struct inet_peer_base tb6_peers;
233 unsigned int flags;
234#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
233}; 235};
234 236
235#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC 237#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index e0cd318d5103..f83e78d071a3 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -32,6 +32,7 @@ struct route_info {
32#define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008 32#define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008
33#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 33#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
34#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 34#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
35#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040
35 36
36/* We do not (yet ?) support IPv6 jumbograms (RFC 2675) 37/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
37 * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header 38 * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index a810dfcb83c2..e2dba93e374f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -811,14 +811,18 @@ enum mac80211_rate_control_flags {
811 * in the control information, and it will be filled by the rate 811 * in the control information, and it will be filled by the rate
812 * control algorithm according to what should be sent. For example, 812 * control algorithm according to what should be sent. For example,
813 * if this array contains, in the format { <idx>, <count> } the 813 * if this array contains, in the format { <idx>, <count> } the
814 * information 814 * information::
815 *
815 * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 } 816 * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 }
817 *
816 * then this means that the frame should be transmitted 818 * then this means that the frame should be transmitted
817 * up to twice at rate 3, up to twice at rate 2, and up to four 819 * up to twice at rate 3, up to twice at rate 2, and up to four
818 * times at rate 1 if it doesn't get acknowledged. Say it gets 820 * times at rate 1 if it doesn't get acknowledged. Say it gets
819 * acknowledged by the peer after the fifth attempt, the status 821 * acknowledged by the peer after the fifth attempt, the status
820 * information should then contain 822 * information should then contain::
823 *
821 * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ... 824 * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ...
825 *
822 * since it was transmitted twice at rate 3, twice at rate 2 826 * since it was transmitted twice at rate 3, twice at rate 2
823 * and once at rate 1 after which we received an acknowledgement. 827 * and once at rate 1 after which we received an acknowledgement.
824 */ 828 */
@@ -1168,8 +1172,8 @@ enum mac80211_rx_vht_flags {
1168 * @rate_idx: index of data rate into band's supported rates or MCS index if 1172 * @rate_idx: index of data rate into band's supported rates or MCS index if
1169 * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) 1173 * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
1170 * @vht_nss: number of streams (VHT only) 1174 * @vht_nss: number of streams (VHT only)
1171 * @flag: %RX_FLAG_* 1175 * @flag: %RX_FLAG_\*
1172 * @vht_flag: %RX_VHT_FLAG_* 1176 * @vht_flag: %RX_VHT_FLAG_\*
1173 * @rx_flags: internal RX flags for mac80211 1177 * @rx_flags: internal RX flags for mac80211
1174 * @ampdu_reference: A-MPDU reference number, must be a different value for 1178 * @ampdu_reference: A-MPDU reference number, must be a different value for
1175 * each A-MPDU but the same for each subframe within one A-MPDU 1179 * each A-MPDU but the same for each subframe within one A-MPDU
@@ -1432,7 +1436,7 @@ enum ieee80211_vif_flags {
1432 * @probe_req_reg: probe requests should be reported to mac80211 for this 1436 * @probe_req_reg: probe requests should be reported to mac80211 for this
1433 * interface. 1437 * interface.
1434 * @drv_priv: data area for driver use, will always be aligned to 1438 * @drv_priv: data area for driver use, will always be aligned to
1435 * sizeof(void *). 1439 * sizeof(void \*).
1436 * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) 1440 * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
1437 */ 1441 */
1438struct ieee80211_vif { 1442struct ieee80211_vif {
@@ -1743,7 +1747,7 @@ struct ieee80211_sta_rates {
1743 * @wme: indicates whether the STA supports QoS/WME (if local devices does, 1747 * @wme: indicates whether the STA supports QoS/WME (if local devices does,
1744 * otherwise always false) 1748 * otherwise always false)
1745 * @drv_priv: data area for driver use, will always be aligned to 1749 * @drv_priv: data area for driver use, will always be aligned to
1746 * sizeof(void *), size is determined in hw information. 1750 * sizeof(void \*), size is determined in hw information.
1747 * @uapsd_queues: bitmap of queues configured for uapsd. Only valid 1751 * @uapsd_queues: bitmap of queues configured for uapsd. Only valid
1748 * if wme is supported. 1752 * if wme is supported.
1749 * @max_sp: max Service Period. Only valid if wme is supported. 1753 * @max_sp: max Service Period. Only valid if wme is supported.
@@ -2146,12 +2150,12 @@ enum ieee80211_hw_flags {
2146 * 2150 *
2147 * @radiotap_mcs_details: lists which MCS information can the HW 2151 * @radiotap_mcs_details: lists which MCS information can the HW
2148 * reports, by default it is set to _MCS, _GI and _BW but doesn't 2152 * reports, by default it is set to _MCS, _GI and _BW but doesn't
2149 * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only 2153 * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_\* values, only
2150 * adding _BW is supported today. 2154 * adding _BW is supported today.
2151 * 2155 *
2152 * @radiotap_vht_details: lists which VHT MCS information the HW reports, 2156 * @radiotap_vht_details: lists which VHT MCS information the HW reports,
2153 * the default is _GI | _BANDWIDTH. 2157 * the default is _GI | _BANDWIDTH.
2154 * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values. 2158 * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
2155 * 2159 *
2156 * @radiotap_timestamp: Information for the radiotap timestamp field; if the 2160 * @radiotap_timestamp: Information for the radiotap timestamp field; if the
2157 * 'units_pos' member is set to a non-negative value it must be set to 2161 * 'units_pos' member is set to a non-negative value it must be set to
@@ -2486,6 +2490,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
2486 * in the software stack cares about, we will, in the future, have mac80211 2490 * in the software stack cares about, we will, in the future, have mac80211
2487 * tell the driver which information elements are interesting in the sense 2491 * tell the driver which information elements are interesting in the sense
2488 * that we want to see changes in them. This will include 2492 * that we want to see changes in them. This will include
2493 *
2489 * - a list of information element IDs 2494 * - a list of information element IDs
2490 * - a list of OUIs for the vendor information element 2495 * - a list of OUIs for the vendor information element
2491 * 2496 *
diff --git a/include/net/sock.h b/include/net/sock.h
index ebf75db08e06..73c6b008f1b7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -252,6 +252,7 @@ struct sock_common {
252 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) 252 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
253 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) 253 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
254 * @sk_sndbuf: size of send buffer in bytes 254 * @sk_sndbuf: size of send buffer in bytes
255 * @sk_padding: unused element for alignment
255 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets 256 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
256 * @sk_no_check_rx: allow zero checksum in RX packets 257 * @sk_no_check_rx: allow zero checksum in RX packets
257 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) 258 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -302,7 +303,8 @@ struct sock_common {
302 * @sk_backlog_rcv: callback to process the backlog 303 * @sk_backlog_rcv: callback to process the backlog
303 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0 304 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
304 * @sk_reuseport_cb: reuseport group container 305 * @sk_reuseport_cb: reuseport group container
305 */ 306 * @sk_rcu: used during RCU grace period
307 */
306struct sock { 308struct sock {
307 /* 309 /*
308 * Now struct inet_timewait_sock also uses sock_common, so please just 310 * Now struct inet_timewait_sock also uses sock_common, so please just
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f83b7f220a65..5b82d4d94834 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -794,12 +794,23 @@ struct tcp_skb_cb {
794 */ 794 */
795static inline int tcp_v6_iif(const struct sk_buff *skb) 795static inline int tcp_v6_iif(const struct sk_buff *skb)
796{ 796{
797 bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags); 797 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
798 798
799 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; 799 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
800} 800}
801#endif 801#endif
802 802
803/* TCP_SKB_CB reference means this can not be used from early demux */
804static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
805{
806#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
807 if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
808 ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
809 return true;
810#endif
811 return false;
812}
813
803/* Due to TSO, an SKB can be composed of multiple actual 814/* Due to TSO, an SKB can be composed of multiple actual
804 * packets. To keep these tracked properly, we use this. 815 * packets. To keep these tracked properly, we use this.
805 */ 816 */
diff --git a/include/net/udp.h b/include/net/udp.h
index ea53a87d880f..4948790d393d 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -258,6 +258,7 @@ void udp_flush_pending_frames(struct sock *sk);
258void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); 258void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
259int udp_rcv(struct sk_buff *skb); 259int udp_rcv(struct sk_buff *skb);
260int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); 260int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
261int __udp_disconnect(struct sock *sk, int flags);
261int udp_disconnect(struct sock *sk, int flags); 262int udp_disconnect(struct sock *sk, int flags);
262unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); 263unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
263struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, 264struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0255613a54a4..308adc4154f4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -225,9 +225,9 @@ struct vxlan_config {
225struct vxlan_dev { 225struct vxlan_dev {
226 struct hlist_node hlist; /* vni hash table */ 226 struct hlist_node hlist; /* vni hash table */
227 struct list_head next; /* vxlan's per namespace list */ 227 struct list_head next; /* vxlan's per namespace list */
228 struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */ 228 struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */
229#if IS_ENABLED(CONFIG_IPV6) 229#if IS_ENABLED(CONFIG_IPV6)
230 struct vxlan_sock *vn6_sock; /* listening socket for IPv6 */ 230 struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */
231#endif 231#endif
232 struct net_device *dev; 232 struct net_device *dev;
233 struct net *net; /* netns for packet i/o */ 233 struct net *net; /* netns for packet i/o */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 099a4200732c..8e547231c1b7 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -119,8 +119,7 @@ struct ethtool_cmd {
119static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, 119static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
120 __u32 speed) 120 __u32 speed)
121{ 121{
122 122 ep->speed = (__u16)(speed & 0xFFFF);
123 ep->speed = (__u16)speed;
124 ep->speed_hi = (__u16)(speed >> 16); 123 ep->speed_hi = (__u16)(speed >> 16);
125} 124}
126 125
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 262f0379d83a..5a78be518101 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -350,7 +350,7 @@ struct rtnexthop {
350#define RTNH_F_OFFLOAD 8 /* offloaded route */ 350#define RTNH_F_OFFLOAD 8 /* offloaded route */
351#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */ 351#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
352 352
353#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN) 353#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
354 354
355/* Macros to handle hexthops */ 355/* Macros to handle hexthops */
356 356
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 94346b4d8984..0362da0b66c3 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4831,7 +4831,7 @@ static struct bpf_test tests[] = {
4831 { }, 4831 { },
4832 INTERNAL, 4832 INTERNAL,
4833 { 0x34 }, 4833 { 0x34 },
4834 { { 1, 0xbef } }, 4834 { { ETH_HLEN, 0xbef } },
4835 .fill_helper = bpf_fill_ld_abs_vlan_push_pop, 4835 .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
4836 }, 4836 },
4837 /* 4837 /*
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8de138d3306b..f2531ad66b68 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
664 664
665 skb_gro_pull(skb, sizeof(*vhdr)); 665 skb_gro_pull(skb, sizeof(*vhdr));
666 skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); 666 skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
667 pp = ptype->callbacks.gro_receive(head, skb); 667 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
668 668
669out_unlock: 669out_unlock:
670 rcu_read_unlock(); 670 rcu_read_unlock();
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 08ce36147c4c..e034afbd1bb0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -652,7 +652,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
652 batadv_softif_destroy_sysfs(hard_iface->soft_iface); 652 batadv_softif_destroy_sysfs(hard_iface->soft_iface);
653 } 653 }
654 654
655 hard_iface->soft_iface = NULL;
656 batadv_hardif_put(hard_iface); 655 batadv_hardif_put(hard_iface);
657 656
658out: 657out:
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index e0e1a88c3e58..d2905a855d1b 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -63,7 +63,7 @@ enum batadv_dbg_level {
63 BATADV_DBG_NC = BIT(5), 63 BATADV_DBG_NC = BIT(5),
64 BATADV_DBG_MCAST = BIT(6), 64 BATADV_DBG_MCAST = BIT(6),
65 BATADV_DBG_TP_METER = BIT(7), 65 BATADV_DBG_TP_METER = BIT(7),
66 BATADV_DBG_ALL = 127, 66 BATADV_DBG_ALL = 255,
67}; 67};
68 68
69#ifdef CONFIG_BATMAN_ADV_DEBUG 69#ifdef CONFIG_BATMAN_ADV_DEBUG
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 5f3bfc41aeb1..7c8d16086f0f 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -544,7 +544,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
544 if (bat_priv->algo_ops->neigh.hardif_init) 544 if (bat_priv->algo_ops->neigh.hardif_init)
545 bat_priv->algo_ops->neigh.hardif_init(hardif_neigh); 545 bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
546 546
547 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list); 547 hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
548 548
549out: 549out:
550 spin_unlock_bh(&hard_iface->neigh_list_lock); 550 spin_unlock_bh(&hard_iface->neigh_list_lock);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e2288421fe6b..1015d9c8d97d 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -969,41 +969,38 @@ void __hci_req_enable_advertising(struct hci_request *req)
969 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); 969 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
970} 970}
971 971
972static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) 972u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
973{ 973{
974 size_t complete_len;
975 size_t short_len; 974 size_t short_len;
976 int max_len; 975 size_t complete_len;
977
978 max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
979 complete_len = strlen(hdev->dev_name);
980 short_len = strlen(hdev->short_name);
981
982 /* no space left for name */
983 if (max_len < 1)
984 return ad_len;
985 976
986 /* no name set */ 977 /* no space left for name (+ NULL + type + len) */
987 if (!complete_len) 978 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
988 return ad_len; 979 return ad_len;
989 980
990 /* complete name fits and is eq to max short name len or smaller */ 981 /* use complete name if present and fits */
991 if (complete_len <= max_len && 982 complete_len = strlen(hdev->dev_name);
992 complete_len <= HCI_MAX_SHORT_NAME_LENGTH) { 983 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
993 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, 984 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
994 hdev->dev_name, complete_len); 985 hdev->dev_name, complete_len + 1);
995 }
996 986
997 /* short name set and fits */ 987 /* use short name if present */
998 if (short_len && short_len <= max_len) { 988 short_len = strlen(hdev->short_name);
989 if (short_len)
999 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, 990 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1000 hdev->short_name, short_len); 991 hdev->short_name, short_len + 1);
1001 }
1002 992
1003 /* no short name set so shorten complete name */ 993 /* use shortened full name if present, we already know that name
1004 if (!short_len) { 994 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1005 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, 995 */
1006 hdev->dev_name, max_len); 996 if (complete_len) {
997 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
998
999 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1000 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1001
1002 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1003 sizeof(name));
1007 } 1004 }
1008 1005
1009 return ad_len; 1006 return ad_len;
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 6b06629245a8..dde77bd59f91 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -106,6 +106,8 @@ static inline void hci_update_background_scan(struct hci_dev *hdev)
106void hci_request_setup(struct hci_dev *hdev); 106void hci_request_setup(struct hci_dev *hdev);
107void hci_request_cancel_all(struct hci_dev *hdev); 107void hci_request_cancel_all(struct hci_dev *hdev);
108 108
109u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
110
109static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, 111static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
110 u8 *data, u8 data_len) 112 u8 *data, u8 data_len)
111{ 113{
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 736038085feb..1fba2a03f8ae 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -6017,7 +6017,15 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6017 return err; 6017 return err;
6018} 6018}
6019 6019
6020static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data) 6020static u8 calculate_name_len(struct hci_dev *hdev)
6021{
6022 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6023
6024 return append_local_name(hdev, buf, 0);
6025}
6026
6027static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6028 bool is_adv_data)
6021{ 6029{
6022 u8 max_len = HCI_MAX_AD_LENGTH; 6030 u8 max_len = HCI_MAX_AD_LENGTH;
6023 6031
@@ -6030,9 +6038,8 @@ static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
6030 if (adv_flags & MGMT_ADV_FLAG_TX_POWER) 6038 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6031 max_len -= 3; 6039 max_len -= 3;
6032 } else { 6040 } else {
6033 /* at least 1 byte of name should fit in */
6034 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME) 6041 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6035 max_len -= 3; 6042 max_len -= calculate_name_len(hdev);
6036 6043
6037 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE)) 6044 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6038 max_len -= 4; 6045 max_len -= 4;
@@ -6063,12 +6070,13 @@ static bool appearance_managed(u32 adv_flags)
6063 return adv_flags & MGMT_ADV_FLAG_APPEARANCE; 6070 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6064} 6071}
6065 6072
6066static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data) 6073static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6074 u8 len, bool is_adv_data)
6067{ 6075{
6068 int i, cur_len; 6076 int i, cur_len;
6069 u8 max_len; 6077 u8 max_len;
6070 6078
6071 max_len = tlv_data_max_len(adv_flags, is_adv_data); 6079 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6072 6080
6073 if (len > max_len) 6081 if (len > max_len)
6074 return false; 6082 return false;
@@ -6215,8 +6223,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6215 goto unlock; 6223 goto unlock;
6216 } 6224 }
6217 6225
6218 if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) || 6226 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6219 !tlv_data_is_valid(flags, cp->data + cp->adv_data_len, 6227 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6220 cp->scan_rsp_len, false)) { 6228 cp->scan_rsp_len, false)) {
6221 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, 6229 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6222 MGMT_STATUS_INVALID_PARAMS); 6230 MGMT_STATUS_INVALID_PARAMS);
@@ -6429,8 +6437,8 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6429 6437
6430 rp.instance = cp->instance; 6438 rp.instance = cp->instance;
6431 rp.flags = cp->flags; 6439 rp.flags = cp->flags;
6432 rp.max_adv_data_len = tlv_data_max_len(flags, true); 6440 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6433 rp.max_scan_rsp_len = tlv_data_max_len(flags, false); 6441 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6434 6442
6435 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, 6443 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6436 MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); 6444 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index c5fea9393946..2136e45f5277 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
972 mod_timer(&query->timer, jiffies); 972 mod_timer(&query->timer, jiffies);
973} 973}
974 974
975void br_multicast_enable_port(struct net_bridge_port *port) 975static void __br_multicast_enable_port(struct net_bridge_port *port)
976{ 976{
977 struct net_bridge *br = port->br; 977 struct net_bridge *br = port->br;
978 978
979 spin_lock(&br->multicast_lock);
980 if (br->multicast_disabled || !netif_running(br->dev)) 979 if (br->multicast_disabled || !netif_running(br->dev))
981 goto out; 980 return;
982 981
983 br_multicast_enable(&port->ip4_own_query); 982 br_multicast_enable(&port->ip4_own_query);
984#if IS_ENABLED(CONFIG_IPV6) 983#if IS_ENABLED(CONFIG_IPV6)
@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
987 if (port->multicast_router == MDB_RTR_TYPE_PERM && 986 if (port->multicast_router == MDB_RTR_TYPE_PERM &&
988 hlist_unhashed(&port->rlist)) 987 hlist_unhashed(&port->rlist))
989 br_multicast_add_router(br, port); 988 br_multicast_add_router(br, port);
989}
990 990
991out: 991void br_multicast_enable_port(struct net_bridge_port *port)
992{
993 struct net_bridge *br = port->br;
994
995 spin_lock(&br->multicast_lock);
996 __br_multicast_enable_port(port);
992 spin_unlock(&br->multicast_lock); 997 spin_unlock(&br->multicast_lock);
993} 998}
994 999
@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
1994 1999
1995int br_multicast_toggle(struct net_bridge *br, unsigned long val) 2000int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1996{ 2001{
1997 int err = 0;
1998 struct net_bridge_mdb_htable *mdb; 2002 struct net_bridge_mdb_htable *mdb;
2003 struct net_bridge_port *port;
2004 int err = 0;
1999 2005
2000 spin_lock_bh(&br->multicast_lock); 2006 spin_lock_bh(&br->multicast_lock);
2001 if (br->multicast_disabled == !val) 2007 if (br->multicast_disabled == !val)
@@ -2023,10 +2029,9 @@ rollback:
2023 goto rollback; 2029 goto rollback;
2024 } 2030 }
2025 2031
2026 br_multicast_start_querier(br, &br->ip4_own_query); 2032 br_multicast_open(br);
2027#if IS_ENABLED(CONFIG_IPV6) 2033 list_for_each_entry(port, &br->port_list, list)
2028 br_multicast_start_querier(br, &br->ip6_own_query); 2034 __br_multicast_enable_port(port);
2029#endif
2030 2035
2031unlock: 2036unlock:
2032 spin_unlock_bh(&br->multicast_lock); 2037 spin_unlock_bh(&br->multicast_lock);
diff --git a/net/core/dev.c b/net/core/dev.c
index 4bc19a164ba5..820bac239738 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3035,6 +3035,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
3035 } 3035 }
3036 return head; 3036 return head;
3037} 3037}
3038EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3038 3039
3039static void qdisc_pkt_len_init(struct sk_buff *skb) 3040static void qdisc_pkt_len_init(struct sk_buff *skb)
3040{ 3041{
@@ -4511,6 +4512,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
4511 NAPI_GRO_CB(skb)->flush = 0; 4512 NAPI_GRO_CB(skb)->flush = 0;
4512 NAPI_GRO_CB(skb)->free = 0; 4513 NAPI_GRO_CB(skb)->free = 0;
4513 NAPI_GRO_CB(skb)->encap_mark = 0; 4514 NAPI_GRO_CB(skb)->encap_mark = 0;
4515 NAPI_GRO_CB(skb)->recursion_counter = 0;
4514 NAPI_GRO_CB(skb)->is_fou = 0; 4516 NAPI_GRO_CB(skb)->is_fou = 0;
4515 NAPI_GRO_CB(skb)->is_atomic = 1; 4517 NAPI_GRO_CB(skb)->is_atomic = 1;
4516 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 4518 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
@@ -5511,10 +5513,14 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
5511{ 5513{
5512 struct netdev_adjacent *lower; 5514 struct netdev_adjacent *lower;
5513 5515
5514 lower = list_first_or_null_rcu(&dev->all_adj_list.lower, 5516 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5515 struct netdev_adjacent, list); 5517
5518 if (&lower->list == &dev->all_adj_list.lower)
5519 return NULL;
5520
5521 *iter = &lower->list;
5516 5522
5517 return lower ? lower->dev : NULL; 5523 return lower->dev;
5518} 5524}
5519EXPORT_SYMBOL(netdev_all_lower_get_next_rcu); 5525EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
5520 5526
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1a7b80f73376..ab193e5def07 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -246,15 +246,13 @@ ipv6:
246 case htons(ETH_P_8021AD): 246 case htons(ETH_P_8021AD):
247 case htons(ETH_P_8021Q): { 247 case htons(ETH_P_8021Q): {
248 const struct vlan_hdr *vlan; 248 const struct vlan_hdr *vlan;
249 struct vlan_hdr _vlan;
250 bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
249 251
250 if (skb_vlan_tag_present(skb)) 252 if (vlan_tag_present)
251 proto = skb->protocol; 253 proto = skb->protocol;
252 254
253 if (!skb_vlan_tag_present(skb) || 255 if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
254 proto == cpu_to_be16(ETH_P_8021Q) ||
255 proto == cpu_to_be16(ETH_P_8021AD)) {
256 struct vlan_hdr _vlan;
257
258 vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), 256 vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
259 data, hlen, &_vlan); 257 data, hlen, &_vlan);
260 if (!vlan) 258 if (!vlan)
@@ -272,7 +270,7 @@ ipv6:
272 FLOW_DISSECTOR_KEY_VLAN, 270 FLOW_DISSECTOR_KEY_VLAN,
273 target_container); 271 target_container);
274 272
275 if (skb_vlan_tag_present(skb)) { 273 if (vlan_tag_present) {
276 key_vlan->vlan_id = skb_vlan_tag_get_id(skb); 274 key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
277 key_vlan->vlan_priority = 275 key_vlan->vlan_priority =
278 (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT); 276 (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 989434f36f96..f61c0e02a413 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -215,13 +215,14 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
215 */ 215 */
216int peernet2id_alloc(struct net *net, struct net *peer) 216int peernet2id_alloc(struct net *net, struct net *peer)
217{ 217{
218 unsigned long flags;
218 bool alloc; 219 bool alloc;
219 int id; 220 int id;
220 221
221 spin_lock_bh(&net->nsid_lock); 222 spin_lock_irqsave(&net->nsid_lock, flags);
222 alloc = atomic_read(&peer->count) == 0 ? false : true; 223 alloc = atomic_read(&peer->count) == 0 ? false : true;
223 id = __peernet2id_alloc(net, peer, &alloc); 224 id = __peernet2id_alloc(net, peer, &alloc);
224 spin_unlock_bh(&net->nsid_lock); 225 spin_unlock_irqrestore(&net->nsid_lock, flags);
225 if (alloc && id >= 0) 226 if (alloc && id >= 0)
226 rtnl_net_notifyid(net, RTM_NEWNSID, id); 227 rtnl_net_notifyid(net, RTM_NEWNSID, id);
227 return id; 228 return id;
@@ -230,11 +231,12 @@ int peernet2id_alloc(struct net *net, struct net *peer)
230/* This function returns, if assigned, the id of a peer netns. */ 231/* This function returns, if assigned, the id of a peer netns. */
231int peernet2id(struct net *net, struct net *peer) 232int peernet2id(struct net *net, struct net *peer)
232{ 233{
234 unsigned long flags;
233 int id; 235 int id;
234 236
235 spin_lock_bh(&net->nsid_lock); 237 spin_lock_irqsave(&net->nsid_lock, flags);
236 id = __peernet2id(net, peer); 238 id = __peernet2id(net, peer);
237 spin_unlock_bh(&net->nsid_lock); 239 spin_unlock_irqrestore(&net->nsid_lock, flags);
238 return id; 240 return id;
239} 241}
240EXPORT_SYMBOL(peernet2id); 242EXPORT_SYMBOL(peernet2id);
@@ -249,17 +251,18 @@ bool peernet_has_id(struct net *net, struct net *peer)
249 251
250struct net *get_net_ns_by_id(struct net *net, int id) 252struct net *get_net_ns_by_id(struct net *net, int id)
251{ 253{
254 unsigned long flags;
252 struct net *peer; 255 struct net *peer;
253 256
254 if (id < 0) 257 if (id < 0)
255 return NULL; 258 return NULL;
256 259
257 rcu_read_lock(); 260 rcu_read_lock();
258 spin_lock_bh(&net->nsid_lock); 261 spin_lock_irqsave(&net->nsid_lock, flags);
259 peer = idr_find(&net->netns_ids, id); 262 peer = idr_find(&net->netns_ids, id);
260 if (peer) 263 if (peer)
261 get_net(peer); 264 get_net(peer);
262 spin_unlock_bh(&net->nsid_lock); 265 spin_unlock_irqrestore(&net->nsid_lock, flags);
263 rcu_read_unlock(); 266 rcu_read_unlock();
264 267
265 return peer; 268 return peer;
@@ -422,17 +425,17 @@ static void cleanup_net(struct work_struct *work)
422 for_each_net(tmp) { 425 for_each_net(tmp) {
423 int id; 426 int id;
424 427
425 spin_lock_bh(&tmp->nsid_lock); 428 spin_lock_irq(&tmp->nsid_lock);
426 id = __peernet2id(tmp, net); 429 id = __peernet2id(tmp, net);
427 if (id >= 0) 430 if (id >= 0)
428 idr_remove(&tmp->netns_ids, id); 431 idr_remove(&tmp->netns_ids, id);
429 spin_unlock_bh(&tmp->nsid_lock); 432 spin_unlock_irq(&tmp->nsid_lock);
430 if (id >= 0) 433 if (id >= 0)
431 rtnl_net_notifyid(tmp, RTM_DELNSID, id); 434 rtnl_net_notifyid(tmp, RTM_DELNSID, id);
432 } 435 }
433 spin_lock_bh(&net->nsid_lock); 436 spin_lock_irq(&net->nsid_lock);
434 idr_destroy(&net->netns_ids); 437 idr_destroy(&net->netns_ids);
435 spin_unlock_bh(&net->nsid_lock); 438 spin_unlock_irq(&net->nsid_lock);
436 439
437 } 440 }
438 rtnl_unlock(); 441 rtnl_unlock();
@@ -561,6 +564,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
561{ 564{
562 struct net *net = sock_net(skb->sk); 565 struct net *net = sock_net(skb->sk);
563 struct nlattr *tb[NETNSA_MAX + 1]; 566 struct nlattr *tb[NETNSA_MAX + 1];
567 unsigned long flags;
564 struct net *peer; 568 struct net *peer;
565 int nsid, err; 569 int nsid, err;
566 570
@@ -581,15 +585,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
581 if (IS_ERR(peer)) 585 if (IS_ERR(peer))
582 return PTR_ERR(peer); 586 return PTR_ERR(peer);
583 587
584 spin_lock_bh(&net->nsid_lock); 588 spin_lock_irqsave(&net->nsid_lock, flags);
585 if (__peernet2id(net, peer) >= 0) { 589 if (__peernet2id(net, peer) >= 0) {
586 spin_unlock_bh(&net->nsid_lock); 590 spin_unlock_irqrestore(&net->nsid_lock, flags);
587 err = -EEXIST; 591 err = -EEXIST;
588 goto out; 592 goto out;
589 } 593 }
590 594
591 err = alloc_netid(net, peer, nsid); 595 err = alloc_netid(net, peer, nsid);
592 spin_unlock_bh(&net->nsid_lock); 596 spin_unlock_irqrestore(&net->nsid_lock, flags);
593 if (err >= 0) { 597 if (err >= 0) {
594 rtnl_net_notifyid(net, RTM_NEWNSID, err); 598 rtnl_net_notifyid(net, RTM_NEWNSID, err);
595 err = 0; 599 err = 0;
@@ -711,10 +715,11 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
711 .idx = 0, 715 .idx = 0,
712 .s_idx = cb->args[0], 716 .s_idx = cb->args[0],
713 }; 717 };
718 unsigned long flags;
714 719
715 spin_lock_bh(&net->nsid_lock); 720 spin_lock_irqsave(&net->nsid_lock, flags);
716 idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); 721 idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
717 spin_unlock_bh(&net->nsid_lock); 722 spin_unlock_irqrestore(&net->nsid_lock, flags);
718 723
719 cb->args[0] = net_cb.idx; 724 cb->args[0] = net_cb.idx;
720 return skb->len; 725 return skb->len;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5219a9e2127a..306b8f0e03c1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -216,8 +216,8 @@
216#define M_QUEUE_XMIT 2 /* Inject packet into qdisc */ 216#define M_QUEUE_XMIT 2 /* Inject packet into qdisc */
217 217
218/* If lock -- protects updating of if_list */ 218/* If lock -- protects updating of if_list */
219#define if_lock(t) spin_lock(&(t->if_lock)); 219#define if_lock(t) mutex_lock(&(t->if_lock));
220#define if_unlock(t) spin_unlock(&(t->if_lock)); 220#define if_unlock(t) mutex_unlock(&(t->if_lock));
221 221
222/* Used to help with determining the pkts on receive */ 222/* Used to help with determining the pkts on receive */
223#define PKTGEN_MAGIC 0xbe9be955 223#define PKTGEN_MAGIC 0xbe9be955
@@ -423,7 +423,7 @@ struct pktgen_net {
423}; 423};
424 424
425struct pktgen_thread { 425struct pktgen_thread {
426 spinlock_t if_lock; /* for list of devices */ 426 struct mutex if_lock; /* for list of devices */
427 struct list_head if_list; /* All device here */ 427 struct list_head if_list; /* All device here */
428 struct list_head th_list; 428 struct list_head th_list;
429 struct task_struct *tsk; 429 struct task_struct *tsk;
@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
2010{ 2010{
2011 struct pktgen_thread *t; 2011 struct pktgen_thread *t;
2012 2012
2013 mutex_lock(&pktgen_thread_lock);
2014
2013 list_for_each_entry(t, &pn->pktgen_threads, th_list) { 2015 list_for_each_entry(t, &pn->pktgen_threads, th_list) {
2014 struct pktgen_dev *pkt_dev; 2016 struct pktgen_dev *pkt_dev;
2015 2017
2016 rcu_read_lock(); 2018 if_lock(t);
2017 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { 2019 list_for_each_entry(pkt_dev, &t->if_list, list) {
2018 if (pkt_dev->odev != dev) 2020 if (pkt_dev->odev != dev)
2019 continue; 2021 continue;
2020 2022
@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
2029 dev->name); 2031 dev->name);
2030 break; 2032 break;
2031 } 2033 }
2032 rcu_read_unlock(); 2034 if_unlock(t);
2033 } 2035 }
2036 mutex_unlock(&pktgen_thread_lock);
2034} 2037}
2035 2038
2036static int pktgen_device_event(struct notifier_block *unused, 2039static int pktgen_device_event(struct notifier_block *unused,
@@ -3762,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
3762 return -ENOMEM; 3765 return -ENOMEM;
3763 } 3766 }
3764 3767
3765 spin_lock_init(&t->if_lock); 3768 mutex_init(&t->if_lock);
3766 t->cpu = cpu; 3769 t->cpu = cpu;
3767 3770
3768 INIT_LIST_HEAD(&t->if_list); 3771 INIT_LIST_HEAD(&t->if_list);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index e92b759d906c..9a1a352fd1eb 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -129,7 +129,6 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
129 129
130 return 0; 130 return 0;
131} 131}
132EXPORT_SYMBOL(reuseport_add_sock);
133 132
134static void reuseport_free_rcu(struct rcu_head *head) 133static void reuseport_free_rcu(struct rcu_head *head)
135{ 134{
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 66dff5e3d772..02acfff36028 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
439 439
440 skb_gro_pull(skb, sizeof(*eh)); 440 skb_gro_pull(skb, sizeof(*eh));
441 skb_gro_postpull_rcsum(skb, eh, sizeof(*eh)); 441 skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
442 pp = ptype->callbacks.gro_receive(head, skb); 442 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
443 443
444out_unlock: 444out_unlock:
445 rcu_read_unlock(); 445 rcu_read_unlock();
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 5ee1d43f1310..4ebe2aa3e7d3 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -300,10 +300,6 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
300static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb, 300static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
301 struct hsr_frame_info *frame) 301 struct hsr_frame_info *frame)
302{ 302{
303 struct net_device *master_dev;
304
305 master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev;
306
307 if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) { 303 if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
308 frame->is_local_exclusive = true; 304 frame->is_local_exclusive = true;
309 skb->pkt_type = PACKET_HOST; 305 skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1effc986739e..9648c97e541f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1391,7 +1391,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
1391 skb_gro_pull(skb, sizeof(*iph)); 1391 skb_gro_pull(skb, sizeof(*iph));
1392 skb_set_transport_header(skb, skb_gro_offset(skb)); 1392 skb_set_transport_header(skb, skb_gro_offset(skb));
1393 1393
1394 pp = ops->callbacks.gro_receive(head, skb); 1394 pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
1395 1395
1396out_unlock: 1396out_unlock:
1397 rcu_read_unlock(); 1397 rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index cf50f7e2b012..030d1531e897 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
249 if (!ops || !ops->callbacks.gro_receive) 249 if (!ops || !ops->callbacks.gro_receive)
250 goto out_unlock; 250 goto out_unlock;
251 251
252 pp = ops->callbacks.gro_receive(head, skb); 252 pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
253 253
254out_unlock: 254out_unlock:
255 rcu_read_unlock(); 255 rcu_read_unlock();
@@ -441,7 +441,7 @@ next_proto:
441 if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) 441 if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
442 goto out_unlock; 442 goto out_unlock;
443 443
444 pp = ops->callbacks.gro_receive(head, skb); 444 pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
445 flush = 0; 445 flush = 0;
446 446
447out_unlock: 447out_unlock:
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 96e0efecefa6..d5cac99170b1 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -229,7 +229,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
229 /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ 229 /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
230 skb_gro_postpull_rcsum(skb, greh, grehlen); 230 skb_gro_postpull_rcsum(skb, greh, grehlen);
231 231
232 pp = ptype->callbacks.gro_receive(head, skb); 232 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
233 flush = 0; 233 flush = 0;
234 234
235out_unlock: 235out_unlock:
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 77c20a489218..ca97835bfec4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -25,6 +25,7 @@
25#include <net/inet_hashtables.h> 25#include <net/inet_hashtables.h>
26#include <net/secure_seq.h> 26#include <net/secure_seq.h>
27#include <net/ip.h> 27#include <net/ip.h>
28#include <net/tcp.h>
28#include <net/sock_reuseport.h> 29#include <net/sock_reuseport.h>
29 30
30static u32 inet_ehashfn(const struct net *net, const __be32 laddr, 31static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
@@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port);
172 173
173static inline int compute_score(struct sock *sk, struct net *net, 174static inline int compute_score(struct sock *sk, struct net *net,
174 const unsigned short hnum, const __be32 daddr, 175 const unsigned short hnum, const __be32 daddr,
175 const int dif) 176 const int dif, bool exact_dif)
176{ 177{
177 int score = -1; 178 int score = -1;
178 struct inet_sock *inet = inet_sk(sk); 179 struct inet_sock *inet = inet_sk(sk);
@@ -186,7 +187,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
186 return -1; 187 return -1;
187 score += 4; 188 score += 4;
188 } 189 }
189 if (sk->sk_bound_dev_if) { 190 if (sk->sk_bound_dev_if || exact_dif) {
190 if (sk->sk_bound_dev_if != dif) 191 if (sk->sk_bound_dev_if != dif)
191 return -1; 192 return -1;
192 score += 4; 193 score += 4;
@@ -215,11 +216,12 @@ struct sock *__inet_lookup_listener(struct net *net,
215 unsigned int hash = inet_lhashfn(net, hnum); 216 unsigned int hash = inet_lhashfn(net, hnum);
216 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; 217 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
217 int score, hiscore = 0, matches = 0, reuseport = 0; 218 int score, hiscore = 0, matches = 0, reuseport = 0;
219 bool exact_dif = inet_exact_dif_match(net, skb);
218 struct sock *sk, *result = NULL; 220 struct sock *sk, *result = NULL;
219 u32 phash = 0; 221 u32 phash = 0;
220 222
221 sk_for_each_rcu(sk, &ilb->head) { 223 sk_for_each_rcu(sk, &ilb->head) {
222 score = compute_score(sk, net, hnum, daddr, dif); 224 score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
223 if (score > hiscore) { 225 if (score > hiscore) {
224 reuseport = sk->sk_reuseport; 226 reuseport = sk->sk_reuseport;
225 if (reuseport) { 227 if (reuseport) {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 05d105832bdb..03e7f7310423 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -538,7 +538,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
538{ 538{
539 struct iphdr *iph; 539 struct iphdr *iph;
540 int ptr; 540 int ptr;
541 struct net_device *dev;
542 struct sk_buff *skb2; 541 struct sk_buff *skb2;
543 unsigned int mtu, hlen, left, len, ll_rs; 542 unsigned int mtu, hlen, left, len, ll_rs;
544 int offset; 543 int offset;
@@ -546,8 +545,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
546 struct rtable *rt = skb_rtable(skb); 545 struct rtable *rt = skb_rtable(skb);
547 int err = 0; 546 int err = 0;
548 547
549 dev = rt->dst.dev;
550
551 /* for offloaded checksums cleanup checksum before fragmentation */ 548 /* for offloaded checksums cleanup checksum before fragmentation */
552 if (skb->ip_summed == CHECKSUM_PARTIAL && 549 if (skb->ip_summed == CHECKSUM_PARTIAL &&
553 (err = skb_checksum_help(skb))) 550 (err = skb_checksum_help(skb)))
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index af4919792b6a..b8a2d63d1fb8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
98} 98}
99 99
100static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, 100static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
101 int offset) 101 int tlen, int offset)
102{ 102{
103 __wsum csum = skb->csum; 103 __wsum csum = skb->csum;
104 104
@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
106 return; 106 return;
107 107
108 if (offset != 0) 108 if (offset != 0)
109 csum = csum_sub(csum, csum_partial(skb_transport_header(skb), 109 csum = csum_sub(csum,
110 offset, 0)); 110 csum_partial(skb_transport_header(skb) + tlen,
111 offset, 0));
111 112
112 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); 113 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
113} 114}
@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
153} 154}
154 155
155void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, 156void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
156 int offset) 157 int tlen, int offset)
157{ 158{
158 struct inet_sock *inet = inet_sk(skb->sk); 159 struct inet_sock *inet = inet_sk(skb->sk);
159 unsigned int flags = inet->cmsg_flags; 160 unsigned int flags = inet->cmsg_flags;
@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
216 } 217 }
217 218
218 if (flags & IP_CMSG_CHECKSUM) 219 if (flags & IP_CMSG_CHECKSUM)
219 ip_cmsg_recv_checksum(msg, skb, offset); 220 ip_cmsg_recv_checksum(msg, skb, tlen, offset);
220} 221}
221EXPORT_SYMBOL(ip_cmsg_recv_offset); 222EXPORT_SYMBOL(ip_cmsg_recv_offset);
222 223
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 7cf7d6e380c2..205e2000d395 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -994,7 +994,7 @@ struct proto ping_prot = {
994 .init = ping_init_sock, 994 .init = ping_init_sock,
995 .close = ping_close, 995 .close = ping_close,
996 .connect = ip4_datagram_connect, 996 .connect = ip4_datagram_connect,
997 .disconnect = udp_disconnect, 997 .disconnect = __udp_disconnect,
998 .setsockopt = ip_setsockopt, 998 .setsockopt = ip_setsockopt,
999 .getsockopt = ip_getsockopt, 999 .getsockopt = ip_getsockopt,
1000 .sendmsg = ping_v4_sendmsg, 1000 .sendmsg = ping_v4_sendmsg,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 90a85c955872..ecbe5a7c2d6d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -918,7 +918,7 @@ struct proto raw_prot = {
918 .close = raw_close, 918 .close = raw_close,
919 .destroy = raw_destroy, 919 .destroy = raw_destroy,
920 .connect = ip4_datagram_connect, 920 .connect = ip4_datagram_connect,
921 .disconnect = udp_disconnect, 921 .disconnect = __udp_disconnect,
922 .ioctl = raw_ioctl, 922 .ioctl = raw_ioctl,
923 .init = raw_init, 923 .init = raw_init,
924 .setsockopt = raw_setsockopt, 924 .setsockopt = raw_setsockopt,
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1cb67de106fe..80bc36b25de2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
96 container_of(table->data, struct net, ipv4.ping_group_range.range); 96 container_of(table->data, struct net, ipv4.ping_group_range.range);
97 unsigned int seq; 97 unsigned int seq;
98 do { 98 do {
99 seq = read_seqbegin(&net->ipv4.ip_local_ports.lock); 99 seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
100 100
101 *low = data[0]; 101 *low = data[0];
102 *high = data[1]; 102 *high = data[1];
103 } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq)); 103 } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
104} 104}
105 105
106/* Update system visible IP port range */ 106/* Update system visible IP port range */
@@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
109 kgid_t *data = table->data; 109 kgid_t *data = table->data;
110 struct net *net = 110 struct net *net =
111 container_of(table->data, struct net, ipv4.ping_group_range.range); 111 container_of(table->data, struct net, ipv4.ping_group_range.range);
112 write_seqlock(&net->ipv4.ip_local_ports.lock); 112 write_seqlock(&net->ipv4.ping_group_range.lock);
113 data[0] = low; 113 data[0] = low;
114 data[1] = high; 114 data[1] = high;
115 write_sequnlock(&net->ipv4.ip_local_ports.lock); 115 write_sequnlock(&net->ipv4.ping_group_range.lock);
116} 116}
117 117
118/* Validate changes from /proc interface. */ 118/* Validate changes from /proc interface. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bd5e8d10893f..61b7be303eec 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -86,7 +86,6 @@
86 86
87int sysctl_tcp_tw_reuse __read_mostly; 87int sysctl_tcp_tw_reuse __read_mostly;
88int sysctl_tcp_low_latency __read_mostly; 88int sysctl_tcp_low_latency __read_mostly;
89EXPORT_SYMBOL(sysctl_tcp_low_latency);
90 89
91#ifdef CONFIG_TCP_MD5SIG 90#ifdef CONFIG_TCP_MD5SIG
92static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, 91static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
@@ -1887,7 +1886,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
1887 struct tcp_iter_state *st = seq->private; 1886 struct tcp_iter_state *st = seq->private;
1888 struct net *net = seq_file_net(seq); 1887 struct net *net = seq_file_net(seq);
1889 struct inet_listen_hashbucket *ilb; 1888 struct inet_listen_hashbucket *ilb;
1890 struct inet_connection_sock *icsk;
1891 struct sock *sk = cur; 1889 struct sock *sk = cur;
1892 1890
1893 if (!sk) { 1891 if (!sk) {
@@ -1909,7 +1907,6 @@ get_sk:
1909 continue; 1907 continue;
1910 if (sk->sk_family == st->family) 1908 if (sk->sk_family == st->family)
1911 return sk; 1909 return sk;
1912 icsk = inet_csk(sk);
1913 } 1910 }
1914 spin_unlock_bh(&ilb->lock); 1911 spin_unlock_bh(&ilb->lock);
1915 st->offset = 0; 1912 st->offset = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7d96dc2d3d08..d123d68f4d1d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1322,7 +1322,7 @@ try_again:
1322 *addr_len = sizeof(*sin); 1322 *addr_len = sizeof(*sin);
1323 } 1323 }
1324 if (inet->cmsg_flags) 1324 if (inet->cmsg_flags)
1325 ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off); 1325 ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
1326 1326
1327 err = copied; 1327 err = copied;
1328 if (flags & MSG_TRUNC) 1328 if (flags & MSG_TRUNC)
@@ -1345,7 +1345,7 @@ csum_copy_err:
1345 goto try_again; 1345 goto try_again;
1346} 1346}
1347 1347
1348int udp_disconnect(struct sock *sk, int flags) 1348int __udp_disconnect(struct sock *sk, int flags)
1349{ 1349{
1350 struct inet_sock *inet = inet_sk(sk); 1350 struct inet_sock *inet = inet_sk(sk);
1351 /* 1351 /*
@@ -1367,6 +1367,15 @@ int udp_disconnect(struct sock *sk, int flags)
1367 sk_dst_reset(sk); 1367 sk_dst_reset(sk);
1368 return 0; 1368 return 0;
1369} 1369}
1370EXPORT_SYMBOL(__udp_disconnect);
1371
1372int udp_disconnect(struct sock *sk, int flags)
1373{
1374 lock_sock(sk);
1375 __udp_disconnect(sk, flags);
1376 release_sock(sk);
1377 return 0;
1378}
1370EXPORT_SYMBOL(udp_disconnect); 1379EXPORT_SYMBOL(udp_disconnect);
1371 1380
1372void udp_lib_unhash(struct sock *sk) 1381void udp_lib_unhash(struct sock *sk)
@@ -2193,7 +2202,7 @@ int udp_abort(struct sock *sk, int err)
2193 2202
2194 sk->sk_err = err; 2203 sk->sk_err = err;
2195 sk->sk_error_report(sk); 2204 sk->sk_error_report(sk);
2196 udp_disconnect(sk, 0); 2205 __udp_disconnect(sk, 0);
2197 2206
2198 release_sock(sk); 2207 release_sock(sk);
2199 2208
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f9333c963607..b2be1d9757ef 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -295,7 +295,7 @@ unflush:
295 295
296 skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ 296 skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
297 skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); 297 skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
298 pp = udp_sk(sk)->gro_receive(sk, head, skb); 298 pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
299 299
300out_unlock: 300out_unlock:
301 rcu_read_unlock(); 301 rcu_read_unlock();
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d8983e15f859..060dd9922018 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -147,9 +147,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
147} 147}
148#endif 148#endif
149 149
150static void __ipv6_regen_rndid(struct inet6_dev *idev); 150static void ipv6_regen_rndid(struct inet6_dev *idev);
151static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr); 151static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
152static void ipv6_regen_rndid(unsigned long data);
153 152
154static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); 153static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
155static int ipv6_count_addresses(struct inet6_dev *idev); 154static int ipv6_count_addresses(struct inet6_dev *idev);
@@ -409,9 +408,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
409 goto err_release; 408 goto err_release;
410 } 409 }
411 410
412 /* One reference from device. We must do this before 411 /* One reference from device. */
413 * we invoke __ipv6_regen_rndid().
414 */
415 in6_dev_hold(ndev); 412 in6_dev_hold(ndev);
416 413
417 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) 414 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
@@ -425,17 +422,15 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
425#endif 422#endif
426 423
427 INIT_LIST_HEAD(&ndev->tempaddr_list); 424 INIT_LIST_HEAD(&ndev->tempaddr_list);
428 setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); 425 ndev->desync_factor = U32_MAX;
429 if ((dev->flags&IFF_LOOPBACK) || 426 if ((dev->flags&IFF_LOOPBACK) ||
430 dev->type == ARPHRD_TUNNEL || 427 dev->type == ARPHRD_TUNNEL ||
431 dev->type == ARPHRD_TUNNEL6 || 428 dev->type == ARPHRD_TUNNEL6 ||
432 dev->type == ARPHRD_SIT || 429 dev->type == ARPHRD_SIT ||
433 dev->type == ARPHRD_NONE) { 430 dev->type == ARPHRD_NONE) {
434 ndev->cnf.use_tempaddr = -1; 431 ndev->cnf.use_tempaddr = -1;
435 } else { 432 } else
436 in6_dev_hold(ndev); 433 ipv6_regen_rndid(ndev);
437 ipv6_regen_rndid((unsigned long) ndev);
438 }
439 434
440 ndev->token = in6addr_any; 435 ndev->token = in6addr_any;
441 436
@@ -447,7 +442,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
447 err = addrconf_sysctl_register(ndev); 442 err = addrconf_sysctl_register(ndev);
448 if (err) { 443 if (err) {
449 ipv6_mc_destroy_dev(ndev); 444 ipv6_mc_destroy_dev(ndev);
450 del_timer(&ndev->regen_timer);
451 snmp6_unregister_dev(ndev); 445 snmp6_unregister_dev(ndev);
452 goto err_release; 446 goto err_release;
453 } 447 }
@@ -1190,6 +1184,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
1190 int ret = 0; 1184 int ret = 0;
1191 u32 addr_flags; 1185 u32 addr_flags;
1192 unsigned long now = jiffies; 1186 unsigned long now = jiffies;
1187 long max_desync_factor;
1188 s32 cnf_temp_preferred_lft;
1193 1189
1194 write_lock_bh(&idev->lock); 1190 write_lock_bh(&idev->lock);
1195 if (ift) { 1191 if (ift) {
@@ -1222,23 +1218,42 @@ retry:
1222 } 1218 }
1223 in6_ifa_hold(ifp); 1219 in6_ifa_hold(ifp);
1224 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8); 1220 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1225 __ipv6_try_regen_rndid(idev, tmpaddr); 1221 ipv6_try_regen_rndid(idev, tmpaddr);
1226 memcpy(&addr.s6_addr[8], idev->rndid, 8); 1222 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1227 age = (now - ifp->tstamp) / HZ; 1223 age = (now - ifp->tstamp) / HZ;
1224
1225 regen_advance = idev->cnf.regen_max_retry *
1226 idev->cnf.dad_transmits *
1227 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1228
1229 /* recalculate max_desync_factor each time and update
1230 * idev->desync_factor if it's larger
1231 */
1232 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1233 max_desync_factor = min_t(__u32,
1234 idev->cnf.max_desync_factor,
1235 cnf_temp_preferred_lft - regen_advance);
1236
1237 if (unlikely(idev->desync_factor > max_desync_factor)) {
1238 if (max_desync_factor > 0) {
1239 get_random_bytes(&idev->desync_factor,
1240 sizeof(idev->desync_factor));
1241 idev->desync_factor %= max_desync_factor;
1242 } else {
1243 idev->desync_factor = 0;
1244 }
1245 }
1246
1228 tmp_valid_lft = min_t(__u32, 1247 tmp_valid_lft = min_t(__u32,
1229 ifp->valid_lft, 1248 ifp->valid_lft,
1230 idev->cnf.temp_valid_lft + age); 1249 idev->cnf.temp_valid_lft + age);
1231 tmp_prefered_lft = min_t(__u32, 1250 tmp_prefered_lft = cnf_temp_preferred_lft + age -
1232 ifp->prefered_lft, 1251 idev->desync_factor;
1233 idev->cnf.temp_prefered_lft + age - 1252 tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
1234 idev->cnf.max_desync_factor);
1235 tmp_plen = ifp->prefix_len; 1253 tmp_plen = ifp->prefix_len;
1236 tmp_tstamp = ifp->tstamp; 1254 tmp_tstamp = ifp->tstamp;
1237 spin_unlock_bh(&ifp->lock); 1255 spin_unlock_bh(&ifp->lock);
1238 1256
1239 regen_advance = idev->cnf.regen_max_retry *
1240 idev->cnf.dad_transmits *
1241 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1242 write_unlock_bh(&idev->lock); 1257 write_unlock_bh(&idev->lock);
1243 1258
1244 /* A temporary address is created only if this calculated Preferred 1259 /* A temporary address is created only if this calculated Preferred
@@ -2150,7 +2165,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2150} 2165}
2151 2166
2152/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */ 2167/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2153static void __ipv6_regen_rndid(struct inet6_dev *idev) 2168static void ipv6_regen_rndid(struct inet6_dev *idev)
2154{ 2169{
2155regen: 2170regen:
2156 get_random_bytes(idev->rndid, sizeof(idev->rndid)); 2171 get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -2179,43 +2194,10 @@ regen:
2179 } 2194 }
2180} 2195}
2181 2196
2182static void ipv6_regen_rndid(unsigned long data) 2197static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2183{
2184 struct inet6_dev *idev = (struct inet6_dev *) data;
2185 unsigned long expires;
2186
2187 rcu_read_lock_bh();
2188 write_lock_bh(&idev->lock);
2189
2190 if (idev->dead)
2191 goto out;
2192
2193 __ipv6_regen_rndid(idev);
2194
2195 expires = jiffies +
2196 idev->cnf.temp_prefered_lft * HZ -
2197 idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
2198 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
2199 idev->cnf.max_desync_factor * HZ;
2200 if (time_before(expires, jiffies)) {
2201 pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
2202 __func__, idev->dev->name);
2203 goto out;
2204 }
2205
2206 if (!mod_timer(&idev->regen_timer, expires))
2207 in6_dev_hold(idev);
2208
2209out:
2210 write_unlock_bh(&idev->lock);
2211 rcu_read_unlock_bh();
2212 in6_dev_put(idev);
2213}
2214
2215static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2216{ 2198{
2217 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) 2199 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2218 __ipv6_regen_rndid(idev); 2200 ipv6_regen_rndid(idev);
2219} 2201}
2220 2202
2221/* 2203/*
@@ -2356,7 +2338,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
2356 max_valid = 0; 2338 max_valid = 0;
2357 2339
2358 max_prefered = idev->cnf.temp_prefered_lft - 2340 max_prefered = idev->cnf.temp_prefered_lft -
2359 idev->cnf.max_desync_factor - age; 2341 idev->desync_factor - age;
2360 if (max_prefered < 0) 2342 if (max_prefered < 0)
2361 max_prefered = 0; 2343 max_prefered = 0;
2362 2344
@@ -3018,7 +3000,7 @@ static void init_loopback(struct net_device *dev)
3018 * lo device down, release this obsolete dst and 3000 * lo device down, release this obsolete dst and
3019 * reallocate a new router for ifa. 3001 * reallocate a new router for ifa.
3020 */ 3002 */
3021 if (sp_ifa->rt->dst.obsolete > 0) { 3003 if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
3022 ip6_rt_put(sp_ifa->rt); 3004 ip6_rt_put(sp_ifa->rt);
3023 sp_ifa->rt = NULL; 3005 sp_ifa->rt = NULL;
3024 } else { 3006 } else {
@@ -3594,9 +3576,6 @@ restart:
3594 if (!how) 3576 if (!how)
3595 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); 3577 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3596 3578
3597 if (how && del_timer(&idev->regen_timer))
3598 in6_dev_put(idev);
3599
3600 /* Step 3: clear tempaddr list */ 3579 /* Step 3: clear tempaddr list */
3601 while (!list_empty(&idev->tempaddr_list)) { 3580 while (!list_empty(&idev->tempaddr_list)) {
3602 ifa = list_first_entry(&idev->tempaddr_list, 3581 ifa = list_first_entry(&idev->tempaddr_list,
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 00cf28ad4565..02761c9fe43e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established);
96static inline int compute_score(struct sock *sk, struct net *net, 96static inline int compute_score(struct sock *sk, struct net *net,
97 const unsigned short hnum, 97 const unsigned short hnum,
98 const struct in6_addr *daddr, 98 const struct in6_addr *daddr,
99 const int dif) 99 const int dif, bool exact_dif)
100{ 100{
101 int score = -1; 101 int score = -1;
102 102
@@ -109,7 +109,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
109 return -1; 109 return -1;
110 score++; 110 score++;
111 } 111 }
112 if (sk->sk_bound_dev_if) { 112 if (sk->sk_bound_dev_if || exact_dif) {
113 if (sk->sk_bound_dev_if != dif) 113 if (sk->sk_bound_dev_if != dif)
114 return -1; 114 return -1;
115 score++; 115 score++;
@@ -131,11 +131,12 @@ struct sock *inet6_lookup_listener(struct net *net,
131 unsigned int hash = inet_lhashfn(net, hnum); 131 unsigned int hash = inet_lhashfn(net, hnum);
132 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; 132 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
133 int score, hiscore = 0, matches = 0, reuseport = 0; 133 int score, hiscore = 0, matches = 0, reuseport = 0;
134 bool exact_dif = inet6_exact_dif_match(net, skb);
134 struct sock *sk, *result = NULL; 135 struct sock *sk, *result = NULL;
135 u32 phash = 0; 136 u32 phash = 0;
136 137
137 sk_for_each(sk, &ilb->head) { 138 sk_for_each(sk, &ilb->head) {
138 score = compute_score(sk, net, hnum, daddr, dif); 139 score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
139 if (score > hiscore) { 140 if (score > hiscore) {
140 reuseport = sk->sk_reuseport; 141 reuseport = sk->sk_reuseport;
141 if (reuseport) { 142 if (reuseport) {
@@ -263,13 +264,15 @@ EXPORT_SYMBOL_GPL(inet6_hash_connect);
263 264
264int inet6_hash(struct sock *sk) 265int inet6_hash(struct sock *sk)
265{ 266{
267 int err = 0;
268
266 if (sk->sk_state != TCP_CLOSE) { 269 if (sk->sk_state != TCP_CLOSE) {
267 local_bh_disable(); 270 local_bh_disable();
268 __inet_hash(sk, NULL, ipv6_rcv_saddr_equal); 271 err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
269 local_bh_enable(); 272 local_bh_enable();
270 } 273 }
271 274
272 return 0; 275 return err;
273} 276}
274EXPORT_SYMBOL_GPL(inet6_hash); 277EXPORT_SYMBOL_GPL(inet6_hash);
275 278
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e7bfd55899a3..1fcf61f1cbc3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -246,7 +246,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
246 246
247 skb_gro_postpull_rcsum(skb, iph, nlen); 247 skb_gro_postpull_rcsum(skb, iph, nlen);
248 248
249 pp = ops->callbacks.gro_receive(head, skb); 249 pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
250 250
251out_unlock: 251out_unlock:
252 rcu_read_unlock(); 252 rcu_read_unlock();
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6a66adba0c22..87784560dc46 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -157,6 +157,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
157 hash = HASH(&any, local); 157 hash = HASH(&any, local);
158 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { 158 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 if (ipv6_addr_equal(local, &t->parms.laddr) && 159 if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 ipv6_addr_any(&t->parms.raddr) &&
160 (t->dev->flags & IFF_UP)) 161 (t->dev->flags & IFF_UP))
161 return t; 162 return t;
162 } 163 }
@@ -164,6 +165,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
164 hash = HASH(remote, &any); 165 hash = HASH(remote, &any);
165 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { 166 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
166 if (ipv6_addr_equal(remote, &t->parms.raddr) && 167 if (ipv6_addr_equal(remote, &t->parms.raddr) &&
168 ipv6_addr_any(&t->parms.laddr) &&
167 (t->dev->flags & IFF_UP)) 169 (t->dev->flags & IFF_UP))
168 return t; 170 return t;
169 } 171 }
@@ -1170,6 +1172,7 @@ route_lookup:
1170 if (err) 1172 if (err)
1171 return err; 1173 return err;
1172 1174
1175 skb->protocol = htons(ETH_P_IPV6);
1173 skb_push(skb, sizeof(struct ipv6hdr)); 1176 skb_push(skb, sizeof(struct ipv6hdr));
1174 skb_reset_network_header(skb); 1177 skb_reset_network_header(skb);
1175 ipv6h = ipv6_hdr(skb); 1178 ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 5330262ab673..636ec56f5f50 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -120,6 +120,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
120static bool setsockopt_needs_rtnl(int optname) 120static bool setsockopt_needs_rtnl(int optname)
121{ 121{
122 switch (optname) { 122 switch (optname) {
123 case IPV6_ADDRFORM:
123 case IPV6_ADD_MEMBERSHIP: 124 case IPV6_ADD_MEMBERSHIP:
124 case IPV6_DROP_MEMBERSHIP: 125 case IPV6_DROP_MEMBERSHIP:
125 case IPV6_JOIN_ANYCAST: 126 case IPV6_JOIN_ANYCAST:
@@ -198,7 +199,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
198 } 199 }
199 200
200 fl6_free_socklist(sk); 201 fl6_free_socklist(sk);
201 ipv6_sock_mc_close(sk); 202 __ipv6_sock_mc_close(sk);
202 203
203 /* 204 /*
204 * Sock is moving from IPv6 to IPv4 (sk_prot), so 205 * Sock is moving from IPv6 to IPv4 (sk_prot), so
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 75c1fc54f188..14a3903f1c82 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -276,16 +276,14 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
276 return idev; 276 return idev;
277} 277}
278 278
279void ipv6_sock_mc_close(struct sock *sk) 279void __ipv6_sock_mc_close(struct sock *sk)
280{ 280{
281 struct ipv6_pinfo *np = inet6_sk(sk); 281 struct ipv6_pinfo *np = inet6_sk(sk);
282 struct ipv6_mc_socklist *mc_lst; 282 struct ipv6_mc_socklist *mc_lst;
283 struct net *net = sock_net(sk); 283 struct net *net = sock_net(sk);
284 284
285 if (!rcu_access_pointer(np->ipv6_mc_list)) 285 ASSERT_RTNL();
286 return;
287 286
288 rtnl_lock();
289 while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) { 287 while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
290 struct net_device *dev; 288 struct net_device *dev;
291 289
@@ -303,8 +301,17 @@ void ipv6_sock_mc_close(struct sock *sk)
303 301
304 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); 302 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
305 kfree_rcu(mc_lst, rcu); 303 kfree_rcu(mc_lst, rcu);
306
307 } 304 }
305}
306
307void ipv6_sock_mc_close(struct sock *sk)
308{
309 struct ipv6_pinfo *np = inet6_sk(sk);
310
311 if (!rcu_access_pointer(np->ipv6_mc_list))
312 return;
313 rtnl_lock();
314 __ipv6_sock_mc_close(sk);
308 rtnl_unlock(); 315 rtnl_unlock();
309} 316}
310 317
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 0e983b694ee8..66e2d9dfc43a 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -180,7 +180,7 @@ struct proto pingv6_prot = {
180 .init = ping_init_sock, 180 .init = ping_init_sock,
181 .close = ping_close, 181 .close = ping_close,
182 .connect = ip6_datagram_connect_v6_only, 182 .connect = ip6_datagram_connect_v6_only,
183 .disconnect = udp_disconnect, 183 .disconnect = __udp_disconnect,
184 .setsockopt = ipv6_setsockopt, 184 .setsockopt = ipv6_setsockopt,
185 .getsockopt = ipv6_getsockopt, 185 .getsockopt = ipv6_getsockopt,
186 .sendmsg = ping_v6_sendmsg, 186 .sendmsg = ping_v6_sendmsg,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 54404f08efcc..054a1d84fc5e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1241,7 +1241,7 @@ struct proto rawv6_prot = {
1241 .close = rawv6_close, 1241 .close = rawv6_close,
1242 .destroy = raw6_destroy, 1242 .destroy = raw6_destroy,
1243 .connect = ip6_datagram_connect_v6_only, 1243 .connect = ip6_datagram_connect_v6_only,
1244 .disconnect = udp_disconnect, 1244 .disconnect = __udp_disconnect,
1245 .ioctl = rawv6_ioctl, 1245 .ioctl = rawv6_ioctl,
1246 .init = rawv6_init_sk, 1246 .init = rawv6_init_sk,
1247 .setsockopt = rawv6_setsockopt, 1247 .setsockopt = rawv6_setsockopt,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2160d5d009cb..3815e8505ed2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -456,7 +456,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
456 skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; 456 skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
457 memmove(head->head + sizeof(struct frag_hdr), head->head, 457 memmove(head->head + sizeof(struct frag_hdr), head->head,
458 (head->data - head->head) - sizeof(struct frag_hdr)); 458 (head->data - head->head) - sizeof(struct frag_hdr));
459 head->mac_header += sizeof(struct frag_hdr); 459 if (skb_mac_header_was_set(head))
460 head->mac_header += sizeof(struct frag_hdr);
460 head->network_header += sizeof(struct frag_hdr); 461 head->network_header += sizeof(struct frag_hdr);
461 462
462 skb_reset_transport_header(head); 463 skb_reset_transport_header(head);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bdbc38e8bf29..947ed1ded026 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -102,11 +102,13 @@ static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
102#ifdef CONFIG_IPV6_ROUTE_INFO 102#ifdef CONFIG_IPV6_ROUTE_INFO
103static struct rt6_info *rt6_add_route_info(struct net *net, 103static struct rt6_info *rt6_add_route_info(struct net *net,
104 const struct in6_addr *prefix, int prefixlen, 104 const struct in6_addr *prefix, int prefixlen,
105 const struct in6_addr *gwaddr, int ifindex, 105 const struct in6_addr *gwaddr,
106 struct net_device *dev,
106 unsigned int pref); 107 unsigned int pref);
107static struct rt6_info *rt6_get_route_info(struct net *net, 108static struct rt6_info *rt6_get_route_info(struct net *net,
108 const struct in6_addr *prefix, int prefixlen, 109 const struct in6_addr *prefix, int prefixlen,
109 const struct in6_addr *gwaddr, int ifindex); 110 const struct in6_addr *gwaddr,
111 struct net_device *dev);
110#endif 112#endif
111 113
112struct uncached_list { 114struct uncached_list {
@@ -656,7 +658,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
656 struct net_device *dev = rt->dst.dev; 658 struct net_device *dev = rt->dst.dev;
657 659
658 if (dev && !netif_carrier_ok(dev) && 660 if (dev && !netif_carrier_ok(dev) &&
659 idev->cnf.ignore_routes_with_linkdown) 661 idev->cnf.ignore_routes_with_linkdown &&
662 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
660 goto out; 663 goto out;
661 664
662 if (rt6_check_expired(rt)) 665 if (rt6_check_expired(rt))
@@ -803,7 +806,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
803 rt = rt6_get_dflt_router(gwaddr, dev); 806 rt = rt6_get_dflt_router(gwaddr, dev);
804 else 807 else
805 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, 808 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
806 gwaddr, dev->ifindex); 809 gwaddr, dev);
807 810
808 if (rt && !lifetime) { 811 if (rt && !lifetime) {
809 ip6_del_rt(rt); 812 ip6_del_rt(rt);
@@ -811,8 +814,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
811 } 814 }
812 815
813 if (!rt && lifetime) 816 if (!rt && lifetime)
814 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex, 817 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
815 pref); 818 dev, pref);
816 else if (rt) 819 else if (rt)
817 rt->rt6i_flags = RTF_ROUTEINFO | 820 rt->rt6i_flags = RTF_ROUTEINFO |
818 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 821 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1050,6 +1053,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1050 int strict = 0; 1053 int strict = 0;
1051 1054
1052 strict |= flags & RT6_LOOKUP_F_IFACE; 1055 strict |= flags & RT6_LOOKUP_F_IFACE;
1056 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
1053 if (net->ipv6.devconf_all->forwarding == 0) 1057 if (net->ipv6.devconf_all->forwarding == 0)
1054 strict |= RT6_LOOKUP_F_REACHABLE; 1058 strict |= RT6_LOOKUP_F_REACHABLE;
1055 1059
@@ -1789,7 +1793,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
1789 }; 1793 };
1790 struct fib6_table *table; 1794 struct fib6_table *table;
1791 struct rt6_info *rt; 1795 struct rt6_info *rt;
1792 int flags = RT6_LOOKUP_F_IFACE; 1796 int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
1793 1797
1794 table = fib6_get_table(net, cfg->fc_table); 1798 table = fib6_get_table(net, cfg->fc_table);
1795 if (!table) 1799 if (!table)
@@ -2325,13 +2329,16 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2325#ifdef CONFIG_IPV6_ROUTE_INFO 2329#ifdef CONFIG_IPV6_ROUTE_INFO
2326static struct rt6_info *rt6_get_route_info(struct net *net, 2330static struct rt6_info *rt6_get_route_info(struct net *net,
2327 const struct in6_addr *prefix, int prefixlen, 2331 const struct in6_addr *prefix, int prefixlen,
2328 const struct in6_addr *gwaddr, int ifindex) 2332 const struct in6_addr *gwaddr,
2333 struct net_device *dev)
2329{ 2334{
2335 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2336 int ifindex = dev->ifindex;
2330 struct fib6_node *fn; 2337 struct fib6_node *fn;
2331 struct rt6_info *rt = NULL; 2338 struct rt6_info *rt = NULL;
2332 struct fib6_table *table; 2339 struct fib6_table *table;
2333 2340
2334 table = fib6_get_table(net, RT6_TABLE_INFO); 2341 table = fib6_get_table(net, tb_id);
2335 if (!table) 2342 if (!table)
2336 return NULL; 2343 return NULL;
2337 2344
@@ -2357,12 +2364,13 @@ out:
2357 2364
2358static struct rt6_info *rt6_add_route_info(struct net *net, 2365static struct rt6_info *rt6_add_route_info(struct net *net,
2359 const struct in6_addr *prefix, int prefixlen, 2366 const struct in6_addr *prefix, int prefixlen,
2360 const struct in6_addr *gwaddr, int ifindex, 2367 const struct in6_addr *gwaddr,
2368 struct net_device *dev,
2361 unsigned int pref) 2369 unsigned int pref)
2362{ 2370{
2363 struct fib6_config cfg = { 2371 struct fib6_config cfg = {
2364 .fc_metric = IP6_RT_PRIO_USER, 2372 .fc_metric = IP6_RT_PRIO_USER,
2365 .fc_ifindex = ifindex, 2373 .fc_ifindex = dev->ifindex,
2366 .fc_dst_len = prefixlen, 2374 .fc_dst_len = prefixlen,
2367 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 2375 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2368 RTF_UP | RTF_PREF(pref), 2376 RTF_UP | RTF_PREF(pref),
@@ -2371,7 +2379,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
2371 .fc_nlinfo.nl_net = net, 2379 .fc_nlinfo.nl_net = net,
2372 }; 2380 };
2373 2381
2374 cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO; 2382 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
2375 cfg.fc_dst = *prefix; 2383 cfg.fc_dst = *prefix;
2376 cfg.fc_gateway = *gwaddr; 2384 cfg.fc_gateway = *gwaddr;
2377 2385
@@ -2381,16 +2389,17 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
2381 2389
2382 ip6_route_add(&cfg); 2390 ip6_route_add(&cfg);
2383 2391
2384 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex); 2392 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
2385} 2393}
2386#endif 2394#endif
2387 2395
2388struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev) 2396struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2389{ 2397{
2398 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
2390 struct rt6_info *rt; 2399 struct rt6_info *rt;
2391 struct fib6_table *table; 2400 struct fib6_table *table;
2392 2401
2393 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT); 2402 table = fib6_get_table(dev_net(dev), tb_id);
2394 if (!table) 2403 if (!table)
2395 return NULL; 2404 return NULL;
2396 2405
@@ -2424,20 +2433,20 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2424 2433
2425 cfg.fc_gateway = *gwaddr; 2434 cfg.fc_gateway = *gwaddr;
2426 2435
2427 ip6_route_add(&cfg); 2436 if (!ip6_route_add(&cfg)) {
2437 struct fib6_table *table;
2438
2439 table = fib6_get_table(dev_net(dev), cfg.fc_table);
2440 if (table)
2441 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
2442 }
2428 2443
2429 return rt6_get_dflt_router(gwaddr, dev); 2444 return rt6_get_dflt_router(gwaddr, dev);
2430} 2445}
2431 2446
2432void rt6_purge_dflt_routers(struct net *net) 2447static void __rt6_purge_dflt_routers(struct fib6_table *table)
2433{ 2448{
2434 struct rt6_info *rt; 2449 struct rt6_info *rt;
2435 struct fib6_table *table;
2436
2437 /* NOTE: Keep consistent with rt6_get_dflt_router */
2438 table = fib6_get_table(net, RT6_TABLE_DFLT);
2439 if (!table)
2440 return;
2441 2450
2442restart: 2451restart:
2443 read_lock_bh(&table->tb6_lock); 2452 read_lock_bh(&table->tb6_lock);
@@ -2451,6 +2460,27 @@ restart:
2451 } 2460 }
2452 } 2461 }
2453 read_unlock_bh(&table->tb6_lock); 2462 read_unlock_bh(&table->tb6_lock);
2463
2464 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
2465}
2466
2467void rt6_purge_dflt_routers(struct net *net)
2468{
2469 struct fib6_table *table;
2470 struct hlist_head *head;
2471 unsigned int h;
2472
2473 rcu_read_lock();
2474
2475 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
2476 head = &net->ipv6.fib_table_hash[h];
2477 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
2478 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
2479 __rt6_purge_dflt_routers(table);
2480 }
2481 }
2482
2483 rcu_read_unlock();
2454} 2484}
2455 2485
2456static void rtmsg_to_fib6_config(struct net *net, 2486static void rtmsg_to_fib6_config(struct net *net,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9aa7c1c7a9ce..b2ef061e6836 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -427,7 +427,8 @@ try_again:
427 427
428 if (is_udp4) { 428 if (is_udp4) {
429 if (inet->cmsg_flags) 429 if (inet->cmsg_flags)
430 ip_cmsg_recv(msg, skb); 430 ip_cmsg_recv_offset(msg, skb,
431 sizeof(struct udphdr), off);
431 } else { 432 } else {
432 if (np->rxopt.all) 433 if (np->rxopt.all)
433 ip6_datagram_recv_specific_ctl(sk, msg, skb); 434 ip6_datagram_recv_specific_ctl(sk, msg, skb);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 42de4ccd159f..fce25afb652a 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -338,7 +338,7 @@ static int l2tp_ip_disconnect(struct sock *sk, int flags)
338 if (sock_flag(sk, SOCK_ZAPPED)) 338 if (sock_flag(sk, SOCK_ZAPPED))
339 return 0; 339 return 0;
340 340
341 return udp_disconnect(sk, flags); 341 return __udp_disconnect(sk, flags);
342} 342}
343 343
344static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, 344static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index ea2ae6664cc8..ad3468c32b53 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -410,7 +410,7 @@ static int l2tp_ip6_disconnect(struct sock *sk, int flags)
410 if (sock_flag(sk, SOCK_ZAPPED)) 410 if (sock_flag(sk, SOCK_ZAPPED))
411 return 0; 411 return 0;
412 412
413 return udp_disconnect(sk, flags); 413 return __udp_disconnect(sk, flags);
414} 414}
415 415
416static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, 416static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7663c28ba353..a4e0d59a40dd 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -18,21 +18,24 @@
18#include "key.h" 18#include "key.h"
19#include "aes_ccm.h" 19#include "aes_ccm.h"
20 20
21void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 21int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
22 u8 *data, size_t data_len, u8 *mic, 22 u8 *data, size_t data_len, u8 *mic,
23 size_t mic_len) 23 size_t mic_len)
24{ 24{
25 struct scatterlist sg[3]; 25 struct scatterlist sg[3];
26 struct aead_request *aead_req;
27 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
28 u8 *__aad;
26 29
27 char aead_req_data[sizeof(struct aead_request) + 30 aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
28 crypto_aead_reqsize(tfm)] 31 if (!aead_req)
29 __aligned(__alignof__(struct aead_request)); 32 return -ENOMEM;
30 struct aead_request *aead_req = (void *) aead_req_data;
31 33
32 memset(aead_req, 0, sizeof(aead_req_data)); 34 __aad = (u8 *)aead_req + reqsize;
35 memcpy(__aad, aad, CCM_AAD_LEN);
33 36
34 sg_init_table(sg, 3); 37 sg_init_table(sg, 3);
35 sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); 38 sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
36 sg_set_buf(&sg[1], data, data_len); 39 sg_set_buf(&sg[1], data, data_len);
37 sg_set_buf(&sg[2], mic, mic_len); 40 sg_set_buf(&sg[2], mic, mic_len);
38 41
@@ -41,6 +44,9 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
41 aead_request_set_ad(aead_req, sg[0].length); 44 aead_request_set_ad(aead_req, sg[0].length);
42 45
43 crypto_aead_encrypt(aead_req); 46 crypto_aead_encrypt(aead_req);
47 kzfree(aead_req);
48
49 return 0;
44} 50}
45 51
46int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 52int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
@@ -48,18 +54,23 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
48 size_t mic_len) 54 size_t mic_len)
49{ 55{
50 struct scatterlist sg[3]; 56 struct scatterlist sg[3];
51 char aead_req_data[sizeof(struct aead_request) + 57 struct aead_request *aead_req;
52 crypto_aead_reqsize(tfm)] 58 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
53 __aligned(__alignof__(struct aead_request)); 59 u8 *__aad;
54 struct aead_request *aead_req = (void *) aead_req_data; 60 int err;
55 61
56 if (data_len == 0) 62 if (data_len == 0)
57 return -EINVAL; 63 return -EINVAL;
58 64
59 memset(aead_req, 0, sizeof(aead_req_data)); 65 aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
66 if (!aead_req)
67 return -ENOMEM;
68
69 __aad = (u8 *)aead_req + reqsize;
70 memcpy(__aad, aad, CCM_AAD_LEN);
60 71
61 sg_init_table(sg, 3); 72 sg_init_table(sg, 3);
62 sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); 73 sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
63 sg_set_buf(&sg[1], data, data_len); 74 sg_set_buf(&sg[1], data, data_len);
64 sg_set_buf(&sg[2], mic, mic_len); 75 sg_set_buf(&sg[2], mic, mic_len);
65 76
@@ -67,7 +78,10 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
67 aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0); 78 aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
68 aead_request_set_ad(aead_req, sg[0].length); 79 aead_request_set_ad(aead_req, sg[0].length);
69 80
70 return crypto_aead_decrypt(aead_req); 81 err = crypto_aead_decrypt(aead_req);
82 kzfree(aead_req);
83
84 return err;
71} 85}
72 86
73struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[], 87struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 6a73d1e4d186..fcd3254c5cf0 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,12 +12,14 @@
12 12
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14 14
15#define CCM_AAD_LEN 32
16
15struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[], 17struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
16 size_t key_len, 18 size_t key_len,
17 size_t mic_len); 19 size_t mic_len);
18void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 20int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
19 u8 *data, size_t data_len, u8 *mic, 21 u8 *data, size_t data_len, u8 *mic,
20 size_t mic_len); 22 size_t mic_len);
21int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 23int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
22 u8 *data, size_t data_len, u8 *mic, 24 u8 *data, size_t data_len, u8 *mic,
23 size_t mic_len); 25 size_t mic_len);
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
index 3afe361fd27c..8a4397cc1b08 100644
--- a/net/mac80211/aes_gcm.c
+++ b/net/mac80211/aes_gcm.c
@@ -15,20 +15,23 @@
15#include "key.h" 15#include "key.h"
16#include "aes_gcm.h" 16#include "aes_gcm.h"
17 17
18void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, 18int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
19 u8 *data, size_t data_len, u8 *mic) 19 u8 *data, size_t data_len, u8 *mic)
20{ 20{
21 struct scatterlist sg[3]; 21 struct scatterlist sg[3];
22 struct aead_request *aead_req;
23 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
24 u8 *__aad;
22 25
23 char aead_req_data[sizeof(struct aead_request) + 26 aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
24 crypto_aead_reqsize(tfm)] 27 if (!aead_req)
25 __aligned(__alignof__(struct aead_request)); 28 return -ENOMEM;
26 struct aead_request *aead_req = (void *)aead_req_data;
27 29
28 memset(aead_req, 0, sizeof(aead_req_data)); 30 __aad = (u8 *)aead_req + reqsize;
31 memcpy(__aad, aad, GCM_AAD_LEN);
29 32
30 sg_init_table(sg, 3); 33 sg_init_table(sg, 3);
31 sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); 34 sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
32 sg_set_buf(&sg[1], data, data_len); 35 sg_set_buf(&sg[1], data, data_len);
33 sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); 36 sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
34 37
@@ -37,24 +40,31 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
37 aead_request_set_ad(aead_req, sg[0].length); 40 aead_request_set_ad(aead_req, sg[0].length);
38 41
39 crypto_aead_encrypt(aead_req); 42 crypto_aead_encrypt(aead_req);
43 kzfree(aead_req);
44 return 0;
40} 45}
41 46
42int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, 47int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
43 u8 *data, size_t data_len, u8 *mic) 48 u8 *data, size_t data_len, u8 *mic)
44{ 49{
45 struct scatterlist sg[3]; 50 struct scatterlist sg[3];
46 char aead_req_data[sizeof(struct aead_request) + 51 struct aead_request *aead_req;
47 crypto_aead_reqsize(tfm)] 52 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
48 __aligned(__alignof__(struct aead_request)); 53 u8 *__aad;
49 struct aead_request *aead_req = (void *)aead_req_data; 54 int err;
50 55
51 if (data_len == 0) 56 if (data_len == 0)
52 return -EINVAL; 57 return -EINVAL;
53 58
54 memset(aead_req, 0, sizeof(aead_req_data)); 59 aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
60 if (!aead_req)
61 return -ENOMEM;
62
63 __aad = (u8 *)aead_req + reqsize;
64 memcpy(__aad, aad, GCM_AAD_LEN);
55 65
56 sg_init_table(sg, 3); 66 sg_init_table(sg, 3);
57 sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); 67 sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
58 sg_set_buf(&sg[1], data, data_len); 68 sg_set_buf(&sg[1], data, data_len);
59 sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); 69 sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
60 70
@@ -63,7 +73,10 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
63 data_len + IEEE80211_GCMP_MIC_LEN, j_0); 73 data_len + IEEE80211_GCMP_MIC_LEN, j_0);
64 aead_request_set_ad(aead_req, sg[0].length); 74 aead_request_set_ad(aead_req, sg[0].length);
65 75
66 return crypto_aead_decrypt(aead_req); 76 err = crypto_aead_decrypt(aead_req);
77 kzfree(aead_req);
78
79 return err;
67} 80}
68 81
69struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], 82struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
index 1347fda6b76a..55aed5352494 100644
--- a/net/mac80211/aes_gcm.h
+++ b/net/mac80211/aes_gcm.h
@@ -11,8 +11,10 @@
11 11
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13 13
14void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, 14#define GCM_AAD_LEN 32
15 u8 *data, size_t data_len, u8 *mic); 15
16int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
17 u8 *data, size_t data_len, u8 *mic);
16int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, 18int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
17 u8 *data, size_t data_len, u8 *mic); 19 u8 *data, size_t data_len, u8 *mic);
18struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], 20struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
index 3ddd927aaf30..bd72a862ddb7 100644
--- a/net/mac80211/aes_gmac.c
+++ b/net/mac80211/aes_gmac.c
@@ -17,28 +17,27 @@
17#include "key.h" 17#include "key.h"
18#include "aes_gmac.h" 18#include "aes_gmac.h"
19 19
20#define GMAC_MIC_LEN 16
21#define GMAC_NONCE_LEN 12
22#define AAD_LEN 20
23
24int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, 20int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
25 const u8 *data, size_t data_len, u8 *mic) 21 const u8 *data, size_t data_len, u8 *mic)
26{ 22{
27 struct scatterlist sg[4]; 23 struct scatterlist sg[4];
28 char aead_req_data[sizeof(struct aead_request) + 24 u8 *zero, *__aad, iv[AES_BLOCK_SIZE];
29 crypto_aead_reqsize(tfm)] 25 struct aead_request *aead_req;
30 __aligned(__alignof__(struct aead_request)); 26 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
31 struct aead_request *aead_req = (void *)aead_req_data;
32 u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
33 27
34 if (data_len < GMAC_MIC_LEN) 28 if (data_len < GMAC_MIC_LEN)
35 return -EINVAL; 29 return -EINVAL;
36 30
37 memset(aead_req, 0, sizeof(aead_req_data)); 31 aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC);
32 if (!aead_req)
33 return -ENOMEM;
34
35 zero = (u8 *)aead_req + reqsize;
36 __aad = zero + GMAC_MIC_LEN;
37 memcpy(__aad, aad, GMAC_AAD_LEN);
38 38
39 memset(zero, 0, GMAC_MIC_LEN);
40 sg_init_table(sg, 4); 39 sg_init_table(sg, 4);
41 sg_set_buf(&sg[0], aad, AAD_LEN); 40 sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
42 sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); 41 sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
43 sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); 42 sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
44 sg_set_buf(&sg[3], mic, GMAC_MIC_LEN); 43 sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
@@ -49,9 +48,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
49 48
50 aead_request_set_tfm(aead_req, tfm); 49 aead_request_set_tfm(aead_req, tfm);
51 aead_request_set_crypt(aead_req, sg, sg, 0, iv); 50 aead_request_set_crypt(aead_req, sg, sg, 0, iv);
52 aead_request_set_ad(aead_req, AAD_LEN + data_len); 51 aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
53 52
54 crypto_aead_encrypt(aead_req); 53 crypto_aead_encrypt(aead_req);
54 kzfree(aead_req);
55 55
56 return 0; 56 return 0;
57} 57}
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
index d328204d73a8..32e6442c95be 100644
--- a/net/mac80211/aes_gmac.h
+++ b/net/mac80211/aes_gmac.h
@@ -11,6 +11,10 @@
11 11
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13 13
14#define GMAC_AAD_LEN 20
15#define GMAC_MIC_LEN 16
16#define GMAC_NONCE_LEN 12
17
14struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[], 18struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
15 size_t key_len); 19 size_t key_len);
16int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, 20int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c3f610bba3fe..eede5c6db8d5 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -820,7 +820,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
820 mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) 820 mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
821 break; 821 break;
822 rcu_read_lock(); 822 rcu_read_lock();
823 sta = sta_info_get(sdata, mgmt->da); 823 sta = sta_info_get_bss(sdata, mgmt->da);
824 rcu_read_unlock(); 824 rcu_read_unlock();
825 if (!sta) 825 if (!sta)
826 return -ENOLINK; 826 return -ENOLINK;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6175db385ba7..a47bbc973f2d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2298,6 +2298,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2298 __le16 fc = hdr->frame_control; 2298 __le16 fc = hdr->frame_control;
2299 struct sk_buff_head frame_list; 2299 struct sk_buff_head frame_list;
2300 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2300 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2301 struct ethhdr ethhdr;
2302 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2301 2303
2302 if (unlikely(!ieee80211_is_data(fc))) 2304 if (unlikely(!ieee80211_is_data(fc)))
2303 return RX_CONTINUE; 2305 return RX_CONTINUE;
@@ -2308,24 +2310,53 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2308 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2310 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2309 return RX_CONTINUE; 2311 return RX_CONTINUE;
2310 2312
2311 if (ieee80211_has_a4(hdr->frame_control) && 2313 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2312 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2314 switch (rx->sdata->vif.type) {
2313 !rx->sdata->u.vlan.sta) 2315 case NL80211_IFTYPE_AP_VLAN:
2314 return RX_DROP_UNUSABLE; 2316 if (!rx->sdata->u.vlan.sta)
2317 return RX_DROP_UNUSABLE;
2318 break;
2319 case NL80211_IFTYPE_STATION:
2320 if (!rx->sdata->u.mgd.use_4addr)
2321 return RX_DROP_UNUSABLE;
2322 break;
2323 default:
2324 return RX_DROP_UNUSABLE;
2325 }
2326 check_da = NULL;
2327 check_sa = NULL;
2328 } else switch (rx->sdata->vif.type) {
2329 case NL80211_IFTYPE_AP:
2330 case NL80211_IFTYPE_AP_VLAN:
2331 check_da = NULL;
2332 break;
2333 case NL80211_IFTYPE_STATION:
2334 if (!rx->sta ||
2335 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2336 check_sa = NULL;
2337 break;
2338 case NL80211_IFTYPE_MESH_POINT:
2339 check_sa = NULL;
2340 break;
2341 default:
2342 break;
2343 }
2315 2344
2316 if (is_multicast_ether_addr(hdr->addr1) && 2345 if (is_multicast_ether_addr(hdr->addr1))
2317 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2318 rx->sdata->u.vlan.sta) ||
2319 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
2320 rx->sdata->u.mgd.use_4addr)))
2321 return RX_DROP_UNUSABLE; 2346 return RX_DROP_UNUSABLE;
2322 2347
2323 skb->dev = dev; 2348 skb->dev = dev;
2324 __skb_queue_head_init(&frame_list); 2349 __skb_queue_head_init(&frame_list);
2325 2350
2351 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2352 rx->sdata->vif.addr,
2353 rx->sdata->vif.type))
2354 return RX_DROP_UNUSABLE;
2355
2326 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2356 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2327 rx->sdata->vif.type, 2357 rx->sdata->vif.type,
2328 rx->local->hw.extra_tx_headroom, true); 2358 rx->local->hw.extra_tx_headroom,
2359 check_da, check_sa);
2329 2360
2330 while (!skb_queue_empty(&frame_list)) { 2361 while (!skb_queue_empty(&frame_list)) {
2331 rx->skb = __skb_dequeue(&frame_list); 2362 rx->skb = __skb_dequeue(&frame_list);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b48c1e13e281..42ce9bd4426f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -405,7 +405,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
405 u8 *pos; 405 u8 *pos;
406 u8 pn[6]; 406 u8 pn[6];
407 u64 pn64; 407 u64 pn64;
408 u8 aad[2 * AES_BLOCK_SIZE]; 408 u8 aad[CCM_AAD_LEN];
409 u8 b_0[AES_BLOCK_SIZE]; 409 u8 b_0[AES_BLOCK_SIZE];
410 410
411 if (info->control.hw_key && 411 if (info->control.hw_key &&
@@ -461,10 +461,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
461 461
462 pos += IEEE80211_CCMP_HDR_LEN; 462 pos += IEEE80211_CCMP_HDR_LEN;
463 ccmp_special_blocks(skb, pn, b_0, aad); 463 ccmp_special_blocks(skb, pn, b_0, aad);
464 ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len, 464 return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
465 skb_put(skb, mic_len), mic_len); 465 skb_put(skb, mic_len), mic_len);
466
467 return 0;
468} 466}
469 467
470 468
@@ -639,7 +637,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
639 u8 *pos; 637 u8 *pos;
640 u8 pn[6]; 638 u8 pn[6];
641 u64 pn64; 639 u64 pn64;
642 u8 aad[2 * AES_BLOCK_SIZE]; 640 u8 aad[GCM_AAD_LEN];
643 u8 j_0[AES_BLOCK_SIZE]; 641 u8 j_0[AES_BLOCK_SIZE];
644 642
645 if (info->control.hw_key && 643 if (info->control.hw_key &&
@@ -696,10 +694,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
696 694
697 pos += IEEE80211_GCMP_HDR_LEN; 695 pos += IEEE80211_GCMP_HDR_LEN;
698 gcmp_special_blocks(skb, pn, j_0, aad); 696 gcmp_special_blocks(skb, pn, j_0, aad);
699 ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len, 697 return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
700 skb_put(skb, IEEE80211_GCMP_MIC_LEN)); 698 skb_put(skb, IEEE80211_GCMP_MIC_LEN));
701
702 return 0;
703} 699}
704 700
705ieee80211_tx_result 701ieee80211_tx_result
@@ -1123,9 +1119,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
1123 struct ieee80211_key *key = tx->key; 1119 struct ieee80211_key *key = tx->key;
1124 struct ieee80211_mmie_16 *mmie; 1120 struct ieee80211_mmie_16 *mmie;
1125 struct ieee80211_hdr *hdr; 1121 struct ieee80211_hdr *hdr;
1126 u8 aad[20]; 1122 u8 aad[GMAC_AAD_LEN];
1127 u64 pn64; 1123 u64 pn64;
1128 u8 nonce[12]; 1124 u8 nonce[GMAC_NONCE_LEN];
1129 1125
1130 if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) 1126 if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
1131 return TX_DROP; 1127 return TX_DROP;
@@ -1171,7 +1167,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
1171 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1167 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1172 struct ieee80211_key *key = rx->key; 1168 struct ieee80211_key *key = rx->key;
1173 struct ieee80211_mmie_16 *mmie; 1169 struct ieee80211_mmie_16 *mmie;
1174 u8 aad[20], mic[16], ipn[6], nonce[12]; 1170 u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
1175 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1171 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1176 1172
1177 if (!ieee80211_is_mgmt(hdr->frame_control)) 1173 if (!ieee80211_is_mgmt(hdr->frame_control))
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 13290a70fa71..1308a56f2591 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -246,6 +246,7 @@ enum {
246 ncsi_dev_state_config_gls, 246 ncsi_dev_state_config_gls,
247 ncsi_dev_state_config_done, 247 ncsi_dev_state_config_done,
248 ncsi_dev_state_suspend_select = 0x0401, 248 ncsi_dev_state_suspend_select = 0x0401,
249 ncsi_dev_state_suspend_gls,
249 ncsi_dev_state_suspend_dcnt, 250 ncsi_dev_state_suspend_dcnt,
250 ncsi_dev_state_suspend_dc, 251 ncsi_dev_state_suspend_dc,
251 ncsi_dev_state_suspend_deselect, 252 ncsi_dev_state_suspend_deselect,
@@ -264,6 +265,7 @@ struct ncsi_dev_priv {
264#endif 265#endif
265 unsigned int package_num; /* Number of packages */ 266 unsigned int package_num; /* Number of packages */
266 struct list_head packages; /* List of packages */ 267 struct list_head packages; /* List of packages */
268 struct ncsi_channel *hot_channel; /* Channel was ever active */
267 struct ncsi_request requests[256]; /* Request table */ 269 struct ncsi_request requests[256]; /* Request table */
268 unsigned int request_id; /* Last used request ID */ 270 unsigned int request_id; /* Last used request ID */
269#define NCSI_REQ_START_IDX 1 271#define NCSI_REQ_START_IDX 1
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index b41a6617d498..6898e7229285 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -141,23 +141,35 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
141 return -ENODEV; 141 return -ENODEV;
142 142
143 /* If the channel is active one, we need reconfigure it */ 143 /* If the channel is active one, we need reconfigure it */
144 spin_lock_irqsave(&nc->lock, flags);
144 ncm = &nc->modes[NCSI_MODE_LINK]; 145 ncm = &nc->modes[NCSI_MODE_LINK];
145 hncdsc = (struct ncsi_aen_hncdsc_pkt *)h; 146 hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
146 ncm->data[3] = ntohl(hncdsc->status); 147 ncm->data[3] = ntohl(hncdsc->status);
147 if (!list_empty(&nc->link) || 148 if (!list_empty(&nc->link) ||
148 nc->state != NCSI_CHANNEL_ACTIVE || 149 nc->state != NCSI_CHANNEL_ACTIVE) {
149 (ncm->data[3] & 0x1)) 150 spin_unlock_irqrestore(&nc->lock, flags);
150 return 0; 151 return 0;
152 }
151 153
152 if (ndp->flags & NCSI_DEV_HWA) 154 spin_unlock_irqrestore(&nc->lock, flags);
155 if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
153 ndp->flags |= NCSI_DEV_RESHUFFLE; 156 ndp->flags |= NCSI_DEV_RESHUFFLE;
154 157
155 /* If this channel is the active one and the link doesn't 158 /* If this channel is the active one and the link doesn't
156 * work, we have to choose another channel to be active one. 159 * work, we have to choose another channel to be active one.
157 * The logic here is exactly similar to what we do when link 160 * The logic here is exactly similar to what we do when link
158 * is down on the active channel. 161 * is down on the active channel.
162 *
163 * On the other hand, we need configure it when host driver
164 * state on the active channel becomes ready.
159 */ 165 */
160 ncsi_stop_channel_monitor(nc); 166 ncsi_stop_channel_monitor(nc);
167
168 spin_lock_irqsave(&nc->lock, flags);
169 nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
170 NCSI_CHANNEL_ACTIVE;
171 spin_unlock_irqrestore(&nc->lock, flags);
172
161 spin_lock_irqsave(&ndp->lock, flags); 173 spin_lock_irqsave(&ndp->lock, flags);
162 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 174 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
163 spin_unlock_irqrestore(&ndp->lock, flags); 175 spin_unlock_irqrestore(&ndp->lock, flags);
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 5e509e547c2d..a3bd5fa8ad09 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -540,42 +540,86 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
540 nd->state = ncsi_dev_state_suspend_select; 540 nd->state = ncsi_dev_state_suspend_select;
541 /* Fall through */ 541 /* Fall through */
542 case ncsi_dev_state_suspend_select: 542 case ncsi_dev_state_suspend_select:
543 case ncsi_dev_state_suspend_dcnt:
544 case ncsi_dev_state_suspend_dc:
545 case ncsi_dev_state_suspend_deselect:
546 ndp->pending_req_num = 1; 543 ndp->pending_req_num = 1;
547 544
548 np = ndp->active_package; 545 nca.type = NCSI_PKT_CMD_SP;
549 nc = ndp->active_channel;
550 nca.package = np->id; 546 nca.package = np->id;
551 if (nd->state == ncsi_dev_state_suspend_select) { 547 nca.channel = NCSI_RESERVED_CHANNEL;
552 nca.type = NCSI_PKT_CMD_SP; 548 if (ndp->flags & NCSI_DEV_HWA)
553 nca.channel = NCSI_RESERVED_CHANNEL; 549 nca.bytes[0] = 0;
554 if (ndp->flags & NCSI_DEV_HWA) 550 else
555 nca.bytes[0] = 0; 551 nca.bytes[0] = 1;
556 else 552
557 nca.bytes[0] = 1; 553 /* To retrieve the last link states of channels in current
554 * package when current active channel needs fail over to
555 * another one. It means we will possibly select another
556 * channel as next active one. The link states of channels
557 * are most important factor of the selection. So we need
558 * accurate link states. Unfortunately, the link states on
559 * inactive channels can't be updated with LSC AEN in time.
560 */
561 if (ndp->flags & NCSI_DEV_RESHUFFLE)
562 nd->state = ncsi_dev_state_suspend_gls;
563 else
558 nd->state = ncsi_dev_state_suspend_dcnt; 564 nd->state = ncsi_dev_state_suspend_dcnt;
559 } else if (nd->state == ncsi_dev_state_suspend_dcnt) { 565 ret = ncsi_xmit_cmd(&nca);
560 nca.type = NCSI_PKT_CMD_DCNT; 566 if (ret)
561 nca.channel = nc->id; 567 goto error;
562 nd->state = ncsi_dev_state_suspend_dc; 568
563 } else if (nd->state == ncsi_dev_state_suspend_dc) { 569 break;
564 nca.type = NCSI_PKT_CMD_DC; 570 case ncsi_dev_state_suspend_gls:
571 ndp->pending_req_num = np->channel_num;
572
573 nca.type = NCSI_PKT_CMD_GLS;
574 nca.package = np->id;
575
576 nd->state = ncsi_dev_state_suspend_dcnt;
577 NCSI_FOR_EACH_CHANNEL(np, nc) {
565 nca.channel = nc->id; 578 nca.channel = nc->id;
566 nca.bytes[0] = 1; 579 ret = ncsi_xmit_cmd(&nca);
567 nd->state = ncsi_dev_state_suspend_deselect; 580 if (ret)
568 } else if (nd->state == ncsi_dev_state_suspend_deselect) { 581 goto error;
569 nca.type = NCSI_PKT_CMD_DP;
570 nca.channel = NCSI_RESERVED_CHANNEL;
571 nd->state = ncsi_dev_state_suspend_done;
572 } 582 }
573 583
584 break;
585 case ncsi_dev_state_suspend_dcnt:
586 ndp->pending_req_num = 1;
587
588 nca.type = NCSI_PKT_CMD_DCNT;
589 nca.package = np->id;
590 nca.channel = nc->id;
591
592 nd->state = ncsi_dev_state_suspend_dc;
574 ret = ncsi_xmit_cmd(&nca); 593 ret = ncsi_xmit_cmd(&nca);
575 if (ret) { 594 if (ret)
576 nd->state = ncsi_dev_state_functional; 595 goto error;
577 return; 596
578 } 597 break;
598 case ncsi_dev_state_suspend_dc:
599 ndp->pending_req_num = 1;
600
601 nca.type = NCSI_PKT_CMD_DC;
602 nca.package = np->id;
603 nca.channel = nc->id;
604 nca.bytes[0] = 1;
605
606 nd->state = ncsi_dev_state_suspend_deselect;
607 ret = ncsi_xmit_cmd(&nca);
608 if (ret)
609 goto error;
610
611 break;
612 case ncsi_dev_state_suspend_deselect:
613 ndp->pending_req_num = 1;
614
615 nca.type = NCSI_PKT_CMD_DP;
616 nca.package = np->id;
617 nca.channel = NCSI_RESERVED_CHANNEL;
618
619 nd->state = ncsi_dev_state_suspend_done;
620 ret = ncsi_xmit_cmd(&nca);
621 if (ret)
622 goto error;
579 623
580 break; 624 break;
581 case ncsi_dev_state_suspend_done: 625 case ncsi_dev_state_suspend_done:
@@ -589,6 +633,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
589 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n", 633 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
590 nd->state); 634 nd->state);
591 } 635 }
636
637 return;
638error:
639 nd->state = ncsi_dev_state_functional;
592} 640}
593 641
594static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) 642static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
@@ -597,6 +645,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
597 struct net_device *dev = nd->dev; 645 struct net_device *dev = nd->dev;
598 struct ncsi_package *np = ndp->active_package; 646 struct ncsi_package *np = ndp->active_package;
599 struct ncsi_channel *nc = ndp->active_channel; 647 struct ncsi_channel *nc = ndp->active_channel;
648 struct ncsi_channel *hot_nc = NULL;
600 struct ncsi_cmd_arg nca; 649 struct ncsi_cmd_arg nca;
601 unsigned char index; 650 unsigned char index;
602 unsigned long flags; 651 unsigned long flags;
@@ -702,12 +751,20 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
702 break; 751 break;
703 case ncsi_dev_state_config_done: 752 case ncsi_dev_state_config_done:
704 spin_lock_irqsave(&nc->lock, flags); 753 spin_lock_irqsave(&nc->lock, flags);
705 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) 754 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
755 hot_nc = nc;
706 nc->state = NCSI_CHANNEL_ACTIVE; 756 nc->state = NCSI_CHANNEL_ACTIVE;
707 else 757 } else {
758 hot_nc = NULL;
708 nc->state = NCSI_CHANNEL_INACTIVE; 759 nc->state = NCSI_CHANNEL_INACTIVE;
760 }
709 spin_unlock_irqrestore(&nc->lock, flags); 761 spin_unlock_irqrestore(&nc->lock, flags);
710 762
763 /* Update the hot channel */
764 spin_lock_irqsave(&ndp->lock, flags);
765 ndp->hot_channel = hot_nc;
766 spin_unlock_irqrestore(&ndp->lock, flags);
767
711 ncsi_start_channel_monitor(nc); 768 ncsi_start_channel_monitor(nc);
712 ncsi_process_next_channel(ndp); 769 ncsi_process_next_channel(ndp);
713 break; 770 break;
@@ -725,10 +782,14 @@ error:
725static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp) 782static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
726{ 783{
727 struct ncsi_package *np; 784 struct ncsi_package *np;
728 struct ncsi_channel *nc, *found; 785 struct ncsi_channel *nc, *found, *hot_nc;
729 struct ncsi_channel_mode *ncm; 786 struct ncsi_channel_mode *ncm;
730 unsigned long flags; 787 unsigned long flags;
731 788
789 spin_lock_irqsave(&ndp->lock, flags);
790 hot_nc = ndp->hot_channel;
791 spin_unlock_irqrestore(&ndp->lock, flags);
792
732 /* The search is done once an inactive channel with up 793 /* The search is done once an inactive channel with up
733 * link is found. 794 * link is found.
734 */ 795 */
@@ -746,6 +807,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
746 if (!found) 807 if (!found)
747 found = nc; 808 found = nc;
748 809
810 if (nc == hot_nc)
811 found = nc;
812
749 ncm = &nc->modes[NCSI_MODE_LINK]; 813 ncm = &nc->modes[NCSI_MODE_LINK];
750 if (ncm->data[2] & 0x1) { 814 if (ncm->data[2] & 0x1) {
751 spin_unlock_irqrestore(&nc->lock, flags); 815 spin_unlock_irqrestore(&nc->lock, flags);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index fcb5d1df11e9..004af030ef1a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -361,16 +361,9 @@ next_hook:
361 if (ret == 0) 361 if (ret == 0)
362 ret = -EPERM; 362 ret = -EPERM;
363 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { 363 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
364 int err; 364 ret = nf_queue(skb, state, &entry, verdict);
365 365 if (ret == 1 && entry)
366 RCU_INIT_POINTER(state->hook_entries, entry); 366 goto next_hook;
367 err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
368 if (err < 0) {
369 if (err == -ESRCH &&
370 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
371 goto next_hook;
372 kfree_skb(skb);
373 }
374 } 367 }
375 return ret; 368 return ret;
376} 369}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ba6a1d421222..df2f5a3901df 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -983,7 +983,7 @@ static void gc_worker(struct work_struct *work)
983 return; 983 return;
984 984
985 ratio = scanned ? expired_count * 100 / scanned : 0; 985 ratio = scanned ? expired_count * 100 / scanned : 0;
986 if (ratio >= 90) 986 if (ratio >= 90 || expired_count == GC_MAX_EVICTS)
987 next_run = 0; 987 next_run = 0;
988 988
989 gc_work->last_bucket = i; 989 gc_work->last_bucket = i;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index e0adb5959342..9fdb655f85bc 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -18,7 +18,7 @@ unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,
18 18
19/* nf_queue.c */ 19/* nf_queue.c */
20int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, 20int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
21 unsigned int queuenum); 21 struct nf_hook_entry **entryp, unsigned int verdict);
22void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry); 22void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
23int __init netfilter_queue_init(void); 23int __init netfilter_queue_init(void);
24 24
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 96964a0070e1..8f08d759844a 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -107,13 +107,8 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
107 rcu_read_unlock(); 107 rcu_read_unlock();
108} 108}
109 109
110/* 110static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
111 * Any packet that leaves via this function must come back 111 unsigned int queuenum)
112 * through nf_reinject().
113 */
114int nf_queue(struct sk_buff *skb,
115 struct nf_hook_state *state,
116 unsigned int queuenum)
117{ 112{
118 int status = -ENOENT; 113 int status = -ENOENT;
119 struct nf_queue_entry *entry = NULL; 114 struct nf_queue_entry *entry = NULL;
@@ -161,6 +156,27 @@ err:
161 return status; 156 return status;
162} 157}
163 158
159/* Packets leaving via this function must come back through nf_reinject(). */
160int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
161 struct nf_hook_entry **entryp, unsigned int verdict)
162{
163 struct nf_hook_entry *entry = *entryp;
164 int ret;
165
166 RCU_INIT_POINTER(state->hook_entries, entry);
167 ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
168 if (ret < 0) {
169 if (ret == -ESRCH &&
170 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
171 *entryp = rcu_dereference(entry->next);
172 return 1;
173 }
174 kfree_skb(skb);
175 }
176
177 return 0;
178}
179
164void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) 180void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
165{ 181{
166 struct nf_hook_entry *hook_entry; 182 struct nf_hook_entry *hook_entry;
@@ -187,26 +203,26 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
187 entry->state.thresh = INT_MIN; 203 entry->state.thresh = INT_MIN;
188 204
189 if (verdict == NF_ACCEPT) { 205 if (verdict == NF_ACCEPT) {
190 next_hook: 206 hook_entry = rcu_dereference(hook_entry->next);
191 verdict = nf_iterate(skb, &entry->state, &hook_entry); 207 if (hook_entry)
208next_hook:
209 verdict = nf_iterate(skb, &entry->state, &hook_entry);
192 } 210 }
193 211
194 switch (verdict & NF_VERDICT_MASK) { 212 switch (verdict & NF_VERDICT_MASK) {
195 case NF_ACCEPT: 213 case NF_ACCEPT:
196 case NF_STOP: 214 case NF_STOP:
215okfn:
197 local_bh_disable(); 216 local_bh_disable();
198 entry->state.okfn(entry->state.net, entry->state.sk, skb); 217 entry->state.okfn(entry->state.net, entry->state.sk, skb);
199 local_bh_enable(); 218 local_bh_enable();
200 break; 219 break;
201 case NF_QUEUE: 220 case NF_QUEUE:
202 RCU_INIT_POINTER(entry->state.hook_entries, hook_entry); 221 err = nf_queue(skb, &entry->state, &hook_entry, verdict);
203 err = nf_queue(skb, &entry->state, 222 if (err == 1) {
204 verdict >> NF_VERDICT_QBITS); 223 if (hook_entry)
205 if (err < 0) {
206 if (err == -ESRCH &&
207 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
208 goto next_hook; 224 goto next_hook;
209 kfree_skb(skb); 225 goto okfn;
210 } 226 }
211 break; 227 break;
212 case NF_STOLEN: 228 case NF_STOLEN:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index b70d3ea1430e..24db22257586 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4423,7 +4423,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
4423 */ 4423 */
4424unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) 4424unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
4425{ 4425{
4426 int val; 4426 u32 val;
4427 4427
4428 val = ntohl(nla_get_be32(attr)); 4428 val = ntohl(nla_get_be32(attr));
4429 if (val > max) 4429 if (val > max)
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index e3b83c31da2e..517f08767a3c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -158,7 +158,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
158 if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { 158 if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
159 if (!(set->flags & NFT_SET_TIMEOUT)) 159 if (!(set->flags & NFT_SET_TIMEOUT))
160 return -EINVAL; 160 return -EINVAL;
161 timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT])); 161 timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
162 tb[NFTA_DYNSET_TIMEOUT])));
162 } 163 }
163 164
164 priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); 165 priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -246,7 +247,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
246 goto nla_put_failure; 247 goto nla_put_failure;
247 if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) 248 if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
248 goto nla_put_failure; 249 goto nla_put_failure;
249 if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout), 250 if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
251 cpu_to_be64(jiffies_to_msecs(priv->timeout)),
250 NFTA_DYNSET_PAD)) 252 NFTA_DYNSET_PAD))
251 goto nla_put_failure; 253 goto nla_put_failure;
252 if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr)) 254 if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index a84cf3d66056..47beb3abcc9d 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,7 +59,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
59 const struct nlattr * const tb[]) 59 const struct nlattr * const tb[])
60{ 60{
61 struct nft_exthdr *priv = nft_expr_priv(expr); 61 struct nft_exthdr *priv = nft_expr_priv(expr);
62 u32 offset, len, err; 62 u32 offset, len;
63 int err;
63 64
64 if (tb[NFTA_EXTHDR_DREG] == NULL || 65 if (tb[NFTA_EXTHDR_DREG] == NULL ||
65 tb[NFTA_EXTHDR_TYPE] == NULL || 66 tb[NFTA_EXTHDR_TYPE] == NULL ||
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 09473b415b95..baf694de3935 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -44,6 +44,7 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
44 [NFTA_HASH_LEN] = { .type = NLA_U32 }, 44 [NFTA_HASH_LEN] = { .type = NLA_U32 },
45 [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, 45 [NFTA_HASH_MODULUS] = { .type = NLA_U32 },
46 [NFTA_HASH_SEED] = { .type = NLA_U32 }, 46 [NFTA_HASH_SEED] = { .type = NLA_U32 },
47 [NFTA_HASH_OFFSET] = { .type = NLA_U32 },
47}; 48};
48 49
49static int nft_hash_init(const struct nft_ctx *ctx, 50static int nft_hash_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index c6d5358482d1..fbc88009ca2e 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -28,22 +28,20 @@ static void nft_range_eval(const struct nft_expr *expr,
28 const struct nft_pktinfo *pkt) 28 const struct nft_pktinfo *pkt)
29{ 29{
30 const struct nft_range_expr *priv = nft_expr_priv(expr); 30 const struct nft_range_expr *priv = nft_expr_priv(expr);
31 bool mismatch;
32 int d1, d2; 31 int d1, d2;
33 32
34 d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len); 33 d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
35 d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len); 34 d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
36 switch (priv->op) { 35 switch (priv->op) {
37 case NFT_RANGE_EQ: 36 case NFT_RANGE_EQ:
38 mismatch = (d1 < 0 || d2 > 0); 37 if (d1 < 0 || d2 > 0)
38 regs->verdict.code = NFT_BREAK;
39 break; 39 break;
40 case NFT_RANGE_NEQ: 40 case NFT_RANGE_NEQ:
41 mismatch = (d1 >= 0 && d2 <= 0); 41 if (d1 >= 0 && d2 <= 0)
42 regs->verdict.code = NFT_BREAK;
42 break; 43 break;
43 } 44 }
44
45 if (mismatch)
46 regs->verdict.code = NFT_BREAK;
47} 45}
48 46
49static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = { 47static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
@@ -59,6 +57,7 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
59 struct nft_range_expr *priv = nft_expr_priv(expr); 57 struct nft_range_expr *priv = nft_expr_priv(expr);
60 struct nft_data_desc desc_from, desc_to; 58 struct nft_data_desc desc_from, desc_to;
61 int err; 59 int err;
60 u32 op;
62 61
63 err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from), 62 err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
64 &desc_from, tb[NFTA_RANGE_FROM_DATA]); 63 &desc_from, tb[NFTA_RANGE_FROM_DATA]);
@@ -80,7 +79,20 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
80 if (err < 0) 79 if (err < 0)
81 goto err2; 80 goto err2;
82 81
83 priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP])); 82 err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
83 if (err < 0)
84 goto err2;
85
86 switch (op) {
87 case NFT_RANGE_EQ:
88 case NFT_RANGE_NEQ:
89 break;
90 default:
91 err = -EINVAL;
92 goto err2;
93 }
94
95 priv->op = op;
84 priv->len = desc_from.len; 96 priv->len = desc_from.len;
85 return 0; 97 return 0;
86err2: 98err2:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index e0aa7c1d0224..fc4977456c30 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1513,7 +1513,7 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1513 if (!num_hooks) 1513 if (!num_hooks)
1514 return ERR_PTR(-EINVAL); 1514 return ERR_PTR(-EINVAL);
1515 1515
1516 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); 1516 ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1517 if (ops == NULL) 1517 if (ops == NULL)
1518 return ERR_PTR(-ENOMEM); 1518 return ERR_PTR(-ENOMEM);
1519 1519
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index 018eed7e1ff1..8668a5c18dc3 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
32 li.u.ulog.copy_len = info->len; 32 li.u.ulog.copy_len = info->len;
33 li.u.ulog.group = info->group; 33 li.u.ulog.group = info->group;
34 li.u.ulog.qthreshold = info->threshold; 34 li.u.ulog.qthreshold = info->threshold;
35 li.u.ulog.flags = 0;
35 36
36 if (info->flags & XT_NFLOG_F_COPY_LEN) 37 if (info->flags & XT_NFLOG_F_COPY_LEN)
37 li.u.ulog.flags |= NF_LOG_F_COPY_LEN; 38 li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 2fab0c65aa94..b89b688e9d01 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -431,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
431 CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie. 431 CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
432*/ 432*/
433#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24)) 433#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
434#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24)) 434#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
435 435
436/* Repeated shift and or gives us all 1s, final shift and add 1 gives 436/* Repeated shift and or gives us all 1s, final shift and add 1 gives
437 * us the power of 2 below the theoretical max, so GCC simply does a 437 * us the power of 2 below the theoretical max, so GCC simply does a
@@ -473,7 +473,7 @@ static u64 user2credits(u64 user, int revision)
473 return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1, 473 return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
474 XT_HASHLIMIT_SCALE); 474 XT_HASHLIMIT_SCALE);
475 } else { 475 } else {
476 if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) 476 if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
477 return div64_u64(user, XT_HASHLIMIT_SCALE_v2) 477 return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
478 * HZ * CREDITS_PER_JIFFY; 478 * HZ * CREDITS_PER_JIFFY;
479 479
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 89d53104c6b3..000e70377f85 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -26,6 +26,8 @@
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27MODULE_AUTHOR("Fan Du <fan.du@windriver.com>"); 27MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
28MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match"); 28MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
29MODULE_ALIAS("ipt_ipcomp");
30MODULE_ALIAS("ip6t_ipcomp");
29 31
30/* Returns 1 if the spi is matched by the range, 0 otherwise */ 32/* Returns 1 if the spi is matched by the range, 0 otherwise */
31static inline bool 33static inline bool
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 11db0d619c00..d2238b204691 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
250static int packet_direct_xmit(struct sk_buff *skb) 250static int packet_direct_xmit(struct sk_buff *skb)
251{ 251{
252 struct net_device *dev = skb->dev; 252 struct net_device *dev = skb->dev;
253 netdev_features_t features; 253 struct sk_buff *orig_skb = skb;
254 struct netdev_queue *txq; 254 struct netdev_queue *txq;
255 int ret = NETDEV_TX_BUSY; 255 int ret = NETDEV_TX_BUSY;
256 256
@@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
258 !netif_carrier_ok(dev))) 258 !netif_carrier_ok(dev)))
259 goto drop; 259 goto drop;
260 260
261 features = netif_skb_features(skb); 261 skb = validate_xmit_skb_list(skb, dev);
262 if (skb_needs_linearize(skb, features) && 262 if (skb != orig_skb)
263 __skb_linearize(skb))
264 goto drop; 263 goto drop;
265 264
266 txq = skb_get_tx_queue(dev, skb); 265 txq = skb_get_tx_queue(dev, skb);
@@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
280 return ret; 279 return ret;
281drop: 280drop:
282 atomic_long_inc(&dev->tx_dropped); 281 atomic_long_inc(&dev->tx_dropped);
283 kfree_skb(skb); 282 kfree_skb_list(skb);
284 return NET_XMIT_DROP; 283 return NET_XMIT_DROP;
285} 284}
286 285
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 0e72bec1529f..56c7d27eefee 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -13,5 +13,5 @@ obj-$(CONFIG_RDS_TCP) += rds_tcp.o
13rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \ 13rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
14 tcp_send.o tcp_stats.o 14 tcp_send.o tcp_stats.o
15 15
16ccflags-$(CONFIG_RDS_DEBUG) := -DDEBUG 16ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG
17 17
diff --git a/net/rds/rds.h b/net/rds/rds.h
index fd0bccb2f9f9..67ba67c058b1 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -33,7 +33,7 @@
33#define KERNEL_HAS_ATOMIC64 33#define KERNEL_HAS_ATOMIC64
34#endif 34#endif
35 35
36#ifdef DEBUG 36#ifdef RDS_DEBUG
37#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args) 37#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
38#else 38#else
39/* sigh, pr_debug() causes unused variable warnings */ 39/* sigh, pr_debug() causes unused variable warnings */
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 4353a29f3b57..1ed18d8c9c9f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -276,7 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
276 goto error; 276 goto error;
277 277
278 trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), 278 trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
279 here, ERR_PTR(ret)); 279 here, NULL);
280 280
281 spin_lock_bh(&call->conn->params.peer->lock); 281 spin_lock_bh(&call->conn->params.peer->lock);
282 hlist_add_head(&call->error_link, 282 hlist_add_head(&call->error_link,
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 941b724d523b..862eea6b266c 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -193,8 +193,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
193 fl6->fl6_dport = htons(7001); 193 fl6->fl6_dport = htons(7001);
194 fl6->fl6_sport = htons(7000); 194 fl6->fl6_sport = htons(7000);
195 dst = ip6_route_output(&init_net, NULL, fl6); 195 dst = ip6_route_output(&init_net, NULL, fl6);
196 if (IS_ERR(dst)) { 196 if (dst->error) {
197 _leave(" [route err %ld]", PTR_ERR(dst)); 197 _leave(" [route err %d]", dst->error);
198 return; 198 return;
199 } 199 }
200 break; 200 break;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a512b18c0088..f893d180da1c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1028,8 +1028,7 @@ static struct nlattr *find_dump_kind(const struct nlmsghdr *n)
1028 1028
1029 if (tb[1] == NULL) 1029 if (tb[1] == NULL)
1030 return NULL; 1030 return NULL;
1031 if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]), 1031 if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0)
1032 nla_len(tb[1]), NULL) < 0)
1033 return NULL; 1032 return NULL;
1034 kind = tb2[TCA_ACT_KIND]; 1033 kind = tb2[TCA_ACT_KIND];
1035 1034
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 667dc382df82..6b07fba5770b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -207,8 +207,11 @@ out:
207static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, 207static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
208 u64 lastuse) 208 u64 lastuse)
209{ 209{
210 tcf_lastuse_update(&a->tcfa_tm); 210 struct tcf_mirred *m = to_mirred(a);
211 struct tcf_t *tm = &m->tcf_tm;
212
211 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); 213 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
214 tm->lastuse = lastuse;
212} 215}
213 216
214static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, 217static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2ee29a3375f6..2b2a7974e4bb 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -345,7 +345,8 @@ replay:
345 if (err == 0) { 345 if (err == 0) {
346 struct tcf_proto *next = rtnl_dereference(tp->next); 346 struct tcf_proto *next = rtnl_dereference(tp->next);
347 347
348 tfilter_notify(net, skb, n, tp, fh, 348 tfilter_notify(net, skb, n, tp,
349 t->tcm_handle,
349 RTM_DELTFILTER, false); 350 RTM_DELTFILTER, false);
350 if (tcf_destroy(tp, false)) 351 if (tcf_destroy(tp, false))
351 RCU_INIT_POINTER(*back, next); 352 RCU_INIT_POINTER(*back, next);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 2a5c1896d18f..6cb0df859195 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -418,6 +418,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
418 __u8 has_data = 0; 418 __u8 has_data = 0;
419 int gso = 0; 419 int gso = 0;
420 int pktcount = 0; 420 int pktcount = 0;
421 int auth_len = 0;
421 struct dst_entry *dst; 422 struct dst_entry *dst;
422 unsigned char *auth = NULL; /* pointer to auth in skb data */ 423 unsigned char *auth = NULL; /* pointer to auth in skb data */
423 424
@@ -510,7 +511,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
510 list_for_each_entry(chunk, &packet->chunk_list, list) { 511 list_for_each_entry(chunk, &packet->chunk_list, list) {
511 int padded = SCTP_PAD4(chunk->skb->len); 512 int padded = SCTP_PAD4(chunk->skb->len);
512 513
513 if (pkt_size + padded > tp->pathmtu) 514 if (chunk == packet->auth)
515 auth_len = padded;
516 else if (auth_len + padded + packet->overhead >
517 tp->pathmtu)
518 goto nomem;
519 else if (pkt_size + padded > tp->pathmtu)
514 break; 520 break;
515 pkt_size += padded; 521 pkt_size += padded;
516 } 522 }
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 026e3bca4a94..8ec20a64a3f8 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
3422 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, 3422 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3423 commands); 3423 commands);
3424 3424
3425 /* Report violation if chunk len overflows */
3426 ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
3427 if (ch_end > skb_tail_pointer(skb))
3428 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3429 commands);
3430
3425 /* Now that we know we at least have a chunk header, 3431 /* Now that we know we at least have a chunk header,
3426 * do things that are type appropriate. 3432 * do things that are type appropriate.
3427 */ 3433 */
@@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
3453 } 3459 }
3454 } 3460 }
3455 3461
3456 /* Report violation if chunk len overflows */
3457 ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
3458 if (ch_end > skb_tail_pointer(skb))
3459 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3460 commands);
3461
3462 ch = (sctp_chunkhdr_t *) ch_end; 3462 ch = (sctp_chunkhdr_t *) ch_end;
3463 } while (ch_end < skb_tail_pointer(skb)); 3463 } while (ch_end < skb_tail_pointer(skb));
3464 3464
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fb02c7033307..9fbb6feb8c27 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4687,7 +4687,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4687static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4687static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4688 int __user *optlen) 4688 int __user *optlen)
4689{ 4689{
4690 if (len <= 0) 4690 if (len == 0)
4691 return -EINVAL; 4691 return -EINVAL;
4692 if (len > sizeof(struct sctp_event_subscribe)) 4692 if (len > sizeof(struct sctp_event_subscribe))
4693 len = sizeof(struct sctp_event_subscribe); 4693 len = sizeof(struct sctp_event_subscribe);
@@ -6430,6 +6430,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
6430 if (get_user(len, optlen)) 6430 if (get_user(len, optlen))
6431 return -EFAULT; 6431 return -EFAULT;
6432 6432
6433 if (len < 0)
6434 return -EINVAL;
6435
6433 lock_sock(sk); 6436 lock_sock(sk);
6434 6437
6435 switch (optname) { 6438 switch (optname) {
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 02beb35f577f..3b95fe980fa2 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -771,6 +771,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
771 u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD; 771 u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
772 int err; 772 int err;
773 773
774 if (!netif_is_bridge_port(dev))
775 return -EOPNOTSUPP;
776
774 err = switchdev_port_attr_get(dev, &attr); 777 err = switchdev_port_attr_get(dev, &attr);
775 if (err && err != -EOPNOTSUPP) 778 if (err && err != -EOPNOTSUPP)
776 return err; 779 return err;
@@ -926,6 +929,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
926 struct nlattr *afspec; 929 struct nlattr *afspec;
927 int err = 0; 930 int err = 0;
928 931
932 if (!netif_is_bridge_port(dev))
933 return -EOPNOTSUPP;
934
929 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), 935 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
930 IFLA_PROTINFO); 936 IFLA_PROTINFO);
931 if (protinfo) { 937 if (protinfo) {
@@ -959,6 +965,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
959{ 965{
960 struct nlattr *afspec; 966 struct nlattr *afspec;
961 967
968 if (!netif_is_bridge_port(dev))
969 return -EOPNOTSUPP;
970
962 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), 971 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
963 IFLA_AF_SPEC); 972 IFLA_AF_SPEC);
964 if (afspec) 973 if (afspec)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 753f774cb46f..aa1babbea385 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -247,11 +247,17 @@ int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
247 * 247 *
248 * RCU is locked, no other locks set 248 * RCU is locked, no other locks set
249 */ 249 */
250void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked) 250void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
251 struct tipc_msg *hdr)
251{ 252{
252 struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq; 253 struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
254 u16 acked = msg_bcast_ack(hdr);
253 struct sk_buff_head xmitq; 255 struct sk_buff_head xmitq;
254 256
257 /* Ignore bc acks sent by peer before bcast synch point was received */
258 if (msg_bc_ack_invalid(hdr))
259 return;
260
255 __skb_queue_head_init(&xmitq); 261 __skb_queue_head_init(&xmitq);
256 262
257 tipc_bcast_lock(net); 263 tipc_bcast_lock(net);
@@ -279,11 +285,11 @@ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
279 __skb_queue_head_init(&xmitq); 285 __skb_queue_head_init(&xmitq);
280 286
281 tipc_bcast_lock(net); 287 tipc_bcast_lock(net);
282 if (msg_type(hdr) == STATE_MSG) { 288 if (msg_type(hdr) != STATE_MSG) {
289 tipc_link_bc_init_rcv(l, hdr);
290 } else if (!msg_bc_ack_invalid(hdr)) {
283 tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq); 291 tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
284 rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq); 292 rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
285 } else {
286 tipc_link_bc_init_rcv(l, hdr);
287 } 293 }
288 tipc_bcast_unlock(net); 294 tipc_bcast_unlock(net);
289 295
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5ffe34472ccd..855d53c64ab3 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -55,7 +55,8 @@ void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
55int tipc_bcast_get_mtu(struct net *net); 55int tipc_bcast_get_mtu(struct net *net);
56int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list); 56int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
57int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); 57int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
58void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked); 58void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
59 struct tipc_msg *hdr);
59int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l, 60int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
60 struct tipc_msg *hdr); 61 struct tipc_msg *hdr);
61int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg); 62int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b36e16cdc945..1055164c6232 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1312,6 +1312,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1312 msg_set_next_sent(hdr, l->snd_nxt); 1312 msg_set_next_sent(hdr, l->snd_nxt);
1313 msg_set_ack(hdr, l->rcv_nxt - 1); 1313 msg_set_ack(hdr, l->rcv_nxt - 1);
1314 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1); 1314 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1315 msg_set_bc_ack_invalid(hdr, !node_up);
1315 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); 1316 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1316 msg_set_link_tolerance(hdr, tolerance); 1317 msg_set_link_tolerance(hdr, tolerance);
1317 msg_set_linkprio(hdr, priority); 1318 msg_set_linkprio(hdr, priority);
@@ -1574,6 +1575,7 @@ static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1574 __skb_queue_head_init(&list); 1575 __skb_queue_head_init(&list);
1575 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list)) 1576 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1576 return; 1577 return;
1578 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
1577 tipc_link_xmit(l, &list, xmitq); 1579 tipc_link_xmit(l, &list, xmitq);
1578} 1580}
1579 1581
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index c3832cdf2278..50a739860d37 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -714,6 +714,23 @@ static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
714 msg_set_bits(m, 5, 13, 0x1, s); 714 msg_set_bits(m, 5, 13, 0x1, s);
715} 715}
716 716
717static inline bool msg_bc_ack_invalid(struct tipc_msg *m)
718{
719 switch (msg_user(m)) {
720 case BCAST_PROTOCOL:
721 case NAME_DISTRIBUTOR:
722 case LINK_PROTOCOL:
723 return msg_bits(m, 5, 14, 0x1);
724 default:
725 return false;
726 }
727}
728
729static inline void msg_set_bc_ack_invalid(struct tipc_msg *m, bool invalid)
730{
731 msg_set_bits(m, 5, 14, 0x1, invalid);
732}
733
717static inline char *msg_media_addr(struct tipc_msg *m) 734static inline char *msg_media_addr(struct tipc_msg *m)
718{ 735{
719 return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET]; 736 return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index a04fe9be1c60..c1cfd92de17a 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -156,6 +156,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
156 pr_warn("Bulk publication failure\n"); 156 pr_warn("Bulk publication failure\n");
157 return; 157 return;
158 } 158 }
159 msg_set_bc_ack_invalid(buf_msg(skb), true);
159 item = (struct distr_item *)msg_data(buf_msg(skb)); 160 item = (struct distr_item *)msg_data(buf_msg(skb));
160 } 161 }
161 162
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 7ef14e2d2356..9d2f4c2b08ab 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1535,7 +1535,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
1535 if (unlikely(usr == LINK_PROTOCOL)) 1535 if (unlikely(usr == LINK_PROTOCOL))
1536 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq); 1536 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
1537 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) 1537 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
1538 tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack); 1538 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
1539 1539
1540 /* Receive packet directly if conditions permit */ 1540 /* Receive packet directly if conditions permit */
1541 tipc_node_read_lock(n); 1541 tipc_node_read_lock(n);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 0082f4b01795..14b3f007826d 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -104,13 +104,16 @@ static int wiphy_suspend(struct device *dev)
104 104
105 rtnl_lock(); 105 rtnl_lock();
106 if (rdev->wiphy.registered) { 106 if (rdev->wiphy.registered) {
107 if (!rdev->wiphy.wowlan_config) 107 if (!rdev->wiphy.wowlan_config) {
108 cfg80211_leave_all(rdev); 108 cfg80211_leave_all(rdev);
109 cfg80211_process_rdev_events(rdev);
110 }
109 if (rdev->ops->suspend) 111 if (rdev->ops->suspend)
110 ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); 112 ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
111 if (ret == 1) { 113 if (ret == 1) {
112 /* Driver refuse to configure wowlan */ 114 /* Driver refuse to configure wowlan */
113 cfg80211_leave_all(rdev); 115 cfg80211_leave_all(rdev);
116 cfg80211_process_rdev_events(rdev);
114 ret = rdev_suspend(rdev, NULL); 117 ret = rdev_suspend(rdev, NULL);
115 } 118 }
116 } 119 }
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8edce22d1b93..5ea12afc7706 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -420,8 +420,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
420} 420}
421EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); 421EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
422 422
423static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr, 423int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
424 const u8 *addr, enum nl80211_iftype iftype) 424 const u8 *addr, enum nl80211_iftype iftype)
425{ 425{
426 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 426 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
427 struct { 427 struct {
@@ -525,13 +525,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
525 525
526 return 0; 526 return 0;
527} 527}
528 528EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr);
529int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
530 enum nl80211_iftype iftype)
531{
532 return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
533}
534EXPORT_SYMBOL(ieee80211_data_to_8023);
535 529
536int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, 530int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
537 enum nl80211_iftype iftype, 531 enum nl80211_iftype iftype,
@@ -746,24 +740,18 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
746void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, 740void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
747 const u8 *addr, enum nl80211_iftype iftype, 741 const u8 *addr, enum nl80211_iftype iftype,
748 const unsigned int extra_headroom, 742 const unsigned int extra_headroom,
749 bool has_80211_header) 743 const u8 *check_da, const u8 *check_sa)
750{ 744{
751 unsigned int hlen = ALIGN(extra_headroom, 4); 745 unsigned int hlen = ALIGN(extra_headroom, 4);
752 struct sk_buff *frame = NULL; 746 struct sk_buff *frame = NULL;
753 u16 ethertype; 747 u16 ethertype;
754 u8 *payload; 748 u8 *payload;
755 int offset = 0, remaining, err; 749 int offset = 0, remaining;
756 struct ethhdr eth; 750 struct ethhdr eth;
757 bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb); 751 bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
758 bool reuse_skb = false; 752 bool reuse_skb = false;
759 bool last = false; 753 bool last = false;
760 754
761 if (has_80211_header) {
762 err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
763 if (err)
764 goto out;
765 }
766
767 while (!last) { 755 while (!last) {
768 unsigned int subframe_len; 756 unsigned int subframe_len;
769 int len; 757 int len;
@@ -780,8 +768,17 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
780 goto purge; 768 goto purge;
781 769
782 offset += sizeof(struct ethhdr); 770 offset += sizeof(struct ethhdr);
783 /* reuse skb for the last subframe */
784 last = remaining <= subframe_len + padding; 771 last = remaining <= subframe_len + padding;
772
773 /* FIXME: should we really accept multicast DA? */
774 if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
775 !ether_addr_equal(check_da, eth.h_dest)) ||
776 (check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
777 offset += len + padding;
778 continue;
779 }
780
781 /* reuse skb for the last subframe */
785 if (!skb_is_nonlinear(skb) && !reuse_frag && last) { 782 if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
786 skb_pull(skb, offset); 783 skb_pull(skb, offset);
787 frame = skb; 784 frame = skb;
@@ -819,7 +816,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
819 816
820 purge: 817 purge:
821 __skb_queue_purge(list); 818 __skb_queue_purge(list);
822 out:
823 dev_kfree_skb(skb); 819 dev_kfree_skb(skb);
824} 820}
825EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); 821EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c
index d17550198d06..6db6b21fdc6d 100644
--- a/samples/bpf/parse_ldabs.c
+++ b/samples/bpf/parse_ldabs.c
@@ -4,6 +4,7 @@
4 * modify it under the terms of version 2 of the GNU General Public 4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation. 5 * License as published by the Free Software Foundation.
6 */ 6 */
7#define KBUILD_MODNAME "foo"
7#include <linux/ip.h> 8#include <linux/ip.h>
8#include <linux/ipv6.h> 9#include <linux/ipv6.h>
9#include <linux/in.h> 10#include <linux/in.h>
diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c
index cf2511c33905..10af53d33cc2 100644
--- a/samples/bpf/parse_simple.c
+++ b/samples/bpf/parse_simple.c
@@ -4,6 +4,7 @@
4 * modify it under the terms of version 2 of the GNU General Public 4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation. 5 * License as published by the Free Software Foundation.
6 */ 6 */
7#define KBUILD_MODNAME "foo"
7#include <linux/ip.h> 8#include <linux/ip.h>
8#include <linux/ipv6.h> 9#include <linux/ipv6.h>
9#include <linux/in.h> 10#include <linux/in.h>
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
index edab34dce79b..95c16324760c 100644
--- a/samples/bpf/parse_varlen.c
+++ b/samples/bpf/parse_varlen.c
@@ -4,6 +4,7 @@
4 * modify it under the terms of version 2 of the GNU General Public 4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation. 5 * License as published by the Free Software Foundation.
6 */ 6 */
7#define KBUILD_MODNAME "foo"
7#include <linux/if_ether.h> 8#include <linux/if_ether.h>
8#include <linux/ip.h> 9#include <linux/ip.h>
9#include <linux/ipv6.h> 10#include <linux/ipv6.h>
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
index fa051b3d53ee..274c884c87fe 100644
--- a/samples/bpf/tcbpf1_kern.c
+++ b/samples/bpf/tcbpf1_kern.c
@@ -1,3 +1,4 @@
1#define KBUILD_MODNAME "foo"
1#include <uapi/linux/bpf.h> 2#include <uapi/linux/bpf.h>
2#include <uapi/linux/if_ether.h> 3#include <uapi/linux/if_ether.h>
3#include <uapi/linux/if_packet.h> 4#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
index 3303bb85593b..9c823a609e75 100644
--- a/samples/bpf/tcbpf2_kern.c
+++ b/samples/bpf/tcbpf2_kern.c
@@ -5,6 +5,7 @@
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation. 6 * License as published by the Free Software Foundation.
7 */ 7 */
8#define KBUILD_MODNAME "foo"
8#include <uapi/linux/bpf.h> 9#include <uapi/linux/bpf.h>
9#include <uapi/linux/if_ether.h> 10#include <uapi/linux/if_ether.h>
10#include <uapi/linux/if_packet.h> 11#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c
index 10ff73404e3a..1547b36a7b7b 100644
--- a/samples/bpf/test_cgrp2_tc_kern.c
+++ b/samples/bpf/test_cgrp2_tc_kern.c
@@ -4,6 +4,7 @@
4 * modify it under the terms of version 2 of the GNU General Public 4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation. 5 * License as published by the Free Software Foundation.
6 */ 6 */
7#define KBUILD_MODNAME "foo"
7#include <uapi/linux/if_ether.h> 8#include <uapi/linux/if_ether.h>
8#include <uapi/linux/in6.h> 9#include <uapi/linux/in6.h>
9#include <uapi/linux/ipv6.h> 10#include <uapi/linux/ipv6.h>