-rw-r--r--  Documentation/networking/scaling.txt | 2
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 7
-rw-r--r--  arch/mips/boot/Makefile | 10
-rw-r--r--  arch/mips/include/asm/highmem.h | 5
-rw-r--r--  arch/mips/include/asm/kprobes.h | 3
-rw-r--r--  arch/mips/include/asm/pgtable-32.h | 7
-rw-r--r--  arch/mips/kernel/branch.c | 4
-rw-r--r--  arch/mips/kernel/ftrace.c | 24
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 6
-rw-r--r--  arch/mips/mm/pgtable-32.c | 6
-rw-r--r--  arch/powerpc/include/asm/bug.h | 2
-rw-r--r--  arch/powerpc/include/asm/xive.h | 12
-rw-r--r--  arch/powerpc/kvm/book3s_xive_template.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/npu-dma.c | 3
-rw-r--r--  arch/powerpc/sysdev/xive/common.c | 2
-rw-r--r--  arch/s390/configs/default_defconfig | 39
-rw-r--r--  arch/s390/configs/gcov_defconfig | 28
-rw-r--r--  arch/s390/configs/performance_defconfig | 27
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 6
-rw-r--r--  arch/s390/defconfig | 8
-rw-r--r--  arch/s390/kernel/entry.S | 19
-rw-r--r--  arch/x86/include/asm/extable.h | 1
-rw-r--r--  arch/x86/kernel/traps.c | 2
-rw-r--r--  arch/x86/mm/extable.c | 3
-rw-r--r--  arch/x86/mm/init.c | 6
-rw-r--r--  arch/xtensa/include/asm/irq.h | 3
-rw-r--r--  arch/xtensa/kernel/irq.c | 5
-rw-r--r--  arch/xtensa/kernel/setup.c | 3
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/xtensa/platforms/iss/simdisk.c | 3
-rw-r--r--  arch/xtensa/platforms/xtfpga/include/platform/hardware.h | 6
-rw-r--r--  arch/xtensa/platforms/xtfpga/setup.c | 10
-rw-r--r--  block/blk-sysfs.c | 34
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 34
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 9
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 4
-rw-r--r--  drivers/cpuidle/dt_idle_states.c | 4
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c | 6
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 8
-rw-r--r--  drivers/firmware/dmi-id.c | 4
-rw-r--r--  drivers/firmware/dmi_scan.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 36
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 9
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 7
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 22
-rw-r--r--  drivers/gpu/host1x/dev.c | 2
-rw-r--r--  drivers/hid/hid-core.c | 282
-rw-r--r--  drivers/hsi/clients/ssi_protocol.c | 2
-rw-r--r--  drivers/iio/adc/meson_saradc.c | 4
-rw-r--r--  drivers/iio/adc/mxs-lradc-adc.c | 7
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dma.c | 1
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dmaengine.c | 1
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 39
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 3
-rw-r--r--  drivers/infiniband/core/addr.c | 10
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 471
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 22
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 384
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 18
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 314
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 61
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 333
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 5
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 68
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 9
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 11
-rw-r--r--  drivers/irqchip/irq-xtensa-mx.c | 2
-rw-r--r--  drivers/irqchip/irq-xtensa-pic.c | 2
-rw-r--r--  drivers/leds/leds-bcm6328.c | 4
-rw-r--r--  drivers/leds/trigger/ledtrig-heartbeat.c | 31
-rw-r--r--  drivers/media/cec/Kconfig | 1
-rw-r--r--  drivers/media/cec/cec-api.c | 8
-rw-r--r--  drivers/media/i2c/tc358743.c | 2
-rw-r--r--  drivers/media/rc/sir_ir.c | 6
-rw-r--r--  drivers/media/usb/rainshadow-cec/rainshadow-cec.c | 1
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 2
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 9
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 27
-rw-r--r--  drivers/net/bonding/bond_main.c | 6
-rw-r--r--  drivers/net/caif/caif_hsi.c | 2
-rw-r--r--  drivers/net/caif/caif_serial.c | 2
-rw-r--r--  drivers/net/caif/caif_spi.c | 2
-rw-r--r--  drivers/net/caif/caif_virtio.c | 2
-rw-r--r--  drivers/net/can/dev.c | 3
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 2
-rw-r--r--  drivers/net/can/slcan.c | 7
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 2
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 4
-rw-r--r--  drivers/net/can/vcan.c | 4
-rw-r--r--  drivers/net/can/vxcan.c | 4
-rw-r--r--  drivers/net/dummy.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 35
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 179
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 18
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 67
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 3
-rw-r--r--  drivers/net/geneve.c | 2
-rw-r--r--  drivers/net/gtp.c | 2
-rw-r--r--  drivers/net/hamradio/6pack.c | 2
-rw-r--r--  drivers/net/hamradio/bpqether.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 5
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 54
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 30
-rw-r--r--  drivers/net/ifb.c | 4
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 2
-rw-r--r--  drivers/net/loopback.c | 4
-rw-r--r--  drivers/net/macsec.c | 4
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/netconsole.c | 2
-rw-r--r--  drivers/net/nlmon.c | 2
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/slip/slip.c | 7
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/cdc-phonet.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 6
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/veth.c | 4
-rw-r--r--  drivers/net/vrf.c | 38
-rw-r--r--  drivers/net/vsockmon.c | 2
-rw-r--r--  drivers/net/vxlan.c | 2
-rw-r--r--  drivers/net/wan/dlci.c | 2
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 2
-rw-r--r--  drivers/net/wan/lapbether.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/main.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 3
-rw-r--r--  drivers/net/wireless/intersil/hostap/hostap_main.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.c | 2
-rw-r--r--  drivers/pci/access.c | 12
-rw-r--r--  drivers/pci/endpoint/functions/Kconfig | 1
-rw-r--r--  drivers/platform/x86/intel_telemetry_debugfs.c | 16
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c | 12
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 38
-rw-r--r--  drivers/s390/crypto/ap_card.c | 9
-rw-r--r--  drivers/s390/crypto/ap_queue.c | 9
-rw-r--r--  drivers/s390/net/netiucv.c | 4
-rw-r--r--  drivers/staging/iio/cdc/ad7152.c | 6
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/mon.c | 2
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 3
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/os_intfs.c | 2
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/osdep_service.c | 2
-rw-r--r--  drivers/usb/gadget/composite.c | 11
-rw-r--r--  drivers/usb/gadget/function/f_phonet.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 9
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 13
-rw-r--r--  drivers/usb/gadget/udc/net2280.c | 9
-rw-r--r--  drivers/usb/host/xhci-mem.c | 7
-rw-r--r--  drivers/usb/host/xhci-pci.c | 3
-rw-r--r--  drivers/video/fbdev/core/fbmon.c | 2
-rw-r--r--  drivers/video/fbdev/smscufx.c | 5
-rw-r--r--  drivers/video/fbdev/udlfb.c | 9
-rw-r--r--  drivers/video/fbdev/via/viafbdev.c | 8
-rw-r--r--  fs/btrfs/hash.c | 5
-rw-r--r--  fs/ceph/acl.c | 1
-rw-r--r--  fs/ceph/export.c | 4
-rw-r--r--  fs/ceph/inode.c | 5
-rw-r--r--  fs/ceph/mds_client.c | 4
-rw-r--r--  fs/configfs/item.c | 8
-rw-r--r--  fs/configfs/symlink.c | 3
-rw-r--r--  fs/dcache.c | 10
-rw-r--r--  fs/f2fs/f2fs.h | 5
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/read_write.c | 2
-rw-r--r--  fs/ufs/balloc.c | 22
-rw-r--r--  fs/ufs/inode.c | 47
-rw-r--r--  fs/ufs/super.c | 64
-rw-r--r--  fs/ufs/ufs_fs.h | 7
-rw-r--r--  fs/ufs/util.c | 17
-rw-r--r--  fs/ufs/util.h | 9
-rw-r--r--  fs/userfaultfd.c | 29
-rw-r--r--  fs/xfs/xfs_buf.c | 2
-rw-r--r--  fs/xfs/xfs_icache.c | 5
-rw-r--r--  include/acpi/actbl.h | 14
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/configfs.h | 3
-rw-r--r--  include/linux/dmi.h | 2
-rw-r--r--  include/linux/netdevice.h | 15
-rw-r--r--  include/media/cec-notifier.h | 10
-rw-r--r--  include/media/cec.h | 2
-rw-r--r--  include/uapi/linux/ethtool.h | 6
-rw-r--r--  include/uapi/linux/openvswitch.h | 1
-rw-r--r--  kernel/irq/manage.c | 4
-rw-r--r--  kernel/sched/core.c | 2
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 3
-rw-r--r--  kernel/sched/fair.c | 2
-rw-r--r--  kernel/time/alarmtimer.c | 14
-rw-r--r--  kernel/time/tick-broadcast.c | 4
-rw-r--r--  kernel/time/tick-internal.h | 2
-rw-r--r--  lib/libcrc32c.c | 6
-rw-r--r--  mm/huge_memory.c | 8
-rw-r--r--  mm/memory-failure.c | 5
-rw-r--r--  mm/swap_cgroup.c | 3
-rw-r--r--  mm/vmpressure.c | 6
-rw-r--r--  net/8021q/vlan_dev.c | 4
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 5
-rw-r--r--  net/batman-adv/routing.c | 2
-rw-r--r--  net/batman-adv/soft-interface.c | 5
-rw-r--r--  net/bluetooth/6lowpan.c | 2
-rw-r--r--  net/bridge/br_device.c | 2
-rw-r--r--  net/caif/caif_socket.c | 4
-rw-r--r--  net/caif/cfpkt_skbuff.c | 6
-rw-r--r--  net/caif/chnl_net.c | 4
-rw-r--r--  net/can/af_can.c | 3
-rw-r--r--  net/core/dev.c | 42
-rw-r--r--  net/core/dst.c | 14
-rw-r--r--  net/core/rtnetlink.c | 3
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c | 4
-rw-r--r--  net/hsr/hsr_device.c | 4
-rw-r--r--  net/hsr/hsr_forward.c | 3
-rw-r--r--  net/hsr/hsr_framereg.c | 9
-rw-r--r--  net/hsr/hsr_framereg.h | 2
-rw-r--r--  net/ieee802154/6lowpan/core.c | 2
-rw-r--r--  net/ipv4/icmp.c | 8
-rw-r--r--  net/ipv4/igmp.c | 21
-rw-r--r--  net/ipv4/ip_tunnel.c | 4
-rw-r--r--  net/ipv4/ipmr.c | 34
-rw-r--r--  net/ipv6/icmp.c | 2
-rw-r--r--  net/ipv6/ila/ila_xlat.c | 1
-rw-r--r--  net/ipv6/ip6_gre.c | 9
-rw-r--r--  net/ipv6/ip6_tunnel.c | 8
-rw-r--r--  net/ipv6/ip6_vti.c | 8
-rw-r--r--  net/ipv6/ip6mr.c | 2
-rw-r--r--  net/ipv6/proc.c | 2
-rw-r--r--  net/ipv6/route.c | 1
-rw-r--r--  net/ipv6/sit.c | 6
-rw-r--r--  net/irda/irlan/irlan_eth.c | 2
-rw-r--r--  net/l2tp/l2tp_eth.c | 15
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/iface.c | 7
-rw-r--r--  net/mac80211/mlme.c | 62
-rw-r--r--  net/mac80211/rx.c | 6
-rw-r--r--  net/mac80211/wpa.c | 9
-rw-r--r--  net/mac802154/iface.c | 7
-rw-r--r--  net/openvswitch/vport-internal_dev.c | 4
-rw-r--r--  net/phonet/pep-gprs.c | 2
-rw-r--r--  net/sched/act_pedit.c | 4
-rw-r--r--  net/sched/act_police.c | 8
-rw-r--r--  net/sctp/socket.c | 4
-rw-r--r--  net/tipc/msg.c | 2
-rw-r--r--  net/unix/af_unix.c | 7
-rw-r--r--  security/selinux/hooks.c | 5
-rw-r--r--  tools/objtool/builtin-check.c | 3
-rw-r--r--  tools/perf/Makefile.config | 38
-rw-r--r--  tools/perf/Makefile.perf | 2
-rw-r--r--  tools/perf/arch/Build | 2
-rw-r--r--  tools/perf/pmu-events/Build | 4
-rw-r--r--  tools/perf/tests/Build | 2
-rw-r--r--  tools/perf/tests/task-exit.c | 2
-rw-r--r--  tools/perf/util/evsel.c | 12
-rw-r--r--  tools/perf/util/header.c | 2
-rw-r--r--  tools/perf/util/unwind-libdw.c | 8
-rw-r--r--  tools/testing/selftests/bpf/bpf_endian.h | 41
299 files changed, 2818 insertions(+), 1859 deletions(-)
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 59f4db2a0c85..f55639d71d35 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -122,7 +122,7 @@ associated flow of the packet. The hash is either provided by hardware
 or will be computed in the stack. Capable hardware can pass the hash in
 the receive descriptor for the packet; this would usually be the same
 hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
-skb->rx_hash and can be used elsewhere in the stack as a hash of the
+skb->hash and can be used elsewhere in the stack as a hash of the
 packet’s flow.
 
 Each receive hardware queue has an associated list of CPUs to which
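As background for the skb->hash field mentioned in the documentation hunk above, here is a minimal sketch (not part of this patch) of how a receive path can record a hardware-supplied hash so the rest of the stack can reuse it. skb_set_hash() and PKT_HASH_TYPE_L4 are the existing helpers from include/linux/skbuff.h; the surrounding function name and the hw_hash parameter are hypothetical.

#include <linux/skbuff.h>

/* Illustrative only: store a hash taken from a (hypothetical) receive
 * descriptor so it ends up in skb->hash for later use by RPS/RFS. */
static void example_record_rx_hash(struct sk_buff *skb, u32 hw_hash)
{
	/* PKT_HASH_TYPE_L4 marks the value as a 4-tuple (e.g. Toeplitz) hash. */
	skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L4);
}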
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 71f930501ade..c870d6f01ac2 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly;
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -57,6 +58,7 @@ static const int bpf2a64[] = {
 	/* temporary registers for internal BPF JIT */
 	[TMP_REG_1] = A64_R(10),
 	[TMP_REG_2] = A64_R(11),
+	[TMP_REG_3] = A64_R(12),
 	/* tail_call_cnt */
 	[TCALL_CNT] = A64_R(26),
 	/* temporary register for blinding constants */
@@ -319,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const u8 src = bpf2a64[insn->src_reg];
 	const u8 tmp = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const u8 tmp3 = bpf2a64[TMP_REG_3];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
@@ -689,10 +692,10 @@ emit_cond_jmp:
 		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
 		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
 		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-		emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+		emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
 		jmp_offset = -3;
 		check_imm19(jmp_offset);
-		emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+		emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
 		break;
 
 	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
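For readers unfamiliar with the exclusive-access sequence being patched here, a small illustrative C sketch (not from this patch) of the same load-exclusive/store-exclusive retry loop in AArch64 inline assembly. STXR reports success through a status register that must be distinct from the data register, which is exactly why the JIT gains TMP_REG_3 above. The function name is hypothetical.

#include <stdint.h>

/* Illustrative only: atomically add 'val' to '*ptr' with an LDXR/STXR
 * retry loop, mirroring the instruction pattern the BPF JIT emits for
 * BPF_XADD. The status register (%w[st]) is kept separate from the data
 * register (%[tmp]), matching the fix above. */
static inline void llsc_add64(uint64_t *ptr, uint64_t val)
{
	uint64_t tmp;
	uint32_t st;

	asm volatile(
	"1:	ldxr	%[tmp], %[mem]\n"
	"	add	%[tmp], %[tmp], %[val]\n"
	"	stxr	%w[st], %[tmp], %[mem]\n"
	"	cbnz	%w[st], 1b\n"
	: [tmp] "=&r" (tmp), [st] "=&r" (st), [mem] "+Q" (*ptr)
	: [val] "r" (val)
	: "memory");
}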
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2728a9a9c7c5..145b5ce8eb7e 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@
 		-DADDR_BITS=$(ADDR_BITS) \
 		-DADDR_CELLS=$(itb_addr_cells)
 
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
 
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
 
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
 
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
 
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
 
 quiet_cmd_itb-image = ITB $@
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index d34536e7653f..279b6d14ffeb 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table;
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
 #define LAST_PKMAP 1024
+#endif
+
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 291846d9ba83..ad1a99948f27 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t;
 
 #define flush_insn_slot(p)						\
 do {									\
-	flush_icache_range((unsigned long)p->addr,			\
+	if (p->addr)							\
+		flush_icache_range((unsigned long)p->addr,		\
 			   (unsigned long)p->addr +			\
 			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
 } while (0)
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 6f94bed571c4..74afe8c76bdd 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -19,6 +19,10 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
 extern int temp_tlb_entry;
 
 /*
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 #define VMALLOC_START	MAP_BASE
 
-#define PKMAP_BASE	(0xfe000000UL)
+#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
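To see what the new PKMAP_END/PKMAP_BASE arithmetic does, here is a small stand-alone sketch. The FIXADDR_START value is a made-up placeholder, not the real MIPS fixmap address; only the rounding logic mirrors the hunk above.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define LAST_PKMAP	512			/* CONFIG_PHYS_ADDR_T_64BIT case */
#define FIXADDR_START	0xfffe0000UL		/* hypothetical value */

/* Round FIXADDR_START down to a LAST_PKMAP*PAGE_SIZE boundary, then put
 * the pkmap window immediately below that boundary. */
#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT) - 1))
#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)

int main(void)
{
	printf("PKMAP_END  = %#lx\n", PKMAP_END);	/* 0xffe00000 */
	printf("PKMAP_BASE = %#lx\n", PKMAP_BASE);	/* 0xffc00000 */
	return 0;
}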
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index b11facd11c9d..f702a459a830 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			break;
 		}
 		/* Compact branch: BNEZC || JIALC */
-		if (insn.i_format.rs)
+		if (!insn.i_format.rs) {
+			/* JIALC: set $31/ra */
 			regs->regs[31] = epc + 4;
+		}
 		regs->cp0_epc += 8;
 		break;
 #endif
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 30a3b75e88eb..9d9b8fbae202 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command)
 
 #endif
 
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-	if (ip >= (unsigned long)_stext &&
-	    ip <= (unsigned long)_etext)
-		return 1;
-	return 0;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod,
 	 * If ip is in kernel space, no long call, otherwise, long call is
 	 * needed.
 	 */
-	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
 #ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
 #else
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 
 #ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
 #else
-	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
 				INSN_NOP : insn_la_mcount[1]);
 #endif
 }
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
 	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
 	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
-	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 	 * entries configured through the tracing/set_graph_function interface.
 	 */
 
-	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
 	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 313a88b2973f..f3e301f95aef 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		break;
 	case CPU_P5600:
 	case CPU_P6600:
-	case CPU_I6400:
 		/* 8-bit event numbers */
 		raw_id = config & 0x1ff;
 		base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		raw_event.range = P;
 #endif
 		break;
+	case CPU_I6400:
+		/* 8-bit event numbers */
+		base_id = config & 0xff;
+		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+		break;
 	case CPU_1004K:
 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911ba748..b19a3c506b1e 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
 	/*
 	 * Fixed mappings:
 	 */
-	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
 
 #ifdef CONFIG_HIGHMEM
 	/*
 	 * Permanent kmaps:
 	 */
 	vaddr = PKMAP_BASE;
-	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+	fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 
 	pgd = swapper_pg_dir + __pgd_offset(vaddr);
 	pud = pud_offset(pgd, vaddr);
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f2c562a0a427..0151af6c2a50 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -104,7 +104,7 @@
 		"1:	"PPC_TLNEI"	%4,0\n"			\
 		_EMIT_BUG_ENTRY					\
 		: : "i" (__FILE__), "i" (__LINE__),		\
-		  "i" (BUGFLAG_TAINT(TAINT_WARN)),		\
+		  "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
 		  "i" (sizeof(struct bug_entry)),		\
 		  "r" (__ret_warn_on));				\
 	}							\
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index c8a822acf962..c23ff4389ca2 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -94,11 +94,13 @@ struct xive_q {
  * store at 0 and some ESBs support doing a trigger via a
  * separate trigger page.
  */
-#define XIVE_ESB_GET		0x800
-#define XIVE_ESB_SET_PQ_00	0xc00
-#define XIVE_ESB_SET_PQ_01	0xd00
-#define XIVE_ESB_SET_PQ_10	0xe00
-#define XIVE_ESB_SET_PQ_11	0xf00
+#define XIVE_ESB_STORE_EOI	0x400	/* Store */
+#define XIVE_ESB_LOAD_EOI	0x000	/* Load */
+#define XIVE_ESB_GET		0x800	/* Load */
+#define XIVE_ESB_SET_PQ_00	0xc00	/* Load */
+#define XIVE_ESB_SET_PQ_01	0xd00	/* Load */
+#define XIVE_ESB_SET_PQ_10	0xe00	/* Load */
+#define XIVE_ESB_SET_PQ_11	0xf00	/* Load */
 
 #define XIVE_ESB_VAL_P		0x2
 #define XIVE_ESB_VAL_Q		0x1
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 023a31133c37..4636ca6e7d38 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		__x_writeq(0, __x_eoi_page(xd));
+		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		opal_int_eoi(hw_irq);
 	} else {
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 	 * properly.
 	 */
 	if (xd->flags & XIVE_IRQ_FLAG_LSI)
-		__x_readq(__x_eoi_page(xd));
+		__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
 	else {
 		eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
 
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 78fa9395b8c5..e6f444b46207 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 	if (WARN_ON(!gpdev))
 		return NULL;
 
-	if (WARN_ON(!gpdev->dev.of_node))
+	/* Not all PCI devices have device-tree nodes */
+	if (!gpdev->dev.of_node)
 		return NULL;
 
 	/* Get assoicated PCI device */
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 913825086b8d..8f5e3035483b 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		out_be64(xd->eoi_mmio, 0);
+		out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		/*
 		 * The FW told us to call it. This happens for some
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index a5039fa89314..282072206df7 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -30,6 +30,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_XFS_DEBUG=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_BTRFS_DEBUG=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_DEBUG=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_RODATA_TEST=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
 CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
 CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_STRING_HELPERS=y
-CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 83970b5afb2b..3c6b78189fbc 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index fbc6542aaf59..653d72bcc007 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e23d97c13735..afa46a7406ea 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=2
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_HZ_100=y
+# CONFIG_ARCH_RANDOM is not set
 # CONFIG_COMPACTION is not set
 # CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
 # CONFIG_CHECK_STACK is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ZFCP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_HVC_IUCV is not set
+# CONFIG_HW_RANDOM_S390 is not set
 CONFIG_RAW_DRIVER=y
 # CONFIG_SCLP_ASYNC is not set
 # CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_INOTIFY_USER is not set
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 97189dbaf34b..20244a38c886 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@ CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e408d9cc5b96..6315037335ba 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -231,12 +231,17 @@ ENTRY(sie64a)
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
 # See also .Lcleanup_sie
-.Lrewind_pad:
-	nop	0
+.Lrewind_pad6:
+	nopr	7
+.Lrewind_pad4:
+	nopr	7
+.Lrewind_pad2:
+	nopr	7
 	.globl sie_exit
 sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
@@ -249,7 +254,9 @@ sie_exit:
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 	j	sie_exit
 
-	EX_TABLE(.Lrewind_pad,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
 EXPORT_SYMBOL(sie64a)
 EXPORT_SYMBOL(sie_exit)
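For readers checking the landing-pad arithmetic, here is a small standalone C sketch (not kernel code) of why three 2-byte nopr instructions cover every possible rewind. It assumes the rewound PSW starts at sie_exit and that the instruction length code (ILC) is 2, 4 or 6 bytes; the pad_start address is made up.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned long pad_start = 0x100;         /* .Lrewind_pad6 (assumed address) */
	const unsigned long sie_exit  = pad_start + 6; /* three 2-byte nopr instructions */

	for (int ilc = 2; ilc <= 6; ilc += 2) {
		unsigned long rewound = sie_exit - ilc;

		/* every rewind lands inside the pad, on a 2-byte boundary */
		assert(rewound >= pad_start && rewound < sie_exit);
		assert((rewound - pad_start) % 2 == 0);
		printf("ILC %d rewinds to pad offset %lu\n", ilc, rewound - pad_start);
	}
	return 0;
}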
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index b8ad261d11dc..c66d19e3c23e 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -29,6 +29,7 @@ struct pt_regs;
 	} while (0)
 
 extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_bug(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
 extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
 
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3995d3a777d4..bf54309b85da 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr)
 	return ud == INSN_UD0 || ud == INSN_UD2;
 }
 
-static int fixup_bug(struct pt_regs *regs, int trapnr)
+int fixup_bug(struct pt_regs *regs, int trapnr)
 {
 	if (trapnr != X86_TRAP_UD)
 		return 0;
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 35ea061010a1..0ea8afcb929c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
 	if (fixup_exception(regs, trapnr))
 		return;
 
+	if (fixup_bug(regs, trapnr))
+		return;
+
 fail:
 	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
 		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cbc87ea98751..9b3f9fa5b283 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,16 +161,16 @@ static int page_size_mask;
 
 static void __init probe_page_size_mask(void)
 {
-#if !defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
 	 * use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
+	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
 		page_size_mask |= 1 << PG_LEVEL_2M;
-#endif
+	else
+		direct_gbpages = 0;
 
 	/* Enable PSE if available */
 	if (boot_cpu_has(X86_FEATURE_PSE))
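A minimal userspace illustration of the pattern this hunk switches to: replacing an #ifdef block with an IS_ENABLED()-style compile-time constant keeps both branches visible to the compiler while the dead one is folded away. IS_ENABLED_DEMO and CONFIG_KMEMCHECK_DEMO below are stand-ins, not the kernel's kconfig macros.

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_KMEMCHECK_DEMO 0     /* flip to 1 to emulate KMEMCHECK=y */
#define IS_ENABLED_DEMO(x) (x)      /* simplified; the real macro inspects CONFIG_* tokens */

int main(void)
{
	unsigned int page_size_mask = 0;
	bool pse = true, debug_pagealloc = false;

	/* both branches are compiled; the constant decides which one survives */
	if (pse && !debug_pagealloc && !IS_ENABLED_DEMO(CONFIG_KMEMCHECK_DEMO))
		page_size_mask |= 1u << 1;      /* allow 2M pages */
	else
		printf("falling back to 4k identity mapping\n");

	printf("page_size_mask = %#x\n", page_size_mask);
	return 0;
}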
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88ea7646..19707db966f1 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
 # define PLATFORM_NR_IRQS 0
 #endif
 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
 
 #if VARIANT_NR_IRQS == 0
 static inline void variant_init_irq(void) { }
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a265edd6ac37..99341028cc77 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
 	int irq = irq_find_mapping(NULL, hwirq);
 
-	if (hwirq >= NR_IRQS) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__func__, hwirq);
-	}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 394ef08300b6..33bfa5270d95 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot)
 		    (ccount_freq/10000) % 100,
 		    loops_per_jiffy/(500000/HZ),
 		    (loops_per_jiffy/(5000/HZ)) % 100);
-
-	seq_printf(f,"flags\t\t: "
+	seq_puts(f, "flags\t\t: "
 #if XCHAL_HAVE_NMI
 		     "nmi "
 #endif
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 30d9fc21e076..162c77e53ca8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -118,7 +118,7 @@ SECTIONS
   SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
   SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
   SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
-  SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48)
+  SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
   SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
 #endif
 
@@ -306,13 +306,13 @@ SECTIONS
 		  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
 		  .DoubleExceptionVector.literal,
-		  DOUBLEEXC_VECTOR_VADDR - 48,
+		  DOUBLEEXC_VECTOR_VADDR - 20,
 		  SIZEOF(.UserExceptionVector.text),
 		  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
 		  .DoubleExceptionVector.text,
 		  DOUBLEEXC_VECTOR_VADDR,
-		  48,
+		  20,
 		  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 02e94bb3ad3e..c45b90bb9339 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -317,8 +317,7 @@ static int __init simdisk_init(void)
 	if (simdisk_count > MAX_SIMDISK_COUNT)
 		simdisk_count = MAX_SIMDISK_COUNT;
 
-	sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
-			GFP_KERNEL);
+	sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
 	if (sddev == NULL)
 		goto out_unregister;
 
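The kmalloc_array() conversion above is about multiplication overflow, not style. A userspace sketch of the same guard follows; struct simdisk here is just a 64-byte stand-in for the real structure.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;            /* n * size would overflow, refuse */
	return malloc(n * size);
}

int main(void)
{
	struct simdisk { char pad[64]; };       /* stand-in for the driver's struct */
	struct simdisk *sddev = alloc_array(16, sizeof(*sddev));

	printf("allocation %s\n", sddev ? "succeeded" : "failed");
	free(sddev);
	return 0;
}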
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b440a1..1fda7e20dfcb 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
 
 /* Interrupt configuration. */
 
-#define PLATFORM_NR_IRQS	10
+#define PLATFORM_NR_IRQS	0
 
 /* Default assignment of LX60 devices to external interrupts. */
 
 #ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM	XCHAL_EXTINT3_NUM
 #define OETH_IRQ		XCHAL_EXTINT4_NUM
+#define C67X00_IRQ		XCHAL_EXTINT8_NUM
 #else
 #define DUART16552_INTNUM	XCHAL_EXTINT0_NUM
 #define OETH_IRQ		XCHAL_EXTINT1_NUM
+#define C67X00_IRQ		XCHAL_EXTINT5_NUM
 #endif
 
 /*
@@ -63,5 +65,5 @@
 
 #define C67X00_PADDR		(XCHAL_KIO_PADDR + 0x0D0D0000)
 #define C67X00_SIZE		0x10
-#define C67X00_IRQ		5
+
 #endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be723eb2b..42285f35d313 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[2] = { /* IRQ number */
-		.start = OETH_IRQ,
-		.end = OETH_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+		.end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = { /* IRQ number */
-		.start = C67X00_IRQ,
-		.end = C67X00_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+		.end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -247,7 +247,7 @@ static struct resource serial_resource = {
 static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase = DUART16552_PADDR,
-		.irq = DUART16552_INTNUM,
+		.irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
 		.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
 			 UPF_IOREMAP,
 		.iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
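A tiny sketch of the hwirq-to-Linux-IRQ translation these resource tables now use: the +1 offset keeps Linux IRQ 0, which the IRQ core treats as "no interrupt", from ever being handed to a device. The hwirq values below are illustrative, not the board's real ones.

#include <stdio.h>

#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)

int main(void)
{
	int oeth_hwirq = 1, duart_hwirq = 0;    /* assumed external interrupt numbers */

	printf("OETH:  hwirq %d -> Linux IRQ %d\n",
	       oeth_hwirq, XTENSA_PIC_LINUX_IRQ(oeth_hwirq));
	printf("DUART: hwirq %d -> Linux IRQ %d\n",
	       duart_hwirq, XTENSA_PIC_LINUX_IRQ(duart_hwirq));
	return 0;
}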
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 283da7fbe034..27aceab1cc31 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 }
 
 /**
- * blk_release_queue: - release a &struct request_queue when it is no longer needed
- * @kobj: the kobj belonging to the request queue to be released
+ * __blk_release_queue - release a request queue when it is no longer needed
+ * @work: pointer to the release_work member of the request queue to be released
  *
  * Description:
- *     blk_release_queue is the pair to blk_init_queue() or
- *     blk_queue_make_request(). It should be called when a request queue is
- *     being released; typically when a block device is being de-registered.
- *     Currently, its primary task it to free all the &struct request
- *     structures that were allocated to the queue and the queue itself.
+ *     blk_release_queue is the counterpart of blk_init_queue(). It should be
+ *     called when a request queue is being released; typically when a block
+ *     device is being de-registered. Its primary task it to free the queue
+ *     itself.
  *
- * Note:
+ * Notes:
  *     The low level driver must have finished any outstanding requests first
  *     via blk_cleanup_queue().
- **/
-static void blk_release_queue(struct kobject *kobj)
+ *
+ * Although blk_release_queue() may be called with preemption disabled,
+ * __blk_release_queue() may sleep.
+ */
+static void __blk_release_queue(struct work_struct *work)
 {
-	struct request_queue *q =
-		container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q = container_of(work, typeof(*q), release_work);
 
 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
 		blk_stat_remove_callback(q, q->poll_cb);
@@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj)
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
 
+static void blk_release_queue(struct kobject *kobj)
+{
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
+
+	INIT_WORK(&q->release_work, __blk_release_queue);
+	schedule_work(&q->release_work);
+}
+
 static const struct sysfs_ops queue_sysfs_ops = {
 	.show = queue_attr_show,
 	.store = queue_attr_store,
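The deferral above hinges on recovering the queue from an embedded member. Here is a userspace sketch of that container_of() step; fake_queue and an int member stand in for struct request_queue and struct work_struct, and the actual workqueue scheduling is kernel-only and not shown.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_queue {
	int id;
	int release_work;       /* stand-in for struct work_struct */
};

static void release_handler(int *work)
{
	/* recover the enclosing object from the embedded member */
	struct fake_queue *q = container_of(work, struct fake_queue, release_work);

	printf("releasing queue %d\n", q->id);
}

int main(void)
{
	struct fake_queue q = { .id = 42 };

	release_handler(&q.release_work);
	return 0;
}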
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 7abe66505739..0d2e98920069 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -416,9 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
 		}
 	}
 
-	table_desc->validation_count++;
-	if (table_desc->validation_count == 0) {
-		table_desc->validation_count--;
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count++;
+
+		/*
+		 * Detect validation_count overflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count overflows\n",
+				      table_desc));
+		}
 	}
 
 	*out_table = table_desc->pointer;
@@ -445,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc)
 
 	ACPI_FUNCTION_TRACE(acpi_tb_put_table);
 
-	if (table_desc->validation_count == 0) {
-		ACPI_WARNING((AE_INFO,
-			      "Table %p, Validation count is zero before decrement\n",
-			      table_desc));
-		return_VOID;
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count--;
+
+		/*
+		 * Detect validation_count underflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count underflows\n",
+				      table_desc));
+			return_VOID;
+		}
 	}
-	table_desc->validation_count--;
 
 	if (table_desc->validation_count == 0) {
 
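A standalone sketch of the saturating-counter behaviour both hunks above implement, with ACPI_MAX_TABLE_VALIDATIONS modelled by a small cap: once saturated, the count neither grows nor shrinks, and the warning fires only once.

#include <stdio.h>

#define MAX_VALIDATIONS 3

static unsigned int count;

static void get_table(void)
{
	if (count < MAX_VALIDATIONS) {
		count++;
		if (count >= MAX_VALIDATIONS)
			printf("warning: validation count saturated\n");
	}
}

static void put_table(void)
{
	if (count < MAX_VALIDATIONS)    /* saturated tables are never decremented */
		count--;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		get_table();
	printf("count after 5 gets: %u\n", count);      /* stays at the cap */

	put_table();
	printf("count after 1 put : %u\n", count);      /* still at the cap */
	return 0;
}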
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index e0587c85bafd..ff096d9755b9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -474,15 +474,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
474 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); 474 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
475 } 475 }
476 476
477 /*
478 * The end_tag opcode must be followed by a zero byte.
479 * Although this byte is technically defined to be a checksum,
480 * in practice, all ASL compilers set this byte to zero.
481 */
482 if (*(aml + 1) != 0) {
483 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
484 }
485
486 /* Return the pointer to the end_tag if requested */ 477 /* Return the pointer to the end_tag if requested */
487 478
488 if (!user_function) { 479 if (!user_function) {
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 992f7c20760f..88220ff3e1c2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -185,8 +185,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	/* cannot be lower than 11 otherwise freq will not fall */
-	if (ret != 1 || input < 11 || input > 100 ||
+	/* cannot be lower than 1 otherwise freq will not fall */
+	if (ret != 1 || input < 1 || input > 100 ||
 			input >= dbs_data->up_threshold)
 		return -EINVAL;
 
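A compact userspace version of the relaxed check, useful for seeing which inputs now pass: anything that parses, lies in [1, 100], and stays below up_threshold is accepted. The up_threshold of 80 is an assumed value.

#include <stdio.h>

static int store_down_threshold(const char *buf, unsigned int up_threshold,
				unsigned int *down_threshold)
{
	unsigned int input;

	if (sscanf(buf, "%u", &input) != 1 ||
	    input < 1 || input > 100 || input >= up_threshold)
		return -1;              /* -EINVAL in the kernel */

	*down_threshold = input;
	return 0;
}

int main(void)
{
	unsigned int down = 20;
	int ret;

	ret = store_down_threshold("5", 80, &down);     /* now accepted */
	printf("\"5\"  -> ret %d, down_threshold %u\n", ret, down);

	ret = store_down_threshold("90", 80, &down);    /* still rejected */
	printf("\"90\" -> ret %d, down_threshold %u\n", ret, down);
	return 0;
}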
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index ffca4fc0061d..ae8eb0359889 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -180,8 +180,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 		if (!state_node)
 			break;
 
-		if (!of_device_is_available(state_node))
+		if (!of_device_is_available(state_node)) {
+			of_node_put(state_node);
 			continue;
+		}
 
 		if (!idle_state_valid(state_node, i, cpumask)) {
 			pr_warn("%s idle state not valid, bailing out\n",
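The added of_node_put() fixes a reference leak on the early-continue path. Below is a plain-C refcount sketch of the same rule; it is not the of_node API, only the shape of the bug.

#include <stdio.h>

struct node { int refcount; int available; };

static struct node *get_node(struct node *n) { n->refcount++; return n; }
static void put_node(struct node *n) { n->refcount--; }

int main(void)
{
	struct node nodes[3] = { {0, 1}, {0, 0}, {0, 1} };      /* middle one unavailable */

	for (int i = 0; i < 3; i++) {
		struct node *n = get_node(&nodes[i]);

		if (!n->available) {
			put_node(n);    /* without this, the loop leaks one reference */
			continue;
		}
		/* ... parse the idle state ... */
		put_node(n);
	}

	for (int i = 0; i < 3; i++)
		printf("node %d refcount %d\n", i, nodes[i].refcount);
	return 0;
}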
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 5c3e7b11e8a6..f6e7956fc91a 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -267,7 +267,11 @@ static int exynos_nocp_probe(struct platform_device *pdev)
 	}
 	platform_set_drvdata(pdev, nocp);
 
-	clk_prepare_enable(nocp->clk);
+	ret = clk_prepare_enable(nocp->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+		return ret;
+	}
 
 	pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
 		dev_name(dev));
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 9b7350935b73..d96e3dc71cf8 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -44,7 +44,7 @@ struct exynos_ppmu {
 	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
 	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
 
-struct __exynos_ppmu_events {
+static struct __exynos_ppmu_events {
 	char *name;
 	int id;
 } ppmu_events[] = {
@@ -648,7 +648,11 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
 			dev_name(&pdev->dev), desc[i].name);
 	}
 
-	clk_prepare_enable(info->ppmu.clk);
+	ret = clk_prepare_enable(info->ppmu.clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+		return ret;
+	}
 
 	return 0;
 }
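Both devfreq hunks apply the same probe-time rule: an enable step that can fail must be checked and its error propagated rather than ignored. A generic sketch with made-up names:

#include <stdio.h>

static int clk_prepare_enable_demo(int should_fail)
{
	return should_fail ? -5 : 0;    /* -EIO-style failure vs success */
}

static int probe_demo(int clk_broken)
{
	int ret = clk_prepare_enable_demo(clk_broken);

	if (ret) {
		fprintf(stderr, "failed to prepare ppmu clock (%d)\n", ret);
		return ret;             /* propagate, do not keep probing */
	}
	printf("device registered\n");
	return 0;
}

int main(void)
{
	printf("good clock: %d\n", probe_demo(0));
	printf("bad clock : %d\n", probe_demo(1));
	return 0;
}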
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index dc269cb288c2..951b6c79f166 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -47,7 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,	0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,	0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,		0400, DMI_PRODUCT_UUID);
-DEFINE_DMI_ATTR_WITH_SHOW(product_family,	0400, DMI_PRODUCT_FAMILY);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,	0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,		0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,		0444, DMI_BOARD_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(board_version,	0444, DMI_BOARD_VERSION);
@@ -192,7 +192,7 @@ static void __init dmi_id_init_attr_table(void)
 	ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
 	ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
 	ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
-	ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
+	ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
 	ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
 	ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
 	ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 93f7acdaac7a..783041964439 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
 
 	buf = dmi_early_remap(dmi_base, orig_dmi_len);
 	if (buf == NULL)
-		return -1;
+		return -ENOMEM;
 
 	dmi_decode_table(buf, decode, NULL);
 
@@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
 	const char *d = (const char *) dm;
 	const char *p;
 
-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= string)
 		return;
 
 	p = dmi_string(dm, d[string]);
@@ -191,13 +191,14 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
 static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
 		int index)
 {
-	const u8 *d = (u8 *) dm + index;
+	const u8 *d;
 	char *s;
 	int is_ff = 1, is_00 = 1, i;
 
-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= index + 16)
 		return;
 
+	d = (u8 *) dm + index;
 	for (i = 0; i < 16 && (is_ff || is_00); i++) {
 		if (d[i] != 0x00)
 			is_00 = 0;
@@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
 static void __init dmi_save_type(const struct dmi_header *dm, int slot,
 		int index)
 {
-	const u8 *d = (u8 *) dm + index;
+	const u8 *d;
 	char *s;
 
-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= index)
 		return;
 
 	s = dmi_alloc(4);
 	if (!s)
 		return;
 
+	d = (u8 *) dm + index;
 	sprintf(s, "%u", *d & 0x7F);
 	dmi_ident[slot] = s;
 }
@@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
 
 static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
 {
-	int i, count = *(u8 *)(dm + 1);
+	int i, count;
 	struct dmi_device *dev;
 
+	if (dm->length < 0x05)
+		return;
+
+	count = *(u8 *)(dm + 1);
 	for (i = 1; i <= count; i++) {
 		const char *devname = dmi_string(dm, i);
 
@@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
 	const char *name;
 	const u8 *d = (u8 *)dm;
 
+	if (dm->length < 0x0B)
+		return;
+
 	/* Skip disabled device */
 	if ((d[0x5] & 0x80) == 0)
 		return;
@@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
 	const char *d = (const char *)dm;
 	static int nr;
 
-	if (dm->type != DMI_ENTRY_MEM_DEVICE)
+	if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
 		return;
 	if (nr >= dmi_memdev_nr) {
 		pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -650,6 +659,21 @@ void __init dmi_scan_machine(void)
 			goto error;
 
 		/*
+		 * Same logic as above, look for a 64-bit entry point
+		 * first, and if not found, fall back to 32-bit entry point.
+		 */
+		memcpy_fromio(buf, p, 16);
+		for (q = p + 16; q < p + 0x10000; q += 16) {
+			memcpy_fromio(buf + 16, q, 16);
+			if (!dmi_smbios3_present(buf)) {
+				dmi_available = 1;
+				dmi_early_unmap(p, 0x10000);
+				goto out;
+			}
+			memcpy(buf, buf + 16, 16);
+		}
+
+		/*
 		 * Iterate over all possible DMI header addresses q.
 		 * Maintain the 32 bytes around q in buf.  On the
 		 * first iteration, substitute zero for the
@@ -659,7 +683,7 @@ void __init dmi_scan_machine(void)
 		memset(buf, 0, 16);
 		for (q = p; q < p + 0x10000; q += 16) {
 			memcpy_fromio(buf + 16, q, 16);
-			if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
+			if (!dmi_present(buf)) {
 				dmi_available = 1;
 				dmi_early_unmap(p, 0x10000);
 				goto out;
@@ -993,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date);
  *	@decode: Callback function
  *	@private_data: Private data to be passed to the callback function
  *
- *	Returns -1 when the DMI table can't be reached, 0 on success.
+ *	Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ *	or a different negative error code if DMI walking fails.
  */
 int dmi_walk(void (*decode)(const struct dmi_header *, void *),
 	     void *private_data)
@@ -1001,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
 	u8 *buf;
 
 	if (!dmi_available)
-		return -1;
+		return -ENXIO;
 
 	buf = dmi_remap(dmi_base, dmi_len);
 	if (buf == NULL)
-		return -1;
+		return -ENOMEM;
 
 	dmi_decode_table(buf, decode, private_data);
 
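A userspace sketch of the bounds rule the dmi_scan changes enforce: never dereference a field unless the record's declared length covers it. The struct mirrors only the 4-byte SMBIOS header and the record bytes are invented.

#include <stdint.h>
#include <stdio.h>

struct dmi_header {
	uint8_t type;
	uint8_t length;         /* length of the formatted area, including header */
	uint16_t handle;
};

static int dmi_field_u8(const struct dmi_header *dm, int index, uint8_t *out)
{
	if (dm->length <= index)
		return -1;                      /* field not present, refuse to read */
	*out = ((const uint8_t *)dm)[index];
	return 0;
}

int main(void)
{
	_Alignas(uint16_t) uint8_t record[8] = { 3, 8, 0, 0, 0x11, 0x22, 0x33, 0x44 };
	const struct dmi_header *dm = (const struct dmi_header *)record;
	uint8_t v;

	printf("offset 5: %s\n", dmi_field_u8(dm, 5, &v) ? "out of range" : "ok");
	printf("offset 9: %s\n", dmi_field_u8(dm, 9, &v) ? "out of range" : "ok");
	return 0;
}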
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 0cdeb6a2e4a0..5dffa27afa45 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
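The same div_u64() conversion repeats across the display-engine variants below, so one worked example here: on builds where the multiplication happens in 32 bits (32-bit kernels, where plain 64-bit division also needs the div_u64 helper), 1000000 * crtc_htotal wraps once htotal exceeds 4294. The timing values are made up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crtc_htotal = 4500;    /* assumed oversized timing */
	uint32_t clock = 600000;        /* pixel clock in kHz, also assumed */

	/* 32-bit product wraps around 2^32 before the division */
	uint32_t line_time_bad = 1000000u * crtc_htotal / clock;
	/* widening to 64 bits first, as div_u64() does, keeps the full product */
	uint32_t line_time_good = (uint32_t)((uint64_t)crtc_htotal * 1000000 / clock);

	printf("32-bit math: %" PRIu32 "\n", line_time_bad);    /* 341, wrong */
	printf("64-bit math: %" PRIu32 "\n", line_time_good);   /* 7500 */
	return 0;
}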
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 773654a19749..47bbc87f96d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 1f3552967ba3..d8c9a959493e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 	fixed20_12 a, b, c;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3c558c170e5e..db30c6ba563a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 40d2827a6d19..53e78d092d18 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,6 +1,7 @@
 config DRM_DW_HDMI
 	tristate
 	select DRM_KMS_HELPER
+	select REGMAP_MMIO
 
 config DRM_DW_HDMI_AHB_AUDIO
 	tristate "Synopsys Designware AHB Audio interface"
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb2974caac..2cfe96d3e5d1 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
 #define VGT_VERSION_MAJOR 1
 #define VGT_VERSION_MINOR 0
 
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
-	INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
 /*
  * notifications from guest to vgpu device model
  */
@@ -55,8 +51,8 @@ enum vgt_g2v_type {
 
 struct vgt_if {
 	u64 magic;		/* VGT_MAGIC */
-	uint16_t version_major;
-	uint16_t version_minor;
+	u16 version_major;
+	u16 version_minor;
 	u32 vgt_id;		/* ID of vGT instance */
 	u32 rsv1[12];		/* pad to offset 0x40 */
 	/*
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4ab8a973b61f..2e739018fb4c 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
  */
 void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
-	uint64_t magic;
-	uint32_t version;
+	u64 magic;
+	u16 version_major;
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
@@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 	if (magic != VGT_MAGIC)
 		return;
 
-	version = INTEL_VGT_IF_VERSION_ENCODE(
-		__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
-		__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
-	if (version != INTEL_VGT_IF_VERSION) {
+	version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+	if (version_major < VGT_VERSION_MAJOR) {
 		DRM_INFO("VGT interface version mismatch!\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 569717a12723..96b0b01677e2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4598,7 +4598,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 
 static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
-		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
+		  unsigned int scaler_user, int *scaler_id,
 		  int src_w, int src_h, int dst_w, int dst_h)
 {
 	struct intel_crtc_scaler_state *scaler_state =
@@ -4607,9 +4607,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		to_intel_crtc(crtc_state->base.crtc);
 	int need_scaling;
 
-	need_scaling = drm_rotation_90_or_270(rotation) ?
-		(src_h != dst_w || src_w != dst_h):
-		(src_w != dst_w || src_h != dst_h);
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
+	need_scaling = src_w != dst_w || src_h != dst_h;
 
 	/*
 	 * if plane is being disabled or scaler is no more required or force detach
@@ -4671,7 +4674,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 
 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-		&state->scaler_state.scaler_id, DRM_ROTATE_0,
+		&state->scaler_state.scaler_id,
 		state->pipe_src_w, state->pipe_src_h,
 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }
@@ -4700,7 +4703,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	ret = skl_update_scaler(crtc_state, force_detach,
 				drm_plane_index(&intel_plane->base),
 				&plane_state->scaler_id,
-				plane_state->base.rotation,
 				drm_rect_width(&plane_state->base.src) >> 16,
 				drm_rect_height(&plane_state->base.src) >> 16,
 				drm_rect_width(&plane_state->base.dst),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2ca481b5aa69..078fd1bfa5ea 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
 
 	/* n.b., src is 16.16 fixed point, dst is whole integer */
 	if (plane->id == PLANE_CURSOR) {
+		/*
+		 * Cursors only support 0/180 degree rotation,
+		 * hence no need to account for rotation here.
+		 */
 		src_w = pstate->base.src_w;
 		src_h = pstate->base.src_h;
 		dst_w = pstate->base.crtc_w;
 		dst_h = pstate->base.crtc_h;
 	} else {
+		/*
+		 * Src coordinates are already rotated by 270 degrees for
+		 * the 90/270 degree plane rotation cases (to match the
+		 * GTT mapping), hence no need to account for rotation here.
+		 */
 		src_w = drm_rect_width(&pstate->base.src);
 		src_h = drm_rect_height(&pstate->base.src);
 		dst_w = drm_rect_width(&pstate->base.dst);
 		dst_h = drm_rect_height(&pstate->base.dst);
 	}
 
-	if (drm_rotation_90_or_270(pstate->base.rotation))
-		swap(dst_w, dst_h);
-
 	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
 	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
 
@@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	if (y && format != DRM_FORMAT_NV12)
 		return 0;
 
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
 	width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(width, height);
-
 	/* for planar format */
 	if (format == DRM_FORMAT_NV12) {
 		if (y)  /* y-plane data rate */
@@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
 		return 8;
 
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
 	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
 	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(src_w, src_h);
-
 	/* Halve UV plane width and height for NV12 */
 	if (fb->format->format == DRM_FORMAT_NV12 && !y) {
 		src_w /= 2;
@@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		width = intel_pstate->base.crtc_w;
 		height = intel_pstate->base.crtc_h;
 	} else {
+		/*
+		 * Src coordinates are already rotated by 270 degrees for
+		 * the 90/270 degree plane rotation cases (to match the
+		 * GTT mapping), hence no need to account for rotation here.
+		 */
 		width = drm_rect_width(&intel_pstate->base.src) >> 16;
 		height = drm_rect_height(&intel_pstate->base.src) >> 16;
 	}
 
-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(width, height);
-
 	cpp = fb->format->cpp[0];
 	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index adb411a078e8..f4b53588e071 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
 
 	if (IS_G200_SE(mdev)) {
-		if (mdev->unique_rev_id >= 0x02) {
+		if (mdev->unique_rev_id >= 0x04) {
+			WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+			WREG8(MGAREG_CRTCEXT_DATA, 0);
+		} else if (mdev->unique_rev_id >= 0x02) {
 			u8 hi_pri_lvl;
 			u32 bpp;
 			u32 mb;
@@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
 				> (30100 * 1024))
 				return MODE_BANDWIDTH;
+		} else {
+			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+				> (55000 * 1024))
+				return MODE_BANDWIDTH;
 		}
 	} else if (mdev->type == G200_WB) {
 		if (mode->hdisplay > 1280)
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 1144e0c9e894..0abe77675b76 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -35,6 +35,13 @@
 #include "mxsfb_drv.h"
 #include "mxsfb_regs.h"
 
+#define MXS_SET_ADDR		0x4
+#define MXS_CLR_ADDR		0x8
+#define MODULE_CLKGATE		BIT(30)
+#define MODULE_SFTRST		BIT(31)
+/* 1 second delay should be plenty of time for block reset */
+#define RESET_TIMEOUT		1000000
+
 static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
 {
 	return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
 	clk_disable_unprepare(mxsfb->clk_disp_axi);
 }
 
+/*
+ * Clear the bit and poll it cleared. This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+	u32 reg;
+
+	writel(mask, addr + MXS_CLR_ADDR);
+	return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
+static int mxsfb_reset_block(void __iomem *reset_addr)
+{
+	int ret;
+
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (ret)
+		return ret;
+
+	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (ret)
+		return ret;
+
+	return clear_poll_bit(reset_addr, MODULE_CLKGATE);
+}
+
 static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 {
 	struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 	 */
 	mxsfb_enable_axi_clk(mxsfb);
 
+	/* Mandatory eLCDIF reset as per the Reference Manual */
+	err = mxsfb_reset_block(mxsfb->base);
+	if (err)
+		return;
+
 	/* Clear the FIFOs */
 	writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
 
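A self-contained sketch of the clear-and-poll reset sequence introduced above. In the kernel the polling is readl_poll_timeout() against a memory-mapped register; here a plain variable stands in for the register, so the "hardware" clears instantly.

#include <stdio.h>

#define MODULE_SFTRST  (1u << 31)
#define MODULE_CLKGATE (1u << 30)
#define RESET_TIMEOUT  1000000

static unsigned int fake_reg = MODULE_SFTRST | MODULE_CLKGATE;

static int clear_poll_bit(unsigned int *reg, unsigned int mask)
{
	*reg &= ~mask;                          /* write to the "clear" address */

	for (long budget = RESET_TIMEOUT; budget > 0; budget--) {
		if (!(*reg & mask))
			return 0;               /* bit observed clear */
	}
	return -110;                            /* -ETIMEDOUT */
}

int main(void)
{
	int ret;

	ret = clear_poll_bit(&fake_reg, MODULE_SFTRST);
	if (!ret)
		ret = clear_poll_bit(&fake_reg, MODULE_CLKGATE);
	printf("reset %s\n", ret ? "timed out" : "complete");
	return 0;
}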
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 008c145b7f29..ca44233ceacc 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 	u32 tmp, wm_mask;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0bf103536404..534637203e70 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 	fixed20_12 a, b, c;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 		dram_channels = evergreen_get_number_of_dram_channels(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 7431eb4a11b7..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 	}
 
 	/* TODO: is this still necessary on NI+ ? */
-	if ((cmd == 0 || cmd == 1 || cmd == 0x3) &&
+	if ((cmd == 0 || cmd == 0x3) &&
 	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
 		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 			  start, end);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 76d1888528e6..5303f25d5280 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
 	fixed20_12 a, b, c;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 9a1e34e48f64..81f86a67c10d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -451,18 +451,6 @@ fail:
451 451
452 452
453#ifdef CONFIG_DRM_TEGRA_STAGING 453#ifdef CONFIG_DRM_TEGRA_STAGING
454static struct tegra_drm_context *
455tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
456{
457 struct tegra_drm_context *context;
458
459 mutex_lock(&file->lock);
460 context = idr_find(&file->contexts, id);
461 mutex_unlock(&file->lock);
462
463 return context;
464}
465
466static int tegra_gem_create(struct drm_device *drm, void *data, 454static int tegra_gem_create(struct drm_device *drm, void *data,
467 struct drm_file *file) 455 struct drm_file *file)
468{ 456{
@@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
551 if (err < 0) 539 if (err < 0)
552 return err; 540 return err;
553 541
554 err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); 542 err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
555 if (err < 0) { 543 if (err < 0) {
556 client->ops->close_channel(context); 544 client->ops->close_channel(context);
557 return err; 545 return err;
@@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
606 594
607 mutex_lock(&fpriv->lock); 595 mutex_lock(&fpriv->lock);
608 596
609 context = tegra_drm_file_get_context(fpriv, args->context); 597 context = idr_find(&fpriv->contexts, args->context);
610 if (!context) { 598 if (!context) {
611 err = -EINVAL; 599 err = -EINVAL;
612 goto unlock; 600 goto unlock;
@@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
631 619
632 mutex_lock(&fpriv->lock); 620 mutex_lock(&fpriv->lock);
633 621
634 context = tegra_drm_file_get_context(fpriv, args->context); 622 context = idr_find(&fpriv->contexts, args->context);
635 if (!context) { 623 if (!context) {
636 err = -ENODEV; 624 err = -ENODEV;
637 goto unlock; 625 goto unlock;
@@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
660 648
661 mutex_lock(&fpriv->lock); 649 mutex_lock(&fpriv->lock);
662 650
663 context = tegra_drm_file_get_context(fpriv, args->context); 651 context = idr_find(&fpriv->contexts, args->context);
664 if (!context) { 652 if (!context) {
665 err = -ENODEV; 653 err = -ENODEV;
666 goto unlock; 654 goto unlock;
@@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
685 673
686 mutex_lock(&fpriv->lock); 674 mutex_lock(&fpriv->lock);
687 675
688 context = tegra_drm_file_get_context(fpriv, args->context); 676 context = idr_find(&fpriv->contexts, args->context);
689 if (!context) { 677 if (!context) {
690 err = -ENODEV; 678 err = -ENODEV;
691 goto unlock; 679 goto unlock;
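The drm.c hunks above drop the tegra_drm_file_get_context() helper, which released fpriv->lock before returning the pointer, and instead do the idr_find() directly inside the caller's locked region; idr_alloc() also starts at 1, presumably so that id 0 is never a valid context handle. A hedged sketch of that lookup-under-lock pattern, using hypothetical names (my_file, my_ctx) rather than the driver's own:

#include <linux/idr.h>
#include <linux/mutex.h>

struct my_ctx { int hw_channel; };

struct my_file {
        struct mutex lock;
        struct idr contexts;
};

static int my_ctx_create(struct my_file *f, struct my_ctx *ctx)
{
        int id;

        mutex_lock(&f->lock);
        /* start at 1 so that id 0 is never handed out */
        id = idr_alloc(&f->contexts, ctx, 1, 0, GFP_KERNEL);
        mutex_unlock(&f->lock);

        return id;
}

static int my_ctx_use(struct my_file *f, unsigned int id)
{
        struct my_ctx *ctx;
        int ret;

        mutex_lock(&f->lock);
        ctx = idr_find(&f->contexts, id);
        if (!ctx)
                ret = -EINVAL;
        else
                ret = ctx->hw_channel;  /* safe: still under f->lock */
        mutex_unlock(&f->lock);

        return ret;
}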
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f05ebb14fa63..ac65f52850a6 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev)
172 172
173 host->rst = devm_reset_control_get(&pdev->dev, "host1x"); 173 host->rst = devm_reset_control_get(&pdev->dev, "host1x");
174 if (IS_ERR(host->rst)) { 174 if (IS_ERR(host->rst)) {
175 err = PTR_ERR(host->clk); 175 err = PTR_ERR(host->rst);
176 dev_err(&pdev->dev, "failed to get reset: %d\n", err); 176 dev_err(&pdev->dev, "failed to get reset: %d\n", err);
177 return err; 177 return err;
178 } 178 }
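The host1x fix is a copy-paste correction: the error code must be decoded from the same pointer that IS_ERR() tested, otherwise an unrelated pointer is interpreted as an errno. A short sketch of the idiom, with "demo_probe" and the "demo" reset name as illustrative stand-ins:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int demo_probe(struct platform_device *pdev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get(&pdev->dev, "demo");
        if (IS_ERR(rst)) {
                int err = PTR_ERR(rst); /* same pointer as the IS_ERR() check */

                dev_err(&pdev->dev, "failed to get reset: %d\n", err);
                return err;
        }

        return 0;
}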
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 04cee65531d7..6e040692f1d8 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -826,11 +826,35 @@ static int hid_scan_report(struct hid_device *hid)
826 * hid-rmi should take care of them, 826 * hid-rmi should take care of them,
827 * not hid-generic 827 * not hid-generic
828 */ 828 */
829 if (IS_ENABLED(CONFIG_HID_RMI)) 829 hid->group = HID_GROUP_RMI;
830 hid->group = HID_GROUP_RMI;
831 break; 830 break;
832 } 831 }
833 832
833 /* fall back to generic driver in case specific driver doesn't exist */
834 switch (hid->group) {
835 case HID_GROUP_MULTITOUCH_WIN_8:
836 /* fall-through */
837 case HID_GROUP_MULTITOUCH:
838 if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
839 hid->group = HID_GROUP_GENERIC;
840 break;
841 case HID_GROUP_SENSOR_HUB:
842 if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
843 hid->group = HID_GROUP_GENERIC;
844 break;
845 case HID_GROUP_RMI:
846 if (!IS_ENABLED(CONFIG_HID_RMI))
847 hid->group = HID_GROUP_GENERIC;
848 break;
849 case HID_GROUP_WACOM:
850 if (!IS_ENABLED(CONFIG_HID_WACOM))
851 hid->group = HID_GROUP_GENERIC;
852 break;
853 case HID_GROUP_LOGITECH_DJ_DEVICE:
854 if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
855 hid->group = HID_GROUP_GENERIC;
856 break;
857 }
834 vfree(parser); 858 vfree(parser);
835 return 0; 859 return 0;
836} 860}
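The new switch in hid_scan_report() demotes a device to HID_GROUP_GENERIC whenever the group-specific driver is compiled out, so hid-generic can still bind it; the IS_ENABLED() guards added to hid_have_special_driver[] further down serve the same purpose for individually listed devices. A minimal sketch of the IS_ENABLED() pattern, with CONFIG_DEMO_DRIVER and the group values as illustrative placeholders:

#include <linux/kconfig.h>

#define MY_GROUP_GENERIC  0
#define MY_GROUP_SPECIAL  1

static int pick_group(int detected_group)
{
        /* IS_ENABLED() is 1 when the option is built in or modular, so the
         * fallback compiles away without an #ifdef in the flow of the code */
        if (detected_group == MY_GROUP_SPECIAL &&
            !IS_ENABLED(CONFIG_DEMO_DRIVER))
                return MY_GROUP_GENERIC;        /* let hid-generic bind */

        return detected_group;
}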
@@ -1763,15 +1787,23 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
1763 * used as a driver. See hid_scan_report(). 1787 * used as a driver. See hid_scan_report().
1764 */ 1788 */
1765static const struct hid_device_id hid_have_special_driver[] = { 1789static const struct hid_device_id hid_have_special_driver[] = {
1790#if IS_ENABLED(CONFIG_HID_A4TECH)
1766 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, 1791 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
1767 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, 1792 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
1768 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, 1793 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
1794#endif
1795#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
1796 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
1797#endif
1798#if IS_ENABLED(CONFIG_HID_ACRUX)
1769 { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, 1799 { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
1770 { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, 1800 { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
1801#endif
1802#if IS_ENABLED(CONFIG_HID_ALPS)
1771 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, 1803 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
1804#endif
1805#if IS_ENABLED(CONFIG_HID_APPLE)
1772 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, 1806 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
1773 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
1774 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
1775 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, 1807 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
1776 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, 1808 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
1777 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, 1809 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@@ -1792,11 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
1792 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, 1824 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
1793 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, 1825 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
1794 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, 1826 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
1795 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
1796 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
1797 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
1798 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
1799 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
1800 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, 1827 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
1801 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, 1828 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
1802 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) }, 1829 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
@@ -1851,62 +1878,100 @@ static const struct hid_device_id hid_have_special_driver[] = {
1851 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, 1878 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
1852 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1879 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1853 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1880 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1881#endif
1882#if IS_ENABLED(CONFIG_HID_APPLEIR)
1883 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
1884 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
1885 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
1886 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
1887 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
1888#endif
1889#if IS_ENABLED(CONFIG_HID_ASUS)
1854 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) }, 1890 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
1855 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, 1891 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
1856 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, 1892 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
1857 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, 1893 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
1858 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, 1894 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
1895#endif
1896#if IS_ENABLED(CONFIG_HID_AUREAL)
1859 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, 1897 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
1898#endif
1899#if IS_ENABLED(CONFIG_HID_BELKIN)
1860 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1900 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
1901 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
1902#endif
1903#if IS_ENABLED(CONFIG_HID_BETOP_FF)
1861 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, 1904 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
1862 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) }, 1905 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
1863 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) }, 1906 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
1864 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) }, 1907 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
1865 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 1908#endif
1866 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, 1909#if IS_ENABLED(CONFIG_HID_CHERRY)
1867 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1910 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
1868 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, 1911 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
1912#endif
1913#if IS_ENABLED(CONFIG_HID_CHICONY)
1869 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, 1914 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1870 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
1871 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, 1915 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
1872 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 1916 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
1873 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 1917 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
1918 { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
1919#endif
1920#if IS_ENABLED(CONFIG_HID_CMEDIA)
1921 { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
1922#endif
1923#if IS_ENABLED(CONFIG_HID_CORSAIR)
1874 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, 1924 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
1875 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, 1925 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
1876 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 1926#endif
1927#if IS_ENABLED(CONFIG_HID_CP2112)
1877 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, 1928 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
1929#endif
1930#if IS_ENABLED(CONFIG_HID_CYPRESS)
1878 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, 1931 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
1879 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, 1932 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
1880 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, 1933 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
1881 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, 1934 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
1882 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, 1935 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
1883 { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, 1936#endif
1937#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
1884 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, 1938 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
1885 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, 1939 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
1886#if IS_ENABLED(CONFIG_HID_MAYFLASH)
1887 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
1888 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
1889 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
1890 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
1891#endif 1940#endif
1892 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, 1941#if IS_ENABLED(CONFIG_HID_ELECOM)
1893 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
1894 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 1942 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
1895 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 1943 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
1896 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 1944 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
1945#endif
1946#if IS_ENABLED(CONFIG_HID_ELO)
1897 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, 1947 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
1898 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, 1948 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
1899 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, 1949#endif
1950#if IS_ENABLED(CONFIG_HID_EMS_FF)
1900 { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, 1951 { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
1952#endif
1953#if IS_ENABLED(CONFIG_HID_EZKEY)
1901 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, 1954 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
1902 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, 1955#endif
1903 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, 1956#if IS_ENABLED(CONFIG_HID_GEMBIRD)
1904 { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) }, 1957 { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
1905 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, 1958#endif
1959#if IS_ENABLED(CONFIG_HID_GFRM)
1960 { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
1961 { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
1962#endif
1963#if IS_ENABLED(CONFIG_HID_GREENASIA)
1906 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, 1964 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
1965#endif
1966#if IS_ENABLED(CONFIG_HID_GT683R)
1967 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
1968#endif
1969#if IS_ENABLED(CONFIG_HID_GYRATION)
1907 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 1970 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
1908 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, 1971 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
1909 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, 1972 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
1973#endif
1974#if IS_ENABLED(CONFIG_HID_HOLTEK)
1910 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, 1975 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
1911 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, 1976 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
1912 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, 1977 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
@@ -1915,12 +1980,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
1915 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, 1980 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
1916 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, 1981 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
1917 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, 1982 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
1918 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, 1983#endif
1919 { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, 1984#if IS_ENABLED(CONFIG_HID_ICADE)
1920 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
1921 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1985 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
1986#endif
1987#if IS_ENABLED(CONFIG_HID_KENSINGTON)
1922 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, 1988 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1989#endif
1990#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
1923 { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, 1991 { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
1992#endif
1993#if IS_ENABLED(CONFIG_HID_KYE)
1924 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, 1994 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1925 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, 1995 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
1926 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, 1996 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
@@ -1930,21 +2000,29 @@ static const struct hid_device_id hid_have_special_driver[] = {
1930 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) }, 2000 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
1931 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 2001 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
1932 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, 2002 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
1933 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 2003#endif
2004#if IS_ENABLED(CONFIG_HID_LCPOWER)
1934 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, 2005 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
2006#endif
2007#if IS_ENABLED(CONFIG_HID_LED)
2008 { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
2009 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
2010 { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
2011 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
2012 { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
2013 { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
2014#endif
1935#if IS_ENABLED(CONFIG_HID_LENOVO) 2015#if IS_ENABLED(CONFIG_HID_LENOVO)
1936 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, 2016 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
1937 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, 2017 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
1938 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, 2018 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
1939 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, 2019 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
1940#endif 2020#endif
1941 { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, 2021#if IS_ENABLED(CONFIG_HID_LOGITECH)
1942 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, 2022 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
1943 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, 2023 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
1944 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, 2024 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
1945 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, 2025 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
1946 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
1947 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
1948 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, 2026 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
1949 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, 2027 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
1950 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, 2028 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
@@ -1957,7 +2035,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
1957 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, 2035 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
1958 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, 2036 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
1959 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) }, 2037 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
1960 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
1961 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, 2038 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, 2039 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
1963 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, 2040 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
@@ -1969,17 +2046,30 @@ static const struct hid_device_id hid_have_special_driver[] = {
1969 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, 2046 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
1970 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, 2047 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
1971 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, 2048 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
1972#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
1973 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
1974 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
1975#endif
1976 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, 2049 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
1977 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, 2050 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
1978 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, 2051 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
1979 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, 2052 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
1980 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, 2053#endif
1981 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, 2054#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
1982 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, 2055 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
2056 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
2057#endif
2058#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
2059 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
2060 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
2061#endif
2062#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
2063 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
2064 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
2065#endif
2066#if IS_ENABLED(CONFIG_HID_MAYFLASH)
2067 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
2068 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
2069 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
2070 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
2071#endif
2072#if IS_ENABLED(CONFIG_HID_MICROSOFT)
1983 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, 2073 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
1984 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, 2074 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
1985 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, 2075 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
@@ -1995,9 +2085,22 @@ static const struct hid_device_id hid_have_special_driver[] = {
1995 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, 2085 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
1996 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, 2086 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
1997 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, 2087 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
2088 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
2089#endif
2090#if IS_ENABLED(CONFIG_HID_MONTEREY)
1998 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 2091 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
1999 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, 2092#endif
2093#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
2094 { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
2095#endif
2096#if IS_ENABLED(CONFIG_HID_WIIMOTE)
2097 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
2098 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
2099#endif
2100#if IS_ENABLED(CONFIG_HID_NTI)
2000 { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, 2101 { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
2102#endif
2103#if IS_ENABLED(CONFIG_HID_NTRIG)
2001 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 2104 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
2002 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, 2105 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
2003 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, 2106 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
@@ -2017,13 +2120,41 @@ static const struct hid_device_id hid_have_special_driver[] = {
2017 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, 2120 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
2018 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, 2121 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
2019 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, 2122 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
2123#endif
2124#if IS_ENABLED(CONFIG_HID_ORTEK)
2020 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, 2125 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
2021 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 2126 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
2127 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
2128#endif
2129#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
2130 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
2131 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
2132 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
2133 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
2134#endif
2135#if IS_ENABLED(CONFIG_HID_PENMOUNT)
2022 { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, 2136 { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
2137#endif
2138#if IS_ENABLED(CONFIG_HID_PETALYNX)
2023 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, 2139 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
2140#endif
2141#if IS_ENABLED(CONFIG_HID_PICOLCD)
2142 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
2143 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
2144#endif
2145#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
2024 { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, 2146 { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
2147#endif
2148#if IS_ENABLED(CONFIG_HID_PRIMAX)
2025 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, 2149 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
2026 { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, 2150#endif
2151#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
2152 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
2153#endif
2154#if IS_ENABLED(CONFIG_HID_RMI)
2155 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
2156 { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
2157#endif
2027#if IS_ENABLED(CONFIG_HID_ROCCAT) 2158#if IS_ENABLED(CONFIG_HID_ROCCAT)
2028 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, 2159 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
2029 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, 2160 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
@@ -2051,9 +2182,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
2051 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, 2182 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
2052 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, 2183 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
2053#endif 2184#endif
2185#if IS_ENABLED(CONFIG_HID_SAMSUNG)
2054 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, 2186 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
2055 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, 2187 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
2056 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, 2188#endif
2189#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
2190 { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
2191 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
2192 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
2193 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
2194 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
2195 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
2196 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
2197#endif
2198#if IS_ENABLED(CONFIG_HID_SONY)
2199 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
2057 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, 2200 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
2058 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, 2201 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
2059 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, 2202 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -2072,9 +2215,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
2072 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, 2215 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
2073 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, 2216 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
2074 { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, 2217 { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
2218#endif
2219#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
2220 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
2221#endif
2222#if IS_ENABLED(CONFIG_HID_STEELSERIES)
2075 { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, 2223 { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
2224#endif
2225#if IS_ENABLED(CONFIG_HID_SUNPLUS)
2076 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, 2226 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
2077 { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, 2227#endif
2228#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
2078 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, 2229 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
2079 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, 2230 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
2080 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, 2231 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2083,12 +2234,25 @@ static const struct hid_device_id hid_have_special_driver[] = {
2083 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, 2234 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
2084 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, 2235 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
2085 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, 2236 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
2237#endif
2238#if IS_ENABLED(CONFIG_HID_TIVO)
2086 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 2239 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
2087 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 2240 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
2088 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, 2241 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
2242#endif
2243#if IS_ENABLED(CONFIG_HID_TOPSEED)
2244 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
2245 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
2246 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
2089 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 2247 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
2090 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 2248 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
2249#endif
2250#if IS_ENABLED(CONFIG_HID_TWINHAN)
2091 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 2251 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
2252#endif
2253#if IS_ENABLED(CONFIG_HID_UCLOGIC)
2254 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
2255 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
2092 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, 2256 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
2093 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, 2257 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
2094 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, 2258 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -2096,20 +2260,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, 2260 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
2097 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, 2261 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, 2262 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
2099 { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
2100 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, 2263 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
2101 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, 2264 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
2102 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 2265 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
2103 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 2266 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
2104 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2105 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, 2267 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
2106 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 2268 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2107 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, 2269#endif
2108 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, 2270#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
2109 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, 2271 { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
2110 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, 2272#endif
2111 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, 2273#if IS_ENABLED(CONFIG_HID_WALTOP)
2112 { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
2113 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, 2274 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
2114 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, 2275 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, 2276 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
@@ -2117,19 +2278,18 @@ static const struct hid_device_id hid_have_special_driver[] = {
2117 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, 2278 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
2118 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, 2279 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
2119 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, 2280 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
2120 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2281#endif
2282#if IS_ENABLED(CONFIG_HID_XINMO)
2121 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 2283 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
2122 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, 2284 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
2285#endif
2286#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
2123 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 2287 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
2124 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 2288 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
2289#endif
2290#if IS_ENABLED(CONFIG_HID_ZYDACRON)
2125 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, 2291 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
2126 2292#endif
2127 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
2128 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
2129 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
2130 { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
2131 { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
2132 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
2133 { } 2293 { }
2134}; 2294};
2135 2295
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 26b05106f0d3..93d28c0ec8bf 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -1066,7 +1066,7 @@ static void ssip_pn_setup(struct net_device *dev)
1066 dev->addr_len = 1; 1066 dev->addr_len = 1;
1067 dev->tx_queue_len = SSIP_TXQUEUE_LEN; 1067 dev->tx_queue_len = SSIP_TXQUEUE_LEN;
1068 1068
1069 dev->destructor = free_netdev; 1069 dev->needs_free_netdev = true;
1070 dev->header_ops = &phonet_header_ops; 1070 dev->header_ops = &phonet_header_ops;
1071} 1071}
1072 1072
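The ssi_protocol hunk moves from pointing dev->destructor at free_netdev() to setting the needs_free_netdev flag, letting the core free the device after unregistering it. A hedged sketch of the convention in a setup callback; "demo_setup" is an illustrative name, not part of the driver:

#include <linux/netdevice.h>
#include <linux/if_arp.h>

static void demo_setup(struct net_device *dev)
{
        dev->type  = ARPHRD_NONE;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;

        /* the core calls free_netdev() for us on unregister */
        dev->needs_free_netdev = true;
}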
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index dd4190b50df6..6066bbfc42fe 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
468static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) 468static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev)
469{ 469{
470 struct meson_sar_adc_priv *priv = iio_priv(indio_dev); 470 struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
471 int count; 471 unsigned int count, tmp;
472 472
473 for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { 473 for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) {
474 if (!meson_sar_adc_get_fifo_count(indio_dev)) 474 if (!meson_sar_adc_get_fifo_count(indio_dev))
475 break; 475 break;
476 476
477 regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); 477 regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp);
478 } 478 }
479} 479}
480 480
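The meson_saradc change fixes the regmap_read() call: its third argument is a pointer that the read value is written through, so a dummy variable is required even when the sample is discarded (passing the literal 0 hands regmap a NULL pointer). A short sketch of the corrected FIFO drain, where REG_FIFO_RD, MAX_FIFO and "map" are stand-ins for the driver's own names:

#include <linux/regmap.h>

#define REG_FIFO_RD     0x0c
#define MAX_FIFO        32

static void demo_clear_fifo(struct regmap *map)
{
        unsigned int count, tmp;

        for (count = 0; count < MAX_FIFO; count++)
                regmap_read(map, REG_FIFO_RD, &tmp);    /* discard the sample */
}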
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index b0c7d8ee5cb8..6888167ca1e6 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
718 adc->dev = dev; 718 adc->dev = dev;
719 719
720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
721 if (!iores)
722 return -EINVAL;
723
721 adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); 724 adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
722 if (IS_ERR(adc->base)) 725 if (!adc->base)
723 return PTR_ERR(adc->base); 726 return -ENOMEM;
724 727
725 init_completion(&adc->completion); 728 init_completion(&adc->completion);
726 spin_lock_init(&adc->lock); 729 spin_lock_init(&adc->lock);
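The mxs-lradc-adc hunk corrects two checks: platform_get_resource() can return NULL and must be tested before iores->start is used, and devm_ioremap() signals failure with NULL rather than an ERR_PTR value, so IS_ERR()/PTR_ERR() was the wrong test. A hedged sketch of the corrected shape, with "demo_probe" as an illustrative name:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;

        base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!base)              /* NULL on failure, not an ERR_PTR value */
                return -ENOMEM;

        /* ... hand base to the rest of the probe ... */
        return 0;
}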
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index dd99d273bae9..ff03324dee13 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -14,6 +14,7 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/iio/buffer.h> 16#include <linux/iio/buffer.h>
17#include <linux/iio/buffer_impl.h>
17#include <linux/iio/buffer-dma.h> 18#include <linux/iio/buffer-dma.h>
18#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
19#include <linux/sizes.h> 20#include <linux/sizes.h>
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 9fabed47053d..2b5a320f42c5 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/iio/iio.h> 15#include <linux/iio/iio.h>
16#include <linux/iio/buffer.h> 16#include <linux/iio/buffer.h>
17#include <linux/iio/buffer_impl.h>
17#include <linux/iio/buffer-dma.h> 18#include <linux/iio/buffer-dma.h>
18#include <linux/iio/buffer-dmaengine.h> 19#include <linux/iio/buffer-dmaengine.h>
19 20
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 96dabbd2f004..88a7c5d4e4d2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785};
41static const struct inv_mpu6050_reg_map reg_set_6500 = { 41static const struct inv_mpu6050_reg_map reg_set_6500 = {
42 .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, 42 .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
43 .lpf = INV_MPU6050_REG_CONFIG, 43 .lpf = INV_MPU6050_REG_CONFIG,
44 .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
44 .user_ctrl = INV_MPU6050_REG_USER_CTRL, 45 .user_ctrl = INV_MPU6050_REG_USER_CTRL,
45 .fifo_en = INV_MPU6050_REG_FIFO_EN, 46 .fifo_en = INV_MPU6050_REG_FIFO_EN,
46 .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, 47 .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
@@ -211,6 +212,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
211EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); 212EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
212 213
213/** 214/**
215 * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
216 *
217 * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope
218 * MPU6500 and above have a dedicated register for accelerometer
219 */
220static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
221 enum inv_mpu6050_filter_e val)
222{
223 int result;
224
225 result = regmap_write(st->map, st->reg->lpf, val);
226 if (result)
227 return result;
228
229 switch (st->chip_type) {
230 case INV_MPU6050:
231 case INV_MPU6000:
232 case INV_MPU9150:
233 /* old chips, nothing to do */
234 result = 0;
235 break;
236 default:
237 /* set accel lpf */
238 result = regmap_write(st->map, st->reg->accel_lpf, val);
239 break;
240 }
241
242 return result;
243}
244
245/**
214 * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. 246 * inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
215 * 247 *
216 * Initial configuration: 248 * Initial configuration:
@@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
233 if (result) 265 if (result)
234 return result; 266 return result;
235 267
236 d = INV_MPU6050_FILTER_20HZ; 268 result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
237 result = regmap_write(st->map, st->reg->lpf, d);
238 if (result) 269 if (result)
239 return result; 270 return result;
240 271
@@ -537,6 +568,8 @@ error_write_raw:
537 * would be alising. This function basically search for the 568 * would be alising. This function basically search for the
538 * correct low pass parameters based on the fifo rate, e.g, 569 * correct low pass parameters based on the fifo rate, e.g,
539 * sampling frequency. 570 * sampling frequency.
571 *
572 * lpf is set automatically when setting sampling rate to avoid any aliases.
540 */ 573 */
541static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) 574static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
542{ 575{
@@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
552 while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) 585 while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
553 i++; 586 i++;
554 data = d[i]; 587 data = d[i];
555 result = regmap_write(st->map, st->reg->lpf, data); 588 result = inv_mpu6050_set_lpf_regs(st, data);
556 if (result) 589 if (result)
557 return result; 590 return result;
558 st->chip_config.lpf = data; 591 st->chip_config.lpf = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index ef13de7a2c20..953a0c09d568 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -28,6 +28,7 @@
28 * struct inv_mpu6050_reg_map - Notable registers. 28 * struct inv_mpu6050_reg_map - Notable registers.
29 * @sample_rate_div: Divider applied to gyro output rate. 29 * @sample_rate_div: Divider applied to gyro output rate.
30 * @lpf: Configures internal low pass filter. 30 * @lpf: Configures internal low pass filter.
31 * @accel_lpf: Configures accelerometer low pass filter.
31 * @user_ctrl: Enables/resets the FIFO. 32 * @user_ctrl: Enables/resets the FIFO.
32 * @fifo_en: Determines which data will appear in FIFO. 33 * @fifo_en: Determines which data will appear in FIFO.
33 * @gyro_config: gyro config register. 34 * @gyro_config: gyro config register.
@@ -47,6 +48,7 @@
47struct inv_mpu6050_reg_map { 48struct inv_mpu6050_reg_map {
48 u8 sample_rate_div; 49 u8 sample_rate_div;
49 u8 lpf; 50 u8 lpf;
51 u8 accel_lpf;
50 u8 user_ctrl; 52 u8 user_ctrl;
51 u8 fifo_en; 53 u8 fifo_en;
52 u8 gyro_config; 54 u8 gyro_config;
@@ -188,6 +190,7 @@ struct inv_mpu6050_state {
188#define INV_MPU6050_FIFO_THRESHOLD 500 190#define INV_MPU6050_FIFO_THRESHOLD 500
189 191
190/* mpu6500 registers */ 192/* mpu6500 registers */
193#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
191#define INV_MPU6500_REG_ACCEL_OFFSET 0x77 194#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
192 195
193/* delay time in milliseconds */ 196/* delay time in milliseconds */
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 02971e239a18..ece6926fa2e6 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
449 return ret; 449 return ret;
450 450
451 rt = (struct rt6_info *)dst; 451 rt = (struct rt6_info *)dst;
452 if (ipv6_addr_any(&fl6.saddr)) { 452 if (ipv6_addr_any(&src_in->sin6_addr)) {
453 ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
454 &fl6.daddr, 0, &fl6.saddr);
455 if (ret)
456 goto put;
457
458 src_in->sin6_family = AF_INET6; 453 src_in->sin6_family = AF_INET6;
459 src_in->sin6_addr = fl6.saddr; 454 src_in->sin6_addr = fl6.saddr;
460 } 455 }
@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
471 466
472 *pdst = dst; 467 *pdst = dst;
473 return 0; 468 return 0;
474put:
475 dst_release(dst);
476 return ret;
477} 469}
478#else 470#else
479static int addr6_resolve(struct sockaddr_in6 *src_in, 471static int addr6_resolve(struct sockaddr_in6 *src_in,
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ebf7be8d4139..08772836fded 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,10 @@
56#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) 56#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
57#define BNXT_RE_MAX_CQ_COUNT (64 * 1024) 57#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
58 58
59#define BNXT_RE_UD_QP_HW_STALL 0x400000
60
61#define BNXT_RE_RQ_WQE_THRESHOLD 32
62
59struct bnxt_re_work { 63struct bnxt_re_work {
60 struct work_struct work; 64 struct work_struct work;
61 unsigned long event; 65 unsigned long event;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 7ba9e699d7ab..c7bd68311d0c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -61,6 +61,48 @@
61#include "ib_verbs.h" 61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h> 62#include <rdma/bnxt_re-abi.h>
63 63
64static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
64static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, 106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
65 struct bnxt_qplib_sge *sg_list, int num) 107 struct bnxt_qplib_sge *sg_list, int num)
66{ 108{
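The two helpers hoisted above translate between the stack's IB_ACCESS_* bits and the device's BNXT_QPLIB_ACCESS_* bits one flag at a time. A stand-alone sketch of that style of translation, using made-up bit values rather than the real kernel constants:

#include <stdio.h>

/* Illustrative stand-ins only; not the real IB_ACCESS_* / BNXT_QPLIB_ACCESS_* values. */
#define X_LOCAL_WRITE	(1 << 0)
#define X_REMOTE_READ	(1 << 1)
#define Y_LOCAL_WRITE	(1 << 4)
#define Y_REMOTE_READ	(1 << 5)

static int from_x(int xflags)
{
	int yflags = 0;

	if (xflags & X_LOCAL_WRITE)
		yflags |= Y_LOCAL_WRITE;
	if (xflags & X_REMOTE_READ)
		yflags |= Y_REMOTE_READ;
	return yflags;
}

static int to_x(int yflags)
{
	int xflags = 0;

	if (yflags & Y_LOCAL_WRITE)
		xflags |= X_LOCAL_WRITE;
	if (yflags & Y_REMOTE_READ)
		xflags |= X_REMOTE_READ;
	return xflags;
}

int main(void)
{
	int x = X_LOCAL_WRITE | X_REMOTE_READ;

	/* Round trip: converting there and back preserves the flags. */
	printf("0x%x -> 0x%x -> 0x%x\n", x, from_x(x), to_x(from_x(x)));
	return 0;
}

The same pattern covers the remaining flags (remote write, atomics, MW bind, zero-based, on-demand).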
@@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
149 ib_attr->max_total_mcast_qp_attach = 0; 191 ib_attr->max_total_mcast_qp_attach = 0;
150 ib_attr->max_ah = dev_attr->max_ah; 192 ib_attr->max_ah = dev_attr->max_ah;
151 193
152 ib_attr->max_fmr = dev_attr->max_fmr; 194 ib_attr->max_fmr = 0;
153 ib_attr->max_map_per_fmr = 1; /* ? */ 195 ib_attr->max_map_per_fmr = 0;
154 196
155 ib_attr->max_srq = dev_attr->max_srq; 197 ib_attr->max_srq = dev_attr->max_srq;
156 ib_attr->max_srq_wr = dev_attr->max_srq_wqes; 198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
410 return IB_LINK_LAYER_ETHERNET; 452 return IB_LINK_LAYER_ETHERNET;
411} 453}
412 454
455#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
456
457static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
458{
459 struct bnxt_re_fence_data *fence = &pd->fence;
460 struct ib_mr *ib_mr = &fence->mr->ib_mr;
461 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
462
463 memset(wqe, 0, sizeof(*wqe));
464 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
465 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
466 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
467 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
468 wqe->bind.zero_based = false;
469 wqe->bind.parent_l_key = ib_mr->lkey;
470 wqe->bind.va = (u64)(unsigned long)fence->va;
471 wqe->bind.length = fence->size;
472 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
473 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
474
475 /* Save the initial rkey in fence structure for now;
476 * wqe->bind.r_key will be set at (re)bind time.
477 */
478 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
479}
480
481static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
482{
483 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
484 qplib_qp);
485 struct ib_pd *ib_pd = qp->ib_qp.pd;
486 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
487 struct bnxt_re_fence_data *fence = &pd->fence;
488 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
489 struct bnxt_qplib_swqe wqe;
490 int rc;
491
492 memcpy(&wqe, fence_wqe, sizeof(wqe));
493 wqe.bind.r_key = fence->bind_rkey;
494 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
495
496 dev_dbg(rdev_to_dev(qp->rdev),
497 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
498 wqe.bind.r_key, qp->qplib_qp.id, pd);
499 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
500 if (rc) {
501 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
502 return rc;
503 }
504 bnxt_qplib_post_send_db(&qp->qplib_qp);
505
506 return rc;
507}
508
509static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
510{
511 struct bnxt_re_fence_data *fence = &pd->fence;
512 struct bnxt_re_dev *rdev = pd->rdev;
513 struct device *dev = &rdev->en_dev->pdev->dev;
514 struct bnxt_re_mr *mr = fence->mr;
515
516 if (fence->mw) {
517 bnxt_re_dealloc_mw(fence->mw);
518 fence->mw = NULL;
519 }
520 if (mr) {
521 if (mr->ib_mr.rkey)
522 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
523 true);
524 if (mr->ib_mr.lkey)
525 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
526 kfree(mr);
527 fence->mr = NULL;
528 }
529 if (fence->dma_addr) {
530 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
531 DMA_BIDIRECTIONAL);
532 fence->dma_addr = 0;
533 }
534}
535
536static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
537{
538 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
539 struct bnxt_re_fence_data *fence = &pd->fence;
540 struct bnxt_re_dev *rdev = pd->rdev;
541 struct device *dev = &rdev->en_dev->pdev->dev;
542 struct bnxt_re_mr *mr = NULL;
543 dma_addr_t dma_addr = 0;
544 struct ib_mw *mw;
545 u64 pbl_tbl;
546 int rc;
547
548 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
549 DMA_BIDIRECTIONAL);
550 rc = dma_mapping_error(dev, dma_addr);
551 if (rc) {
552 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
553 rc = -EIO;
554 fence->dma_addr = 0;
555 goto fail;
556 }
557 fence->dma_addr = dma_addr;
558
559 /* Allocate a MR */
560 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
561 if (!mr) {
562 rc = -ENOMEM;
563 goto fail;
564 }
565 fence->mr = mr;
566 mr->rdev = rdev;
567 mr->qplib_mr.pd = &pd->qplib_pd;
568 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
569 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
570 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
571 if (rc) {
572 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
573 goto fail;
574 }
575
576 /* Register MR */
577 mr->ib_mr.lkey = mr->qplib_mr.lkey;
578 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
579 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
580 pbl_tbl = dma_addr;
581 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
582 BNXT_RE_FENCE_PBL_SIZE, false);
583 if (rc) {
584 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
585 goto fail;
586 }
587 mr->ib_mr.rkey = mr->qplib_mr.rkey;
588
589 /* Create a fence MW only for kernel consumers */
590 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
591 if (!mw) {
592 dev_err(rdev_to_dev(rdev),
593 "Failed to create fence-MW for PD: %p\n", pd);
594 rc = -EINVAL;
595 goto fail;
596 }
597 fence->mw = mw;
598
599 bnxt_re_create_fence_wqe(pd);
600 return 0;
601
602fail:
603 bnxt_re_destroy_fence_mr(pd);
604 return rc;
605}
606
413/* Protection Domains */ 607/* Protection Domains */
414int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) 608int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
415{ 609{
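The fence infrastructure added above keeps one 64-byte fence buffer, a dedicated MR and a type-1 MW per PD, and pre-builds a bind-MW WQE whose rkey is rotated before every (re)bind. A stand-alone sketch of the rkey rotation; the 8-bit variable-key mask mirrors what ib_inc_rkey() is understood to do, but treat the mask as an assumption here:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t inc_rkey(uint32_t rkey)
{
	const uint32_t key_mask = 0x000000ff;	/* assumed variable key portion */

	/* Bump only the key byte, keep the index bits intact. */
	return ((rkey + 1) & key_mask) | (rkey & ~key_mask);
}

int main(void)
{
	uint32_t rkey = 0x12345607;	/* hypothetical starting rkey */
	int i;

	/* Each rebind of the fence MW consumes one new key value. */
	for (i = 0; i < 3; i++) {
		rkey = inc_rkey(rkey);
		printf("bind %d uses rkey 0x%08" PRIx32 "\n", i, rkey);
	}
	return 0;
}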
@@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
417 struct bnxt_re_dev *rdev = pd->rdev; 611 struct bnxt_re_dev *rdev = pd->rdev;
418 int rc; 612 int rc;
419 613
614 bnxt_re_destroy_fence_mr(pd);
420 if (ib_pd->uobject && pd->dpi.dbr) { 615 if (ib_pd->uobject && pd->dpi.dbr) {
421 struct ib_ucontext *ib_uctx = ib_pd->uobject->context; 616 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
422 struct bnxt_re_ucontext *ucntx; 617 struct bnxt_re_ucontext *ucntx;
@@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
498 } 693 }
499 } 694 }
500 695
696 if (!udata)
697 if (bnxt_re_create_fence_mr(pd))
698 dev_warn(rdev_to_dev(rdev),
699 "Failed to create Fence-MR\n");
501 return &pd->ib_pd; 700 return &pd->ib_pd;
502dbfail: 701dbfail:
503 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, 702 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
849 /* Shadow QP SQ depth should be same as QP1 RQ depth */ 1048 /* Shadow QP SQ depth should be same as QP1 RQ depth */
850 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; 1049 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
851 qp->qplib_qp.sq.max_sge = 2; 1050 qp->qplib_qp.sq.max_sge = 2;
1051 /* Q full delta can be 1 since it is an internal QP */

1052 qp->qplib_qp.sq.q_full_delta = 1;
852 1053
853 qp->qplib_qp.scq = qp1_qp->scq; 1054 qp->qplib_qp.scq = qp1_qp->scq;
854 qp->qplib_qp.rcq = qp1_qp->rcq; 1055 qp->qplib_qp.rcq = qp1_qp->rcq;
855 1056
856 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; 1057 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
857 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; 1058 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1059 /* Q full delta can be 1 since it is an internal QP */
1060 qp->qplib_qp.rq.q_full_delta = 1;
858 1061
859 qp->qplib_qp.mtu = qp1_qp->mtu; 1062 qp->qplib_qp.mtu = qp1_qp->mtu;
860 1063
@@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
917 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == 1120 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
918 IB_SIGNAL_ALL_WR) ? true : false); 1121 IB_SIGNAL_ALL_WR) ? true : false);
919 1122
920 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
921 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
922 dev_attr->max_qp_wqes + 1);
923
924 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; 1123 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
925 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) 1124 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
926 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; 1125 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
@@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
959 qp->qplib_qp.rq.max_wqe = min_t(u32, entries, 1158 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
960 dev_attr->max_qp_wqes + 1); 1159 dev_attr->max_qp_wqes + 1);
961 1160
1161 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1162 qp_init_attr->cap.max_recv_wr;
1163
962 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; 1164 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
963 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) 1165 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
964 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; 1166 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
967 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); 1169 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
968 1170
969 if (qp_init_attr->qp_type == IB_QPT_GSI) { 1171 if (qp_init_attr->qp_type == IB_QPT_GSI) {
1172 /* Allocate 1 more than what's provided */
1173 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1174 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1175 dev_attr->max_qp_wqes + 1);
1176 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1177 qp_init_attr->cap.max_send_wr;
970 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; 1178 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
971 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) 1179 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
972 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; 1180 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1006 } 1214 }
1007 1215
1008 } else { 1216 } else {
1217 /* Allocate 128 + 1 more than what's provided */
1218 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1219 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1220 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1221 dev_attr->max_qp_wqes +
1222 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1223 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1224
1225 /*
1226 * Reserve one slot for the phantom WQE. The application can
1227 * post one extra entry in this case, but allowing that avoids an
1228 * unexpected queue-full condition.
1229 */
1230
1231 qp->qplib_qp.sq.q_full_delta -= 1;
1232
1009 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; 1233 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1010 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; 1234 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1011 if (udata) { 1235 if (udata) {
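For regular (non-GSI) QPs the SQ is sized above with headroom that is then hidden from the application through q_full_delta, and one further slot is held back for the phantom WQE. A small stand-alone sketch of that arithmetic; the 128 comes from the "128 + 1" comment in the hunk (the real BNXT_QPLIB_RESERVED_QP_WRS value may differ), and the real code additionally clamps max_wqe to the device limit:

#include <stdio.h>

#define RESERVED_WRS 128	/* assumed value of BNXT_QPLIB_RESERVED_QP_WRS */

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max_send_wr = 300;		/* requested by the application */
	unsigned int entries, q_full_delta;

	entries = roundup_pow_of_two(max_send_wr + RESERVED_WRS + 1);
	q_full_delta = RESERVED_WRS + 1;
	q_full_delta -= 1;			/* one slot kept for the phantom WQE */

	printf("max_wqe=%u q_full_delta=%u usable=%u\n",
	       entries, q_full_delta, entries - q_full_delta);
	return 0;
}

With 300 requested WRs this yields a 512-entry SQ of which 384 entries remain visible to the application.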
@@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1025 1249
1026 qp->ib_qp.qp_num = qp->qplib_qp.id; 1250 qp->ib_qp.qp_num = qp->qplib_qp.id;
1027 spin_lock_init(&qp->sq_lock); 1251 spin_lock_init(&qp->sq_lock);
1252 spin_lock_init(&qp->rq_lock);
1028 1253
1029 if (udata) { 1254 if (udata) {
1030 struct bnxt_re_qp_resp resp; 1255 struct bnxt_re_qp_resp resp;
@@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
1129 } 1354 }
1130} 1355}
1131 1356
1132static int __from_ib_access_flags(int iflags)
1133{
1134 int qflags = 0;
1135
1136 if (iflags & IB_ACCESS_LOCAL_WRITE)
1137 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1138 if (iflags & IB_ACCESS_REMOTE_READ)
1139 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
1140 if (iflags & IB_ACCESS_REMOTE_WRITE)
1141 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
1142 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
1143 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
1144 if (iflags & IB_ACCESS_MW_BIND)
1145 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
1146 if (iflags & IB_ZERO_BASED)
1147 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
1148 if (iflags & IB_ACCESS_ON_DEMAND)
1149 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
1150 return qflags;
1151};
1152
1153static enum ib_access_flags __to_ib_access_flags(int qflags)
1154{
1155 enum ib_access_flags iflags = 0;
1156
1157 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
1158 iflags |= IB_ACCESS_LOCAL_WRITE;
1159 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
1160 iflags |= IB_ACCESS_REMOTE_WRITE;
1161 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
1162 iflags |= IB_ACCESS_REMOTE_READ;
1163 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
1164 iflags |= IB_ACCESS_REMOTE_ATOMIC;
1165 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
1166 iflags |= IB_ACCESS_MW_BIND;
1167 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
1168 iflags |= IB_ZERO_BASED;
1169 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
1170 iflags |= IB_ACCESS_ON_DEMAND;
1171 return iflags;
1172};
1173
1174static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, 1357static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1175 struct bnxt_re_qp *qp1_qp, 1358 struct bnxt_re_qp *qp1_qp,
1176 int qp_attr_mask) 1359 int qp_attr_mask)
@@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1378 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); 1561 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1379 qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 1562 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1380 dev_attr->max_qp_wqes + 1); 1563 dev_attr->max_qp_wqes + 1);
1564 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1565 qp_attr->cap.max_send_wr;
1566 /*
1567 * Reserve one slot for the phantom WQE. Some applications can
1568 * post one extra entry in this case; allowing this avoids an
1569 * unexpected queue-full condition.
1570 */
1571 qp->qplib_qp.sq.q_full_delta -= 1;
1381 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; 1572 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1382 if (qp->qplib_qp.rq.max_wqe) { 1573 if (qp->qplib_qp.rq.max_wqe) {
1383 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); 1574 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1384 qp->qplib_qp.rq.max_wqe = 1575 qp->qplib_qp.rq.max_wqe =
1385 min_t(u32, entries, dev_attr->max_qp_wqes + 1); 1576 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1577 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1578 qp_attr->cap.max_recv_wr;
1386 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; 1579 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1387 } else { 1580 } else {
1388 /* SRQ was used prior, just ignore the RQ caps */ 1581 /* SRQ was used prior, just ignore the RQ caps */
@@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
1883 return payload_sz; 2076 return payload_sz;
1884} 2077}
1885 2078
2079static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2080{
2081 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2082 qp->ib_qp.qp_type == IB_QPT_GSI ||
2083 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2084 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2085 int qp_attr_mask;
2086 struct ib_qp_attr qp_attr;
2087
2088 qp_attr_mask = IB_QP_STATE;
2089 qp_attr.qp_state = IB_QPS_RTS;
2090 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2091 qp->qplib_qp.wqe_cnt = 0;
2092 }
2093}
2094
1886static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, 2095static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
1887 struct bnxt_re_qp *qp, 2096 struct bnxt_re_qp *qp,
1888 struct ib_send_wr *wr) 2097 struct ib_send_wr *wr)
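bnxt_ud_qp_hw_stall_workaround() above kicks a UD, GSI or raw-Ethertype QP back to RTS once it has posted BNXT_RE_UD_QP_HW_STALL (0x400000) WQEs and then clears the counter. A toy model of that counter logic, with the modify-QP call stubbed out and the check done per WQE rather than per verb call:

#include <stdint.h>
#include <stdio.h>

#define UD_QP_HW_STALL 0x400000

struct toy_qp {
	uint64_t wqe_cnt;
};

static void modify_to_rts(struct toy_qp *qp)
{
	/* stand-in for bnxt_re_modify_qp(..., IB_QPS_RTS, ...) */
	printf("re-arming QP after %llu WQEs\n",
	       (unsigned long long)qp->wqe_cnt);
}

static void post_send(struct toy_qp *qp)
{
	qp->wqe_cnt++;
	if (qp->wqe_cnt == UD_QP_HW_STALL) {
		modify_to_rts(qp);
		qp->wqe_cnt = 0;
	}
}

int main(void)
{
	struct toy_qp qp = { 0 };
	uint64_t i;

	for (i = 0; i < 2 * UD_QP_HW_STALL + 5; i++)
		post_send(&qp);
	printf("final wqe_cnt=%llu\n", (unsigned long long)qp.wqe_cnt);
	return 0;
}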
@@ -1928,6 +2137,7 @@ bad:
1928 wr = wr->next; 2137 wr = wr->next;
1929 } 2138 }
1930 bnxt_qplib_post_send_db(&qp->qplib_qp); 2139 bnxt_qplib_post_send_db(&qp->qplib_qp);
2140 bnxt_ud_qp_hw_stall_workaround(qp);
1931 spin_unlock_irqrestore(&qp->sq_lock, flags); 2141 spin_unlock_irqrestore(&qp->sq_lock, flags);
1932 return rc; 2142 return rc;
1933} 2143}
@@ -2024,6 +2234,7 @@ bad:
2024 wr = wr->next; 2234 wr = wr->next;
2025 } 2235 }
2026 bnxt_qplib_post_send_db(&qp->qplib_qp); 2236 bnxt_qplib_post_send_db(&qp->qplib_qp);
2237 bnxt_ud_qp_hw_stall_workaround(qp);
2027 spin_unlock_irqrestore(&qp->sq_lock, flags); 2238 spin_unlock_irqrestore(&qp->sq_lock, flags);
2028 2239
2029 return rc; 2240 return rc;
@@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2071 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2282 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2072 struct bnxt_qplib_swqe wqe; 2283 struct bnxt_qplib_swqe wqe;
2073 int rc = 0, payload_sz = 0; 2284 int rc = 0, payload_sz = 0;
2285 unsigned long flags;
2286 u32 count = 0;
2074 2287
2288 spin_lock_irqsave(&qp->rq_lock, flags);
2075 while (wr) { 2289 while (wr) {
2076 /* House keeping */ 2290 /* House keeping */
2077 memset(&wqe, 0, sizeof(wqe)); 2291 memset(&wqe, 0, sizeof(wqe));
@@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2100 *bad_wr = wr; 2314 *bad_wr = wr;
2101 break; 2315 break;
2102 } 2316 }
2317
2318 /* Ring the DB when the number of RQEs posted reaches the threshold */
2319 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2320 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2321 count = 0;
2322 }
2323
2103 wr = wr->next; 2324 wr = wr->next;
2104 } 2325 }
2105 bnxt_qplib_post_recv_db(&qp->qplib_qp); 2326
2327 if (count)
2328 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2329
2330 spin_unlock_irqrestore(&qp->rq_lock, flags);
2331
2106 return rc; 2332 return rc;
2107} 2333}
2108 2334
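The receive path above now batches doorbells: one ring per BNXT_RE_RQ_WQE_THRESHOLD (32) posted RQEs, plus a final ring for any partial batch, all under the new rq_lock. A stand-alone sketch of the batching with the doorbell stubbed out:

#include <stdio.h>

#define RQ_WQE_THRESHOLD 32

static void ring_db(int posted)
{
	/* stand-in for bnxt_qplib_post_recv_db() */
	printf("doorbell for %d RQEs\n", posted);
}

static void post_recv_list(int nr_wrs)
{
	int count = 0;

	while (nr_wrs--) {
		/* ...queue one RQE... */
		if (++count >= RQ_WQE_THRESHOLD) {
			ring_db(count);
			count = 0;
		}
	}
	if (count)
		ring_db(count);
}

int main(void)
{
	post_recv_list(70);	/* expect doorbells for 32, 32 and 6 RQEs */
	return 0;
}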
@@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2643 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 2869 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2644} 2870}
2645 2871
2872static int send_phantom_wqe(struct bnxt_re_qp *qp)
2873{
2874 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2875 unsigned long flags;
2876 int rc = 0;
2877
2878 spin_lock_irqsave(&qp->sq_lock, flags);
2879
2880 rc = bnxt_re_bind_fence_mw(lib_qp);
2881 if (!rc) {
2882 lib_qp->sq.phantom_wqe_cnt++;
2883 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2884 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2885 lib_qp->id, lib_qp->sq.hwq.prod,
2886 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2887 lib_qp->sq.phantom_wqe_cnt);
2888 }
2889
2890 spin_unlock_irqrestore(&qp->sq_lock, flags);
2891 return rc;
2892}
2893
2646int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) 2894int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2647{ 2895{
2648 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 2896 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2649 struct bnxt_re_qp *qp; 2897 struct bnxt_re_qp *qp;
2650 struct bnxt_qplib_cqe *cqe; 2898 struct bnxt_qplib_cqe *cqe;
2651 int i, ncqe, budget; 2899 int i, ncqe, budget;
2900 struct bnxt_qplib_q *sq;
2901 struct bnxt_qplib_qp *lib_qp;
2652 u32 tbl_idx; 2902 u32 tbl_idx;
2653 struct bnxt_re_sqp_entries *sqp_entry = NULL; 2903 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2654 unsigned long flags; 2904 unsigned long flags;
@@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2661 } 2911 }
2662 cqe = &cq->cql[0]; 2912 cqe = &cq->cql[0];
2663 while (budget) { 2913 while (budget) {
2664 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); 2914 lib_qp = NULL;
2915 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2916 if (lib_qp) {
2917 sq = &lib_qp->sq;
2918 if (sq->send_phantom) {
2919 qp = container_of(lib_qp,
2920 struct bnxt_re_qp, qplib_qp);
2921 if (send_phantom_wqe(qp) == -ENOMEM)
2922 dev_err(rdev_to_dev(cq->rdev),
2923 "Phantom failed! Scheduled to send again\n");
2924 else
2925 sq->send_phantom = false;
2926 }
2927 }
2928
2665 if (!ncqe) 2929 if (!ncqe)
2666 break; 2930 break;
2667 2931
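In bnxt_re_poll_cq() above, a pending phantom send is retried on later polls only when the bind fails with -ENOMEM; any other outcome clears sq->send_phantom. A toy model of that retry behaviour:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int attempts;

static int send_phantom_wqe(void)
{
	/* pretend the SQ is briefly out of room on the first attempt */
	return ++attempts == 1 ? -ENOMEM : 0;
}

int main(void)
{
	bool send_phantom = true;
	int polls = 0;

	while (send_phantom) {
		polls++;
		if (send_phantom_wqe() == -ENOMEM)
			printf("poll %d: phantom deferred\n", polls);
		else
			send_phantom = false;
	}
	printf("phantom posted on poll %d\n", polls);
	return 0;
}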
@@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
2822 struct bnxt_re_dev *rdev = mr->rdev; 3086 struct bnxt_re_dev *rdev = mr->rdev;
2823 int rc; 3087 int rc;
2824 3088
3089 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3090 if (rc) {
3091 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3092 return rc;
3093 }
3094
2825 if (mr->npages && mr->pages) { 3095 if (mr->npages && mr->pages) {
2826 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, 3096 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
2827 &mr->qplib_frpl); 3097 &mr->qplib_frpl);
@@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
2829 mr->npages = 0; 3099 mr->npages = 0;
2830 mr->pages = NULL; 3100 mr->pages = NULL;
2831 } 3101 }
2832 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
2833
2834 if (!IS_ERR_OR_NULL(mr->ib_umem)) 3102 if (!IS_ERR_OR_NULL(mr->ib_umem))
2835 ib_umem_release(mr->ib_umem); 3103 ib_umem_release(mr->ib_umem);
2836 3104
@@ -2914,97 +3182,52 @@ fail:
2914 return ERR_PTR(rc); 3182 return ERR_PTR(rc);
2915} 3183}
2916 3184
2917/* Fast Memory Regions */ 3185struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
2918struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, 3186 struct ib_udata *udata)
2919 struct ib_fmr_attr *fmr_attr)
2920{ 3187{
2921 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3188 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2922 struct bnxt_re_dev *rdev = pd->rdev; 3189 struct bnxt_re_dev *rdev = pd->rdev;
2923 struct bnxt_re_fmr *fmr; 3190 struct bnxt_re_mw *mw;
2924 int rc; 3191 int rc;
2925 3192
2926 if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || 3193 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
2927 fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { 3194 if (!mw)
2928 dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
2929 return ERR_PTR(-ENOMEM); 3195 return ERR_PTR(-ENOMEM);
2930 } 3196 mw->rdev = rdev;
2931 fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); 3197 mw->qplib_mw.pd = &pd->qplib_pd;
2932 if (!fmr)
2933 return ERR_PTR(-ENOMEM);
2934
2935 fmr->rdev = rdev;
2936 fmr->qplib_fmr.pd = &pd->qplib_pd;
2937 fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2938 3198
2939 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); 3199 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
2940 if (rc) 3200 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3201 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3202 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3203 if (rc) {
3204 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
2941 goto fail; 3205 goto fail;
3206 }
3207 mw->ib_mw.rkey = mw->qplib_mw.rkey;
2942 3208
2943 fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); 3209 atomic_inc(&rdev->mw_count);
2944 fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; 3210 return &mw->ib_mw;
2945 fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
2946 3211
2947 atomic_inc(&rdev->mr_count);
2948 return &fmr->ib_fmr;
2949fail: 3212fail:
2950 kfree(fmr); 3213 kfree(mw);
2951 return ERR_PTR(rc); 3214 return ERR_PTR(rc);
2952} 3215}
2953 3216
2954int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, 3217int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
2955 u64 iova)
2956{ 3218{
2957 struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, 3219 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
2958 ib_fmr); 3220 struct bnxt_re_dev *rdev = mw->rdev;
2959 struct bnxt_re_dev *rdev = fmr->rdev;
2960 int rc; 3221 int rc;
2961 3222
2962 fmr->qplib_fmr.va = iova; 3223 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
2963 fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; 3224 if (rc) {
2964 3225 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
2965 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, 3226 return rc;
2966 list_len, true);
2967 if (rc)
2968 dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
2969 fmr->ib_fmr.lkey);
2970 return rc;
2971}
2972
2973int bnxt_re_unmap_fmr(struct list_head *fmr_list)
2974{
2975 struct bnxt_re_dev *rdev;
2976 struct bnxt_re_fmr *fmr;
2977 struct ib_fmr *ib_fmr;
2978 int rc = 0;
2979
2980 /* Validate each FMRs inside the fmr_list */
2981 list_for_each_entry(ib_fmr, fmr_list, list) {
2982 fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
2983 rdev = fmr->rdev;
2984
2985 if (rdev) {
2986 rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
2987 &fmr->qplib_fmr, true);
2988 if (rc)
2989 break;
2990 }
2991 } 3227 }
2992 return rc;
2993}
2994
2995int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
2996{
2997 struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
2998 ib_fmr);
2999 struct bnxt_re_dev *rdev = fmr->rdev;
3000 int rc;
3001 3228
3002 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); 3229 kfree(mw);
3003 if (rc) 3230 atomic_dec(&rdev->mw_count);
3004 dev_err(rdev_to_dev(rdev), "Failed to free FMR");
3005
3006 kfree(fmr);
3007 atomic_dec(&rdev->mr_count);
3008 return rc; 3231 return rc;
3009} 3232}
3010 3233
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 5c3d71765454..6c160f6a5398 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
44 u32 refcnt; 44 u32 refcnt;
45}; 45};
46 46
47#define BNXT_RE_FENCE_BYTES 64
48struct bnxt_re_fence_data {
49 u32 size;
50 u8 va[BNXT_RE_FENCE_BYTES];
51 dma_addr_t dma_addr;
52 struct bnxt_re_mr *mr;
53 struct ib_mw *mw;
54 struct bnxt_qplib_swqe bind_wqe;
55 u32 bind_rkey;
56};
57
47struct bnxt_re_pd { 58struct bnxt_re_pd {
48 struct bnxt_re_dev *rdev; 59 struct bnxt_re_dev *rdev;
49 struct ib_pd ib_pd; 60 struct ib_pd ib_pd;
50 struct bnxt_qplib_pd qplib_pd; 61 struct bnxt_qplib_pd qplib_pd;
51 struct bnxt_qplib_dpi dpi; 62 struct bnxt_qplib_dpi dpi;
63 struct bnxt_re_fence_data fence;
52}; 64};
53 65
54struct bnxt_re_ah { 66struct bnxt_re_ah {
@@ -62,6 +74,7 @@ struct bnxt_re_qp {
62 struct bnxt_re_dev *rdev; 74 struct bnxt_re_dev *rdev;
63 struct ib_qp ib_qp; 75 struct ib_qp ib_qp;
64 spinlock_t sq_lock; /* protect sq */ 76 spinlock_t sq_lock; /* protect sq */
77 spinlock_t rq_lock; /* protect rq */
65 struct bnxt_qplib_qp qplib_qp; 78 struct bnxt_qplib_qp qplib_qp;
66 struct ib_umem *sumem; 79 struct ib_umem *sumem;
67 struct ib_umem *rumem; 80 struct ib_umem *rumem;
@@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
181struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, 194struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
182 u32 max_num_sg); 195 u32 max_num_sg);
183int bnxt_re_dereg_mr(struct ib_mr *mr); 196int bnxt_re_dereg_mr(struct ib_mr *mr);
184struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, 197struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
185 struct ib_fmr_attr *fmr_attr); 198 struct ib_udata *udata);
186int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, 199int bnxt_re_dealloc_mw(struct ib_mw *mw);
187 u64 iova);
188int bnxt_re_unmap_fmr(struct list_head *fmr_list);
189int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
190struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 200struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
191 u64 virt_addr, int mr_access_flags, 201 u64 virt_addr, int mr_access_flags,
192 struct ib_udata *udata); 202 struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 5d355401179b..1fce5e73216b 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
507 ibdev->dereg_mr = bnxt_re_dereg_mr; 507 ibdev->dereg_mr = bnxt_re_dereg_mr;
508 ibdev->alloc_mr = bnxt_re_alloc_mr; 508 ibdev->alloc_mr = bnxt_re_alloc_mr;
509 ibdev->map_mr_sg = bnxt_re_map_mr_sg; 509 ibdev->map_mr_sg = bnxt_re_map_mr_sg;
510 ibdev->alloc_fmr = bnxt_re_alloc_fmr;
511 ibdev->map_phys_fmr = bnxt_re_map_phys_fmr;
512 ibdev->unmap_fmr = bnxt_re_unmap_fmr;
513 ibdev->dealloc_fmr = bnxt_re_dealloc_fmr;
514 510
515 ibdev->reg_user_mr = bnxt_re_reg_user_mr; 511 ibdev->reg_user_mr = bnxt_re_reg_user_mr;
516 ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; 512 ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 43d08b5e9085..f05500bcdcf1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
284{ 284{
285 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 285 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
286 struct cmdq_create_qp1 req; 286 struct cmdq_create_qp1 req;
287 struct creq_create_qp1_resp *resp; 287 struct creq_create_qp1_resp resp;
288 struct bnxt_qplib_pbl *pbl; 288 struct bnxt_qplib_pbl *pbl;
289 struct bnxt_qplib_q *sq = &qp->sq; 289 struct bnxt_qplib_q *sq = &qp->sq;
290 struct bnxt_qplib_q *rq = &qp->rq; 290 struct bnxt_qplib_q *rq = &qp->rq;
@@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
394 394
395 req.pd_id = cpu_to_le32(qp->pd->id); 395 req.pd_id = cpu_to_le32(qp->pd->id);
396 396
397 resp = (struct creq_create_qp1_resp *) 397 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
398 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 398 (void *)&resp, NULL, 0);
399 NULL, 0); 399 if (rc)
400 if (!resp) {
401 dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
402 rc = -EINVAL;
403 goto fail;
404 }
405 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
406 /* Cmd timed out */
407 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
408 rc = -ETIMEDOUT;
409 goto fail;
410 }
411 if (resp->status ||
412 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
413 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
414 dev_err(&rcfw->pdev->dev,
415 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
416 resp->status, le16_to_cpu(req.cookie),
417 le16_to_cpu(resp->cookie));
418 rc = -EINVAL;
419 goto fail; 400 goto fail;
420 } 401
421 qp->id = le32_to_cpu(resp->xid); 402 qp->id = le32_to_cpu(resp.xid);
422 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 403 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
423 sq->flush_in_progress = false; 404 sq->flush_in_progress = false;
424 rq->flush_in_progress = false; 405 rq->flush_in_progress = false;
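This hunk, and the matching ones for CREATE_QP, MODIFY_QP, QUERY_QP, DESTROY_QP and the CQ commands below, all switch from receiving a response pointer and polling/validating the cookie by hand to passing a caller-owned response struct and checking the return code. The shape of the new calling convention, modeled stand-alone with a stubbed command helper:

#include <stdio.h>

struct toy_resp {
	unsigned int xid;
};

/* stand-in for bnxt_qplib_rcfw_send_message(rcfw, req, resp, sb, is_block) */
static int send_message(struct toy_resp *resp)
{
	resp->xid = 0x42;	/* pretend the firmware answered */
	return 0;		/* 0 on success, -errno on failure */
}

int main(void)
{
	struct toy_resp resp;
	int rc;

	rc = send_message(&resp);
	if (rc) {
		fprintf(stderr, "command failed: %d\n", rc);
		return 1;
	}
	printf("qp id = %u\n", resp.xid);
	return 0;
}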
@@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
442 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 423 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
443 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; 424 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
444 struct cmdq_create_qp req; 425 struct cmdq_create_qp req;
445 struct creq_create_qp_resp *resp; 426 struct creq_create_qp_resp resp;
446 struct bnxt_qplib_pbl *pbl; 427 struct bnxt_qplib_pbl *pbl;
447 struct sq_psn_search **psn_search_ptr; 428 struct sq_psn_search **psn_search_ptr;
448 unsigned long int psn_search, poff = 0; 429 unsigned long int psn_search, poff = 0;
@@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
627 } 608 }
628 req.pd_id = cpu_to_le32(qp->pd->id); 609 req.pd_id = cpu_to_le32(qp->pd->id);
629 610
630 resp = (struct creq_create_qp_resp *) 611 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
631 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 612 (void *)&resp, NULL, 0);
632 NULL, 0); 613 if (rc)
633 if (!resp) {
634 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
635 rc = -EINVAL;
636 goto fail;
637 }
638 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
639 /* Cmd timed out */
640 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
641 rc = -ETIMEDOUT;
642 goto fail;
643 }
644 if (resp->status ||
645 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
646 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
647 dev_err(&rcfw->pdev->dev,
648 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
649 resp->status, le16_to_cpu(req.cookie),
650 le16_to_cpu(resp->cookie));
651 rc = -EINVAL;
652 goto fail; 614 goto fail;
653 } 615
654 qp->id = le32_to_cpu(resp->xid); 616 qp->id = le32_to_cpu(resp.xid);
655 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 617 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
656 sq->flush_in_progress = false; 618 sq->flush_in_progress = false;
657 rq->flush_in_progress = false; 619 rq->flush_in_progress = false;
@@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
769{ 731{
770 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 732 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
771 struct cmdq_modify_qp req; 733 struct cmdq_modify_qp req;
772 struct creq_modify_qp_resp *resp; 734 struct creq_modify_qp_resp resp;
773 u16 cmd_flags = 0, pkey; 735 u16 cmd_flags = 0, pkey;
774 u32 temp32[4]; 736 u32 temp32[4];
775 u32 bmask; 737 u32 bmask;
738 int rc;
776 739
777 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); 740 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
778 741
@@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
862 825
863 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); 826 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
864 827
865 resp = (struct creq_modify_qp_resp *) 828 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
866 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 829 (void *)&resp, NULL, 0);
867 NULL, 0); 830 if (rc)
868 if (!resp) { 831 return rc;
869 dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
870 return -EINVAL;
871 }
872 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
873 /* Cmd timed out */
874 dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
875 return -ETIMEDOUT;
876 }
877 if (resp->status ||
878 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
879 dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
880 dev_err(&rcfw->pdev->dev,
881 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
882 resp->status, le16_to_cpu(req.cookie),
883 le16_to_cpu(resp->cookie));
884 return -EINVAL;
885 }
886 qp->cur_qp_state = qp->state; 832 qp->cur_qp_state = qp->state;
887 return 0; 833 return 0;
888} 834}
@@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
891{ 837{
892 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 838 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
893 struct cmdq_query_qp req; 839 struct cmdq_query_qp req;
894 struct creq_query_qp_resp *resp; 840 struct creq_query_qp_resp resp;
841 struct bnxt_qplib_rcfw_sbuf *sbuf;
895 struct creq_query_qp_resp_sb *sb; 842 struct creq_query_qp_resp_sb *sb;
896 u16 cmd_flags = 0; 843 u16 cmd_flags = 0;
897 u32 temp32[4]; 844 u32 temp32[4];
898 int i; 845 int i, rc = 0;
899 846
900 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); 847 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
901 848
849 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
850 if (!sbuf)
851 return -ENOMEM;
852 sb = sbuf->sb;
853
902 req.qp_cid = cpu_to_le32(qp->id); 854 req.qp_cid = cpu_to_le32(qp->id);
903 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 855 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
904 resp = (struct creq_query_qp_resp *) 856 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
905 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 857 (void *)sbuf, 0);
906 (void **)&sb, 0); 858 if (rc)
907 if (!resp) { 859 goto bail;
908 dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
909 return -EINVAL;
910 }
911 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
912 /* Cmd timed out */
913 dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
914 return -ETIMEDOUT;
915 }
916 if (resp->status ||
917 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
918 dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
919 dev_err(&rcfw->pdev->dev,
920 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
921 resp->status, le16_to_cpu(req.cookie),
922 le16_to_cpu(resp->cookie));
923 return -EINVAL;
924 }
925 /* Extract the context from the side buffer */ 860 /* Extract the context from the side buffer */
926 qp->state = sb->en_sqd_async_notify_state & 861 qp->state = sb->en_sqd_async_notify_state &
927 CREQ_QUERY_QP_RESP_SB_STATE_MASK; 862 CREQ_QUERY_QP_RESP_SB_STATE_MASK;
@@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
976 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); 911 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
977 memcpy(qp->smac, sb->src_mac, 6); 912 memcpy(qp->smac, sb->src_mac, 6);
978 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); 913 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
979 return 0; 914bail:
915 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
916 return rc;
980} 917}
981 918
982static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) 919static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
@@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1021{ 958{
1022 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 959 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1023 struct cmdq_destroy_qp req; 960 struct cmdq_destroy_qp req;
1024 struct creq_destroy_qp_resp *resp; 961 struct creq_destroy_qp_resp resp;
1025 unsigned long flags; 962 unsigned long flags;
1026 u16 cmd_flags = 0; 963 u16 cmd_flags = 0;
964 int rc;
1027 965
1028 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); 966 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1029 967
1030 req.qp_cid = cpu_to_le32(qp->id); 968 req.qp_cid = cpu_to_le32(qp->id);
1031 resp = (struct creq_destroy_qp_resp *) 969 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1032 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 970 (void *)&resp, NULL, 0);
1033 NULL, 0); 971 if (rc)
1034 if (!resp) { 972 return rc;
1035 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
1036 return -EINVAL;
1037 }
1038 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
1039 /* Cmd timed out */
1040 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
1041 return -ETIMEDOUT;
1042 }
1043 if (resp->status ||
1044 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
1045 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
1046 dev_err(&rcfw->pdev->dev,
1047 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
1048 resp->status, le16_to_cpu(req.cookie),
1049 le16_to_cpu(resp->cookie));
1050 return -EINVAL;
1051 }
1052 973
1053 /* Must walk the associated CQs to nullified the QP ptr */ 974 /* Must walk the associated CQs to nullified the QP ptr */
1054 spin_lock_irqsave(&qp->scq->hwq.lock, flags); 975 spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1162 rc = -EINVAL; 1083 rc = -EINVAL;
1163 goto done; 1084 goto done;
1164 } 1085 }
1165 if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == 1086
1166 HWQ_CMP(sq->hwq.cons, &sq->hwq)) { 1087 if (bnxt_qplib_queue_full(sq)) {
1088 dev_err(&sq->hwq.pdev->dev,
1089 "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
1090 sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1091 sq->q_full_delta);
1167 rc = -ENOMEM; 1092 rc = -ENOMEM;
1168 goto done; 1093 goto done;
1169 } 1094 }
@@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1373 } 1298 }
1374 1299
1375 sq->hwq.prod++; 1300 sq->hwq.prod++;
1301
1302 qp->wqe_cnt++;
1303
1376done: 1304done:
1377 return rc; 1305 return rc;
1378} 1306}
@@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1411 rc = -EINVAL; 1339 rc = -EINVAL;
1412 goto done; 1340 goto done;
1413 } 1341 }
1414 if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == 1342 if (bnxt_qplib_queue_full(rq)) {
1415 HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
1416 dev_err(&rq->hwq.pdev->dev, 1343 dev_err(&rq->hwq.pdev->dev,
1417 "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); 1344 "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
1418 rc = -EINVAL; 1345 rc = -EINVAL;
@@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1483{ 1410{
1484 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1411 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1485 struct cmdq_create_cq req; 1412 struct cmdq_create_cq req;
1486 struct creq_create_cq_resp *resp; 1413 struct creq_create_cq_resp resp;
1487 struct bnxt_qplib_pbl *pbl; 1414 struct bnxt_qplib_pbl *pbl;
1488 u16 cmd_flags = 0; 1415 u16 cmd_flags = 0;
1489 int rc; 1416 int rc;
@@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1525 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << 1452 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1526 CMDQ_CREATE_CQ_CNQ_ID_SFT); 1453 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1527 1454
1528 resp = (struct creq_create_cq_resp *) 1455 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1529 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1456 (void *)&resp, NULL, 0);
1530 NULL, 0); 1457 if (rc)
1531 if (!resp) {
1532 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
1533 return -EINVAL;
1534 }
1535 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
1536 /* Cmd timed out */
1537 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
1538 rc = -ETIMEDOUT;
1539 goto fail;
1540 }
1541 if (resp->status ||
1542 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
1543 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
1544 dev_err(&rcfw->pdev->dev,
1545 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
1546 resp->status, le16_to_cpu(req.cookie),
1547 le16_to_cpu(resp->cookie));
1548 rc = -EINVAL;
1549 goto fail; 1458 goto fail;
1550 } 1459
1551 cq->id = le32_to_cpu(resp->xid); 1460 cq->id = le32_to_cpu(resp.xid);
1552 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; 1461 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1553 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; 1462 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1554 init_waitqueue_head(&cq->waitq); 1463 init_waitqueue_head(&cq->waitq);
@@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1566{ 1475{
1567 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1476 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1568 struct cmdq_destroy_cq req; 1477 struct cmdq_destroy_cq req;
1569 struct creq_destroy_cq_resp *resp; 1478 struct creq_destroy_cq_resp resp;
1570 u16 cmd_flags = 0; 1479 u16 cmd_flags = 0;
1480 int rc;
1571 1481
1572 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); 1482 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
1573 1483
1574 req.cq_cid = cpu_to_le32(cq->id); 1484 req.cq_cid = cpu_to_le32(cq->id);
1575 resp = (struct creq_destroy_cq_resp *) 1485 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1576 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1486 (void *)&resp, NULL, 0);
1577 NULL, 0); 1487 if (rc)
1578 if (!resp) { 1488 return rc;
1579 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
1580 return -EINVAL;
1581 }
1582 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
1583 /* Cmd timed out */
1584 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
1585 return -ETIMEDOUT;
1586 }
1587 if (resp->status ||
1588 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
1589 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
1590 dev_err(&rcfw->pdev->dev,
1591 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
1592 resp->status, le16_to_cpu(req.cookie),
1593 le16_to_cpu(resp->cookie));
1594 return -EINVAL;
1595 }
1596 bnxt_qplib_free_hwq(res->pdev, &cq->hwq); 1489 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1597 return 0; 1490 return 0;
1598} 1491}
@@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
1664 return rc; 1557 return rc;
1665} 1558}
1666 1559
1560/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
1561 * CQEs are tracked from sw_cq_cons to max_element but are valid only if VALID=1
1562 */
1563static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
1564 u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
1565{
1566 struct bnxt_qplib_q *sq = &qp->sq;
1567 struct bnxt_qplib_swq *swq;
1568 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
1569 struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
1570 struct cq_req *peek_req_hwcqe;
1571 struct bnxt_qplib_qp *peek_qp;
1572 struct bnxt_qplib_q *peek_sq;
1573 int i, rc = 0;
1574
1575 /* Normal mode */
1576 /* Check for the psn_search marking before completing */
1577 swq = &sq->swq[sw_sq_cons];
1578 if (swq->psn_search &&
1579 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
1580 /* Unmark */
1581 swq->psn_search->flags_next_psn = cpu_to_le32
1582 (le32_to_cpu(swq->psn_search->flags_next_psn)
1583 & ~0x80000000);
1584 dev_dbg(&cq->hwq.pdev->dev,
1585 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
1586 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1587 sq->condition = true;
1588 sq->send_phantom = true;
1589
1590 /* TODO: Only ARM if the previous SQE is ARMALL */
1591 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
1592
1593 rc = -EAGAIN;
1594 goto out;
1595 }
1596 if (sq->condition) {
1597 /* Peek at the completions */
1598 peek_raw_cq_cons = cq->hwq.cons;
1599 peek_sw_cq_cons = cq_cons;
1600 i = cq->hwq.max_elements;
1601 while (i--) {
1602 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
1603 peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
1604 peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
1605 [CQE_IDX(peek_sw_cq_cons)];
1606 /* If the next hwcqe is VALID */
1607 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
1608 cq->hwq.max_elements)) {
1609 /* If the next hwcqe is a REQ */
1610 if ((peek_hwcqe->cqe_type_toggle &
1611 CQ_BASE_CQE_TYPE_MASK) ==
1612 CQ_BASE_CQE_TYPE_REQ) {
1613 peek_req_hwcqe = (struct cq_req *)
1614 peek_hwcqe;
1615 peek_qp = (struct bnxt_qplib_qp *)
1616 ((unsigned long)
1617 le64_to_cpu
1618 (peek_req_hwcqe->qp_handle));
1619 peek_sq = &peek_qp->sq;
1620 peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
1621 peek_req_hwcqe->sq_cons_idx) - 1
1622 , &sq->hwq);
1623 /* If the hwcqe's sq's wr_id matches */
1624 if (peek_sq == sq &&
1625 sq->swq[peek_sq_cons_idx].wr_id ==
1626 BNXT_QPLIB_FENCE_WRID) {
1627 /*
1628 * Unbreak only if the phantom
1629 * comes back
1630 */
1631 dev_dbg(&cq->hwq.pdev->dev,
1632 "FP:Got Phantom CQE");
1633 sq->condition = false;
1634 sq->single = true;
1635 rc = 0;
1636 goto out;
1637 }
1638 }
1639 /* Valid but not the phantom, so keep looping */
1640 } else {
1641 /* Not valid yet, just exit and wait */
1642 rc = -EINVAL;
1643 goto out;
1644 }
1645 peek_sw_cq_cons++;
1646 peek_raw_cq_cons++;
1647 }
1648 dev_err(&cq->hwq.pdev->dev,
1649 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
1650 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1651 rc = -EINVAL;
1652 }
1653out:
1654 return rc;
1655}
1656
1667static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, 1657static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1668 struct cq_req *hwcqe, 1658 struct cq_req *hwcqe,
1669 struct bnxt_qplib_cqe **pcqe, int *budget) 1659 struct bnxt_qplib_cqe **pcqe, int *budget,
1660 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
1670{ 1661{
1671 struct bnxt_qplib_qp *qp; 1662 struct bnxt_qplib_qp *qp;
1672 struct bnxt_qplib_q *sq; 1663 struct bnxt_qplib_q *sq;
1673 struct bnxt_qplib_cqe *cqe; 1664 struct bnxt_qplib_cqe *cqe;
1674 u32 sw_cons, cqe_cons; 1665 u32 sw_sq_cons, cqe_sq_cons;
1666 struct bnxt_qplib_swq *swq;
1675 int rc = 0; 1667 int rc = 0;
1676 1668
1677 qp = (struct bnxt_qplib_qp *)((unsigned long) 1669 qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1683 } 1675 }
1684 sq = &qp->sq; 1676 sq = &qp->sq;
1685 1677
1686 cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); 1678 cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
1687 if (cqe_cons > sq->hwq.max_elements) { 1679 if (cqe_sq_cons > sq->hwq.max_elements) {
1688 dev_err(&cq->hwq.pdev->dev, 1680 dev_err(&cq->hwq.pdev->dev,
1689 "QPLIB: FP: CQ Process req reported "); 1681 "QPLIB: FP: CQ Process req reported ");
1690 dev_err(&cq->hwq.pdev->dev, 1682 dev_err(&cq->hwq.pdev->dev,
1691 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", 1683 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
1692 cqe_cons, sq->hwq.max_elements); 1684 cqe_sq_cons, sq->hwq.max_elements);
1693 return -EINVAL; 1685 return -EINVAL;
1694 } 1686 }
1695 /* If we were in the middle of flushing the SQ, continue */ 1687 /* If we were in the middle of flushing the SQ, continue */
@@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1698 1690
1699 /* Require to walk the sq's swq to fabricate CQEs for all previously 1691 /* Require to walk the sq's swq to fabricate CQEs for all previously
1700 * signaled SWQEs due to CQE aggregation from the current sq cons 1692 * signaled SWQEs due to CQE aggregation from the current sq cons
1701 * to the cqe_cons 1693 * to the cqe_sq_cons
1702 */ 1694 */
1703 cqe = *pcqe; 1695 cqe = *pcqe;
1704 while (*budget) { 1696 while (*budget) {
1705 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); 1697 sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1706 if (sw_cons == cqe_cons) 1698 if (sw_sq_cons == cqe_sq_cons)
1699 /* Done */
1707 break; 1700 break;
1701
1702 swq = &sq->swq[sw_sq_cons];
1708 memset(cqe, 0, sizeof(*cqe)); 1703 memset(cqe, 0, sizeof(*cqe));
1709 cqe->opcode = CQ_BASE_CQE_TYPE_REQ; 1704 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1710 cqe->qp_handle = (u64)(unsigned long)qp; 1705 cqe->qp_handle = (u64)(unsigned long)qp;
1711 cqe->src_qp = qp->id; 1706 cqe->src_qp = qp->id;
1712 cqe->wr_id = sq->swq[sw_cons].wr_id; 1707 cqe->wr_id = swq->wr_id;
1713 cqe->type = sq->swq[sw_cons].type; 1708 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
1709 goto skip;
1710 cqe->type = swq->type;
1714 1711
1715 /* For the last CQE, check for status. For errors, regardless 1712 /* For the last CQE, check for status. For errors, regardless
1716 * of the request being signaled or not, it must complete with 1713 * of the request being signaled or not, it must complete with
1717 * the hwcqe error status 1714 * the hwcqe error status
1718 */ 1715 */
1719 if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && 1716 if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
1720 hwcqe->status != CQ_REQ_STATUS_OK) { 1717 hwcqe->status != CQ_REQ_STATUS_OK) {
1721 cqe->status = hwcqe->status; 1718 cqe->status = hwcqe->status;
1722 dev_err(&cq->hwq.pdev->dev, 1719 dev_err(&cq->hwq.pdev->dev,
1723 "QPLIB: FP: CQ Processed Req "); 1720 "QPLIB: FP: CQ Processed Req ");
1724 dev_err(&cq->hwq.pdev->dev, 1721 dev_err(&cq->hwq.pdev->dev,
1725 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", 1722 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
1726 sw_cons, cqe->wr_id, cqe->status); 1723 sw_sq_cons, cqe->wr_id, cqe->status);
1727 cqe++; 1724 cqe++;
1728 (*budget)--; 1725 (*budget)--;
1729 sq->flush_in_progress = true; 1726 sq->flush_in_progress = true;
1730 /* Must block new posting of SQ and RQ */ 1727 /* Must block new posting of SQ and RQ */
1731 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 1728 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
1729 sq->condition = false;
1730 sq->single = false;
1732 } else { 1731 } else {
1733 if (sq->swq[sw_cons].flags & 1732 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
1734 SQ_SEND_FLAGS_SIGNAL_COMP) { 1733 /* Before we complete, do WA 9060 */
1734 if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
1735 cqe_sq_cons)) {
1736 *lib_qp = qp;
1737 goto out;
1738 }
1735 cqe->status = CQ_REQ_STATUS_OK; 1739 cqe->status = CQ_REQ_STATUS_OK;
1736 cqe++; 1740 cqe++;
1737 (*budget)--; 1741 (*budget)--;
1738 } 1742 }
1739 } 1743 }
1744skip:
1740 sq->hwq.cons++; 1745 sq->hwq.cons++;
1746 if (sq->single)
1747 break;
1741 } 1748 }
1749out:
1742 *pcqe = cqe; 1750 *pcqe = cqe;
1743 if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { 1751 if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
1744 /* Out of budget */ 1752 /* Out of budget */
1745 rc = -EAGAIN; 1753 rc = -EAGAIN;
1746 goto done; 1754 goto done;
1747 } 1755 }
1756 /*
1757 * Back to normal completion mode only after it has completed all of
1758 * the WC for this CQE
1759 */
1760 sq->single = false;
1748 if (!sq->flush_in_progress) 1761 if (!sq->flush_in_progress)
1749 goto done; 1762 goto done;
1750flush: 1763flush:
@@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2074} 2087}
2075 2088
2076int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 2089int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2077 int num_cqes) 2090 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2078{ 2091{
2079 struct cq_base *hw_cqe, **hw_cqe_ptr; 2092 struct cq_base *hw_cqe, **hw_cqe_ptr;
2080 unsigned long flags; 2093 unsigned long flags;
@@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2099 case CQ_BASE_CQE_TYPE_REQ: 2112 case CQ_BASE_CQE_TYPE_REQ:
2100 rc = bnxt_qplib_cq_process_req(cq, 2113 rc = bnxt_qplib_cq_process_req(cq,
2101 (struct cq_req *)hw_cqe, 2114 (struct cq_req *)hw_cqe,
2102 &cqe, &budget); 2115 &cqe, &budget,
2116 sw_cons, lib_qp);
2103 break; 2117 break;
2104 case CQ_BASE_CQE_TYPE_RES_RC: 2118 case CQ_BASE_CQE_TYPE_RES_RC:
2105 rc = bnxt_qplib_cq_process_res_rc(cq, 2119 rc = bnxt_qplib_cq_process_res_rc(cq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index f0150f8da1e3..36b7b7db0e3f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -88,6 +88,7 @@ struct bnxt_qplib_swq {
88 88
89struct bnxt_qplib_swqe { 89struct bnxt_qplib_swqe {
90 /* General */ 90 /* General */
91#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
91 u64 wr_id; 92 u64 wr_id;
92 u8 reqs_type; 93 u8 reqs_type;
93 u8 type; 94 u8 type;
@@ -216,9 +217,16 @@ struct bnxt_qplib_q {
216 struct scatterlist *sglist; 217 struct scatterlist *sglist;
217 u32 nmap; 218 u32 nmap;
218 u32 max_wqe; 219 u32 max_wqe;
220 u16 q_full_delta;
219 u16 max_sge; 221 u16 max_sge;
220 u32 psn; 222 u32 psn;
221 bool flush_in_progress; 223 bool flush_in_progress;
224 bool condition;
225 bool single;
226 bool send_phantom;
227 u32 phantom_wqe_cnt;
228 u32 phantom_cqe_cnt;
229 u32 next_cq_cons;
222}; 230};
223 231
224struct bnxt_qplib_qp { 232struct bnxt_qplib_qp {
@@ -242,6 +250,7 @@ struct bnxt_qplib_qp {
242 u8 timeout; 250 u8 timeout;
243 u8 retry_cnt; 251 u8 retry_cnt;
244 u8 rnr_retry; 252 u8 rnr_retry;
253 u64 wqe_cnt;
245 u32 min_rnr_timer; 254 u32 min_rnr_timer;
246 u32 max_rd_atomic; 255 u32 max_rd_atomic;
247 u32 max_dest_rd_atomic; 256 u32 max_dest_rd_atomic;
@@ -301,6 +310,13 @@ struct bnxt_qplib_qp {
301 (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ 310 (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
302 !((raw_cons) & (cp_bit))) 311 !((raw_cons) & (cp_bit)))
303 312
313static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
314{
315 return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
316 &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
317 &qplib_q->hwq);
318}
319
304struct bnxt_qplib_cqe { 320struct bnxt_qplib_cqe {
305 u8 status; 321 u8 status;
306 u8 type; 322 u8 type;
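bnxt_qplib_queue_full() above reports the queue full while fewer than q_full_delta free slots remain, using the driver's masked ring-index comparison. A stand-alone model, assuming HWQ_CMP() is a power-of-two index mask:

#include <stdbool.h>
#include <stdio.h>

struct toy_hwq {
	unsigned int prod;
	unsigned int cons;
	unsigned int max_elements;	/* power of two */
};

#define HWQ_CMP(idx, hwq)	((idx) & ((hwq)->max_elements - 1))

static bool queue_full(struct toy_hwq *hwq, unsigned int q_full_delta)
{
	return HWQ_CMP(hwq->prod + q_full_delta, hwq) == HWQ_CMP(hwq->cons, hwq);
}

int main(void)
{
	struct toy_hwq hwq = { .prod = 0, .cons = 0, .max_elements = 16 };
	unsigned int delta = 2;		/* keep two slots in reserve */

	while (!queue_full(&hwq, delta))
		hwq.prod++;
	printf("stopped after %u posts (of %u slots, %u reserved)\n",
	       hwq.prod, hwq.max_elements, delta);
	return 0;
}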
@@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
432int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); 448int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
433int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); 449int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
434int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 450int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
435 int num); 451 int num, struct bnxt_qplib_qp **qp);
436void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); 452void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
437void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); 453void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
438int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); 454int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
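
The q_full_delta field and the bnxt_qplib_queue_full() helper added to qplib_fp.h above give the post paths a ring-occupancy test that treats the queue as full q_full_delta slots early, leaving reserved headroom ahead of the consumer. The standalone C sketch below models only the masking arithmetic of that check, assuming a power-of-two ring; the DEMO_* names are illustrative and are not part of the driver.

/*
 * Standalone model of the queue-full test added above: the queue
 * reports full once the producer index, advanced by the
 * q_full_delta reserved slots, wraps around to the consumer index.
 * Only the masking arithmetic mirrors the driver.
 */
#include <stdio.h>

#define DEMO_MAX_ELEMENTS 8u               /* must be a power of two */
#define DEMO_CMP(idx)     ((idx) & (DEMO_MAX_ELEMENTS - 1))

static int demo_queue_full(unsigned int prod, unsigned int cons,
                           unsigned int q_full_delta)
{
        return DEMO_CMP(prod + q_full_delta) == DEMO_CMP(cons);
}

int main(void)
{
        unsigned int cons = 0, delta = 2, prod = 0;

        /* With 8 slots and 2 reserved, the check trips at 6 outstanding. */
        while (!demo_queue_full(prod, cons, delta))
                prod++;
        printf("queue reports full at %u outstanding entries\n", prod - cons);
        return 0;
}

The driver expresses the same comparison with HWQ_CMP() against hwq.prod and hwq.cons in the hunk above; q_full_delta is simply the number of slots deliberately kept free.
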
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 23fb7260662b..16e42754dbec 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -39,72 +39,55 @@
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/prefetch.h> 41#include <linux/prefetch.h>
42#include <linux/delay.h>
43
42#include "roce_hsi.h" 44#include "roce_hsi.h"
43#include "qplib_res.h" 45#include "qplib_res.h"
44#include "qplib_rcfw.h" 46#include "qplib_rcfw.h"
45static void bnxt_qplib_service_creq(unsigned long data); 47static void bnxt_qplib_service_creq(unsigned long data);
46 48
47/* Hardware communication channel */ 49/* Hardware communication channel */
48int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 50static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
49{ 51{
50 u16 cbit; 52 u16 cbit;
51 int rc; 53 int rc;
52 54
53 cookie &= RCFW_MAX_COOKIE_VALUE;
54 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 55 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
55 if (!test_bit(cbit, rcfw->cmdq_bitmap))
56 dev_warn(&rcfw->pdev->dev,
57 "QPLIB: CMD bit %d for cookie 0x%x is not set?",
58 cbit, cookie);
59
60 rc = wait_event_timeout(rcfw->waitq, 56 rc = wait_event_timeout(rcfw->waitq,
61 !test_bit(cbit, rcfw->cmdq_bitmap), 57 !test_bit(cbit, rcfw->cmdq_bitmap),
62 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); 58 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
63 if (!rc) { 59 return rc ? 0 : -ETIMEDOUT;
64 dev_warn(&rcfw->pdev->dev,
65 "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
66 RCFW_CMD_WAIT_TIME_MS, cookie);
67 }
68
69 return rc;
70}; 60};
71 61
72int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 62static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
73{ 63{
74 u32 count = -1; 64 u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
75 u16 cbit; 65 u16 cbit;
76 66
77 cookie &= RCFW_MAX_COOKIE_VALUE;
78 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 67 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
79 if (!test_bit(cbit, rcfw->cmdq_bitmap)) 68 if (!test_bit(cbit, rcfw->cmdq_bitmap))
80 goto done; 69 goto done;
81 do { 70 do {
71 mdelay(1); /* 1m sec */
82 bnxt_qplib_service_creq((unsigned long)rcfw); 72 bnxt_qplib_service_creq((unsigned long)rcfw);
83 } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); 73 } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
84done: 74done:
85 return count; 75 return count ? 0 : -ETIMEDOUT;
86}; 76};
87 77
88void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 78static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
89 struct cmdq_base *req, void **crsbe, 79 struct creq_base *resp, void *sb, u8 is_block)
90 u8 is_block)
91{ 80{
92 struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
93 struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; 81 struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
94 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 82 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
95 struct bnxt_qplib_hwq *crsb = &rcfw->crsb; 83 struct bnxt_qplib_crsq *crsqe;
96 struct bnxt_qplib_crsqe *crsqe = NULL;
97 struct bnxt_qplib_crsbe **crsb_ptr;
98 u32 sw_prod, cmdq_prod; 84 u32 sw_prod, cmdq_prod;
99 u8 retry_cnt = 0xFF;
100 dma_addr_t dma_addr;
101 unsigned long flags; 85 unsigned long flags;
102 u32 size, opcode; 86 u32 size, opcode;
103 u16 cookie, cbit; 87 u16 cookie, cbit;
104 int pg, idx; 88 int pg, idx;
105 u8 *preq; 89 u8 *preq;
106 90
107retry:
108 opcode = req->opcode; 91 opcode = req->opcode;
109 if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 92 if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
110 (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && 93 (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@@ -112,63 +95,50 @@ retry:
112 dev_err(&rcfw->pdev->dev, 95 dev_err(&rcfw->pdev->dev,
113 "QPLIB: RCFW not initialized, reject opcode 0x%x", 96 "QPLIB: RCFW not initialized, reject opcode 0x%x",
114 opcode); 97 opcode);
115 return NULL; 98 return -EINVAL;
116 } 99 }
117 100
118 if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 101 if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
119 opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { 102 opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
120 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); 103 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
121 return NULL; 104 return -EINVAL;
122 } 105 }
123 106
124 /* Cmdq are in 16-byte units, each request can consume 1 or more 107 /* Cmdq are in 16-byte units, each request can consume 1 or more
125 * cmdqe 108 * cmdqe
126 */ 109 */
127 spin_lock_irqsave(&cmdq->lock, flags); 110 spin_lock_irqsave(&cmdq->lock, flags);
128 if (req->cmd_size > cmdq->max_elements - 111 if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
129 ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
130 (cmdq->max_elements - 1))) {
131 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); 112 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
132 spin_unlock_irqrestore(&cmdq->lock, flags); 113 spin_unlock_irqrestore(&cmdq->lock, flags);
133 114 return -EAGAIN;
134 if (!retry_cnt--)
135 return NULL;
136 goto retry;
137 } 115 }
138 116
139 retry_cnt = 0xFF;
140 117
141 cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; 118 cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
142 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 119 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
143 if (is_block) 120 if (is_block)
144 cookie |= RCFW_CMD_IS_BLOCKING; 121 cookie |= RCFW_CMD_IS_BLOCKING;
122
123 set_bit(cbit, rcfw->cmdq_bitmap);
145 req->cookie = cpu_to_le16(cookie); 124 req->cookie = cpu_to_le16(cookie);
146 if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { 125 crsqe = &rcfw->crsqe_tbl[cbit];
147 dev_err(&rcfw->pdev->dev, 126 if (crsqe->resp) {
148 "QPLIB: RCFW MAX outstanding cmd reached!");
149 atomic_dec(&rcfw->seq_num);
150 spin_unlock_irqrestore(&cmdq->lock, flags); 127 spin_unlock_irqrestore(&cmdq->lock, flags);
151 128 return -EBUSY;
152 if (!retry_cnt--)
153 return NULL;
154 goto retry;
155 } 129 }
156 /* Reserve a resp buffer slot if requested */ 130 memset(resp, 0, sizeof(*resp));
157 if (req->resp_size && crsbe) { 131 crsqe->resp = (struct creq_qp_event *)resp;
158 spin_lock(&crsb->lock); 132 crsqe->resp->cookie = req->cookie;
159 sw_prod = HWQ_CMP(crsb->prod, crsb); 133 crsqe->req_size = req->cmd_size;
160 crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; 134 if (req->resp_size && sb) {
161 *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] 135 struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
162 [get_crsb_idx(sw_prod)]; 136
163 bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr); 137 req->resp_addr = cpu_to_le64(sbuf->dma_addr);
164 req->resp_addr = cpu_to_le64(dma_addr); 138 req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
165 crsb->prod++; 139 BNXT_QPLIB_CMDQE_UNITS;
166 spin_unlock(&crsb->lock);
167
168 req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
169 BNXT_QPLIB_CMDQE_UNITS - 1) /
170 BNXT_QPLIB_CMDQE_UNITS;
171 } 140 }
141
172 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; 142 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
173 preq = (u8 *)req; 143 preq = (u8 *)req;
174 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; 144 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@@ -190,23 +160,24 @@ retry:
190 preq += min_t(u32, size, sizeof(*cmdqe)); 160 preq += min_t(u32, size, sizeof(*cmdqe));
191 size -= min_t(u32, size, sizeof(*cmdqe)); 161 size -= min_t(u32, size, sizeof(*cmdqe));
192 cmdq->prod++; 162 cmdq->prod++;
163 rcfw->seq_num++;
193 } while (size > 0); 164 } while (size > 0);
194 165
166 rcfw->seq_num++;
167
195 cmdq_prod = cmdq->prod; 168 cmdq_prod = cmdq->prod;
196 if (rcfw->flags & FIRMWARE_FIRST_FLAG) { 169 if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
197 /* The very first doorbell write is required to set this flag 170 /* The very first doorbell write
198 * which prompts the FW to reset its internal pointers 171 * is required to set this flag
172 * which prompts the FW to reset
173 * its internal pointers
199 */ 174 */
200 cmdq_prod |= FIRMWARE_FIRST_FLAG; 175 cmdq_prod |= FIRMWARE_FIRST_FLAG;
201 rcfw->flags &= ~FIRMWARE_FIRST_FLAG; 176 rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
202 } 177 }
203 sw_prod = HWQ_CMP(crsq->prod, crsq);
204 crsqe = &crsq->crsq[sw_prod];
205 memset(crsqe, 0, sizeof(*crsqe));
206 crsq->prod++;
207 crsqe->req_size = req->cmd_size;
208 178
209 /* ring CMDQ DB */ 179 /* ring CMDQ DB */
180 wmb();
210 writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + 181 writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
211 rcfw->cmdq_bar_reg_prod_off); 182 rcfw->cmdq_bar_reg_prod_off);
212 writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + 183 writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
@@ -214,9 +185,56 @@ retry:
214done: 185done:
215 spin_unlock_irqrestore(&cmdq->lock, flags); 186 spin_unlock_irqrestore(&cmdq->lock, flags);
216 /* Return the CREQ response pointer */ 187 /* Return the CREQ response pointer */
217 return crsqe ? &crsqe->qp_event : NULL; 188 return 0;
218} 189}
219 190
191int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
192 struct cmdq_base *req,
193 struct creq_base *resp,
194 void *sb, u8 is_block)
195{
196 struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
197 u16 cookie;
198 u8 opcode, retry_cnt = 0xFF;
199 int rc = 0;
200
201 do {
202 opcode = req->opcode;
203 rc = __send_message(rcfw, req, resp, sb, is_block);
204 cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
205 if (!rc)
206 break;
207
208 if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
209 /* send failed */
210 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
211 cookie, opcode);
212 return rc;
213 }
214 is_block ? mdelay(1) : usleep_range(500, 1000);
215
216 } while (retry_cnt--);
217
218 if (is_block)
219 rc = __block_for_resp(rcfw, cookie);
220 else
221 rc = __wait_for_resp(rcfw, cookie);
222 if (rc) {
223 /* timed out */
224 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
225 cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
226 return rc;
227 }
228
229 if (evnt->status) {
230 /* failed with status */
231 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
232 cookie, opcode, evnt->status);
233 rc = -EFAULT;
234 }
235
236 return rc;
237}
220/* Completions */ 238/* Completions */
221static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, 239static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
222 struct creq_func_event *func_event) 240 struct creq_func_event *func_event)
@@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
260static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, 278static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
261 struct creq_qp_event *qp_event) 279 struct creq_qp_event *qp_event)
262{ 280{
263 struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
264 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 281 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
265 struct bnxt_qplib_crsqe *crsqe; 282 struct bnxt_qplib_crsq *crsqe;
266 u16 cbit, cookie, blocked = 0;
267 unsigned long flags; 283 unsigned long flags;
268 u32 sw_cons; 284 u16 cbit, blocked = 0;
285 u16 cookie;
286 __le16 mcookie;
269 287
270 switch (qp_event->event) { 288 switch (qp_event->event) {
271 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: 289 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
@@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
275 default: 293 default:
276 /* Command Response */ 294 /* Command Response */
277 spin_lock_irqsave(&cmdq->lock, flags); 295 spin_lock_irqsave(&cmdq->lock, flags);
278 sw_cons = HWQ_CMP(crsq->cons, crsq); 296 cookie = le16_to_cpu(qp_event->cookie);
279 crsqe = &crsq->crsq[sw_cons]; 297 mcookie = qp_event->cookie;
280 crsq->cons++;
281 memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
282
283 cookie = le16_to_cpu(crsqe->qp_event.cookie);
284 blocked = cookie & RCFW_CMD_IS_BLOCKING; 298 blocked = cookie & RCFW_CMD_IS_BLOCKING;
285 cookie &= RCFW_MAX_COOKIE_VALUE; 299 cookie &= RCFW_MAX_COOKIE_VALUE;
286 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 300 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
301 crsqe = &rcfw->crsqe_tbl[cbit];
302 if (crsqe->resp &&
303 crsqe->resp->cookie == mcookie) {
304 memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
305 crsqe->resp = NULL;
306 } else {
307 dev_err(&rcfw->pdev->dev,
308 "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
309 crsqe->resp ? "mismatch" : "collision",
310 crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
311 }
287 if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) 312 if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
288 dev_warn(&rcfw->pdev->dev, 313 dev_warn(&rcfw->pdev->dev,
289 "QPLIB: CMD bit %d was not requested", cbit); 314 "QPLIB: CMD bit %d was not requested", cbit);
290
291 cmdq->cons += crsqe->req_size; 315 cmdq->cons += crsqe->req_size;
292 spin_unlock_irqrestore(&cmdq->lock, flags); 316 crsqe->req_size = 0;
317
293 if (!blocked) 318 if (!blocked)
294 wake_up(&rcfw->waitq); 319 wake_up(&rcfw->waitq);
295 break; 320 spin_unlock_irqrestore(&cmdq->lock, flags);
296 } 321 }
297 return 0; 322 return 0;
298} 323}
@@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
305 struct creq_base *creqe, **creq_ptr; 330 struct creq_base *creqe, **creq_ptr;
306 u32 sw_cons, raw_cons; 331 u32 sw_cons, raw_cons;
307 unsigned long flags; 332 unsigned long flags;
308 u32 type; 333 u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
309 334
310 /* Service the CREQ until empty */ 335 /* Service the CREQ until budget is over */
311 spin_lock_irqsave(&creq->lock, flags); 336 spin_lock_irqsave(&creq->lock, flags);
312 raw_cons = creq->cons; 337 raw_cons = creq->cons;
313 while (1) { 338 while (budget > 0) {
314 sw_cons = HWQ_CMP(raw_cons, creq); 339 sw_cons = HWQ_CMP(raw_cons, creq);
315 creq_ptr = (struct creq_base **)creq->pbl_ptr; 340 creq_ptr = (struct creq_base **)creq->pbl_ptr;
316 creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; 341 creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
@@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
320 type = creqe->type & CREQ_BASE_TYPE_MASK; 345 type = creqe->type & CREQ_BASE_TYPE_MASK;
321 switch (type) { 346 switch (type) {
322 case CREQ_BASE_TYPE_QP_EVENT: 347 case CREQ_BASE_TYPE_QP_EVENT:
323 if (!bnxt_qplib_process_qp_event 348 bnxt_qplib_process_qp_event
324 (rcfw, (struct creq_qp_event *)creqe)) 349 (rcfw, (struct creq_qp_event *)creqe);
325 rcfw->creq_qp_event_processed++; 350 rcfw->creq_qp_event_processed++;
326 else {
327 dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
328 dev_warn(&rcfw->pdev->dev,
329 "QPLIB: type = 0x%x not handled",
330 type);
331 }
332 break; 351 break;
333 case CREQ_BASE_TYPE_FUNC_EVENT: 352 case CREQ_BASE_TYPE_FUNC_EVENT:
334 if (!bnxt_qplib_process_func_event 353 if (!bnxt_qplib_process_func_event
@@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
346 break; 365 break;
347 } 366 }
348 raw_cons++; 367 raw_cons++;
368 budget--;
349 } 369 }
370
350 if (creq->cons != raw_cons) { 371 if (creq->cons != raw_cons) {
351 creq->cons = raw_cons; 372 creq->cons = raw_cons;
352 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, 373 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
@@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
375/* RCFW */ 396/* RCFW */
376int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) 397int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
377{ 398{
378 struct creq_deinitialize_fw_resp *resp;
379 struct cmdq_deinitialize_fw req; 399 struct cmdq_deinitialize_fw req;
400 struct creq_deinitialize_fw_resp resp;
380 u16 cmd_flags = 0; 401 u16 cmd_flags = 0;
402 int rc;
381 403
382 RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); 404 RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
383 resp = (struct creq_deinitialize_fw_resp *) 405 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
384 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 406 NULL, 0);
385 NULL, 0); 407 if (rc)
386 if (!resp) 408 return rc;
387 return -EINVAL;
388
389 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
390 return -ETIMEDOUT;
391
392 if (resp->status ||
393 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
394 return -EFAULT;
395 409
396 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 410 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
397 return 0; 411 return 0;
@@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
417int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, 431int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
418 struct bnxt_qplib_ctx *ctx, int is_virtfn) 432 struct bnxt_qplib_ctx *ctx, int is_virtfn)
419{ 433{
420 struct creq_initialize_fw_resp *resp;
421 struct cmdq_initialize_fw req; 434 struct cmdq_initialize_fw req;
435 struct creq_initialize_fw_resp resp;
422 u16 cmd_flags = 0, level; 436 u16 cmd_flags = 0, level;
437 int rc;
423 438
424 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); 439 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
425 440
@@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
482 497
483skip_ctx_setup: 498skip_ctx_setup:
484 req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); 499 req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
485 resp = (struct creq_initialize_fw_resp *) 500 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
486 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 501 NULL, 0);
487 NULL, 0); 502 if (rc)
488 if (!resp) { 503 return rc;
489 dev_err(&rcfw->pdev->dev,
490 "QPLIB: RCFW: INITIALIZE_FW send failed");
491 return -EINVAL;
492 }
493 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
494 /* Cmd timed out */
495 dev_err(&rcfw->pdev->dev,
496 "QPLIB: RCFW: INITIALIZE_FW timed out");
497 return -ETIMEDOUT;
498 }
499 if (resp->status ||
500 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
501 dev_err(&rcfw->pdev->dev,
502 "QPLIB: RCFW: INITIALIZE_FW failed");
503 return -EINVAL;
504 }
505 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 504 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
506 return 0; 505 return 0;
507} 506}
508 507
509void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 508void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
510{ 509{
511 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); 510 kfree(rcfw->crsqe_tbl);
512 kfree(rcfw->crsq.crsq);
513 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); 511 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
514 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); 512 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
515
516 rcfw->pdev = NULL; 513 rcfw->pdev = NULL;
517} 514}
518 515
@@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
539 goto fail; 536 goto fail;
540 } 537 }
541 538
542 rcfw->crsq.max_elements = rcfw->cmdq.max_elements; 539 rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
543 rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, 540 sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
544 sizeof(*rcfw->crsq.crsq), GFP_KERNEL); 541 if (!rcfw->crsqe_tbl)
545 if (!rcfw->crsq.crsq)
546 goto fail; 542 goto fail;
547 543
548 rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
549 if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
550 &rcfw->crsb.max_elements,
551 BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
552 HWQ_TYPE_CTX)) {
553 dev_err(&rcfw->pdev->dev,
554 "QPLIB: HW channel CRSB allocation failed");
555 goto fail;
556 }
557 return 0; 544 return 0;
558 545
559fail: 546fail:
@@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
606 int rc; 593 int rc;
607 594
608 /* General */ 595 /* General */
609 atomic_set(&rcfw->seq_num, 0); 596 rcfw->seq_num = 0;
610 rcfw->flags = FIRMWARE_FIRST_FLAG; 597 rcfw->flags = FIRMWARE_FIRST_FLAG;
611 bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * 598 bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
612 sizeof(unsigned long)); 599 sizeof(unsigned long));
@@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
636 623
637 rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; 624 rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
638 625
639 /* CRSQ */
640 rcfw->crsq.prod = 0;
641 rcfw->crsq.cons = 0;
642
643 /* CREQ */ 626 /* CREQ */
644 rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; 627 rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
645 res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); 628 res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
@@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
692 __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); 675 __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
693 return 0; 676 return 0;
694} 677}
678
679struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
680 struct bnxt_qplib_rcfw *rcfw,
681 u32 size)
682{
683 struct bnxt_qplib_rcfw_sbuf *sbuf;
684
685 sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
686 if (!sbuf)
687 return NULL;
688
689 sbuf->size = size;
690 sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
691 &sbuf->dma_addr, GFP_ATOMIC);
692 if (!sbuf->sb)
693 goto bail;
694
695 return sbuf;
696bail:
697 kfree(sbuf);
698 return NULL;
699}
700
701void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
702 struct bnxt_qplib_rcfw_sbuf *sbuf)
703{
704 if (sbuf->sb)
705 dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
706 sbuf->sb, sbuf->dma_addr);
707 kfree(sbuf);
708}
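
The rework above replaces the old CRSQ/CRSB scheme with a per-cookie table (crsqe_tbl): __send_message() parks the caller's response buffer in the slot cookie % RCFW_MAX_OUTSTANDING_CMD, and bnxt_qplib_process_qp_event() copies the CREQ event back only when the cookies match, then releases the slot. The sketch below models just that bookkeeping in plain userspace C; the demo_* names, table size, and return codes are made up for illustration and are not the driver's API.

/*
 * Userspace model of the crsqe_tbl bookkeeping: the submitter parks a
 * response buffer in a slot derived from the command cookie, and the
 * completion path matches the cookie before filling the buffer in and
 * freeing the slot.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_MAX_OUTSTANDING 4u

struct demo_resp { unsigned short cookie; int status; };
struct demo_slot { struct demo_resp *resp; };

static struct demo_slot demo_tbl[DEMO_MAX_OUTSTANDING];
static unsigned short demo_seq;

/* Submit: claim the slot for this cookie, or fail if it is still busy. */
static int demo_submit(struct demo_resp *resp, unsigned short *cookie)
{
        unsigned short c = demo_seq++ & 0x7FFF;
        struct demo_slot *slot = &demo_tbl[c % DEMO_MAX_OUTSTANDING];

        if (slot->resp)
                return -1;              /* too many outstanding commands */
        memset(resp, 0, sizeof(*resp));
        resp->cookie = c;
        slot->resp = resp;
        *cookie = c;
        return 0;
}

/* Completion: fill the parked buffer only if the cookies match. */
static int demo_complete(unsigned short cookie, int status)
{
        struct demo_slot *slot = &demo_tbl[cookie % DEMO_MAX_OUTSTANDING];

        if (!slot->resp || slot->resp->cookie != cookie)
                return -1;              /* collision or cookie mismatch */
        slot->resp->status = status;
        slot->resp = NULL;              /* slot is free again */
        return 0;
}

int main(void)
{
        struct demo_resp resp;
        unsigned short cookie;

        if (!demo_submit(&resp, &cookie) && !demo_complete(cookie, 0))
                printf("cookie 0x%04x completed, status %d\n",
                       cookie, resp.status);
        return 0;
}

In the driver the busy-slot case surfaces as -EBUSY, and bnxt_qplib_rcfw_send_message() retries it in its outer loop, as shown in the hunk above.
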
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index d3567d75bf58..09ce121770cd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -73,6 +73,7 @@
73#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT 73#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
74#define RCFW_MAX_COOKIE_VALUE 0x7FFF 74#define RCFW_MAX_COOKIE_VALUE 0x7FFF
75#define RCFW_CMD_IS_BLOCKING 0x8000 75#define RCFW_CMD_IS_BLOCKING 0x8000
76#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
76 77
77/* Cmdq contains a fix number of a 16-Byte slots */ 78/* Cmdq contains a fix number of a 16-Byte slots */
78struct bnxt_qplib_cmdqe { 79struct bnxt_qplib_cmdqe {
@@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe {
94 u8 data[1024]; 95 u8 data[1024];
95}; 96};
96 97
97/* CRSQ SB */
98#define BNXT_QPLIB_CRSBE_MAX_CNT 4
99#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe)
100#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
101
102#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1)
103#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
104
105static inline u32 get_crsb_pg(u32 val)
106{
107 return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
108}
109
110static inline u32 get_crsb_idx(u32 val)
111{
112 return val & MAX_CRSB_IDX_PER_PG;
113}
114
115static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
116 u32 prod, dma_addr_t *dma_addr)
117{
118 *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
119 *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
120 BNXT_QPLIB_CRSBE_UNITS;
121}
122
123/* CREQ */ 98/* CREQ */
124/* Allocate 1 per QP for async error notification for now */ 99/* Allocate 1 per QP for async error notification for now */
125#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) 100#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
@@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val)
158#define CREQ_DB(db, raw_cons, cp_bit) \ 133#define CREQ_DB(db, raw_cons, cp_bit) \
159 writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) 134 writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
160 135
136#define CREQ_ENTRY_POLL_BUDGET 0x100
137
161/* HWQ */ 138/* HWQ */
162struct bnxt_qplib_crsqe { 139
163 struct creq_qp_event qp_event; 140struct bnxt_qplib_crsq {
141 struct creq_qp_event *resp;
164 u32 req_size; 142 u32 req_size;
165}; 143};
166 144
167struct bnxt_qplib_crsq { 145struct bnxt_qplib_rcfw_sbuf {
168 struct bnxt_qplib_crsqe *crsq; 146 void *sb;
169 u32 prod; 147 dma_addr_t dma_addr;
170 u32 cons; 148 u32 size;
171 u32 max_elements;
172}; 149};
173 150
174/* RCFW Communication Channels */ 151/* RCFW Communication Channels */
@@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw {
185 wait_queue_head_t waitq; 162 wait_queue_head_t waitq;
186 int (*aeq_handler)(struct bnxt_qplib_rcfw *, 163 int (*aeq_handler)(struct bnxt_qplib_rcfw *,
187 struct creq_func_event *); 164 struct creq_func_event *);
188 atomic_t seq_num; 165 u32 seq_num;
189 166
190 /* Bar region info */ 167 /* Bar region info */
191 void __iomem *cmdq_bar_reg_iomem; 168 void __iomem *cmdq_bar_reg_iomem;
@@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw {
203 180
204 /* Actual Cmd and Resp Queues */ 181 /* Actual Cmd and Resp Queues */
205 struct bnxt_qplib_hwq cmdq; 182 struct bnxt_qplib_hwq cmdq;
206 struct bnxt_qplib_crsq crsq; 183 struct bnxt_qplib_crsq *crsqe_tbl;
207 struct bnxt_qplib_hwq crsb;
208}; 184};
209 185
210void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); 186void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
219 (struct bnxt_qplib_rcfw *, 195 (struct bnxt_qplib_rcfw *,
220 struct creq_func_event *)); 196 struct creq_func_event *));
221 197
222int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); 198struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
223int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); 199 struct bnxt_qplib_rcfw *rcfw,
224void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 200 u32 size);
225 struct cmdq_base *req, void **crsbe, 201void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
226 u8 is_block); 202 struct bnxt_qplib_rcfw_sbuf *sbuf);
203int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
204 struct cmdq_base *req, struct creq_base *resp,
205 void *sbuf, u8 is_block);
227 206
228int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); 207int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
229int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, 208int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6277d802ca4b..2e4855509719 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
48 48
49#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) 49#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
50 50
51#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \
52 ((HWQ_CMP(hwq->prod, hwq)\
53 - HWQ_CMP(hwq->cons, hwq))\
54 & (hwq->max_elements - 1)))
51enum bnxt_qplib_hwq_type { 55enum bnxt_qplib_hwq_type {
52 HWQ_TYPE_CTX, 56 HWQ_TYPE_CTX,
53 HWQ_TYPE_QUEUE, 57 HWQ_TYPE_QUEUE,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 7b31eccedf11..fde18cf0e406 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
55 struct bnxt_qplib_dev_attr *attr) 55 struct bnxt_qplib_dev_attr *attr)
56{ 56{
57 struct cmdq_query_func req; 57 struct cmdq_query_func req;
58 struct creq_query_func_resp *resp; 58 struct creq_query_func_resp resp;
59 struct bnxt_qplib_rcfw_sbuf *sbuf;
59 struct creq_query_func_resp_sb *sb; 60 struct creq_query_func_resp_sb *sb;
60 u16 cmd_flags = 0; 61 u16 cmd_flags = 0;
61 u32 temp; 62 u32 temp;
62 u8 *tqm_alloc; 63 u8 *tqm_alloc;
63 int i; 64 int i, rc = 0;
64 65
65 RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); 66 RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
66 67
67 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 68 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
68 resp = (struct creq_query_func_resp *) 69 if (!sbuf) {
69 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
70 0);
71 if (!resp) {
72 dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
73 return -EINVAL;
74 }
75 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
76 /* Cmd timed out */
77 dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
78 return -ETIMEDOUT;
79 }
80 if (resp->status ||
81 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
82 dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
83 dev_err(&rcfw->pdev->dev, 70 dev_err(&rcfw->pdev->dev,
84 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 71 "QPLIB: SP: QUERY_FUNC alloc side buffer failed");
85 resp->status, le16_to_cpu(req.cookie), 72 return -ENOMEM;
86 le16_to_cpu(resp->cookie));
87 return -EINVAL;
88 } 73 }
74
75 sb = sbuf->sb;
76 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
77 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
78 (void *)sbuf, 0);
79 if (rc)
80 goto bail;
81
89 /* Extract the context from the side buffer */ 82 /* Extract the context from the side buffer */
90 attr->max_qp = le32_to_cpu(sb->max_qp); 83 attr->max_qp = le32_to_cpu(sb->max_qp);
91 attr->max_qp_rd_atom = 84 attr->max_qp_rd_atom =
@@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
95 sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? 88 sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
96 BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; 89 BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
97 attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); 90 attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
91 /*
92 * 128 WQEs needs to be reserved for the HW (8916). Prevent
93 * reporting the max number
94 */
95 attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
98 attr->max_qp_sges = sb->max_sge; 96 attr->max_qp_sges = sb->max_sge;
99 attr->max_cq = le32_to_cpu(sb->max_cq); 97 attr->max_cq = le32_to_cpu(sb->max_cq);
100 attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); 98 attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
@@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
130 attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); 128 attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
131 attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); 129 attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
132 } 130 }
133 return 0; 131
132bail:
133 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
134 return rc;
134} 135}
135 136
136/* SGID */ 137/* SGID */
@@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
178 /* Remove GID from the SGID table */ 179 /* Remove GID from the SGID table */
179 if (update) { 180 if (update) {
180 struct cmdq_delete_gid req; 181 struct cmdq_delete_gid req;
181 struct creq_delete_gid_resp *resp; 182 struct creq_delete_gid_resp resp;
182 u16 cmd_flags = 0; 183 u16 cmd_flags = 0;
184 int rc;
183 185
184 RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); 186 RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
185 if (sgid_tbl->hw_id[index] == 0xFFFF) { 187 if (sgid_tbl->hw_id[index] == 0xFFFF) {
@@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
188 return -EINVAL; 190 return -EINVAL;
189 } 191 }
190 req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); 192 req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
191 resp = (struct creq_delete_gid_resp *) 193 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
192 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 194 (void *)&resp, NULL, 0);
193 0); 195 if (rc)
194 if (!resp) { 196 return rc;
195 dev_err(&res->pdev->dev,
196 "QPLIB: SP: DELETE_GID send failed");
197 return -EINVAL;
198 }
199 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
200 le16_to_cpu(req.cookie))) {
201 /* Cmd timed out */
202 dev_err(&res->pdev->dev,
203 "QPLIB: SP: DELETE_GID timed out");
204 return -ETIMEDOUT;
205 }
206 if (resp->status ||
207 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
208 dev_err(&res->pdev->dev,
209 "QPLIB: SP: DELETE_GID failed ");
210 dev_err(&res->pdev->dev,
211 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
212 resp->status, le16_to_cpu(req.cookie),
213 le16_to_cpu(resp->cookie));
214 return -EINVAL;
215 }
216 } 197 }
217 memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, 198 memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
218 sizeof(bnxt_qplib_gid_zero)); 199 sizeof(bnxt_qplib_gid_zero));
@@ -234,7 +215,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
234 struct bnxt_qplib_res, 215 struct bnxt_qplib_res,
235 sgid_tbl); 216 sgid_tbl);
236 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 217 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
237 int i, free_idx, rc = 0; 218 int i, free_idx;
238 219
239 if (!sgid_tbl) { 220 if (!sgid_tbl) {
240 dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); 221 dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
@@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
266 } 247 }
267 if (update) { 248 if (update) {
268 struct cmdq_add_gid req; 249 struct cmdq_add_gid req;
269 struct creq_add_gid_resp *resp; 250 struct creq_add_gid_resp resp;
270 u16 cmd_flags = 0; 251 u16 cmd_flags = 0;
271 u32 temp32[4]; 252 u32 temp32[4];
272 u16 temp16[3]; 253 u16 temp16[3];
254 int rc;
273 255
274 RCFW_CMD_PREP(req, ADD_GID, cmd_flags); 256 RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
275 257
@@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
290 req.src_mac[1] = cpu_to_be16(temp16[1]); 272 req.src_mac[1] = cpu_to_be16(temp16[1]);
291 req.src_mac[2] = cpu_to_be16(temp16[2]); 273 req.src_mac[2] = cpu_to_be16(temp16[2]);
292 274
293 resp = (struct creq_add_gid_resp *) 275 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
294 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 276 (void *)&resp, NULL, 0);
295 NULL, 0); 277 if (rc)
296 if (!resp) { 278 return rc;
297 dev_err(&res->pdev->dev, 279 sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
298 "QPLIB: SP: ADD_GID send failed");
299 return -EINVAL;
300 }
301 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
302 le16_to_cpu(req.cookie))) {
303 /* Cmd timed out */
304 dev_err(&res->pdev->dev,
305 "QPIB: SP: ADD_GID timed out");
306 return -ETIMEDOUT;
307 }
308 if (resp->status ||
309 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
310 dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
311 dev_err(&res->pdev->dev,
312 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
313 resp->status, le16_to_cpu(req.cookie),
314 le16_to_cpu(resp->cookie));
315 return -EINVAL;
316 }
317 sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
318 } 280 }
319 /* Add GID to the sgid_tbl */ 281 /* Add GID to the sgid_tbl */
320 memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); 282 memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
@@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
325 287
326 *index = free_idx; 288 *index = free_idx;
327 /* unlock */ 289 /* unlock */
328 return rc; 290 return 0;
329} 291}
330 292
331/* pkeys */ 293/* pkeys */
@@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
422{ 384{
423 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 385 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
424 struct cmdq_create_ah req; 386 struct cmdq_create_ah req;
425 struct creq_create_ah_resp *resp; 387 struct creq_create_ah_resp resp;
426 u16 cmd_flags = 0; 388 u16 cmd_flags = 0;
427 u32 temp32[4]; 389 u32 temp32[4];
428 u16 temp16[3]; 390 u16 temp16[3];
391 int rc;
429 392
430 RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); 393 RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
431 394
@@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
450 req.dest_mac[1] = cpu_to_le16(temp16[1]); 413 req.dest_mac[1] = cpu_to_le16(temp16[1]);
451 req.dest_mac[2] = cpu_to_le16(temp16[2]); 414 req.dest_mac[2] = cpu_to_le16(temp16[2]);
452 415
453 resp = (struct creq_create_ah_resp *) 416 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
454 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 417 NULL, 1);
455 NULL, 1); 418 if (rc)
456 if (!resp) { 419 return rc;
457 dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); 420
458 return -EINVAL; 421 ah->id = le32_to_cpu(resp.xid);
459 }
460 if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
461 /* Cmd timed out */
462 dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
463 return -ETIMEDOUT;
464 }
465 if (resp->status ||
466 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
467 dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
468 dev_err(&rcfw->pdev->dev,
469 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
470 resp->status, le16_to_cpu(req.cookie),
471 le16_to_cpu(resp->cookie));
472 return -EINVAL;
473 }
474 ah->id = le32_to_cpu(resp->xid);
475 return 0; 422 return 0;
476} 423}
477 424
@@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
479{ 426{
480 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 427 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
481 struct cmdq_destroy_ah req; 428 struct cmdq_destroy_ah req;
482 struct creq_destroy_ah_resp *resp; 429 struct creq_destroy_ah_resp resp;
483 u16 cmd_flags = 0; 430 u16 cmd_flags = 0;
431 int rc;
484 432
485 /* Clean up the AH table in the device */ 433 /* Clean up the AH table in the device */
486 RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); 434 RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
487 435
488 req.ah_cid = cpu_to_le32(ah->id); 436 req.ah_cid = cpu_to_le32(ah->id);
489 437
490 resp = (struct creq_destroy_ah_resp *) 438 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
491 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 439 NULL, 1);
492 NULL, 1); 440 if (rc)
493 if (!resp) { 441 return rc;
494 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
495 return -EINVAL;
496 }
497 if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
498 /* Cmd timed out */
499 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
500 return -ETIMEDOUT;
501 }
502 if (resp->status ||
503 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
504 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
505 dev_err(&rcfw->pdev->dev,
506 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
507 resp->status, le16_to_cpu(req.cookie),
508 le16_to_cpu(resp->cookie));
509 return -EINVAL;
510 }
511 return 0; 442 return 0;
512} 443}
513 444
@@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
516{ 447{
517 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 448 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
518 struct cmdq_deallocate_key req; 449 struct cmdq_deallocate_key req;
519 struct creq_deallocate_key_resp *resp; 450 struct creq_deallocate_key_resp resp;
520 u16 cmd_flags = 0; 451 u16 cmd_flags = 0;
452 int rc;
521 453
522 if (mrw->lkey == 0xFFFFFFFF) { 454 if (mrw->lkey == 0xFFFFFFFF) {
523 dev_info(&res->pdev->dev, 455 dev_info(&res->pdev->dev,
@@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
536 else 468 else
537 req.key = cpu_to_le32(mrw->lkey); 469 req.key = cpu_to_le32(mrw->lkey);
538 470
539 resp = (struct creq_deallocate_key_resp *) 471 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
540 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 472 NULL, 0);
541 NULL, 0); 473 if (rc)
542 if (!resp) { 474 return rc;
543 dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); 475
544 return -EINVAL;
545 }
546 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
547 /* Cmd timed out */
548 dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
549 return -ETIMEDOUT;
550 }
551 if (resp->status ||
552 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
553 dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
554 dev_err(&res->pdev->dev,
555 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
556 resp->status, le16_to_cpu(req.cookie),
557 le16_to_cpu(resp->cookie));
558 return -EINVAL;
559 }
560 /* Free the qplib's MRW memory */ 476 /* Free the qplib's MRW memory */
561 if (mrw->hwq.max_elements) 477 if (mrw->hwq.max_elements)
562 bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); 478 bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
@@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
568{ 484{
569 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 485 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
570 struct cmdq_allocate_mrw req; 486 struct cmdq_allocate_mrw req;
571 struct creq_allocate_mrw_resp *resp; 487 struct creq_allocate_mrw_resp resp;
572 u16 cmd_flags = 0; 488 u16 cmd_flags = 0;
573 unsigned long tmp; 489 unsigned long tmp;
490 int rc;
574 491
575 RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); 492 RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
576 493
@@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
584 tmp = (unsigned long)mrw; 501 tmp = (unsigned long)mrw;
585 req.mrw_handle = cpu_to_le64(tmp); 502 req.mrw_handle = cpu_to_le64(tmp);
586 503
587 resp = (struct creq_allocate_mrw_resp *) 504 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
588 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 505 (void *)&resp, NULL, 0);
589 NULL, 0); 506 if (rc)
590 if (!resp) { 507 return rc;
591 dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); 508
592 return -EINVAL;
593 }
594 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
595 /* Cmd timed out */
596 dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
597 return -ETIMEDOUT;
598 }
599 if (resp->status ||
600 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
601 dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
602 dev_err(&rcfw->pdev->dev,
603 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
604 resp->status, le16_to_cpu(req.cookie),
605 le16_to_cpu(resp->cookie));
606 return -EINVAL;
607 }
608 if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || 509 if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
609 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || 510 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
610 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) 511 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
611 mrw->rkey = le32_to_cpu(resp->xid); 512 mrw->rkey = le32_to_cpu(resp.xid);
612 else 513 else
613 mrw->lkey = le32_to_cpu(resp->xid); 514 mrw->lkey = le32_to_cpu(resp.xid);
614 return 0; 515 return 0;
615} 516}
616 517
@@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
619{ 520{
620 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 521 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
621 struct cmdq_deregister_mr req; 522 struct cmdq_deregister_mr req;
622 struct creq_deregister_mr_resp *resp; 523 struct creq_deregister_mr_resp resp;
623 u16 cmd_flags = 0; 524 u16 cmd_flags = 0;
624 int rc; 525 int rc;
625 526
626 RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); 527 RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
627 528
628 req.lkey = cpu_to_le32(mrw->lkey); 529 req.lkey = cpu_to_le32(mrw->lkey);
629 resp = (struct creq_deregister_mr_resp *) 530 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
630 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 531 (void *)&resp, NULL, block);
631 NULL, block); 532 if (rc)
632 if (!resp) { 533 return rc;
633 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
634 return -EINVAL;
635 }
636 if (block)
637 rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
638 le16_to_cpu(req.cookie));
639 else
640 rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
641 le16_to_cpu(req.cookie));
642 if (!rc) {
643 /* Cmd timed out */
644 dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
645 return -ETIMEDOUT;
646 }
647 if (resp->status ||
648 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
649 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
650 dev_err(&rcfw->pdev->dev,
651 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
652 resp->status, le16_to_cpu(req.cookie),
653 le16_to_cpu(resp->cookie));
654 return -EINVAL;
655 }
656 534
657 /* Free the qplib's MR memory */ 535 /* Free the qplib's MR memory */
658 if (mrw->hwq.max_elements) { 536 if (mrw->hwq.max_elements) {
@@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
669{ 547{
670 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 548 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
671 struct cmdq_register_mr req; 549 struct cmdq_register_mr req;
672 struct creq_register_mr_resp *resp; 550 struct creq_register_mr_resp resp;
673 u16 cmd_flags = 0, level; 551 u16 cmd_flags = 0, level;
674 int pg_ptrs, pages, i, rc; 552 int pg_ptrs, pages, i, rc;
675 dma_addr_t **pbl_ptr; 553 dma_addr_t **pbl_ptr;
@@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
730 req.key = cpu_to_le32(mr->lkey); 608 req.key = cpu_to_le32(mr->lkey);
731 req.mr_size = cpu_to_le64(mr->total_size); 609 req.mr_size = cpu_to_le64(mr->total_size);
732 610
733 resp = (struct creq_register_mr_resp *) 611 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
734 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 612 (void *)&resp, NULL, block);
735 NULL, block); 613 if (rc)
736 if (!resp) {
737 dev_err(&res->pdev->dev, "SP: REG_MR send failed");
738 rc = -EINVAL;
739 goto fail;
740 }
741 if (block)
742 rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
743 le16_to_cpu(req.cookie));
744 else
745 rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
746 le16_to_cpu(req.cookie));
747 if (!rc) {
748 /* Cmd timed out */
749 dev_err(&res->pdev->dev, "SP: REG_MR timed out");
750 rc = -ETIMEDOUT;
751 goto fail;
752 }
753 if (resp->status ||
754 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
755 dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
756 dev_err(&res->pdev->dev,
757 "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
758 resp->status, le16_to_cpu(req.cookie),
759 le16_to_cpu(resp->cookie));
760 rc = -EINVAL;
761 goto fail; 614 goto fail;
762 } 615
763 return 0; 616 return 0;
764 617
765fail: 618fail:
@@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
804{ 657{
805 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 658 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
806 struct cmdq_map_tc_to_cos req; 659 struct cmdq_map_tc_to_cos req;
807 struct creq_map_tc_to_cos_resp *resp; 660 struct creq_map_tc_to_cos_resp resp;
808 u16 cmd_flags = 0; 661 u16 cmd_flags = 0;
809 int tleft; 662 int rc = 0;
810 663
811 RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); 664 RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
812 req.cos0 = cpu_to_le16(cids[0]); 665 req.cos0 = cpu_to_le16(cids[0]);
813 req.cos1 = cpu_to_le16(cids[1]); 666 req.cos1 = cpu_to_le16(cids[1]);
814 667
815 resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); 668 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
816 if (!resp) { 669 (void *)&resp, NULL, 0);
817 dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
818 return -EINVAL;
819 }
820
821 tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
822 if (!tleft) {
823 dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
824 return -ETIMEDOUT;
825 }
826
827 if (resp->status ||
828 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
829 dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
830 dev_err(&res->pdev->dev,
831 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
832 resp->status, le16_to_cpu(req.cookie),
833 le16_to_cpu(resp->cookie));
834 return -EINVAL;
835 }
836
837 return 0; 670 return 0;
838} 671}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 1442a617e968..a543f959098b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,8 @@
40#ifndef __BNXT_QPLIB_SP_H__ 40#ifndef __BNXT_QPLIB_SP_H__
41#define __BNXT_QPLIB_SP_H__ 41#define __BNXT_QPLIB_SP_H__
42 42
43#define BNXT_QPLIB_RESERVED_QP_WRS 128
44
43struct bnxt_qplib_dev_attr { 45struct bnxt_qplib_dev_attr {
44 char fw_ver[32]; 46 char fw_ver[32];
45 u16 max_sgid; 47 u16 max_sgid;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index f96a96dbcf1f..ae0b79aeea2e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
767 kfree(entry); 767 kfree(entry);
768 } 768 }
769 769
770 list_for_each_safe(pos, nxt, &uctx->qpids) { 770 list_for_each_safe(pos, nxt, &uctx->cqids) {
771 entry = list_entry(pos, struct c4iw_qid_list, entry); 771 entry = list_entry(pos, struct c4iw_qid_list, entry);
772 list_del_init(&entry->entry); 772 list_del_init(&entry->entry);
773 kfree(entry); 773 kfree(entry);
@@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
880 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); 880 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
881 if (!rdev->free_workq) { 881 if (!rdev->free_workq) {
882 err = -ENOMEM; 882 err = -ENOMEM;
883 goto err_free_status_page; 883 goto err_free_status_page_and_wr_log;
884 } 884 }
885 885
886 rdev->status_page->db_off = 0; 886 rdev->status_page->db_off = 0;
887 887
888 return 0; 888 return 0;
889err_free_status_page: 889err_free_status_page_and_wr_log:
890 if (c4iw_wr_log && rdev->wr_log)
891 kfree(rdev->wr_log);
890 free_page((unsigned long)rdev->status_page); 892 free_page((unsigned long)rdev->status_page);
891destroy_ocqp_pool: 893destroy_ocqp_pool:
892 c4iw_ocqp_pool_destroy(rdev); 894 c4iw_ocqp_pool_destroy(rdev);
@@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
903{ 905{
904 destroy_workqueue(rdev->free_workq); 906 destroy_workqueue(rdev->free_workq);
905 kfree(rdev->wr_log); 907 kfree(rdev->wr_log);
908 c4iw_release_dev_ucontext(rdev, &rdev->uctx);
906 free_page((unsigned long)rdev->status_page); 909 free_page((unsigned long)rdev->status_page);
907 c4iw_pblpool_destroy(rdev); 910 c4iw_pblpool_destroy(rdev);
908 c4iw_rqtpool_destroy(rdev); 911 c4iw_rqtpool_destroy(rdev);
912 c4iw_ocqp_pool_destroy(rdev);
909 c4iw_destroy_resource(&rdev->resource); 913 c4iw_destroy_resource(&rdev->resource);
910} 914}
911 915
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0c79983c8b1a..9ecc089d4529 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3692,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
3692 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 3692 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
3693 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 3693 dev->ib_dev.get_port_immutable = mlx5_port_immutable;
3694 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 3694 dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
3695 dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; 3695 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
3696 dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; 3696 dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
3697 dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
3698 }
3697 if (mlx5_core_is_pf(mdev)) { 3699 if (mlx5_core_is_pf(mdev)) {
3698 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 3700 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
3699 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 3701 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index aa08c76a4245..d961f79b317c 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -58,7 +58,10 @@
58#define QEDR_MSG_QP " QP" 58#define QEDR_MSG_QP " QP"
59#define QEDR_MSG_GSI " GSI" 59#define QEDR_MSG_GSI " GSI"
60 60
61#define QEDR_CQ_MAGIC_NUMBER (0x11223344) 61#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
62
63#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
64#define FW_PAGE_SHIFT (12)
62 65
63struct qedr_dev; 66struct qedr_dev;
64 67
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 17685cfea6a2..d6723c365c7f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
653 653
654static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, 654static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
655 struct qedr_pbl *pbl, 655 struct qedr_pbl *pbl,
656 struct qedr_pbl_info *pbl_info) 656 struct qedr_pbl_info *pbl_info, u32 pg_shift)
657{ 657{
658 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; 658 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
659 u32 fw_pg_cnt, fw_pg_per_umem_pg;
659 struct qedr_pbl *pbl_tbl; 660 struct qedr_pbl *pbl_tbl;
660 struct scatterlist *sg; 661 struct scatterlist *sg;
661 struct regpair *pbe; 662 struct regpair *pbe;
663 u64 pg_addr;
662 int entry; 664 int entry;
663 u32 addr;
664 665
665 if (!pbl_info->num_pbes) 666 if (!pbl_info->num_pbes)
666 return; 667 return;
@@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
683 684
684 shift = umem->page_shift; 685 shift = umem->page_shift;
685 686
687 fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
688
686 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 689 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
687 pages = sg_dma_len(sg) >> shift; 690 pages = sg_dma_len(sg) >> shift;
691 pg_addr = sg_dma_address(sg);
688 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { 692 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
689 /* store the page address in pbe */ 693 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
690 pbe->lo = cpu_to_le32(sg_dma_address(sg) + 694 pbe->lo = cpu_to_le32(pg_addr);
691 (pg_cnt << shift)); 695 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
692 addr = upper_32_bits(sg_dma_address(sg) + 696
693 (pg_cnt << shift)); 697 pg_addr += BIT(pg_shift);
694 pbe->hi = cpu_to_le32(addr); 698 pbe_cnt++;
695 pbe_cnt++; 699 total_num_pbes++;
696 total_num_pbes++; 700 pbe++;
697 pbe++; 701
698 702 if (total_num_pbes == pbl_info->num_pbes)
699 if (total_num_pbes == pbl_info->num_pbes) 703 return;
700 return; 704
701 705 /* If the given pbl is full storing the pbes,
702 /* If the given pbl is full storing the pbes, 706 * move to next pbl.
703 * move to next pbl. 707 */
704 */ 708 if (pbe_cnt ==
705 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { 709 (pbl_info->pbl_size / sizeof(u64))) {
706 pbl_tbl++; 710 pbl_tbl++;
707 pbe = (struct regpair *)pbl_tbl->va; 711 pbe = (struct regpair *)pbl_tbl->va;
708 pbe_cnt = 0; 712 pbe_cnt = 0;
713 }
714
715 fw_pg_cnt++;
709 } 716 }
710 } 717 }
711 } 718 }
@@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
754 u64 buf_addr, size_t buf_len, 761 u64 buf_addr, size_t buf_len,
755 int access, int dmasync) 762 int access, int dmasync)
756{ 763{
757 int page_cnt; 764 u32 fw_pages;
758 int rc; 765 int rc;
759 766
760 q->buf_addr = buf_addr; 767 q->buf_addr = buf_addr;
@@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
766 return PTR_ERR(q->umem); 773 return PTR_ERR(q->umem);
767 } 774 }
768 775
769 page_cnt = ib_umem_page_count(q->umem); 776 fw_pages = ib_umem_page_count(q->umem) <<
770 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); 777 (q->umem->page_shift - FW_PAGE_SHIFT);
778
779 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
771 if (rc) 780 if (rc)
772 goto err0; 781 goto err0;
773 782
@@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
777 goto err0; 786 goto err0;
778 } 787 }
779 788
780 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); 789 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
790 FW_PAGE_SHIFT);
781 791
782 return 0; 792 return 0;
783 793
@@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2226 goto err1; 2236 goto err1;
2227 2237
2228 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, 2238 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2229 &mr->info.pbl_info); 2239 &mr->info.pbl_info, mr->umem->page_shift);
2230 2240
2231 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); 2241 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2232 if (rc) { 2242 if (rc) {
@@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3209 case IB_WC_REG_MR: 3219 case IB_WC_REG_MR:
3210 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; 3220 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3211 break; 3221 break;
3222 case IB_WC_RDMA_READ:
3223 case IB_WC_SEND:
3224 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3225 break;
3212 default: 3226 default:
3213 break; 3227 break;
3214 } 3228 }
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ecdba2fce083..1ac5b8551a4d 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -68,6 +68,7 @@
68static inline u32 rxe_crc32(struct rxe_dev *rxe, 68static inline u32 rxe_crc32(struct rxe_dev *rxe,
69 u32 crc, void *next, size_t len) 69 u32 crc, void *next, size_t len)
70{ 70{
71 u32 retval;
71 int err; 72 int err;
72 73
73 SHASH_DESC_ON_STACK(shash, rxe->tfm); 74 SHASH_DESC_ON_STACK(shash, rxe->tfm);
@@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
81 return crc32_le(crc, next, len); 82 return crc32_le(crc, next, len);
82 } 83 }
83 84
84 return *(u32 *)shash_desc_ctx(shash); 85 retval = *(u32 *)shash_desc_ctx(shash);
86 barrier_data(shash_desc_ctx(shash));
87 return retval;
85} 88}
86 89
87int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); 90int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
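The rxe change copies the digest out of the on-stack shash descriptor before returning and then calls barrier_data() on the descriptor context. barrier_data() is the kernel's compiler barrier that additionally names the pointer as an asm input, so the compiler must assume the buffer is still accessed and cannot drop or reorder the stores into it. A small sketch of the pattern, with barrier_data() spelled out as the GCC definition from the kernel headers (read_result() itself is illustrative):

/* Sketch of the copy-then-barrier pattern used above. */
#include <stdint.h>
#include <string.h>

#define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

uint32_t read_result(const void *ctx_buf)
{
        uint32_t retval;

        memcpy(&retval, ctx_buf, sizeof(retval));  /* copy the result out   */
        barrier_data(ctx_buf);                     /* buffer still "in use" */
        return retval;
}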
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 83d709e74dfb..073e66783f1d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
740 740
741 sge = ibwr->sg_list; 741 sge = ibwr->sg_list;
742 for (i = 0; i < num_sge; i++, sge++) { 742 for (i = 0; i < num_sge; i++, sge++) {
743 if (qp->is_user && copy_from_user(p, (__user void *) 743 memcpy(p, (void *)(uintptr_t)sge->addr,
744 (uintptr_t)sge->addr, sge->length)) 744 sge->length);
745 return -EFAULT;
746
747 else if (!qp->is_user)
748 memcpy(p, (void *)(uintptr_t)sge->addr,
749 sge->length);
750 745
751 p += sge->length; 746 p += sge->length;
752 } 747 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0060b2f9f659..efe7402f4885 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -863,7 +863,6 @@ dev_stop:
863 set_bit(IPOIB_STOP_REAPER, &priv->flags); 863 set_bit(IPOIB_STOP_REAPER, &priv->flags);
864 cancel_delayed_work(&priv->ah_reap_task); 864 cancel_delayed_work(&priv->ah_reap_task);
865 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); 865 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
866 napi_enable(&priv->napi);
867 ipoib_ib_dev_stop(dev); 866 ipoib_ib_dev_stop(dev);
868 return -1; 867 return -1;
869} 868}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index a115c0b7a310..1015a63de6ae 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1596,6 +1596,8 @@ static void ipoib_dev_uninit_default(struct net_device *dev)
1596 1596
1597 ipoib_transport_dev_cleanup(dev); 1597 ipoib_transport_dev_cleanup(dev);
1598 1598
1599 netif_napi_del(&priv->napi);
1600
1599 ipoib_cm_dev_cleanup(dev); 1601 ipoib_cm_dev_cleanup(dev);
1600 1602
1601 kfree(priv->rx_ring); 1603 kfree(priv->rx_ring);
@@ -1649,6 +1651,7 @@ out_rx_ring_cleanup:
1649 kfree(priv->rx_ring); 1651 kfree(priv->rx_ring);
1650 1652
1651out: 1653out:
1654 netif_napi_del(&priv->napi);
1652 return -ENOMEM; 1655 return -ENOMEM;
1653} 1656}
1654 1657
@@ -2237,6 +2240,7 @@ event_failed:
2237 2240
2238device_init_failed: 2241device_init_failed:
2239 free_netdev(priv->dev); 2242 free_netdev(priv->dev);
2243 kfree(priv);
2240 2244
2241alloc_mem_failed: 2245alloc_mem_failed:
2242 return ERR_PTR(result); 2246 return ERR_PTR(result);
@@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device)
2277 2281
2278static void ipoib_remove_one(struct ib_device *device, void *client_data) 2282static void ipoib_remove_one(struct ib_device *device, void *client_data)
2279{ 2283{
2280 struct ipoib_dev_priv *priv, *tmp; 2284 struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
2281 struct list_head *dev_list = client_data; 2285 struct list_head *dev_list = client_data;
2282 2286
2283 if (!dev_list) 2287 if (!dev_list)
@@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2300 flush_workqueue(priv->wq); 2304 flush_workqueue(priv->wq);
2301 2305
2302 unregister_netdev(priv->dev); 2306 unregister_netdev(priv->dev);
2303 free_netdev(priv->dev); 2307 if (device->free_rdma_netdev)
2308 device->free_rdma_netdev(priv->dev);
2309 else
2310 free_netdev(priv->dev);
2311
2312 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
2313 kfree(cpriv);
2314
2304 kfree(priv); 2315 kfree(priv);
2305 } 2316 }
2306 2317
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 36dc4fcaa3cd..081b33deff1b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
133 snprintf(intf_name, sizeof intf_name, "%s.%04x", 133 snprintf(intf_name, sizeof intf_name, "%s.%04x",
134 ppriv->dev->name, pkey); 134 ppriv->dev->name, pkey);
135 135
136 if (!rtnl_trylock())
137 return restart_syscall();
138
136 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); 139 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
137 if (!priv) 140 if (!priv)
138 return -ENOMEM; 141 return -ENOMEM;
139 142
140 if (!rtnl_trylock())
141 return restart_syscall();
142
143 down_write(&ppriv->vlan_rwsem); 143 down_write(&ppriv->vlan_rwsem);
144 144
145 /* 145 /*
@@ -167,8 +167,10 @@ out:
167 167
168 rtnl_unlock(); 168 rtnl_unlock();
169 169
170 if (result) 170 if (result) {
171 free_netdev(priv->dev); 171 free_netdev(priv->dev);
172 kfree(priv);
173 }
172 174
173 return result; 175 return result;
174} 176}
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
209 211
210 if (dev) { 212 if (dev) {
211 free_netdev(dev); 213 free_netdev(dev);
214 kfree(priv);
212 return 0; 215 return 0;
213 } 216 }
214 217
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5fe5846..72a391e01011 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
142int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) 142int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
143{ 143{
144 struct irq_domain *root_domain = 144 struct irq_domain *root_domain =
145 irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, 145 irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
146 &xtensa_mx_irq_domain_ops, 146 &xtensa_mx_irq_domain_ops,
147 &xtensa_mx_irq_chip); 147 &xtensa_mx_irq_chip);
148 irq_set_default_host(root_domain); 148 irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae1770964..f728755fa292 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
89int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) 89int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
90{ 90{
91 struct irq_domain *root_domain = 91 struct irq_domain *root_domain =
92 irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, 92 irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
93 &xtensa_irq_domain_ops, &xtensa_irq_chip); 93 &xtensa_irq_domain_ops, &xtensa_irq_chip);
94 irq_set_default_host(root_domain); 94 irq_set_default_host(root_domain);
95 return 0; 95 return 0;
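Both xtensa irqchips now register NR_IRQS - 1 interrupts starting at Linux IRQ 1, keeping Linux IRQ 0 (conventionally treated as "no IRQ") out of the legacy domain while hardware IRQ numbering still starts at 0. A hedged reminder of how the irq_domain_add_legacy() arguments line up (the ops and chip symbols below are placeholders for the driver's own structures):

/* Kernel-style sketch; demo_init() and its arguments are placeholders. */
#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_domain *demo_init(const struct irq_domain_ops *ops,
                                    struct irq_chip *chip)
{
        return irq_domain_add_legacy(NULL,        /* of_node: none (legacy boot) */
                                     NR_IRQS - 1, /* size: IRQs being mapped     */
                                     1,           /* first_irq: skip Linux IRQ 0 */
                                     0,           /* first_hwirq                 */
                                     ops, chip);  /* host_data carries the chip  */
}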
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 1548259297c1..2cfd9389ee96 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -242,7 +242,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
242 242
243 spin_lock_irqsave(lock, flags); 243 spin_lock_irqsave(lock, flags);
244 val = bcm6328_led_read(addr); 244 val = bcm6328_led_read(addr);
245 val |= (BIT(reg) << (((sel % 4) * 4) + 16)); 245 val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16));
246 bcm6328_led_write(addr, val); 246 bcm6328_led_write(addr, val);
247 spin_unlock_irqrestore(lock, flags); 247 spin_unlock_irqrestore(lock, flags);
248 } 248 }
@@ -269,7 +269,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
269 269
270 spin_lock_irqsave(lock, flags); 270 spin_lock_irqsave(lock, flags);
271 val = bcm6328_led_read(addr); 271 val = bcm6328_led_read(addr);
272 val |= (BIT(reg) << ((sel % 4) * 4)); 272 val |= (BIT(reg % 4) << ((sel % 4) * 4));
273 bcm6328_led_write(addr, val); 273 bcm6328_led_write(addr, val);
274 spin_unlock_irqrestore(lock, flags); 274 spin_unlock_irqrestore(lock, flags);
275 } 275 }
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index afa3b4099214..e95ea65380c8 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -20,7 +20,6 @@
20#include <linux/sched/loadavg.h> 20#include <linux/sched/loadavg.h>
21#include <linux/leds.h> 21#include <linux/leds.h>
22#include <linux/reboot.h> 22#include <linux/reboot.h>
23#include <linux/suspend.h>
24#include "../leds.h" 23#include "../leds.h"
25 24
26static int panic_heartbeats; 25static int panic_heartbeats;
@@ -163,30 +162,6 @@ static struct led_trigger heartbeat_led_trigger = {
163 .deactivate = heartbeat_trig_deactivate, 162 .deactivate = heartbeat_trig_deactivate,
164}; 163};
165 164
166static int heartbeat_pm_notifier(struct notifier_block *nb,
167 unsigned long pm_event, void *unused)
168{
169 int rc;
170
171 switch (pm_event) {
172 case PM_SUSPEND_PREPARE:
173 case PM_HIBERNATION_PREPARE:
174 case PM_RESTORE_PREPARE:
175 led_trigger_unregister(&heartbeat_led_trigger);
176 break;
177 case PM_POST_SUSPEND:
178 case PM_POST_HIBERNATION:
179 case PM_POST_RESTORE:
180 rc = led_trigger_register(&heartbeat_led_trigger);
181 if (rc)
182 pr_err("could not re-register heartbeat trigger\n");
183 break;
184 default:
185 break;
186 }
187 return NOTIFY_DONE;
188}
189
190static int heartbeat_reboot_notifier(struct notifier_block *nb, 165static int heartbeat_reboot_notifier(struct notifier_block *nb,
191 unsigned long code, void *unused) 166 unsigned long code, void *unused)
192{ 167{
@@ -201,10 +176,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
201 return NOTIFY_DONE; 176 return NOTIFY_DONE;
202} 177}
203 178
204static struct notifier_block heartbeat_pm_nb = {
205 .notifier_call = heartbeat_pm_notifier,
206};
207
208static struct notifier_block heartbeat_reboot_nb = { 179static struct notifier_block heartbeat_reboot_nb = {
209 .notifier_call = heartbeat_reboot_notifier, 180 .notifier_call = heartbeat_reboot_notifier,
210}; 181};
@@ -221,14 +192,12 @@ static int __init heartbeat_trig_init(void)
221 atomic_notifier_chain_register(&panic_notifier_list, 192 atomic_notifier_chain_register(&panic_notifier_list,
222 &heartbeat_panic_nb); 193 &heartbeat_panic_nb);
223 register_reboot_notifier(&heartbeat_reboot_nb); 194 register_reboot_notifier(&heartbeat_reboot_nb);
224 register_pm_notifier(&heartbeat_pm_nb);
225 } 195 }
226 return rc; 196 return rc;
227} 197}
228 198
229static void __exit heartbeat_trig_exit(void) 199static void __exit heartbeat_trig_exit(void)
230{ 200{
231 unregister_pm_notifier(&heartbeat_pm_nb);
232 unregister_reboot_notifier(&heartbeat_reboot_nb); 201 unregister_reboot_notifier(&heartbeat_reboot_nb);
233 atomic_notifier_chain_unregister(&panic_notifier_list, 202 atomic_notifier_chain_unregister(&panic_notifier_list,
234 &heartbeat_panic_nb); 203 &heartbeat_panic_nb);
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index 4e25a950ae6f..43428cec3a01 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -1,5 +1,6 @@
1config MEDIA_CEC_RC 1config MEDIA_CEC_RC
2 bool "HDMI CEC RC integration" 2 bool "HDMI CEC RC integration"
3 depends on CEC_CORE && RC_CORE 3 depends on CEC_CORE && RC_CORE
4 depends on CEC_CORE=m || RC_CORE=y
4 ---help--- 5 ---help---
5 Pass on CEC remote control messages to the RC framework. 6 Pass on CEC remote control messages to the RC framework.
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 0860fb458757..999926f731c8 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -271,16 +271,10 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
271 bool block, struct cec_msg __user *parg) 271 bool block, struct cec_msg __user *parg)
272{ 272{
273 struct cec_msg msg = {}; 273 struct cec_msg msg = {};
274 long err = 0; 274 long err;
275 275
276 if (copy_from_user(&msg, parg, sizeof(msg))) 276 if (copy_from_user(&msg, parg, sizeof(msg)))
277 return -EFAULT; 277 return -EFAULT;
278 mutex_lock(&adap->lock);
279 if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
280 err = -ENONET;
281 mutex_unlock(&adap->lock);
282 if (err)
283 return err;
284 278
285 err = cec_receive_msg(fh, &msg, block); 279 err = cec_receive_msg(fh, &msg, block);
286 if (err) 280 if (err)
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index acef4eca269f..3251cba89e8f 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -223,7 +223,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
223static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, 223static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
224 u8 mask, u8 val) 224 u8 mask, u8 val)
225{ 225{
226 i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); 226 i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
227} 227}
228 228
229static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) 229static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index e12ec50bf0bf..90a5f8fd5eea 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -183,9 +183,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
183 static unsigned long delt; 183 static unsigned long delt;
184 unsigned long deltintr; 184 unsigned long deltintr;
185 unsigned long flags; 185 unsigned long flags;
186 int counter = 0;
186 int iir, lsr; 187 int iir, lsr;
187 188
188 while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { 189 while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
190 if (++counter > 256) {
191 dev_err(&sir_ir_dev->dev, "Trapped in interrupt");
192 break;
193 }
194
189 switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */ 195 switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */
190 case UART_IIR_MSI: 196 case UART_IIR_MSI:
191 (void)inb(io + UART_MSR); 197 (void)inb(io + UART_MSR);
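The sir_ir handler now bounds its IIR-drain loop so a stuck status register cannot leave the CPU spinning in interrupt context. The shape of the guard in isolation, as a kernel-style sketch (the demo_drain() wrapper is illustrative; the 256-iteration cap mirrors the patch):

/* Kernel-style sketch of the bounded drain loop added above. */
#include <linux/io.h>
#include <linux/serial_reg.h>

static void demo_drain(unsigned int io)
{
        int counter = 0;

        while (inb(io + UART_IIR) & UART_IIR_ID) {
                if (++counter > 256)    /* give up rather than spin forever */
                        break;
                /* ... service the reported interrupt source ... */
        }
}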
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index 71bd68548c9c..4126552c9055 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -336,6 +336,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv)
336 serio_set_drvdata(serio, rain); 336 serio_set_drvdata(serio, rain);
337 INIT_WORK(&rain->work, rain_irq_work_handler); 337 INIT_WORK(&rain->work, rain_irq_work_handler);
338 mutex_init(&rain->write_lock); 338 mutex_init(&rain->write_lock);
339 spin_lock_init(&rain->buf_lock);
339 340
340 err = serio_open(serio, drv); 341 err = serio_open(serio, drv);
341 if (err) 342 if (err)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 94afbbf92807..c0175ea7e7ad 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
868 868
869void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) 869void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
870{ 870{
871 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) 871 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
872 return NULL; 872 return NULL;
873 873
874 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); 874 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
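The videobuf2 fix tightens the bound from > to >=: plane indices are zero-based, so plane_no == num_planes already points one past the end of the planes[] array. A tiny standalone illustration (the lookup table is made up):

/* Standalone illustration of the off-by-one fixed above. */
#include <stdio.h>

#define NUM_PLANES 2

static const char *lookup(unsigned int idx)
{
        static const char *planes[NUM_PLANES] = { "plane0", "plane1" };

        if (idx >= NUM_PLANES)  /* '>' would wrongly accept idx == 2 */
                return NULL;
        return planes[idx];
}

int main(void)
{
        printf("%s\n", lookup(1) ? lookup(1) : "(null)");
        printf("%s\n", lookup(2) ? lookup(2) : "(null)");
        return 0;
}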
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 1842ed341af1..de962c2d5e00 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -210,6 +210,15 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
210 int i; 210 int i;
211 bool use_desc_chain_mode = true; 211 bool use_desc_chain_mode = true;
212 212
213 /*
214 * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
215 * reported. For some strange reason this occurs in descriptor
216 * chain mode only. So let's fall back to bounce buffer mode
217 * for command SD_IO_RW_EXTENDED.
218 */
219 if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
220 return;
221
213 for_each_sg(data->sg, sg, data->sg_len, i) 222 for_each_sg(data->sg, sg, data->sg_len, i)
214 /* check for 8 byte alignment */ 223 /* check for 8 byte alignment */
215 if (sg->offset & 7) { 224 if (sg->offset & 7) {
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b44a6aeb346d..e5386ab706ec 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -90,10 +90,13 @@ enum ad_link_speed_type {
90 AD_LINK_SPEED_100MBPS, 90 AD_LINK_SPEED_100MBPS,
91 AD_LINK_SPEED_1000MBPS, 91 AD_LINK_SPEED_1000MBPS,
92 AD_LINK_SPEED_2500MBPS, 92 AD_LINK_SPEED_2500MBPS,
93 AD_LINK_SPEED_5000MBPS,
93 AD_LINK_SPEED_10000MBPS, 94 AD_LINK_SPEED_10000MBPS,
95 AD_LINK_SPEED_14000MBPS,
94 AD_LINK_SPEED_20000MBPS, 96 AD_LINK_SPEED_20000MBPS,
95 AD_LINK_SPEED_25000MBPS, 97 AD_LINK_SPEED_25000MBPS,
96 AD_LINK_SPEED_40000MBPS, 98 AD_LINK_SPEED_40000MBPS,
99 AD_LINK_SPEED_50000MBPS,
97 AD_LINK_SPEED_56000MBPS, 100 AD_LINK_SPEED_56000MBPS,
98 AD_LINK_SPEED_100000MBPS, 101 AD_LINK_SPEED_100000MBPS,
99}; 102};
@@ -259,10 +262,13 @@ static inline int __check_agg_selection_timer(struct port *port)
259 * %AD_LINK_SPEED_100MBPS, 262 * %AD_LINK_SPEED_100MBPS,
260 * %AD_LINK_SPEED_1000MBPS, 263 * %AD_LINK_SPEED_1000MBPS,
261 * %AD_LINK_SPEED_2500MBPS, 264 * %AD_LINK_SPEED_2500MBPS,
265 * %AD_LINK_SPEED_5000MBPS,
262 * %AD_LINK_SPEED_10000MBPS 266 * %AD_LINK_SPEED_10000MBPS
267 * %AD_LINK_SPEED_14000MBPS,
263 * %AD_LINK_SPEED_20000MBPS 268 * %AD_LINK_SPEED_20000MBPS
264 * %AD_LINK_SPEED_25000MBPS 269 * %AD_LINK_SPEED_25000MBPS
265 * %AD_LINK_SPEED_40000MBPS 270 * %AD_LINK_SPEED_40000MBPS
271 * %AD_LINK_SPEED_50000MBPS
266 * %AD_LINK_SPEED_56000MBPS 272 * %AD_LINK_SPEED_56000MBPS
267 * %AD_LINK_SPEED_100000MBPS 273 * %AD_LINK_SPEED_100000MBPS
268 */ 274 */
@@ -296,10 +302,18 @@ static u16 __get_link_speed(struct port *port)
296 speed = AD_LINK_SPEED_2500MBPS; 302 speed = AD_LINK_SPEED_2500MBPS;
297 break; 303 break;
298 304
305 case SPEED_5000:
306 speed = AD_LINK_SPEED_5000MBPS;
307 break;
308
299 case SPEED_10000: 309 case SPEED_10000:
300 speed = AD_LINK_SPEED_10000MBPS; 310 speed = AD_LINK_SPEED_10000MBPS;
301 break; 311 break;
302 312
313 case SPEED_14000:
314 speed = AD_LINK_SPEED_14000MBPS;
315 break;
316
303 case SPEED_20000: 317 case SPEED_20000:
304 speed = AD_LINK_SPEED_20000MBPS; 318 speed = AD_LINK_SPEED_20000MBPS;
305 break; 319 break;
@@ -312,6 +326,10 @@ static u16 __get_link_speed(struct port *port)
312 speed = AD_LINK_SPEED_40000MBPS; 326 speed = AD_LINK_SPEED_40000MBPS;
313 break; 327 break;
314 328
329 case SPEED_50000:
330 speed = AD_LINK_SPEED_50000MBPS;
331 break;
332
315 case SPEED_56000: 333 case SPEED_56000:
316 speed = AD_LINK_SPEED_56000MBPS; 334 speed = AD_LINK_SPEED_56000MBPS;
317 break; 335 break;
@@ -707,9 +725,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
707 case AD_LINK_SPEED_2500MBPS: 725 case AD_LINK_SPEED_2500MBPS:
708 bandwidth = nports * 2500; 726 bandwidth = nports * 2500;
709 break; 727 break;
728 case AD_LINK_SPEED_5000MBPS:
729 bandwidth = nports * 5000;
730 break;
710 case AD_LINK_SPEED_10000MBPS: 731 case AD_LINK_SPEED_10000MBPS:
711 bandwidth = nports * 10000; 732 bandwidth = nports * 10000;
712 break; 733 break;
734 case AD_LINK_SPEED_14000MBPS:
735 bandwidth = nports * 14000;
736 break;
713 case AD_LINK_SPEED_20000MBPS: 737 case AD_LINK_SPEED_20000MBPS:
714 bandwidth = nports * 20000; 738 bandwidth = nports * 20000;
715 break; 739 break;
@@ -719,6 +743,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
719 case AD_LINK_SPEED_40000MBPS: 743 case AD_LINK_SPEED_40000MBPS:
720 bandwidth = nports * 40000; 744 bandwidth = nports * 40000;
721 break; 745 break;
746 case AD_LINK_SPEED_50000MBPS:
747 bandwidth = nports * 50000;
748 break;
722 case AD_LINK_SPEED_56000MBPS: 749 case AD_LINK_SPEED_56000MBPS:
723 bandwidth = nports * 56000; 750 bandwidth = nports * 56000;
724 break; 751 break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2359478b977f..8ab6bdbe1682 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4192,7 +4192,6 @@ static void bond_destructor(struct net_device *bond_dev)
4192 struct bonding *bond = netdev_priv(bond_dev); 4192 struct bonding *bond = netdev_priv(bond_dev);
4193 if (bond->wq) 4193 if (bond->wq)
4194 destroy_workqueue(bond->wq); 4194 destroy_workqueue(bond->wq);
4195 free_netdev(bond_dev);
4196} 4195}
4197 4196
4198void bond_setup(struct net_device *bond_dev) 4197void bond_setup(struct net_device *bond_dev)
@@ -4212,7 +4211,8 @@ void bond_setup(struct net_device *bond_dev)
4212 bond_dev->netdev_ops = &bond_netdev_ops; 4211 bond_dev->netdev_ops = &bond_netdev_ops;
4213 bond_dev->ethtool_ops = &bond_ethtool_ops; 4212 bond_dev->ethtool_ops = &bond_ethtool_ops;
4214 4213
4215 bond_dev->destructor = bond_destructor; 4214 bond_dev->needs_free_netdev = true;
4215 bond_dev->priv_destructor = bond_destructor;
4216 4216
4217 SET_NETDEV_DEVTYPE(bond_dev, &bond_type); 4217 SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
4218 4218
@@ -4736,7 +4736,7 @@ int bond_create(struct net *net, const char *name)
4736 4736
4737 rtnl_unlock(); 4737 rtnl_unlock();
4738 if (res < 0) 4738 if (res < 0)
4739 bond_destructor(bond_dev); 4739 free_netdev(bond_dev);
4740 return res; 4740 return res;
4741} 4741}
4742 4742
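Many drivers in this series (bonding, the caif and can virtual devices, dummy, cxgb4's management netdev, and others) move from the removed dev->destructor hook to the split needs_free_netdev / priv_destructor scheme: the private destructor only tears down driver state, and the core calls free_netdev() itself when needs_free_netdev is set. A kernel-style sketch of the pattern (the foo_* names are placeholders):

/* Kernel-style sketch of the destructor split used throughout this series. */
#include <linux/netdevice.h>
#include <linux/slab.h>

struct foo_priv {
        void *extra;
};

static void foo_priv_destructor(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        kfree(priv->extra);     /* driver-private cleanup only            */
        /* no free_netdev() here: the core frees dev afterwards           */
}

static void foo_setup(struct net_device *dev)
{
        dev->needs_free_netdev = true;              /* core calls free_netdev() */
        dev->priv_destructor = foo_priv_destructor; /* runs just before it      */
}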
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index ddabce759456..71a7c3b44fdd 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev)
1121 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1121 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1122 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1122 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1123 dev->priv_flags |= IFF_NO_QUEUE; 1123 dev->priv_flags |= IFF_NO_QUEUE;
1124 dev->destructor = free_netdev; 1124 dev->needs_free_netdev = true;
1125 dev->netdev_ops = &cfhsi_netdevops; 1125 dev->netdev_ops = &cfhsi_netdevops;
1126 for (i = 0; i < CFHSI_PRIO_LAST; ++i) 1126 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1127 skb_queue_head_init(&cfhsi->qhead[i]); 1127 skb_queue_head_init(&cfhsi->qhead[i]);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index c2dea4916e5d..76e1d3545105 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev)
428 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 428 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
429 dev->mtu = CAIF_MAX_MTU; 429 dev->mtu = CAIF_MAX_MTU;
430 dev->priv_flags |= IFF_NO_QUEUE; 430 dev->priv_flags |= IFF_NO_QUEUE;
431 dev->destructor = free_netdev; 431 dev->needs_free_netdev = true;
432 skb_queue_head_init(&serdev->head); 432 skb_queue_head_init(&serdev->head);
433 serdev->common.link_select = CAIF_LINK_LOW_LATENCY; 433 serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
434 serdev->common.use_frag = true; 434 serdev->common.use_frag = true;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 3a529fbe539f..fc21afe852b9 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev)
712 dev->flags = IFF_NOARP | IFF_POINTOPOINT; 712 dev->flags = IFF_NOARP | IFF_POINTOPOINT;
713 dev->priv_flags |= IFF_NO_QUEUE; 713 dev->priv_flags |= IFF_NO_QUEUE;
714 dev->mtu = SPI_MAX_PAYLOAD_SIZE; 714 dev->mtu = SPI_MAX_PAYLOAD_SIZE;
715 dev->destructor = free_netdev; 715 dev->needs_free_netdev = true;
716 skb_queue_head_init(&cfspi->qhead); 716 skb_queue_head_init(&cfspi->qhead);
717 skb_queue_head_init(&cfspi->chead); 717 skb_queue_head_init(&cfspi->chead);
718 cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 718 cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 6122768c8644..1794ea0420b7 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev)
617 netdev->tx_queue_len = 100; 617 netdev->tx_queue_len = 100;
618 netdev->flags = IFF_POINTOPOINT | IFF_NOARP; 618 netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
619 netdev->mtu = CFV_DEF_MTU_SIZE; 619 netdev->mtu = CFV_DEF_MTU_SIZE;
620 netdev->destructor = free_netdev; 620 netdev->needs_free_netdev = true;
621} 621}
622 622
623/* Create debugfs counters for the device */ 623/* Create debugfs counters for the device */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 611d16a7061d..ae4ed03dc642 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -391,6 +391,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
391 can_update_state_error_stats(dev, new_state); 391 can_update_state_error_stats(dev, new_state);
392 priv->state = new_state; 392 priv->state = new_state;
393 393
394 if (!cf)
395 return;
396
394 if (unlikely(new_state == CAN_STATE_BUS_OFF)) { 397 if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
395 cf->can_id |= CAN_ERR_BUSOFF; 398 cf->can_id |= CAN_ERR_BUSOFF;
396 return; 399 return;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 0d57be5ea97b..85268be0c913 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -489,7 +489,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
489 struct pucan_rx_msg *msg_list, int msg_count) 489 struct pucan_rx_msg *msg_list, int msg_count)
490{ 490{
491 void *msg_ptr = msg_list; 491 void *msg_ptr = msg_list;
492 int i, msg_size; 492 int i, msg_size = 0;
493 493
494 for (i = 0; i < msg_count; i++) { 494 for (i = 0; i < msg_count; i++) {
495 msg_size = peak_canfd_handle_msg(priv, msg_ptr); 495 msg_size = peak_canfd_handle_msg(priv, msg_ptr);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index eb7173713bbc..6a6e896e52fa 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev)
417static void slc_free_netdev(struct net_device *dev) 417static void slc_free_netdev(struct net_device *dev)
418{ 418{
419 int i = dev->base_addr; 419 int i = dev->base_addr;
420 free_netdev(dev); 420
421 slcan_devs[i] = NULL; 421 slcan_devs[i] = NULL;
422} 422}
423 423
@@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = {
436static void slc_setup(struct net_device *dev) 436static void slc_setup(struct net_device *dev)
437{ 437{
438 dev->netdev_ops = &slc_netdev_ops; 438 dev->netdev_ops = &slc_netdev_ops;
439 dev->destructor = slc_free_netdev; 439 dev->needs_free_netdev = true;
440 dev->priv_destructor = slc_free_netdev;
440 441
441 dev->hard_header_len = 0; 442 dev->hard_header_len = 0;
442 dev->addr_len = 0; 443 dev->addr_len = 0;
@@ -761,8 +762,6 @@ static void __exit slcan_exit(void)
761 if (sl->tty) { 762 if (sl->tty) {
762 printk(KERN_ERR "%s: tty discipline still running\n", 763 printk(KERN_ERR "%s: tty discipline still running\n",
763 dev->name); 764 dev->name);
764 /* Intentionally leak the control block. */
765 dev->destructor = NULL;
766 } 765 }
767 766
768 unregister_netdev(dev); 767 unregister_netdev(dev);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index eecee7f8dfb7..afcc1312dbaf 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
265 sizeof(*dm), 265 sizeof(*dm),
266 1000); 266 1000);
267 267
268 kfree(dm);
269
268 return rc; 270 return rc;
269} 271}
270 272
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 57913dbbae0a..1ca76e03e965 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -908,8 +908,6 @@ static int peak_usb_probe(struct usb_interface *intf,
908 const struct peak_usb_adapter *peak_usb_adapter = NULL; 908 const struct peak_usb_adapter *peak_usb_adapter = NULL;
909 int i, err = -ENOMEM; 909 int i, err = -ENOMEM;
910 910
911 usb_dev = interface_to_usbdev(intf);
912
913 /* get corresponding PCAN-USB adapter */ 911 /* get corresponding PCAN-USB adapter */
914 for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) 912 for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
915 if (peak_usb_adapters_list[i]->device_id == usb_id_product) { 913 if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
@@ -920,7 +918,7 @@ static int peak_usb_probe(struct usb_interface *intf,
920 if (!peak_usb_adapter) { 918 if (!peak_usb_adapter) {
921 /* should never come except device_id bad usage in this file */ 919 /* should never come except device_id bad usage in this file */
922 pr_err("%s: didn't find device id. 0x%x in devices list\n", 920 pr_err("%s: didn't find device id. 0x%x in devices list\n",
923 PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct); 921 PCAN_USB_DRIVER_NAME, usb_id_product);
924 return -ENODEV; 922 return -ENODEV;
925 } 923 }
926 924
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index facca33d53e9..a8cb33264ff1 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -152,7 +152,7 @@ static const struct net_device_ops vcan_netdev_ops = {
152static void vcan_setup(struct net_device *dev) 152static void vcan_setup(struct net_device *dev)
153{ 153{
154 dev->type = ARPHRD_CAN; 154 dev->type = ARPHRD_CAN;
155 dev->mtu = CAN_MTU; 155 dev->mtu = CANFD_MTU;
156 dev->hard_header_len = 0; 156 dev->hard_header_len = 0;
157 dev->addr_len = 0; 157 dev->addr_len = 0;
158 dev->tx_queue_len = 0; 158 dev->tx_queue_len = 0;
@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
163 dev->flags |= IFF_ECHO; 163 dev->flags |= IFF_ECHO;
164 164
165 dev->netdev_ops = &vcan_netdev_ops; 165 dev->netdev_ops = &vcan_netdev_ops;
166 dev->destructor = free_netdev; 166 dev->needs_free_netdev = true;
167} 167}
168 168
169static struct rtnl_link_ops vcan_link_ops __read_mostly = { 169static struct rtnl_link_ops vcan_link_ops __read_mostly = {
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 7fbb24795681..cfe889e8f172 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -150,13 +150,13 @@ static const struct net_device_ops vxcan_netdev_ops = {
150static void vxcan_setup(struct net_device *dev) 150static void vxcan_setup(struct net_device *dev)
151{ 151{
152 dev->type = ARPHRD_CAN; 152 dev->type = ARPHRD_CAN;
153 dev->mtu = CAN_MTU; 153 dev->mtu = CANFD_MTU;
154 dev->hard_header_len = 0; 154 dev->hard_header_len = 0;
155 dev->addr_len = 0; 155 dev->addr_len = 0;
156 dev->tx_queue_len = 0; 156 dev->tx_queue_len = 0;
157 dev->flags = (IFF_NOARP|IFF_ECHO); 157 dev->flags = (IFF_NOARP|IFF_ECHO);
158 dev->netdev_ops = &vxcan_netdev_ops; 158 dev->netdev_ops = &vxcan_netdev_ops;
159 dev->destructor = free_netdev; 159 dev->needs_free_netdev = true;
160} 160}
161 161
162/* forward declaration for rtnl_create_link() */ 162/* forward declaration for rtnl_create_link() */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 149244aac20a..9905b52fe293 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -328,7 +328,6 @@ static void dummy_free_netdev(struct net_device *dev)
328 struct dummy_priv *priv = netdev_priv(dev); 328 struct dummy_priv *priv = netdev_priv(dev);
329 329
330 kfree(priv->vfinfo); 330 kfree(priv->vfinfo);
331 free_netdev(dev);
332} 331}
333 332
334static void dummy_setup(struct net_device *dev) 333static void dummy_setup(struct net_device *dev)
@@ -338,7 +337,8 @@ static void dummy_setup(struct net_device *dev)
338 /* Initialize the device structure. */ 337 /* Initialize the device structure. */
339 dev->netdev_ops = &dummy_netdev_ops; 338 dev->netdev_ops = &dummy_netdev_ops;
340 dev->ethtool_ops = &dummy_ethtool_ops; 339 dev->ethtool_ops = &dummy_ethtool_ops;
341 dev->destructor = dummy_free_netdev; 340 dev->needs_free_netdev = true;
341 dev->priv_destructor = dummy_free_netdev;
342 342
343 /* Fill in device structure with ethernet-generic values. */ 343 /* Fill in device structure with ethernet-generic values. */
344 dev->flags |= IFF_NOARP; 344 dev->flags |= IFF_NOARP;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 08d11cede9c9..f5b237e0bd60 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -61,6 +61,8 @@
61 61
62#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF 62#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
63 63
64#define ENA_REGS_ADMIN_INTR_MASK 1
65
64/*****************************************************************************/ 66/*****************************************************************************/
65/*****************************************************************************/ 67/*****************************************************************************/
66/*****************************************************************************/ 68/*****************************************************************************/
@@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
232 tail_masked = admin_queue->sq.tail & queue_size_mask; 234 tail_masked = admin_queue->sq.tail & queue_size_mask;
233 235
234 /* In case of queue FULL */ 236 /* In case of queue FULL */
235 cnt = admin_queue->sq.tail - admin_queue->sq.head; 237 cnt = atomic_read(&admin_queue->outstanding_cmds);
236 if (cnt >= admin_queue->q_depth) { 238 if (cnt >= admin_queue->q_depth) {
237 pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", 239 pr_debug("admin queue is full.\n");
238 admin_queue->sq.tail, admin_queue->sq.head,
239 admin_queue->q_depth);
240 admin_queue->stats.out_of_space++; 240 admin_queue->stats.out_of_space++;
241 return ERR_PTR(-ENOSPC); 241 return ERR_PTR(-ENOSPC);
242 } 242 }
@@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
508static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 508static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
509 struct ena_com_admin_queue *admin_queue) 509 struct ena_com_admin_queue *admin_queue)
510{ 510{
511 unsigned long flags; 511 unsigned long flags, timeout;
512 u32 start_time;
513 int ret; 512 int ret;
514 513
515 start_time = ((u32)jiffies_to_usecs(jiffies)); 514 timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
515
516 while (1) {
517 spin_lock_irqsave(&admin_queue->q_lock, flags);
518 ena_com_handle_admin_completion(admin_queue);
519 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
520
521 if (comp_ctx->status != ENA_CMD_SUBMITTED)
522 break;
516 523
517 while (comp_ctx->status == ENA_CMD_SUBMITTED) { 524 if (time_is_before_jiffies(timeout)) {
518 if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
519 ADMIN_CMD_TIMEOUT_US) {
520 pr_err("Wait for completion (polling) timeout\n"); 525 pr_err("Wait for completion (polling) timeout\n");
521 /* ENA didn't have any completion */ 526 /* ENA didn't have any completion */
522 spin_lock_irqsave(&admin_queue->q_lock, flags); 527 spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
528 goto err; 533 goto err;
529 } 534 }
530 535
531 spin_lock_irqsave(&admin_queue->q_lock, flags);
532 ena_com_handle_admin_completion(admin_queue);
533 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
534
535 msleep(100); 536 msleep(100);
536 } 537 }
537 538
@@ -1455,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1455 1456
1456void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) 1457void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1457{ 1458{
1459 u32 mask_value = 0;
1460
1461 if (polling)
1462 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1463
1464 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1458 ena_dev->admin_queue.polling = polling; 1465 ena_dev->admin_queue.polling = polling;
1459} 1466}
1460 1467
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 67b2338f8fb3..3ee55e2fd694 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = {
80 ENA_STAT_TX_ENTRY(tx_poll), 80 ENA_STAT_TX_ENTRY(tx_poll),
81 ENA_STAT_TX_ENTRY(doorbells), 81 ENA_STAT_TX_ENTRY(doorbells),
82 ENA_STAT_TX_ENTRY(prepare_ctx_err), 82 ENA_STAT_TX_ENTRY(prepare_ctx_err),
83 ENA_STAT_TX_ENTRY(missing_tx_comp),
84 ENA_STAT_TX_ENTRY(bad_req_id), 83 ENA_STAT_TX_ENTRY(bad_req_id),
85}; 84};
86 85
@@ -94,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
94 ENA_STAT_RX_ENTRY(dma_mapping_err), 93 ENA_STAT_RX_ENTRY(dma_mapping_err),
95 ENA_STAT_RX_ENTRY(bad_desc_num), 94 ENA_STAT_RX_ENTRY(bad_desc_num),
96 ENA_STAT_RX_ENTRY(rx_copybreak_pkt), 95 ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
96 ENA_STAT_RX_ENTRY(empty_rx_ring),
97}; 97};
98 98
99static const struct ena_stats ena_stats_ena_com_strings[] = { 99static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7c1214d78855..4f16ed38bcf3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
190 rxr->sgl_size = adapter->max_rx_sgl_size; 190 rxr->sgl_size = adapter->max_rx_sgl_size;
191 rxr->smoothed_interval = 191 rxr->smoothed_interval =
192 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); 192 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
193 rxr->empty_rx_queue = 0;
193 } 194 }
194} 195}
195 196
@@ -1078,6 +1079,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
1078 rx_ring->per_napi_bytes = 0; 1079 rx_ring->per_napi_bytes = 0;
1079} 1080}
1080 1081
1082static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
1083 struct ena_ring *rx_ring)
1084{
1085 struct ena_eth_io_intr_reg intr_reg;
1086
1087 /* Update intr register: rx intr delay,
1088 * tx intr delay and interrupt unmask
1089 */
1090 ena_com_update_intr_reg(&intr_reg,
1091 rx_ring->smoothed_interval,
1092 tx_ring->smoothed_interval,
1093 true);
1094
1095 /* It is a shared MSI-X.
1096 * Tx and Rx CQ have pointer to it.
1097 * So we use one of them to reach the intr reg
1098 */
1099 ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
1100}
1101
1081static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, 1102static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1082 struct ena_ring *rx_ring) 1103 struct ena_ring *rx_ring)
1083{ 1104{
@@ -1108,7 +1129,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
1108{ 1129{
1109 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); 1130 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1110 struct ena_ring *tx_ring, *rx_ring; 1131 struct ena_ring *tx_ring, *rx_ring;
1111 struct ena_eth_io_intr_reg intr_reg;
1112 1132
1113 u32 tx_work_done; 1133 u32 tx_work_done;
1114 u32 rx_work_done; 1134 u32 rx_work_done;
@@ -1149,22 +1169,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
1149 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) 1169 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1150 ena_adjust_intr_moderation(rx_ring, tx_ring); 1170 ena_adjust_intr_moderation(rx_ring, tx_ring);
1151 1171
1152 /* Update intr register: rx intr delay, 1172 ena_unmask_interrupt(tx_ring, rx_ring);
1153 * tx intr delay and interrupt unmask
1154 */
1155 ena_com_update_intr_reg(&intr_reg,
1156 rx_ring->smoothed_interval,
1157 tx_ring->smoothed_interval,
1158 true);
1159
1160 /* It is a shared MSI-X.
1161 * Tx and Rx CQ have pointer to it.
1162 * So we use one of them to reach the intr reg
1163 */
1164 ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
1165 } 1173 }
1166 1174
1167
1168 ena_update_ring_numa_node(tx_ring, rx_ring); 1175 ena_update_ring_numa_node(tx_ring, rx_ring);
1169 1176
1170 ret = rx_work_done; 1177 ret = rx_work_done;
@@ -1485,6 +1492,11 @@ static int ena_up_complete(struct ena_adapter *adapter)
1485 1492
1486 ena_napi_enable_all(adapter); 1493 ena_napi_enable_all(adapter);
1487 1494
1495 /* Enable completion queues interrupt */
1496 for (i = 0; i < adapter->num_queues; i++)
1497 ena_unmask_interrupt(&adapter->tx_ring[i],
1498 &adapter->rx_ring[i]);
1499
1488 /* schedule napi in case we had pending packets 1500 /* schedule napi in case we had pending packets
1489 * from the last time we disable napi 1501 * from the last time we disable napi
1490 */ 1502 */
@@ -1532,6 +1544,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1532 "Failed to get TX queue handlers. TX queue num %d rc: %d\n", 1544 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
1533 qid, rc); 1545 qid, rc);
1534 ena_com_destroy_io_queue(ena_dev, ena_qid); 1546 ena_com_destroy_io_queue(ena_dev, ena_qid);
1547 return rc;
1535 } 1548 }
1536 1549
1537 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); 1550 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1596,6 +1609,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1596 "Failed to get RX queue handlers. RX queue num %d rc: %d\n", 1609 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
1597 qid, rc); 1610 qid, rc);
1598 ena_com_destroy_io_queue(ena_dev, ena_qid); 1611 ena_com_destroy_io_queue(ena_dev, ena_qid);
1612 return rc;
1599 } 1613 }
1600 1614
1601 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); 1615 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1981,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
1981 1995
1982 tx_info->tx_descs = nb_hw_desc; 1996 tx_info->tx_descs = nb_hw_desc;
1983 tx_info->last_jiffies = jiffies; 1997 tx_info->last_jiffies = jiffies;
1998 tx_info->print_once = 0;
1984 1999
1985 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 2000 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
1986 tx_ring->ring_size); 2001 tx_ring->ring_size);
@@ -2550,13 +2565,44 @@ err:
2550 "Reset attempt failed. Can not reset the device\n"); 2565 "Reset attempt failed. Can not reset the device\n");
2551} 2566}
2552 2567
2553static void check_for_missing_tx_completions(struct ena_adapter *adapter) 2568static int check_missing_comp_in_queue(struct ena_adapter *adapter,
2569 struct ena_ring *tx_ring)
2554{ 2570{
2555 struct ena_tx_buffer *tx_buf; 2571 struct ena_tx_buffer *tx_buf;
2556 unsigned long last_jiffies; 2572 unsigned long last_jiffies;
2573 u32 missed_tx = 0;
2574 int i;
2575
2576 for (i = 0; i < tx_ring->ring_size; i++) {
2577 tx_buf = &tx_ring->tx_buffer_info[i];
2578 last_jiffies = tx_buf->last_jiffies;
2579 if (unlikely(last_jiffies &&
2580 time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
2581 if (!tx_buf->print_once)
2582 netif_notice(adapter, tx_err, adapter->netdev,
2583 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2584 tx_ring->qid, i);
2585
2586 tx_buf->print_once = 1;
2587 missed_tx++;
2588
2589 if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
2590 netif_err(adapter, tx_err, adapter->netdev,
2591 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2592 missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
2593 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2594 return -EIO;
2595 }
2596 }
2597 }
2598
2599 return 0;
2600}
2601
2602static void check_for_missing_tx_completions(struct ena_adapter *adapter)
2603{
2557 struct ena_ring *tx_ring; 2604 struct ena_ring *tx_ring;
2558 int i, j, budget; 2605 int i, budget, rc;
2559 u32 missed_tx;
2560 2606
2561 /* Make sure the driver doesn't turn the device in other process */ 2607 /* Make sure the driver doesn't turn the device in other process */
2562 smp_rmb(); 2608 smp_rmb();
@@ -2572,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
2572 for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { 2618 for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2573 tx_ring = &adapter->tx_ring[i]; 2619 tx_ring = &adapter->tx_ring[i];
2574 2620
2575 for (j = 0; j < tx_ring->ring_size; j++) { 2621 rc = check_missing_comp_in_queue(adapter, tx_ring);
2576 tx_buf = &tx_ring->tx_buffer_info[j]; 2622 if (unlikely(rc))
2577 last_jiffies = tx_buf->last_jiffies; 2623 return;
2578 if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
2579 netif_notice(adapter, tx_err, adapter->netdev,
2580 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2581 tx_ring->qid, j);
2582
2583 u64_stats_update_begin(&tx_ring->syncp);
2584 missed_tx = tx_ring->tx_stats.missing_tx_comp++;
2585 u64_stats_update_end(&tx_ring->syncp);
2586
2587 /* Clear last jiffies so the lost buffer won't
2588 * be counted twice.
2589 */
2590 tx_buf->last_jiffies = 0;
2591
2592 if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
2593 netif_err(adapter, tx_err, adapter->netdev,
2594 "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n",
2595 missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
2596 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2597 }
2598 }
2599 }
2600 2624
2601 budget--; 2625 budget--;
2602 if (!budget) 2626 if (!budget)
@@ -2606,6 +2630,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
2606 adapter->last_monitored_tx_qid = i % adapter->num_queues; 2630 adapter->last_monitored_tx_qid = i % adapter->num_queues;
2607} 2631}
2608 2632
2633/* trigger napi schedule after 2 consecutive detections */
2634#define EMPTY_RX_REFILL 2
2635/* For the rare case where the device runs out of Rx descriptors and the
2636 * napi handler failed to refill new Rx descriptors (due to a lack of memory
2637 * for example).
2638 * This case will lead to a deadlock:
2639 * The device won't send interrupts since all the new Rx packets will be dropped
2640 * The napi handler won't allocate new Rx descriptors so the device will be
2641 * able to send new packets.
2642 *
2643 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2644 * It is recommended to have at least 512MB, with a minimum of 128MB for
2645 * constrained environment).
2646 *
2647 * When such a situation is detected - Reschedule napi
2648 */
2649static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2650{
2651 struct ena_ring *rx_ring;
2652 int i, refill_required;
2653
2654 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2655 return;
2656
2657 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2658 return;
2659
2660 for (i = 0; i < adapter->num_queues; i++) {
2661 rx_ring = &adapter->rx_ring[i];
2662
2663 refill_required =
2664 ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
2665 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
2666 rx_ring->empty_rx_queue++;
2667
2668 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
2669 u64_stats_update_begin(&rx_ring->syncp);
2670 rx_ring->rx_stats.empty_rx_ring++;
2671 u64_stats_update_end(&rx_ring->syncp);
2672
2673 netif_err(adapter, drv, adapter->netdev,
2674 "trigger refill for ring %d\n", i);
2675
2676 napi_schedule(rx_ring->napi);
2677 rx_ring->empty_rx_queue = 0;
2678 }
2679 } else {
2680 rx_ring->empty_rx_queue = 0;
2681 }
2682 }
2683}
2684
2609/* Check for keep alive expiration */ 2685/* Check for keep alive expiration */
2610static void check_for_missing_keep_alive(struct ena_adapter *adapter) 2686static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2611{ 2687{
@@ -2660,6 +2736,8 @@ static void ena_timer_service(unsigned long data)
2660 2736
2661 check_for_missing_tx_completions(adapter); 2737 check_for_missing_tx_completions(adapter);
2662 2738
2739 check_for_empty_rx_ring(adapter);
2740
2663 if (debug_area) 2741 if (debug_area)
2664 ena_dump_stats_to_buf(adapter, debug_area); 2742 ena_dump_stats_to_buf(adapter, debug_area);
2665 2743
@@ -2840,6 +2918,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
2840{ 2918{
2841 int release_bars; 2919 int release_bars;
2842 2920
2921 if (ena_dev->mem_bar)
2922 devm_iounmap(&pdev->dev, ena_dev->mem_bar);
2923
2924 devm_iounmap(&pdev->dev, ena_dev->reg_bar);
2925
2843 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 2926 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
2844 pci_release_selected_regions(pdev, release_bars); 2927 pci_release_selected_regions(pdev, release_bars);
2845} 2928}
@@ -2927,8 +3010,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2927 goto err_free_ena_dev; 3010 goto err_free_ena_dev;
2928 } 3011 }
2929 3012
2930 ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), 3013 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
2931 pci_resource_len(pdev, ENA_REG_BAR)); 3014 pci_resource_start(pdev, ENA_REG_BAR),
3015 pci_resource_len(pdev, ENA_REG_BAR));
2932 if (!ena_dev->reg_bar) { 3016 if (!ena_dev->reg_bar) {
2933 dev_err(&pdev->dev, "failed to remap regs bar\n"); 3017 dev_err(&pdev->dev, "failed to remap regs bar\n");
2934 rc = -EFAULT; 3018 rc = -EFAULT;
@@ -2948,8 +3032,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2948 ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); 3032 ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
2949 3033
2950 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 3034 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2951 ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), 3035 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
2952 pci_resource_len(pdev, ENA_MEM_BAR)); 3036 pci_resource_start(pdev, ENA_MEM_BAR),
3037 pci_resource_len(pdev, ENA_MEM_BAR));
2953 if (!ena_dev->mem_bar) { 3038 if (!ena_dev->mem_bar) {
2954 rc = -EFAULT; 3039 rc = -EFAULT;
2955 goto err_device_destroy; 3040 goto err_device_destroy;
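check_for_empty_rx_ring() only reschedules napi after a ring has looked empty on two consecutive timer passes, which filters out the transient case where the ring is momentarily unfilled. A kernel-style sketch of that hysteresis counter (the demo_ring structure stands in for the driver's Rx ring; the threshold mirrors EMPTY_RX_REFILL):

/* Kernel-style sketch of the two-strikes hysteresis used above. */
#include <linux/netdevice.h>

#define DEMO_EMPTY_REFILL       2       /* consecutive detections needed */

struct demo_ring {
        struct napi_struct *napi;
        int empty_count;
};

static void demo_check_ring(struct demo_ring *ring, bool looks_empty)
{
        if (!looks_empty) {
                ring->empty_count = 0;          /* any progress resets it */
                return;
        }

        if (++ring->empty_count >= DEMO_EMPTY_REFILL) {
                napi_schedule(ring->napi);      /* kick the refill path   */
                ring->empty_count = 0;
        }
}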
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0e22bce6239d..a4d3d5e21068 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
45 45
46#define DRV_MODULE_VER_MAJOR 1 46#define DRV_MODULE_VER_MAJOR 1
47#define DRV_MODULE_VER_MINOR 1 47#define DRV_MODULE_VER_MINOR 1
48#define DRV_MODULE_VER_SUBMINOR 2 48#define DRV_MODULE_VER_SUBMINOR 7
49 49
50#define DRV_MODULE_NAME "ena" 50#define DRV_MODULE_NAME "ena"
51#ifndef DRV_MODULE_VERSION 51#ifndef DRV_MODULE_VERSION
@@ -146,7 +146,18 @@ struct ena_tx_buffer {
146 u32 tx_descs; 146 u32 tx_descs;
147 /* num of buffers used by this skb */ 147 /* num of buffers used by this skb */
148 u32 num_of_bufs; 148 u32 num_of_bufs;
149 /* Save the last jiffies to detect missing tx packets */ 149
 150 /* Used when detecting missing tx packets to limit the number of prints */
151 u32 print_once;
152 /* Save the last jiffies to detect missing tx packets
153 *
 154 * Set to a non-zero value on ena_start_xmit and cleared to zero by
 155 * the napi handler and the timer service routine.
 156 *
 157 * While this value is not protected by a lock,
 158 * a given packet is not expected to be handled by ena_start_xmit
 159 * and by napi/timer_service at the same time.
160 */
150 unsigned long last_jiffies; 161 unsigned long last_jiffies;
151 struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; 162 struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
152} ____cacheline_aligned; 163} ____cacheline_aligned;
@@ -170,7 +181,6 @@ struct ena_stats_tx {
170 u64 napi_comp; 181 u64 napi_comp;
171 u64 tx_poll; 182 u64 tx_poll;
172 u64 doorbells; 183 u64 doorbells;
173 u64 missing_tx_comp;
174 u64 bad_req_id; 184 u64 bad_req_id;
175}; 185};
176 186
@@ -184,6 +194,7 @@ struct ena_stats_rx {
184 u64 dma_mapping_err; 194 u64 dma_mapping_err;
185 u64 bad_desc_num; 195 u64 bad_desc_num;
186 u64 rx_copybreak_pkt; 196 u64 rx_copybreak_pkt;
197 u64 empty_rx_ring;
187}; 198};
188 199
189struct ena_ring { 200struct ena_ring {
@@ -231,6 +242,7 @@ struct ena_ring {
231 struct ena_stats_tx tx_stats; 242 struct ena_stats_tx tx_stats;
232 struct ena_stats_rx rx_stats; 243 struct ena_stats_rx rx_stats;
233 }; 244 };
245 int empty_rx_queue;
234} ____cacheline_aligned; 246} ____cacheline_aligned;
235 247
236struct ena_stats_dev { 248struct ena_stats_dev {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index b8e3d88f0879..a66aee51ab5b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -193,9 +193,6 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
193 struct aq_hw_caps_s *aq_hw_caps, 193 struct aq_hw_caps_s *aq_hw_caps,
194 u32 *regs_buff); 194 u32 *regs_buff);
195 195
196int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
197 struct ethtool_cmd *cmd);
198
199int hw_atl_utils_hw_set_power(struct aq_hw_s *self, 196int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
200 unsigned int power_state); 197 unsigned int power_state);
201 198
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5f49334dcad5..f619c4cac51f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3883,15 +3883,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3883 /* when transmitting in a vf, start bd must hold the ethertype 3883 /* when transmitting in a vf, start bd must hold the ethertype
3884 * for fw to enforce it 3884 * for fw to enforce it
3885 */ 3885 */
3886 u16 vlan_tci = 0;
3886#ifndef BNX2X_STOP_ON_ERROR 3887#ifndef BNX2X_STOP_ON_ERROR
3887 if (IS_VF(bp)) 3888 if (IS_VF(bp)) {
3888#endif 3889#endif
3889 tx_start_bd->vlan_or_ethertype = 3890 /* Still need to consider inband vlan for enforced */
3890 cpu_to_le16(ntohs(eth->h_proto)); 3891 if (__vlan_get_tag(skb, &vlan_tci)) {
3892 tx_start_bd->vlan_or_ethertype =
3893 cpu_to_le16(ntohs(eth->h_proto));
3894 } else {
3895 tx_start_bd->bd_flags.as_bitfield |=
3896 (X_ETH_INBAND_VLAN <<
3897 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3898 tx_start_bd->vlan_or_ethertype =
3899 cpu_to_le16(vlan_tci);
3900 }
3891#ifndef BNX2X_STOP_ON_ERROR 3901#ifndef BNX2X_STOP_ON_ERROR
3892 else 3902 } else {
3893 /* used by FW for packet accounting */ 3903 /* used by FW for packet accounting */
3894 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 3904 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3905 }
3895#endif 3906#endif
3896 } 3907 }
3897 3908
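For VF transmits, bnx2x now asks __vlan_get_tag() for an in-band tag first and only falls back to writing the ethertype into the start BD when no VLAN header is present (the helper returns 0 when it finds one). A kernel-style sketch of that decision (the two fill_* callbacks are placeholders for the BD setup):

/* Kernel-style sketch of the in-band VLAN decision above. */
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static void demo_fill_vlan_or_ethertype(struct sk_buff *skb,
                                        void (*fill_vlan)(u16 tci),
                                        void (*fill_ethertype)(void))
{
        u16 vlan_tci = 0;

        if (__vlan_get_tag(skb, &vlan_tci))
                fill_ethertype();       /* no VLAN header found    */
        else
                fill_vlan(vlan_tci);    /* enforce the in-band tag */
}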
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index bdfd53b46bc5..9ca994d0bab6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -901,6 +901,8 @@ static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
901 /* release VF resources */ 901 /* release VF resources */
902 bnx2x_vf_free_resc(bp, vf); 902 bnx2x_vf_free_resc(bp, vf);
903 903
904 vf->malicious = false;
905
904 /* re-open the mailbox */ 906 /* re-open the mailbox */
905 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 907 bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
906 return; 908 return;
@@ -1822,9 +1824,11 @@ get_vf:
1822 vf->abs_vfid, qidx); 1824 vf->abs_vfid, qidx);
1823 bnx2x_vf_handle_rss_update_eqe(bp, vf); 1825 bnx2x_vf_handle_rss_update_eqe(bp, vf);
1824 case EVENT_RING_OPCODE_VF_FLR: 1826 case EVENT_RING_OPCODE_VF_FLR:
1825 case EVENT_RING_OPCODE_MALICIOUS_VF:
1826 /* Do nothing for now */ 1827 /* Do nothing for now */
1827 return 0; 1828 return 0;
1829 case EVENT_RING_OPCODE_MALICIOUS_VF:
1830 vf->malicious = true;
1831 return 0;
1828 } 1832 }
1829 1833
1830 return 0; 1834 return 0;
@@ -1905,6 +1909,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1905 continue; 1909 continue;
1906 } 1910 }
1907 1911
1912 if (vf->malicious) {
1913 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1914 "vf %d malicious so no stats for it\n",
1915 vf->abs_vfid);
1916 continue;
1917 }
1918
1908 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1919 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1909 "add addresses for vf %d\n", vf->abs_vfid); 1920 "add addresses for vf %d\n", vf->abs_vfid);
1910 for_each_vfq(vf, j) { 1921 for_each_vfq(vf, j) {
@@ -3042,7 +3053,7 @@ void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3042{ 3053{
3043 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3054 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3044 sizeof(struct bnx2x_vf_mbx_msg)); 3055 sizeof(struct bnx2x_vf_mbx_msg));
3045 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 3056 BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3046 sizeof(union pf_vf_bulletin)); 3057 sizeof(union pf_vf_bulletin));
3047} 3058}
3048 3059
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 888d0b6632e8..53466f6cebab 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -141,6 +141,7 @@ struct bnx2x_virtf {
141#define VF_RESET 3 /* VF FLR'd, pending cleanup */ 141#define VF_RESET 3 /* VF FLR'd, pending cleanup */
142 142
143 bool flr_clnup_stage; /* true during flr cleanup */ 143 bool flr_clnup_stage; /* true during flr cleanup */
144 bool malicious; /* true if FW indicated so, until FLR */
144 145
145 /* dma */ 146 /* dma */
146 dma_addr_t fw_stat_map; 147 dma_addr_t fw_stat_map;
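
The sriov hunks keep a per-VF "malicious" flag: it is set when the firmware posts a MALICIOUS_VF event, cleared again on FLR, and checked before the VF is added to the statistics request. A compact sketch of that state handling, with simplified types that are not the driver's own:

#include <stdbool.h>
#include <stdio.h>

enum vf_event { EVT_FLR, EVT_MALICIOUS, EVT_OTHER };

struct vf {
	int  id;
	bool malicious;	/* true from a malicious-VF event until the next FLR */
};

static void handle_event(struct vf *vf, enum vf_event evt)
{
	switch (evt) {
	case EVT_MALICIOUS:
		vf->malicious = true;	/* quarantine the VF from now on */
		break;
	case EVT_FLR:
		vf->malicious = false;	/* function-level reset rehabilitates it */
		break;
	default:
		break;
	}
}

static void collect_stats(struct vf *vfs, int n)
{
	for (int i = 0; i < n; i++) {
		if (vfs[i].malicious) {
			printf("vf %d malicious, no stats for it\n", vfs[i].id);
			continue;
		}
		printf("adding stats addresses for vf %d\n", vfs[i].id);
	}
}

int main(void)
{
	struct vf vfs[2] = { { .id = 0 }, { .id = 1 } };

	handle_event(&vfs[1], EVT_MALICIOUS);
	collect_stats(vfs, 2);
	handle_event(&vfs[1], EVT_FLR);
	collect_stats(vfs, 2);
	return 0;
}
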
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 77ed2f628f9c..ea1bfcf1870a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4525,7 +4525,7 @@ static void dummy_setup(struct net_device *dev)
4525 /* Initialize the device structure. */ 4525 /* Initialize the device structure. */
4526 dev->netdev_ops = &cxgb4_mgmt_netdev_ops; 4526 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
4527 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; 4527 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
4528 dev->destructor = free_netdev; 4528 dev->needs_free_netdev = true;
4529} 4529}
4530 4530
4531static int config_mgmt_dev(struct pci_dev *pdev) 4531static int config_mgmt_dev(struct pci_dev *pdev)
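
Several hunks in this series (cxgb4 above, and geneve, gtp and the hamradio drivers further down) replace the old "dev->destructor = free_netdev" assignment with "dev->needs_free_netdev = true", letting the networking core free the netdev on unregister instead of a driver-supplied destructor. A hedged kernel-style sketch of a setup callback after that conversion; the ops table is a placeholder, not a real driver's.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical ops table; a real driver supplies its own. */
static const struct net_device_ops example_netdev_ops;

void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &example_netdev_ops;

	/* Let the core call free_netdev() after unregister, rather than
	 * pointing dev->destructor at it from the driver. */
	dev->needs_free_netdev = true;
}
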
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 508923f39ccf..259e69a52ec5 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -343,6 +343,7 @@ static int emac_reset(struct emac_instance *dev)
343{ 343{
344 struct emac_regs __iomem *p = dev->emacp; 344 struct emac_regs __iomem *p = dev->emacp;
345 int n = 20; 345 int n = 20;
346 bool __maybe_unused try_internal_clock = false;
346 347
347 DBG(dev, "reset" NL); 348 DBG(dev, "reset" NL);
348 349
@@ -355,6 +356,7 @@ static int emac_reset(struct emac_instance *dev)
355 } 356 }
356 357
357#ifdef CONFIG_PPC_DCR_NATIVE 358#ifdef CONFIG_PPC_DCR_NATIVE
359do_retry:
358 /* 360 /*
359 * PPC460EX/GT Embedded Processor Advanced User's Manual 361 * PPC460EX/GT Embedded Processor Advanced User's Manual
360 * section 28.10.1 Mode Register 0 (EMACx_MR0) states: 362 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -362,10 +364,19 @@ static int emac_reset(struct emac_instance *dev)
362 * of the EMAC. If none is present, select the internal clock 364 * of the EMAC. If none is present, select the internal clock
363 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). 365 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
364 * After a soft reset, select the external clock. 366 * After a soft reset, select the external clock.
367 *
368 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
369 * ethernet cable is not attached. This causes the reset to timeout
370 * and the PHY detection code in emac_init_phy() is unable to
371 * communicate and detect the AR8035-A PHY. As a result, the emac
372 * driver bails out early and the user has no ethernet.
373 * In order to stay compatible with existing configurations, the
374 * driver will temporarily switch to the internal clock, after
375 * the first reset fails.
365 */ 376 */
366 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { 377 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
367 if (dev->phy_address == 0xffffffff && 378 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
368 dev->phy_map == 0xffffffff) { 379 dev->phy_map == 0xffffffff)) {
369 /* No PHY: select internal loop clock before reset */ 380 /* No PHY: select internal loop clock before reset */
370 dcri_clrset(SDR0, SDR0_ETH_CFG, 381 dcri_clrset(SDR0, SDR0_ETH_CFG,
371 0, SDR0_ETH_CFG_ECS << dev->cell_index); 382 0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -383,8 +394,15 @@ static int emac_reset(struct emac_instance *dev)
383 394
384#ifdef CONFIG_PPC_DCR_NATIVE 395#ifdef CONFIG_PPC_DCR_NATIVE
385 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { 396 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
386 if (dev->phy_address == 0xffffffff && 397 if (!n && !try_internal_clock) {
387 dev->phy_map == 0xffffffff) { 398 /* first attempt has timed out. */
399 n = 20;
400 try_internal_clock = true;
401 goto do_retry;
402 }
403
404 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
405 dev->phy_map == 0xffffffff)) {
388 /* No PHY: restore external clock source after reset */ 406 /* No PHY: restore external clock source after reset */
389 dcri_clrset(SDR0, SDR0_ETH_CFG, 407 dcri_clrset(SDR0, SDR0_ETH_CFG,
390 SDR0_ETH_CFG_ECS << dev->cell_index, 0); 408 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
@@ -2460,20 +2478,24 @@ static int emac_mii_bus_reset(struct mii_bus *bus)
2460 return emac_reset(dev); 2478 return emac_reset(dev);
2461} 2479}
2462 2480
2481static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
2482 struct phy_device *phy_dev)
2483{
2484 phy_dev->autoneg = phy->autoneg;
2485 phy_dev->speed = phy->speed;
2486 phy_dev->duplex = phy->duplex;
2487 phy_dev->advertising = phy->advertising;
2488 return phy_start_aneg(phy_dev);
2489}
2490
2463static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) 2491static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2464{ 2492{
2465 struct net_device *ndev = phy->dev; 2493 struct net_device *ndev = phy->dev;
2466 struct emac_instance *dev = netdev_priv(ndev); 2494 struct emac_instance *dev = netdev_priv(ndev);
2467 2495
2468 dev->phy.autoneg = AUTONEG_ENABLE;
2469 dev->phy.speed = SPEED_1000;
2470 dev->phy.duplex = DUPLEX_FULL;
2471 dev->phy.advertising = advertise;
2472 phy->autoneg = AUTONEG_ENABLE; 2496 phy->autoneg = AUTONEG_ENABLE;
2473 phy->speed = dev->phy.speed;
2474 phy->duplex = dev->phy.duplex;
2475 phy->advertising = advertise; 2497 phy->advertising = advertise;
2476 return phy_start_aneg(dev->phy_dev); 2498 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2477} 2499}
2478 2500
2479static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) 2501static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
@@ -2481,13 +2503,10 @@ static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2481 struct net_device *ndev = phy->dev; 2503 struct net_device *ndev = phy->dev;
2482 struct emac_instance *dev = netdev_priv(ndev); 2504 struct emac_instance *dev = netdev_priv(ndev);
2483 2505
2484 dev->phy.autoneg = AUTONEG_DISABLE;
2485 dev->phy.speed = speed;
2486 dev->phy.duplex = fd;
2487 phy->autoneg = AUTONEG_DISABLE; 2506 phy->autoneg = AUTONEG_DISABLE;
2488 phy->speed = speed; 2507 phy->speed = speed;
2489 phy->duplex = fd; 2508 phy->duplex = fd;
2490 return phy_start_aneg(dev->phy_dev); 2509 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2491} 2510}
2492 2511
2493static int emac_mdio_poll_link(struct mii_phy *phy) 2512static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2509,16 +2528,17 @@ static int emac_mdio_read_link(struct mii_phy *phy)
2509{ 2528{
2510 struct net_device *ndev = phy->dev; 2529 struct net_device *ndev = phy->dev;
2511 struct emac_instance *dev = netdev_priv(ndev); 2530 struct emac_instance *dev = netdev_priv(ndev);
2531 struct phy_device *phy_dev = dev->phy_dev;
2512 int res; 2532 int res;
2513 2533
2514 res = phy_read_status(dev->phy_dev); 2534 res = phy_read_status(phy_dev);
2515 if (res) 2535 if (res)
2516 return res; 2536 return res;
2517 2537
2518 dev->phy.speed = phy->speed; 2538 phy->speed = phy_dev->speed;
2519 dev->phy.duplex = phy->duplex; 2539 phy->duplex = phy_dev->duplex;
2520 dev->phy.pause = phy->pause; 2540 phy->pause = phy_dev->pause;
2521 dev->phy.asym_pause = phy->asym_pause; 2541 phy->asym_pause = phy_dev->asym_pause;
2522 return 0; 2542 return 0;
2523} 2543}
2524 2544
@@ -2528,13 +2548,6 @@ static int emac_mdio_init_phy(struct mii_phy *phy)
2528 struct emac_instance *dev = netdev_priv(ndev); 2548 struct emac_instance *dev = netdev_priv(ndev);
2529 2549
2530 phy_start(dev->phy_dev); 2550 phy_start(dev->phy_dev);
2531 dev->phy.autoneg = phy->autoneg;
2532 dev->phy.speed = phy->speed;
2533 dev->phy.duplex = phy->duplex;
2534 dev->phy.advertising = phy->advertising;
2535 dev->phy.pause = phy->pause;
2536 dev->phy.asym_pause = phy->asym_pause;
2537
2538 return phy_init_hw(dev->phy_dev); 2551 return phy_init_hw(dev->phy_dev);
2539} 2552}
2540 2553
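
The emac change retries the soft reset once with the internal clock selected when the first attempt, which relies on a PHY-provided TX clock, times out. Stripped of the DCR register accesses, the control flow is a plain retry-with-fallback loop; a self-contained sketch with the hardware poll stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Stub for the hardware: pretend the reset only completes once the
 * internal clock is selected (e.g. no cable, so no PHY TX clock). */
static bool reset_bit_cleared(bool internal_clock)
{
	return internal_clock;
}

static int soft_reset(void)
{
	bool try_internal_clock = false;
	int n;

do_retry:
	/* clock source selection: external by default, internal on retry */
	printf("reset attempt with %s clock\n",
	       try_internal_clock ? "internal" : "external");

	for (n = 20; n > 0; n--)
		if (reset_bit_cleared(try_internal_clock))
			break;

	if (!n && !try_internal_clock) {
		/* first attempt timed out: fall back to the internal clock */
		try_internal_clock = true;
		goto do_retry;
	}

	return n ? 0 : -1;	/* 0 on success, -1 if even the retry timed out */
}

int main(void)
{
	printf("soft_reset() = %d\n", soft_reset());
	return 0;
}
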
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a93757c255f7..c0fbeb387db4 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1468,6 +1468,11 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
1468} 1468}
1469#endif 1469#endif
1470 1470
1471static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
1472{
1473 return -EOPNOTSUPP;
1474}
1475
1471static const struct net_device_ops ibmvnic_netdev_ops = { 1476static const struct net_device_ops ibmvnic_netdev_ops = {
1472 .ndo_open = ibmvnic_open, 1477 .ndo_open = ibmvnic_open,
1473 .ndo_stop = ibmvnic_close, 1478 .ndo_stop = ibmvnic_close,
@@ -1479,6 +1484,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
1479#ifdef CONFIG_NET_POLL_CONTROLLER 1484#ifdef CONFIG_NET_POLL_CONTROLLER
1480 .ndo_poll_controller = ibmvnic_netpoll_controller, 1485 .ndo_poll_controller = ibmvnic_netpoll_controller,
1481#endif 1486#endif
1487 .ndo_change_mtu = ibmvnic_change_mtu,
1482}; 1488};
1483 1489
1484/* ethtool functions */ 1490/* ethtool functions */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index cdde3cc28fb5..44d9610f7a15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -399,6 +399,7 @@ struct i40e_pf {
399#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) 399#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
400#define I40E_FLAG_MSI_ENABLED BIT_ULL(2) 400#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
401#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) 401#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
402#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4)
402#define I40E_FLAG_RSS_ENABLED BIT_ULL(6) 403#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
403#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) 404#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
404#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) 405#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7a8eb486b9ea..894c8e57ba00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -224,7 +224,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
224 I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), 224 I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
225 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), 225 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
226 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), 226 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
227 I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), 227 I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
228 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), 228 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
229}; 229};
230 230
@@ -4092,7 +4092,7 @@ flags_complete:
4092 4092
4093 /* Only allow ATR evict on hardware that is capable of handling it */ 4093 /* Only allow ATR evict on hardware that is capable of handling it */
4094 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) 4094 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
4095 pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; 4095 pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED;
4096 4096
4097 if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { 4097 if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
4098 u16 sw_flags = 0, valid_flags = 0; 4098 u16 sw_flags = 0, valid_flags = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 150caf6ca2b4..a7a4b28b4144 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8821,11 +8821,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
8821 (pf->hw.aq.api_min_ver > 4))) { 8821 (pf->hw.aq.api_min_ver > 4))) {
8822 /* Supported in FW API version higher than 1.4 */ 8822 /* Supported in FW API version higher than 1.4 */
8823 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8823 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8824 pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8825 } else {
8826 pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8827 } 8824 }
8828 8825
8826 /* Enable HW ATR eviction if possible */
8827 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
8828 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
8829
8829 pf->eeprom_version = 0xDEAD; 8830 pf->eeprom_version = 0xDEAD;
8830 pf->lan_veb = I40E_NO_VEB; 8831 pf->lan_veb = I40E_NO_VEB;
8831 pf->lan_vsi = I40E_NO_VSI; 8832 pf->lan_vsi = I40E_NO_VSI;
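
The i40e hunks separate "the hardware can do ATR eviction" (the _CAPABLE bit) from "the feature is currently on" (the new _ENABLED bit): capability is discovered once in i40e_sw_init, the ENABLED bit is derived from it with a bitwise OR so the other flag bits in the word are preserved, and the transmit path and ethtool now key off ENABLED. A small sketch of that capability/enable split, with made-up flag values:

#include <stdint.h>
#include <stdio.h>

#define FLAG_MSIX_ENABLED	(1ULL << 3)
#define FLAG_ATR_EVICT_CAPABLE	(1ULL << 4)	/* hardware supports it */
#define FLAG_ATR_EVICT_ENABLED	(1ULL << 5)	/* feature currently on */

int main(void)
{
	uint64_t flags = FLAG_MSIX_ENABLED;

	/* capability discovered from the firmware/API version */
	flags |= FLAG_ATR_EVICT_CAPABLE;

	/* enable only what the hardware is capable of; note |=, since a
	 * plain assignment would silently clear FLAG_MSIX_ENABLED too */
	if (flags & FLAG_ATR_EVICT_CAPABLE)
		flags |= FLAG_ATR_EVICT_ENABLED;

	/* the datapath then tests the ENABLED bit, not CAPABLE */
	printf("evict active: %s\n",
	       (flags & FLAG_ATR_EVICT_ENABLED) ? "yes" : "no");
	printf("msix still set: %s\n",
	       (flags & FLAG_MSIX_ENABLED) ? "yes" : "no");
	return 0;
}
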
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index cd894f4023b1..77115c25d96f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2341,7 +2341,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2341 /* Due to lack of space, no more new filters can be programmed */ 2341 /* Due to lack of space, no more new filters can be programmed */
2342 if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) 2342 if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
2343 return; 2343 return;
2344 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { 2344 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2345 /* HW ATR eviction will take care of removing filters on FIN 2345 /* HW ATR eviction will take care of removing filters on FIN
2346 * and RST packets. 2346 * and RST packets.
2347 */ 2347 */
@@ -2403,7 +2403,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2403 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2403 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2404 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2404 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2405 2405
2406 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) 2406 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2407 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; 2407 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2408 2408
2409 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 2409 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 95c23fbaa211..0fb38ca78900 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3017,10 +3017,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
3017 VLAN_VID_MASK)); 3017 VLAN_VID_MASK));
3018 } 3018 }
3019 3019
3020 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3020 if (vlan_id || qos) 3021 if (vlan_id || qos)
3021 ret = i40e_vsi_add_pvid(vsi, vlanprio); 3022 ret = i40e_vsi_add_pvid(vsi, vlanprio);
3022 else 3023 else
3023 i40e_vsi_remove_pvid(vsi); 3024 i40e_vsi_remove_pvid(vsi);
3025 spin_lock_bh(&vsi->mac_filter_hash_lock);
3024 3026
3025 if (vlan_id) { 3027 if (vlan_id) {
3026 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 3028 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 9b875d776b29..33c901622ed5 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3719,7 +3719,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3719 dma_addr_t *dma_addr, 3719 dma_addr_t *dma_addr,
3720 phys_addr_t *phys_addr) 3720 phys_addr_t *phys_addr)
3721{ 3721{
3722 int cpu = smp_processor_id(); 3722 int cpu = get_cpu();
3723 3723
3724 *dma_addr = mvpp2_percpu_read(priv, cpu, 3724 *dma_addr = mvpp2_percpu_read(priv, cpu,
3725 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 3725 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
@@ -3740,6 +3740,8 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3740 if (sizeof(phys_addr_t) == 8) 3740 if (sizeof(phys_addr_t) == 8)
3741 *phys_addr |= (u64)phys_addr_highbits << 32; 3741 *phys_addr |= (u64)phys_addr_highbits << 32;
3742 } 3742 }
3743
3744 put_cpu();
3743} 3745}
3744 3746
3745/* Free all buffers from the pool */ 3747/* Free all buffers from the pool */
@@ -3920,18 +3922,12 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3920 return bm; 3922 return bm;
3921} 3923}
3922 3924
3923/* Get pool number from a BM cookie */
3924static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
3925{
3926 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3927}
3928
3929/* Release buffer to BM */ 3925/* Release buffer to BM */
3930static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, 3926static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3931 dma_addr_t buf_dma_addr, 3927 dma_addr_t buf_dma_addr,
3932 phys_addr_t buf_phys_addr) 3928 phys_addr_t buf_phys_addr)
3933{ 3929{
3934 int cpu = smp_processor_id(); 3930 int cpu = get_cpu();
3935 3931
3936 if (port->priv->hw_version == MVPP22) { 3932 if (port->priv->hw_version == MVPP22) {
3937 u32 val = 0; 3933 u32 val = 0;
@@ -3958,15 +3954,15 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3958 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); 3954 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3959 mvpp2_percpu_write(port->priv, cpu, 3955 mvpp2_percpu_write(port->priv, cpu,
3960 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); 3956 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
3957
3958 put_cpu();
3961} 3959}
3962 3960
3963/* Refill BM pool */ 3961/* Refill BM pool */
3964static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, 3962static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
3965 dma_addr_t dma_addr, 3963 dma_addr_t dma_addr,
3966 phys_addr_t phys_addr) 3964 phys_addr_t phys_addr)
3967{ 3965{
3968 int pool = mvpp2_bm_cookie_pool_get(bm);
3969
3970 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 3966 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3971} 3967}
3972 3968
@@ -4186,8 +4182,6 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port)
4186{ 4182{
4187 u32 val; 4183 u32 val;
4188 4184
4189 return;
4190
4191 /* Only GOP port 0 has an XLG MAC */ 4185 /* Only GOP port 0 has an XLG MAC */
4192 if (port->gop_id == 0) { 4186 if (port->gop_id == 0) {
4193 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 4187 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4515,21 +4509,6 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4515 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 4509 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4516} 4510}
4517 4511
4518/* Obtain BM cookie information from descriptor */
4519static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
4520 struct mvpp2_rx_desc *rx_desc)
4521{
4522 int cpu = smp_processor_id();
4523 int pool;
4524
4525 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
4526 MVPP2_RXD_BM_POOL_ID_MASK) >>
4527 MVPP2_RXD_BM_POOL_ID_OFFS;
4528
4529 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4530 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4531}
4532
4533/* Tx descriptors helper methods */ 4512/* Tx descriptors helper methods */
4534 4513
4535/* Get pointer to next Tx descriptor to be processed (send) by HW */ 4514/* Get pointer to next Tx descriptor to be processed (send) by HW */
@@ -4757,7 +4736,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4757static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 4736static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4758 struct mvpp2_rx_queue *rxq) 4737 struct mvpp2_rx_queue *rxq)
4759{ 4738{
4760 int cpu = smp_processor_id(); 4739 int cpu = get_cpu();
4761 4740
4762 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 4741 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4763 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; 4742 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
@@ -4765,6 +4744,8 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4765 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 4744 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4766 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, 4745 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4767 rxq->pkts_coal); 4746 rxq->pkts_coal);
4747
4748 put_cpu();
4768} 4749}
4769 4750
4770static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) 4751static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4945,7 +4926,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4945 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4926 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4946 4927
4947 /* Set Rx descriptors queue starting address - indirect access */ 4928 /* Set Rx descriptors queue starting address - indirect access */
4948 cpu = smp_processor_id(); 4929 cpu = get_cpu();
4949 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 4930 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4950 if (port->priv->hw_version == MVPP21) 4931 if (port->priv->hw_version == MVPP21)
4951 rxq_dma = rxq->descs_dma; 4932 rxq_dma = rxq->descs_dma;
@@ -4954,6 +4935,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4954 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 4935 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
4955 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 4936 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4956 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); 4937 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
4938 put_cpu();
4957 4939
4958 /* Set Offset */ 4940 /* Set Offset */
4959 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 4941 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4980,9 +4962,13 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4980 4962
4981 for (i = 0; i < rx_received; i++) { 4963 for (i = 0; i < rx_received; i++) {
4982 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4964 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4983 u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 4965 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
4966 int pool;
4967
4968 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4969 MVPP2_RXD_BM_POOL_ID_OFFS;
4984 4970
4985 mvpp2_pool_refill(port, bm, 4971 mvpp2_pool_refill(port, pool,
4986 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 4972 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4987 mvpp2_rxdesc_cookie_get(port, rx_desc)); 4973 mvpp2_rxdesc_cookie_get(port, rx_desc));
4988 } 4974 }
@@ -5012,10 +4998,11 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5012 * free descriptor number 4998 * free descriptor number
5013 */ 4999 */
5014 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 5000 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5015 cpu = smp_processor_id(); 5001 cpu = get_cpu();
5016 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 5002 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5017 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); 5003 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5018 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); 5004 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
5005 put_cpu();
5019} 5006}
5020 5007
5021/* Create and initialize a Tx queue */ 5008/* Create and initialize a Tx queue */
@@ -5038,7 +5025,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
5038 txq->last_desc = txq->size - 1; 5025 txq->last_desc = txq->size - 1;
5039 5026
5040 /* Set Tx descriptors queue starting address - indirect access */ 5027 /* Set Tx descriptors queue starting address - indirect access */
5041 cpu = smp_processor_id(); 5028 cpu = get_cpu();
5042 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5029 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5043 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 5030 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5044 txq->descs_dma); 5031 txq->descs_dma);
@@ -5063,6 +5050,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
5063 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, 5050 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5064 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 5051 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5065 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 5052 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
5053 put_cpu();
5066 5054
5067 /* WRR / EJP configuration - indirect access */ 5055 /* WRR / EJP configuration - indirect access */
5068 tx_port_num = mvpp2_egress_port(port); 5056 tx_port_num = mvpp2_egress_port(port);
@@ -5133,10 +5121,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
5133 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 5121 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5134 5122
5135 /* Set Tx descriptors queue starting address and size */ 5123 /* Set Tx descriptors queue starting address and size */
5136 cpu = smp_processor_id(); 5124 cpu = get_cpu();
5137 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5125 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5138 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); 5126 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5139 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); 5127 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
5128 put_cpu();
5140} 5129}
5141 5130
5142/* Cleanup Tx ports */ 5131/* Cleanup Tx ports */
@@ -5146,7 +5135,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5146 int delay, pending, cpu; 5135 int delay, pending, cpu;
5147 u32 val; 5136 u32 val;
5148 5137
5149 cpu = smp_processor_id(); 5138 cpu = get_cpu();
5150 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5139 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5151 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); 5140 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
5152 val |= MVPP2_TXQ_DRAIN_EN_MASK; 5141 val |= MVPP2_TXQ_DRAIN_EN_MASK;
@@ -5173,6 +5162,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5173 5162
5174 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 5163 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
5175 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); 5164 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5165 put_cpu();
5176 5166
5177 for_each_present_cpu(cpu) { 5167 for_each_present_cpu(cpu) {
5178 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 5168 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -5420,7 +5410,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5420 5410
5421/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 5411/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5422static int mvpp2_rx_refill(struct mvpp2_port *port, 5412static int mvpp2_rx_refill(struct mvpp2_port *port,
5423 struct mvpp2_bm_pool *bm_pool, u32 bm) 5413 struct mvpp2_bm_pool *bm_pool, int pool)
5424{ 5414{
5425 dma_addr_t dma_addr; 5415 dma_addr_t dma_addr;
5426 phys_addr_t phys_addr; 5416 phys_addr_t phys_addr;
@@ -5432,7 +5422,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
5432 if (!buf) 5422 if (!buf)
5433 return -ENOMEM; 5423 return -ENOMEM;
5434 5424
5435 mvpp2_pool_refill(port, bm, dma_addr, phys_addr); 5425 mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
5436 5426
5437 return 0; 5427 return 0;
5438} 5428}
@@ -5490,7 +5480,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5490 unsigned int frag_size; 5480 unsigned int frag_size;
5491 dma_addr_t dma_addr; 5481 dma_addr_t dma_addr;
5492 phys_addr_t phys_addr; 5482 phys_addr_t phys_addr;
5493 u32 bm, rx_status; 5483 u32 rx_status;
5494 int pool, rx_bytes, err; 5484 int pool, rx_bytes, err;
5495 void *data; 5485 void *data;
5496 5486
@@ -5502,8 +5492,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5502 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); 5492 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5503 data = (void *)phys_to_virt(phys_addr); 5493 data = (void *)phys_to_virt(phys_addr);
5504 5494
5505 bm = mvpp2_bm_cookie_build(port, rx_desc); 5495 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5506 pool = mvpp2_bm_cookie_pool_get(bm); 5496 MVPP2_RXD_BM_POOL_ID_OFFS;
5507 bm_pool = &port->priv->bm_pools[pool]; 5497 bm_pool = &port->priv->bm_pools[pool];
5508 5498
5509 /* In case of an error, release the requested buffer pointer 5499 /* In case of an error, release the requested buffer pointer
@@ -5516,7 +5506,7 @@ err_drop_frame:
5516 dev->stats.rx_errors++; 5506 dev->stats.rx_errors++;
5517 mvpp2_rx_error(port, rx_desc); 5507 mvpp2_rx_error(port, rx_desc);
5518 /* Return the buffer to the pool */ 5508 /* Return the buffer to the pool */
5519 mvpp2_pool_refill(port, bm, dma_addr, phys_addr); 5509 mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
5520 continue; 5510 continue;
5521 } 5511 }
5522 5512
@@ -5531,7 +5521,7 @@ err_drop_frame:
5531 goto err_drop_frame; 5521 goto err_drop_frame;
5532 } 5522 }
5533 5523
5534 err = mvpp2_rx_refill(port, bm_pool, bm); 5524 err = mvpp2_rx_refill(port, bm_pool, pool);
5535 if (err) { 5525 if (err) {
5536 netdev_err(port->dev, "failed to refill BM pools\n"); 5526 netdev_err(port->dev, "failed to refill BM pools\n");
5537 goto err_drop_frame; 5527 goto err_drop_frame;
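
Most of the mvpp2 hunks wrap the indirect per-CPU register sequences in get_cpu()/put_cpu() instead of using a bare smp_processor_id(), so preemption cannot migrate the task between selecting a CPU window and finishing the multi-register access. A hedged kernel-style sketch of the pattern; the register-write helper and register names are stand-ins, not the driver's internals.

#include <linux/smp.h>
#include <linux/types.h>

/* Stand-in for the driver's indirect write to a per-CPU register window. */
static void percpu_reg_write(void *priv, int cpu, u32 reg, u32 val)
{
	(void)priv; (void)cpu; (void)reg; (void)val;
}

void indirect_rxq_setup(void *priv, u32 num_reg, u32 thresh_reg,
			int rxq_id, u32 pkts_coal)
{
	/* get_cpu() returns the current CPU and disables preemption, so the
	 * whole indirect sequence targets one per-CPU window. */
	int cpu = get_cpu();

	percpu_reg_write(priv, cpu, num_reg, rxq_id);
	percpu_reg_write(priv, cpu, thresh_reg, pkts_coal);

	put_cpu();	/* re-enable preemption once the sequence is complete */
}
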
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2fd044b23875..944fc1742464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -458,13 +458,15 @@ struct mlx5e_mpw_info {
458 458
459struct mlx5e_rx_am_stats { 459struct mlx5e_rx_am_stats {
460 int ppms; /* packets per msec */ 460 int ppms; /* packets per msec */
461 int bpms; /* bytes per msec */
461 int epms; /* events per msec */ 462 int epms; /* events per msec */
462}; 463};
463 464
464struct mlx5e_rx_am_sample { 465struct mlx5e_rx_am_sample {
465 ktime_t time; 466 ktime_t time;
466 unsigned int pkt_ctr; 467 u32 pkt_ctr;
467 u16 event_ctr; 468 u32 byte_ctr;
469 u16 event_ctr;
468}; 470};
469 471
470struct mlx5e_rx_am { /* Adaptive Moderation */ 472struct mlx5e_rx_am { /* Adaptive Moderation */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 02dd3a95ed8f..acf32fe952cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
183 mlx5e_am_step(am); 183 mlx5e_am_step(am);
184} 184}
185 185
186#define IS_SIGNIFICANT_DIFF(val, ref) \
187 (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
188
186static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, 189static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
187 struct mlx5e_rx_am_stats *prev) 190 struct mlx5e_rx_am_stats *prev)
188{ 191{
189 int diff; 192 if (!prev->bpms)
190 193 return curr->bpms ? MLX5E_AM_STATS_BETTER :
191 if (!prev->ppms)
192 return curr->ppms ? MLX5E_AM_STATS_BETTER :
193 MLX5E_AM_STATS_SAME; 194 MLX5E_AM_STATS_SAME;
194 195
195 diff = curr->ppms - prev->ppms; 196 if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
196 if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ 197 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
197 return (diff > 0) ? MLX5E_AM_STATS_BETTER : 198 MLX5E_AM_STATS_WORSE;
198 MLX5E_AM_STATS_WORSE;
199 199
200 if (!prev->epms) 200 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
201 return curr->epms ? MLX5E_AM_STATS_WORSE : 201 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
202 MLX5E_AM_STATS_SAME; 202 MLX5E_AM_STATS_WORSE;
203 203
204 diff = curr->epms - prev->epms; 204 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
205 if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ 205 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
206 return (diff < 0) ? MLX5E_AM_STATS_BETTER : 206 MLX5E_AM_STATS_WORSE;
207 MLX5E_AM_STATS_WORSE;
208 207
209 return MLX5E_AM_STATS_SAME; 208 return MLX5E_AM_STATS_SAME;
210} 209}
@@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq,
266{ 265{
267 s->time = ktime_get(); 266 s->time = ktime_get();
268 s->pkt_ctr = rq->stats.packets; 267 s->pkt_ctr = rq->stats.packets;
268 s->byte_ctr = rq->stats.bytes;
269 s->event_ctr = rq->cq.event_ctr; 269 s->event_ctr = rq->cq.event_ctr;
270} 270}
271 271
272#define MLX5E_AM_NEVENTS 64 272#define MLX5E_AM_NEVENTS 64
273#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
274#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
273 275
274static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, 276static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
275 struct mlx5e_rx_am_sample *end, 277 struct mlx5e_rx_am_sample *end,
@@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
277{ 279{
278 /* u32 holds up to 71 minutes, should be enough */ 280 /* u32 holds up to 71 minutes, should be enough */
279 u32 delta_us = ktime_us_delta(end->time, start->time); 281 u32 delta_us = ktime_us_delta(end->time, start->time);
280 unsigned int npkts = end->pkt_ctr - start->pkt_ctr; 282 u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
283 u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
284 start->byte_ctr);
281 285
282 if (!delta_us) 286 if (!delta_us)
283 return; 287 return;
284 288
285 curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; 289 curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
286 curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; 290 curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
291 curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
292 delta_us);
287} 293}
288 294
289void mlx5e_rx_am_work(struct work_struct *work) 295void mlx5e_rx_am_work(struct work_struct *work)
@@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq)
308 314
309 switch (am->state) { 315 switch (am->state) {
310 case MLX5E_AM_MEASURE_IN_PROGRESS: 316 case MLX5E_AM_MEASURE_IN_PROGRESS:
311 nevents = rq->cq.event_ctr - am->start_sample.event_ctr; 317 nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
318 am->start_sample.event_ctr);
312 if (nevents < MLX5E_AM_NEVENTS) 319 if (nevents < MLX5E_AM_NEVENTS)
313 break; 320 break;
314 mlx5e_am_sample(rq, &end_sample); 321 mlx5e_am_sample(rq, &end_sample);
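
Two details in the en_rx_am changes are worth isolating: counter deltas are computed modulo the counter width (the BIT_GAP macro), so a wrapped u16 or u32 counter still yields the right difference, and the better/worse decision goes through a single "more than 10% apart" helper. A self-contained sketch of both, using the same macro shapes but userspace types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_BYTE 8
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
/* distance from start to end on a counter that wraps at 2^bits */
#define BIT_GAP(bits, end, start) \
	((((uint64_t)(end) - (start)) + (1ULL << (bits))) & ((1ULL << (bits)) - 1))

/* more than 10% relative difference */
#define IS_SIGNIFICANT_DIFF(val, ref) \
	(((100 * (uint64_t)llabs((int64_t)(val) - (int64_t)(ref))) / (ref)) > 10)

int main(void)
{
	uint16_t start = 65500, end = 30;	/* event counter wrapped */
	uint64_t nevents = BIT_GAP(BITS_PER_TYPE(uint16_t), end, start);

	printf("events since start: %" PRIu64 "\n", nevents);	/* 66 */

	uint64_t curr_bpms = 1180, prev_bpms = 1000;	/* 18% apart */
	printf("significant: %s\n",
	       IS_SIGNIFICANT_DIFF(curr_bpms, prev_bpms) ? "yes" : "no");
	return 0;
}
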
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 53e4992d6511..f81c3aa60b46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -417,20 +417,13 @@ struct mlx5e_stats {
417}; 417};
418 418
419static const struct counter_desc mlx5e_pme_status_desc[] = { 419static const struct counter_desc mlx5e_pme_status_desc[] = {
420 { "module_plug", 0 },
421 { "module_unplug", 8 }, 420 { "module_unplug", 8 },
422}; 421};
423 422
424static const struct counter_desc mlx5e_pme_error_desc[] = { 423static const struct counter_desc mlx5e_pme_error_desc[] = {
425 { "module_pwr_budget_exd", 0 }, /* power budget exceed */ 424 { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
426 { "module_long_range", 8 }, /* long range for non MLNX cable */ 425 { "module_high_temp", 48 }, /* high temperature */
427 { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
428 { "module_no_eeprom", 24 }, /* no eeprom/retry time out */
429 { "module_enforce_part", 32 }, /* enforce part number list */
430 { "module_unknown_id", 40 }, /* unknown identifier */
431 { "module_high_temp", 48 }, /* high temperature */
432 { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ 426 { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
433 { "module_unknown_status", 64 },
434}; 427};
435 428
436#endif /* __MLX5_EN_STATS_H__ */ 429#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0e487e8ca634..8f5125ccd8d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -862,7 +862,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace
862 ft_attr.level = level; 862 ft_attr.level = level;
863 ft_attr.prio = prio; 863 ft_attr.prio = prio;
864 864
865 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); 865 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
866} 866}
867 867
868struct mlx5_flow_table* 868struct mlx5_flow_table*
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 44f59b1d6f0f..f27f84ffbc85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -275,10 +275,8 @@ static void poll_health(unsigned long data)
275 struct mlx5_core_health *health = &dev->priv.health; 275 struct mlx5_core_health *health = &dev->priv.health;
276 u32 count; 276 u32 count;
277 277
278 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 278 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
279 mod_timer(&health->timer, get_next_poll_jiffies()); 279 goto out;
280 return;
281 }
282 280
283 count = ioread32be(health->health_counter); 281 count = ioread32be(health->health_counter);
284 if (count == health->prev) 282 if (count == health->prev)
@@ -290,8 +288,6 @@ static void poll_health(unsigned long data)
290 if (health->miss_counter == MAX_MISSES) { 288 if (health->miss_counter == MAX_MISSES) {
291 dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); 289 dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
292 print_health_info(dev); 290 print_health_info(dev);
293 } else {
294 mod_timer(&health->timer, get_next_poll_jiffies());
295 } 291 }
296 292
297 if (in_fatal(dev) && !health->sick) { 293 if (in_fatal(dev) && !health->sick) {
@@ -305,6 +301,9 @@ static void poll_health(unsigned long data)
305 "new health works are not permitted at this stage\n"); 301 "new health works are not permitted at this stage\n");
306 spin_unlock(&health->wq_lock); 302 spin_unlock(&health->wq_lock);
307 } 303 }
304
305out:
306 mod_timer(&health->timer, get_next_poll_jiffies());
308} 307}
309 308
310void mlx5_start_health_poll(struct mlx5_core_dev *dev) 309void mlx5_start_health_poll(struct mlx5_core_dev *dev)
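
The health-poll rework stops rearming the timer from two separate branches and instead falls through to a single mod_timer() call at the end, so every exit path, including the internal-error one, keeps polling. A small runnable sketch of that single-rearm-point shape, with the device checks stubbed out:

#include <stdbool.h>
#include <stdio.h>

static bool internal_error;
static int  miss_counter;

static void rearm_timer(void)
{
	printf("timer rearmed\n");
}

static void poll_health(void)
{
	if (internal_error)
		goto out;	/* skip the counters, but keep polling */

	/* ... read the health counter, update miss_counter ... */
	if (miss_counter >= 5)
		printf("device health compromised\n");

out:
	rearm_timer();		/* the single place that rearms the timer */
}

int main(void)
{
	poll_health();
	internal_error = true;
	poll_health();		/* still rearms via the out: label */
	return 0;
}
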
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index af945edfee19..4f577a5abf88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -537,8 +537,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
537 /* disable cmdif checksum */ 537 /* disable cmdif checksum */
538 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 538 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
539 539
540 /* If the HCA supports 4K UARs use it */ 540 /* Enable 4K UAR only when HCA supports it and page size is bigger
541 if (MLX5_CAP_GEN_MAX(dev, uar_4k)) 541 * than 4K.
542 */
543 if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
542 MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); 544 MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
543 545
544 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); 546 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 483241b4b05d..a672f6a860dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -2956,7 +2956,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2956 qed_wr(p_hwfn, 2956 qed_wr(p_hwfn,
2957 p_ptt, 2957 p_ptt,
2958 s_storm_defs[storm_id].cm_ctx_wr_addr, 2958 s_storm_defs[storm_id].cm_ctx_wr_addr,
2959 BIT(9) | lid); 2959 (i << 9) | lid);
2960 *(dump_buf + offset) = qed_rd(p_hwfn, 2960 *(dump_buf + offset) = qed_rd(p_hwfn,
2961 p_ptt, 2961 p_ptt,
2962 rd_reg_addr); 2962 rd_reg_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index aa6476439aee..e0ef02f9503b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
214{ 214{
215 /* Context type from W/B descriptor must be zero */ 215 /* Context type from W/B descriptor must be zero */
216 if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) 216 if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
217 return -EINVAL; 217 return 0;
218 218
219 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ 219 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
220 if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) 220 if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
221 return 0; 221 return 1;
222 222
223 return 1; 223 return 0;
224} 224}
225 225
226static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) 226static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
@@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
282 } 282 }
283 } 283 }
284exit: 284exit:
285 return ret; 285 if (likely(ret == 0))
286 return 1;
287
288 return 0;
286} 289}
287 290
288static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 291static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
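
The dwmac4 descriptor helpers are flipped to boolean semantics: they now return 1 when a valid timestamp is available and 0 otherwise, which is why the callers in stmmac_main.c below can test them directly with "if (...)" instead of "if (!...)". A self-contained sketch of the convention, with a stand-in descriptor layout rather than the real TDES3 bits:

#include <stdint.h>
#include <stdio.h>

/* Stand-in bit positions, not the real descriptor layout. */
#define DESC_CONTEXT_TYPE	(1u << 30)
#define DESC_TIMESTAMP_STATUS	(1u << 17)

struct desc { uint32_t des3; };

/* Returns 1 when the descriptor carries a usable TX timestamp, else 0. */
static int tx_timestamp_available(const struct desc *p)
{
	if (p->des3 & DESC_CONTEXT_TYPE)	/* context descriptors never do */
		return 0;
	return !!(p->des3 & DESC_TIMESTAMP_STATUS);
}

int main(void)
{
	struct desc with_ts = { .des3 = DESC_TIMESTAMP_STATUS };
	struct desc no_ts   = { .des3 = 0 };

	/* callers can now test the helper positively */
	if (tx_timestamp_available(&with_ts))
		printf("got a TX timestamp\n");
	if (!tx_timestamp_available(&no_ts))
		printf("no timestamp on this descriptor\n");
	return 0;
}
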
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 12236daf7bb6..d16d11bfc046 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -434,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
434 return; 434 return;
435 435
436 /* check tx tstamp status */ 436 /* check tx tstamp status */
437 if (!priv->hw->desc->get_tx_timestamp_status(p)) { 437 if (priv->hw->desc->get_tx_timestamp_status(p)) {
438 /* get the valid tstamp */ 438 /* get the valid tstamp */
439 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 439 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
440 440
441 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 441 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
442 shhwtstamp.hwtstamp = ns_to_ktime(ns); 442 shhwtstamp.hwtstamp = ns_to_ktime(ns);
443 443
444 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); 444 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
445 /* pass tstamp to stack */ 445 /* pass tstamp to stack */
446 skb_tstamp_tx(skb, &shhwtstamp); 446 skb_tstamp_tx(skb, &shhwtstamp);
447 } 447 }
@@ -468,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
468 return; 468 return;
469 469
470 /* Check if timestamp is available */ 470 /* Check if timestamp is available */
471 if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 471 if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
472 /* For GMAC4, the valid timestamp is from CTX next desc. */ 472 /* For GMAC4, the valid timestamp is from CTX next desc. */
473 if (priv->plat->has_gmac4) 473 if (priv->plat->has_gmac4)
474 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); 474 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
475 else 475 else
476 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 476 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
477 477
478 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); 478 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
479 shhwtstamp = skb_hwtstamps(skb); 479 shhwtstamp = skb_hwtstamps(skb);
480 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 480 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
481 shhwtstamp->hwtstamp = ns_to_ktime(ns); 481 shhwtstamp->hwtstamp = ns_to_ktime(ns);
482 } else { 482 } else {
483 netdev_err(priv->dev, "cannot get RX hw timestamp\n"); 483 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
484 } 484 }
485} 485}
486 486
@@ -546,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
546 /* PTP v1, UDP, any kind of event packet */ 546 /* PTP v1, UDP, any kind of event packet */
547 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 547 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
548 /* take time stamp for all event messages */ 548 /* take time stamp for all event messages */
549 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 549 if (priv->plat->has_gmac4)
550 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
551 else
552 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
550 553
551 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 554 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
552 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 555 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -578,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
578 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 581 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
579 ptp_v2 = PTP_TCR_TSVER2ENA; 582 ptp_v2 = PTP_TCR_TSVER2ENA;
580 /* take time stamp for all event messages */ 583 /* take time stamp for all event messages */
581 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 584 if (priv->plat->has_gmac4)
585 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
586 else
587 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582 588
583 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 589 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 590 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -612,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
612 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 618 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
613 ptp_v2 = PTP_TCR_TSVER2ENA; 619 ptp_v2 = PTP_TCR_TSVER2ENA;
614 /* take time stamp for all event messages */ 620 /* take time stamp for all event messages */
615 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 621 if (priv->plat->has_gmac4)
622 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
623 else
624 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
616 625
617 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 626 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
618 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 627 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 48fb72fc423c..f4b31d69f60e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -59,7 +59,8 @@
59/* Enable Snapshot for Messages Relevant to Master */ 59/* Enable Snapshot for Messages Relevant to Master */
60#define PTP_TCR_TSMSTRENA BIT(15) 60#define PTP_TCR_TSMSTRENA BIT(15)
61/* Select PTP packets for Taking Snapshots */ 61/* Select PTP packets for Taking Snapshots */
62#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) 62#define PTP_TCR_SNAPTYPSEL_1 BIT(16)
63#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
63/* Enable MAC address for PTP Frame Filtering */ 64/* Enable MAC address for PTP Frame Filtering */
64#define PTP_TCR_TSENMACADDR BIT(18) 65#define PTP_TCR_TSENMACADDR BIT(18)
65 66
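
The ptp header change narrows PTP_TCR_SNAPTYPSEL_1 to a single bit for the older MAC cores and adds a separate two-bit definition for GMAC4; the ioctl paths above then pick one or the other based on has_gmac4. A small sketch of selecting the field encoding per hardware variant; the mask values mirror the header, while the helper itself is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define PTP_TCR_SNAPTYPSEL_1		BIT(16)		/* older cores */
#define PTP_GMAC4_TCR_SNAPTYPSEL_1	GENMASK(17, 16)	/* GMAC4 */

static uint32_t snap_type_sel(bool has_gmac4)
{
	return has_gmac4 ? PTP_GMAC4_TCR_SNAPTYPSEL_1 : PTP_TCR_SNAPTYPSEL_1;
}

int main(void)
{
	printf("gmac4:  0x%08x\n", (unsigned int)snap_type_sel(true));	/* 0x00030000 */
	printf("legacy: 0x%08x\n", (unsigned int)snap_type_sel(false));	/* 0x00010000 */
	return 0;
}
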
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6ebb0f559a42..199459bd6961 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev)
1007 1007
1008 dev->netdev_ops = &geneve_netdev_ops; 1008 dev->netdev_ops = &geneve_netdev_ops;
1009 dev->ethtool_ops = &geneve_ethtool_ops; 1009 dev->ethtool_ops = &geneve_ethtool_ops;
1010 dev->destructor = free_netdev; 1010 dev->needs_free_netdev = true;
1011 1011
1012 SET_NETDEV_DEVTYPE(dev, &geneve_type); 1012 SET_NETDEV_DEVTYPE(dev, &geneve_type);
1013 1013
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7b652bb7ebe4..ca110cd2a4e4 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -611,7 +611,7 @@ static const struct net_device_ops gtp_netdev_ops = {
611static void gtp_link_setup(struct net_device *dev) 611static void gtp_link_setup(struct net_device *dev)
612{ 612{
613 dev->netdev_ops = &gtp_netdev_ops; 613 dev->netdev_ops = &gtp_netdev_ops;
614 dev->destructor = free_netdev; 614 dev->needs_free_netdev = true;
615 615
616 dev->hard_header_len = 0; 616 dev->hard_header_len = 0;
617 dev->addr_len = 0; 617 dev->addr_len = 0;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 922bf440e9f1..021a8ec411ab 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev)
311{ 311{
312 /* Finish setting up the DEVICE info. */ 312 /* Finish setting up the DEVICE info. */
313 dev->netdev_ops = &sp_netdev_ops; 313 dev->netdev_ops = &sp_netdev_ops;
314 dev->destructor = free_netdev; 314 dev->needs_free_netdev = true;
315 dev->mtu = SIXP_MTU; 315 dev->mtu = SIXP_MTU;
316 dev->hard_header_len = AX25_MAX_HEADER_LEN; 316 dev->hard_header_len = AX25_MAX_HEADER_LEN;
317 dev->header_ops = &ax25_header_ops; 317 dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f62e7f325cf9..78a6414c5fd9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = {
476static void bpq_setup(struct net_device *dev) 476static void bpq_setup(struct net_device *dev)
477{ 477{
478 dev->netdev_ops = &bpq_netdev_ops; 478 dev->netdev_ops = &bpq_netdev_ops;
479 dev->destructor = free_netdev; 479 dev->needs_free_netdev = true;
480 480
481 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 481 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 262b2ea576a3..6066f1bcaf2d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -171,6 +171,8 @@ struct rndis_device {
171 spinlock_t request_lock; 171 spinlock_t request_lock;
172 struct list_head req_list; 172 struct list_head req_list;
173 173
174 struct work_struct mcast_work;
175
174 u8 hw_mac_adr[ETH_ALEN]; 176 u8 hw_mac_adr[ETH_ALEN];
175 u8 rss_key[NETVSC_HASH_KEYLEN]; 177 u8 rss_key[NETVSC_HASH_KEYLEN];
176 u16 ind_table[ITAB_NUM]; 178 u16 ind_table[ITAB_NUM];
@@ -201,6 +203,7 @@ int rndis_filter_open(struct netvsc_device *nvdev);
201int rndis_filter_close(struct netvsc_device *nvdev); 203int rndis_filter_close(struct netvsc_device *nvdev);
202int rndis_filter_device_add(struct hv_device *dev, 204int rndis_filter_device_add(struct hv_device *dev,
203 struct netvsc_device_info *info); 205 struct netvsc_device_info *info);
206void rndis_filter_update(struct netvsc_device *nvdev);
204void rndis_filter_device_remove(struct hv_device *dev, 207void rndis_filter_device_remove(struct hv_device *dev,
205 struct netvsc_device *nvdev); 208 struct netvsc_device *nvdev);
206int rndis_filter_set_rss_param(struct rndis_device *rdev, 209int rndis_filter_set_rss_param(struct rndis_device *rdev,
@@ -211,7 +214,6 @@ int rndis_filter_receive(struct net_device *ndev,
211 struct vmbus_channel *channel, 214 struct vmbus_channel *channel,
212 void *data, u32 buflen); 215 void *data, u32 buflen);
213 216
214int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
215int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); 217int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
216 218
217void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); 219void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
@@ -696,7 +698,6 @@ struct net_device_context {
696 /* list protection */ 698 /* list protection */
697 spinlock_t lock; 699 spinlock_t lock;
698 700
699 struct work_struct work;
700 u32 msg_enable; /* debug level */ 701 u32 msg_enable; /* debug level */
701 702
702 u32 tx_checksum_mask; 703 u32 tx_checksum_mask;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4421a6d00375..82d6c022ca85 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -56,37 +56,12 @@ static int debug = -1;
56module_param(debug, int, S_IRUGO); 56module_param(debug, int, S_IRUGO);
57MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 57MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
58 58
59static void do_set_multicast(struct work_struct *w)
60{
61 struct net_device_context *ndevctx =
62 container_of(w, struct net_device_context, work);
63 struct hv_device *device_obj = ndevctx->device_ctx;
64 struct net_device *ndev = hv_get_drvdata(device_obj);
65 struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
66 struct rndis_device *rdev;
67
68 if (!nvdev)
69 return;
70
71 rdev = nvdev->extension;
72 if (rdev == NULL)
73 return;
74
75 if (ndev->flags & IFF_PROMISC)
76 rndis_filter_set_packet_filter(rdev,
77 NDIS_PACKET_TYPE_PROMISCUOUS);
78 else
79 rndis_filter_set_packet_filter(rdev,
80 NDIS_PACKET_TYPE_BROADCAST |
81 NDIS_PACKET_TYPE_ALL_MULTICAST |
82 NDIS_PACKET_TYPE_DIRECTED);
83}
84
85static void netvsc_set_multicast_list(struct net_device *net) 59static void netvsc_set_multicast_list(struct net_device *net)
86{ 60{
87 struct net_device_context *net_device_ctx = netdev_priv(net); 61 struct net_device_context *net_device_ctx = netdev_priv(net);
62 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
88 63
89 schedule_work(&net_device_ctx->work); 64 rndis_filter_update(nvdev);
90} 65}
91 66
92static int netvsc_open(struct net_device *net) 67static int netvsc_open(struct net_device *net)
@@ -123,8 +98,6 @@ static int netvsc_close(struct net_device *net)
123 98
124 netif_tx_disable(net); 99 netif_tx_disable(net);
125 100
126 /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
127 cancel_work_sync(&net_device_ctx->work);
128 ret = rndis_filter_close(nvdev); 101 ret = rndis_filter_close(nvdev);
129 if (ret != 0) { 102 if (ret != 0) {
130 netdev_err(net, "unable to close device (ret %d).\n", ret); 103 netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -1028,7 +1001,7 @@ static const struct {
1028static int netvsc_get_sset_count(struct net_device *dev, int string_set) 1001static int netvsc_get_sset_count(struct net_device *dev, int string_set)
1029{ 1002{
1030 struct net_device_context *ndc = netdev_priv(dev); 1003 struct net_device_context *ndc = netdev_priv(dev);
1031 struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
1004 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1032 1005
1033 if (!nvdev) 1006 if (!nvdev)
1034 return -ENODEV; 1007 return -ENODEV;
@@ -1158,11 +1131,22 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1158} 1131}
1159 1132
1160#ifdef CONFIG_NET_POLL_CONTROLLER 1133#ifdef CONFIG_NET_POLL_CONTROLLER
1161static void netvsc_poll_controller(struct net_device *net)
1134static void netvsc_poll_controller(struct net_device *dev)
1162{ 1135{
1163 /* As netvsc_start_xmit() works synchronous we don't have to
1164 * trigger anything here.
1165 */
1136 struct net_device_context *ndc = netdev_priv(dev);
1137 struct netvsc_device *ndev;
1138 int i;
1139
1140 rcu_read_lock();
1141 ndev = rcu_dereference(ndc->nvdev);
1142 if (ndev) {
1143 for (i = 0; i < ndev->num_chn; i++) {
1144 struct netvsc_channel *nvchan = &ndev->chan_table[i];
1145
1146 napi_schedule(&nvchan->napi);
1147 }
1148 }
1149 rcu_read_unlock();
1166} 1150}
1167#endif 1151#endif
1168 1152
@@ -1552,7 +1536,6 @@ static int netvsc_probe(struct hv_device *dev,
1552 hv_set_drvdata(dev, net); 1536 hv_set_drvdata(dev, net);
1553 1537
1554 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 1538 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
1555 INIT_WORK(&net_device_ctx->work, do_set_multicast);
1556 1539
1557 spin_lock_init(&net_device_ctx->lock); 1540 spin_lock_init(&net_device_ctx->lock);
1558 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 1541 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1622,7 +1605,6 @@ static int netvsc_remove(struct hv_device *dev)
1622 netif_device_detach(net); 1605 netif_device_detach(net);
1623 1606
1624 cancel_delayed_work_sync(&ndev_ctx->dwork); 1607 cancel_delayed_work_sync(&ndev_ctx->dwork);
1625 cancel_work_sync(&ndev_ctx->work);
1626 1608
1627 /* 1609 /*
1628 * Call to the vsc driver to let it know that the device is being 1610 * Call to the vsc driver to let it know that the device is being
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index f9d5b0b8209a..cb79cd081f42 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,6 +31,7 @@
31 31
32#include "hyperv_net.h" 32#include "hyperv_net.h"
33 33
34static void rndis_set_multicast(struct work_struct *w);
34 35
35#define RNDIS_EXT_LEN PAGE_SIZE 36#define RNDIS_EXT_LEN PAGE_SIZE
36struct rndis_request { 37struct rndis_request {
@@ -76,6 +77,7 @@ static struct rndis_device *get_rndis_device(void)
76 spin_lock_init(&device->request_lock); 77 spin_lock_init(&device->request_lock);
77 78
78 INIT_LIST_HEAD(&device->req_list); 79 INIT_LIST_HEAD(&device->req_list);
80 INIT_WORK(&device->mcast_work, rndis_set_multicast);
79 81
80 device->state = RNDIS_DEV_UNINITIALIZED; 82 device->state = RNDIS_DEV_UNINITIALIZED;
81 83
@@ -815,7 +817,8 @@ static int rndis_filter_query_link_speed(struct rndis_device *dev)
815 return ret; 817 return ret;
816} 818}
817 819
818int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
820static int rndis_filter_set_packet_filter(struct rndis_device *dev,
821 u32 new_filter)
819{ 822{
820 struct rndis_request *request; 823 struct rndis_request *request;
821 struct rndis_set_request *set; 824 struct rndis_set_request *set;
@@ -846,6 +849,28 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
846 return ret; 849 return ret;
847} 850}
848 851
852static void rndis_set_multicast(struct work_struct *w)
853{
854 struct rndis_device *rdev
855 = container_of(w, struct rndis_device, mcast_work);
856
857 if (rdev->ndev->flags & IFF_PROMISC)
858 rndis_filter_set_packet_filter(rdev,
859 NDIS_PACKET_TYPE_PROMISCUOUS);
860 else
861 rndis_filter_set_packet_filter(rdev,
862 NDIS_PACKET_TYPE_BROADCAST |
863 NDIS_PACKET_TYPE_ALL_MULTICAST |
864 NDIS_PACKET_TYPE_DIRECTED);
865}
866
867void rndis_filter_update(struct netvsc_device *nvdev)
868{
869 struct rndis_device *rdev = nvdev->extension;
870
871 schedule_work(&rdev->mcast_work);
872}
873
849static int rndis_filter_init_device(struct rndis_device *dev) 874static int rndis_filter_init_device(struct rndis_device *dev)
850{ 875{
851 struct rndis_request *request; 876 struct rndis_request *request;
@@ -973,6 +998,9 @@ static int rndis_filter_close_device(struct rndis_device *dev)
973 if (dev->state != RNDIS_DEV_DATAINITIALIZED) 998 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
974 return 0; 999 return 0;
975 1000
1001 /* Make sure rndis_set_multicast doesn't re-enable filter! */
1002 cancel_work_sync(&dev->mcast_work);
1003
976 ret = rndis_filter_set_packet_filter(dev, 0); 1004 ret = rndis_filter_set_packet_filter(dev, 0);
977 if (ret == -ENODEV) 1005 if (ret == -ENODEV)
978 ret = 0; 1006 ret = 0;
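The hyperv hunks above move multicast/promiscuous filter handling out of netvsc_drv.c and into the RNDIS layer: netvsc_set_multicast_list() now only calls rndis_filter_update(), which schedules a work item owned by struct rndis_device, and rndis_filter_close_device() cancels that work before clearing the packet filter. A condensed sketch of the resulting flow, paraphrasing the hunks above (not extra patch content):

static void netvsc_set_multicast_list(struct net_device *net)
{
        struct net_device_context *ndc = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

        rndis_filter_update(nvdev);     /* just schedules rdev->mcast_work */
}

static void rndis_set_multicast(struct work_struct *w)
{
        struct rndis_device *rdev =
                container_of(w, struct rndis_device, mcast_work);
        u32 filter = NDIS_PACKET_TYPE_BROADCAST |
                     NDIS_PACKET_TYPE_ALL_MULTICAST |
                     NDIS_PACKET_TYPE_DIRECTED;

        if (rdev->ndev->flags & IFF_PROMISC)
                filter = NDIS_PACKET_TYPE_PROMISCUOUS;

        rndis_filter_set_packet_filter(rdev, filter);
}

Because the work item now lives in struct rndis_device, the cancel_work_sync(&dev->mcast_work) added to rndis_filter_close_device() is what prevents a late filter update from re-enabling the filter after close, replacing the old cancel_work_sync(&net_device_ctx->work) in netvsc_close().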
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 312fce7302d3..144ea5ae8ab4 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev)
207 __skb_queue_purge(&txp->tq); 207 __skb_queue_purge(&txp->tq);
208 } 208 }
209 kfree(dp->tx_private); 209 kfree(dp->tx_private);
210 free_netdev(dev);
211} 210}
212 211
213static void ifb_setup(struct net_device *dev) 212static void ifb_setup(struct net_device *dev)
@@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev)
230 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 229 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
231 netif_keep_dst(dev); 230 netif_keep_dst(dev);
232 eth_hw_addr_random(dev); 231 eth_hw_addr_random(dev);
233 dev->destructor = ifb_dev_free;
232 dev->needs_free_netdev = true;
233 dev->priv_destructor = ifb_dev_free;
234} 234}
235 235
236static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) 236static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
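ifb is the first of many drivers in this patch switched from dev->destructor to the split teardown used throughout the rest of the diff: needs_free_netdev asks the core to call free_netdev() once the device is fully unregistered, and priv_destructor, when set, is limited to releasing driver-private state and must not free the netdev itself. Minimal sketch of the driver-side convention with hypothetical foo_* names (the core-side behaviour is inferred from how the converted drivers use the two fields; it is not shown in these hunks):

static void foo_dev_free(struct net_device *dev)
{
        struct foo_priv *p = netdev_priv(dev);  /* hypothetical private data */

        free_percpu(p->stats);                  /* private state only; no    */
                                                /* free_netdev(dev) here     */
}

static void foo_setup(struct net_device *dev)
{
        dev->netdev_ops = &foo_netdev_ops;      /* hypothetical ops table    */
        dev->needs_free_netdev = true;          /* core calls free_netdev()  */
        dev->priv_destructor = foo_dev_free;    /* driver frees its own bits */
}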
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 618ed88fad0f..7c7680c8f0e3 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -632,7 +632,7 @@ void ipvlan_link_setup(struct net_device *dev)
632 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 632 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
633 dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; 633 dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
634 dev->netdev_ops = &ipvlan_netdev_ops; 634 dev->netdev_ops = &ipvlan_netdev_ops;
635 dev->destructor = free_netdev;
635 dev->needs_free_netdev = true;
636 dev->header_ops = &ipvlan_header_ops; 636 dev->header_ops = &ipvlan_header_ops;
637 dev->ethtool_ops = &ipvlan_ethtool_ops; 637 dev->ethtool_ops = &ipvlan_ethtool_ops;
638} 638}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 224f65cb576b..30612497643c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -159,7 +159,6 @@ static void loopback_dev_free(struct net_device *dev)
159{ 159{
160 dev_net(dev)->loopback_dev = NULL; 160 dev_net(dev)->loopback_dev = NULL;
161 free_percpu(dev->lstats); 161 free_percpu(dev->lstats);
162 free_netdev(dev);
163} 162}
164 163
165static const struct net_device_ops loopback_ops = { 164static const struct net_device_ops loopback_ops = {
@@ -196,7 +195,8 @@ static void loopback_setup(struct net_device *dev)
196 dev->ethtool_ops = &loopback_ethtool_ops; 195 dev->ethtool_ops = &loopback_ethtool_ops;
197 dev->header_ops = &eth_header_ops; 196 dev->header_ops = &eth_header_ops;
198 dev->netdev_ops = &loopback_ops; 197 dev->netdev_ops = &loopback_ops;
199 dev->destructor = loopback_dev_free;
198 dev->needs_free_netdev = true;
199 dev->priv_destructor = loopback_dev_free;
200} 200}
201 201
202/* Setup and register the loopback device. */ 202/* Setup and register the loopback device. */
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index cdc347be68f2..79411675f0e6 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2996,7 +2996,6 @@ static void macsec_free_netdev(struct net_device *dev)
2996 free_percpu(macsec->secy.tx_sc.stats); 2996 free_percpu(macsec->secy.tx_sc.stats);
2997 2997
2998 dev_put(real_dev); 2998 dev_put(real_dev);
2999 free_netdev(dev);
3000} 2999}
3001 3000
3002static void macsec_setup(struct net_device *dev) 3001static void macsec_setup(struct net_device *dev)
@@ -3006,7 +3005,8 @@ static void macsec_setup(struct net_device *dev)
3006 dev->max_mtu = ETH_MAX_MTU; 3005 dev->max_mtu = ETH_MAX_MTU;
3007 dev->priv_flags |= IFF_NO_QUEUE; 3006 dev->priv_flags |= IFF_NO_QUEUE;
3008 dev->netdev_ops = &macsec_netdev_ops; 3007 dev->netdev_ops = &macsec_netdev_ops;
3009 dev->destructor = macsec_free_netdev;
3008 dev->needs_free_netdev = true;
3009 dev->priv_destructor = macsec_free_netdev;
3010 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3010 SET_NETDEV_DEVTYPE(dev, &macsec_type);
3011 3011
3012 eth_zero_addr(dev->broadcast); 3012 eth_zero_addr(dev->broadcast);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 346ad2ff3998..67bf7ebae5c6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1092,7 +1092,7 @@ void macvlan_common_setup(struct net_device *dev)
1092 netif_keep_dst(dev); 1092 netif_keep_dst(dev);
1093 dev->priv_flags |= IFF_UNICAST_FLT; 1093 dev->priv_flags |= IFF_UNICAST_FLT;
1094 dev->netdev_ops = &macvlan_netdev_ops; 1094 dev->netdev_ops = &macvlan_netdev_ops;
1095 dev->destructor = free_netdev;
1095 dev->needs_free_netdev = true;
1096 dev->header_ops = &macvlan_hard_header_ops; 1096 dev->header_ops = &macvlan_hard_header_ops;
1097 dev->ethtool_ops = &macvlan_ethtool_ops; 1097 dev->ethtool_ops = &macvlan_ethtool_ops;
1098} 1098}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 06ee6395117f..0e27920c2b6b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -358,7 +358,7 @@ static ssize_t enabled_store(struct config_item *item,
358 if (err) 358 if (err)
359 goto out_unlock; 359 goto out_unlock;
360 360
361 pr_info("netconsole: network logging started\n");
361 pr_info("network logging started\n");
362 } else { /* false */ 362 } else { /* false */
363 /* We need to disable the netconsole before cleaning it up 363 /* We need to disable the netconsole before cleaning it up
364 * otherwise we might end up in write_msg() with 364 * otherwise we might end up in write_msg() with
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index b91603835d26..c4b3362da4a2 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev)
113 113
114 dev->netdev_ops = &nlmon_ops; 114 dev->netdev_ops = &nlmon_ops;
115 dev->ethtool_ops = &nlmon_ethtool_ops; 115 dev->ethtool_ops = &nlmon_ethtool_ops;
116 dev->destructor = free_netdev;
116 dev->needs_free_netdev = true;
117 117
118 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | 118 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
119 NETIF_F_HIGHDMA | NETIF_F_LLTX; 119 NETIF_F_HIGHDMA | NETIF_F_LLTX;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c360dd6ead22..3ab6c58d4be6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -127,6 +127,7 @@ config MDIO_THUNDER
127 tristate "ThunderX SOCs MDIO buses" 127 tristate "ThunderX SOCs MDIO buses"
128 depends on 64BIT 128 depends on 64BIT
129 depends on PCI 129 depends on PCI
130 depends on !(MDIO_DEVICE=y && PHYLIB=m)
130 select MDIO_CAVIUM 131 select MDIO_CAVIUM
131 help 132 help
132 This driver supports the MDIO interfaces found on Cavium 133 This driver supports the MDIO interfaces found on Cavium
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7524caa0f29d..eebb0e1c70ff 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -54,6 +54,8 @@ static const char *phy_speed_to_str(int speed)
54 return "5Gbps"; 54 return "5Gbps";
55 case SPEED_10000: 55 case SPEED_10000:
56 return "10Gbps"; 56 return "10Gbps";
57 case SPEED_14000:
58 return "14Gbps";
57 case SPEED_20000: 59 case SPEED_20000:
58 return "20Gbps"; 60 return "20Gbps";
59 case SPEED_25000: 61 case SPEED_25000:
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 1da31dc47f86..74b907206aa7 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev)
629static void sl_free_netdev(struct net_device *dev) 629static void sl_free_netdev(struct net_device *dev)
630{ 630{
631 int i = dev->base_addr; 631 int i = dev->base_addr;
632 free_netdev(dev); 632
633 slip_devs[i] = NULL; 633 slip_devs[i] = NULL;
634} 634}
635 635
@@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = {
651static void sl_setup(struct net_device *dev) 651static void sl_setup(struct net_device *dev)
652{ 652{
653 dev->netdev_ops = &sl_netdev_ops; 653 dev->netdev_ops = &sl_netdev_ops;
654 dev->destructor = sl_free_netdev;
654 dev->needs_free_netdev = true;
655 dev->priv_destructor = sl_free_netdev;
655 656
656 dev->hard_header_len = 0; 657 dev->hard_header_len = 0;
657 dev->addr_len = 0; 658 dev->addr_len = 0;
@@ -1369,8 +1370,6 @@ static void __exit slip_exit(void)
1369 if (sl->tty) { 1370 if (sl->tty) {
1370 printk(KERN_ERR "%s: tty discipline still running\n", 1371 printk(KERN_ERR "%s: tty discipline still running\n",
1371 dev->name); 1372 dev->name);
1372 /* Intentionally leak the control block. */
1373 dev->destructor = NULL;
1374 } 1373 }
1375 1374
1376 unregister_netdev(dev); 1375 unregister_netdev(dev);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6c5d5ef46f75..fba8c136aa7c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev)
1643 struct team *team = netdev_priv(dev); 1643 struct team *team = netdev_priv(dev);
1644 1644
1645 free_percpu(team->pcpu_stats); 1645 free_percpu(team->pcpu_stats);
1646 free_netdev(dev);
1647} 1646}
1648 1647
1649static int team_open(struct net_device *dev) 1648static int team_open(struct net_device *dev)
@@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev)
2079 2078
2080 dev->netdev_ops = &team_netdev_ops; 2079 dev->netdev_ops = &team_netdev_ops;
2081 dev->ethtool_ops = &team_ethtool_ops; 2080 dev->ethtool_ops = &team_ethtool_ops;
2082 dev->destructor = team_destructor;
2081 dev->needs_free_netdev = true;
2082 dev->priv_destructor = team_destructor;
2083 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 2083 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2084 dev->priv_flags |= IFF_NO_QUEUE; 2084 dev->priv_flags |= IFF_NO_QUEUE;
2085 dev->priv_flags |= IFF_TEAM; 2085 dev->priv_flags |= IFF_TEAM;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bbd707b9ef7a..9ee7d4275640 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev)
1560 free_percpu(tun->pcpu_stats); 1560 free_percpu(tun->pcpu_stats);
1561 tun_flow_uninit(tun); 1561 tun_flow_uninit(tun);
1562 security_tun_dev_free_security(tun->security); 1562 security_tun_dev_free_security(tun->security);
1563 free_netdev(dev);
1564} 1563}
1565 1564
1566static void tun_setup(struct net_device *dev) 1565static void tun_setup(struct net_device *dev)
@@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev)
1571 tun->group = INVALID_GID; 1570 tun->group = INVALID_GID;
1572 1571
1573 dev->ethtool_ops = &tun_ethtool_ops; 1572 dev->ethtool_ops = &tun_ethtool_ops;
1574 dev->destructor = tun_free_netdev;
1573 dev->needs_free_netdev = true;
1574 dev->priv_destructor = tun_free_netdev;
1575 /* We prefer our own queue length */ 1575 /* We prefer our own queue length */
1576 dev->tx_queue_len = TUN_READQ_SIZE; 1576 dev->tx_queue_len = TUN_READQ_SIZE;
1577} 1577}
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index eb52de8205f0..c7a350bbaaa7 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev)
298 dev->addr_len = 1; 298 dev->addr_len = 1;
299 dev->tx_queue_len = 3; 299 dev->tx_queue_len = 3;
300 300
301 dev->destructor = free_netdev;
301 dev->needs_free_netdev = true;
302} 302}
303 303
304/* 304/*
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8f923a147fa9..32a22f4e8356 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,7 +123,7 @@ static void qmimux_setup(struct net_device *dev)
123 dev->addr_len = 0; 123 dev->addr_len = 0;
124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
125 dev->netdev_ops = &qmimux_netdev_ops; 125 dev->netdev_ops = &qmimux_netdev_ops;
126 dev->destructor = free_netdev;
126 dev->needs_free_netdev = true;
127} 127}
128 128
129static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) 129static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id)
@@ -1192,6 +1192,8 @@ static const struct usb_device_id products[] = {
1192 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 1192 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
1193 {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, 1193 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
1194 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 1194 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1195 {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
1196 {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
1195 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ 1197 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
1196 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ 1198 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
1197 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ 1199 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
@@ -1206,6 +1208,8 @@ static const struct usb_device_id products[] = {
1206 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ 1208 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
1207 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1209 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1208 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ 1210 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1211 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
1212 {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
1209 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ 1213 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
1210 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 1214 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
1211 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 1215 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ddc62cb69be8..1a419a45e2a2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4368,6 +4368,8 @@ static u8 rtl_get_version(struct usb_interface *intf)
4368 break; 4368 break;
4369 } 4369 }
4370 4370
4371 dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);
4372
4371 return version; 4373 return version;
4372} 4374}
4373 4375
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 38f0f03a29c8..0156fe8cac17 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -222,7 +222,6 @@ static int veth_dev_init(struct net_device *dev)
222static void veth_dev_free(struct net_device *dev) 222static void veth_dev_free(struct net_device *dev)
223{ 223{
224 free_percpu(dev->vstats); 224 free_percpu(dev->vstats);
225 free_netdev(dev);
226} 225}
227 226
228#ifdef CONFIG_NET_POLL_CONTROLLER 227#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -317,7 +316,8 @@ static void veth_setup(struct net_device *dev)
317 NETIF_F_HW_VLAN_STAG_TX | 316 NETIF_F_HW_VLAN_STAG_TX |
318 NETIF_F_HW_VLAN_CTAG_RX | 317 NETIF_F_HW_VLAN_CTAG_RX |
319 NETIF_F_HW_VLAN_STAG_RX); 318 NETIF_F_HW_VLAN_STAG_RX);
320 dev->destructor = veth_dev_free;
319 dev->needs_free_netdev = true;
320 dev->priv_destructor = veth_dev_free;
321 dev->max_mtu = ETH_MAX_MTU; 321 dev->max_mtu = ETH_MAX_MTU;
322 322
323 dev->hw_features = VETH_FEATURES; 323 dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index db882493875c..022c0b5f9844 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -36,12 +36,14 @@
36#include <net/addrconf.h> 36#include <net/addrconf.h>
37#include <net/l3mdev.h> 37#include <net/l3mdev.h>
38#include <net/fib_rules.h> 38#include <net/fib_rules.h>
39#include <net/netns/generic.h>
39 40
40#define DRV_NAME "vrf" 41#define DRV_NAME "vrf"
41#define DRV_VERSION "1.0" 42#define DRV_VERSION "1.0"
42 43
43#define FIB_RULE_PREF 1000 /* default preference for FIB rules */ 44#define FIB_RULE_PREF 1000 /* default preference for FIB rules */
44static bool add_fib_rules = true;
45
46static unsigned int vrf_net_id;
45 47
46struct net_vrf { 48struct net_vrf {
47 struct rtable __rcu *rth; 49 struct rtable __rcu *rth;
@@ -1348,7 +1350,7 @@ static void vrf_setup(struct net_device *dev)
1348 dev->netdev_ops = &vrf_netdev_ops; 1350 dev->netdev_ops = &vrf_netdev_ops;
1349 dev->l3mdev_ops = &vrf_l3mdev_ops; 1351 dev->l3mdev_ops = &vrf_l3mdev_ops;
1350 dev->ethtool_ops = &vrf_ethtool_ops; 1352 dev->ethtool_ops = &vrf_ethtool_ops;
1351 dev->destructor = free_netdev;
1353 dev->needs_free_netdev = true;
1352 1354
1353 /* Fill in device structure with ethernet-generic values. */ 1355 /* Fill in device structure with ethernet-generic values. */
1354 eth_hw_addr_random(dev); 1356 eth_hw_addr_random(dev);
@@ -1394,6 +1396,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
1394 struct nlattr *tb[], struct nlattr *data[]) 1396 struct nlattr *tb[], struct nlattr *data[])
1395{ 1397{
1396 struct net_vrf *vrf = netdev_priv(dev); 1398 struct net_vrf *vrf = netdev_priv(dev);
1399 bool *add_fib_rules;
1400 struct net *net;
1397 int err; 1401 int err;
1398 1402
1399 if (!data || !data[IFLA_VRF_TABLE]) 1403 if (!data || !data[IFLA_VRF_TABLE])
@@ -1409,13 +1413,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
1409 if (err) 1413 if (err)
1410 goto out; 1414 goto out;
1411 1415
1412 if (add_fib_rules) {
1416 net = dev_net(dev);
1417 add_fib_rules = net_generic(net, vrf_net_id);
1418 if (*add_fib_rules) {
1413 err = vrf_add_fib_rules(dev); 1419 err = vrf_add_fib_rules(dev);
1414 if (err) { 1420 if (err) {
1415 unregister_netdevice(dev); 1421 unregister_netdevice(dev);
1416 goto out; 1422 goto out;
1417 } 1423 }
1418 add_fib_rules = false;
1424 *add_fib_rules = false;
1419 } 1425 }
1420 1426
1421out: 1427out:
@@ -1498,16 +1504,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = {
1498 .notifier_call = vrf_device_event, 1504 .notifier_call = vrf_device_event,
1499}; 1505};
1500 1506
1507/* Initialize per network namespace state */
1508static int __net_init vrf_netns_init(struct net *net)
1509{
1510 bool *add_fib_rules = net_generic(net, vrf_net_id);
1511
1512 *add_fib_rules = true;
1513
1514 return 0;
1515}
1516
1517static struct pernet_operations vrf_net_ops __net_initdata = {
1518 .init = vrf_netns_init,
1519 .id = &vrf_net_id,
1520 .size = sizeof(bool),
1521};
1522
1501static int __init vrf_init_module(void) 1523static int __init vrf_init_module(void)
1502{ 1524{
1503 int rc; 1525 int rc;
1504 1526
1505 register_netdevice_notifier(&vrf_notifier_block); 1527 register_netdevice_notifier(&vrf_notifier_block);
1506 1528
1507 rc = rtnl_link_register(&vrf_link_ops);
1529 rc = register_pernet_subsys(&vrf_net_ops);
1508 if (rc < 0) 1530 if (rc < 0)
1509 goto error; 1531 goto error;
1510 1532
1533 rc = rtnl_link_register(&vrf_link_ops);
1534 if (rc < 0) {
1535 unregister_pernet_subsys(&vrf_net_ops);
1536 goto error;
1537 }
1538
1511 return 0; 1539 return 0;
1512 1540
1513error: 1541error:
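The vrf hunks replace the module-wide add_fib_rules flag with per-network-namespace state: register_pernet_subsys() allocates one bool per netns (via .size), vrf_netns_init() initialises it, and vrf_newlink() looks it up with net_generic(), so the l3mdev FIB rules get installed once per namespace instead of once per module load. Condensed sketch using the same identifiers as the hunks above:

static unsigned int vrf_net_id;

static int __net_init vrf_netns_init(struct net *net)
{
        bool *add_fib_rules = net_generic(net, vrf_net_id);

        *add_fib_rules = true;          /* no rules installed in this netns yet */
        return 0;
}

static struct pernet_operations vrf_net_ops __net_initdata = {
        .init = vrf_netns_init,
        .id   = &vrf_net_id,
        .size = sizeof(bool),           /* per-netns allocation for the flag */
};

The ordering in vrf_init_module() matters: the pernet state has to exist before any vrf device can be created, hence register_pernet_subsys() runs before rtnl_link_register(), and the new error path unregisters the pernet ops if link registration fails.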
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 7f0136f2dd9d..c28bdce14fd5 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -135,7 +135,7 @@ static void vsockmon_setup(struct net_device *dev)
135 135
136 dev->netdev_ops = &vsockmon_ops; 136 dev->netdev_ops = &vsockmon_ops;
137 dev->ethtool_ops = &vsockmon_ethtool_ops; 137 dev->ethtool_ops = &vsockmon_ethtool_ops;
138 dev->destructor = free_netdev;
138 dev->needs_free_netdev = true;
139 139
140 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | 140 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
141 NETIF_F_HIGHDMA | NETIF_F_LLTX; 141 NETIF_F_HIGHDMA | NETIF_F_LLTX;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a6b5052c1d36..5fa798a5c9a6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2611,7 +2611,7 @@ static void vxlan_setup(struct net_device *dev)
2611 eth_hw_addr_random(dev); 2611 eth_hw_addr_random(dev);
2612 ether_setup(dev); 2612 ether_setup(dev);
2613 2613
2614 dev->destructor = free_netdev;
2614 dev->needs_free_netdev = true;
2615 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 2615 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2616 2616
2617 dev->features |= NETIF_F_LLTX; 2617 dev->features |= NETIF_F_LLTX;
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 65ee2a6f248c..a0d76f70c428 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev)
475 dev->flags = 0; 475 dev->flags = 0;
476 dev->header_ops = &dlci_header_ops; 476 dev->header_ops = &dlci_header_ops;
477 dev->netdev_ops = &dlci_netdev_ops; 477 dev->netdev_ops = &dlci_netdev_ops;
478 dev->destructor = free_netdev;
478 dev->needs_free_netdev = true;
479 479
480 dlp->receive = dlci_receive; 480 dlp->receive = dlci_receive;
481 481
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index eb915281197e..78596e42a3f3 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1106 return -EIO; 1106 return -EIO;
1107 } 1107 }
1108 1108
1109 dev->destructor = free_netdev;
1109 dev->needs_free_netdev = true;
1110 *get_dev_p(pvc, type) = dev; 1110 *get_dev_p(pvc, type) = dev;
1111 if (!used) { 1111 if (!used) {
1112 state(hdlc)->dce_changed = 1; 1112 state(hdlc)->dce_changed = 1;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 9df9ed62beff..63f749078a1f 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = {
306static void lapbeth_setup(struct net_device *dev) 306static void lapbeth_setup(struct net_device *dev)
307{ 307{
308 dev->netdev_ops = &lapbeth_netdev_ops; 308 dev->netdev_ops = &lapbeth_netdev_ops;
309 dev->destructor = free_netdev;
309 dev->needs_free_netdev = true;
310 dev->type = ARPHRD_X25; 310 dev->type = ARPHRD_X25;
311 dev->hard_header_len = 3; 311 dev->hard_header_len = 3;
312 dev->mtu = 1000; 312 dev->mtu = 1000;
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 91ee542de3d7..b90c77ef792e 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev)
1287 struct ath6kl *ar = ath6kl_priv(dev); 1287 struct ath6kl *ar = ath6kl_priv(dev);
1288 1288
1289 dev->netdev_ops = &ath6kl_netdev_ops; 1289 dev->netdev_ops = &ath6kl_netdev_ops;
1290 dev->destructor = free_netdev;
1290 dev->needs_free_netdev = true;
1291 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; 1291 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
1292 1292
1293 dev->needed_headroom = ETH_HLEN; 1293 dev->needed_headroom = ETH_HLEN;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index cd1d6730eab7..617199c0e5a0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5225,7 +5225,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev)
5225 5225
5226 if (vif) 5226 if (vif)
5227 brcmf_free_vif(vif); 5227 brcmf_free_vif(vif);
5228 free_netdev(ndev);
5229} 5228}
5230 5229
5231static bool brcmf_is_linkup(const struct brcmf_event_msg *e) 5230static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index a3d82368f1a9..511d190c6cca 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -624,7 +624,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
624 if (!ndev) 624 if (!ndev)
625 return ERR_PTR(-ENOMEM); 625 return ERR_PTR(-ENOMEM);
626 626
627 ndev->destructor = brcmf_cfg80211_free_netdev;
627 ndev->needs_free_netdev = true;
628 ndev->priv_destructor = brcmf_cfg80211_free_netdev;
628 ifp = netdev_priv(ndev); 629 ifp = netdev_priv(ndev);
629 ifp->ndev = ndev; 630 ifp->ndev = ndev;
630 /* store mapping ifidx to bsscfgidx */ 631 /* store mapping ifidx to bsscfgidx */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 544fc09dcb62..1372b20f931e 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
73 dev->mem_end = mdev->mem_end; 73 dev->mem_end = mdev->mem_end;
74 74
75 hostap_setup_dev(dev, local, type); 75 hostap_setup_dev(dev, local, type);
76 dev->destructor = free_netdev;
76 dev->needs_free_netdev = true;
77 77
78 sprintf(dev->name, "%s%s", prefix, name); 78 sprintf(dev->name, "%s%s", prefix, name);
79 if (!rtnl_locked) 79 if (!rtnl_locked)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 002b25cff5b6..c854a557998b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2861,7 +2861,7 @@ static const struct net_device_ops hwsim_netdev_ops = {
2861static void hwsim_mon_setup(struct net_device *dev) 2861static void hwsim_mon_setup(struct net_device *dev)
2862{ 2862{
2863 dev->netdev_ops = &hwsim_netdev_ops; 2863 dev->netdev_ops = &hwsim_netdev_ops;
2864 dev->destructor = free_netdev;
2864 dev->needs_free_netdev = true;
2865 ether_setup(dev); 2865 ether_setup(dev);
2866 dev->priv_flags |= IFF_NO_QUEUE; 2866 dev->priv_flags |= IFF_NO_QUEUE;
2867 dev->type = ARPHRD_IEEE80211_RADIOTAP; 2867 dev->type = ARPHRD_IEEE80211_RADIOTAP;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index dd87b9ff64c3..39b6b5e3f6e0 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1280,7 +1280,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
1280 struct net_device *dev) 1280 struct net_device *dev)
1281{ 1281{
1282 dev->netdev_ops = &mwifiex_netdev_ops; 1282 dev->netdev_ops = &mwifiex_netdev_ops;
1283 dev->destructor = free_netdev;
1283 dev->needs_free_netdev = true;
1284 /* Initialize private structure */ 1284 /* Initialize private structure */
1285 priv->current_key_index = 0; 1285 priv->current_key_index = 0;
1286 priv->media_connected = false; 1286 priv->media_connected = false;
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 74cf5fffb1e1..c80e37a69305 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -896,7 +896,7 @@ int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
896{ 896{
897 if (pci_dev_is_disconnected(dev)) { 897 if (pci_dev_is_disconnected(dev)) {
898 *val = ~0; 898 *val = ~0;
899 return -ENODEV;
899 return PCIBIOS_DEVICE_NOT_FOUND;
900 } 900 }
901 return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); 901 return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
902} 902}
@@ -906,7 +906,7 @@ int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
906{ 906{
907 if (pci_dev_is_disconnected(dev)) { 907 if (pci_dev_is_disconnected(dev)) {
908 *val = ~0; 908 *val = ~0;
909 return -ENODEV;
909 return PCIBIOS_DEVICE_NOT_FOUND;
910 } 910 }
911 return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); 911 return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
912} 912}
@@ -917,7 +917,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where,
917{ 917{
918 if (pci_dev_is_disconnected(dev)) { 918 if (pci_dev_is_disconnected(dev)) {
919 *val = ~0; 919 *val = ~0;
920 return -ENODEV;
920 return PCIBIOS_DEVICE_NOT_FOUND;
921 } 921 }
922 return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); 922 return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
923} 923}
@@ -926,7 +926,7 @@ EXPORT_SYMBOL(pci_read_config_dword);
926int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) 926int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
927{ 927{
928 if (pci_dev_is_disconnected(dev)) 928 if (pci_dev_is_disconnected(dev))
929 return -ENODEV;
929 return PCIBIOS_DEVICE_NOT_FOUND;
930 return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); 930 return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
931} 931}
932EXPORT_SYMBOL(pci_write_config_byte); 932EXPORT_SYMBOL(pci_write_config_byte);
@@ -934,7 +934,7 @@ EXPORT_SYMBOL(pci_write_config_byte);
934int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) 934int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
935{ 935{
936 if (pci_dev_is_disconnected(dev)) 936 if (pci_dev_is_disconnected(dev))
937 return -ENODEV;
937 return PCIBIOS_DEVICE_NOT_FOUND;
938 return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); 938 return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
939} 939}
940EXPORT_SYMBOL(pci_write_config_word); 940EXPORT_SYMBOL(pci_write_config_word);
@@ -943,7 +943,7 @@ int pci_write_config_dword(const struct pci_dev *dev, int where,
943 u32 val) 943 u32 val)
944{ 944{
945 if (pci_dev_is_disconnected(dev)) 945 if (pci_dev_is_disconnected(dev))
946 return -ENODEV;
946 return PCIBIOS_DEVICE_NOT_FOUND;
947 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); 947 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
948} 948}
949EXPORT_SYMBOL(pci_write_config_dword); 949EXPORT_SYMBOL(pci_write_config_dword);
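The config-space accessors above now report a disconnected device with PCIBIOS_DEVICE_NOT_FOUND, the positive PCIBIOS_* code the underlying bus ops already use, instead of -ENODEV. A caller that wants a normal errno can convert the code; illustrative sketch (foo_read_vendor is a made-up example, pcibios_err_to_errno() is the existing helper from <linux/pci.h>):

static int foo_read_vendor(struct pci_dev *pdev, u16 *vendor)
{
        int rc = pci_read_config_word(pdev, PCI_VENDOR_ID, vendor);

        /* maps PCIBIOS_DEVICE_NOT_FOUND and friends to -Exxx values */
        return rc ? pcibios_err_to_errno(rc) : 0;
}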
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 175edad42d2f..2942066607e0 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -5,6 +5,7 @@
5config PCI_EPF_TEST 5config PCI_EPF_TEST
6 tristate "PCI Endpoint Test driver" 6 tristate "PCI Endpoint Test driver"
7 depends on PCI_ENDPOINT 7 depends on PCI_ENDPOINT
8 select CRC32
8 help 9 help
9 Enable this configuration option to enable the test driver 10 Enable this configuration option to enable the test driver
10 for PCI Endpoint. 11 for PCI Endpoint.
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index ef29f18b1951..4cc2f4ea0a25 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -97,11 +97,9 @@
97 } \ 97 } \
98} 98}
99 99
100#ifdef CONFIG_PM_SLEEP
101static u8 suspend_prep_ok; 100static u8 suspend_prep_ok;
102static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp; 101static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
103static u64 suspend_shlw_res_temp, suspend_deep_res_temp; 102static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
104#endif
105 103
106struct telemetry_susp_stats { 104struct telemetry_susp_stats {
107 u32 shlw_swake_ctr; 105 u32 shlw_swake_ctr;
@@ -807,7 +805,6 @@ static const struct file_operations telem_ioss_trc_verb_ops = {
807 .release = single_release, 805 .release = single_release,
808}; 806};
809 807
810#ifdef CONFIG_PM_SLEEP
811static int pm_suspend_prep_cb(void) 808static int pm_suspend_prep_cb(void)
812{ 809{
813 struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS]; 810 struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
@@ -937,7 +934,6 @@ static int pm_notification(struct notifier_block *this,
937static struct notifier_block pm_notifier = { 934static struct notifier_block pm_notifier = {
938 .notifier_call = pm_notification, 935 .notifier_call = pm_notification,
939}; 936};
940#endif /* CONFIG_PM_SLEEP */
941 937
942static int __init telemetry_debugfs_init(void) 938static int __init telemetry_debugfs_init(void)
943{ 939{
@@ -960,14 +956,13 @@ static int __init telemetry_debugfs_init(void)
960 if (err < 0) 956 if (err < 0)
961 return -EINVAL; 957 return -EINVAL;
962 958
963
964#ifdef CONFIG_PM_SLEEP
965 register_pm_notifier(&pm_notifier); 959 register_pm_notifier(&pm_notifier);
966#endif /* CONFIG_PM_SLEEP */
967 960
968 debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); 961 debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL);
969 if (!debugfs_conf->telemetry_dbg_dir)
970 return -ENOMEM;
962 if (!debugfs_conf->telemetry_dbg_dir) {
963 err = -ENOMEM;
964 goto out_pm;
965 }
971 966
972 f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, 967 f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO,
973 debugfs_conf->telemetry_dbg_dir, NULL, 968 debugfs_conf->telemetry_dbg_dir, NULL,
@@ -1014,6 +1009,8 @@ static int __init telemetry_debugfs_init(void)
1014out: 1009out:
1015 debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); 1010 debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
1016 debugfs_conf->telemetry_dbg_dir = NULL; 1011 debugfs_conf->telemetry_dbg_dir = NULL;
1012out_pm:
1013 unregister_pm_notifier(&pm_notifier);
1017 1014
1018 return err; 1015 return err;
1019} 1016}
@@ -1022,6 +1019,7 @@ static void __exit telemetry_debugfs_exit(void)
1022{ 1019{
1023 debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); 1020 debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
1024 debugfs_conf->telemetry_dbg_dir = NULL; 1021 debugfs_conf->telemetry_dbg_dir = NULL;
1022 unregister_pm_notifier(&pm_notifier);
1025} 1023}
1026 1024
1027late_initcall(telemetry_debugfs_init); 1025late_initcall(telemetry_debugfs_init);
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index e72abbc18ee3..a66a317f3e4f 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
70{ 70{
71 return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); 71 return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
72} 72}
73MDEV_TYPE_ATTR_RO(name);
73static MDEV_TYPE_ATTR_RO(name);
74 74
75static ssize_t device_api_show(struct kobject *kobj, struct device *dev, 75static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
76 char *buf) 76 char *buf)
77{ 77{
78 return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); 78 return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
79} 79}
80MDEV_TYPE_ATTR_RO(device_api);
80static MDEV_TYPE_ATTR_RO(device_api);
81 81
82static ssize_t available_instances_show(struct kobject *kobj, 82static ssize_t available_instances_show(struct kobject *kobj,
83 struct device *dev, char *buf) 83 struct device *dev, char *buf)
@@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj,
86 86
87 return sprintf(buf, "%d\n", atomic_read(&private->avail)); 87 return sprintf(buf, "%d\n", atomic_read(&private->avail));
88} 88}
89MDEV_TYPE_ATTR_RO(available_instances);
89static MDEV_TYPE_ATTR_RO(available_instances);
90 90
91static struct attribute *mdev_types_attrs[] = { 91static struct attribute *mdev_types_attrs[] = {
92 &mdev_type_attr_name.attr, 92 &mdev_type_attr_name.attr,
@@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = {
100 .attrs = mdev_types_attrs, 100 .attrs = mdev_types_attrs,
101}; 101};
102 102
103struct attribute_group *mdev_type_groups[] = {
103static struct attribute_group *mdev_type_groups[] = {
104 &mdev_type_group, 104 &mdev_type_group,
105 NULL, 105 NULL,
106}; 106};
@@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
152 &events, &private->nb); 152 &events, &private->nb);
153} 153}
154 154
155void vfio_ccw_mdev_release(struct mdev_device *mdev)
155static void vfio_ccw_mdev_release(struct mdev_device *mdev)
156{ 156{
157 struct vfio_ccw_private *private = 157 struct vfio_ccw_private *private =
158 dev_get_drvdata(mdev_parent_dev(mdev)); 158 dev_get_drvdata(mdev_parent_dev(mdev));
@@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
233 } 233 }
234} 234}
235 235
236int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
236static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
237{ 237{
238 if (info->index != VFIO_CCW_IO_IRQ_INDEX) 238 if (info->index != VFIO_CCW_IO_IRQ_INDEX)
239 return -EINVAL; 239 return -EINVAL;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9be4596d8a08..ea099910b4e9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev)
668 struct ap_driver *ap_drv = to_ap_drv(dev->driver); 668 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
669 int rc; 669 int rc;
670 670
671 /* Add queue/card to list of active queues/cards */
672 spin_lock_bh(&ap_list_lock);
673 if (is_card_dev(dev))
674 list_add(&to_ap_card(dev)->list, &ap_card_list);
675 else
676 list_add(&to_ap_queue(dev)->list,
677 &to_ap_queue(dev)->card->queues);
678 spin_unlock_bh(&ap_list_lock);
679
671 ap_dev->drv = ap_drv; 680 ap_dev->drv = ap_drv;
672 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 681 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
673 if (rc)
682
683 if (rc) {
684 spin_lock_bh(&ap_list_lock);
685 if (is_card_dev(dev))
686 list_del_init(&to_ap_card(dev)->list);
687 else
688 list_del_init(&to_ap_queue(dev)->list);
689 spin_unlock_bh(&ap_list_lock);
674 ap_dev->drv = NULL; 690 ap_dev->drv = NULL;
691 }
692
675 return rc; 693 return rc;
676} 694}
677 695
@@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev)
680 struct ap_device *ap_dev = to_ap_dev(dev); 698 struct ap_device *ap_dev = to_ap_dev(dev);
681 struct ap_driver *ap_drv = ap_dev->drv; 699 struct ap_driver *ap_drv = ap_dev->drv;
682 700
701 if (ap_drv->remove)
702 ap_drv->remove(ap_dev);
703
704 /* Remove queue/card from list of active queues/cards */
683 spin_lock_bh(&ap_list_lock); 705 spin_lock_bh(&ap_list_lock);
684 if (is_card_dev(dev)) 706 if (is_card_dev(dev))
685 list_del_init(&to_ap_card(dev)->list); 707 list_del_init(&to_ap_card(dev)->list);
686 else 708 else
687 list_del_init(&to_ap_queue(dev)->list); 709 list_del_init(&to_ap_queue(dev)->list);
688 spin_unlock_bh(&ap_list_lock); 710 spin_unlock_bh(&ap_list_lock);
689 if (ap_drv->remove)
711
690 ap_drv->remove(ap_dev);
691 return 0; 712 return 0;
692} 713}
693 714
@@ -1056,10 +1077,6 @@ static void ap_scan_bus(struct work_struct *unused)
1056 } 1077 }
1057 /* get it and thus adjust reference counter */ 1078 /* get it and thus adjust reference counter */
1058 get_device(&ac->ap_dev.device); 1079 get_device(&ac->ap_dev.device);
1059 /* Add card device to card list */
1060 spin_lock_bh(&ap_list_lock);
1061 list_add(&ac->list, &ap_card_list);
1062 spin_unlock_bh(&ap_list_lock);
1063 } 1080 }
1064 /* now create the new queue device */ 1081 /* now create the new queue device */
1065 aq = ap_queue_create(qid, type); 1082 aq = ap_queue_create(qid, type);
@@ -1070,10 +1087,6 @@ static void ap_scan_bus(struct work_struct *unused)
1070 aq->ap_dev.device.parent = &ac->ap_dev.device; 1087 aq->ap_dev.device.parent = &ac->ap_dev.device;
1071 dev_set_name(&aq->ap_dev.device, 1088 dev_set_name(&aq->ap_dev.device,
1072 "%02x.%04x", id, dom); 1089 "%02x.%04x", id, dom);
1073 /* Add queue device to card queue list */
1074 spin_lock_bh(&ap_list_lock);
1075 list_add(&aq->list, &ac->queues);
1076 spin_unlock_bh(&ap_list_lock);
1077 /* Start with a device reset */ 1090 /* Start with a device reset */
1078 spin_lock_bh(&aq->lock); 1091 spin_lock_bh(&aq->lock);
1079 ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 1092 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
@@ -1081,9 +1094,6 @@ static void ap_scan_bus(struct work_struct *unused)
1081 /* Register device */ 1094 /* Register device */
1082 rc = device_register(&aq->ap_dev.device); 1095 rc = device_register(&aq->ap_dev.device);
1083 if (rc) { 1096 if (rc) {
1084 spin_lock_bh(&ap_list_lock);
1085 list_del_init(&aq->list);
1086 spin_unlock_bh(&ap_list_lock);
1087 put_device(&aq->ap_dev.device); 1097 put_device(&aq->ap_dev.device);
1088 continue; 1098 continue;
1089 } 1099 }
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index cfa161ccc74e..836efac96813 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -160,7 +160,14 @@ static struct device_type ap_card_type = {
160 160
161static void ap_card_device_release(struct device *dev) 161static void ap_card_device_release(struct device *dev)
162{ 162{
163 kfree(to_ap_card(dev));
163 struct ap_card *ac = to_ap_card(dev);
164
165 if (!list_empty(&ac->list)) {
166 spin_lock_bh(&ap_list_lock);
167 list_del_init(&ac->list);
168 spin_unlock_bh(&ap_list_lock);
169 }
170 kfree(ac);
164} 171}
165 172
166struct ap_card *ap_card_create(int id, int queue_depth, int device_type, 173struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 480c58a63769..0f1a5d02acb0 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -584,7 +584,14 @@ static struct device_type ap_queue_type = {
584 584
585static void ap_queue_device_release(struct device *dev) 585static void ap_queue_device_release(struct device *dev)
586{ 586{
587 kfree(to_ap_queue(dev));
587 struct ap_queue *aq = to_ap_queue(dev);
588
589 if (!list_empty(&aq->list)) {
590 spin_lock_bh(&ap_list_lock);
591 list_del_init(&aq->list);
592 spin_unlock_bh(&ap_list_lock);
593 }
594 kfree(aq);
588} 595}
589 596
590struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) 597struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index dba94b486f05..fa732bd86729 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1954,7 +1954,6 @@ static void netiucv_free_netdevice(struct net_device *dev)
1954 privptr->conn = NULL; privptr->fsm = NULL; 1954 privptr->conn = NULL; privptr->fsm = NULL;
1955 /* privptr gets freed by free_netdev() */ 1955 /* privptr gets freed by free_netdev() */
1956 } 1956 }
1957 free_netdev(dev);
1958} 1957}
1959 1958
1960/** 1959/**
@@ -1972,7 +1971,8 @@ static void netiucv_setup_netdevice(struct net_device *dev)
1972 dev->mtu = NETIUCV_MTU_DEFAULT; 1971 dev->mtu = NETIUCV_MTU_DEFAULT;
1973 dev->min_mtu = 576; 1972 dev->min_mtu = 576;
1974 dev->max_mtu = NETIUCV_MTU_MAX; 1973 dev->max_mtu = NETIUCV_MTU_MAX;
1975 dev->destructor = netiucv_free_netdevice;
1974 dev->needs_free_netdev = true;
1975 dev->priv_destructor = netiucv_free_netdevice;
1976 dev->hard_header_len = NETIUCV_HDRLEN; 1976 dev->hard_header_len = NETIUCV_HDRLEN;
1977 dev->addr_len = 0; 1977 dev->addr_len = 0;
1978 dev->type = ARPHRD_SLIP; 1978 dev->type = ARPHRD_SLIP;
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index dc6ecd824365..ff10d1f0a7e4 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -231,16 +231,12 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val)
231 if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) 231 if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
232 i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; 232 i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
233 233
234 mutex_lock(&chip->state_lock);
235 ret = i2c_smbus_write_byte_data(chip->client, 234 ret = i2c_smbus_write_byte_data(chip->client,
236 AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); 235 AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
237 if (ret < 0) {
236 if (ret < 0)
238 mutex_unlock(&chip->state_lock);
239 return ret; 237 return ret;
240 }
241 238
242 chip->filter_rate_setup = i; 239 chip->filter_rate_setup = i;
243 mutex_unlock(&chip->state_lock);
244 240
245 return ret; 241 return ret;
246} 242}
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index cfe37eb026d6..859d0d6051cd 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -152,7 +152,7 @@ static const struct net_device_ops mon_netdev_ops = {
152static void mon_setup(struct net_device *dev) 152static void mon_setup(struct net_device *dev)
153{ 153{
154 dev->netdev_ops = &mon_netdev_ops; 154 dev->netdev_ops = &mon_netdev_ops;
155 dev->destructor = free_netdev;
155 dev->needs_free_netdev = true;
156 ether_setup(dev); 156 ether_setup(dev);
157 dev->priv_flags |= IFF_NO_QUEUE; 157 dev->priv_flags |= IFF_NO_QUEUE;
158 dev->type = ARPHRD_IEEE80211; 158 dev->type = ARPHRD_IEEE80211;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 36c3189fc4b7..bd4352fe2de3 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2667,7 +2667,8 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
2667 mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP; 2667 mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
2668 strncpy(mon_ndev->name, name, IFNAMSIZ); 2668 strncpy(mon_ndev->name, name, IFNAMSIZ);
2669 mon_ndev->name[IFNAMSIZ - 1] = 0; 2669 mon_ndev->name[IFNAMSIZ - 1] = 0;
2670 mon_ndev->destructor = rtw_ndev_destructor;
2670 mon_ndev->needs_free_netdev = true;
2671 mon_ndev->priv_destructor = rtw_ndev_destructor;
2671 2672
2672 mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops; 2673 mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
2673 2674
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index f83cfc76505c..021589913681 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -1207,8 +1207,6 @@ void rtw_ndev_destructor(struct net_device *ndev)
1207 1207
1208 if (ndev->ieee80211_ptr) 1208 if (ndev->ieee80211_ptr)
1209 kfree((u8 *)ndev->ieee80211_ptr); 1209 kfree((u8 *)ndev->ieee80211_ptr);
1210
1211 free_netdev(ndev);
1212} 1210}
1213 1211
1214void rtw_dev_unload(struct adapter *padapter) 1212void rtw_dev_unload(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 02db59e8b593..aa16d1ab955b 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -160,7 +160,7 @@ static int isFileReadable(char *path)
160 oldfs = get_fs(); set_fs(get_ds()); 160 oldfs = get_fs(); set_fs(get_ds());
161 161
162 if (1!=readFile(fp, &buf, 1)) 162 if (1!=readFile(fp, &buf, 1))
163 ret = PTR_ERR(fp);
163 ret = -EINVAL;
164 164
165 set_fs(oldfs); 165 set_fs(oldfs);
166 filp_close(fp, NULL); 166 filp_close(fp, NULL);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 49d685ad0da9..45b554032332 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -315,6 +315,9 @@ void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
315 list_del(&f->list); 315 list_del(&f->list);
316 if (f->unbind) 316 if (f->unbind)
317 f->unbind(c, f); 317 f->unbind(c, f);
318
319 if (f->bind_deactivated)
320 usb_function_activate(f);
318} 321}
319EXPORT_SYMBOL_GPL(usb_remove_function); 322EXPORT_SYMBOL_GPL(usb_remove_function);
320 323
@@ -956,12 +959,8 @@ static void remove_config(struct usb_composite_dev *cdev,
956 959
957 f = list_first_entry(&config->functions, 960 f = list_first_entry(&config->functions,
958 struct usb_function, list); 961 struct usb_function, list);
959 list_del(&f->list);
962
960 if (f->unbind) {
963 usb_remove_function(config, f);
961 DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
962 f->unbind(config, f);
963 /* may free memory for "f" */
964 }
965 } 964 }
966 list_del(&config->list); 965 list_del(&config->list);
967 if (config->unbind) { 966 if (config->unbind) {
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index b4058f0000e4..6a1ce6a55158 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev)
281 dev->tx_queue_len = 1; 281 dev->tx_queue_len = 1;
282 282
283 dev->netdev_ops = &pn_netdev_ops; 283 dev->netdev_ops = &pn_netdev_ops;
284 dev->destructor = free_netdev;
284 dev->needs_free_netdev = true;
285 dev->header_ops = &phonet_header_ops; 285 dev->header_ops = &phonet_header_ops;
286} 286}
287 287
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index b9ca0a26cbd9..684900fcfe24 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd)
1183 1183
1184 /* closing ep0 === shutdown all */ 1184 /* closing ep0 === shutdown all */
1185 1185
1186 if (dev->gadget_registered)
1186 if (dev->gadget_registered) {
1187 usb_gadget_unregister_driver (&gadgetfs_driver); 1187 usb_gadget_unregister_driver (&gadgetfs_driver);
1188 dev->gadget_registered = false;
1189 }
1188 1190
1189 /* at this point "good" hardware has disconnected the 1191 /* at this point "good" hardware has disconnected the
1190 * device from USB; the host won't see it any more. 1192 * device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@ static void
1677gadgetfs_suspend (struct usb_gadget *gadget) 1679gadgetfs_suspend (struct usb_gadget *gadget)
1678{ 1680{
1679 struct dev_data *dev = get_gadget_data (gadget); 1681 struct dev_data *dev = get_gadget_data (gadget);
1682 unsigned long flags;
1680 1683
1681 INFO (dev, "suspended from state %d\n", dev->state); 1684 INFO (dev, "suspended from state %d\n", dev->state);
1682 spin_lock (&dev->lock);
1685 spin_lock_irqsave(&dev->lock, flags);
1683 switch (dev->state) { 1686 switch (dev->state) {
1684 case STATE_DEV_SETUP: // VERY odd... host died?? 1687 case STATE_DEV_SETUP: // VERY odd... host died??
1685 case STATE_DEV_CONNECTED: 1688 case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget)
1690 default: 1693 default:
1691 break; 1694 break;
1692 } 1695 }
1693 spin_unlock (&dev->lock);
1696 spin_unlock_irqrestore(&dev->lock, flags);
1694} 1697}
1695 1698
1696static struct usb_gadget_driver gadgetfs_driver = { 1699static struct usb_gadget_driver gadgetfs_driver = {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ccabb51cb98d..7635fd7cc328 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
442 /* Report reset and disconnect events to the driver */ 442 /* Report reset and disconnect events to the driver */
443 if (dum->driver && (disconnect || reset)) { 443 if (dum->driver && (disconnect || reset)) {
444 stop_activity(dum); 444 stop_activity(dum);
445 spin_unlock(&dum->lock);
446 if (reset) 445 if (reset)
447 usb_gadget_udc_reset(&dum->gadget, dum->driver); 446 usb_gadget_udc_reset(&dum->gadget, dum->driver);
448 else 447 else
449 dum->driver->disconnect(&dum->gadget); 448 dum->driver->disconnect(&dum->gadget);
450 spin_lock(&dum->lock);
451 } 449 }
452 } else if (dum_hcd->active != dum_hcd->old_active) { 450 } else if (dum_hcd->active != dum_hcd->old_active) {
453 if (dum_hcd->old_active && dum->driver->suspend) {
451 if (dum_hcd->old_active && dum->driver->suspend)
454 spin_unlock(&dum->lock);
455 dum->driver->suspend(&dum->gadget); 452 dum->driver->suspend(&dum->gadget);
456 spin_lock(&dum->lock); 453 else if (!dum_hcd->old_active && dum->driver->resume)
457 } else if (!dum_hcd->old_active && dum->driver->resume) {
458 spin_unlock(&dum->lock);
459 dum->driver->resume(&dum->gadget); 454 dum->driver->resume(&dum->gadget);
460 spin_lock(&dum->lock);
461 }
462 } 455 }
463 456
464 dum_hcd->old_status = dum_hcd->port_status; 457 dum_hcd->old_status = dum_hcd->port_status;
@@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g)
 	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
 	struct dummy		*dum = dum_hcd->dum;
 
+	spin_lock_irq(&dum->lock);
 	dum->driver = NULL;
+	spin_unlock_irq(&dum->lock);
 
 	return 0;
 }
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 6cf07857eaca..f2cbd7f8005e 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2470,11 +2470,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
 		nuke(&dev->ep[i]);
 
 	/* report disconnect; the driver is already quiesced */
-	if (driver) {
-		spin_unlock(&dev->lock);
+	if (driver)
 		driver->disconnect(&dev->gadget);
-		spin_lock(&dev->lock);
-	}
 
 	usb_reinit(dev);
 }
@@ -3348,8 +3345,6 @@ next_endpoints:
 		BIT(PCI_RETRY_ABORT_INTERRUPT))
 
 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
 {
 	struct net2280_ep	*ep;
 	u32			tmp, num, mask, scratch;
@@ -3390,14 +3385,12 @@ __acquires(dev->lock)
 		if (disconnect || reset) {
 			stop_activity(dev, dev->driver);
 			ep0_start(dev);
-			spin_unlock(&dev->lock);
 			if (reset)
 				usb_gadget_udc_reset
 					(&dev->gadget, dev->driver);
 			else
 				(dev->driver->disconnect)
 					(&dev->gadget);
-			spin_lock(&dev->lock);
 			return;
 		}
 	}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1f1687e888d6..fddf2731f798 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2119,11 +2119,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 {
 	u32 temp, port_offset, port_count;
 	int i;
-	u8 major_revision;
+	u8 major_revision, minor_revision;
 	struct xhci_hub *rhub;
 
 	temp = readl(addr);
 	major_revision = XHCI_EXT_PORT_MAJOR(temp);
+	minor_revision = XHCI_EXT_PORT_MINOR(temp);
 
 	if (major_revision == 0x03) {
 		rhub = &xhci->usb3_rhub;
@@ -2137,7 +2138,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 		return;
 	}
 	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
-	rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
+
+	if (rhub->min_rev < minor_revision)
+		rhub->min_rev = minor_revision;
 
 	/* Port offset and count in the third dword, see section 7.2 */
 	temp = readl(addr + 2);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fcf1f3f63e7a..1bcf971141c0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
 			pdev->device == 0x1042)
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+			pdev->device == 0x1142)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
 	if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
 		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 687ebb053438..41d7979d81c5 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1048,7 +1048,7 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
 
 	for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
 	     i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
-		if (PIXEL_CLOCK)
+		if (PIXEL_CLOCK != 0)
 			edt[num++] = block - edid;
 
 	/* Yikes, EDID data is totally useless */
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index ec2e7e353685..449fceaf79d5 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1646,8 +1646,9 @@ static int ufx_usb_probe(struct usb_interface *interface,
 	dev_dbg(dev->gdev, "%s %s - serial #%s\n",
 		usbdev->manufacturer, usbdev->product, usbdev->serial);
 	dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n",
-		usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
-		usbdev->descriptor.bcdDevice, dev);
+		le16_to_cpu(usbdev->descriptor.idVendor),
+		le16_to_cpu(usbdev->descriptor.idProduct),
+		le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
 	dev_dbg(dev->gdev, "console enable=%d\n", console);
 	dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio);
 
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 6a3c353de7c3..05ef657235df 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1105,8 +1105,8 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
 	char *bufptr;
 	struct urb *urb;
 
-	pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n",
+	pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n",
 		info->node, dev->blank_mode, blank_mode);
 
 	if ((dev->blank_mode == FB_BLANK_POWERDOWN) &&
 	    (blank_mode != FB_BLANK_POWERDOWN)) {
@@ -1613,8 +1613,9 @@ static int dlfb_usb_probe(struct usb_interface *interface,
 	pr_info("%s %s - serial #%s\n",
 		usbdev->manufacturer, usbdev->product, usbdev->serial);
 	pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
-		usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
-		usbdev->descriptor.bcdDevice, dev);
+		le16_to_cpu(usbdev->descriptor.idVendor),
+		le16_to_cpu(usbdev->descriptor.idProduct),
+		le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
 	pr_info("console enable=%d\n", console);
 	pr_info("fb_defio enable=%d\n", fb_defio);
 	pr_info("shadow enable=%d\n", shadow);
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f9718f012aae..badee04ef496 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
 }
 static void viafb_remove_proc(struct viafb_shared *shared)
 {
-	struct proc_dir_entry *viafb_entry = shared->proc_entry,
-		*iga1_entry = shared->iga1_proc_entry,
-		*iga2_entry = shared->iga2_proc_entry;
+	struct proc_dir_entry *viafb_entry = shared->proc_entry;
 
 	if (!viafb_entry)
 		return;
 
-	remove_proc_entry("output_devices", iga2_entry);
+	remove_proc_entry("output_devices", shared->iga2_proc_entry);
 	remove_proc_entry("iga2", viafb_entry);
-	remove_proc_entry("output_devices", iga1_entry);
+	remove_proc_entry("output_devices", shared->iga1_proc_entry);
 	remove_proc_entry("iga1", viafb_entry);
 	remove_proc_entry("supported_output_devices", viafb_entry);
 
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index a97fdc156a03..baacc1866861 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
 	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 retval;
 	int err;
 
 	shash->tfm = tfm;
@@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	retval = *ctx;
+	barrier_data(ctx);
+	return retval;
 }
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 987044bca1c2..59cb307b15fb 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -131,6 +131,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	}
 
 	if (new_mode != old_mode) {
+		newattrs.ia_ctime = current_time(inode);
 		newattrs.ia_mode = new_mode;
 		newattrs.ia_valid = ATTR_MODE;
 		ret = __ceph_setattr(inode, &newattrs);
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index e8f11fa565c5..7df550c13d7f 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -91,6 +91,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
 		ceph_mdsc_put_request(req);
 		if (!inode)
 			return ERR_PTR(-ESTALE);
+		if (inode->i_nlink == 0) {
+			iput(inode);
+			return ERR_PTR(-ESTALE);
+		}
 	}
 
 	return d_obtain_alias(inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index dcce79b84406..4de6cdddf059 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2022,7 +2022,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
 		    attr->ia_size > inode->i_size) {
 			i_size_write(inode, attr->ia_size);
 			inode->i_blocks = calc_inode_blocks(attr->ia_size);
-			inode->i_ctime = attr->ia_ctime;
 			ci->i_reported_size = attr->ia_size;
 			dirtied |= CEPH_CAP_FILE_EXCL;
 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
@@ -2044,7 +2043,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
 	     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
 	     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
 	     only ? "ctime only" : "ignored");
-	inode->i_ctime = attr->ia_ctime;
 	if (only) {
 		/*
 		 * if kernel wants to dirty ctime but nothing else,
@@ -2067,7 +2065,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
 	if (dirtied) {
 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
 							   &prealloc_cf);
-		inode->i_ctime = current_time(inode);
+		inode->i_ctime = attr->ia_ctime;
 	}
 
 	release &= issued;
@@ -2085,6 +2083,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
 		req->r_inode_drop = release;
 		req->r_args.setattr.mask = cpu_to_le32(mask);
 		req->r_num_caps = 1;
+		req->r_stamp = attr->ia_ctime;
 		err = ceph_mdsc_do_request(mdsc, NULL, req);
 	}
 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f38e56fa9712..0c05df44cc6c 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1687,7 +1687,6 @@ struct ceph_mds_request *
 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 {
 	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
-	struct timespec ts;
 
 	if (!req)
 		return ERR_PTR(-ENOMEM);
@@ -1706,8 +1705,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 	init_completion(&req->r_safe_completion);
 	INIT_LIST_HEAD(&req->r_unsafe_item);
 
-	ktime_get_real_ts(&ts);
-	req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran);
+	req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran);
 
 	req->r_op = op;
 	req->r_direct_mode = mode;
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 8b2a994042dd..a66f6624d899 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -138,6 +138,14 @@ struct config_item *config_item_get(struct config_item *item)
 }
 EXPORT_SYMBOL(config_item_get);
 
+struct config_item *config_item_get_unless_zero(struct config_item *item)
+{
+	if (item && kref_get_unless_zero(&item->ci_kref))
+		return item;
+	return NULL;
+}
+EXPORT_SYMBOL(config_item_get_unless_zero);
+
 static void config_item_cleanup(struct config_item *item)
 {
 	struct config_item_type *t = item->ci_type;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index a6ab012a2c6a..c8aabba502f6 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item,
 	ret = -ENOMEM;
 	sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
 	if (sl) {
-		sl->sl_target = config_item_get(item);
 		spin_lock(&configfs_dirent_lock);
 		if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
 			spin_unlock(&configfs_dirent_lock);
-			config_item_put(item);
 			kfree(sl);
 			return -ENOENT;
 		}
+		sl->sl_target = config_item_get(item);
 		list_add(&sl->sl_list, &target_sd->s_links);
 		spin_unlock(&configfs_dirent_lock);
 		ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/dcache.c b/fs/dcache.c
index cddf39777835..a9f995f6859e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1494,7 +1494,7 @@ static void check_and_drop(void *_data)
 {
 	struct detach_data *data = _data;
 
-	if (!data->mountpoint && !data->select.found)
+	if (!data->mountpoint && list_empty(&data->select.dispose))
 		__d_drop(data->select.start);
 }
 
@@ -1536,17 +1536,15 @@ void d_invalidate(struct dentry *dentry)
 
 		d_walk(dentry, &data, detach_and_collect, check_and_drop);
 
-		if (data.select.found)
+		if (!list_empty(&data.select.dispose))
 			shrink_dentry_list(&data.select.dispose);
+		else if (!data.mountpoint)
+			return;
 
 		if (data.mountpoint) {
 			detach_mounts(data.mountpoint);
 			dput(data.mountpoint);
 		}
-
-		if (!data.mountpoint && !data.select.found)
-			break;
-
 		cond_resched();
 	}
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2185c7a040a1..fd2e651bad6d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1078,6 +1078,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
 {
 	SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
 	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 retval;
 	int err;
 
 	shash->tfm = sbi->s_chksum_driver;
@@ -1087,7 +1088,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	retval = *ctx;
+	barrier_data(ctx);
+	return retval;
 }
 
 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
diff --git a/fs/namespace.c b/fs/namespace.c
index 8bd3e4d448b9..5a4438445bf7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3488,6 +3488,8 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
 		return err;
 	}
 
+	put_mnt_ns(old_mnt_ns);
+
 	/* Update the pwd and root */
 	set_fs_pwd(fs, &root);
 	set_fs_root(fs, &root);
diff --git a/fs/read_write.c b/fs/read_write.c
index 47c1d4484df9..19d4d88fa285 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1285,7 +1285,7 @@ static size_t compat_writev(struct file *file,
 	if (!(file->f_mode & FMODE_CAN_WRITE))
 		goto out;
 
-	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
+	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags);
 
 out:
 	if (ret > 0)
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index d642cc0a8271..0315fea1d589 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -400,10 +400,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	/*
 	 * There is not enough space for user on the device
 	 */
-	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
-		mutex_unlock(&UFS_SB(sb)->s_lock);
-		UFSD("EXIT (FAILED)\n");
-		return 0;
+	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
+		if (!capable(CAP_SYS_RESOURCE)) {
+			mutex_unlock(&UFS_SB(sb)->s_lock);
+			UFSD("EXIT (FAILED)\n");
+			return 0;
+		}
 	}
 
 	if (goal >= uspi->s_size)
@@ -421,12 +423,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	if (result) {
 		ufs_clear_frags(inode, result + oldcount,
 				newcount - oldcount, locked_page != NULL);
+		*err = 0;
 		write_seqlock(&UFS_I(inode)->meta_lock);
 		ufs_cpu_to_data_ptr(sb, p, result);
-		write_sequnlock(&UFS_I(inode)->meta_lock);
-		*err = 0;
 		UFS_I(inode)->i_lastfrag =
 			max(UFS_I(inode)->i_lastfrag, fragment + count);
+		write_sequnlock(&UFS_I(inode)->meta_lock);
 	}
 	mutex_unlock(&UFS_SB(sb)->s_lock);
 	UFSD("EXIT, result %llu\n", (unsigned long long)result);
@@ -439,8 +441,10 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
 	if (result) {
 		*err = 0;
+		read_seqlock_excl(&UFS_I(inode)->meta_lock);
 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
 						fragment + count);
+		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
 				locked_page != NULL);
 		mutex_unlock(&UFS_SB(sb)->s_lock);
@@ -474,16 +478,16 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	if (result) {
 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
 				locked_page != NULL);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
 				   uspi->s_sbbase + tmp,
 				   uspi->s_sbbase + result, locked_page);
+		*err = 0;
 		write_seqlock(&UFS_I(inode)->meta_lock);
 		ufs_cpu_to_data_ptr(sb, p, result);
-		write_sequnlock(&UFS_I(inode)->meta_lock);
-		*err = 0;
 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
 						fragment + count);
-		mutex_unlock(&UFS_SB(sb)->s_lock);
+		write_sequnlock(&UFS_I(inode)->meta_lock);
 		if (newcount < request)
 			ufs_free_fragments (inode, result + newcount, request - newcount);
 		ufs_free_fragments (inode, tmp, oldcount);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index da553ffec85b..9f4590261134 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -401,13 +401,20 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
 	u64 phys64 = 0;
 	unsigned frag = fragment & uspi->s_fpbmask;
 
-	if (!create) {
-		phys64 = ufs_frag_map(inode, offsets, depth);
-		if (phys64)
-			map_bh(bh_result, sb, phys64 + frag);
-		return 0;
-	}
+	phys64 = ufs_frag_map(inode, offsets, depth);
+	if (!create)
+		goto done;
 
+	if (phys64) {
+		if (fragment >= UFS_NDIR_FRAGMENT)
+			goto done;
+		read_seqlock_excl(&UFS_I(inode)->meta_lock);
+		if (fragment < UFS_I(inode)->i_lastfrag) {
+			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+			goto done;
+		}
+		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+	}
 	/* This code entered only while writing ....? */
 
 	mutex_lock(&UFS_I(inode)->truncate_mutex);
@@ -451,6 +458,11 @@ out:
 	}
 	mutex_unlock(&UFS_I(inode)->truncate_mutex);
 	return err;
+
+done:
+	if (phys64)
+		map_bh(bh_result, sb, phys64 + frag);
+	return 0;
 }
 
 static int ufs_writepage(struct page *page, struct writeback_control *wbc)
@@ -874,7 +886,6 @@ static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
 	ctx->to = from + count;
 }
 
-#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
 
 static void ufs_trunc_direct(struct inode *inode)
@@ -1112,19 +1123,24 @@ static void ufs_truncate_blocks(struct inode *inode)
 	struct super_block *sb = inode->i_sb;
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 	unsigned offsets[4];
-	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
+	int depth;
 	int depth2;
 	unsigned i;
 	struct ufs_buffer_head *ubh[3];
 	void *p;
 	u64 block;
 
-	if (!depth)
-		return;
+	if (inode->i_size) {
+		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
+		depth = ufs_block_to_path(inode, last, offsets);
+		if (!depth)
+			return;
+	} else {
+		depth = 1;
+	}
 
-	/* find the last non-zero in offsets[] */
 	for (depth2 = depth - 1; depth2; depth2--)
-		if (offsets[depth2])
+		if (offsets[depth2] != uspi->s_apb - 1)
 			break;
 
 	mutex_lock(&ufsi->truncate_mutex);
@@ -1133,9 +1149,8 @@ static void ufs_truncate_blocks(struct inode *inode)
 		offsets[0] = UFS_IND_BLOCK;
 	} else {
 		/* get the blocks that should be partially emptied */
-		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
+		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
 		for (i = 0; i < depth2; i++) {
-			offsets[i]++; /* next branch is fully freed */
 			block = ufs_data_ptr_to_cpu(sb, p);
 			if (!block)
 				break;
@@ -1146,7 +1161,7 @@ static void ufs_truncate_blocks(struct inode *inode)
 				write_sequnlock(&ufsi->meta_lock);
 				break;
 			}
-			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
+			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
 		}
 		while (i--)
 			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
@@ -1161,7 +1176,9 @@ static void ufs_truncate_blocks(struct inode *inode)
 			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
 		}
 	}
+	read_seqlock_excl(&ufsi->meta_lock);
 	ufsi->i_lastfrag = DIRECT_FRAGMENT;
+	read_sequnlock_excl(&ufsi->meta_lock);
 	mark_inode_dirty(inode);
 	mutex_unlock(&ufsi->truncate_mutex);
 }
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 878cc6264f1a..d5300adbfd79 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -480,7 +480,7 @@ static void ufs_setup_cstotal(struct super_block *sb)
 	usb3 = ubh_get_usb_third(uspi);
 
 	if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-	     (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
+	     (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) ||
 	    mtype == UFS_MOUNT_UFSTYPE_UFS2) {
 		/*we have statistic in different place, then usual*/
 		uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir);
@@ -596,9 +596,7 @@ static void ufs_put_cstotal(struct super_block *sb)
 	usb2 = ubh_get_usb_second(uspi);
 	usb3 = ubh_get_usb_third(uspi);
 
-	if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-	     (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
-	    mtype == UFS_MOUNT_UFSTYPE_UFS2) {
+	if (mtype == UFS_MOUNT_UFSTYPE_UFS2) {
 		/*we have statistic in different place, then usual*/
 		usb2->fs_un.fs_u2.cs_ndir =
 			cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
@@ -608,16 +606,26 @@ static void ufs_put_cstotal(struct super_block *sb)
 			cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
 		usb3->fs_un1.fs_u2.cs_nffree =
 			cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
-	} else {
-		usb1->fs_cstotal.cs_ndir =
-			cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
-		usb1->fs_cstotal.cs_nbfree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
-		usb1->fs_cstotal.cs_nifree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
-		usb1->fs_cstotal.cs_nffree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+		goto out;
+	}
+
+	if (mtype == UFS_MOUNT_UFSTYPE_44BSD &&
+	    (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) {
+		/* store stats in both old and new places */
+		usb2->fs_un.fs_u2.cs_ndir =
+			cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
+		usb2->fs_un.fs_u2.cs_nbfree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
+		usb3->fs_un1.fs_u2.cs_nifree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
+		usb3->fs_un1.fs_u2.cs_nffree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
 	}
+	usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
+	usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
+	usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
+	usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+out:
 	ubh_mark_buffer_dirty(USPI_UBH(uspi));
 	ufs_print_super_stuff(sb, usb1, usb2, usb3);
 	UFSD("EXIT\n");
@@ -996,6 +1004,13 @@ again:
 		flags |= UFS_ST_SUN;
 	}
 
+	if ((flags & UFS_ST_MASK) == UFS_ST_44BSD &&
+	    uspi->s_postblformat == UFS_42POSTBLFMT) {
+		if (!silent)
+			pr_err("this is not a 44bsd filesystem");
+		goto failed;
+	}
+
 	/*
 	 * Check ufs magic number
 	 */
@@ -1143,8 +1158,8 @@ magic_found:
 	uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
 
 	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
-		uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
-		uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
+		uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
+		uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
 	} else {
 		uspi->s_size = fs32_to_cpu(sb, usb1->fs_size);
 		uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize);
@@ -1193,6 +1208,9 @@ magic_found:
 	uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
 	uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);
 
+	uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
+					      uspi->s_minfree, 100);
+
 	/*
 	 * Compute another frequently used values
 	 */
@@ -1382,19 +1400,17 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	mutex_lock(&UFS_SB(sb)->s_lock);
 	usb3 = ubh_get_usb_third(uspi);
 
-	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 		buf->f_type = UFS2_MAGIC;
-		buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
-	} else {
+	else
 		buf->f_type = UFS_MAGIC;
-		buf->f_blocks = uspi->s_dsize;
-	}
-	buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-		uspi->cs_total.cs_nffree;
+
+	buf->f_blocks = uspi->s_dsize;
+	buf->f_bfree = ufs_freefrags(uspi);
 	buf->f_ffree = uspi->cs_total.cs_nifree;
 	buf->f_bsize = sb->s_blocksize;
-	buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
-		? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
+	buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks)
+		? (buf->f_bfree - uspi->s_root_blocks) : 0;
 	buf->f_files = uspi->s_ncg * uspi->s_ipg;
 	buf->f_namelen = UFS_MAXNAMLEN;
 	buf->f_fsid.val[0] = (u32)id;
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 0cbd5d340b67..823d55a37586 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -733,10 +733,8 @@ struct ufs_sb_private_info {
 	__u32	s_dblkno;	/* offset of first data after cg */
 	__u32	s_cgoffset;	/* cylinder group offset in cylinder */
 	__u32	s_cgmask;	/* used to calc mod fs_ntrak */
-	__u32	s_size;		/* number of blocks (fragments) in fs */
-	__u32	s_dsize;	/* number of data blocks in fs */
-	__u64	s_u2_size;	/* ufs2: number of blocks (fragments) in fs */
-	__u64	s_u2_dsize;	/*ufs2: number of data blocks in fs */
+	__u64	s_size;		/* number of blocks (fragments) in fs */
+	__u64	s_dsize;	/* number of data blocks in fs */
 	__u32	s_ncg;		/* number of cylinder groups */
 	__u32	s_bsize;	/* size of basic blocks */
 	__u32	s_fsize;	/* size of fragments */
@@ -793,6 +791,7 @@ struct ufs_sb_private_info {
 	__u32	s_maxsymlinklen;/* upper limit on fast symlinks' size */
 	__s32	fs_magic;	/* filesystem magic */
 	unsigned int s_dirblksize;
+	__u64   s_root_blocks;
 };
 
 /*
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index f41ad0a6106f..02497a492eb2 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -243,9 +243,8 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev
 struct page *ufs_get_locked_page(struct address_space *mapping,
 				 pgoff_t index)
 {
-	struct page *page;
-
-	page = find_lock_page(mapping, index);
+	struct inode *inode = mapping->host;
+	struct page *page = find_lock_page(mapping, index);
 	if (!page) {
 		page = read_mapping_page(mapping, index, NULL);
 
@@ -253,7 +252,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 			printk(KERN_ERR "ufs_change_blocknr: "
 			       "read_mapping_page error: ino %lu, index: %lu\n",
 			       mapping->host->i_ino, index);
-			goto out;
+			return page;
 		}
 
 		lock_page(page);
@@ -262,8 +261,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 			/* Truncate got there first */
 			unlock_page(page);
 			put_page(page);
-			page = NULL;
-			goto out;
+			return NULL;
 		}
 
 		if (!PageUptodate(page) || PageError(page)) {
@@ -272,11 +270,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 
 			printk(KERN_ERR "ufs_change_blocknr: "
 			       "can not read page: ino %lu, index: %lu\n",
-			       mapping->host->i_ino, index);
+			       inode->i_ino, index);
 
-			page = ERR_PTR(-EIO);
+			return ERR_PTR(-EIO);
 		}
 	}
-out:
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 	return page;
 }
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 398019fb1448..9fc7119a1551 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -350,16 +350,11 @@ static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
 #define ubh_blkmap(ubh,begin,bit) \
 	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
 
-/*
- * Determine the number of available frags given a
- * percentage to hold in reserve.
- */
 static inline u64
-ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
+ufs_freefrags(struct ufs_sb_private_info *uspi)
 {
 	return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-		uspi->cs_total.cs_nffree -
-		(uspi->s_dsize * (percentreserved) / 100);
+		uspi->cs_total.cs_nffree;
 }
 
 /*
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f7555fc25877..1d622f276e3a 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -340,9 +340,28 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	bool must_wait, return_to_userland;
 	long blocking_state;
 
-	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	ret = VM_FAULT_SIGBUS;
+
+	/*
+	 * We don't do userfault handling for the final child pid update.
+	 *
+	 * We also don't do userfault handling during
+	 * coredumping. hugetlbfs has the special
+	 * follow_hugetlb_page() to skip missing pages in the
+	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
+	 * the no_page_table() helper in follow_page_mask(), but the
+	 * shmem_vm_ops->fault method is invoked even during
+	 * coredumping without mmap_sem and it ends up here.
+	 */
+	if (current->flags & (PF_EXITING|PF_DUMPCORE))
+		goto out;
+
+	/*
+	 * Coredumping runs without mmap_sem so we can only check that
+	 * the mmap_sem is held, if PF_DUMPCORE was not set.
+	 */
+	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
 		goto out;
@@ -361,12 +380,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 		goto out;
 
 	/*
-	 * We don't do userfault handling for the final child pid update.
-	 */
-	if (current->flags & PF_EXITING)
-		goto out;
-
-	/*
 	 * Check that we can return VM_FAULT_RETRY.
 	 *
 	 * NOTE: it should become possible to return VM_FAULT_RETRY
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 07b77b73b024..16d6a578fc16 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -117,7 +117,7 @@ static inline void
 __xfs_buf_ioacct_dec(
 	struct xfs_buf	*bp)
 {
-	ASSERT(spin_is_locked(&bp->b_lock));
+	lockdep_assert_held(&bp->b_lock);
 
 	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
 		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index f61c84f8e31a..990210fcb9c3 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -66,7 +66,6 @@ xfs_inode_alloc(
 
 	XFS_STATS_INC(mp, vn_active);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(!xfs_isiflocked(ip));
 	ASSERT(ip->i_ino == 0);
 
@@ -190,7 +189,7 @@ xfs_perag_set_reclaim_tag(
 {
 	struct xfs_mount	*mp = pag->pag_mount;
 
-	ASSERT(spin_is_locked(&pag->pag_ici_lock));
+	lockdep_assert_held(&pag->pag_ici_lock);
 	if (pag->pag_ici_reclaimable++)
 		return;
 
@@ -212,7 +211,7 @@ xfs_perag_clear_reclaim_tag(
 {
 	struct xfs_mount	*mp = pag->pag_mount;
 
-	ASSERT(spin_is_locked(&pag->pag_ici_lock));
+	lockdep_assert_held(&pag->pag_ici_lock);
 	if (--pag->pag_ici_reclaimable)
 		return;
 
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index d92543f3bbfd..bdc55c0da19c 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -374,6 +374,20 @@ struct acpi_table_desc {
 	u16				validation_count;
 };
 
+/*
+ * Maximum value of the validation_count field in struct acpi_table_desc.
+ * When reached, validation_count cannot be changed any more and the table will
+ * be permanently regarded as validated.
+ *
+ * This is to prevent situations in which unbalanced table get/put operations
+ * may cause premature table unmapping in the OS to happen.
+ *
+ * The maximum validation count can be defined to any value, but should be
+ * greater than the maximum number of OS early stage mapping slots to avoid
+ * leaking early stage table mappings to the late stage.
+ */
+#define ACPI_MAX_TABLE_VALIDATIONS          ACPI_UINT16_MAX
+
 /* Masks for Flags field above */
 
 #define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL  (0) /* Virtual address, external maintained */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ab92c4ea138b..b74a3edcb3da 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -586,6 +586,8 @@ struct request_queue {
 
 	size_t			cmd_size;
 	void			*rq_alloc_data;
+
+	struct work_struct	release_work;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 2319b8c108e8..c96709049683 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -74,7 +74,8 @@ extern void config_item_init_type_name(struct config_item *item,
 					const char *name,
 					struct config_item_type *type);
 
-extern struct config_item * config_item_get(struct config_item *);
+extern struct config_item *config_item_get(struct config_item *);
+extern struct config_item *config_item_get_unless_zero(struct config_item *);
 extern void config_item_put(struct config_item *);
 
 struct config_item_type {
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 5e9c74cf8894..9bbf21a516e4 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -136,7 +136,7 @@ static inline int dmi_name_in_vendors(const char *s) { return 0; }
 static inline int dmi_name_in_serial(const char *s) { return 0; }
 #define dmi_available 0
 static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
-	void *private_data) { return -1; }
+	void *private_data) { return -ENXIO; }
 static inline bool dmi_match(enum dmi_field f, const char *str)
 	{ return false; }
 static inline void dmi_memdev_name(u16 handle, const char **bank,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3f39d27decf4..4ed952c17fc7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -914,8 +914,7 @@ struct xfrmdev_ops {
  *
  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  *	Called when a user wants to change the Maximum Transfer Unit
- *	of a device. If not defined, any request to change MTU will
- *	will return an error.
+ *	of a device.
  *
  * void (*ndo_tx_timeout)(struct net_device *dev);
  *	Callback used when the transmitter has not made any progress
@@ -1596,8 +1595,8 @@ enum netdev_priv_flags {
  *	@rtnl_link_state:	This enum represents the phases of creating
  *				a new link
  *
- *	@destructor:		Called from unregister,
- *				can be used to call free_netdev
+ *	@needs_free_netdev:	Should unregister perform free_netdev?
+ *	@priv_destructor:	Called from unregister
  *	@npinfo:	XXX: need comments on this one
  *	@nd_net:		Network namespace this network device is inside
  *
@@ -1858,7 +1857,8 @@ struct net_device {
 		RTNL_LINK_INITIALIZING,
 	} rtnl_link_state:16;
 
-	void (*destructor)(struct net_device *dev);
+	bool needs_free_netdev;
+	void (*priv_destructor)(struct net_device *dev);
 
 #ifdef CONFIG_NETPOLL
 	struct netpoll_info __rcu	*npinfo;
@@ -4261,6 +4261,11 @@ static inline const char *netdev_name(const struct net_device *dev)
 	return dev->name;
 }
 
+static inline bool netdev_unregistering(const struct net_device *dev)
+{
+	return dev->reg_state == NETREG_UNREGISTERING;
+}
+
 static inline const char *netdev_reg_state(const struct net_device *dev)
 {
 	switch (dev->reg_state) {
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index 413335c8cb52..298f996969df 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -106,6 +106,16 @@ static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
 {
 }
 
+static inline void cec_notifier_register(struct cec_notifier *n,
+			 struct cec_adapter *adap,
+			 void (*callback)(struct cec_adapter *adap, u16 pa))
+{
+}
+
+static inline void cec_notifier_unregister(struct cec_notifier *n)
+{
+}
+
 #endif
 
 #endif
diff --git a/include/media/cec.h b/include/media/cec.h
index bfa88d4d67e1..201f060978da 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -206,7 +206,7 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
 #define cec_phys_addr_exp(pa) \
 	((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
 
-#if IS_ENABLED(CONFIG_CEC_CORE)
+#if IS_REACHABLE(CONFIG_CEC_CORE)
 struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
 		void *priv, const char *name, u32 caps, u8 available_las);
 int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index d179d7767f51..7d4a594d5d58 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1486,8 +1486,10 @@ enum ethtool_link_mode_bit_indices {
  * it was forced up into this mode or autonegotiated.
  */
 
-/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */
-/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */
+/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal.
+ * Update drivers/net/phy/phy.c:phy_speed_to_str() and
+ * drivers/net/bonding/bond_3ad.c:__get_link_speed() when adding new values.
+ */
 #define SPEED_10		10
 #define SPEED_100		100
 #define SPEED_1000		1000
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 61b7d36dfe34..156ee4cab82e 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -343,6 +343,7 @@ enum ovs_key_attr {
 #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
 
 enum ovs_tunnel_key_attr {
+	/* OVS_TUNNEL_KEY_ATTR_NONE, standard nl API requires this attribute! */
 	OVS_TUNNEL_KEY_ATTR_ID,                 /* be64 Tunnel ID */
 	OVS_TUNNEL_KEY_ATTR_IPV4_SRC,           /* be32 src IP address. */
 	OVS_TUNNEL_KEY_ATTR_IPV4_DST,           /* be32 dst IP address. */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 070be980c37a..425170d4439b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1312,8 +1312,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		ret = __irq_set_trigger(desc,
 					new->flags & IRQF_TRIGGER_MASK);
 
-		if (ret)
+		if (ret) {
+			irq_release_resources(desc);
 			goto out_mask;
+		}
 	}
 
 	desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 803c3bc274c4..326d4f88e2b1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5605,7 +5605,7 @@ void idle_task_exit(void)
 	BUG_ON(cpu_online(smp_processor_id()));
 
 	if (mm != &init_mm) {
-		switch_mm_irqs_off(mm, &init_mm, current);
+		switch_mm(mm, &init_mm, current);
 		finish_arch_post_lock_switch();
 	}
 	mmdrop(mm);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 622eed1b7658..076a2e31951c 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -101,9 +101,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 	if (sg_policy->next_freq == next_freq)
 		return;
 
-	if (sg_policy->next_freq > next_freq)
-		next_freq = (sg_policy->next_freq + next_freq) >> 1;
-
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d71109321841..c77e4b1d51c0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3563,7 +3563,7 @@ static inline void check_schedstat_required(void)
 			trace_sched_stat_runtime_enabled())  {
 		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
 			     "stat_blocked and stat_runtime require the "
-			     "kernel parameter schedstats=enabled or "
+			     "kernel parameter schedstats=enable or "
 			     "kernel.sched_schedstats=1\n");
 	}
 #endif
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 5cb5b0008d97..ee2f4202d82a 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -387,7 +387,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start)
 {
 	struct alarm_base *base = &alarm_bases[alarm->type];
 
-	start = ktime_add(start, base->gettime());
+	start = ktime_add_safe(start, base->gettime());
 	alarm_start(alarm, start);
 }
 EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -475,7 +475,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
 		overrun++;
 	}
 
-	alarm->node.expires = ktime_add(alarm->node.expires, interval);
+	alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
 	return overrun;
 }
 EXPORT_SYMBOL_GPL(alarm_forward);
@@ -660,13 +660,21 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 
 	/* start the timer */
 	timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval);
+
+	/*
+	 * Rate limit to the tick as a hot fix to prevent DOS. Will be
+	 * mopped up later.
+	 */
+	if (timr->it.alarm.interval < TICK_NSEC)
+		timr->it.alarm.interval = TICK_NSEC;
+
 	exp = timespec64_to_ktime(new_setting->it_value);
 	/* Convert (if necessary) to absolute time */
 	if (flags != TIMER_ABSTIME) {
 		ktime_t now;
 
 		now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
-		exp = ktime_add(now, exp);
+		exp = ktime_add_safe(now, exp);
 	}
 
 	alarm_start(&timr->it.alarm.alarmtimer, exp);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 987e496bb51a..b398c2ea69b2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -37,9 +37,11 @@ static int tick_broadcast_forced;
 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 
 #ifdef CONFIG_TICK_ONESHOT
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 static void tick_broadcast_clear_oneshot(int cpu);
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 #else
+static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_clear_oneshot(int cpu) { }
 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
 #endif
@@ -867,7 +869,7 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
 /**
  * tick_broadcast_setup_oneshot - setup the broadcast device
  */
-void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
 	int cpu = smp_processor_id();
 
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f738251000fe..be0ac01f2e12 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -126,7 +126,6 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 
 /* Functions related to oneshot broadcasting */
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
-extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
 extern int tick_broadcast_oneshot_active(void);
@@ -134,7 +133,6 @@ extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 #else /* !(BROADCAST && ONESHOT): */
-static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7f2562..9f79547d1b97 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm;
 u32 crc32c(u32 crc, const void *address, unsigned int length)
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
-	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
 	int err;
 
 	shash->tfm = tfm;
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	ret = *ctx;
+	barrier_data(ctx);
+	return ret;
 }
 
 EXPORT_SYMBOL(crc32c);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a84909cf20d3..88c6167f194d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1426,8 +1426,11 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	 */
 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
 		page = pmd_page(*vmf->pmd);
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
+		put_page(page);
 		goto out;
 	}
 
@@ -1459,9 +1462,12 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
+		page_nid = -1;
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
-		page_nid = -1;
+		put_page(page);
 		goto out;
 	}
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 342fac9ba89b..ecc183fd94f3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1184,7 +1184,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
 	 * correctly, we save a copy of the page flags at this time.
 	 */
-	page_flags = p->flags;
+	if (PageHuge(p))
+		page_flags = hpage->flags;
+	else
+		page_flags = p->flags;
 
 	/*
 	 * unpoison always clear PG_hwpoison inside page lock
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index ac6318a064d3..3405b4ee1757 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
 		if (!page)
 			goto not_enough_page;
 		ctrl->map[idx] = page;
+
+		if (!(idx % SWAP_CLUSTER_MAX))
+			cond_resched();
 	}
 	return 0;
 not_enough_page:
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 6063581f705c..ce0618bfa8d0 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -115,9 +115,9 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
 	unsigned long pressure = 0;
 
 	/*
-	 * reclaimed can be greater than scanned in cases
-	 * like THP, where the scanned is 1 and reclaimed
-	 * could be 512
+	 * reclaimed can be greater than scanned for things such as reclaimed
+	 * slab pages. shrink_node() just adds reclaimed pages without a
+	 * related increment to scanned pages.
 	 */
 	if (reclaimed >= scanned)
 		goto out;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 953b6728bd00..abc5f400fc71 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -813,7 +813,6 @@ static void vlan_dev_free(struct net_device *dev)
 
 	free_percpu(vlan->vlan_pcpu_stats);
 	vlan->vlan_pcpu_stats = NULL;
-	free_netdev(dev);
 }
 
 void vlan_setup(struct net_device *dev)
@@ -826,7 +825,8 @@ void vlan_setup(struct net_device *dev)
 	netif_keep_dst(dev);
 
 	dev->netdev_ops = &vlan_netdev_ops;
-	dev->destructor = vlan_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = vlan_dev_free;
 	dev->ethtool_ops = &vlan_ethtool_ops;
 
 	dev->min_mtu = 0;
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 013e970eff39..000ca2f113ab 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1064,8 +1064,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 
 	skb_new->protocol = eth_type_trans(skb_new, soft_iface);
 
-	soft_iface->stats.rx_packets++;
-	soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
+	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+			   skb->len + ETH_HLEN + hdr_size);
 
 	netif_rx(skb_new);
 	batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index e1ebe14ee2a6..ae9f4d37d34f 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -987,7 +987,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 			batadv_dbg(BATADV_DBG_BLA, bat_priv,
 				   "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n",
 				   orig_addr_gw);
-			return NET_RX_DROP;
+			goto free_skb;
 		}
 	}
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b25789abf7b9..10f7edfb176e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1034,8 +1034,6 @@ static void batadv_softif_free(struct net_device *dev)
 	 * netdev and its private data (bat_priv)
 	 */
 	rcu_barrier();
-
-	free_netdev(dev);
 }
 
 /**
@@ -1047,7 +1045,8 @@ static void batadv_softif_init_early(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &batadv_netdev_ops;
-	dev->destructor = batadv_softif_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = batadv_softif_free;
 	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
 	dev->priv_flags |= IFF_NO_QUEUE;
 
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 608959989f8e..ab3b654b05cc 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -598,7 +598,7 @@ static void netdev_setup(struct net_device *dev)
 
 	dev->netdev_ops		= &netdev_ops;
 	dev->header_ops		= &header_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 static struct device_type bt_type = {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 430b53e7d941..f0f3447e8aa4 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &br_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->ethtool_ops = &br_ethtool_ops;
 	SET_NETDEV_DEVTYPE(dev, &br_type);
 	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index adcad344c843..21f18ea2fce4 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -754,6 +754,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 
 	lock_sock(sk);
 
+	err = -EINVAL;
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		goto out;
+
 	err = -EAFNOSUPPORT;
 	if (uaddr->sa_family != AF_CAIF)
 		goto out;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fcc220c..71b6ab240dea 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
 {
 	struct sk_buff *skb;
 
-	if (likely(in_interrupt()))
-		skb = alloc_skb(len + pfx, GFP_ATOMIC);
-	else
-		skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+	skb = alloc_skb(len + pfx, GFP_ATOMIC);
 	if (unlikely(skb == NULL))
 		return NULL;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 1816fc9f1ee7..fe3c53efb949 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev)
 {
 	struct chnl_net *priv = netdev_priv(dev);
 	caif_free_client(&priv->chnl);
-	free_netdev(dev);
 }
 
 static void ipcaif_net_setup(struct net_device *dev)
 {
 	struct chnl_net *priv;
 	dev->netdev_ops = &netdev_ops;
-	dev->destructor = chnl_net_destructor;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = chnl_net_destructor;
 	dev->flags |= IFF_NOARP;
 	dev->flags |= IFF_POINTOPOINT;
 	dev->mtu = GPRS_PDP_MTU;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index b6406fe33c76..88edac0f3e36 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -872,8 +872,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
 
 static int can_pernet_init(struct net *net)
 {
-	net->can.can_rcvlists_lock =
-		__SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock);
+	spin_lock_init(&net->can.can_rcvlists_lock);
 	net->can.can_rx_alldev_list =
 		kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index fca407b4a6ea..6d60149287a1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1253,8 +1253,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 	if (!new_ifalias)
 		return -ENOMEM;
 	dev->ifalias = new_ifalias;
+	memcpy(dev->ifalias, alias, len);
+	dev->ifalias[len] = 0;
 
-	strlcpy(dev->ifalias, alias, len+1);
 	return len;
 }
 
@@ -4948,6 +4949,19 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(__skb_gro_checksum_complete);
 
+static void net_rps_send_ipi(struct softnet_data *remsd)
+{
+#ifdef CONFIG_RPS
+	while (remsd) {
+		struct softnet_data *next = remsd->rps_ipi_next;
+
+		if (cpu_online(remsd->cpu))
+			smp_call_function_single_async(remsd->cpu, &remsd->csd);
+		remsd = next;
+	}
+#endif
+}
+
 /*
  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  * Note: called with local irq disabled, but exits with local irq enabled.
@@ -4963,14 +4977,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 		local_irq_enable();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
-		while (remsd) {
-			struct softnet_data *next = remsd->rps_ipi_next;
-
-			if (cpu_online(remsd->cpu))
-				smp_call_function_single_async(remsd->cpu,
-							   &remsd->csd);
-			remsd = next;
-		}
+		net_rps_send_ipi(remsd);
 	} else
 #endif
 		local_irq_enable();
@@ -7501,6 +7508,8 @@ out:
 err_uninit:
 	if (dev->netdev_ops->ndo_uninit)
 		dev->netdev_ops->ndo_uninit(dev);
+	if (dev->priv_destructor)
+		dev->priv_destructor(dev);
 	goto out;
 }
 EXPORT_SYMBOL(register_netdevice);
@@ -7708,8 +7717,10 @@ void netdev_run_todo(void)
 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
 		WARN_ON(dev->dn_ptr);
 
-		if (dev->destructor)
-			dev->destructor(dev);
+		if (dev->priv_destructor)
+			dev->priv_destructor(dev);
+		if (dev->needs_free_netdev)
+			free_netdev(dev);
 
 		/* Report a network device has been unregistered */
 		rtnl_lock();
@@ -8192,7 +8203,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
 	struct sk_buff **list_skb;
 	struct sk_buff *skb;
 	unsigned int cpu;
-	struct softnet_data *sd, *oldsd;
+	struct softnet_data *sd, *oldsd, *remsd = NULL;
 
 	local_irq_disable();
 	cpu = smp_processor_id();
@@ -8233,6 +8244,13 @@ static int dev_cpu_dead(unsigned int oldcpu)
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
 
+#ifdef CONFIG_RPS
+	remsd = oldsd->rps_ipi_list;
+	oldsd->rps_ipi_list = NULL;
+#endif
+	/* send out pending IPI's on offline CPU */
+	net_rps_send_ipi(remsd);
+
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
 		netif_rx_ni(skb);
diff --git a/net/core/dst.c b/net/core/dst.c
index 6192f11beec9..13ba4a090c41 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -469,6 +469,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 	spin_lock_bh(&dst_garbage.lock);
 	dst = dst_garbage.list;
 	dst_garbage.list = NULL;
+	/* The code in dst_ifdown places a hold on the loopback device.
+	 * If the gc entry processing is set to expire after a lengthy
+	 * interval, this hold can cause netdev_wait_allrefs() to hang
+	 * out and wait for a long time -- until the the loopback
+	 * interface is released.  If we're really unlucky, it'll emit
+	 * pr_emerg messages to console too.  Reset the interval here,
+	 * so dst cleanups occur in a more timely fashion.
+	 */
+	if (dst_garbage.timer_inc > DST_GC_INC) {
+		dst_garbage.timer_inc = DST_GC_INC;
+		dst_garbage.timer_expires = DST_GC_MIN;
+		mod_delayed_work(system_wq, &dst_gc_work,
+				 dst_garbage.timer_expires);
+	}
 	spin_unlock_bh(&dst_garbage.lock);
 
 	if (last)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9e2c0a7cb325..5e61456f6bc7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1124,6 +1124,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	struct ifla_vf_mac vf_mac;
 	struct ifla_vf_info ivi;
 
+	memset(&ivi, 0, sizeof(ivi));
+
 	/* Not all SR-IOV capable drivers support the
 	 * spoofcheck and "RSS query enable" query. Preset to
 	 * -1 so the user space tool can detect that the driver
@@ -1132,7 +1134,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	ivi.spoofchk = -1;
 	ivi.rss_query_en = -1;
 	ivi.trusted = -1;
-	memset(ivi.mac, 0, sizeof(ivi.mac));
 	/* The default value for VF link state is "auto"
 	 * IFLA_VF_LINK_STATE_AUTO which equals zero
 	 */
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1ed81ac6dd1a..aa8ffecc46a4 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
-	if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+	if (skb->len < sizeof(*nlh) ||
+	    nlh->nlmsg_len < sizeof(*nlh) ||
+	    skb->len < nlh->nlmsg_len)
 		return;
 
 	if (!netlink_capable(skb, CAP_NET_ADMIN))
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c73160fb11e7..0a0a392dc2bd 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
 	del_timer_sync(&hsr->announce_timer);
 
 	synchronize_rcu();
-	free_netdev(hsr_dev);
 }
 
 static const struct net_device_ops hsr_device_ops = {
@@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev)
 	SET_NETDEV_DEVTYPE(dev, &hsr_type);
 	dev->priv_flags |= IFF_NO_QUEUE;
 
-	dev->destructor = hsr_dev_destroy;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = hsr_dev_destroy;
 
 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 4ebe2aa3e7d3..04b5450c5a55 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
 	unsigned long irqflags;
 
 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
-	frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
-				       frame->is_supervision);
+	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
 	if (frame->node_src == NULL)
 		return -1; /* Unknown node and !is_supervision, or no mem */
 
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 7ea925816f79..284a9b820df8 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 
 /* Get the hsr_node from which 'skb' was sent.
  */
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup)
 {
+	struct list_head *node_db = &port->hsr->node_db;
 	struct hsr_node *node;
 	struct ethhdr *ethhdr;
 	u16 seq_out;
@@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
 		 */
 		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
 	} else {
-		WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+		/* this is called also for frames from master port and
+		 * so warn only for non master ports
+		 */
+		if (port->type != HSR_PT_MASTER)
+			WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
 		seq_out = HSR_SEQNR_START;
 	}
 
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 438b40f98f5a..4e04f0e868e9 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -18,7 +18,7 @@ struct hsr_node;
 
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 			      u16 seq_out);
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 			  struct hsr_port *port);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index d7efbf0dad20..0a866f332290 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev)
 
 	ldev->netdev_ops	= &lowpan_netdev_ops;
 	ldev->header_ops	= &lowpan_header_ops;
-	ldev->destructor	= free_netdev;
+	ldev->needs_free_netdev	= true;
 	ldev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 43318b5f5647..9144fa7df2ad 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -657,8 +657,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	/* Needed by both icmp_global_allow and icmp_xmit_lock */
 	local_bh_disable();
 
-	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
-	if (!icmpv4_global_allow(net, type, code))
+	/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
+	 * incoming dev is loopback.  If outgoing dev change to not be
+	 * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow)
+	 */
+	if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
+	      !icmpv4_global_allow(net, type, code))
 		goto out_bh_enable;
 
 	sk = icmp_xmit_lock(net);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 44fd86de2823..8f6b5bbcbf69 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2071,21 +2071,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 static void ip_mc_clear_src(struct ip_mc_list *pmc)
 {
-	struct ip_sf_list *psf, *nextpsf;
+	struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
 
-	for (psf = pmc->tomb; psf; psf = nextpsf) {
+	spin_lock_bh(&pmc->lock);
+	tomb = pmc->tomb;
+	pmc->tomb = NULL;
+	sources = pmc->sources;
+	pmc->sources = NULL;
+	pmc->sfmode = MCAST_EXCLUDE;
+	pmc->sfcount[MCAST_INCLUDE] = 0;
+	pmc->sfcount[MCAST_EXCLUDE] = 1;
+	spin_unlock_bh(&pmc->lock);
+
+	for (psf = tomb; psf; psf = nextpsf) {
 		nextpsf = psf->sf_next;
 		kfree(psf);
 	}
-	pmc->tomb = NULL;
-	for (psf = pmc->sources; psf; psf = nextpsf) {
+	for (psf = sources; psf; psf = nextpsf) {
 		nextpsf = psf->sf_next;
 		kfree(psf);
 	}
-	pmc->sources = NULL;
-	pmc->sfmode = MCAST_EXCLUDE;
-	pmc->sfcount[MCAST_INCLUDE] = 0;
-	pmc->sfcount[MCAST_EXCLUDE] = 1;
 }
 
 /* Join a multicast group
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b878ecbc0608..b436d0775631 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -967,7 +967,6 @@ static void ip_tunnel_dev_free(struct net_device *dev)
 	gro_cells_destroy(&tunnel->gro_cells);
 	dst_cache_destroy(&tunnel->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
@@ -1155,7 +1154,8 @@ int ip_tunnel_init(struct net_device *dev)
 	struct iphdr *iph = &tunnel->parms.iph;
 	int err;
 
-	dev->destructor	= ip_tunnel_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip_tunnel_dev_free;
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 551de4d023a8..8ae425cad818 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -101,8 +101,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id);
 static void ipmr_free_table(struct mr_table *mrt);
 
 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
-			  struct sk_buff *skb, struct mfc_cache *cache,
-			  int local);
+			  struct net_device *dev, struct sk_buff *skb,
+			  struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
 	dev->flags		= IFF_NOARP;
 	dev->netdev_ops		= &reg_vif_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
@@ -988,7 +988,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 
 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
 		} else {
-			ip_mr_forward(net, mrt, skb, c, 0);
+			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
 		}
 	}
 }
@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 
 /* Queue a packet for resolution. It gets locked cache entry! */
 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
-				 struct sk_buff *skb)
+				 struct sk_buff *skb, struct net_device *dev)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct mfc_cache *c;
@@ -1130,6 +1130,10 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
 		kfree_skb(skb);
 		err = -ENOBUFS;
 	} else {
+		if (dev) {
+			skb->dev = dev;
+			skb->skb_iif = dev->ifindex;
+		}
 		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
 		err = 0;
 	}
@@ -1828,10 +1832,10 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
 
 /* "local" means that we should preserve one skb (for local delivery) */
 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
-			  struct sk_buff *skb, struct mfc_cache *cache,
-			  int local)
+			  struct net_device *dev, struct sk_buff *skb,
+			  struct mfc_cache *cache, int local)
 {
-	int true_vifi = ipmr_find_vif(mrt, skb->dev);
+	int true_vifi = ipmr_find_vif(mrt, dev);
 	int psend = -1;
 	int vif, ct;
 
@@ -1853,13 +1857,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
 	}
 
 	/* Wrong interface: drop packet and (maybe) send PIM assert. */
-	if (mrt->vif_table[vif].dev != skb->dev) {
-		struct net_device *mdev;
-
-		mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev);
-		if (mdev == skb->dev)
-			goto forward;
-
+	if (mrt->vif_table[vif].dev != dev) {
 		if (rt_is_output_route(skb_rtable(skb))) {
 			/* It is our own packet, looped back.
 			 * Very complicated situation...
@@ -2053,7 +2051,7 @@ int ip_mr_input(struct sk_buff *skb)
 		read_lock(&mrt_lock);
 		vif = ipmr_find_vif(mrt, dev);
 		if (vif >= 0) {
-			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
+			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
 			read_unlock(&mrt_lock);
 
 			return err2;
@@ -2064,7 +2062,7 @@ int ip_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
-	ip_mr_forward(net, mrt, skb, cache, local);
+	ip_mr_forward(net, mrt, dev, skb, cache, local);
 	read_unlock(&mrt_lock);
 
 	if (local)
@@ -2238,7 +2236,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		iph->saddr = saddr;
 		iph->daddr = daddr;
 		iph->version = 0;
-		err = ipmr_cache_unresolved(mrt, vif, skb2);
+		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
 		read_unlock(&mrt_lock);
 		rcu_read_unlock();
 		return err;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 230b5aac9f03..8d7b113958b1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -491,7 +491,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 	local_bh_disable();
 
 	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
-	if (!icmpv6_global_allow(type))
+	if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type))
 		goto out_bh_enable;
 
 	mip6_addr_swap(skb);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 2fd5ca151dcf..77f7f8c7d93d 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -62,6 +62,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc)
 {
 	u32 *v = (u32 *)loc.v32;
 
+	__ila_hash_secret_init();
 	return jhash_2words(v[0], v[1], hashrnd);
 }
 
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 0c5b4caa1949..64eea3962733 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -991,13 +991,13 @@ static void ip6gre_dev_free(struct net_device *dev)
 
 	dst_cache_destroy(&t->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static void ip6gre_tunnel_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &ip6gre_netdev_ops;
-	dev->destructor = ip6gre_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6gre_dev_free;
 
 	dev->type = ARPHRD_IP6GRE;
 
@@ -1148,7 +1148,7 @@ static int __net_init ip6gre_init_net(struct net *net)
 	return 0;
 
 err_reg_dev:
-	ip6gre_dev_free(ign->fb_tunnel_dev);
+	free_netdev(ign->fb_tunnel_dev);
 err_alloc_dev:
 	return err;
 }
@@ -1300,7 +1300,8 @@ static void ip6gre_tap_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &ip6gre_tap_netdev_ops;
-	dev->destructor = ip6gre_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6gre_dev_free;
 
 	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9b37f9747fc6..c3581973f5d7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev)
 	gro_cells_destroy(&t->gro_cells);
 	dst_cache_destroy(&t->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static int ip6_tnl_create2(struct net_device *dev)
@@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 	return t;
 
 failed_free:
-	ip6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return ERR_PTR(err);
 }
@@ -1777,7 +1776,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &ip6_tnl_netdev_ops;
-	dev->destructor = ip6_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6_dev_free;
 
 	dev->type = ARPHRD_TUNNEL6;
 	dev->flags |= IFF_NOARP;
@@ -2224,7 +2224,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
 	return 0;
 
 err_register:
-	ip6_dev_free(ip6n->fb_tnl_dev);
+	free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
 	return err;
 }
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d67ef56454b2..837ea1eefe7f 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
 static void vti6_dev_free(struct net_device *dev)
 {
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static int vti6_tnl_create2(struct net_device *dev)
@@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
 	return t;
 
 failed_free:
-	vti6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return NULL;
 }
@@ -842,7 +841,8 @@ static const struct net_device_ops vti6_netdev_ops = {
 static void vti6_dev_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &vti6_netdev_ops;
-	dev->destructor = vti6_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = vti6_dev_free;
 
 	dev->type = ARPHRD_TUNNEL6;
 	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
@@ -1100,7 +1100,7 @@ static int __net_init vti6_init_net(struct net *net)
 	return 0;
 
 err_register:
-	vti6_dev_free(ip6n->fb_tnl_dev);
+	free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
 	return err;
 }
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 374997d26488..2ecb39b943b5 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
 	dev->flags		= IFF_NOARP;
 	dev->netdev_ops		= &reg_vif_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index cc8e3ae9ca73..e88bcb8ff0fd 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
 	u64 buff64[SNMP_MIB_MAX];
 	int i;
 
-	memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+	memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
 
 	snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
 	for (i = 0; itemlist[i].name; i++)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dc61b0b5e64e..7cebd954d5bb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
 	if ((rt->dst.dev == dev || !dev) &&
 	    rt != adn->net->ipv6.ip6_null_entry &&
 	    (rt->rt6i_nsiblings == 0 ||
+	     (dev && netdev_unregistering(dev)) ||
 	     !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
 		return -1;
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 61e5902f0687..2378503577b0 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 	return nt;
 
 failed_free:
-	ipip6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return NULL;
 }
@@ -1336,7 +1336,6 @@ static void ipip6_dev_free(struct net_device *dev)
 
 	dst_cache_destroy(&tunnel->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 #define SIT_FEATURES (NETIF_F_SG | \
@@ -1351,7 +1350,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
 	dev->netdev_ops		= &ipip6_netdev_ops;
-	dev->destructor		= ipip6_dev_free;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= ipip6_dev_free;
 
 	dev->type		= ARPHRD_SIT;
 	dev->hard_header_len	= LL_MAX_HEADER + t_hlen;
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 74d09f91709e..3be852808a9d 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops		= &irlan_eth_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->min_mtu		= 0;
 	dev->max_mtu		= ETH_MAX_MTU;
 
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8b21af7321b9..4de2ec94b08c 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -114,12 +114,13 @@ static void l2tp_eth_get_stats64(struct net_device *dev,
 {
 	struct l2tp_eth *priv = netdev_priv(dev);
 
-	stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
-	stats->tx_packets = atomic_long_read(&priv->tx_packets);
-	stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
-	stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
-	stats->rx_packets = atomic_long_read(&priv->rx_packets);
-	stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+	stats->tx_bytes   = (unsigned long) atomic_long_read(&priv->tx_bytes);
+	stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets);
+	stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped);
+	stats->rx_bytes   = (unsigned long) atomic_long_read(&priv->rx_bytes);
+	stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets);
+	stats->rx_errors  = (unsigned long) atomic_long_read(&priv->rx_errors);
+
 }
 
 static const struct net_device_ops l2tp_eth_netdev_ops = {
@@ -141,7 +142,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
 	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
 	dev->features		|= NETIF_F_LLTX;
 	dev->netdev_ops		= &l2tp_eth_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6c2e6060cd54..4a388fe8c2d1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -902,6 +902,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	default:
 		return -EINVAL;
 	}
+	sdata->u.ap.req_smps = sdata->smps_mode;
+
 	sdata->needed_rx_chains = sdata->local->rx_chains;
 
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 665501ac358f..5e002f62c235 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1531,7 +1531,7 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
 		return true;
 	/* can't handle non-legacy preamble yet */
 	if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
-	    status->encoding != RX_ENC_LEGACY)
+	    status->encoding == RX_ENC_LEGACY)
 		return true;
 	return false;
 }
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8fae1a72e6a7..f5f50150ba1c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1213,7 +1213,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
 static void ieee80211_if_free(struct net_device *dev)
 {
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static void ieee80211_if_setup(struct net_device *dev)
@@ -1221,7 +1220,8 @@ static void ieee80211_if_setup(struct net_device *dev)
 	ether_setup(dev);
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->netdev_ops = &ieee80211_dataif_ops;
-	dev->destructor = ieee80211_if_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ieee80211_if_free;
 }
 
 static void ieee80211_if_setup_no_queue(struct net_device *dev)
@@ -1816,6 +1816,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 		ret = dev_alloc_name(ndev, ndev->name);
 		if (ret < 0) {
 			ieee80211_if_free(ndev);
+			free_netdev(ndev);
 			return ret;
 		}
 
@@ -1905,7 +1906,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
 		ret = register_netdevice(ndev);
 		if (ret) {
-			ieee80211_if_free(ndev);
+			free_netdev(ndev);
 			return ret;
 		}
 	}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0ea9712bd99e..cc8e6ea1b27e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -601,7 +601,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_channel *chan;
-	u32 rate_flags, rates = 0;
+	u32 rates = 0;
 
 	sdata_assert_lock(sdata);
 
@@ -612,7 +612,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 		return;
 	}
 	chan = chanctx_conf->def.chan;
-	rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
 	rcu_read_unlock();
 	sband = local->hw.wiphy->bands[chan->band];
 	shift = ieee80211_vif_get_shift(&sdata->vif);
@@ -636,9 +635,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 	 */
 	rates_len = 0;
 	for (i = 0; i < sband->n_bitrates; i++) {
-		if ((rate_flags & sband->bitrates[i].flags)
-		    != rate_flags)
-			continue;
 		rates |= BIT(i);
 		rates_len++;
 	}
@@ -2818,7 +2814,7 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
 				u32 *rates, u32 *basic_rates,
 				bool *have_higher_than_11mbit,
 				int *min_rate, int *min_rate_index,
-				int shift, u32 rate_flags)
+				int shift)
 {
 	int i, j;
 
@@ -2846,8 +2842,6 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
 			int brate;
 
 			br = &sband->bitrates[j];
-			if ((rate_flags & br->flags) != rate_flags)
-				continue;
 
 			brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
 			if (brate == rate) {
@@ -4398,40 +4392,32 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 		return -ENOMEM;
 	}
 
-	if (new_sta || override) {
-		err = ieee80211_prep_channel(sdata, cbss);
-		if (err) {
-			if (new_sta)
-				sta_info_free(local, new_sta);
-			return -EINVAL;
-		}
-	}
-
+	/*
+	 * Set up the information for the new channel before setting the
+	 * new channel. We can't - completely race-free - change the basic
+	 * rates bitmap and the channel (sband) that it refers to, but if
+	 * we set it up before we at least avoid calling into the driver's
+	 * bss_info_changed() method with invalid information (since we do
+	 * call that from changing the channel - only for IDLE and perhaps
+	 * some others, but ...).
+	 *
+	 * So to avoid that, just set up all the new information before the
+	 * channel, but tell the driver to apply it only afterwards, since
+	 * it might need the new channel for that.
+	 */
 	if (new_sta) {
 		u32 rates = 0, basic_rates = 0;
 		bool have_higher_than_11mbit;
 		int min_rate = INT_MAX, min_rate_index = -1;
-		struct ieee80211_chanctx_conf *chanctx_conf;
 		const struct cfg80211_bss_ies *ies;
 		int shift = ieee80211_vif_get_shift(&sdata->vif);
-		u32 rate_flags;
-
-		rcu_read_lock();
-		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-		if (WARN_ON(!chanctx_conf)) {
-			rcu_read_unlock();
-			sta_info_free(local, new_sta);
-			return -EINVAL;
-		}
-		rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
-		rcu_read_unlock();
 
 		ieee80211_get_rates(sband, bss->supp_rates,
 				    bss->supp_rates_len,
 				    &rates, &basic_rates,
 				    &have_higher_than_11mbit,
 				    &min_rate, &min_rate_index,
-				    shift, rate_flags);
+				    shift);
 
 		/*
 		 * This used to be a workaround for basic rates missing
@@ -4489,8 +4475,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
4489 sdata->vif.bss_conf.sync_dtim_count = 0; 4475 sdata->vif.bss_conf.sync_dtim_count = 0;
4490 } 4476 }
4491 rcu_read_unlock(); 4477 rcu_read_unlock();
4478 }
4492 4479
4493 /* tell driver about BSSID, basic rates and timing */ 4480 if (new_sta || override) {
4481 err = ieee80211_prep_channel(sdata, cbss);
4482 if (err) {
4483 if (new_sta)
4484 sta_info_free(local, new_sta);
4485 return -EINVAL;
4486 }
4487 }
4488
4489 if (new_sta) {
4490 /*
4491 * tell driver about BSSID, basic rates and timing
4492 * this was set up above, before setting the channel
4493 */
4494 ieee80211_bss_info_change_notify(sdata, 4494 ieee80211_bss_info_change_notify(sdata,
4495 BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | 4495 BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
4496 BSS_CHANGED_BEACON_INT); 4496 BSS_CHANGED_BEACON_INT);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1f75280ba26c..3674fe3d67dc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1613,12 +1613,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1613 */ 1613 */
1614 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && 1614 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1615 !ieee80211_has_morefrags(hdr->frame_control) && 1615 !ieee80211_has_morefrags(hdr->frame_control) &&
1616 !ieee80211_is_back_req(hdr->frame_control) &&
1616 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1617 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1617 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1618 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1618 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1619 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1619 /* PM bit is only checked in frames where it isn't reserved, 1620 /*
1621 * PM bit is only checked in frames where it isn't reserved,
1620 * in AP mode it's reserved in non-bufferable management frames 1622 * in AP mode it's reserved in non-bufferable management frames
1621 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) 1623 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
1624 * BAR frames should be ignored as specified in
1625 * IEEE 802.11-2012 10.2.1.2.
1622 */ 1626 */
1623 (!ieee80211_is_mgmt(hdr->frame_control) || 1627 (!ieee80211_is_mgmt(hdr->frame_control) ||
1624 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { 1628 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c1ef22df865f..cc19614ff4e6 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -17,6 +17,7 @@
17#include <asm/unaligned.h> 17#include <asm/unaligned.h>
18#include <net/mac80211.h> 18#include <net/mac80211.h>
19#include <crypto/aes.h> 19#include <crypto/aes.h>
20#include <crypto/algapi.h>
20 21
21#include "ieee80211_i.h" 22#include "ieee80211_i.h"
22#include "michael.h" 23#include "michael.h"
@@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
153 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; 154 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
154 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 155 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
155 michael_mic(key, hdr, data, data_len, mic); 156 michael_mic(key, hdr, data, data_len, mic);
156 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) 157 if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
157 goto mic_fail; 158 goto mic_fail;
158 159
159 /* remove Michael MIC from payload */ 160 /* remove Michael MIC from payload */
@@ -1048,7 +1049,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
1048 bip_aad(skb, aad); 1049 bip_aad(skb, aad);
1049 ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, 1050 ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
1050 skb->data + 24, skb->len - 24, mic); 1051 skb->data + 24, skb->len - 24, mic);
1051 if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1052 if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
1052 key->u.aes_cmac.icverrors++; 1053 key->u.aes_cmac.icverrors++;
1053 return RX_DROP_UNUSABLE; 1054 return RX_DROP_UNUSABLE;
1054 } 1055 }
@@ -1098,7 +1099,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
1098 bip_aad(skb, aad); 1099 bip_aad(skb, aad);
1099 ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, 1100 ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
1100 skb->data + 24, skb->len - 24, mic); 1101 skb->data + 24, skb->len - 24, mic);
1101 if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1102 if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
1102 key->u.aes_cmac.icverrors++; 1103 key->u.aes_cmac.icverrors++;
1103 return RX_DROP_UNUSABLE; 1104 return RX_DROP_UNUSABLE;
1104 } 1105 }
@@ -1202,7 +1203,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
1202 if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, 1203 if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
1203 skb->data + 24, skb->len - 24, 1204 skb->data + 24, skb->len - 24,
1204 mic) < 0 || 1205 mic) < 0 ||
1205 memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1206 crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
1206 key->u.aes_gmac.icverrors++; 1207 key->u.aes_gmac.icverrors++;
1207 return RX_DROP_UNUSABLE; 1208 return RX_DROP_UNUSABLE;
1208 } 1209 }
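The wpa.c hunks above replace memcmp() with crypto_memneq() for the Michael MIC and BIP (AES-CMAC/GMAC) MIC checks, so a forged MIC is rejected in time that does not depend on how many leading bytes happened to match. A rough user-space sketch of the idea follows; it is not the kernel's crypto_memneq() (which additionally hides the accumulator from the optimizer), and ct_memneq() plus the test values are invented for the illustration.

#include <stddef.h>
#include <stdio.h>

/* Data-independent inequality check: XOR every byte pair and OR the
 * results together so the loop always runs to the end; returns 0 when
 * the buffers are equal, non-zero otherwise. */
static int ct_memneq(const void *a, const void *b, size_t len)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];

	return diff != 0;
}

int main(void)
{
	unsigned char mic[8]  = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char good[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char bad[8]  = { 9, 2, 3, 4, 5, 6, 7, 8 };

	printf("good: %d\n", ct_memneq(mic, good, sizeof(mic))); /* 0 */
	printf("bad:  %d\n", ct_memneq(mic, bad, sizeof(mic)));  /* 1 */
	return 0;
}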
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 06019dba4b10..bd88a9b80773 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev)
526 struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 526 struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
527 527
528 mac802154_llsec_destroy(&sdata->sec); 528 mac802154_llsec_destroy(&sdata->sec);
529
530 free_netdev(dev);
531} 529}
532 530
533static void ieee802154_if_setup(struct net_device *dev) 531static void ieee802154_if_setup(struct net_device *dev)
@@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
593 sdata->dev->dev_addr); 591 sdata->dev->dev_addr);
594 592
595 sdata->dev->header_ops = &mac802154_header_ops; 593 sdata->dev->header_ops = &mac802154_header_ops;
596 sdata->dev->destructor = mac802154_wpan_free; 594 sdata->dev->needs_free_netdev = true;
595 sdata->dev->priv_destructor = mac802154_wpan_free;
597 sdata->dev->netdev_ops = &mac802154_wpan_ops; 596 sdata->dev->netdev_ops = &mac802154_wpan_ops;
598 sdata->dev->ml_priv = &mac802154_mlme_wpan; 597 sdata->dev->ml_priv = &mac802154_mlme_wpan;
599 wpan_dev->promiscuous_mode = false; 598 wpan_dev->promiscuous_mode = false;
@@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
608 607
609 break; 608 break;
610 case NL802154_IFTYPE_MONITOR: 609 case NL802154_IFTYPE_MONITOR:
611 sdata->dev->destructor = free_netdev; 610 sdata->dev->needs_free_netdev = true;
612 sdata->dev->netdev_ops = &mac802154_monitor_ops; 611 sdata->dev->netdev_ops = &mac802154_monitor_ops;
613 wpan_dev->promiscuous_mode = true; 612 wpan_dev->promiscuous_mode = true;
614 break; 613 break;
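The mac802154 hunks are part of the tree-wide move away from net_device->destructor: a driver now sets needs_free_netdev so the networking core calls free_netdev() itself, and uses priv_destructor only to release driver-private state, which is why the explicit free_netdev() call is dropped from mac802154_wpan_free(). The openvswitch and phonet hunks below follow the same conversion. A toy user-space analogue of that ownership split, with every name invented for the illustration (this is not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Toy "core"/"driver" split: the core owns and frees the object, the
 * driver only supplies a hook that releases its private resources. */
struct toy_device {
	void (*priv_destructor)(struct toy_device *dev);
	int needs_free;			/* core frees the object when set */
	void *priv;			/* driver-private state */
};

static void driver_priv_destructor(struct toy_device *dev)
{
	printf("driver: releasing private state\n");
	free(dev->priv);		/* driver resources only, not dev itself */
}

static void core_unregister(struct toy_device *dev)
{
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	if (dev->needs_free)
		free(dev);		/* the core, not the driver, frees dev */
}

int main(void)
{
	struct toy_device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->priv = malloc(16);
	dev->priv_destructor = driver_priv_destructor;
	dev->needs_free = 1;
	core_unregister(dev);
	return 0;
}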
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 89193a634da4..04a3128adcf0 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev)
94 struct vport *vport = ovs_internal_dev_get_vport(dev); 94 struct vport *vport = ovs_internal_dev_get_vport(dev);
95 95
96 ovs_vport_free(vport); 96 ovs_vport_free(vport);
97 free_netdev(dev);
98} 97}
99 98
100static void 99static void
@@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev)
156 netdev->priv_flags &= ~IFF_TX_SKB_SHARING; 155 netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
157 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | 156 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
158 IFF_PHONY_HEADROOM | IFF_NO_QUEUE; 157 IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
159 netdev->destructor = internal_dev_destructor; 158 netdev->needs_free_netdev = true;
159 netdev->priv_destructor = internal_dev_destructor;
160 netdev->ethtool_ops = &internal_dev_ethtool_ops; 160 netdev->ethtool_ops = &internal_dev_ethtool_ops;
161 netdev->rtnl_link_ops = &internal_dev_link_ops; 161 netdev->rtnl_link_ops = &internal_dev_link_ops;
162 162
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 21c28b51be94..2c9337946e30 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev)
236 dev->tx_queue_len = 10; 236 dev->tx_queue_len = 10;
237 237
238 dev->netdev_ops = &gprs_netdev_ops; 238 dev->netdev_ops = &gprs_netdev_ops;
239 dev->destructor = free_netdev; 239 dev->needs_free_netdev = true;
240} 240}
241 241
242/* 242/*
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 164b5ac094be..7dc5892671c8 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -94,8 +94,10 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
94 k++; 94 k++;
95 } 95 }
96 96
97 if (n) 97 if (n) {
98 err = -EINVAL;
98 goto err_out; 99 goto err_out;
100 }
99 101
100 return keys_ex; 102 return keys_ex;
101 103
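The act_pedit hunk fixes an error-path bug: "goto err_out" was taken without setting err, so a parse failure could propagate whatever value err last held, success included. A minimal stand-alone illustration of the pattern, with parse_keys() standing in for the real parser:

#include <errno.h>
#include <stdio.h>

/* Illustration of the bug class fixed above: jumping to the error label
 * without first setting the error code. */
static int parse_keys(int n_unparsed)
{
	int err = 0;

	/* ... parsing loop that consumes entries on success ... */

	if (n_unparsed) {
		err = -EINVAL;	/* without this line the caller would
				 * see err == 0, i.e. success */
		goto err_out;
	}

	return 0;

err_out:
	return err;
}

int main(void)
{
	printf("leftover keys -> %d\n", parse_keys(1));	/* -EINVAL */
	printf("all parsed    -> %d\n", parse_keys(0));	/* 0 */
	return 0;
}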
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index f42008b29311..b062bc80c7cb 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -132,21 +132,21 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
132 } 132 }
133 } 133 }
134 134
135 spin_lock_bh(&police->tcf_lock);
136 if (est) { 135 if (est) {
137 err = gen_replace_estimator(&police->tcf_bstats, NULL, 136 err = gen_replace_estimator(&police->tcf_bstats, NULL,
138 &police->tcf_rate_est, 137 &police->tcf_rate_est,
139 &police->tcf_lock, 138 &police->tcf_lock,
140 NULL, est); 139 NULL, est);
141 if (err) 140 if (err)
142 goto failure_unlock; 141 goto failure;
143 } else if (tb[TCA_POLICE_AVRATE] && 142 } else if (tb[TCA_POLICE_AVRATE] &&
144 (ret == ACT_P_CREATED || 143 (ret == ACT_P_CREATED ||
145 !gen_estimator_active(&police->tcf_rate_est))) { 144 !gen_estimator_active(&police->tcf_rate_est))) {
146 err = -EINVAL; 145 err = -EINVAL;
147 goto failure_unlock; 146 goto failure;
148 } 147 }
149 148
149 spin_lock_bh(&police->tcf_lock);
150 /* No failure allowed after this point */ 150 /* No failure allowed after this point */
151 police->tcfp_mtu = parm->mtu; 151 police->tcfp_mtu = parm->mtu;
152 if (police->tcfp_mtu == 0) { 152 if (police->tcfp_mtu == 0) {
@@ -192,8 +192,6 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
192 192
193 return ret; 193 return ret;
194 194
195failure_unlock:
196 spin_unlock_bh(&police->tcf_lock);
197failure: 195failure:
198 qdisc_put_rtab(P_tab); 196 qdisc_put_rtab(P_tab);
199 qdisc_put_rtab(R_tab); 197 qdisc_put_rtab(R_tab);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f16c8d97b7f3..30aa0a529215 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4622,13 +4622,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
4622 4622
4623 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; 4623 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
4624 hash++, head++) { 4624 hash++, head++) {
4625 read_lock(&head->lock); 4625 read_lock_bh(&head->lock);
4626 sctp_for_each_hentry(epb, &head->chain) { 4626 sctp_for_each_hentry(epb, &head->chain) {
4627 err = cb(sctp_ep(epb), p); 4627 err = cb(sctp_ep(epb), p);
4628 if (err) 4628 if (err)
4629 break; 4629 break;
4630 } 4630 }
4631 read_unlock(&head->lock); 4631 read_unlock_bh(&head->lock);
4632 } 4632 }
4633 4633
4634 return err; 4634 return err;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 312ef7de57d7..ab3087687a32 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
508 } 508 }
509 509
510 if (skb_cloned(_skb) && 510 if (skb_cloned(_skb) &&
511 pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL)) 511 pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
512 goto exit; 512 goto exit;
513 513
514 /* Now reverse the concerned fields */ 514 /* Now reverse the concerned fields */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 6a7fe7660551..1a0c961f4ffe 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -999,7 +999,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
999 struct path path = { }; 999 struct path path = { };
1000 1000
1001 err = -EINVAL; 1001 err = -EINVAL;
1002 if (sunaddr->sun_family != AF_UNIX) 1002 if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
1003 sunaddr->sun_family != AF_UNIX)
1003 goto out; 1004 goto out;
1004 1005
1005 if (addr_len == sizeof(short)) { 1006 if (addr_len == sizeof(short)) {
@@ -1110,6 +1111,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1110 unsigned int hash; 1111 unsigned int hash;
1111 int err; 1112 int err;
1112 1113
1114 err = -EINVAL;
1115 if (alen < offsetofend(struct sockaddr, sa_family))
1116 goto out;
1117
1113 if (addr->sa_family != AF_UNSPEC) { 1118 if (addr->sa_family != AF_UNSPEC) {
1114 err = unix_mkname(sunaddr, alen, &hash); 1119 err = unix_mkname(sunaddr, alen, &hash);
1115 if (err < 0) 1120 if (err < 0)
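Both af_unix hunks add the same validation: check that the caller-supplied address length is large enough to cover the address-family field before reading it, so a short addr_len from userspace cannot cause a read past the supplied sockaddr. A stand-alone sketch of that check; offsetofend() is redefined locally to mirror the kernel macro, and check_bind_addr() is invented for the example:

#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Local copy of the kernel's offsetofend(): first byte *after* MEMBER. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Reject the address before touching sun_family if the supplied length
 * cannot even contain that field. */
static int check_bind_addr(const struct sockaddr_un *sun, socklen_t addr_len)
{
	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
	    sun->sun_family != AF_UNIX)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };

	printf("full length:  %d\n",
	       check_bind_addr(&sun, sizeof(sun)));	/* 0 */
	printf("short length: %d\n",
	       check_bind_addr(&sun, 1));		/* -EINVAL */
	return 0;
}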
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e67a526d1f30..819fd6858b49 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1106,10 +1106,8 @@ static int selinux_parse_opts_str(char *options,
1106 1106
1107 opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), 1107 opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int),
1108 GFP_KERNEL); 1108 GFP_KERNEL);
1109 if (!opts->mnt_opts_flags) { 1109 if (!opts->mnt_opts_flags)
1110 kfree(opts->mnt_opts);
1111 goto out_err; 1110 goto out_err;
1112 }
1113 1111
1114 if (fscontext) { 1112 if (fscontext) {
1115 opts->mnt_opts[num_mnt_opts] = fscontext; 1113 opts->mnt_opts[num_mnt_opts] = fscontext;
@@ -1132,6 +1130,7 @@ static int selinux_parse_opts_str(char *options,
1132 return 0; 1130 return 0;
1133 1131
1134out_err: 1132out_err:
1133 security_free_mnt_opts(opts);
1135 kfree(context); 1134 kfree(context);
1136 kfree(defcontext); 1135 kfree(defcontext);
1137 kfree(fscontext); 1136 kfree(fscontext);
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 282a60368b14..5f66697fe1e0 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -192,7 +192,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
192 "complete_and_exit", 192 "complete_and_exit",
193 "kvm_spurious_fault", 193 "kvm_spurious_fault",
194 "__reiserfs_panic", 194 "__reiserfs_panic",
195 "lbug_with_loc" 195 "lbug_with_loc",
196 "fortify_panic",
196 }; 197 };
197 198
198 if (func->bind == STB_WEAK) 199 if (func->bind == STB_WEAK)
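objtool keeps a hard-coded list of "dead end" functions because the noreturn attribute is not recorded in the object files it inspects; the hunk above adds fortify_panic() (part of the kernel's FORTIFY_SOURCE support) to that list so code ending in a call to it is not flagged as falling through. A small illustration of what declaring a function noreturn buys such analysis; my_panic() and checked_fd() are made up:

#include <stdio.h>
#include <stdlib.h>

/* The caller below relies on the compiler (and any checker) knowing
 * that my_panic() never returns. */
static void my_panic(const char *msg) __attribute__((noreturn));

static void my_panic(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);
}

static int checked_fd(int fd)
{
	if (fd >= 0)
		return fd;
	my_panic("bad fd");
	/* no return statement needed here: my_panic() never returns; a
	 * checker unaware of that would think control falls off the end */
}

int main(void)
{
	printf("%d\n", checked_fd(3));
	return 0;
}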
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 8354d04b392f..1f4fbc9a3292 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
19 19
20include $(srctree)/tools/scripts/Makefile.arch 20include $(srctree)/tools/scripts/Makefile.arch
21 21
22$(call detected_var,ARCH) 22$(call detected_var,SRCARCH)
23 23
24NO_PERF_REGS := 1 24NO_PERF_REGS := 1
25 25
26# Additional ARCH settings for ppc 26# Additional ARCH settings for ppc
27ifeq ($(ARCH),powerpc) 27ifeq ($(SRCARCH),powerpc)
28 NO_PERF_REGS := 0 28 NO_PERF_REGS := 0
29 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 29 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
30endif 30endif
31 31
32# Additional ARCH settings for x86 32# Additional ARCH settings for x86
33ifeq ($(ARCH),x86) 33ifeq ($(SRCARCH),x86)
34 $(call detected,CONFIG_X86) 34 $(call detected,CONFIG_X86)
35 ifeq (${IS_64_BIT}, 1) 35 ifeq (${IS_64_BIT}, 1)
36 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated 36 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
@@ -43,12 +43,12 @@ ifeq ($(ARCH),x86)
43 NO_PERF_REGS := 0 43 NO_PERF_REGS := 0
44endif 44endif
45 45
46ifeq ($(ARCH),arm) 46ifeq ($(SRCARCH),arm)
47 NO_PERF_REGS := 0 47 NO_PERF_REGS := 0
48 LIBUNWIND_LIBS = -lunwind -lunwind-arm 48 LIBUNWIND_LIBS = -lunwind -lunwind-arm
49endif 49endif
50 50
51ifeq ($(ARCH),arm64) 51ifeq ($(SRCARCH),arm64)
52 NO_PERF_REGS := 0 52 NO_PERF_REGS := 0
53 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 53 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
54endif 54endif
@@ -61,7 +61,7 @@ endif
61# Disable it on all other architectures in case libdw unwind 61# Disable it on all other architectures in case libdw unwind
62# support is detected in system. Add supported architectures 62# support is detected in system. Add supported architectures
63# to the check. 63# to the check.
64ifneq ($(ARCH),$(filter $(ARCH),x86 arm)) 64ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm))
65 NO_LIBDW_DWARF_UNWIND := 1 65 NO_LIBDW_DWARF_UNWIND := 1
66endif 66endif
67 67
@@ -115,9 +115,9 @@ endif
115FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) 115FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
116FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf 116FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
117 117
118FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi 118FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
119# include ARCH specific config 119# include ARCH specific config
120-include $(src-perf)/arch/$(ARCH)/Makefile 120-include $(src-perf)/arch/$(SRCARCH)/Makefile
121 121
122ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET 122ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
123 CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET 123 CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
@@ -228,12 +228,12 @@ ifeq ($(DEBUG),0)
228endif 228endif
229 229
230INC_FLAGS += -I$(src-perf)/util/include 230INC_FLAGS += -I$(src-perf)/util/include
231INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include 231INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
232INC_FLAGS += -I$(srctree)/tools/include/uapi 232INC_FLAGS += -I$(srctree)/tools/include/uapi
233INC_FLAGS += -I$(srctree)/tools/include/ 233INC_FLAGS += -I$(srctree)/tools/include/
234INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi 234INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi
235INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/ 235INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/
236INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/ 236INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/
237 237
238# $(obj-perf) for generated common-cmds.h 238# $(obj-perf) for generated common-cmds.h
239# $(obj-perf)/util for generated bison/flex headers 239# $(obj-perf)/util for generated bison/flex headers
@@ -355,7 +355,7 @@ ifndef NO_LIBELF
355 355
356 ifndef NO_DWARF 356 ifndef NO_DWARF
357 ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) 357 ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
358 msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); 358 msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
359 NO_DWARF := 1 359 NO_DWARF := 1
360 else 360 else
361 CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) 361 CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
@@ -380,7 +380,7 @@ ifndef NO_LIBELF
380 CFLAGS += -DHAVE_BPF_PROLOGUE 380 CFLAGS += -DHAVE_BPF_PROLOGUE
381 $(call detected,CONFIG_BPF_PROLOGUE) 381 $(call detected,CONFIG_BPF_PROLOGUE)
382 else 382 else
383 msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset()); 383 msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
384 endif 384 endif
385 else 385 else
386 msg := $(warning DWARF support is off, BPF prologue is disabled); 386 msg := $(warning DWARF support is off, BPF prologue is disabled);
@@ -406,7 +406,7 @@ ifdef PERF_HAVE_JITDUMP
406 endif 406 endif
407endif 407endif
408 408
409ifeq ($(ARCH),powerpc) 409ifeq ($(SRCARCH),powerpc)
410 ifndef NO_DWARF 410 ifndef NO_DWARF
411 CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX 411 CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
412 endif 412 endif
@@ -487,7 +487,7 @@ else
487endif 487endif
488 488
489ifndef NO_LOCAL_LIBUNWIND 489ifndef NO_LOCAL_LIBUNWIND
490 ifeq ($(ARCH),$(filter $(ARCH),arm arm64)) 490 ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
491 $(call feature_check,libunwind-debug-frame) 491 $(call feature_check,libunwind-debug-frame)
492 ifneq ($(feature-libunwind-debug-frame), 1) 492 ifneq ($(feature-libunwind-debug-frame), 1)
493 msg := $(warning No debug_frame support found in libunwind); 493 msg := $(warning No debug_frame support found in libunwind);
@@ -740,7 +740,7 @@ ifeq (${IS_64_BIT}, 1)
740 NO_PERF_READ_VDSO32 := 1 740 NO_PERF_READ_VDSO32 := 1
741 endif 741 endif
742 endif 742 endif
743 ifneq ($(ARCH), x86) 743 ifneq ($(SRCARCH), x86)
744 NO_PERF_READ_VDSOX32 := 1 744 NO_PERF_READ_VDSOX32 := 1
745 endif 745 endif
746 ifndef NO_PERF_READ_VDSOX32 746 ifndef NO_PERF_READ_VDSOX32
@@ -769,7 +769,7 @@ ifdef LIBBABELTRACE
769endif 769endif
770 770
771ifndef NO_AUXTRACE 771ifndef NO_AUXTRACE
772 ifeq ($(ARCH),x86) 772 ifeq ($(SRCARCH),x86)
773 ifeq ($(feature-get_cpuid), 0) 773 ifeq ($(feature-get_cpuid), 0)
774 msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); 774 msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
775 NO_AUXTRACE := 1 775 NO_AUXTRACE := 1
@@ -872,7 +872,7 @@ sysconfdir = $(prefix)/etc
872ETC_PERFCONFIG = etc/perfconfig 872ETC_PERFCONFIG = etc/perfconfig
873endif 873endif
874ifndef lib 874ifndef lib
875ifeq ($(ARCH)$(IS_64_BIT), x861) 875ifeq ($(SRCARCH)$(IS_64_BIT), x861)
876lib = lib64 876lib = lib64
877else 877else
878lib = lib 878lib = lib
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 79fe31f20a17..5008f51a08a2 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -226,7 +226,7 @@ endif
226 226
227ifeq ($(config),0) 227ifeq ($(config),0)
228include $(srctree)/tools/scripts/Makefile.arch 228include $(srctree)/tools/scripts/Makefile.arch
229-include arch/$(ARCH)/Makefile 229-include arch/$(SRCARCH)/Makefile
230endif 230endif
231 231
232# The FEATURE_DUMP_EXPORT holds location of the actual 232# The FEATURE_DUMP_EXPORT holds location of the actual
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index 109eb75cf7de..d9b6af837c7d 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
1libperf-y += common.o 1libperf-y += common.o
2libperf-y += $(ARCH)/ 2libperf-y += $(SRCARCH)/
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 9213a1273697..999a4e878162 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -2,7 +2,7 @@ hostprogs := jevents
2 2
3jevents-y += json.o jsmn.o jevents.o 3jevents-y += json.o jsmn.o jevents.o
4pmu-events-y += pmu-events.o 4pmu-events-y += pmu-events.o
5JDIR = pmu-events/arch/$(ARCH) 5JDIR = pmu-events/arch/$(SRCARCH)
6JSON = $(shell [ -d $(JDIR) ] && \ 6JSON = $(shell [ -d $(JDIR) ] && \
7 find $(JDIR) -name '*.json' -o -name 'mapfile.csv') 7 find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
8# 8#
@@ -10,4 +10,4 @@ JSON = $(shell [ -d $(JDIR) ] && \
10# directory and create tables in pmu-events.c. 10# directory and create tables in pmu-events.c.
11# 11#
12$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS) 12$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
13 $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) 13 $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index af58ebc243ef..84222bdb8689 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -75,7 +75,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B
75 $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ 75 $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
76 $(Q)echo ';' >> $@ 76 $(Q)echo ';' >> $@
77 77
78ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc)) 78ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
79perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o 79perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
80endif 80endif
81 81
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 32873ec91a4e..cf00ebad2ef5 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -83,7 +83,7 @@ int test__task_exit(int subtest __maybe_unused)
83 83
84 evsel = perf_evlist__first(evlist); 84 evsel = perf_evlist__first(evlist);
85 evsel->attr.task = 1; 85 evsel->attr.task = 1;
86 evsel->attr.sample_freq = 0; 86 evsel->attr.sample_freq = 1;
87 evsel->attr.inherit = 0; 87 evsel->attr.inherit = 0;
88 evsel->attr.watermark = 0; 88 evsel->attr.watermark = 0;
89 evsel->attr.wakeup_events = 1; 89 evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e4f7902d5afa..cda44b0e821c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -273,8 +273,20 @@ struct perf_evsel *perf_evsel__new_cycles(void)
273 struct perf_evsel *evsel; 273 struct perf_evsel *evsel;
274 274
275 event_attr_init(&attr); 275 event_attr_init(&attr);
276 /*
277 * Unnamed union member, not supported as struct member named
278 * initializer in older compilers such as gcc 4.4.7
279 *
280 * Just for probing the precise_ip:
281 */
282 attr.sample_period = 1;
276 283
277 perf_event_attr__set_max_precise_ip(&attr); 284 perf_event_attr__set_max_precise_ip(&attr);
285 /*
286 * Now let the usual logic to set up the perf_event_attr defaults
287 * to kick in when we return and before perf_evsel__open() is called.
288 */
289 attr.sample_period = 0;
278 290
279 evsel = perf_evsel__new(&attr); 291 evsel = perf_evsel__new(&attr);
280 if (evsel == NULL) 292 if (evsel == NULL)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 5cac8d5e009a..b5baff3007bb 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -841,7 +841,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
841 841
842/* 842/*
843 * default get_cpuid(): nothing gets recorded 843 * default get_cpuid(): nothing gets recorded
844 * actual implementation must be in arch/$(ARCH)/util/header.c 844 * actual implementation must be in arch/$(SRCARCH)/util/header.c
845 */ 845 */
846int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 846int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
847{ 847{
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index da45c4be5fb3..7755a5e0fe5e 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -178,6 +178,14 @@ frame_callback(Dwfl_Frame *state, void *arg)
178 Dwarf_Addr pc; 178 Dwarf_Addr pc;
179 bool isactivation; 179 bool isactivation;
180 180
181 if (!dwfl_frame_pc(state, &pc, NULL)) {
182 pr_err("%s", dwfl_errmsg(-1));
183 return DWARF_CB_ABORT;
184 }
185
186 // report the module before we query for isactivation
187 report_module(pc, ui);
188
181 if (!dwfl_frame_pc(state, &pc, &isactivation)) { 189 if (!dwfl_frame_pc(state, &pc, &isactivation)) {
182 pr_err("%s", dwfl_errmsg(-1)); 190 pr_err("%s", dwfl_errmsg(-1));
183 return DWARF_CB_ABORT; 191 return DWARF_CB_ABORT;
diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h
index 19d0604f8694..487cbfb89beb 100644
--- a/tools/testing/selftests/bpf/bpf_endian.h
+++ b/tools/testing/selftests/bpf/bpf_endian.h
@@ -1,23 +1,42 @@
1#ifndef __BPF_ENDIAN__ 1#ifndef __BPF_ENDIAN__
2#define __BPF_ENDIAN__ 2#define __BPF_ENDIAN__
3 3
4#include <asm/byteorder.h> 4#include <linux/swab.h>
5 5
6#if __BYTE_ORDER == __LITTLE_ENDIAN 6/* LLVM's BPF target selects the endianness of the CPU
7# define __bpf_ntohs(x) __builtin_bswap16(x) 7 * it compiles on, or the user specifies (bpfel/bpfeb),
8# define __bpf_htons(x) __builtin_bswap16(x) 8 * respectively. The used __BYTE_ORDER__ is defined by
9#elif __BYTE_ORDER == __BIG_ENDIAN 9 * the compiler, we cannot rely on __BYTE_ORDER from
10# define __bpf_ntohs(x) (x) 10 * libc headers, since it doesn't reflect the actual
11# define __bpf_htons(x) (x) 11 * requested byte order.
12 *
13 * Note, LLVM's BPF target has different __builtin_bswapX()
14 * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE
15 * in bpfel and bpfeb case, which means below, that we map
16 * to cpu_to_be16(). We could use it unconditionally in BPF
17 * case, but better not rely on it, so that this header here
18 * can be used from application and BPF program side, which
19 * use different targets.
20 */
21#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
22# define __bpf_ntohs(x) __builtin_bswap16(x)
23# define __bpf_htons(x) __builtin_bswap16(x)
24# define __bpf_constant_ntohs(x) ___constant_swab16(x)
25# define __bpf_constant_htons(x) ___constant_swab16(x)
26#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
27# define __bpf_ntohs(x) (x)
28# define __bpf_htons(x) (x)
29# define __bpf_constant_ntohs(x) (x)
30# define __bpf_constant_htons(x) (x)
12#else 31#else
13# error "Fix your __BYTE_ORDER?!" 32# error "Fix your compiler's __BYTE_ORDER__?!"
14#endif 33#endif
15 34
16#define bpf_htons(x) \ 35#define bpf_htons(x) \
17 (__builtin_constant_p(x) ? \ 36 (__builtin_constant_p(x) ? \
18 __constant_htons(x) : __bpf_htons(x)) 37 __bpf_constant_htons(x) : __bpf_htons(x))
19#define bpf_ntohs(x) \ 38#define bpf_ntohs(x) \
20 (__builtin_constant_p(x) ? \ 39 (__builtin_constant_p(x) ? \
21 __constant_ntohs(x) : __bpf_ntohs(x)) 40 __bpf_constant_ntohs(x) : __bpf_ntohs(x))
22 41
23#endif 42#endif /* __BPF_ENDIAN__ */
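The reworked bpf_endian.h keys its byte-order choice off the compiler-defined __BYTE_ORDER__ (which clang's bpfel/bpfeb targets set to the requested BPF endianness) instead of libc's __BYTE_ORDER, and splits each helper into a constant-folding form for literals and a __builtin_bswap16()-based form for runtime values. A stand-alone sketch of that constant/runtime split for a little-endian host; const_swap16() and my_htons() are local stand-ins for ___constant_swab16() and bpf_htons():

#include <stdio.h>
#include <stdint.h>

/* Compile-time swap usable on integer constants (stand-in for the
 * kernel's ___constant_swab16). */
#define const_swap16(x) ((uint16_t)((((x) & 0x00ffU) << 8) | \
				    (((x) & 0xff00U) >> 8)))

/* Pick the constant-folding form for literals and the byteswap builtin
 * for runtime values, as bpf_htons() does on little-endian targets. */
#define my_htons(x) \
	(__builtin_constant_p(x) ? const_swap16(x) : __builtin_bswap16(x))

int main(void)
{
	uint16_t port = 443;	/* runtime value: uses __builtin_bswap16() */

	printf("constant: 0x%04x\n", my_htons(0x1234));	/* 0x3412 */
	printf("runtime:  0x%04x\n", my_htons(port));
	return 0;
}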