author    David S. Miller <davem@davemloft.net>  2018-12-20 13:53:28 -0500
committer David S. Miller <davem@davemloft.net>  2018-12-20 14:53:36 -0500
commit    2be09de7d6a06f58e768de1255a687c9aaa66606 (patch)
tree      298f9e04caf105873d987e807eccba27710a49cc
parent    44a7b3b6e3a458f9549c2cc28e74ecdc470e42f1 (diff)
parent    1d51b4b1d3f2db0d6d144175e31a84e472fbd99a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Lots of conflicts, but happily all cases were overlapping changes, parallel adds, and things of that nature. Thanks to Stephen Rothwell, Saeed Mahameed, and others for their guidance in these resolutions.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  CREDITS | 8
-rw-r--r--  Documentation/core-api/xarray.rst | 5
-rw-r--r--  Documentation/media/uapi/v4l/extended-controls.rst | 10
-rw-r--r--  MAINTAINERS | 27
-rw-r--r--  Makefile | 15
-rw-r--r--  arch/alpha/kernel/setup.c | 1
-rw-r--r--  arch/alpha/mm/numa.c | 6
-rw-r--r--  arch/arm/boot/dts/arm-realview-pb1176.dts | 4
-rw-r--r--  arch/arm/boot/dts/arm-realview-pb11mp.dts | 4
-rw-r--r--  arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts | 2
-rw-r--r--  arch/arm/boot/dts/bcm2837-rpi-3-b.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx7d-nitrogen7.dts | 9
-rw-r--r--  arch/arm/boot/dts/imx7d-pico.dtsi | 22
-rw-r--r--  arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts | 4
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6sx.c | 2
-rw-r--r--  arch/arm/mach-mmp/cputype.h | 6
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-ap806.dtsi | 27
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts | 7
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts | 7
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622.dtsi | 10
-rw-r--r--  arch/arm64/include/asm/memory.h | 9
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm64/mm/init.c | 8
-rw-r--r--  arch/m68k/kernel/setup_mm.c | 2
-rw-r--r--  arch/m68k/mm/motorola.c | 2
-rw-r--r--  arch/powerpc/boot/Makefile | 2
-rw-r--r--  arch/powerpc/boot/crt0.S | 4
-rw-r--r--  arch/powerpc/include/asm/perf_event.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/bpf_perf_event.h | 9
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 6
-rw-r--r--  arch/powerpc/kernel/msi.c | 7
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 7
-rw-r--r--  arch/powerpc/mm/dump_linuxpagetables.c | 1
-rw-r--r--  arch/powerpc/mm/init_64.c | 19
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 3
-rw-r--r--  arch/powerpc/platforms/pseries/papr_scm.c | 39
-rw-r--r--  arch/sh/include/asm/io.h | 1
-rw-r--r--  arch/x86/include/asm/msr-index.h | 1
-rw-r--r--  arch/x86/kvm/vmx.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  block/bio.c | 3
-rw-r--r--  block/blk-zoned.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-qcs404.c | 2
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h | 7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c | 78
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c | 20
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 591
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.h | 26
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 37
-rw-r--r--  drivers/hid/hid-ids.h | 7
-rw-r--r--  drivers/hid/hid-ite.c | 1
-rw-r--r--  drivers/hid/hid-quirks.c | 2
-rw-r--r--  drivers/hv/Kconfig | 2
-rw-r--r--  drivers/hv/vmbus_drv.c | 20
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 9
-rw-r--r--  drivers/md/dm-cache-metadata.c | 4
-rw-r--r--  drivers/md/dm-thin.c | 72
-rw-r--r--  drivers/md/dm-zoned-target.c | 122
-rw-r--r--  drivers/md/dm.c | 2
-rw-r--r--  drivers/media/Kconfig | 13
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c | 44
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-v4l2.c | 13
-rw-r--r--  drivers/media/media-device.c | 4
-rw-r--r--  drivers/media/platform/vicodec/vicodec-core.c | 13
-rw-r--r--  drivers/media/platform/vivid/vivid-sdr-cap.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vbi-cap.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vbi-out.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-cap.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-out.c | 2
-rw-r--r--  drivers/media/platform/vsp1/vsp1_lif.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c | 4
-rw-r--r--  drivers/mmc/core/block.c | 15
-rw-r--r--  drivers/mmc/core/mmc.c | 24
-rw-r--r--  drivers/mmc/host/omap.c | 11
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-omap.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 8
-rw-r--r--  drivers/mmc/host/sdhci.c | 22
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 70
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 48
-rw-r--r--  drivers/net/ethernet/cadence/macb_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 503
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 43
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 18
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 1
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 11
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 28
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 4
-rw-r--r--  drivers/net/ieee802154/mac802154_hwsim.c | 4
-rw-r--r--  drivers/net/phy/phy_device.c | 7
-rw-r--r--  drivers/net/usb/hso.c | 18
-rw-r--r--  drivers/net/usb/lan78xx.c | 4
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/r8152.c | 33
-rw-r--r--  drivers/net/vxlan.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 28
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/thermal.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 9
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n.c | 5
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 96
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 1
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/pci/pcie/aer.c | 2
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.c | 3
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sdm660.c | 28
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r--  drivers/staging/media/sunxi/cedrus/Kconfig | 1
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_hw.c | 4
-rw-r--r--  drivers/thermal/hisi_thermal.c | 4
-rw-r--r--  drivers/thermal/st/stm_thermal.c | 12
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 29
-rw-r--r--  drivers/uio/uio_hv_generic.c | 7
-rw-r--r--  drivers/usb/host/xhci-hub.c | 3
-rw-r--r--  drivers/usb/host/xhci.h | 4
-rw-r--r--  drivers/usb/serial/option.c | 16
-rw-r--r--  drivers/vhost/net.c | 8
-rw-r--r--  drivers/vhost/vhost.c | 23
-rw-r--r--  drivers/video/backlight/pwm_bl.c | 41
-rw-r--r--  fs/aio.c | 2
-rw-r--r--  fs/ceph/super.c | 4
-rw-r--r--  fs/ceph/super.h | 4
-rw-r--r--  fs/fuse/dir.c | 26
-rw-r--r--  fs/fuse/file.c | 64
-rw-r--r--  fs/fuse/fuse_i.h | 4
-rw-r--r--  fs/fuse/inode.c | 3
-rw-r--r--  fs/overlayfs/dir.c | 14
-rw-r--r--  fs/overlayfs/export.c | 6
-rw-r--r--  fs/overlayfs/inode.c | 17
-rw-r--r--  fs/userfaultfd.c | 3
-rw-r--r--  include/asm-generic/fixmap.h | 1
-rw-r--r--  include/linux/filter.h | 2
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 10
-rw-r--r--  include/linux/mm_types.h | 5
-rw-r--r--  include/linux/mmzone.h | 6
-rw-r--r--  include/linux/mod_devicetable.h | 2
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 12
-rw-r--r--  include/linux/t10-pi.h | 9
-rw-r--r--  include/linux/xarray.h | 54
-rw-r--r--  include/media/mpeg2-ctrls.h | 86
-rw-r--r--  include/media/v4l2-ctrls.h | 6
-rw-r--r--  include/media/videobuf2-core.h | 2
-rw-r--r--  include/net/ip_tunnels.h | 19
-rw-r--r--  include/net/sock.h | 25
-rw-r--r--  include/net/tls.h | 6
-rw-r--r--  include/net/xfrm.h | 1
-rw-r--r--  include/uapi/asm-generic/Kbuild.asm | 1
-rw-r--r--  include/uapi/linux/blkzoned.h | 4
-rw-r--r--  include/uapi/linux/if_tunnel.h | 20
-rw-r--r--  include/uapi/linux/in.h | 10
-rw-r--r--  include/uapi/linux/input-event-codes.h | 9
-rw-r--r--  include/uapi/linux/net_tstamp.h | 4
-rw-r--r--  include/uapi/linux/netlink.h | 2
-rw-r--r--  include/uapi/linux/v4l2-controls.h | 68
-rw-r--r--  include/uapi/linux/videodev2.h | 4
-rw-r--r--  init/Kconfig | 4
-rw-r--r--  kernel/bpf/core.c | 21
-rw-r--r--  kernel/bpf/verifier.c | 13
-rw-r--r--  kernel/dma/direct.c | 7
-rw-r--r--  kernel/trace/ftrace.c | 1
-rw-r--r--  kernel/trace/trace_events_filter.c | 5
-rw-r--r--  kernel/trace/trace_events_trigger.c | 6
-rw-r--r--  lib/radix-tree.c | 4
-rw-r--r--  lib/test_xarray.c | 155
-rw-r--r--  lib/xarray.c | 8
-rw-r--r--  mm/hugetlb.c | 5
-rw-r--r--  mm/memblock.c | 2
-rw-r--r--  mm/shmem.c | 4
-rw-r--r--  mm/sparse.c | 16
-rw-r--r--  net/can/raw.c | 2
-rw-r--r--  net/core/flow_dissector.c | 6
-rw-r--r--  net/core/gro_cells.c | 1
-rw-r--r--  net/core/neighbour.c | 7
-rw-r--r--  net/core/sysctl_net_core.c | 20
-rw-r--r--  net/ipv4/devinet.c | 5
-rw-r--r--  net/ipv4/ip_forward.c | 1
-rw-r--r--  net/ipv4/ip_fragment.c | 18
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/ipmr.c | 4
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 1
-rw-r--r--  net/ipv6/ip6_udp_tunnel.c | 3
-rw-r--r--  net/ipv6/ip6mr.c | 4
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/mac80211/iface.c | 3
-rw-r--r--  net/mac80211/main.c | 2
-rw-r--r--  net/mac80211/status.c | 5
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c | 2
-rw-r--r--  net/netfilter/nf_conncount.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_seqadj.c | 7
-rw-r--r--  net/netfilter/nf_nat_core.c | 3
-rw-r--r--  net/netfilter/nf_tables_api.c | 21
-rw-r--r--  net/netfilter/nf_tables_core.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/packet/af_packet.c | 6
-rw-r--r--  net/rds/message.c | 24
-rw-r--r--  net/rds/rdma.c | 75
-rw-r--r--  net/rds/rds.h | 23
-rw-r--r--  net/rds/send.c | 61
-rw-r--r--  net/sched/cls_flower.c | 7
-rw-r--r--  net/sctp/ipv6.c | 1
-rw-r--r--  net/smc/af_smc.c | 14
-rw-r--r--  net/smc/smc.h | 4
-rw-r--r--  net/sunrpc/clnt.c | 1
-rw-r--r--  net/sunrpc/xprt.c | 35
-rw-r--r--  net/sunrpc/xprtsock.c | 10
-rw-r--r--  net/tipc/socket.c | 40
-rw-r--r--  net/tipc/udp_media.c | 9
-rw-r--r--  net/tls/tls_main.c | 44
-rw-r--r--  net/vmw_vsock/af_vsock.c | 7
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 67
-rw-r--r--  net/wireless/nl80211.c | 4
-rw-r--r--  net/xfrm/xfrm_input.c | 7
-rw-r--r--  net/xfrm/xfrm_output.c | 1
-rw-r--r--  net/xfrm/xfrm_state.c | 10
-rw-r--r--  net/xfrm/xfrm_user.c | 4
-rwxr-xr-x  scripts/checkstack.pl | 4
-rwxr-xr-x  scripts/spdxcheck.py | 6
-rw-r--r--  security/integrity/ima/ima_policy.c | 10
-rw-r--r--  security/keys/keyctl_pkey.c | 2
-rw-r--r--  security/keys/trusted.c | 2
-rw-r--r--  sound/firewire/fireface/ff-protocol-ff400.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 77
-rw-r--r--  tools/include/uapi/linux/netlink.h | 2
-rw-r--r--  tools/testing/radix-tree/Makefile | 1
-rw-r--r--  tools/testing/radix-tree/main.c | 1
-rw-r--r--  tools/testing/radix-tree/regression.h | 1
-rw-r--r--  tools/testing/radix-tree/regression4.c | 79
-rw-r--r--  tools/testing/selftests/bpf/bpf_flow.c | 36
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 37
-rw-r--r--  tools/testing/selftests/net/Makefile | 1
-rwxr-xr-x  tools/testing/selftests/net/test_vxlan_fdb_changelink.sh | 29
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c | 9
-rw-r--r--  tools/virtio/linux/kernel.h | 4
-rw-r--r--  virt/kvm/coalesced_mmio.c | 6
314 files changed, 3466 insertions(+), 1727 deletions(-)
diff --git a/CREDITS b/CREDITS
index c9273393fe14..7d397ee67524 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2541,6 +2541,10 @@ S: Ormond
 S: Victoria 3163
 S: Australia
 
+N: Eric Miao
+E: eric.y.miao@gmail.com
+D: MMP support
+
 N: Pauline Middelink
 E: middelin@polyware.nl
 D: General low-level bug fixes, /proc fixes, identd support
@@ -4115,6 +4119,10 @@ S: 1507 145th Place SE #B5
 S: Bellevue, Washington 98007
 S: USA
 
+N: Haojian Zhuang
+E: haojian.zhuang@gmail.com
+D: MMP support
+
 N: Richard Zidlicky
 E: rz@linux-m68k.org, rdzidlic@geocities.com
 W: http://www.geocities.com/rdzidlic
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index dbe96cb5558e..6a6d67acaf69 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -187,6 +187,8 @@ Takes xa_lock internally:
  * :c:func:`xa_erase_bh`
  * :c:func:`xa_erase_irq`
  * :c:func:`xa_cmpxchg`
+ * :c:func:`xa_cmpxchg_bh`
+ * :c:func:`xa_cmpxchg_irq`
  * :c:func:`xa_store_range`
  * :c:func:`xa_alloc`
  * :c:func:`xa_alloc_bh`
@@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
 context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
 in the interrupt handler.  Some of the more common patterns have helper
 functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
-:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
+:c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh`
+and :c:func:`xa_cmpxchg_irq`.
 
 Sometimes you need to protect access to the XArray with a mutex because
 that lock sits above another mutex in the locking hierarchy.  That does
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 65a1d873196b..027358b91082 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -1505,6 +1505,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
     configuring a stateless hardware decoding pipeline for MPEG-2.
     The bitstream parameters are defined according to :ref:`mpeg2part2`.
 
+    .. note::
+
+       This compound control is not yet part of the public kernel API and
+       it is expected to change.
+
 .. c:type:: v4l2_ctrl_mpeg2_slice_params
 
 .. cssclass:: longtable
@@ -1625,6 +1630,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
     Specifies quantization matrices (as extracted from the bitstream) for the
     associated MPEG-2 slice data.
 
+    .. note::
+
+       This compound control is not yet part of the public kernel API and
+       it is expected to change.
+
 .. c:type:: v4l2_ctrl_mpeg2_quantization
 
 .. cssclass:: longtable
diff --git a/MAINTAINERS b/MAINTAINERS
index 95baadb0b62b..6de660a5efe7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1739,13 +1739,17 @@ ARM/Mediatek SoC support
 M:	Matthias Brugger <matthias.bgg@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+W:	https://mtk.bcnfs.org/
+C:	irc://chat.freenode.net/linux-mediatek
 S:	Maintained
 F:	arch/arm/boot/dts/mt6*
 F:	arch/arm/boot/dts/mt7*
 F:	arch/arm/boot/dts/mt8*
 F:	arch/arm/mach-mediatek/
 F:	arch/arm64/boot/dts/mediatek/
+F:	drivers/soc/mediatek/
 N:	mtk
+N:	mt[678]
 K:	mediatek
 
 ARM/Mediatek USB3 PHY DRIVER
1751ARM/Mediatek USB3 PHY DRIVER 1755ARM/Mediatek USB3 PHY DRIVER
@@ -4843,6 +4847,7 @@ F: include/uapi/drm/vmwgfx_drm.h
 
 DRM DRIVERS
 M:	David Airlie <airlied@linux.ie>
+M:	Daniel Vetter <daniel@ffwll.ch>
 L:	dri-devel@lists.freedesktop.org
 T:	git git://anongit.freedesktop.org/drm/drm
 B:	https://bugs.freedesktop.org/
@@ -6902,8 +6907,10 @@ Hyper-V CORE AND DRIVERS
6902M: "K. Y. Srinivasan" <kys@microsoft.com> 6907M: "K. Y. Srinivasan" <kys@microsoft.com>
6903M: Haiyang Zhang <haiyangz@microsoft.com> 6908M: Haiyang Zhang <haiyangz@microsoft.com>
6904M: Stephen Hemminger <sthemmin@microsoft.com> 6909M: Stephen Hemminger <sthemmin@microsoft.com>
6910M: Sasha Levin <sashal@kernel.org>
6911T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
6905L: devel@linuxdriverproject.org 6912L: devel@linuxdriverproject.org
6906S: Maintained 6913S: Supported
6907F: Documentation/networking/device_drivers/microsoft/netvsc.txt 6914F: Documentation/networking/device_drivers/microsoft/netvsc.txt
6908F: arch/x86/include/asm/mshyperv.h 6915F: arch/x86/include/asm/mshyperv.h
6909F: arch/x86/include/asm/trace/hyperv.h 6916F: arch/x86/include/asm/trace/hyperv.h
@@ -8932,7 +8939,7 @@ F: arch/mips/boot/dts/img/pistachio_marduk.dts
 
 MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
 M:	Andrew Lunn <andrew@lunn.ch>
-M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M:	Vivien Didelot <vivien.didelot@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/dsa/mv88e6xxx/
@@ -9437,6 +9444,13 @@ F: drivers/media/platform/mtk-vpu/
 F:	Documentation/devicetree/bindings/media/mediatek-vcodec.txt
 F:	Documentation/devicetree/bindings/media/mediatek-vpu.txt
 
+MEDIATEK MT76 WIRELESS LAN DRIVER
+M:	Felix Fietkau <nbd@nbd.name>
+M:	Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+L:	linux-wireless@vger.kernel.org
+S:	Maintained
+F:	drivers/net/wireless/mediatek/mt76/
+
 MEDIATEK MT7601U WIRELESS LAN DRIVER
 M:	Jakub Kicinski <kubakici@wp.pl>
 L:	linux-wireless@vger.kernel.org
@@ -10000,12 +10014,9 @@ S: Odd Fixes
 F:	drivers/media/radio/radio-miropcm20*
 
 MMP SUPPORT
-M:	Eric Miao <eric.y.miao@gmail.com>
-M:	Haojian Zhuang <haojian.zhuang@gmail.com>
+R:	Lubomir Rintel <lkundrak@v3.sk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T:	git git://github.com/hzhuang1/linux.git
-T:	git git://git.linaro.org/people/ycmiao/pxa-linux.git
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/boot/dts/mmp*
 F:	arch/arm/mach-mmp/
 
@@ -10411,7 +10422,7 @@ F: drivers/net/wireless/
 
 NETWORKING [DSA]
 M:	Andrew Lunn <andrew@lunn.ch>
-M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M:	Vivien Didelot <vivien.didelot@gmail.com>
 M:	Florian Fainelli <f.fainelli@gmail.com>
 S:	Maintained
 F:	Documentation/devicetree/bindings/net/dsa/
diff --git a/Makefile b/Makefile
index f2c3423c3062..d45856f80057 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 20
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -962,11 +962,6 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    ifdef CONFIG_UNWINDER_ORC
-      $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    else
-      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
@@ -1125,6 +1120,14 @@ uapi-asm-generic:
 
 PHONY += prepare-objtool
 prepare-objtool: $(objtool_target)
+ifeq ($(SKIP_STACK_VALIDATION),1)
+ifdef CONFIG_UNWINDER_ORC
+	@echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+	@false
+else
+	@echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+endif
+endif
 
 # Generate some files
 # ---------------------------------------------------------------------------
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index a37fd990bd55..4b5b1b244f86 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -634,6 +634,7 @@ setup_arch(char **cmdline_p)
 
 	/* Find our memory.  */
 	setup_memory(kernel_end);
+	memblock_set_bottom_up(true);
 
 	/* First guess at cpu cache sizes.  Do this before init_arch.  */
 	determine_cpu_caches(cpu->type);
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 74846553e3f1..d0b73371e985 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -144,14 +144,14 @@ setup_memory_node(int nid, void *kernel_end)
 	if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
 		panic("kernel loaded out of ram");
 
+	memblock_add(PFN_PHYS(node_min_pfn),
+		     (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
+
 	/* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
 	   Note that we round this down, not up - node memory
 	   has much larger alignment than 8Mb, so it's safe. */
 	node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
 
-	memblock_add(PFN_PHYS(node_min_pfn),
-		     (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
-
 	NODE_DATA(nid)->node_start_pfn = node_min_pfn;
 	NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
 
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index f2a1d25eb6cf..83e0fbc4a1a1 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -45,7 +45,7 @@
 	};
 
 	/* The voltage to the MMC card is hardwired at 3.3V */
-	vmmc: fixedregulator@0 {
+	vmmc: regulator-vmmc {
 		compatible = "regulator-fixed";
 		regulator-name = "vmmc";
 		regulator-min-microvolt = <3300000>;
@@ -53,7 +53,7 @@
 		regulator-boot-on;
 	};
 
-	veth: fixedregulator@0 {
+	veth: regulator-veth {
 		compatible = "regulator-fixed";
 		regulator-name = "veth";
 		regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts
index 7f9cbdf33a51..2f6aa24a0b67 100644
--- a/arch/arm/boot/dts/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts
@@ -145,7 +145,7 @@
 	};
 
 	/* The voltage to the MMC card is hardwired at 3.3V */
-	vmmc: fixedregulator@0 {
+	vmmc: regulator-vmmc {
 		compatible = "regulator-fixed";
 		regulator-name = "vmmc";
 		regulator-min-microvolt = <3300000>;
@@ -153,7 +153,7 @@
 		regulator-boot-on;
 	};
 
-	veth: fixedregulator@0 {
+	veth: regulator-veth {
 		compatible = "regulator-fixed";
 		regulator-name = "veth";
 		regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
index 4adb85e66be3..93762244be7f 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
@@ -31,7 +31,7 @@
 
 	wifi_pwrseq: wifi-pwrseq {
 		compatible = "mmc-pwrseq-simple";
-		reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
index c318bcbc6ba7..89e6fd547c75 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -26,7 +26,7 @@
 
 	wifi_pwrseq: wifi-pwrseq {
 		compatible = "mmc-pwrseq-simple";
-		reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
index d8aac4a2d02a..177d21fdeb28 100644
--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
@@ -86,13 +86,17 @@
 		compatible = "regulator-fixed";
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
-		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
-		clock-names = "slow";
 		regulator-name = "reg_wlan";
 		startup-delay-us = <70000>;
 		gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	usdhc2_pwrseq: usdhc2_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+		clock-names = "ext_clock";
+	};
 };
 
 &adc1 {
@@ -375,6 +379,7 @@
 	bus-width = <4>;
 	non-removable;
 	vmmc-supply = <&reg_wlan>;
+	mmc-pwrseq = <&usdhc2_pwrseq>;
 	cap-power-off-card;
 	keep-power-in-suspend;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index 21973eb55671..f27b3849d3ff 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -100,6 +100,19 @@
 		regulator-min-microvolt = <1800000>;
 		regulator-max-microvolt = <1800000>;
 	};
+
+	usdhc2_pwrseq: usdhc2_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+		clock-names = "ext_clock";
+	};
+};
+
+&clks {
+	assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+			  <&clks IMX7D_CLKO2_ROOT_DIV>;
+	assigned-clock-parents = <&clks IMX7D_CKIL>;
+	assigned-clock-rates = <0>, <32768>;
 };
 
 &i2c4 {
@@ -199,12 +212,13 @@
 
 &usdhc2 { /* Wifi SDIO */
 	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_usdhc2>;
+	pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
 	no-1-8-v;
 	non-removable;
 	keep-power-in-suspend;
 	wakeup-source;
 	vmmc-supply = <&reg_ap6212>;
+	mmc-pwrseq = <&usdhc2_pwrseq>;
 	status = "okay";
 };
 
@@ -301,6 +315,12 @@
 };
 
 &iomuxc_lpsr {
+	pinctrl_wifi_clk: wificlkgrp {
+		fsl,pins = <
+			MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2	0x7d
+		>;
+	};
+
 	pinctrl_wdog: wdoggrp {
 		fsl,pins = <
 			MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B	0x74
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index 742d2946b08b..583a5a01642f 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -314,8 +314,8 @@
 
 &reg_dldo3 {
 	regulator-always-on;
-	regulator-min-microvolt = <2500000>;
-	regulator-max-microvolt = <2500000>;
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
 	regulator-name = "vcc-pd";
 };
 
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 243a108a940b..fd0053e47a15 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void)
 	 * except for power up sw2iso which need to be
 	 * larger than LDO ramp up time.
 	 */
-	imx_gpc_set_arm_power_up_timing(2, 1);
+	imx_gpc_set_arm_power_up_timing(0xf, 1);
 	imx_gpc_set_arm_power_down_timing(1, 1);
 
 	return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h
index 446edaeb78a7..a96abcf521b4 100644
--- a/arch/arm/mach-mmp/cputype.h
+++ b/arch/arm/mach-mmp/cputype.h
@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
 #define cpu_is_pxa910()	(0)
 #endif
 
-#ifdef CONFIG_CPU_MMP2
+#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
 static inline int cpu_is_mmp2(void)
 {
-	return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+	return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+		(((mmp_chip_id & 0xfff) == 0x410) ||
+		 ((mmp_chip_id & 0xfff) == 0x610));
 }
 #else
 #define cpu_is_mmp2()	(0)
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
index 64632c873888..01ea662afba8 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
@@ -20,28 +20,24 @@
 			compatible = "arm,cortex-a72", "arm,armv8";
 			reg = <0x000>;
 			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
 		};
 		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a72", "arm,armv8";
 			reg = <0x001>;
 			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
 		};
 		cpu2: cpu@100 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a72", "arm,armv8";
 			reg = <0x100>;
 			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
 		};
 		cpu3: cpu@101 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a72", "arm,armv8";
 			reg = <0x101>;
 			enable-method = "psci";
-			cpu-idle-states = <&CPU_SLEEP_0>;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 073610ac0a53..7d94c1fa592a 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -28,33 +28,6 @@
 		method = "smc";
 	};
 
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		idle_states {
-			entry_method = "arm,pcsi";
-
-			CPU_SLEEP_0: cpu-sleep-0 {
-				compatible = "arm,idle-state";
-				local-timer-stop;
-				arm,psci-suspend-param = <0x0010000>;
-				entry-latency-us = <80>;
-				exit-latency-us = <160>;
-				min-residency-us = <320>;
-			};
-
-			CLUSTER_SLEEP_0: cluster-sleep-0 {
-				compatible = "arm,idle-state";
-				local-timer-stop;
-				arm,psci-suspend-param = <0x1010000>;
-				entry-latency-us = <500>;
-				exit-latency-us = <1000>;
-				min-residency-us = <2500>;
-			};
-		};
-	};
-
 	ap806 {
 		#address-cells = <2>;
 		#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index 5d6005c9b097..710c5c3d87d3 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -16,8 +16,13 @@
 	model = "Bananapi BPI-R64";
 	compatible = "bananapi,bpi-r64", "mediatek,mt7622";
 
+	aliases {
+		serial0 = &uart0;
+	};
+
 	chosen {
-		bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+		stdout-path = "serial0:115200n8";
+		bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
 	};
 
 	cpus {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index dcad0869b84c..3f783348c66a 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -17,8 +17,13 @@
 	model = "MediaTek MT7622 RFB1 board";
 	compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
 
+	aliases {
+		serial0 = &uart0;
+	};
+
 	chosen {
-		bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+		stdout-path = "serial0:115200n8";
+		bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
 	};
 
 	cpus {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index fe0c875f1d95..14a1028ca3a6 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -227,16 +227,6 @@
 		#reset-cells = <1>;
 	};
 
-	timer: timer@10004000 {
-		compatible = "mediatek,mt7622-timer",
-			     "mediatek,mt6577-timer";
-		reg = <0 0x10004000 0 0x80>;
-		interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&infracfg CLK_INFRA_APXGPT_PD>,
-			 <&topckgen CLK_TOP_RTC>;
-		clock-names = "system-clk", "rtc-clk";
-	};
-
 	scpsys: scpsys@10006000 {
 		compatible = "mediatek,mt7622-scpsys",
 			     "syscon";
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ee20fc63899c..932c60e036cf 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -35,15 +35,6 @@
 #define PCI_IO_SIZE		SZ_16M
 
 /*
- * Log2 of the upper bound of the size of a struct page. Used for sizing
- * the vmemmap region only, does not affect actual memory footprint.
- * We don't use sizeof(struct page) directly since taking its size here
- * requires its definition to be available at this point in the inclusion
- * chain, and it may not be a power of 2 in the first place.
- */
-#define STRUCT_PAGE_MAX_SHIFT	6
-
-/*
  * VMEMMAP_SIZE - allows the whole linear region to be covered by
  * a struct page array
  */
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a3ac26284845..a53704406099 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -429,9 +429,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 						   prot,
 						   __builtin_return_address(0));
 	if (addr) {
-		memset(addr, 0, size);
 		if (!coherent)
 			__dma_flush_area(page_to_virt(page), iosize);
+		memset(addr, 0, size);
 	} else {
 		iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
 		dma_release_from_contiguous(dev, page,
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9b432d9fcada..0340e45655c6 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -610,14 +610,6 @@ void __init mem_init(void)
 	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
 #endif
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-	/*
-	 * Make sure we chose the upper bound of sizeof(struct page)
-	 * correctly when sizing the VMEMMAP array.
-	 */
-	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
-#endif
-
 	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index a1a3eaeaf58c..ad0195cbe042 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -164,8 +164,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
 					be32_to_cpu(m->addr);
 				m68k_memory[m68k_num_memory].size =
 					be32_to_cpu(m->size);
-				memblock_add(m68k_memory[m68k_num_memory].addr,
-					     m68k_memory[m68k_num_memory].size);
 				m68k_num_memory++;
 			} else
 				pr_warn("%s: too many memory chunks\n",
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 7497cf30bf1c..3f3d0bf36091 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -228,6 +228,7 @@ void __init paging_init(void)
 
 	min_addr = m68k_memory[0].addr;
 	max_addr = min_addr + m68k_memory[0].size;
+	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
 	for (i = 1; i < m68k_num_memory;) {
 		if (m68k_memory[i].addr < min_addr) {
 			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -238,6 +239,7 @@ void __init paging_init(void)
 				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
 			continue;
 		}
+		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
 		addr = m68k_memory[i].addr + m68k_memory[i].size;
 		if (addr > max_addr)
 			max_addr = addr;
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 39354365f54a..ed9883169190 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -197,7 +197,7 @@ $(obj)/empty.c:
 $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
 	$(Q)cp $< $@
 
-$(obj)/serial.c: $(obj)/autoconf.h
+$(srctree)/$(src)/serial.c: $(obj)/autoconf.h
 
 $(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/%
 	$(Q)cp $< $@
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 32dfe6d083f3..9b9d17437373 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -15,7 +15,7 @@
 RELA = 7
 RELACOUNT = 0x6ffffff9
 
-	.text
+	.data
 	/* A procedure descriptor used when booting this as a COFF file.
 	 * When making COFF, this comes first in the link and we're
 	 * linked at 0x500000.
@@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
 	.globl	_zimage_start_opd
 _zimage_start_opd:
 	.long	0x500000, 0, 0, 0
+	.text
+	b	_zimage_start
 
 #ifdef __powerpc64__
 .balign 8
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 8bf1b6351716..16a49819da9a 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -26,6 +26,8 @@
 #include <asm/ptrace.h>
 #include <asm/reg.h>
 
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
+
 /*
  * Overload regs->result to specify whether we should use the MSR (result
  * is zero) or the SIAR (result is non zero).
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index a658091a19f9..3712152206f3 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-generic-y += bpf_perf_event.h
 generic-y += param.h
 generic-y += poll.h
 generic-y += resource.h
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..b551b741653d
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 33b34a58fc62..5b9dce17f0c9 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void)
 
 	/* Now find out if one of these is out firmware console */
 	path = of_get_property(of_chosen, "linux,stdout-path", NULL);
+	if (path == NULL)
+		path = of_get_property(of_chosen, "stdout-path", NULL);
 	if (path != NULL) {
 		stdout = of_find_node_by_path(path);
 		if (stdout)
@@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void)
 	/* We are getting a weird phandle from OF ... */
 	/* ... So use the full path instead */
 	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+	if (name == NULL)
+		name = of_get_property(of_chosen, "stdout-path", NULL);
 	if (name == NULL) {
-		DBG(" no linux,stdout-path !\n");
+		DBG(" no stdout-path !\n");
 		return -ENODEV;
 	}
 	prom_stdout = of_find_node_by_path(name);
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index dab616a33b8d..f2197654be07 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
 {
 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
 
-	phb->controller_ops.teardown_msi_irqs(dev);
+	/*
+	 * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
+	 * so check the pointer again.
+	 */
+	if (phb->controller_ops.teardown_msi_irqs)
+		phb->controller_ops.teardown_msi_irqs(dev);
 }
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index afb819f4ca68..714c3480c52d 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -3266,12 +3266,17 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 	user_exit();
 
 	if (test_thread_flag(TIF_SYSCALL_EMU)) {
-		ptrace_report_syscall(regs);
 		/*
+		 * A nonzero return code from tracehook_report_syscall_entry()
+		 * tells us to prevent the syscall execution, but we are not
+		 * going to execute it anyway.
+		 *
 		 * Returning -1 will skip the syscall execution. We want to
 		 * avoid clobbering any register also, thus, not 'gotoing'
 		 * skip label.
 		 */
+		if (tracehook_report_syscall_entry(regs))
+			;
 		return -1;
 	}
 
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index 2b74f8adf4d0..6aa41669ac1a 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <asm/fixmap.h>
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7a9886f98b0c..a5091c034747 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
 	for (; start < end; start += page_size) {
-		void *p;
+		void *p = NULL;
 		int rc;
 
 		if (vmemmap_populated(start, page_size))
 			continue;
 
+		/*
+		 * Allocate from the altmap first if we have one. This may
+		 * fail due to alignment issues when using 16MB hugepages, so
+		 * fall back to system memory if the altmap allocation fail.
+		 */
 		if (altmap)
 			p = altmap_alloc_block_buf(page_size, altmap);
-		else
+		if (!p)
 			p = vmemmap_alloc_block_buf(page_size, node);
 		if (!p)
 			return -ENOMEM;
@@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 	unsigned long page_order = get_order(page_size);
+	unsigned long alt_start = ~0, alt_end = ~0;
+	unsigned long base_pfn;
 
 	start = _ALIGN_DOWN(start, page_size);
+	if (altmap) {
+		alt_start = altmap->base_pfn;
+		alt_end = altmap->base_pfn + altmap->reserve +
+			  altmap->free + altmap->alloc + altmap->align;
+	}
 
 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
 
@@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 		page = pfn_to_page(addr >> PAGE_SHIFT);
 		section_base = pfn_to_page(vmemmap_section_start(start));
 		nr_pages = 1 << page_order;
+		base_pfn = PHYS_PFN(addr);
 
-		if (altmap) {
+		if (base_pfn >= alt_start && base_pfn < alt_end) {
 			vmem_altmap_free(altmap, nr_pages);
 		} else if (PageReserved(page)) {
 			/* allocated from bootmem */
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 2e4bd32154b5..472b784f01eb 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -140,8 +140,7 @@ config IBMEBUS
 	  Bus device driver for GX bus based adapters.
 
 config PAPR_SCM
-	depends on PPC_PSERIES && MEMORY_HOTPLUG
-	select LIBNVDIMM
+	depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
 	tristate "Support for the PAPR Storage Class Memory interface"
 	help
 	  Enable access to hypervisor provided storage class memory.
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index ee9372b65ca5..7d6457ab5d34 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -55,7 +55,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
 	do {
 		rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
 				p->blocks, BIND_ANY_ADDR, token);
-		token = be64_to_cpu(ret[0]);
+		token = ret[0];
 		cond_resched();
 	} while (rc == H_BUSY);
 
@@ -64,7 +64,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
 		return -ENXIO;
 	}
 
-	p->bound_addr = be64_to_cpu(ret[1]);
+	p->bound_addr = ret[1];
 
 	dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
 
@@ -82,7 +82,7 @@ static int drc_pmem_unbind(struct papr_scm_priv *p)
 	do {
 		rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
 				p->bound_addr, p->blocks, token);
-		token = be64_to_cpu(ret);
+		token = ret[0];
 		cond_resched();
 	} while (rc == H_BUSY);
 
@@ -223,6 +223,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
 		goto err;
 	}

+	if (nvdimm_bus_check_dimm_count(p->bus, 1))
+		goto err;
+
 	/* now add the region */

 	memset(&mapping, 0, sizeof(mapping));
@@ -257,9 +260,12 @@ err:	nvdimm_bus_unregister(p->bus);

 static int papr_scm_probe(struct platform_device *pdev)
 {
-	uint32_t drc_index, metadata_size, unit_cap[2];
 	struct device_node *dn = pdev->dev.of_node;
+	u32 drc_index, metadata_size;
+	u64 blocks, block_size;
 	struct papr_scm_priv *p;
+	const char *uuid_str;
+	u64 uuid[2];
 	int rc;

 	/* check we have all the required DT properties */
@@ -268,8 +274,18 @@ static int papr_scm_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}

-	if (of_property_read_u32_array(dn, "ibm,unit-capacity", unit_cap, 2)) {
-		dev_err(&pdev->dev, "%pOF: missing unit-capacity!\n", dn);
+	if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
+		dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
+		return -ENODEV;
+	}
+
+	if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
+		dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
+		return -ENODEV;
+	}
+
+	if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
+		dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
 		return -ENODEV;
 	}

@@ -282,8 +298,13 @@ static int papr_scm_probe(struct platform_device *pdev)

 	p->dn = dn;
 	p->drc_index = drc_index;
-	p->block_size = unit_cap[0];
-	p->blocks = unit_cap[1];
+	p->block_size = block_size;
+	p->blocks = blocks;
+
+	/* We just need to ensure that set cookies are unique across */
+	uuid_parse(uuid_str, (uuid_t *) uuid);
+	p->nd_set.cookie1 = uuid[0];
+	p->nd_set.cookie2 = uuid[1];

 	/* might be zero */
 	p->metadata_size = metadata_size;
@@ -296,7 +317,7 @@ static int papr_scm_probe(struct platform_device *pdev)

 	/* setup the resource for the newly bound range */
 	p->res.start = p->bound_addr;
-	p->res.end = p->bound_addr + p->blocks * p->block_size;
+	p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
 	p->res.name = pdev->name;
 	p->res.flags = IORESOURCE_MEM;

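
The last hunk's "- 1" matters because Linux resource ranges are inclusive at both ends: a region of blocks * block_size bytes starting at bound_addr must end one byte earlier, or the resource claims one byte too many and collides with its neighbour. A toy model of the inclusive-end convention, in plain C rather than the kernel's struct resource:

#include <assert.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* both ends inclusive */

static uint64_t range_size(struct range r)
{
	return r.end - r.start + 1;
}

int main(void)
{
	uint64_t base = 0x100000000ULL, blocks = 4, block_size = 0x1000000;
	struct range r = { base, base + blocks * block_size - 1 };

	assert(range_size(r) == blocks * block_size);	/* no off-by-one */
	return 0;
}
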
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 98cb8c802b1a..4f7f235f15f8 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -24,6 +24,7 @@
 #define __IO_PREFIX generic
 #include <asm/io_generic.h>
 #include <asm/io_trapped.h>
+#include <asm-generic/pci_iomap.h>
 #include <mach/mangle-port.h>

 #define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c8f73efb4ece..9e39cc8bd989 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -390,6 +390,7 @@
 #define MSR_F15H_NB_PERF_CTR	0xc0010241
 #define MSR_F15H_PTSC		0xc0010280
 #define MSR_F15H_IC_CFG		0xc0011021
+#define MSR_F15H_EX_CFG		0xc001102c

 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 02edd9960e9d..8d5d984541be 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11985,6 +11985,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		kunmap(vmx->nested.pi_desc_page);
 		kvm_release_page_dirty(vmx->nested.pi_desc_page);
 		vmx->nested.pi_desc_page = NULL;
+		vmx->nested.pi_desc = NULL;
+		vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
 	}
 	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
 	if (is_error_page(page))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d02937760c3b..f049ecfac7bb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2426,6 +2426,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_PATCH_LOADER:
 	case MSR_AMD64_BU_CFG2:
 	case MSR_AMD64_DC_CFG:
+	case MSR_F15H_EX_CFG:
 		break;

 	case MSR_IA32_UCODE_REV:
@@ -2721,6 +2722,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_BU_CFG2:
 	case MSR_IA32_PERF_CTL:
 	case MSR_AMD64_DC_CFG:
+	case MSR_F15H_EX_CFG:
 		msr_info->data = 0;
 		break;
 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
@@ -7446,7 +7448,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)

 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+	if (!kvm_apic_present(vcpu))
 		return;

 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
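
The two kvm/x86.c hunks add MSR_F15H_EX_CFG to the set of MSRs that KVM tolerates: writes are accepted and discarded, reads return zero, so a guest probing this AMD register no longer fails. A hedged sketch of the general read-as-zero pattern; the constant is copied from the msr-index.h hunk above, but the helper name and structure are illustrative, not KVM's internals:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_F15H_EX_CFG 0xc001102cU	/* value from the msr-index.h hunk */

/* Hypothetical helper: which MSRs are treated as read-as-zero. */
static bool msr_read_as_zero(uint32_t msr)
{
	switch (msr) {
	case MSR_F15H_EX_CFG:
		return true;	/* write ignored, read returns 0 */
	default:
		return false;
	}
}

int main(void)
{
	uint64_t data = UINT64_MAX;

	if (msr_read_as_zero(MSR_F15H_EX_CFG))
		data = 0;	/* emulate the read */
	printf("MSR 0x%x reads %llu\n", MSR_F15H_EX_CFG,
	       (unsigned long long)data);
	return 0;
}
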
diff --git a/block/bio.c b/block/bio.c
index 4f4d9884443b..4d86e90654b2 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1261,7 +1261,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		if (ret)
 			goto cleanup;
 	} else {
-		zero_fill_bio(bio);
+		if (bmd->is_our_pages)
+			zero_fill_bio(bio);
 		iov_iter_advance(iter, bio->bi_iter.bi_size);
 	}

diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 13ba2011a306..a327bef07642 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -378,7 +378,7 @@ static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
 	struct page *page;
 	int order;

-	for (order = get_order(size); order > 0; order--) {
+	for (order = get_order(size); order >= 0; order--) {
 		page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
 		if (page) {
 			*nr_zones = min_t(unsigned int, *nr_zones,
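
The blk-zoned.c fix is a loop-bound off-by-one: with "order > 0" the fallback loop never tried an order-0 (single page) allocation, so the smallest request could fail even when one page was available. A compilable toy of the descending-order fallback, using malloc in place of alloc_pages_node:

#include <stdio.h>
#include <stdlib.h>

/* Try progressively smaller power-of-two sizes; the fix is that the
 * loop runs down to and including order 0. */
static void *alloc_fallback(int max_order, int *got_order)
{
	for (int order = max_order; order >= 0; order--) {	/* not > 0 */
		void *p = malloc((size_t)4096 << order);
		if (p) {
			*got_order = order;
			return p;
		}
	}
	return NULL;
}

int main(void)
{
	int order;
	void *p = alloc_fallback(3, &order);

	if (p) {
		printf("got order %d\n", order);
		free(p);
	}
	return 0;
}
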
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index ef1b267cb058..64da032bb9ed 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -297,7 +297,7 @@ static struct clk_alpha_pll gpll0_out_main = {
 	.hw.init = &(struct clk_init_data){
 		.name = "gpll0_out_main",
 		.parent_names = (const char *[])
-				{ "gpll0_sleep_clk_src" },
+				{ "cxo" },
 		.num_parents = 1,
 		.ops = &clk_alpha_pll_ops,
 	},
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index 7725b6ee14ef..59bb67d5a7ce 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -153,6 +153,11 @@ struct chtls_dev {
 	unsigned int cdev_state;
 };

+struct chtls_listen {
+	struct chtls_dev *cdev;
+	struct sock *sk;
+};
+
 struct chtls_hws {
 	struct sk_buff_head sk_recv_queue;
 	u8 txqid;
@@ -215,6 +220,8 @@ struct chtls_sock {
 	u16 resv2;
 	u32 delack_mode;
 	u32 delack_seq;
+	u32 snd_win;
+	u32 rcv_win;

 	void *passive_reap_next;	/* placeholder for passive */
 	struct chtls_hws tlshws;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 228b91b7d6b5..59b75299fcbc 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -21,6 +21,7 @@
 #include <linux/kallsyms.h>
 #include <linux/kprobes.h>
 #include <linux/if_vlan.h>
+#include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/dst.h>

@@ -887,24 +888,6 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
 	return mtu_idx;
 }

-static unsigned int select_rcv_wnd(struct chtls_sock *csk)
-{
-	unsigned int rcvwnd;
-	unsigned int wnd;
-	struct sock *sk;
-
-	sk = csk->sk;
-	wnd = tcp_full_space(sk);
-
-	if (wnd < MIN_RCV_WND)
-		wnd = MIN_RCV_WND;
-
-	rcvwnd = MAX_RCV_WND;
-
-	csk_set_flag(csk, CSK_UPDATE_RCV_WND);
-	return min(wnd, rcvwnd);
-}
-
 static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
 {
 	int wscale = 0;
@@ -951,7 +934,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
 	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
 					req);
 	opt0 = TCAM_BYPASS_F |
-	       WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
+	       WND_SCALE_V(RCV_WSCALE(tp)) |
 	       MSS_IDX_V(csk->mtu_idx) |
 	       L2T_IDX_V(csk->l2t_entry->idx) |
 	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
@@ -1005,6 +988,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }

+static void chtls_set_tcp_window(struct chtls_sock *csk)
+{
+	struct net_device *ndev = csk->egress_dev;
+	struct port_info *pi = netdev_priv(ndev);
+	unsigned int linkspeed;
+	u8 scale;
+
+	linkspeed = pi->link_cfg.speed;
+	scale = linkspeed / SPEED_10000;
+#define CHTLS_10G_RCVWIN (256 * 1024)
+	csk->rcv_win = CHTLS_10G_RCVWIN;
+	if (scale)
+		csk->rcv_win *= scale;
+#define CHTLS_10G_SNDWIN (256 * 1024)
+	csk->snd_win = CHTLS_10G_SNDWIN;
+	if (scale)
+		csk->snd_win *= scale;
+}
+
 static struct sock *chtls_recv_sock(struct sock *lsk,
 				    struct request_sock *oreq,
 				    void *network_hdr,
@@ -1067,6 +1069,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	csk->port_id = port_id;
 	csk->egress_dev = ndev;
 	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+	chtls_set_tcp_window(csk);
+	tp->rcv_wnd = csk->rcv_win;
+	csk->sndbuf = csk->snd_win;
 	csk->ulp_mode = ULP_MODE_TLS;
 	step = cdev->lldi->nrxq / cdev->lldi->nchan;
 	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
@@ -1075,9 +1080,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 					   port_id * step;
 	csk->sndbuf = newsk->sk_sndbuf;
 	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
-	tp->rcv_wnd = select_rcv_wnd(csk);
 	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
-					   WSCALE_OK(tp),
+					   sock_net(newsk)->
+					   ipv4.sysctl_tcp_window_scaling,
 					   tp->window_clamp);
 	neigh_release(n);
 	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1129,6 +1134,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 	struct cpl_t5_pass_accept_rpl *rpl;
 	struct cpl_pass_accept_req *req;
 	struct listen_ctx *listen_ctx;
+	struct vlan_ethhdr *vlan_eh;
 	struct request_sock *oreq;
 	struct sk_buff *reply_skb;
 	struct chtls_sock *csk;
@@ -1141,6 +1147,10 @@ static void chtls_pass_accept_request(struct sock *sk,
 	unsigned int stid;
 	unsigned int len;
 	unsigned int tid;
+	bool th_ecn, ect;
+	__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+	u16 eth_hdr_len;
+	bool ecn_ok;

 	req = cplhdr(skb) + RSS_HDR;
 	tid = GET_TID(req);
@@ -1179,24 +1189,40 @@ static void chtls_pass_accept_request(struct sock *sk,
 	oreq->mss = 0;
 	oreq->ts_recent = 0;

-	eh = (struct ethhdr *)(req + 1);
-	iph = (struct iphdr *)(eh + 1);
+	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
+	if (eth_hdr_len == ETH_HLEN) {
+		eh = (struct ethhdr *)(req + 1);
+		iph = (struct iphdr *)(eh + 1);
+		network_hdr = (void *)(eh + 1);
+	} else {
+		vlan_eh = (struct vlan_ethhdr *)(req + 1);
+		iph = (struct iphdr *)(vlan_eh + 1);
+		network_hdr = (void *)(vlan_eh + 1);
+	}
 	if (iph->version != 0x4)
 		goto free_oreq;

-	network_hdr = (void *)(eh + 1);
 	tcph = (struct tcphdr *)(iph + 1);
+	skb_set_network_header(skb, (void *)iph - (void *)req);

 	tcp_rsk(oreq)->tfo_listener = false;
 	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
 	chtls_set_req_port(oreq, tcph->source, tcph->dest);
-	inet_rsk(oreq)->ecn_ok = 0;
 	chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
-	if (req->tcpopt.wsf <= 14) {
+	ip_dsfield = ipv4_get_dsfield(iph);
+	if (req->tcpopt.wsf <= 14 &&
+	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
 		inet_rsk(oreq)->wscale_ok = 1;
 		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
 	}
 	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
+	th_ecn = tcph->ece && tcph->cwr;
+	if (th_ecn) {
+		ect = !INET_ECN_is_not_ect(ip_dsfield);
+		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
+			inet_rsk(oreq)->ecn_ok = 1;
+	}

 	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
 	if (!newsk)
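
The largest chtls_cm.c hunk replaces the unconditional "ecn_ok = 0" with real ECN negotiation on passive opens: accept ECN when the SYN is an ECN-setup SYN (ECE and CWR both set) and either the SYN itself was not-ECT with the tcp_ecn sysctl enabled, or the congestion-control algorithm asks for ECN. A standalone model of that predicate; the helper name and the two-bit ECT mask are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define INET_ECN_MASK 3	/* low two bits of the TOS byte carry ECN */

static bool req_ecn_ok(unsigned char ip_dsfield, bool ece, bool cwr,
		       bool sysctl_tcp_ecn, bool ca_needs_ecn)
{
	bool th_ecn = ece && cwr;			/* ECN-setup SYN */
	bool ect = (ip_dsfield & INET_ECN_MASK) != 0;	/* SYN was ECT? */

	if (!th_ecn)
		return false;
	return (!ect && sysctl_tcp_ecn) || ca_needs_ecn;
}

int main(void)
{
	printf("%d\n", req_ecn_ok(0x00, true, true, true, false));  /* 1 */
	printf("%d\n", req_ecn_ok(0x02, true, true, true, false));  /* 0 */
	printf("%d\n", req_ecn_ok(0x00, true, false, true, false)); /* 0 */
	return 0;
}
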
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index afebbd87c4aa..18f553fcc167 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,

 	req_wr->lsodisable_to_flags =
 		htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
-		      FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
+		      TX_URG_V(skb_urgent(skb)) |
 		      T6_TX_FORCE_F | wr_ulp_mode_force |
 		      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
 				 skb_queue_empty(&csk->txq)));
@@ -534,10 +534,9 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
 				FW_OFLD_TX_DATA_WR_SHOVE_F);

 	req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
-			FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
-			FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
-				(sk, CSK_TX_MORE_DATA)) &&
-				skb_queue_empty(&csk->txq)));
+			TX_URG_V(skb_urgent(skb)) |
+			TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
+				   skb_queue_empty(&csk->txq)));
 	req->plen = htonl(len);
 }

@@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	int mss, flags, err;
 	int recordsz = 0;
 	int copied = 0;
-	int hdrlen = 0;
 	long timeo;

 	lock_sock(sk);
@@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)

 		recordsz = tls_header_read(&hdr, &msg->msg_iter);
 		size -= TLS_HEADER_LENGTH;
-		hdrlen += TLS_HEADER_LENGTH;
+		copied += TLS_HEADER_LENGTH;
 		csk->tlshws.txleft = recordsz;
 		csk->tlshws.type = hdr.type;
 		if (skb)
@@ -1083,10 +1081,8 @@ new_buf:
 			int off = TCP_OFF(sk);
 			bool merge;

-			if (!page)
-				goto wait_for_memory;
-
-			pg_size <<= compound_order(page);
+			if (page)
+				pg_size <<= compound_order(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1187,7 +1183,7 @@ out:
 	chtls_tcp_push(sk, flags);
 done:
 	release_sock(sk);
-	return copied + hdrlen;
+	return copied;
 do_fault:
 	if (!skb->len) {
 		__skb_unlink(skb, &csk->txq);
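
The chtls_io.c accounting change folds the TLS record header into "copied" at the point it is consumed, instead of carrying a separate hdrlen that only the final "return copied + hdrlen" path accounted for; this way every return path reports the header bytes actually taken from the caller's iov. A toy of the corrected accounting; TLS_HEADER_LENGTH is the real 5-byte record header, the rest is illustrative:

#include <stdio.h>

#define TLS_HEADER_LENGTH 5	/* type(1) + version(2) + length(2) */

/* Count header bytes as consumed the moment the record is parsed. */
static int send_one_record(int payload, int *copied)
{
	*copied += TLS_HEADER_LENGTH;	/* header consumed from the iov */
	*copied += payload;		/* payload bytes queued */
	return *copied;
}

int main(void)
{
	int copied = 0;

	printf("returned %d\n", send_one_record(100, &copied)); /* 105 */
	return 0;
}
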
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index f472c51abe56..563f8fe7686a 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb)
 static int listen_notify_handler(struct notifier_block *this,
 				 unsigned long event, void *data)
 {
-	struct chtls_dev *cdev;
-	struct sock *sk;
-	int ret;
+	struct chtls_listen *clisten;
+	int ret = NOTIFY_DONE;

-	sk = data;
-	ret = NOTIFY_DONE;
+	clisten = (struct chtls_listen *)data;

 	switch (event) {
 	case CHTLS_LISTEN_START:
+		ret = chtls_listen_start(clisten->cdev, clisten->sk);
+		kfree(clisten);
+		break;
 	case CHTLS_LISTEN_STOP:
-		mutex_lock(&cdev_list_lock);
-		list_for_each_entry(cdev, &cdev_list, list) {
-			if (event == CHTLS_LISTEN_START)
-				ret = chtls_listen_start(cdev, sk);
-			else
-				chtls_listen_stop(cdev, sk);
-		}
-		mutex_unlock(&cdev_list_lock);
+		chtls_listen_stop(clisten->cdev, clisten->sk);
+		kfree(clisten);
 		break;
 	}
 	return ret;
@@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }

-static int chtls_start_listen(struct sock *sk)
+static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
 {
+	struct chtls_listen *clisten;
 	int err;

 	if (sk->sk_protocol != IPPROTO_TCP)
@@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk)
 		return -EADDRNOTAVAIL;

 	sk->sk_backlog_rcv = listen_backlog_rcv;
+	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+	if (!clisten)
+		return -ENOMEM;
+	clisten->cdev = cdev;
+	clisten->sk = sk;
 	mutex_lock(&notify_mutex);
 	err = raw_notifier_call_chain(&listen_notify_list,
-				      CHTLS_LISTEN_START, sk);
+				      CHTLS_LISTEN_START, clisten);
 	mutex_unlock(&notify_mutex);
 	return err;
 }

-static void chtls_stop_listen(struct sock *sk)
+static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
 {
+	struct chtls_listen *clisten;
+
 	if (sk->sk_protocol != IPPROTO_TCP)
 		return;

+	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+	if (!clisten)
+		return;
+	clisten->cdev = cdev;
+	clisten->sk = sk;
 	mutex_lock(&notify_mutex);
 	raw_notifier_call_chain(&listen_notify_list,
-				CHTLS_LISTEN_STOP, sk);
+				CHTLS_LISTEN_STOP, clisten);
 	mutex_unlock(&notify_mutex);
 }

@@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev)

 static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
 {
+	struct chtls_dev *cdev = to_chtls_dev(dev);
+
 	if (sk->sk_state == TCP_LISTEN)
-		return chtls_start_listen(sk);
+		return chtls_start_listen(cdev, sk);
 	return 0;
 }

 static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
 {
+	struct chtls_dev *cdev = to_chtls_dev(dev);
+
 	if (sk->sk_state == TCP_LISTEN)
-		chtls_stop_listen(sk);
+		chtls_stop_listen(cdev, sk);
+}
+
+static void chtls_free_uld(struct chtls_dev *cdev)
+{
+	int i;
+
+	tls_unregister_device(&cdev->tlsdev);
+	kvfree(cdev->kmap.addr);
+	idr_destroy(&cdev->hwtid_idr);
+	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
+		kfree_skb(cdev->rspq_skb_cache[i]);
+	kfree(cdev->lldi);
+	kfree_skb(cdev->askb);
+	kfree(cdev);
+}
+
+static inline void chtls_dev_release(struct kref *kref)
+{
+	struct chtls_dev *cdev;
+	struct tls_device *dev;
+
+	dev = container_of(kref, struct tls_device, kref);
+	cdev = to_chtls_dev(dev);
+	chtls_free_uld(cdev);
 }

 static void chtls_register_dev(struct chtls_dev *cdev)
@@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev)
 	tlsdev->feature = chtls_inline_feature;
 	tlsdev->hash = chtls_create_hash;
 	tlsdev->unhash = chtls_destroy_hash;
-	tls_register_device(&cdev->tlsdev);
+	tlsdev->release = chtls_dev_release;
+	kref_init(&tlsdev->kref);
+	tls_register_device(tlsdev);
 	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }

-static void chtls_unregister_dev(struct chtls_dev *cdev)
-{
-	tls_unregister_device(&cdev->tlsdev);
-}
-
 static void process_deferq(struct work_struct *task_param)
 {
 	struct chtls_dev *cdev = container_of(task_param,
@@ -262,28 +295,16 @@ out:
 	return NULL;
 }

-static void chtls_free_uld(struct chtls_dev *cdev)
-{
-	int i;
-
-	chtls_unregister_dev(cdev);
-	kvfree(cdev->kmap.addr);
-	idr_destroy(&cdev->hwtid_idr);
-	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
-		kfree_skb(cdev->rspq_skb_cache[i]);
-	kfree(cdev->lldi);
-	kfree_skb(cdev->askb);
-	kfree(cdev);
-}
-
 static void chtls_free_all_uld(void)
 {
 	struct chtls_dev *cdev, *tmp;

 	mutex_lock(&cdev_mutex);
 	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
-		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
-			chtls_free_uld(cdev);
+		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
+			list_del(&cdev->list);
+			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
+		}
 	}
 	mutex_unlock(&cdev_mutex);
 }
@@ -304,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
 		mutex_lock(&cdev_mutex);
 		list_del(&cdev->list);
 		mutex_unlock(&cdev_mutex);
-		chtls_free_uld(cdev);
+		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
 		break;
 	default:
 		break;
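
The chtls_main.c changes convert device teardown from a direct chtls_free_uld() call to reference counting: registration initializes a kref, and both the ULD state machine and chtls_free_all_uld() now drop their reference with kref_put(), so the release callback frees the device only once nothing still uses it. A userspace model of the kref pattern follows; the counter is non-atomic purely for brevity, where the kernel's kref is atomic:

#include <stdio.h>
#include <stdlib.h>

struct dev {
	int refcount;
	void (*release)(struct dev *);
};

static void dev_release(struct dev *d)
{
	printf("last ref dropped, freeing\n");
	free(d);
}

static void dev_put(struct dev *d)
{
	if (--d->refcount == 0)	/* the real kref_put is atomic */
		d->release(d);
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));

	d->refcount = 2;	/* core + one in-flight user */
	d->release = dev_release;
	dev_put(d);		/* core unregisters: object survives */
	dev_put(d);		/* last user done: release runs */
	return 0;
}
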
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8816c697b205..387f1cf1dc20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	case CHIP_TOPAZ:
 		if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
 		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-		    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+		    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
 			info->is_kicker = true;
 			strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
 		} else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		if (type == CGS_UCODE_ID_SMU) {
 			if (((adev->pdev->device == 0x67ef) &&
 			     ((adev->pdev->revision == 0xe0) ||
-			      (adev->pdev->revision == 0xe2) ||
 			      (adev->pdev->revision == 0xe5))) ||
 			    ((adev->pdev->device == 0x67ff) &&
 			     ((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			      (adev->pdev->revision == 0xff)))) {
 				info->is_kicker = true;
 				strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
-			} else
+			} else if ((adev->pdev->device == 0x67ef) &&
+				   (adev->pdev->revision == 0xe2)) {
+				info->is_kicker = true;
+				strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+			} else {
 				strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+			}
 		} else if (type == CGS_UCODE_ID_SMU_SK) {
 			strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
 		}
@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			      (adev->pdev->revision == 0xe7) ||
 			      (adev->pdev->revision == 0xef))) ||
 			    ((adev->pdev->device == 0x6fdf) &&
-			     (adev->pdev->revision == 0xef))) {
+			     ((adev->pdev->revision == 0xef) ||
+			      (adev->pdev->revision == 0xff)))) {
 				info->is_kicker = true;
 				strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
-			} else
+			} else if ((adev->pdev->device == 0x67df) &&
+				   ((adev->pdev->revision == 0xe1) ||
+				    (adev->pdev->revision == 0xf7))) {
+				info->is_kicker = true;
+				strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+			} else {
 				strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+			}
 		} else if (type == CGS_UCODE_ID_SMU_SK) {
 			strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
 		}
 		break;
 	case CHIP_POLARIS12:
-		strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+		if (((adev->pdev->device == 0x6987) &&
+		     ((adev->pdev->revision == 0xc0) ||
+		      (adev->pdev->revision == 0xc3))) ||
+		    ((adev->pdev->device == 0x6981) &&
+		     ((adev->pdev->revision == 0x00) ||
+		      (adev->pdev->revision == 0x01) ||
+		      (adev->pdev->revision == 0x10)))) {
+			info->is_kicker = true;
+			strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+		} else {
+			strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+		}
 		break;
 	case CHIP_VEGAM:
 		strcpy(fw_name, "amdgpu/vegam_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 663043c8f0f5..0acc8dee2cb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -124,14 +124,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
 		goto free_chunk;
 	}

+	mutex_lock(&p->ctx->lock);
+
 	/* skip guilty context job */
 	if (atomic_read(&p->ctx->guilty) == 1) {
 		ret = -ECANCELED;
 		goto free_chunk;
 	}

-	mutex_lock(&p->ctx->lock);
-
 	/* get chunks */
 	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
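
The amdgpu_cs.c hunk is a lock-ordering fix: the guilty-context early exit funnels into a common error path that expects ctx->lock to be held, so the lock must be taken before that check rather than after it. A toy of the rule, take the lock before any bail-out that shares a single unlock, with a pthread mutex standing in for the DRM context mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static int parser_init(int ctx_guilty)
{
	int ret = 0;

	pthread_mutex_lock(&ctx_lock);	/* lock before any early goto */
	if (ctx_guilty) {
		ret = -1;		/* skip guilty context job */
		goto out;
	}
	/* ... parse chunks under the lock ... */
out:
	pthread_mutex_unlock(&ctx_lock); /* shared path: always paired */
	return ret;
}

int main(void)
{
	printf("clean: %d, guilty: %d\n", parser_init(0), parser_init(1));
	return 0;
}
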
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8de55f7f1a3a..74b611e8a1b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -872,7 +872,13 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+	{0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	/* Vega 12 */
 	{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +891,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+	{0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	/* Raven */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a9f18ea7e354..e4ded890b1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -337,12 +337,19 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x6864, &vega10_device_info },	/* Vega10 */
 	{ 0x6867, &vega10_device_info },	/* Vega10 */
 	{ 0x6868, &vega10_device_info },	/* Vega10 */
+	{ 0x6869, &vega10_device_info },	/* Vega10 */
+	{ 0x686A, &vega10_device_info },	/* Vega10 */
+	{ 0x686B, &vega10_device_info },	/* Vega10 */
 	{ 0x686C, &vega10_vf_device_info },	/* Vega10 vf*/
+	{ 0x686D, &vega10_device_info },	/* Vega10 */
+	{ 0x686E, &vega10_device_info },	/* Vega10 */
+	{ 0x686F, &vega10_device_info },	/* Vega10 */
 	{ 0x687F, &vega10_device_info },	/* Vega10 */
 	{ 0x66a0, &vega20_device_info },	/* Vega20 */
 	{ 0x66a1, &vega20_device_info },	/* Vega20 */
 	{ 0x66a2, &vega20_device_info },	/* Vega20 */
 	{ 0x66a3, &vega20_device_info },	/* Vega20 */
+	{ 0x66a4, &vega20_device_info },	/* Vega20 */
 	{ 0x66a7, &vega20_device_info },	/* Vega20 */
 	{ 0x66af, &vega20_device_info }		/* Vega20 */
 };
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3367dd30cdd0..3b7fce5d7258 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.disable_auto_wattman = 1;
 	data->registry_data.auto_wattman_debug = 0;
 	data->registry_data.auto_wattman_sample_period = 100;
-	data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+	data->registry_data.fclk_gfxclk_ratio = 0;
 	data->registry_data.auto_wattman_threshold = 50;
 	data->registry_data.gfxoff_controlled_by_driver = 1;
 	data->gfxoff_allowed = false;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index 62f36ba2435b..c1a99dfe4913 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
 #define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)

+#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
+
 #define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
 #define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
 #define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 872d3824337b..a1e0ac9ae248 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1985,6 +1985,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)

 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);

+	/* Apply avfs cks-off voltages to avoid the overshoot
+	 * when switching to the highest sclk frequency
+	 */
+	if (data->apply_avfs_cks_off_voltage)
+		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
 	return 0;
 }

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 99d5e4f98f49..a6edd5df33b0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 481896fb712a..85e6736f0a32 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 		plane->bpp = skl_pixel_formats[fmt].bpp;
 		plane->drm_format = skl_pixel_formats[fmt].drm_format;
 	} else {
-		plane->tiled = !!(val & DISPPLANE_TILED);
+		plane->tiled = val & DISPPLANE_TILED;
 		fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
 		plane->bpp = bdw_pixel_formats[fmt].bpp;
 		plane->drm_format = bdw_pixel_formats[fmt].drm_format;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ffdbbac4400e..47062ee979cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)

 	intel_uncore_sanitize(dev_priv);

+	intel_gt_init_workarounds(dev_priv);
 	i915_gem_load_init_fences(dev_priv);

 	/* On the 945G/GM, the chipset reports the MSI capability on the
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9102571e9692..872a2e159a5f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -67,6 +67,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
+#include "intel_workarounds.h"
 #include "intel_uc.h"

 #include "i915_gem.h"
@@ -1805,6 +1806,7 @@ struct drm_i915_private {
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

 	struct i915_workarounds workarounds;
+	struct i915_wa_list gt_wa_list;

 	struct i915_frontbuffer_tracking fb_tracking;

@@ -2148,6 +2150,8 @@ struct drm_i915_private {
 		struct delayed_work idle_work;

 		ktime_t last_init_time;
+
+		struct i915_vma *scratch;
 	} gt;

 	/* perform PHY state sanity checks? */
@@ -3870,4 +3874,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 	return I915_HWS_CSB_WRITE_INDEX;
 }

+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+	return i915_ggtt_offset(i915->gt.scratch);
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..6ae9a6080cc8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		}
 	}

-	intel_gt_workarounds_apply(dev_priv);
+	intel_gt_apply_workarounds(dev_priv);

 	i915_gem_init_swizzling(dev_priv);

@@ -5500,6 +5500,44 @@ err_active:
 	goto out_ctx;
 }

+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
+
+	obj = i915_gem_object_create_stolen(i915, size);
+	if (!obj)
+		obj = i915_gem_object_create_internal(i915, size);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate scratch page\n");
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		goto err_unref;
+
+	i915->gt.scratch = vma;
+	return 0;
+
+err_unref:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
@@ -5546,12 +5584,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_unlock;
 	}

-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_init_scratch(dev_priv,
+				    IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_ggtt;
 	}

+	ret = i915_gem_contexts_init(dev_priv);
+	if (ret) {
+		GEM_BUG_ON(ret == -EIO);
+		goto err_scratch;
+	}
+
 	ret = intel_engines_init(dev_priv);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5669,8 @@ err_pm:
 err_context:
 	if (ret != -EIO)
 		i915_gem_contexts_fini(dev_priv);
+err_scratch:
+	i915_gem_fini_scratch(dev_priv);
 err_ggtt:
 err_unlock:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5722,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 	intel_uc_fini(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_contexts_fini(dev_priv);
+	i915_gem_fini_scratch(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);

+	intel_wa_list_free(&dev_priv->gt_wa_list);
+
 	intel_cleanup_gt_powersave(dev_priv);

 	intel_uc_fini_misc(dev_priv);
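
The i915_gem.c hunks move the scratch page from per-engine state to a single device-global VMA: allocate once at init, preferring stolen memory and falling back to an internal object, pin it high in the GGTT, and unwind through err_scratch on later failures. A userspace sketch of the prefer-then-fallback allocation with explicit unwind; the functions are stand-ins, not the i915 API:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_stolen(size_t sz) { (void)sz; return NULL; /* often absent */ }
static void *alloc_internal(size_t sz) { return malloc(sz); }
static int pin(void *obj) { return obj ? 0 : -1; }

static void *init_scratch(size_t size)
{
	void *obj = alloc_stolen(size);		/* preferred backing store */

	if (!obj)
		obj = alloc_internal(size);	/* fallback */
	if (!obj)
		return NULL;
	if (pin(obj)) {				/* unwind on failure */
		free(obj);
		return NULL;
	}
	return obj;
}

int main(void)
{
	void *scratch = init_scratch(4096);

	printf("scratch %s\n", scratch ? "ready" : "failed");
	free(scratch);
	return 0;
}
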
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d4fac09095f8..1aaccbe7e1de 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
 	else if (gen >= 4)
 		len = 4;
 	else
-		len = 6;
+		len = 3;

 	batch = reloc_gpu(eb, vma, len);
 	if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
 		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
 		*batch++ = addr;
 		*batch++ = target_offset;
-
-		/* And again for good measure (blb/pnv) */
-		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-		*batch++ = addr;
-		*batch++ = target_offset;
 	}

 	goto out;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3eb33e000d6f..db4128d6c09b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1495,7 +1495,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 		if (HAS_BROKEN_CS_TLB(i915))
 			ee->wa_batchbuffer =
 				i915_error_object_create(i915,
-							 engine->scratch);
+							 i915->gt.scratch);
 		request_record_user_bo(request, ee);

 		ee->ctx =
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..76b5f94ea6cb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -490,46 +490,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_cmd_parser(engine);
 }

-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-				unsigned int size)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int ret;
-
-	WARN_ON(engine->scratch);
-
-	obj = i915_gem_object_create_stolen(engine->i915, size);
-	if (!obj)
-		obj = i915_gem_object_create_internal(engine->i915, size);
-	if (IS_ERR(obj)) {
-		DRM_ERROR("Failed to allocate scratch page\n");
-		return PTR_ERR(obj);
-	}
-
-	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto err_unref;
-	}
-
-	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
-	if (ret)
-		goto err_unref;
-
-	engine->scratch = vma;
-	return 0;
-
-err_unref:
-	i915_gem_object_put(obj);
-	return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
-	i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
 	if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +664,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;

-	intel_engine_cleanup_scratch(engine);
-
 	cleanup_status_page(engine);

 	intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	__intel_context_unpin(i915->kernel_context, engine);

 	i915_timeline_fini(&engine->timeline);
+
+	intel_wa_list_free(&engine->wa_list);
 }

 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 37c94a54efcb..58d1d3d47dd3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * may not be visible to the HW prior to the completion of the UC
 	 * register write and that we may begin execution from the context
 	 * before its image is complete leading to invalid PD chasing.
+	 *
+	 * Furthermore, Braswell, at least, wants a full mb to be sure that
+	 * the writes are coherent in memory (visible to the GPU) prior to
+	 * execution, and not just visible to other CPUs (as is the result of
+	 * wmb).
 	 */
-	wmb();
+	mb();
 	return ce->lrc_desc;
 }

@@ -1443,9 +1448,10 @@ static int execlists_request_alloc(struct i915_request *request)
 static u32 *
 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 {
+	/* NB no one else is allowed to scribble over scratch + 256! */
 	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_ggtt_offset(engine->scratch) + 256;
+	*batch++ = i915_scratch_offset(engine->i915) + 256;
 	*batch++ = 0;

 	*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1459,7 +1465,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)

 	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_ggtt_offset(engine->scratch) + 256;
+	*batch++ = i915_scratch_offset(engine->i915) + 256;
 	*batch++ = 0;

 	return batch;
@@ -1496,7 +1502,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 				       PIPE_CONTROL_GLOBAL_GTT_IVB |
 				       PIPE_CONTROL_CS_STALL |
 				       PIPE_CONTROL_QW_WRITE,
-				       i915_ggtt_offset(engine->scratch) +
+				       i915_scratch_offset(engine->i915) +
 				       2 * CACHELINE_BYTES);

 	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1573,7 +1579,7 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 					   PIPE_CONTROL_GLOBAL_GTT_IVB |
 					   PIPE_CONTROL_CS_STALL |
 					   PIPE_CONTROL_QW_WRITE,
-					   i915_ggtt_offset(engine->scratch)
+					   i915_scratch_offset(engine->i915)
 					   + 2 * CACHELINE_BYTES);
 	}

@@ -1793,6 +1799,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)

 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
+	intel_engine_apply_workarounds(engine);
+
 	intel_mocs_init_engine(engine);

 	intel_engine_reset_breadcrumbs(engine);
@@ -2139,7 +2147,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
-		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+		i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 *cs, flags = 0;
 	int len;
@@ -2476,10 +2484,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;

-	ret = intel_engine_create_scratch(engine, PAGE_SIZE);
-	if (ret)
-		goto err_cleanup_common;
-
 	ret = intel_init_workaround_bb(engine);
 	if (ret) {
 		/*
@@ -2491,11 +2495,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 			  ret);
 	}

-	return 0;
+	intel_engine_init_workarounds(engine);

-err_cleanup_common:
-	intel_engine_cleanup_common(engine);
-	return ret;
+	return 0;
 }

 int logical_xcs_ring_init(struct intel_engine_cs *engine)
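
The execlists_update_context() hunk upgrades wmb() to mb() because a store-store barrier only orders CPU writes against other CPU writes, while Braswell needs the context-image writes fully ordered, and visible beyond the CPU, before execution proceeds. A loose C11 analogy contrasting a release fence with a sequentially consistent fence; the mapping to the kernel's wmb()/mb() and to device visibility is approximate:

#include <stdatomic.h>
#include <stdint.h>

static uint32_t context_image[64];
static _Atomic uint32_t doorbell;

static void publish_context(uint32_t tail)
{
	context_image[5] = tail;	/* update the context image */

	/* wmb-like: orders this store against later stores only */
	atomic_thread_fence(memory_order_release);

	/* mb-like: full ordering against everything that follows,
	 * which is the stronger guarantee the hunk switches to */
	atomic_thread_fence(memory_order_seq_cst);

	atomic_store(&doorbell, 1);	/* tell the consumer to go */
}

int main(void)
{
	publish_context(0x40);
	return 0;
}
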
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 187bb0ceb4ac..1f8d2a66c791 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
69static int 69static int
70gen2_render_ring_flush(struct i915_request *rq, u32 mode) 70gen2_render_ring_flush(struct i915_request *rq, u32 mode)
71{ 71{
72 unsigned int num_store_dw;
72 u32 cmd, *cs; 73 u32 cmd, *cs;
73 74
74 cmd = MI_FLUSH; 75 cmd = MI_FLUSH;
75 76 num_store_dw = 0;
76 if (mode & EMIT_INVALIDATE) 77 if (mode & EMIT_INVALIDATE)
77 cmd |= MI_READ_FLUSH; 78 cmd |= MI_READ_FLUSH;
79 if (mode & EMIT_FLUSH)
80 num_store_dw = 4;
78 81
79 cs = intel_ring_begin(rq, 2); 82 cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
80 if (IS_ERR(cs)) 83 if (IS_ERR(cs))
81 return PTR_ERR(cs); 84 return PTR_ERR(cs);
82 85
83 *cs++ = cmd; 86 *cs++ = cmd;
84 *cs++ = MI_NOOP; 87 while (num_store_dw--) {
88 *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
89 *cs++ = i915_scratch_offset(rq->i915);
90 *cs++ = 0;
91 }
92 *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
93
85 intel_ring_advance(rq, cs); 94 intel_ring_advance(rq, cs);
86 95
87 return 0; 96 return 0;
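The reservation arithmetic in the hunk above: each MI_STORE_DWORD_IMM costs three dwords (opcode, scratch address, immediate value) and the fixed overhead is two (the flush cmd plus the trailing MI_FLUSH | MI_NO_WRITE_FLUSH), hence intel_ring_begin(rq, 2 + 3 * num_store_dw). Worked out for both modes:

	unsigned int num_store_dw = (mode & EMIT_FLUSH) ? 4 : 0;
	/* EMIT_FLUSH:       2 + 3 * 4 = 14 dwords reserved
	 * invalidate-only:  2 + 3 * 0 =  2 dwords reserved */
	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);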
@@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
150 */ 159 */
151 if (mode & EMIT_INVALIDATE) { 160 if (mode & EMIT_INVALIDATE) {
152 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; 161 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
153 *cs++ = i915_ggtt_offset(rq->engine->scratch) | 162 *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
154 PIPE_CONTROL_GLOBAL_GTT;
155 *cs++ = 0; 163 *cs++ = 0;
156 *cs++ = 0; 164 *cs++ = 0;
157 165
@@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
159 *cs++ = MI_FLUSH; 167 *cs++ = MI_FLUSH;
160 168
161 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; 169 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
162 *cs++ = i915_ggtt_offset(rq->engine->scratch) | 170 *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
163 PIPE_CONTROL_GLOBAL_GTT;
164 *cs++ = 0; 171 *cs++ = 0;
165 *cs++ = 0; 172 *cs++ = 0;
166 } 173 }
@@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
212static int 219static int
213intel_emit_post_sync_nonzero_flush(struct i915_request *rq) 220intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
214{ 221{
215 u32 scratch_addr = 222 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
216 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
217 u32 *cs; 223 u32 *cs;
218 224
219 cs = intel_ring_begin(rq, 6); 225 cs = intel_ring_begin(rq, 6);
@@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
246static int 252static int
247gen6_render_ring_flush(struct i915_request *rq, u32 mode) 253gen6_render_ring_flush(struct i915_request *rq, u32 mode)
248{ 254{
249 u32 scratch_addr = 255 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
250 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
251 u32 *cs, flags = 0; 256 u32 *cs, flags = 0;
252 int ret; 257 int ret;
253 258
@@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
316static int 321static int
317gen7_render_ring_flush(struct i915_request *rq, u32 mode) 322gen7_render_ring_flush(struct i915_request *rq, u32 mode)
318{ 323{
319 u32 scratch_addr = 324 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
320 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
321 u32 *cs, flags = 0; 325 u32 *cs, flags = 0;
322 326
323 /* 327 /*
@@ -971,7 +975,7 @@ i965_emit_bb_start(struct i915_request *rq,
971} 975}
972 976
973/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ 977/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
974#define I830_BATCH_LIMIT (256*1024) 978#define I830_BATCH_LIMIT SZ_256K
975#define I830_TLB_ENTRIES (2) 979#define I830_TLB_ENTRIES (2)
976#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 980#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
977static int 981static int
@@ -979,7 +983,9 @@ i830_emit_bb_start(struct i915_request *rq,
979 u64 offset, u32 len, 983 u64 offset, u32 len,
980 unsigned int dispatch_flags) 984 unsigned int dispatch_flags)
981{ 985{
982 u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch); 986 u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
987
988 GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
983 989
984 cs = intel_ring_begin(rq, 6); 990 cs = intel_ring_begin(rq, 6);
985 if (IS_ERR(cs)) 991 if (IS_ERR(cs))
@@ -1437,7 +1443,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1437{ 1443{
1438 struct i915_timeline *timeline; 1444 struct i915_timeline *timeline;
1439 struct intel_ring *ring; 1445 struct intel_ring *ring;
1440 unsigned int size;
1441 int err; 1446 int err;
1442 1447
1443 intel_engine_setup_common(engine); 1448 intel_engine_setup_common(engine);
@@ -1462,21 +1467,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1462 GEM_BUG_ON(engine->buffer); 1467 GEM_BUG_ON(engine->buffer);
1463 engine->buffer = ring; 1468 engine->buffer = ring;
1464 1469
1465 size = PAGE_SIZE;
1466 if (HAS_BROKEN_CS_TLB(engine->i915))
1467 size = I830_WA_SIZE;
1468 err = intel_engine_create_scratch(engine, size);
1469 if (err)
1470 goto err_unpin;
1471
1472 err = intel_engine_init_common(engine); 1470 err = intel_engine_init_common(engine);
1473 if (err) 1471 if (err)
1474 goto err_scratch; 1472 goto err_unpin;
1475 1473
1476 return 0; 1474 return 0;
1477 1475
1478err_scratch:
1479 intel_engine_cleanup_scratch(engine);
1480err_unpin: 1476err_unpin:
1481 intel_ring_unpin(ring); 1477 intel_ring_unpin(ring);
1482err_ring: 1478err_ring:
@@ -1550,7 +1546,7 @@ static int flush_pd_dir(struct i915_request *rq)
1550 /* Stall until the page table load is complete */ 1546 /* Stall until the page table load is complete */
1551 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 1547 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1552 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); 1548 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1553 *cs++ = i915_ggtt_offset(engine->scratch); 1549 *cs++ = i915_scratch_offset(rq->i915);
1554 *cs++ = MI_NOOP; 1550 *cs++ = MI_NOOP;
1555 1551
1556 intel_ring_advance(rq, cs); 1552 intel_ring_advance(rq, cs);
@@ -1659,7 +1655,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
1659 /* Insert a delay before the next switch! */ 1655 /* Insert a delay before the next switch! */
1660 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 1656 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1661 *cs++ = i915_mmio_reg_offset(last_reg); 1657 *cs++ = i915_mmio_reg_offset(last_reg);
1662 *cs++ = i915_ggtt_offset(engine->scratch); 1658 *cs++ = i915_scratch_offset(rq->i915);
1663 *cs++ = MI_NOOP; 1659 *cs++ = MI_NOOP;
1664 } 1660 }
1665 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 1661 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
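With intel_engine_create_scratch() gone from intel_init_ring_buffer(), the HAS_BROKEN_CS_TLB() sizing removed above has to be honoured wherever the shared gt.scratch is allocated; the new GEM_BUG_ON in i830_emit_bb_start() guards exactly that. The allocation site is outside this excerpt, so the following helper name is an assumption, but the size selection it must perform is the one deleted here:

	/* Hypothetical helper, shown only for the size choice: */
	static unsigned int common_scratch_size(const struct drm_i915_private *i915)
	{
		return HAS_BROKEN_CS_TLB(i915) ? I830_WA_SIZE : PAGE_SIZE;
	}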
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..767a7192c969 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -15,6 +15,7 @@
15#include "i915_selftest.h" 15#include "i915_selftest.h"
16#include "i915_timeline.h" 16#include "i915_timeline.h"
17#include "intel_gpu_commands.h" 17#include "intel_gpu_commands.h"
18#include "intel_workarounds.h"
18 19
19struct drm_printer; 20struct drm_printer;
20struct i915_sched_attr; 21struct i915_sched_attr;
@@ -440,7 +441,7 @@ struct intel_engine_cs {
440 441
441 struct intel_hw_status_page status_page; 442 struct intel_hw_status_page status_page;
442 struct i915_ctx_workarounds wa_ctx; 443 struct i915_ctx_workarounds wa_ctx;
443 struct i915_vma *scratch; 444 struct i915_wa_list wa_list;
444 445
445 u32 irq_keep_mask; /* always keep these interrupts */ 446 u32 irq_keep_mask; /* always keep these interrupts */
446 u32 irq_enable_mask; /* bitmask to enable ring interrupt */ 447 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -898,10 +899,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
898int intel_engine_init_common(struct intel_engine_cs *engine); 899int intel_engine_init_common(struct intel_engine_cs *engine);
899void intel_engine_cleanup_common(struct intel_engine_cs *engine); 900void intel_engine_cleanup_common(struct intel_engine_cs *engine);
900 901
901int intel_engine_create_scratch(struct intel_engine_cs *engine,
902 unsigned int size);
903void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
904
905int intel_init_render_ring_buffer(struct intel_engine_cs *engine); 902int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
906int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); 903int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
907int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); 904int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..6e580891db96 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,6 +48,20 @@
48 * - Public functions to init or apply the given workaround type. 48 * - Public functions to init or apply the given workaround type.
49 */ 49 */
50 50
51static void wa_init_start(struct i915_wa_list *wal, const char *name)
52{
53 wal->name = name;
54}
55
56static void wa_init_finish(struct i915_wa_list *wal)
57{
58 if (!wal->count)
59 return;
60
61 DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
62 wal->count, wal->name);
63}
64
51static void wa_add(struct drm_i915_private *i915, 65static void wa_add(struct drm_i915_private *i915,
52 i915_reg_t reg, const u32 mask, const u32 val) 66 i915_reg_t reg, const u32 mask, const u32 val)
53{ 67{
@@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
580 return 0; 594 return 0;
581} 595}
582 596
583static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv) 597static void
598wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
599{
600 const unsigned int grow = 1 << 4;
601
602 GEM_BUG_ON(!is_power_of_2(grow));
603
604 if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
605 struct i915_wa *list;
606
607 list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
608 GFP_KERNEL);
609 if (!list) {
610 DRM_ERROR("No space for workaround init!\n");
611 return;
612 }
613
614 if (wal->list)
615 memcpy(list, wal->list, sizeof(*wa) * wal->count);
616
617 wal->list = list;
618 }
619
620 wal->list[wal->count++] = *wa;
621}
622
623static void
624wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
625{
626 struct i915_wa wa = {
627 .reg = reg,
628 .mask = val,
629 .val = _MASKED_BIT_ENABLE(val)
630 };
631
632 wal_add(wal, &wa);
633}
634
635static void
636wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
637 u32 val)
584{ 638{
639 struct i915_wa wa = {
640 .reg = reg,
641 .mask = mask,
642 .val = val
643 };
644
645 wal_add(wal, &wa);
585} 646}
586 647
587static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv) 648static void
649wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
588{ 650{
651 wa_write_masked_or(wal, reg, ~0, val);
589} 652}
590 653
591static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv) 654static void
655wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
592{ 656{
593 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ 657 wa_write_masked_or(wal, reg, val, val);
594 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, 658}
595 _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
596 659
597 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ 660static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
598 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | 661{
599 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 662 struct i915_wa_list *wal = &i915->gt_wa_list;
600 663
601 /* WaDisableKillLogic:bxt,skl,kbl */ 664 /* WaDisableKillLogic:bxt,skl,kbl */
602 if (!IS_COFFEELAKE(dev_priv)) 665 if (!IS_COFFEELAKE(i915))
603 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 666 wa_write_or(wal,
604 ECOCHK_DIS_TLB); 667 GAM_ECOCHK,
668 ECOCHK_DIS_TLB);
605 669
606 if (HAS_LLC(dev_priv)) { 670 if (HAS_LLC(i915)) {
607 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl 671 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
608 * 672 *
609 * Must match Display Engine. See 673 * Must match Display Engine. See
610 * WaCompressedResourceDisplayNewHashMode. 674 * WaCompressedResourceDisplayNewHashMode.
611 */ 675 */
612 I915_WRITE(MMCD_MISC_CTRL, 676 wa_write_or(wal,
613 I915_READ(MMCD_MISC_CTRL) | 677 MMCD_MISC_CTRL,
614 MMCD_PCLA | 678 MMCD_PCLA | MMCD_HOTSPOT_EN);
615 MMCD_HOTSPOT_EN);
616 } 679 }
617 680
618 /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ 681 /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
619 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 682 wa_write_or(wal,
620 BDW_DISABLE_HDC_INVALIDATION); 683 GAM_ECOCHK,
621 684 BDW_DISABLE_HDC_INVALIDATION);
622 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
623 if (IS_GEN9_LP(dev_priv)) {
624 u32 val = I915_READ(GEN8_L3SQCREG1);
625
626 val &= ~L3_PRIO_CREDITS_MASK;
627 val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
628 I915_WRITE(GEN8_L3SQCREG1, val);
629 }
630
631 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
632 I915_WRITE(GEN8_L3SQCREG4,
633 I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
634
635 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
636 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
637 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
638} 685}
639 686
640static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 687static void skl_gt_workarounds_init(struct drm_i915_private *i915)
641{ 688{
642 gen9_gt_workarounds_apply(dev_priv); 689 struct i915_wa_list *wal = &i915->gt_wa_list;
643 690
644 /* WaEnableGapsTsvCreditFix:skl */ 691 gen9_gt_workarounds_init(i915);
645 I915_WRITE(GEN8_GARBCNTL,
646 I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
647 692
648 /* WaDisableGafsUnitClkGating:skl */ 693 /* WaDisableGafsUnitClkGating:skl */
649 I915_WRITE(GEN7_UCGCTL4, 694 wa_write_or(wal,
650 I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 695 GEN7_UCGCTL4,
696 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
651 697
652 /* WaInPlaceDecompressionHang:skl */ 698 /* WaInPlaceDecompressionHang:skl */
653 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) 699 if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
654 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 700 wa_write_or(wal,
655 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 701 GEN9_GAMT_ECO_REG_RW_IA,
656 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 702 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
657} 703}
658 704
659static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv) 705static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
660{ 706{
661 gen9_gt_workarounds_apply(dev_priv); 707 struct i915_wa_list *wal = &i915->gt_wa_list;
662 708
663 /* WaDisablePooledEuLoadBalancingFix:bxt */ 709 gen9_gt_workarounds_init(i915);
664 I915_WRITE(FF_SLICE_CS_CHICKEN2,
665 _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
666 710
667 /* WaInPlaceDecompressionHang:bxt */ 711 /* WaInPlaceDecompressionHang:bxt */
668 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 712 wa_write_or(wal,
669 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 713 GEN9_GAMT_ECO_REG_RW_IA,
670 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 714 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
671} 715}
672 716
673static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 717static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
674{ 718{
675 gen9_gt_workarounds_apply(dev_priv); 719 struct i915_wa_list *wal = &i915->gt_wa_list;
676 720
677 /* WaEnableGapsTsvCreditFix:kbl */ 721 gen9_gt_workarounds_init(i915);
678 I915_WRITE(GEN8_GARBCNTL,
679 I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
680 722
681 /* WaDisableDynamicCreditSharing:kbl */ 723 /* WaDisableDynamicCreditSharing:kbl */
682 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 724 if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
683 I915_WRITE(GAMT_CHKN_BIT_REG, 725 wa_write_or(wal,
684 I915_READ(GAMT_CHKN_BIT_REG) | 726 GAMT_CHKN_BIT_REG,
685 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); 727 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
686 728
687 /* WaDisableGafsUnitClkGating:kbl */ 729 /* WaDisableGafsUnitClkGating:kbl */
688 I915_WRITE(GEN7_UCGCTL4, 730 wa_write_or(wal,
689 I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 731 GEN7_UCGCTL4,
732 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
690 733
691 /* WaInPlaceDecompressionHang:kbl */ 734 /* WaInPlaceDecompressionHang:kbl */
692 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 735 wa_write_or(wal,
693 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 736 GEN9_GAMT_ECO_REG_RW_IA,
694 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 737 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
695
696 /* WaKBLVECSSemaphoreWaitPoll:kbl */
697 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
698 struct intel_engine_cs *engine;
699 unsigned int tmp;
700
701 for_each_engine(engine, dev_priv, tmp) {
702 if (engine->id == RCS)
703 continue;
704
705 I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
706 }
707 }
708} 738}
709 739
710static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv) 740static void glk_gt_workarounds_init(struct drm_i915_private *i915)
711{ 741{
712 gen9_gt_workarounds_apply(dev_priv); 742 gen9_gt_workarounds_init(i915);
713} 743}
714 744
715static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 745static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
716{ 746{
717 gen9_gt_workarounds_apply(dev_priv); 747 struct i915_wa_list *wal = &i915->gt_wa_list;
718 748
719 /* WaEnableGapsTsvCreditFix:cfl */ 749 gen9_gt_workarounds_init(i915);
720 I915_WRITE(GEN8_GARBCNTL,
721 I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
722 750
723 /* WaDisableGafsUnitClkGating:cfl */ 751 /* WaDisableGafsUnitClkGating:cfl */
724 I915_WRITE(GEN7_UCGCTL4, 752 wa_write_or(wal,
725 I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 753 GEN7_UCGCTL4,
754 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
726 755
727 /* WaInPlaceDecompressionHang:cfl */ 756 /* WaInPlaceDecompressionHang:cfl */
728 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 757 wa_write_or(wal,
729 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 758 GEN9_GAMT_ECO_REG_RW_IA,
730 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 759 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
731} 760}
732 761
733static void wa_init_mcr(struct drm_i915_private *dev_priv) 762static void wa_init_mcr(struct drm_i915_private *dev_priv)
734{ 763{
735 const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); 764 const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
736 u32 mcr; 765 struct i915_wa_list *wal = &dev_priv->gt_wa_list;
737 u32 mcr_slice_subslice_mask; 766 u32 mcr_slice_subslice_mask;
738 767
739 /* 768 /*
@@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
770 WARN_ON((enabled_mask & disabled_mask) != enabled_mask); 799 WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
771 } 800 }
772 801
773 mcr = I915_READ(GEN8_MCR_SELECTOR);
774
775 if (INTEL_GEN(dev_priv) >= 11) 802 if (INTEL_GEN(dev_priv) >= 11)
776 mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | 803 mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
777 GEN11_MCR_SUBSLICE_MASK; 804 GEN11_MCR_SUBSLICE_MASK;
@@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
789 * occasions, such as INSTDONE, where this value is dependent 816 * occasions, such as INSTDONE, where this value is dependent
790 * on s/ss combo, the read should be done with read_subslice_reg. 817 * on s/ss combo, the read should be done with read_subslice_reg.
791 */ 818 */
792 mcr &= ~mcr_slice_subslice_mask; 819 wa_write_masked_or(wal,
793 mcr |= intel_calculate_mcr_s_ss_select(dev_priv); 820 GEN8_MCR_SELECTOR,
794 I915_WRITE(GEN8_MCR_SELECTOR, mcr); 821 mcr_slice_subslice_mask,
822 intel_calculate_mcr_s_ss_select(dev_priv));
795} 823}
796 824
797static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 825static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
798{ 826{
799 wa_init_mcr(dev_priv); 827 struct i915_wa_list *wal = &i915->gt_wa_list;
828
829 wa_init_mcr(i915);
800 830
801 /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ 831 /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
802 if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) 832 if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
803 I915_WRITE(GAMT_CHKN_BIT_REG, 833 wa_write_or(wal,
804 I915_READ(GAMT_CHKN_BIT_REG) | 834 GAMT_CHKN_BIT_REG,
805 GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); 835 GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
806 836
807 /* WaInPlaceDecompressionHang:cnl */ 837 /* WaInPlaceDecompressionHang:cnl */
808 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 838 wa_write_or(wal,
809 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 839 GEN9_GAMT_ECO_REG_RW_IA,
810 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 840 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
811
812 /* WaEnablePreemptionGranularityControlByUMD:cnl */
813 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
814 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
815} 841}
816 842
817static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 843static void icl_gt_workarounds_init(struct drm_i915_private *i915)
818{ 844{
819 wa_init_mcr(dev_priv); 845 struct i915_wa_list *wal = &i915->gt_wa_list;
820 846
821 /* This is not a Wa. Enable for better image quality */ 847 wa_init_mcr(i915);
822 I915_WRITE(_3D_CHICKEN3,
823 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
824 848
825 /* WaInPlaceDecompressionHang:icl */ 849 /* WaInPlaceDecompressionHang:icl */
826 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 850 wa_write_or(wal,
827 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 851 GEN9_GAMT_ECO_REG_RW_IA,
828 852 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
829 /* WaPipelineFlushCoherentLines:icl */
830 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
831 GEN8_LQSC_FLUSH_COHERENT_LINES);
832
833 /* Wa_1405543622:icl
834 * Formerly known as WaGAPZPriorityScheme
835 */
836 I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
837 GEN11_ARBITRATION_PRIO_ORDER_MASK);
838
839 /* Wa_1604223664:icl
840 * Formerly known as WaL3BankAddressHashing
841 */
842 I915_WRITE(GEN8_GARBCNTL,
843 (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
844 GEN11_HASH_CTRL_EXCL_BIT0);
845 I915_WRITE(GEN11_GLBLINVL,
846 (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
847 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
848 853
849 /* WaModifyGamTlbPartitioning:icl */ 854 /* WaModifyGamTlbPartitioning:icl */
850 I915_WRITE(GEN11_GACB_PERF_CTRL, 855 wa_write_masked_or(wal,
851 (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) | 856 GEN11_GACB_PERF_CTRL,
852 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); 857 GEN11_HASH_CTRL_MASK,
853 858 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
854 /* Wa_1405733216:icl
855 * Formerly known as WaDisableCleanEvicts
856 */
857 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
858 GEN11_LQSC_CLEAN_EVICT_DISABLE);
859 859
860 /* Wa_1405766107:icl 860 /* Wa_1405766107:icl
861 * Formerly known as WaCL2SFHalfMaxAlloc 861 * Formerly known as WaCL2SFHalfMaxAlloc
862 */ 862 */
863 I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) | 863 wa_write_or(wal,
864 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | 864 GEN11_LSN_UNSLCVC,
865 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); 865 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
866 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
866 867
867 /* Wa_220166154:icl 868 /* Wa_220166154:icl
868 * Formerly known as WaDisCtxReload 869 * Formerly known as WaDisCtxReload
869 */ 870 */
870 I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) | 871 wa_write_or(wal,
871 GAMW_ECO_DEV_CTX_RELOAD_DISABLE); 872 GEN8_GAMW_ECO_DEV_RW_IA,
873 GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
872 874
873 /* Wa_1405779004:icl (pre-prod) */ 875 /* Wa_1405779004:icl (pre-prod) */
874 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) 876 if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
875 I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, 877 wa_write_or(wal,
876 I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | 878 SLICE_UNIT_LEVEL_CLKGATE,
877 MSCUNIT_CLKGATE_DIS); 879 MSCUNIT_CLKGATE_DIS);
878 880
879 /* Wa_1406680159:icl */ 881 /* Wa_1406680159:icl */
880 I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, 882 wa_write_or(wal,
881 I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) | 883 SUBSLICE_UNIT_LEVEL_CLKGATE,
882 GWUNIT_CLKGATE_DIS); 884 GWUNIT_CLKGATE_DIS);
883
884 /* Wa_1604302699:icl */
885 I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
886 I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
887 GEN11_I2M_WRITE_DISABLE);
888 885
889 /* Wa_1406838659:icl (pre-prod) */ 886 /* Wa_1406838659:icl (pre-prod) */
890 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) 887 if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
891 I915_WRITE(INF_UNIT_LEVEL_CLKGATE, 888 wa_write_or(wal,
892 I915_READ(INF_UNIT_LEVEL_CLKGATE) | 889 INF_UNIT_LEVEL_CLKGATE,
893 CGPSF_CLKGATE_DIS); 890 CGPSF_CLKGATE_DIS);
894
895 /* WaForwardProgressSoftReset:icl */
896 I915_WRITE(GEN10_SCRATCH_LNCF2,
897 I915_READ(GEN10_SCRATCH_LNCF2) |
898 PMFLUSHDONE_LNICRSDROP |
899 PMFLUSH_GAPL3UNBLOCK |
900 PMFLUSHDONE_LNEBLK);
901 891
902 /* Wa_1406463099:icl 892 /* Wa_1406463099:icl
903 * Formerly known as WaGamTlbPendError 893 * Formerly known as WaGamTlbPendError
904 */ 894 */
905 I915_WRITE(GAMT_CHKN_BIT_REG, 895 wa_write_or(wal,
906 I915_READ(GAMT_CHKN_BIT_REG) | 896 GAMT_CHKN_BIT_REG,
907 GAMT_CHKN_DISABLE_L3_COH_PIPE); 897 GAMT_CHKN_DISABLE_L3_COH_PIPE);
908} 898}
909 899
910void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) 900void intel_gt_init_workarounds(struct drm_i915_private *i915)
911{ 901{
912 if (INTEL_GEN(dev_priv) < 8) 902 struct i915_wa_list *wal = &i915->gt_wa_list;
903
904 wa_init_start(wal, "GT");
905
906 if (INTEL_GEN(i915) < 8)
913 return; 907 return;
914 else if (IS_BROADWELL(dev_priv)) 908 else if (IS_BROADWELL(i915))
915 bdw_gt_workarounds_apply(dev_priv); 909 return;
916 else if (IS_CHERRYVIEW(dev_priv)) 910 else if (IS_CHERRYVIEW(i915))
917 chv_gt_workarounds_apply(dev_priv); 911 return;
918 else if (IS_SKYLAKE(dev_priv)) 912 else if (IS_SKYLAKE(i915))
919 skl_gt_workarounds_apply(dev_priv); 913 skl_gt_workarounds_init(i915);
920 else if (IS_BROXTON(dev_priv)) 914 else if (IS_BROXTON(i915))
921 bxt_gt_workarounds_apply(dev_priv); 915 bxt_gt_workarounds_init(i915);
922 else if (IS_KABYLAKE(dev_priv)) 916 else if (IS_KABYLAKE(i915))
923 kbl_gt_workarounds_apply(dev_priv); 917 kbl_gt_workarounds_init(i915);
924 else if (IS_GEMINILAKE(dev_priv)) 918 else if (IS_GEMINILAKE(i915))
925 glk_gt_workarounds_apply(dev_priv); 919 glk_gt_workarounds_init(i915);
926 else if (IS_COFFEELAKE(dev_priv)) 920 else if (IS_COFFEELAKE(i915))
927 cfl_gt_workarounds_apply(dev_priv); 921 cfl_gt_workarounds_init(i915);
928 else if (IS_CANNONLAKE(dev_priv)) 922 else if (IS_CANNONLAKE(i915))
929 cnl_gt_workarounds_apply(dev_priv); 923 cnl_gt_workarounds_init(i915);
930 else if (IS_ICELAKE(dev_priv)) 924 else if (IS_ICELAKE(i915))
931 icl_gt_workarounds_apply(dev_priv); 925 icl_gt_workarounds_init(i915);
932 else 926 else
933 MISSING_CASE(INTEL_GEN(dev_priv)); 927 MISSING_CASE(INTEL_GEN(i915));
928
929 wa_init_finish(wal);
930}
931
932static enum forcewake_domains
933wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
934 const struct i915_wa_list *wal)
935{
936 enum forcewake_domains fw = 0;
937 struct i915_wa *wa;
938 unsigned int i;
939
940 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
941 fw |= intel_uncore_forcewake_for_reg(dev_priv,
942 wa->reg,
943 FW_REG_READ |
944 FW_REG_WRITE);
945
946 return fw;
947}
948
949static void
950wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
951{
952 enum forcewake_domains fw;
953 unsigned long flags;
954 struct i915_wa *wa;
955 unsigned int i;
956
957 if (!wal->count)
958 return;
959
960 fw = wal_get_fw_for_rmw(dev_priv, wal);
961
962 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
963 intel_uncore_forcewake_get__locked(dev_priv, fw);
964
965 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
966 u32 val = I915_READ_FW(wa->reg);
967
968 val &= ~wa->mask;
969 val |= wa->val;
970
971 I915_WRITE_FW(wa->reg, val);
972 }
973
974 intel_uncore_forcewake_put__locked(dev_priv, fw);
975 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
976
977 DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
978}
979
980void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
981{
982 wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
934} 983}
935 984
936struct whitelist { 985struct whitelist {
@@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
1077 whitelist_apply(engine, whitelist_build(engine, &w)); 1126 whitelist_apply(engine, whitelist_build(engine, &w));
1078} 1127}
1079 1128
1129static void rcs_engine_wa_init(struct intel_engine_cs *engine)
1130{
1131 struct drm_i915_private *i915 = engine->i915;
1132 struct i915_wa_list *wal = &engine->wa_list;
1133
1134 if (IS_ICELAKE(i915)) {
1135 /* This is not a Wa. Enable for better image quality */
1136 wa_masked_en(wal,
1137 _3D_CHICKEN3,
1138 _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
1139
1140 /* WaPipelineFlushCoherentLines:icl */
1141 wa_write_or(wal,
1142 GEN8_L3SQCREG4,
1143 GEN8_LQSC_FLUSH_COHERENT_LINES);
1144
1145 /*
1146 * Wa_1405543622:icl
1147 * Formerly known as WaGAPZPriorityScheme
1148 */
1149 wa_write_or(wal,
1150 GEN8_GARBCNTL,
1151 GEN11_ARBITRATION_PRIO_ORDER_MASK);
1152
1153 /*
1154 * Wa_1604223664:icl
1155 * Formerly known as WaL3BankAddressHashing
1156 */
1157 wa_write_masked_or(wal,
1158 GEN8_GARBCNTL,
1159 GEN11_HASH_CTRL_EXCL_MASK,
1160 GEN11_HASH_CTRL_EXCL_BIT0);
1161 wa_write_masked_or(wal,
1162 GEN11_GLBLINVL,
1163 GEN11_BANK_HASH_ADDR_EXCL_MASK,
1164 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
1165
1166 /*
1167 * Wa_1405733216:icl
1168 * Formerly known as WaDisableCleanEvicts
1169 */
1170 wa_write_or(wal,
1171 GEN8_L3SQCREG4,
1172 GEN11_LQSC_CLEAN_EVICT_DISABLE);
1173
1174 /* Wa_1604302699:icl */
1175 wa_write_or(wal,
1176 GEN10_L3_CHICKEN_MODE_REGISTER,
1177 GEN11_I2M_WRITE_DISABLE);
1178
1179 /* WaForwardProgressSoftReset:icl */
1180 wa_write_or(wal,
1181 GEN10_SCRATCH_LNCF2,
1182 PMFLUSHDONE_LNICRSDROP |
1183 PMFLUSH_GAPL3UNBLOCK |
1184 PMFLUSHDONE_LNEBLK);
1185 }
1186
1187 if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
1188 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
1189 wa_masked_en(wal,
1190 GEN7_FF_SLICE_CS_CHICKEN1,
1191 GEN9_FFSC_PERCTX_PREEMPT_CTRL);
1192 }
1193
1194 if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
1195 /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
1196 wa_write_or(wal,
1197 GEN8_GARBCNTL,
1198 GEN9_GAPS_TSV_CREDIT_DISABLE);
1199 }
1200
1201 if (IS_BROXTON(i915)) {
1202 /* WaDisablePooledEuLoadBalancingFix:bxt */
1203 wa_masked_en(wal,
1204 FF_SLICE_CS_CHICKEN2,
1205 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1206 }
1207
1208 if (IS_GEN9(i915)) {
1209 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
1210 wa_masked_en(wal,
1211 GEN9_CSFE_CHICKEN1_RCS,
1212 GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
1213
1214 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
1215 wa_write_or(wal,
1216 BDW_SCRATCH1,
1217 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
1218
1219 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
1220 if (IS_GEN9_LP(i915))
1221 wa_write_masked_or(wal,
1222 GEN8_L3SQCREG1,
1223 L3_PRIO_CREDITS_MASK,
1224 L3_GENERAL_PRIO_CREDITS(62) |
1225 L3_HIGH_PRIO_CREDITS(2));
1226
1227 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
1228 wa_write_or(wal,
1229 GEN8_L3SQCREG4,
1230 GEN8_LQSC_FLUSH_COHERENT_LINES);
1231 }
1232}
1233
1234static void xcs_engine_wa_init(struct intel_engine_cs *engine)
1235{
1236 struct drm_i915_private *i915 = engine->i915;
1237 struct i915_wa_list *wal = &engine->wa_list;
1238
1239 /* WaKBLVECSSemaphoreWaitPoll:kbl */
1240 if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
1241 wa_write(wal,
1242 RING_SEMA_WAIT_POLL(engine->mmio_base),
1243 1);
1244 }
1245}
1246
1247void intel_engine_init_workarounds(struct intel_engine_cs *engine)
1248{
1249 struct i915_wa_list *wal = &engine->wa_list;
1250
1251 if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
1252 return;
1253
1254 wa_init_start(wal, engine->name);
1255
1256 if (engine->id == RCS)
1257 rcs_engine_wa_init(engine);
1258 else
1259 xcs_engine_wa_init(engine);
1260
1261 wa_init_finish(wal);
1262}
1263
1264void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
1265{
1266 wa_list_apply(engine->i915, &engine->wa_list);
1267}
1268
1080#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1269#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1081#include "selftests/intel_workarounds.c" 1270#include "selftests/intel_workarounds.c"
1082#endif 1271#endif
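The refactor above splits workarounds into a record phase and a replay phase: a list is built once (wa_init_start(), then wa_write_or()/wa_masked_en()/wa_write_masked_or(), then wa_init_finish()) and re-applied on every GT or engine (re)initialisation by wa_list_apply(), which computes the union of forcewake domains with wal_get_fw_for_rmw() and performs all the read-modify-writes under one uncore lock. A minimal usage sketch built only from helpers shown in this diff:

	static void example_gt_wa_init(struct drm_i915_private *i915)
	{
		struct i915_wa_list *wal = &i915->gt_wa_list;

		wa_init_start(wal, "GT");
		/* rmw register: OR the bits in, preserve the rest */
		wa_write_or(wal, GAM_ECOCHK, ECOCHK_DIS_TLB);
		/* masked register: the write carries its own enable mask */
		wa_masked_en(wal, GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
		wa_init_finish(wal);
	}

	/* replay, e.g. at init/resume: */
	intel_gt_apply_workarounds(i915);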
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..979695a53964 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,35 @@
7#ifndef _I915_WORKAROUNDS_H_ 7#ifndef _I915_WORKAROUNDS_H_
8#define _I915_WORKAROUNDS_H_ 8#define _I915_WORKAROUNDS_H_
9 9
10#include <linux/slab.h>
11
12struct i915_wa {
13 i915_reg_t reg;
14 u32 mask;
15 u32 val;
16};
17
18struct i915_wa_list {
19 const char *name;
20 struct i915_wa *list;
21 unsigned int count;
22};
23
24static inline void intel_wa_list_free(struct i915_wa_list *wal)
25{
26 kfree(wal->list);
27 memset(wal, 0, sizeof(*wal));
28}
29
10int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv); 30int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
11int intel_ctx_workarounds_emit(struct i915_request *rq); 31int intel_ctx_workarounds_emit(struct i915_request *rq);
12 32
13void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv); 33void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
34void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
14 35
15void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine); 36void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
16 37
38void intel_engine_init_workarounds(struct intel_engine_cs *engine);
39void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
40
17#endif 41#endif
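Since wal_add() grows wal->list with kmalloc_array(), each recorded list needs a matching intel_wa_list_free(), the new inline above, at teardown. The call sites are not part of this excerpt; illustratively:

	/* assumed call sites, not shown in this diff: */
	intel_wa_list_free(&engine->wa_list);   /* engine cleanup */
	intel_wa_list_free(&i915->gt_wa_list);  /* driver unload */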
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 66df1b177959..27b507eb4a99 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
818 dsi->encoder.possible_crtcs = 1; 818 dsi->encoder.possible_crtcs = 1;
819 819
820 /* If there's a bridge, attach to it and let it create the connector */ 820 /* If there's a bridge, attach to it and let it create the connector */
821 ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL); 821 if (dsi->bridge) {
822 if (ret) { 822 ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
823 DRM_ERROR("Failed to attach bridge to drm\n"); 823 if (ret) {
824 824 DRM_ERROR("Failed to attach bridge to drm\n");
825 goto err_encoder_cleanup;
826 }
827 } else {
825 /* Otherwise create our own connector and attach to a panel */ 828 /* Otherwise create our own connector and attach to a panel */
826 ret = mtk_dsi_create_connector(drm, dsi); 829 ret = mtk_dsi_create_connector(drm, dsi);
827 if (ret) 830 if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6cbbae3f438b..db1bf7f88c1f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
198/****************************************************************************** 198/******************************************************************************
199 * EVO channel helpers 199 * EVO channel helpers
200 *****************************************************************************/ 200 *****************************************************************************/
201static void
202evo_flush(struct nv50_dmac *dmac)
203{
204 /* Push buffer fetches are not coherent with BAR1, so we need to ensure
205 * writes have been flushed right through to VRAM before writing PUT.
206 */
207 if (dmac->push.type & NVIF_MEM_VRAM) {
208 struct nvif_device *device = dmac->base.device;
209 nvif_wr32(&device->object, 0x070000, 0x00000001);
210 nvif_msec(device, 2000,
211 if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
212 break;
213 );
214 }
215}
216
201u32 * 217u32 *
202evo_wait(struct nv50_dmac *evoc, int nr) 218evo_wait(struct nv50_dmac *evoc, int nr)
203{ 219{
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
208 mutex_lock(&dmac->lock); 224 mutex_lock(&dmac->lock);
209 if (put + nr >= (PAGE_SIZE / 4) - 8) { 225 if (put + nr >= (PAGE_SIZE / 4) - 8) {
210 dmac->ptr[put] = 0x20000000; 226 dmac->ptr[put] = 0x20000000;
227 evo_flush(dmac);
211 228
212 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000); 229 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
213 if (nvif_msec(device, 2000, 230 if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
230{ 247{
231 struct nv50_dmac *dmac = evoc; 248 struct nv50_dmac *dmac = evoc;
232 249
233 /* Push buffer fetches are not coherent with BAR1, so we need to ensure 250 evo_flush(dmac);
234 * writes have been flushed right through to VRAM before writing PUT.
235 */
236 if (dmac->push.type & NVIF_MEM_VRAM) {
237 struct nvif_device *device = dmac->base.device;
238 nvif_wr32(&device->object, 0x070000, 0x00000001);
239 nvif_msec(device, 2000,
240 if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
241 break;
242 );
243 }
244 251
245 nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); 252 nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
246 mutex_unlock(&dmac->lock); 253 mutex_unlock(&dmac->lock);
@@ -1264,6 +1271,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
1264{ 1271{
1265 struct nv50_mstm *mstm = *pmstm; 1272 struct nv50_mstm *mstm = *pmstm;
1266 if (mstm) { 1273 if (mstm) {
1274 drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
1267 kfree(*pmstm); 1275 kfree(*pmstm);
1268 *pmstm = NULL; 1276 *pmstm = NULL;
1269 } 1277 }
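Factoring the coherence wait into evo_flush() means it now also runs when evo_wait() wraps the push buffer (the 0x20000000 jump written before PUT is rewound), not just on the final kick. The resulting caller flow, using only functions shown above:

	int nr = 2;                      /* dwords to stage */
	u32 *push = evo_wait(dmac, nr);  /* wrap path now calls evo_flush() */
	if (push) {
		/* ... write nr dwords of methods/data via *push++ ... */
		evo_kick(push, dmac);    /* evo_flush(), then PUT update */
	}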
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2b2baf6e0e0d..d2928d43f29a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1171,10 +1171,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
1171 goto err_free; 1171 goto err_free;
1172 } 1172 }
1173 1173
1174 err = nouveau_drm_device_init(drm);
1175 if (err)
1176 goto err_put;
1177
1174 platform_set_drvdata(pdev, drm); 1178 platform_set_drvdata(pdev, drm);
1175 1179
1176 return drm; 1180 return drm;
1177 1181
1182err_put:
1183 drm_dev_put(drm);
1178err_free: 1184err_free:
1179 nvkm_device_del(pdevice); 1185 nvkm_device_del(pdevice);
1180 1186
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 941f35233b1f..5864cb452c5c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
448 return 0; 448 return 0;
449} 449}
450 450
451static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
452{
453 rockchip_drm_platform_remove(pdev);
454}
455
456static const struct of_device_id rockchip_drm_dt_ids[] = { 451static const struct of_device_id rockchip_drm_dt_ids[] = {
457 { .compatible = "rockchip,display-subsystem", }, 452 { .compatible = "rockchip,display-subsystem", },
458 { /* sentinel */ }, 453 { /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
462static struct platform_driver rockchip_drm_platform_driver = { 457static struct platform_driver rockchip_drm_platform_driver = {
463 .probe = rockchip_drm_platform_probe, 458 .probe = rockchip_drm_platform_probe,
464 .remove = rockchip_drm_platform_remove, 459 .remove = rockchip_drm_platform_remove,
465 .shutdown = rockchip_drm_platform_shutdown,
466 .driver = { 460 .driver = {
467 .name = "rockchip-drm", 461 .name = "rockchip-drm",
468 .of_match_table = rockchip_drm_dt_ids, 462 .of_match_table = rockchip_drm_dt_ids,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 61a84b958d67..d7a2dfb8ee9b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -49,6 +49,8 @@
49 49
50#define VMWGFX_REPO "In Tree" 50#define VMWGFX_REPO "In Tree"
51 51
52#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
53
52 54
53/** 55/**
54 * Fully encoded drm commands. Might move to vmw_drm.h 56 * Fully encoded drm commands. Might move to vmw_drm.h
@@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
918 spin_unlock(&dev_priv->cap_lock); 920 spin_unlock(&dev_priv->cap_lock);
919 } 921 }
920 922
921 923 vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
922 ret = vmw_kms_init(dev_priv); 924 ret = vmw_kms_init(dev_priv);
923 if (unlikely(ret != 0)) 925 if (unlikely(ret != 0))
924 goto out_no_kms; 926 goto out_no_kms;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 59f614225bcd..aca974b14b55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -606,6 +606,9 @@ struct vmw_private {
606 606
607 struct vmw_cmdbuf_man *cman; 607 struct vmw_cmdbuf_man *cman;
608 DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); 608 DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
609
610 /* Validation memory reservation */
611 struct vmw_validation_mem vvm;
609}; 612};
610 613
611static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) 614static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
846extern void vmw_ttm_global_release(struct vmw_private *dev_priv); 849extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
847extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); 850extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
848 851
852extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
853 size_t gran);
849/** 854/**
850 * TTM buffer object driver - vmwgfx_ttm_buffer.c 855 * TTM buffer object driver - vmwgfx_ttm_buffer.c
851 */ 856 */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5a6b70ba137a..f2d13a72c05d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1738 void *buf) 1738 void *buf)
1739{ 1739{
1740 struct vmw_buffer_object *vmw_bo; 1740 struct vmw_buffer_object *vmw_bo;
1741 int ret;
1742 1741
1743 struct { 1742 struct {
1744 uint32_t header; 1743 uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1748 return vmw_translate_guest_ptr(dev_priv, sw_context, 1747 return vmw_translate_guest_ptr(dev_priv, sw_context,
1749 &cmd->body.ptr, 1748 &cmd->body.ptr,
1750 &vmw_bo); 1749 &vmw_bo);
1751 return ret;
1752} 1750}
1753 1751
1754 1752
@@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
3837 struct sync_file *sync_file = NULL; 3835 struct sync_file *sync_file = NULL;
3838 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); 3836 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
3839 3837
3838 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3839
3840 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { 3840 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3841 out_fence_fd = get_unused_fd_flags(O_CLOEXEC); 3841 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3842 if (out_fence_fd < 0) { 3842 if (out_fence_fd < 0) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 7b1e5a5cbd2c..f88247046721 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
96 drm_global_item_unref(&dev_priv->bo_global_ref.ref); 96 drm_global_item_unref(&dev_priv->bo_global_ref.ref);
97 drm_global_item_unref(&dev_priv->mem_global_ref); 97 drm_global_item_unref(&dev_priv->mem_global_ref);
98} 98}
99
100/* struct vmw_validation_mem callback */
101static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
102{
103 static struct ttm_operation_ctx ctx = {.interruptible = false,
104 .no_wait_gpu = false};
105 struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
106
107 return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
108}
109
110/* struct vmw_validation_mem callback */
111static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
112{
113 struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
114
115 return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
116}
117
118/**
119 * vmw_validation_mem_init_ttm - Interface the validation memory tracker
120 * to ttm.
121 * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
122 * rather than a struct vmw_validation_mem is to make sure the assumption in the
123 * callbacks that struct vmw_private derives from struct vmw_validation_mem
124 * holds true.
125 * @gran: The recommended allocation granularity
126 */
127void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
128{
129 struct vmw_validation_mem *vvm = &dev_priv->vvm;
130
131 vvm->reserve_mem = vmw_vmt_reserve;
132 vvm->unreserve_mem = vmw_vmt_unreserve;
133 vvm->gran = gran;
134}
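Together with the vmwgfx_drv.c and vmwgfx_execbuf.c hunks earlier in this merge, the wiring is: install the TTM-backed callbacks once at load time with the 16-page granularity, then register them on each submission's validation context:

	/* at load time (vmw_driver_load): */
	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);

	/* per submission (vmw_execbuf_process): */
	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);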
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 184025fa938e..f116f092e00b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
104 return NULL; 104 return NULL;
105 105
106 if (ctx->mem_size_left < size) { 106 if (ctx->mem_size_left < size) {
107 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); 107 struct page *page;
108 108
109 if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
110 int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
111
112 if (ret)
113 return NULL;
114
115 ctx->vm_size_left += ctx->vm->gran;
116 ctx->total_mem += ctx->vm->gran;
117 }
118
119 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
109 if (!page) 120 if (!page)
110 return NULL; 121 return NULL;
111 122
123 if (ctx->vm)
124 ctx->vm_size_left -= PAGE_SIZE;
125
112 list_add_tail(&page->lru, &ctx->page_list); 126 list_add_tail(&page->lru, &ctx->page_list);
113 ctx->page_address = page_address(page); 127 ctx->page_address = page_address(page);
114 ctx->mem_size_left = PAGE_SIZE; 128 ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
138 } 152 }
139 153
140 ctx->mem_size_left = 0; 154 ctx->mem_size_left = 0;
155 if (ctx->vm && ctx->total_mem) {
156 ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
157 ctx->total_mem = 0;
158 ctx->vm_size_left = 0;
159 }
141} 160}
142 161
143/** 162/**
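The allocator above reserves in batches of ctx->vm->gran and debits one page per alloc_page(), so reserve_mem() fires only when the running balance drops below a page; total_mem accumulates every batch and is handed back in a single unreserve_mem() call from vmw_validation_mem_free(). An illustrative trace of that accounting, assuming 4 KiB pages and the 16-page granularity:

	size_t gran = 16 * PAGE_SIZE;           /* VMWGFX_VALIDATION_MEM_GRAN */
	size_t vm_size_left = 0, total_mem = 0;
	int page;

	for (page = 0; page < 32; page++) {
		if (vm_size_left < PAGE_SIZE) { /* true at pages 0 and 16 */
			vm_size_left += gran;   /* one reserve_mem(gran) call */
			total_mem += gran;
		}
		vm_size_left -= PAGE_SIZE;      /* one alloc_page() */
	}
	/* teardown: one unreserve_mem(total_mem) call, 128 KiB in this trace */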
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index b57e3292c386..3b396fea40d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -34,6 +34,21 @@
34#include <drm/ttm/ttm_execbuf_util.h> 34#include <drm/ttm/ttm_execbuf_util.h>
35 35
36/** 36/**
37 * struct vmw_validation_mem - Custom interface to provide memory reservations
38 * for the validation code.
39 * @reserve_mem: Callback to reserve memory
40 * @unreserve_mem: Callback to unreserve memory
41 * @gran: Reservation granularity. Contains a hint about how much memory should
42 * be reserved in each call to @reserve_mem(). A slow implementation may want
43 * reservation to be done in large batches.
44 */
45struct vmw_validation_mem {
46 int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
47 void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
48 size_t gran;
49};
50
51/**
37 * struct vmw_validation_context - Per command submission validation context 52 * struct vmw_validation_context - Per command submission validation context
38 * @ht: Hash table used to find resource- or buffer object duplicates 53 * @ht: Hash table used to find resource- or buffer object duplicates
39 * @resource_list: List head for resource validation metadata 54 * @resource_list: List head for resource validation metadata
@@ -47,6 +62,10 @@
47 * buffer objects 62 * buffer objects
48 * @mem_size_left: Free memory left in the last page in @page_list 63 * @mem_size_left: Free memory left in the last page in @page_list
49 * @page_address: Kernel virtual address of the last page in @page_list 64 * @page_address: Kernel virtual address of the last page in @page_list
65 * @vm: A pointer to the memory reservation interface or NULL if no
66 * memory reservation is needed.
67 * @vm_size_left: Amount of reserved memory that so far has not been allocated.
68 * @total_mem: Amount of reserved memory.
50 */ 69 */
51struct vmw_validation_context { 70struct vmw_validation_context {
52 struct drm_open_hash *ht; 71 struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
59 unsigned int merge_dups; 78 unsigned int merge_dups;
60 unsigned int mem_size_left; 79 unsigned int mem_size_left;
61 u8 *page_address; 80 u8 *page_address;
81 struct vmw_validation_mem *vm;
82 size_t vm_size_left;
83 size_t total_mem;
62}; 84};
63 85
64struct vmw_buffer_object; 86struct vmw_buffer_object;
@@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
102} 124}
103 125
104/** 126/**
127 * vmw_validation_set_val_mem - Register a validation mem object for
128 * validation memory reservation
129 * @ctx: The validation context
130 * @vm: Pointer to a struct vmw_validation_mem
131 *
132 * Must be set before the first attempt to allocate validation memory.
133 */
134static inline void
135vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
136 struct vmw_validation_mem *vm)
137{
138 ctx->vm = vm;
139}
140
141/**
105 * vmw_validation_set_ht - Register a hash table for duplicate finding 142 * vmw_validation_set_ht - Register a hash table for duplicate finding
106 * @ctx: The validation context 143 * @ctx: The validation context
107 * @ht: Pointer to a hash table to use for duplicate finding 144 * @ht: Pointer to a hash table to use for duplicate finding
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ed35c9a9a110..27519eb8ee63 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -17,6 +17,9 @@
17#ifndef HID_IDS_H_FILE 17#ifndef HID_IDS_H_FILE
18#define HID_IDS_H_FILE 18#define HID_IDS_H_FILE
19 19
20#define USB_VENDOR_ID_258A 0x258a
21#define USB_DEVICE_ID_258A_6A88 0x6a88
22
20#define USB_VENDOR_ID_3M 0x0596 23#define USB_VENDOR_ID_3M 0x0596
21#define USB_DEVICE_ID_3M1968 0x0500 24#define USB_DEVICE_ID_3M1968 0x0500
22#define USB_DEVICE_ID_3M2256 0x0502 25#define USB_DEVICE_ID_3M2256 0x0502
@@ -941,6 +944,10 @@
941#define USB_VENDOR_ID_REALTEK 0x0bda 944#define USB_VENDOR_ID_REALTEK 0x0bda
942#define USB_DEVICE_ID_REALTEK_READER 0x0152 945#define USB_DEVICE_ID_REALTEK_READER 0x0152
943 946
947#define USB_VENDOR_ID_RETROUSB 0xf000
948#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003
949#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1
950
944#define USB_VENDOR_ID_ROCCAT 0x1e7d 951#define USB_VENDOR_ID_ROCCAT 0x1e7d
945#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 952#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
946#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c 953#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 1882a4ab0f29..98b059d79bc8 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
42 42
43static const struct hid_device_id ite_devices[] = { 43static const struct hid_device_id ite_devices[] = {
44 { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, 44 { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
45 { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
45 { } 46 { }
46}; 47};
47MODULE_DEVICE_TABLE(hid, ite_devices); 48MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index c85a79986b6a..94088c0ed68a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -137,6 +137,8 @@ static const struct hid_device_id hid_quirks[] = {
137 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET }, 137 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
138 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET }, 138 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
139 { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS }, 139 { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
140 { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
141 { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
140 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, 142 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
141 { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, 143 { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
142 { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, 144 { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 97954f575c3f..1c1a2514d6f3 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -4,7 +4,7 @@ menu "Microsoft Hyper-V guest support"
4 4
5config HYPERV 5config HYPERV
6 tristate "Microsoft Hyper-V client drivers" 6 tristate "Microsoft Hyper-V client drivers"
7 depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST 7 depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
8 select PARAVIRT 8 select PARAVIRT
9 help 9 help
10 Select this option to run Linux as a Hyper-V client operating 10 Select this option to run Linux as a Hyper-V client operating
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 283d184280af..d0ff65675292 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -316,6 +316,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
316 316
317 if (!hv_dev->channel) 317 if (!hv_dev->channel)
318 return -ENODEV; 318 return -ENODEV;
319 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
320 return -EINVAL;
319 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 321 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
320 return sprintf(buf, "%d\n", outbound.current_interrupt_mask); 322 return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
321} 323}
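Each ring-buffer attribute in this file gains the same two-line guard, here and in the hunks that follow: without it, a sysfs read could hand hv_ringbuffer_get_debuginfo() a channel whose ring buffers were never mapped (not yet opened, or already closed). The repeated pattern, in isolation:

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;   /* ring buffers are valid only while opened */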
@@ -329,6 +331,8 @@ static ssize_t out_read_index_show(struct device *dev,
329 331
330 if (!hv_dev->channel) 332 if (!hv_dev->channel)
331 return -ENODEV; 333 return -ENODEV;
334 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
335 return -EINVAL;
332 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 336 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
333 return sprintf(buf, "%d\n", outbound.current_read_index); 337 return sprintf(buf, "%d\n", outbound.current_read_index);
334} 338}
@@ -343,6 +347,8 @@ static ssize_t out_write_index_show(struct device *dev,
343 347
344 if (!hv_dev->channel) 348 if (!hv_dev->channel)
345 return -ENODEV; 349 return -ENODEV;
350 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
351 return -EINVAL;
346 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 352 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
347 return sprintf(buf, "%d\n", outbound.current_write_index); 353 return sprintf(buf, "%d\n", outbound.current_write_index);
348} 354}
@@ -357,6 +363,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
357 363
358 if (!hv_dev->channel) 364 if (!hv_dev->channel)
359 return -ENODEV; 365 return -ENODEV;
366 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
367 return -EINVAL;
360 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 368 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
361 return sprintf(buf, "%d\n", outbound.bytes_avail_toread); 369 return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
362} 370}
@@ -371,6 +379,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
371 379
372 if (!hv_dev->channel) 380 if (!hv_dev->channel)
373 return -ENODEV; 381 return -ENODEV;
382 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
383 return -EINVAL;
374 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 384 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
375 return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); 385 return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
376} 386}
@@ -384,6 +394,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
384 394
385 if (!hv_dev->channel) 395 if (!hv_dev->channel)
386 return -ENODEV; 396 return -ENODEV;
397 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
398 return -EINVAL;
387 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 399 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
388 return sprintf(buf, "%d\n", inbound.current_interrupt_mask); 400 return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
389} 401}
@@ -397,6 +409,8 @@ static ssize_t in_read_index_show(struct device *dev,
397 409
398 if (!hv_dev->channel) 410 if (!hv_dev->channel)
399 return -ENODEV; 411 return -ENODEV;
412 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
413 return -EINVAL;
400 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 414 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
401 return sprintf(buf, "%d\n", inbound.current_read_index); 415 return sprintf(buf, "%d\n", inbound.current_read_index);
402} 416}
@@ -410,6 +424,8 @@ static ssize_t in_write_index_show(struct device *dev,
410 424
411 if (!hv_dev->channel) 425 if (!hv_dev->channel)
412 return -ENODEV; 426 return -ENODEV;
427 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
428 return -EINVAL;
413 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 429 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
414 return sprintf(buf, "%d\n", inbound.current_write_index); 430 return sprintf(buf, "%d\n", inbound.current_write_index);
415} 431}
@@ -424,6 +440,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
424 440
425 if (!hv_dev->channel) 441 if (!hv_dev->channel)
426 return -ENODEV; 442 return -ENODEV;
443 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
444 return -EINVAL;
427 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 445 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
428 return sprintf(buf, "%d\n", inbound.bytes_avail_toread); 446 return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
429} 447}
@@ -438,6 +456,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
438 456
439 if (!hv_dev->channel) 457 if (!hv_dev->channel)
440 return -ENODEV; 458 return -ENODEV;
459 if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
460 return -EINVAL;
441 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 461 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
442 return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); 462 return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
443} 463}
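
The ten vmbus hunks above all apply one defensive pattern: a sysfs show() callback validates every precondition on the channel (existence, then opened state) before dereferencing into its ring buffers, since userspace can read the attribute at any moment. A minimal userspace sketch of that ordering follows; the struct and names are illustrative stand-ins, not the Hyper-V driver's types:

#include <errno.h>
#include <stdio.h>

enum chan_state { CHANNEL_CLOSED_STATE, CHANNEL_OPENED_STATE };

struct channel {
	enum chan_state state;
	unsigned int read_index;	/* stands in for ring-buffer debug state */
};

/* Mirrors the fixed show() callbacks: existence first, then state,
 * and only then the dereference into ring-buffer data. */
static int show_read_index(const struct channel *ch, char *buf, size_t len)
{
	if (!ch)
		return -ENODEV;
	if (ch->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	return snprintf(buf, len, "%u\n", ch->read_index);
}

int main(void)
{
	struct channel ch = { .state = CHANNEL_CLOSED_STATE, .read_index = 42 };
	char buf[32];

	printf("closed: %d\n", show_read_index(&ch, buf, sizeof(buf)));
	ch.state = CHANNEL_OPENED_STATE;
	if (show_read_index(&ch, buf, sizeof(buf)) > 0)
		printf("opened: %s", buf);
	return 0;
}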
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 25d43c8f1c2a..558de0b9895c 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
 	struct net_device *cookie_ndev = cookie;
 	bool match = false;
 
+	if (!rdma_ndev)
+		return false;
+
 	rcu_read_lock();
 	if (netif_is_bond_master(cookie_ndev) &&
 	    rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9b20479dc710..7e6d70936c63 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12500,7 +12500,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
12500 } 12500 }
12501 12501
12502 /* allocate space for the counter values */ 12502 /* allocate space for the counter values */
12503 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); 12503 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12504 GFP_KERNEL);
12504 if (!dd->cntrs) 12505 if (!dd->cntrs)
12505 goto bail; 12506 goto bail;
12506 12507
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 1401b6ea4a28..2b882347d0c2 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -155,6 +155,8 @@ struct hfi1_ib_stats {
155extern struct hfi1_ib_stats hfi1_stats; 155extern struct hfi1_ib_stats hfi1_stats;
156extern const struct pci_error_handlers hfi1_pci_err_handler; 156extern const struct pci_error_handlers hfi1_pci_err_handler;
157 157
158extern int num_driver_cntrs;
159
158/* 160/*
159 * First-cut criterion for "device is active" is 161 * First-cut criterion for "device is active" is
160 * two thousand dwords combined Tx, Rx traffic per 162 * two thousand dwords combined Tx, Rx traffic per
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 6f3bc4dab858..1a016248039f 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -340,6 +340,13 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
 	default:
 		break;
 	}
+
+	/*
+	 * System latency between send and schedule is large enough that
+	 * forcing call_send to true for piothreshold packets is necessary.
+	 */
+	if (wqe->length <= piothreshold)
+		*call_send = true;
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 48e11e510358..a365089a9305 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1479,7 +1479,7 @@ static const char * const driver_cntr_names[] = {
 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
 static const char **dev_cntr_names;
 static const char **port_cntr_names;
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
 static int num_dev_cntrs;
 static int num_port_cntrs;
 static int cntr_names_initialized;
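
Taken together, the three hfi1 hunks fix an out-of-bounds write: the counter buffer was sized for dd->ndevcntrs entries but later filled with the driver counters as well, so the allocation must cover both counts and num_driver_cntrs has to lose its static linkage to be visible from chip.c. A toy sketch of the sizing rule, with made-up counts:

#include <stdlib.h>

#define NDEV 8	/* illustrative device-counter count */
#define NDRV 4	/* illustrative driver-counter count */

/* One allocation sized for everything that will be indexed later;
 * sizing with NDEV alone, as the pre-fix code did, overruns as soon
 * as slots [NDEV..NDEV+NDRV-1] are written. */
static unsigned long long *alloc_cntrs(void)
{
	return calloc(NDEV + NDRV, sizeof(unsigned long long));
}

int main(void)
{
	unsigned long long *cntrs = alloc_cntrs();

	if (!cntrs)
		return 1;
	cntrs[NDEV + NDRV - 1] = 1;	/* last driver counter: now in bounds */
	free(cntrs);
	return 0;
}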
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 61aab7c0c513..45c421c87100 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1066,7 +1066,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
 
 	err = uverbs_get_flags32(&access, attrs,
 				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
-				 IB_ACCESS_SUPPORTED);
+				 IB_ACCESS_LOCAL_WRITE |
+				 IB_ACCESS_REMOTE_WRITE |
+				 IB_ACCESS_REMOTE_READ);
 	if (err)
 		return err;
 
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4ead8c0fff5a..7309fb6bf0d2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -552,14 +552,13 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 			u64 io_virt, size_t bcnt, u32 *bytes_mapped)
 {
+	int npages = 0, current_seq, page_shift, ret, np;
+	bool implicit = false;
 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	u64 access_mask = ODP_READ_ALLOWED_BIT;
-	int npages = 0, page_shift, np;
 	u64 start_idx, page_mask;
 	struct ib_umem_odp *odp;
-	int current_seq;
 	size_t size;
-	int ret;
 
 	if (!odp_mr->page_list) {
 		odp = implicit_mr_get_data(mr, io_virt, bcnt);
@@ -567,7 +566,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 		if (IS_ERR(odp))
 			return PTR_ERR(odp);
 		mr = odp->private;
-
+		implicit = true;
 	} else {
 		odp = odp_mr;
 	}
@@ -646,7 +645,7 @@ next_mr:
 
 out:
 	if (ret == -EAGAIN) {
-		if (mr->parent || !odp->dying) {
+		if (implicit || !odp->dying) {
 			unsigned long timeout =
 				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
 
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 5936de71883f..6fc93834da44 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
 	bool dirty_flag;
 	*result = true;
 
+	if (from_cblock(cmd->cache_blocks) == 0)
+		/* Nothing to do */
+		return 0;
+
 	r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
 				   from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
 	if (r) {
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 0bd8d498b3b9..dadd9696340c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
 struct dm_thin_new_mapping;
 
 /*
- * The pool runs in 4 modes. Ordered in degraded order for comparisons.
+ * The pool runs in various modes. Ordered in degraded order for comparisons.
  */
 enum pool_mode {
 	PM_WRITE,		/* metadata may be changed */
@@ -282,9 +282,38 @@ struct pool {
 	mempool_t mapping_pool;
 };
 
-static enum pool_mode get_pool_mode(struct pool *pool);
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
 
+static enum pool_mode get_pool_mode(struct pool *pool)
+{
+	return pool->pf.mode;
+}
+
+static void notify_of_pool_mode_change(struct pool *pool)
+{
+	const char *descs[] = {
+	      "write",
+	      "out-of-data-space",
+	      "read-only",
+	      "read-only",
+	      "fail"
+	};
+	const char *extra_desc = NULL;
+	enum pool_mode mode = get_pool_mode(pool);
+
+	if (mode == PM_OUT_OF_DATA_SPACE) {
+		if (!pool->pf.error_if_no_space)
+			extra_desc = " (queue IO)";
+		else
+			extra_desc = " (error IO)";
+	}
+
+	dm_table_event(pool->ti->table);
+	DMINFO("%s: switching pool to %s%s mode",
+	       dm_device_name(pool->pool_md),
+	       descs[(int)mode], extra_desc ? : "");
+}
+
 /*
  * Target context for a pool.
  */
@@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws)
 	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
-
 /*
  * We're holding onto IO to allow userland time to react.  After the
  * timeout either the pool will have been resized (and thus back in
@@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws)
 
 	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
 		pool->pf.error_if_no_space = true;
-		notify_of_pool_mode_change_to_oods(pool);
+		notify_of_pool_mode_change(pool);
 		error_retry_list_with_code(pool, BLK_STS_NOSPC);
 	}
 }
@@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 
 /*----------------------------------------------------------------*/
 
-static enum pool_mode get_pool_mode(struct pool *pool)
-{
-	return pool->pf.mode;
-}
-
-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
-{
-	dm_table_event(pool->ti->table);
-	DMINFO("%s: switching pool to %s mode",
-	       dm_device_name(pool->pool_md), new_mode);
-}
-
-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
-{
-	if (!pool->pf.error_if_no_space)
-		notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
-	else
-		notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
-}
-
 static bool passdown_enabled(struct pool_c *pt)
 {
 	return pt->adjusted_pf.discard_passdown;
@@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
 	switch (new_mode) {
 	case PM_FAIL:
-		if (old_mode != new_mode)
-			notify_of_pool_mode_change(pool, "failure");
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_fail;
 		pool->process_discard = process_bio_fail;
@@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
 	case PM_OUT_OF_METADATA_SPACE:
 	case PM_READ_ONLY:
-		if (!is_read_only_pool_mode(old_mode))
-			notify_of_pool_mode_change(pool, "read-only");
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_read_only;
 		pool->process_discard = process_bio_success;
@@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		 * alarming rate.  Adjust your low water mark if you're
 		 * frequently seeing this mode.
 		 */
-		if (old_mode != new_mode)
-			notify_of_pool_mode_change_to_oods(pool);
 		pool->out_of_data_space = true;
 		pool->process_bio = process_bio_read_only;
 		pool->process_discard = process_discard_bio;
@@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		break;
 
 	case PM_WRITE:
-		if (old_mode != new_mode)
-			notify_of_pool_mode_change(pool, "write");
 		if (old_mode == PM_OUT_OF_DATA_SPACE)
 			cancel_delayed_work_sync(&pool->no_space_timeout);
 		pool->out_of_data_space = false;
@@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	 * doesn't cause an unexpected mode transition on resume.
 	 */
 	pt->adjusted_pf.mode = new_mode;
+
+	if (old_mode != new_mode)
+		notify_of_pool_mode_change(pool);
 }
 
 static void abort_transaction(struct pool *pool)
@@ -4023,7 +4025,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 20, 0},
+	.version = {1, 21, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4397,7 +4399,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 20, 0},
+	.version = {1, 21, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
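
The dm-thin change is a pure refactor: rather than sprinkling notification calls through every case of set_pool_mode(), the new mode is committed first and a single table-driven notifier runs once per real transition. A compact model of that shape follows; the function names mirror the diff, everything else is illustrative:

#include <stdio.h>

enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE, PM_READ_ONLY, PM_FAIL };

/* One description table indexed by the mode enum, as in the new
 * notify_of_pool_mode_change(). */
static const char *descs[] = {
	"write", "out-of-data-space", "read-only", "fail"
};

static enum pool_mode cur_mode = PM_WRITE;

static void notify_of_pool_mode_change(enum pool_mode mode)
{
	printf("switching pool to %s mode\n", descs[mode]);
}

/* Commit the state first, notify once afterwards: every exit path
 * from the per-mode switch now shares a single, consistent message. */
static void set_pool_mode(enum pool_mode new_mode)
{
	enum pool_mode old_mode = cur_mode;

	cur_mode = new_mode;	/* ...per-mode setup would happen here... */
	if (old_mode != new_mode)
		notify_of_pool_mode_change(new_mode);
}

int main(void)
{
	set_pool_mode(PM_READ_ONLY);
	set_pool_mode(PM_READ_ONLY);	/* no duplicate message */
	set_pool_mode(PM_WRITE);
	return 0;
}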
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 981154e59461..6af5babe6837 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -20,7 +20,6 @@ struct dmz_bioctx {
 	struct dm_zone		*zone;
 	struct bio		*bio;
 	refcount_t		ref;
-	blk_status_t		status;
 };
 
 /*
@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
 {
 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
-	if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
-		bioctx->status = status;
-	bio_endio(bio);
+	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
+		bio->bi_status = status;
+
+	if (refcount_dec_and_test(&bioctx->ref)) {
+		struct dm_zone *zone = bioctx->zone;
+
+		if (zone) {
+			if (bio->bi_status != BLK_STS_OK &&
+			    bio_op(bio) == REQ_OP_WRITE &&
+			    dmz_is_seq(zone))
+				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+			dmz_deactivate_zone(zone);
+		}
+		bio_endio(bio);
+	}
 }
 
 /*
- * Partial clone read BIO completion callback. This terminates the
+ * Completion callback for an internally cloned target BIO. This terminates the
  * target BIO when there are no more references to its context.
  */
-static void dmz_read_bio_end_io(struct bio *bio)
+static void dmz_clone_endio(struct bio *clone)
 {
-	struct dmz_bioctx *bioctx = bio->bi_private;
-	blk_status_t status = bio->bi_status;
+	struct dmz_bioctx *bioctx = clone->bi_private;
+	blk_status_t status = clone->bi_status;
 
-	bio_put(bio);
+	bio_put(clone);
 	dmz_bio_endio(bioctx->bio, status);
 }
 
 /*
- * Issue a BIO to a zone. The BIO may only partially process the
+ * Issue a clone of a target BIO. The clone may only partially process the
  * original target BIO.
  */
-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
+static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
 			  struct bio *bio, sector_t chunk_block,
 			  unsigned int nr_blocks)
 {
 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-	sector_t sector;
 	struct bio *clone;
 
-	/* BIO remap sector */
-	sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-
-	/* If the read is not partial, there is no need to clone the BIO */
-	if (nr_blocks == dmz_bio_blocks(bio)) {
-		/* Setup and submit the BIO */
-		bio->bi_iter.bi_sector = sector;
-		refcount_inc(&bioctx->ref);
-		generic_make_request(bio);
-		return 0;
-	}
-
-	/* Partial BIO: we need to clone the BIO */
 	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
 	if (!clone)
 		return -ENOMEM;
 
-	/* Setup the clone */
-	clone->bi_iter.bi_sector = sector;
+	bio_set_dev(clone, dmz->dev->bdev);
+	clone->bi_iter.bi_sector =
+		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
 	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
-	clone->bi_end_io = dmz_read_bio_end_io;
+	clone->bi_end_io = dmz_clone_endio;
 	clone->bi_private = bioctx;
 
 	bio_advance(bio, clone->bi_iter.bi_size);
 
-	/* Submit the clone */
 	refcount_inc(&bioctx->ref);
 	generic_make_request(clone);
 
+	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+		zone->wp_block += nr_blocks;
+
 	return 0;
 }
 
@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
 		if (nr_blocks) {
 			/* Valid blocks found: read them */
 			nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
-			ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
 			if (ret)
 				return ret;
 			chunk_block += nr_blocks;
@@ -229,25 +229,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
 }
 
 /*
- * Issue a write BIO to a zone.
- */
-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
-				 struct bio *bio, sector_t chunk_block,
-				 unsigned int nr_blocks)
-{
-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-	/* Setup and submit the BIO */
-	bio_set_dev(bio, dmz->dev->bdev);
-	bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-	refcount_inc(&bioctx->ref);
-	generic_make_request(bio);
-
-	if (dmz_is_seq(zone))
-		zone->wp_block += nr_blocks;
-}
-
-/*
  * Write blocks directly in a data zone, at the write pointer.
  * If a buffer zone is assigned, invalidate the blocks written
  * in place.
@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
 		return -EROFS;
 
 	/* Submit write */
-	dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
+	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
+	if (ret)
+		return ret;
 
 	/*
 	 * Validate the blocks in the data zone and invalidate
@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
 		return -EROFS;
 
 	/* Submit write */
-	dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+	if (ret)
+		return ret;
 
 	/*
 	 * Validate the blocks in the buffer zone
@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 	bioctx->zone = NULL;
 	bioctx->bio = bio;
 	refcount_set(&bioctx->ref, 1);
-	bioctx->status = BLK_STS_OK;
 
 	/* Set the BIO pending in the flush list */
 	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -624,35 +608,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 }
 
 /*
- * Completed target BIO processing.
- */
-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
-{
-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-	if (bioctx->status == BLK_STS_OK && *error)
-		bioctx->status = *error;
-
-	if (!refcount_dec_and_test(&bioctx->ref))
-		return DM_ENDIO_INCOMPLETE;
-
-	/* Done */
-	bio->bi_status = bioctx->status;
-
-	if (bioctx->zone) {
-		struct dm_zone *zone = bioctx->zone;
-
-		if (*error && bio_op(bio) == REQ_OP_WRITE) {
-			if (dmz_is_seq(zone))
-				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
-		}
-		dmz_deactivate_zone(zone);
-	}
-
-	return DM_ENDIO_DONE;
-}
-
-/*
  * Get zoned device information.
  */
 static int dmz_get_zoned_device(struct dm_target *ti, char *path)
@@ -946,7 +901,6 @@ static struct target_type dmz_type = {
 	.ctr		= dmz_ctr,
 	.dtr		= dmz_dtr,
 	.map		= dmz_map,
-	.end_io		= dmz_end_io,
 	.io_hints	= dmz_io_hints,
 	.prepare_ioctl	= dmz_prepare_ioctl,
 	.postsuspend	= dmz_suspend,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c510179a7f84..63a7c416b224 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1593,6 +1593,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1593 return ret; 1593 return ret;
1594 } 1594 }
1595 1595
1596 blk_queue_split(md->queue, &bio);
1597
1596 init_clone_info(&ci, md, map, bio); 1598 init_clone_info(&ci, md, map, bio);
1597 1599
1598 if (bio->bi_opf & REQ_PREFLUSH) { 1600 if (bio->bi_opf & REQ_PREFLUSH) {
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8add62a18293..102eb35fcf3f 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -110,6 +110,19 @@ config MEDIA_CONTROLLER_DVB
 
 	  This is currently experimental.
 
+config MEDIA_CONTROLLER_REQUEST_API
+	bool "Enable Media controller Request API (EXPERIMENTAL)"
+	depends on MEDIA_CONTROLLER && STAGING_MEDIA
+	default n
+	---help---
+	  DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+
+	  This option enables the Request API for the Media controller and V4L2
+	  interfaces. It is currently needed by a few stateless codec drivers.
+
+	  There is currently no intention to provide API or ABI stability for
+	  this new API as of yet.
+
 #
 # Video4Linux support
 #	Only enables if one of the V4L2 types (ATV, webcam, radio) is selected
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 975ff5669f72..8ff8722cb6b1 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -947,7 +947,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 	}
 	atomic_dec(&q->owned_by_drv_count);
 
-	if (vb->req_obj.req) {
+	if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
 		/* This is not supported at the moment */
 		WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
 		media_request_object_unbind(&vb->req_obj);
@@ -1359,8 +1359,12 @@ static void vb2_req_release(struct media_request_object *obj)
 {
 	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
 
-	if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+	if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
 		vb->state = VB2_BUF_STATE_DEQUEUED;
+		if (vb->request)
+			media_request_put(vb->request);
+		vb->request = NULL;
+	}
 }
 
 static const struct media_request_object_ops vb2_core_req_ops = {
@@ -1528,6 +1532,18 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
 			return ret;
 
 		vb->state = VB2_BUF_STATE_IN_REQUEST;
+
+		/*
+		 * Increment the refcount and store the request.
+		 * The request refcount is decremented again when the
+		 * buffer is dequeued. This is to prevent vb2_buffer_done()
+		 * from freeing the request from interrupt context, which can
+		 * happen if the application closed the request fd after
+		 * queueing the request.
+		 */
+		media_request_get(req);
+		vb->request = req;
+
 		/* Fill buffer information for the userspace */
 		if (pb) {
 			call_void_bufop(q, copy_timestamp, vb, pb);
@@ -1749,10 +1765,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
 			call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
 			vb->planes[i].dbuf_mapped = 0;
 		}
-	if (vb->req_obj.req) {
-		media_request_object_unbind(&vb->req_obj);
-		media_request_object_put(&vb->req_obj);
-	}
 	call_void_bufop(q, init_buffer, vb);
 }
 
@@ -1797,6 +1809,14 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
 	/* go back to dequeued state */
 	__vb2_dqbuf(vb);
 
+	if (WARN_ON(vb->req_obj.req)) {
+		media_request_object_unbind(&vb->req_obj);
+		media_request_object_put(&vb->req_obj);
+	}
+	if (vb->request)
+		media_request_put(vb->request);
+	vb->request = NULL;
+
 	dprintk(2, "dqbuf of buffer %d, with state %d\n",
 			vb->index, vb->state);
 
@@ -1903,6 +1923,14 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
 			vb->prepared = false;
 		}
 		__vb2_dqbuf(vb);
+
+		if (vb->req_obj.req) {
+			media_request_object_unbind(&vb->req_obj);
+			media_request_object_put(&vb->req_obj);
+		}
+		if (vb->request)
+			media_request_put(vb->request);
+		vb->request = NULL;
 	}
 }
 
@@ -1940,10 +1968,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
 		if (ret)
 			return ret;
 		ret = vb2_start_streaming(q);
-		if (ret) {
-			__vb2_queue_cancel(q);
+		if (ret)
 			return ret;
-		}
 	}
 
 	q->streaming = 1;
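
The videobuf2 hunks pin the media request for as long as a buffer is queued: media_request_get() at qbuf time, media_request_put() at dqbuf or queue cancel, so the final put (and the free) can never happen from vb2_buffer_done() in interrupt context. A userspace toy of the pinning idea; the request_* names here are invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted request: the queued buffer holds its own reference,
 * so a release from "interrupt" context can never be the last put. */
struct request {
	int ref;
};

static struct request *request_new(void)
{
	struct request *r = malloc(sizeof(*r));

	if (r)
		r->ref = 1;
	return r;
}

static void request_get(struct request *r) { r->ref++; }

static void request_put(struct request *r)
{
	if (--r->ref == 0) {
		printf("request freed\n");
		free(r);
	}
}

int main(void)
{
	struct request *req = request_new();

	if (!req)
		return 1;
	request_get(req);	/* qbuf: the queued buffer pins the request */
	request_put(req);	/* application closes the request fd early */
	request_put(req);	/* dqbuf: the buffer's reference, the real last put */
	return 0;
}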
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index a17033ab2c22..1d35aeabfd85 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -333,10 +333,10 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
 }
 
 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-				    struct v4l2_buffer *b,
-				    const char *opname,
+				    struct v4l2_buffer *b, bool is_prepare,
 				    struct media_request **p_req)
 {
+	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
 	struct media_request *req;
 	struct vb2_v4l2_buffer *vbuf;
 	struct vb2_buffer *vb;
@@ -378,6 +378,9 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
 		return ret;
 	}
 
+	if (is_prepare)
+		return 0;
+
 	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
 		if (q->uses_requests) {
 			dprintk(1, "%s: queue uses requests\n", opname);
@@ -631,8 +634,10 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
 		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
 	if (q->io_modes & VB2_DMABUF)
 		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
 	if (q->supports_requests)
 		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
+#endif
 }
 
 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
@@ -657,7 +662,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
 	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
 		return -EINVAL;
 
-	ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
+	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
 
 	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
 }
@@ -729,7 +734,7 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
 		return -EBUSY;
 	}
 
-	ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
+	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
 	if (ret)
 		return ret;
 	ret = vb2_core_qbuf(q, b->index, b, req);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index bed24372e61f..b8ec88612df7 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -381,10 +381,14 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
 static long media_device_request_alloc(struct media_device *mdev,
 				       int *alloc_fd)
 {
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
 	if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
 		return -ENOTTY;
 
 	return media_request_alloc(mdev, alloc_fd);
+#else
+	return -ENOTTY;
+#endif
 }
 
 static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
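
media_device_request_alloc() is now compiled out rather than runtime-gated: without CONFIG_MEDIA_CONTROLLER_REQUEST_API the ioctl collapses to -ENOTTY, which is exactly what userspace sees for any unsupported ioctl. The same pattern in isolation; the macro name below is a stand-in for the Kconfig symbol:

#include <errno.h>
#include <stdio.h>

/* Define REQUEST_API_ENABLED to model CONFIG_MEDIA_CONTROLLER_REQUEST_API. */
static int request_alloc(int *fd)
{
#ifdef REQUEST_API_ENABLED
	*fd = 3;	/* a real implementation would allocate a request here */
	return 0;
#else
	(void)fd;
	return -ENOTTY;	/* same errno as an unknown ioctl */
#endif
}

int main(void)
{
	int fd;

	printf("request_alloc: %d\n", request_alloc(&fd));
	return 0;
}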
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 013cdebecbc4..13fb69c58967 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -997,11 +997,18 @@ static int vicodec_start_streaming(struct vb2_queue *q,
 
 	q_data->sequence = 0;
 
-	if (!V4L2_TYPE_IS_OUTPUT(q->type))
+	if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
+		if (!ctx->is_enc) {
+			state->width = q_data->width;
+			state->height = q_data->height;
+		}
 		return 0;
+	}
 
-	state->width = q_data->width;
-	state->height = q_data->height;
+	if (ctx->is_enc) {
+		state->width = q_data->width;
+		state->height = q_data->height;
+	}
 	state->ref_frame.width = state->ref_frame.height = 0;
 	state->ref_frame.luma = kvmalloc(size + 2 * size / chroma_div,
 					 GFP_KERNEL);
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index dcdc80e272c2..9acc709b0740 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -276,8 +276,6 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
 		list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
 			list_del(&buf->list);
-			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-						   &dev->ctrl_hdl_sdr_cap);
 			vb2_buffer_done(&buf->vb.vb2_buf,
 					VB2_BUF_STATE_QUEUED);
 		}
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index 903cebeb5ce5..d666271bdaed 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -204,8 +204,6 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
 		list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
 			list_del(&buf->list);
-			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-						   &dev->ctrl_hdl_vbi_cap);
 			vb2_buffer_done(&buf->vb.vb2_buf,
 					VB2_BUF_STATE_QUEUED);
 		}
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 9357c07e30d6..cd56476902a2 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -96,8 +96,6 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
 
 		list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
 			list_del(&buf->list);
-			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-						   &dev->ctrl_hdl_vbi_out);
 			vb2_buffer_done(&buf->vb.vb2_buf,
 					VB2_BUF_STATE_QUEUED);
 		}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 9c8e8be81ce3..673772cd17d6 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -243,8 +243,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
 		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
 			list_del(&buf->list);
-			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-						   &dev->ctrl_hdl_vid_cap);
 			vb2_buffer_done(&buf->vb.vb2_buf,
 					VB2_BUF_STATE_QUEUED);
 		}
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index aaf13f03d5d4..628eae154ee7 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -162,8 +162,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
 
 		list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
 			list_del(&buf->list);
-			v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-						   &dev->ctrl_hdl_vid_out);
 			vb2_buffer_done(&buf->vb.vb2_buf,
 					VB2_BUF_STATE_QUEUED);
 		}
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 0b18f0bd7419..8b0a26335d70 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -95,7 +95,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
 	format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
 					    LIF_PAD_SOURCE);
 
-	switch (entity->vsp1->version & VI6_IP_VERSION_SOC_MASK) {
+	switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) {
 	case VI6_IP_VERSION_MODEL_VSPD_GEN2:
 	case VI6_IP_VERSION_MODEL_VSPD_V2H:
 		hbth = 1536;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 5f2b033a7a42..10b8d94edbef 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1563,7 +1563,7 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
 	u64 offset;
 	s64 val;
 
-	switch (ctrl->type) {
+	switch ((u32)ctrl->type) {
 	case V4L2_CTRL_TYPE_INTEGER:
 		return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
 	case V4L2_CTRL_TYPE_INTEGER64:
@@ -2232,7 +2232,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
 	is_array = nr_of_dims > 0;
 
 	/* Prefill elem_size for all types handled by std_type_ops */
-	switch (type) {
+	switch ((u32)type) {
 	case V4L2_CTRL_TYPE_INTEGER64:
 		elem_size = sizeof(s64);
 		break;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index c35b5b08bb33..111934838da2 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -472,7 +472,7 @@ out:
 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 			       struct mmc_blk_ioc_data *idata)
 {
-	struct mmc_command cmd = {};
+	struct mmc_command cmd = {}, sbc = {};
 	struct mmc_data data = {};
 	struct mmc_request mrq = {};
 	struct scatterlist sg;
@@ -550,10 +550,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 	}
 
 	if (idata->rpmb) {
-		err = mmc_set_blockcount(card, data.blocks,
-					 idata->ic.write_flag & (1 << 31));
-		if (err)
-			return err;
+		sbc.opcode = MMC_SET_BLOCK_COUNT;
+		/*
+		 * We don't do any blockcount validation because the max size
+		 * may be increased by a future standard. We just copy the
+		 * 'Reliable Write' bit here.
+		 */
+		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		mrq.sbc = &sbc;
 	}
 
 	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
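
The RPMB fix stops issuing SET_BLOCK_COUNT as a free-standing command and instead chains it as mrq.sbc, so the core sends CMD23 immediately before the data command and the pair cannot be split or half-fail. Roughly, the construction looks like this; the struct shapes are simplified stand-ins for mmc_command/mmc_request:

#include <stdint.h>
#include <stdio.h>

#define BIT31 (1u << 31)

/* Simplified stand-ins; field names follow the diff, everything else
 * is a toy. */
struct cmd { uint32_t opcode; uint32_t arg; };
struct req { struct cmd *sbc; struct cmd *data_cmd; };

int main(void)
{
	struct cmd sbc = { 0, 0 };
	struct cmd data_cmd = { 25, 0 };	/* WRITE_MULTIPLE_BLOCK */
	struct req mrq = { NULL, &data_cmd };
	uint32_t blocks = 1;
	uint32_t write_flag = BIT31;		/* reliable-write request */

	/* Chain SET_BLOCK_COUNT into the request instead of sending it
	 * separately; only the reliable-write bit is copied through. */
	sbc.opcode = 23;			/* SET_BLOCK_COUNT */
	sbc.arg = blocks | (write_flag & BIT31);
	mrq.sbc = &sbc;

	printf("sbc arg = 0x%08x\n", (unsigned int)mrq.sbc->arg);
	return 0;
}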
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bc1bd2c25613..55997cf84b39 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -30,6 +30,7 @@
30#include "pwrseq.h" 30#include "pwrseq.h"
31 31
32#define DEFAULT_CMD6_TIMEOUT_MS 500 32#define DEFAULT_CMD6_TIMEOUT_MS 500
33#define MIN_CACHE_EN_TIMEOUT_MS 1600
33 34
34static const unsigned int tran_exp[] = { 35static const unsigned int tran_exp[] = {
35 10000, 100000, 1000000, 10000000, 36 10000, 100000, 1000000, 10000000,
@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
526 card->cid.year += 16; 527 card->cid.year += 16;
527 528
528 /* check whether the eMMC card supports BKOPS */ 529 /* check whether the eMMC card supports BKOPS */
529 if (!mmc_card_broken_hpi(card) && 530 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
530 ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
531 card->ext_csd.bkops = 1; 531 card->ext_csd.bkops = 1;
532 card->ext_csd.man_bkops_en = 532 card->ext_csd.man_bkops_en =
533 (ext_csd[EXT_CSD_BKOPS_EN] & 533 (ext_csd[EXT_CSD_BKOPS_EN] &
@@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1782 if (err) { 1782 if (err) {
1783 pr_warn("%s: Enabling HPI failed\n", 1783 pr_warn("%s: Enabling HPI failed\n",
1784 mmc_hostname(card->host)); 1784 mmc_hostname(card->host));
1785 card->ext_csd.hpi_en = 0;
1785 err = 0; 1786 err = 0;
1786 } else 1787 } else {
1787 card->ext_csd.hpi_en = 1; 1788 card->ext_csd.hpi_en = 1;
1789 }
1788 } 1790 }
1789 1791
1790 /* 1792 /*
1791 * If cache size is higher than 0, this indicates 1793 * If cache size is higher than 0, this indicates the existence of cache
1792 * the existence of cache and it can be turned on. 1794 * and it can be turned on. Note that some eMMCs from Micron has been
1795 * reported to need ~800 ms timeout, while enabling the cache after
1796 * sudden power failure tests. Let's extend the timeout to a minimum of
1797 * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards.
1793 */ 1798 */
1794 if (!mmc_card_broken_hpi(card) && 1799 if (card->ext_csd.cache_size > 0) {
1795 card->ext_csd.cache_size > 0) { 1800 unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
1801
1802 timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
1796 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1803 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1797 EXT_CSD_CACHE_CTRL, 1, 1804 EXT_CSD_CACHE_CTRL, 1, timeout_ms);
1798 card->ext_csd.generic_cmd6_time);
1799 if (err && err != -EBADMSG) 1805 if (err && err != -EBADMSG)
1800 goto free_card; 1806 goto free_card;
1801 1807
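
The cache-enable change is just a floor on a timeout: keep the card's advertised generic_cmd6_time, but never go below MIN_CACHE_EN_TIMEOUT_MS. In kernel code that is max(); spelled out with an invented card value:

#include <stdio.h>

#define MIN_CACHE_EN_TIMEOUT_MS 1600

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* Whatever the card advertises, never drop below the floor that
	 * slow-to-enable caches have been observed to need. */
	unsigned int generic_cmd6_time = 250;	/* illustrative card value */
	unsigned int timeout_ms = max_u(generic_cmd6_time,
					MIN_CACHE_EN_TIMEOUT_MS);

	printf("cache-enable timeout: %u ms\n", timeout_ms);
	return 0;
}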
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index adf32682f27a..c60a7625b1fa 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -104,6 +104,7 @@ struct mmc_omap_slot {
 	unsigned int		vdd;
 	u16			saved_con;
 	u16			bus_mode;
+	u16			power_mode;
 	unsigned int		fclk_freq;
 
 	struct tasklet_struct	cover_tasklet;
@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	struct mmc_omap_slot *slot = mmc_priv(mmc);
 	struct mmc_omap_host *host = slot->host;
 	int i, dsor;
-	int clk_enabled;
+	int clk_enabled, init_stream;
 
 	mmc_omap_select_slot(slot, 0);
 
@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	slot->vdd = ios->vdd;
 
 	clk_enabled = 0;
+	init_stream = 0;
 	switch (ios->power_mode) {
 	case MMC_POWER_OFF:
 		mmc_omap_set_power(slot, 0, ios->vdd);
@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	case MMC_POWER_UP:
 		/* Cannot touch dsor yet, just power up MMC */
 		mmc_omap_set_power(slot, 1, ios->vdd);
+		slot->power_mode = ios->power_mode;
 		goto exit;
 	case MMC_POWER_ON:
 		mmc_omap_fclk_enable(host, 1);
 		clk_enabled = 1;
 		dsor |= 1 << 11;
+		if (slot->power_mode != MMC_POWER_ON)
+			init_stream = 1;
 		break;
 	}
+	slot->power_mode = ios->power_mode;
 
 	if (slot->bus_mode != ios->bus_mode) {
 		if (slot->pdata->set_bus_mode != NULL)
@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	for (i = 0; i < 2; i++)
 		OMAP_MMC_WRITE(host, CON, dsor);
 	slot->saved_con = dsor;
-	if (ios->power_mode == MMC_POWER_ON) {
+	if (init_stream) {
 		/* worst case at 400kHz, 80 cycles makes 200 microsecs */
 		int usecs = 250;
 
@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
 	slot->host = host;
 	slot->mmc = mmc;
 	slot->id = id;
+	slot->power_mode = MMC_POWER_UNDEFINED;
 	slot->pdata = &host->pdata->slots[id];
 
 	host->slots[id] = slot;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 467d889a1638..3f4ea8f624be 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1909,7 +1909,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
 	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-	mmc->max_seg_size = mmc->max_req_size;
 
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -1939,6 +1938,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 		goto err_irq;
 	}
 
+	/*
+	 * Limit the maximum segment size to the lower of the request size
+	 * and the DMA engine device segment size limits. In reality, with
+	 * 32-bit transfers, the DMA engine can do longer segments than this
+	 * but there is no way to represent that in the DMA model - if we
+	 * increase this figure here, we get warnings from the DMA API debug.
+	 */
+	mmc->max_seg_size = min3(mmc->max_req_size,
+			dma_get_max_seg_size(host->rx_chan->device->dev),
+			dma_get_max_seg_size(host->tx_chan->device->dev));
+
 	/* Request IRQ for MMC operations */
 	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
 			mmc_hostname(mmc), host);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 88347ce78f23..d264391616f9 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
288 struct device *dev = omap_host->dev; 288 struct device *dev = omap_host->dev;
289 struct mmc_ios *ios = &mmc->ios; 289 struct mmc_ios *ios = &mmc->ios;
290 u32 start_window = 0, max_window = 0; 290 u32 start_window = 0, max_window = 0;
291 bool dcrc_was_enabled = false;
291 u8 cur_match, prev_match = 0; 292 u8 cur_match, prev_match = 0;
292 u32 length = 0, max_len = 0; 293 u32 length = 0, max_len = 0;
293 u32 ier = host->ier;
294 u32 phase_delay = 0; 294 u32 phase_delay = 0;
295 int ret = 0; 295 int ret = 0;
296 u32 reg; 296 u32 reg;
@@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
317 * during the tuning procedure. So disable it during the 317 * during the tuning procedure. So disable it during the
318 * tuning procedure. 318 * tuning procedure.
319 */ 319 */
320 ier &= ~SDHCI_INT_DATA_CRC; 320 if (host->ier & SDHCI_INT_DATA_CRC) {
321 sdhci_writel(host, ier, SDHCI_INT_ENABLE); 321 host->ier &= ~SDHCI_INT_DATA_CRC;
322 sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); 322 dcrc_was_enabled = true;
323 }
323 324
324 while (phase_delay <= MAX_PHASE_DELAY) { 325 while (phase_delay <= MAX_PHASE_DELAY) {
325 sdhci_omap_set_dll(omap_host, phase_delay); 326 sdhci_omap_set_dll(omap_host, phase_delay);
@@ -366,6 +367,9 @@ tuning_error:
366 367
367ret: 368ret:
368 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 369 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
370 /* Reenable forbidden interrupt */
371 if (dcrc_was_enabled)
372 host->ier |= SDHCI_INT_DATA_CRC;
369 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 373 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
370 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 374 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
371 return ret; 375 return ret;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 7b95d088fdef..e6ace31e2a41 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -510,25 +510,25 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
510 510
511 err = device_property_read_u32(host->mmc->parent, 511 err = device_property_read_u32(host->mmc->parent,
512 "nvidia,pad-autocal-pull-up-offset-3v3-timeout", 512 "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
513 &autocal->pull_up_3v3); 513 &autocal->pull_up_3v3_timeout);
514 if (err) 514 if (err)
515 autocal->pull_up_3v3_timeout = 0; 515 autocal->pull_up_3v3_timeout = 0;
516 516
517 err = device_property_read_u32(host->mmc->parent, 517 err = device_property_read_u32(host->mmc->parent,
518 "nvidia,pad-autocal-pull-down-offset-3v3-timeout", 518 "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
519 &autocal->pull_down_3v3); 519 &autocal->pull_down_3v3_timeout);
520 if (err) 520 if (err)
521 autocal->pull_down_3v3_timeout = 0; 521 autocal->pull_down_3v3_timeout = 0;
522 522
523 err = device_property_read_u32(host->mmc->parent, 523 err = device_property_read_u32(host->mmc->parent,
524 "nvidia,pad-autocal-pull-up-offset-1v8-timeout", 524 "nvidia,pad-autocal-pull-up-offset-1v8-timeout",
525 &autocal->pull_up_1v8); 525 &autocal->pull_up_1v8_timeout);
526 if (err) 526 if (err)
527 autocal->pull_up_1v8_timeout = 0; 527 autocal->pull_up_1v8_timeout = 0;
528 528
529 err = device_property_read_u32(host->mmc->parent, 529 err = device_property_read_u32(host->mmc->parent,
530 "nvidia,pad-autocal-pull-down-offset-1v8-timeout", 530 "nvidia,pad-autocal-pull-down-offset-1v8-timeout",
531 &autocal->pull_down_1v8); 531 &autocal->pull_down_1v8_timeout);
532 if (err) 532 if (err)
533 autocal->pull_down_1v8_timeout = 0; 533 autocal->pull_down_1v8_timeout = 0;
534 534
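
Editor's note: the sdhci-tegra hunk fixes a copy-paste slip: the four *-timeout properties were read correctly but stored into the non-timeout struct members, after which the fallback branch zeroed the intended fields. A table-driven parse makes the name-to-destination pairing explicit and removes that class of bug; the sketch below is a hypothetical restructuring, not the driver's actual code, with a stub standing in for device_property_read_u32():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct autocal {
	uint32_t pull_up_3v3_timeout;
	uint32_t pull_down_3v3_timeout;
	uint32_t pull_up_1v8_timeout;
	uint32_t pull_down_1v8_timeout;
};

/* Stand-in for device_property_read_u32(); returns 0 on success. */
static int read_u32_prop(const char *name, uint32_t *out)
{
	(void)name;
	return -1;	/* pretend the property is absent */
}

static void parse_autocal(struct autocal *ac)
{
	const struct { const char *name; uint32_t *dst; } map[] = {
		{ "nvidia,pad-autocal-pull-up-offset-3v3-timeout",   &ac->pull_up_3v3_timeout },
		{ "nvidia,pad-autocal-pull-down-offset-3v3-timeout", &ac->pull_down_3v3_timeout },
		{ "nvidia,pad-autocal-pull-up-offset-1v8-timeout",   &ac->pull_up_1v8_timeout },
		{ "nvidia,pad-autocal-pull-down-offset-1v8-timeout", &ac->pull_down_1v8_timeout },
	};

	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (read_u32_prop(map[i].name, map[i].dst))
			*map[i].dst = 0;	/* default when the property is missing */
}

int main(void)
{
	struct autocal ac;
	parse_autocal(&ac);
	printf("%u\n", ac.pull_up_3v3_timeout);
	return 0;
}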
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 99bdae53fa2e..df05352b6a4a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -127,12 +127,12 @@ static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
127{ 127{
128 u16 ctrl2; 128 u16 ctrl2;
129 129
130 ctrl2 = sdhci_readb(host, SDHCI_HOST_CONTROL2); 130 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
131 if (ctrl2 & SDHCI_CTRL_V4_MODE) 131 if (ctrl2 & SDHCI_CTRL_V4_MODE)
132 return; 132 return;
133 133
134 ctrl2 |= SDHCI_CTRL_V4_MODE; 134 ctrl2 |= SDHCI_CTRL_V4_MODE;
135 sdhci_writeb(host, ctrl2, SDHCI_HOST_CONTROL); 135 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
136} 136}
137 137
138/* 138/*
@@ -216,8 +216,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
216 timeout = ktime_add_ms(ktime_get(), 100); 216 timeout = ktime_add_ms(ktime_get(), 100);
217 217
218 /* hw clears the bit when it's done */ 218 /* hw clears the bit when it's done */
219 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { 219 while (1) {
220 if (ktime_after(ktime_get(), timeout)) { 220 bool timedout = ktime_after(ktime_get(), timeout);
221
222 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
223 break;
224 if (timedout) {
221 pr_err("%s: Reset 0x%x never completed.\n", 225 pr_err("%s: Reset 0x%x never completed.\n",
222 mmc_hostname(host->mmc), (int)mask); 226 mmc_hostname(host->mmc), (int)mask);
223 sdhci_dumpregs(host); 227 sdhci_dumpregs(host);
@@ -1608,9 +1612,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1608 1612
1609 /* Wait max 20 ms */ 1613 /* Wait max 20 ms */
1610 timeout = ktime_add_ms(ktime_get(), 20); 1614 timeout = ktime_add_ms(ktime_get(), 20);
1611 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) 1615 while (1) {
1612 & SDHCI_CLOCK_INT_STABLE)) { 1616 bool timedout = ktime_after(ktime_get(), timeout);
1613 if (ktime_after(ktime_get(), timeout)) { 1617
1618 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1619 if (clk & SDHCI_CLOCK_INT_STABLE)
1620 break;
1621 if (timedout) {
1614 pr_err("%s: Internal clock never stabilised.\n", 1622 pr_err("%s: Internal clock never stabilised.\n",
1615 mmc_hostname(host->mmc)); 1623 mmc_hostname(host->mmc));
1616 sdhci_dumpregs(host); 1624 sdhci_dumpregs(host);
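
Editor's note: two distinct fixes sit in the sdhci.c hunks above. First, SDHCI_HOST_CONTROL2 is a 16-bit register, so the v4-mode toggle now uses sdhci_readw()/sdhci_writew() and writes back to the correct register instead of SDHCI_HOST_CONTROL. Second, both polling loops are restructured so the timeout is sampled before the condition is re-checked: the old shape could declare a timeout if the thread was preempted between a not-ready register read and the clock comparison, even though the hardware had since become ready. A self-contained sketch of the race-free loop (hw_ready() is a hypothetical stub):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static bool hw_ready(void) { static int n; return ++n > 3; }	/* register-read stub */

static int64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static int wait_ready(int64_t budget_ns)
{
	int64_t deadline = now_ns() + budget_ns;

	while (1) {
		/* Sample the clock BEFORE the register: even if we are
		 * preempted here, the condition is re-checked at least once
		 * after the deadline, so a ready device can never be
		 * misreported as a timeout. */
		bool timedout = now_ns() > deadline;

		if (hw_ready())
			return 0;
		if (timedout)
			return -1;	/* genuine timeout */
	}
}

int main(void) { printf("%d\n", wait_ready(100000000)); return 0; }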
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 0f749d1ef53b..8a517d8fb9d1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1124,7 +1124,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
1124 u16 *p = _p; 1124 u16 *p = _p;
1125 int i; 1125 int i;
1126 1126
1127 regs->version = 0; 1127 regs->version = chip->info->prod_num;
1128 1128
1129 memset(p, 0xff, 32 * sizeof(u16)); 1129 memset(p, 0xff, 32 * sizeof(u16));
1130 1130
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3b889efddf78..50dd6bf176d0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -29,9 +29,6 @@
29#define RES_RING_CSR 1 29#define RES_RING_CSR 1
30#define RES_RING_CMD 2 30#define RES_RING_CMD 2
31 31
32static const struct of_device_id xgene_enet_of_match[];
33static const struct acpi_device_id xgene_enet_acpi_match[];
34
35static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) 32static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
36{ 33{
37 struct xgene_enet_raw_desc16 *raw_desc; 34 struct xgene_enet_raw_desc16 *raw_desc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0de487a8f0eb..5cd3135dfe30 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1282,6 +1282,7 @@ enum sp_rtnl_flag {
1282 BNX2X_SP_RTNL_TX_STOP, 1282 BNX2X_SP_RTNL_TX_STOP,
1283 BNX2X_SP_RTNL_GET_DRV_VERSION, 1283 BNX2X_SP_RTNL_GET_DRV_VERSION,
1284 BNX2X_SP_RTNL_CHANGE_UDP_PORT, 1284 BNX2X_SP_RTNL_CHANGE_UDP_PORT,
1285 BNX2X_SP_RTNL_UPDATE_SVID,
1285}; 1286};
1286 1287
1287enum bnx2x_iov_flag { 1288enum bnx2x_iov_flag {
@@ -2520,6 +2521,7 @@ void bnx2x_update_mfw_dump(struct bnx2x *bp);
2520void bnx2x_init_ptp(struct bnx2x *bp); 2521void bnx2x_init_ptp(struct bnx2x *bp);
2521int bnx2x_configure_ptp_filters(struct bnx2x *bp); 2522int bnx2x_configure_ptp_filters(struct bnx2x *bp);
2522void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb); 2523void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
2524void bnx2x_register_phc(struct bnx2x *bp);
2523 2525
2524#define BNX2X_MAX_PHC_DRIFT 31000000 2526#define BNX2X_MAX_PHC_DRIFT 31000000
2525#define BNX2X_PTP_TX_TIMEOUT 2527#define BNX2X_PTP_TX_TIMEOUT
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 686899d7e555..ecb1bd7eb508 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2842,6 +2842,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2842 bnx2x_set_rx_mode_inner(bp); 2842 bnx2x_set_rx_mode_inner(bp);
2843 2843
2844 if (bp->flags & PTP_SUPPORTED) { 2844 if (bp->flags & PTP_SUPPORTED) {
2845 bnx2x_register_phc(bp);
2845 bnx2x_init_ptp(bp); 2846 bnx2x_init_ptp(bp);
2846 bnx2x_configure_ptp_filters(bp); 2847 bnx2x_configure_ptp_filters(bp);
2847 } 2848 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 95309b27c7d1..b164f705709d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2925 func_params.f_obj = &bp->func_obj; 2925 func_params.f_obj = &bp->func_obj;
2926 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 2926 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2927 2927
2928 /* Prepare parameters for function state transitions */
2929 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2930 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2931
2928 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { 2932 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2929 int func = BP_ABS_FUNC(bp); 2933 int func = BP_ABS_FUNC(bp);
2930 u32 val; 2934 u32 val;
@@ -4311,7 +4315,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4311 bnx2x_handle_eee_event(bp); 4315 bnx2x_handle_eee_event(bp);
4312 4316
4313 if (val & DRV_STATUS_OEM_UPDATE_SVID) 4317 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4314 bnx2x_handle_update_svid_cmd(bp); 4318 bnx2x_schedule_sp_rtnl(bp,
4319 BNX2X_SP_RTNL_UPDATE_SVID, 0);
4315 4320
4316 if (bp->link_vars.periodic_flags & 4321 if (bp->link_vars.periodic_flags &
4317 PERIODIC_FLAGS_LINK_EVENT) { 4322 PERIODIC_FLAGS_LINK_EVENT) {
@@ -7723,6 +7728,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7723 REG_WR(bp, reg_addr, val); 7728 REG_WR(bp, reg_addr, val);
7724 } 7729 }
7725 7730
7731 if (CHIP_IS_E3B0(bp))
7732 bp->flags |= PTP_SUPPORTED;
7733
7726 return 0; 7734 return 0;
7727} 7735}
7728 7736
@@ -8472,6 +8480,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8472 /* Fill a user request section if needed */ 8480 /* Fill a user request section if needed */
8473 if (!test_bit(RAMROD_CONT, ramrod_flags)) { 8481 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8474 ramrod_param.user_req.u.vlan.vlan = vlan; 8482 ramrod_param.user_req.u.vlan.vlan = vlan;
8483 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
8475 /* Set the command: ADD or DEL */ 8484 /* Set the command: ADD or DEL */
8476 if (set) 8485 if (set)
8477 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 8486 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8492,6 +8501,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8492 return rc; 8501 return rc;
8493} 8502}
8494 8503
8504static int bnx2x_del_all_vlans(struct bnx2x *bp)
8505{
8506 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8507 unsigned long ramrod_flags = 0, vlan_flags = 0;
8508 struct bnx2x_vlan_entry *vlan;
8509 int rc;
8510
8511 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8512 __set_bit(BNX2X_VLAN, &vlan_flags);
8513 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8514 if (rc)
8515 return rc;
8516
8517 /* Mark that hw forgot all entries */
8518 list_for_each_entry(vlan, &bp->vlan_reg, link)
8519 vlan->hw = false;
8520 bp->vlan_cnt = 0;
8521
8522 return 0;
8523}
8524
8495int bnx2x_del_all_macs(struct bnx2x *bp, 8525int bnx2x_del_all_macs(struct bnx2x *bp,
8496 struct bnx2x_vlan_mac_obj *mac_obj, 8526 struct bnx2x_vlan_mac_obj *mac_obj,
8497 int mac_type, bool wait_for_comp) 8527 int mac_type, bool wait_for_comp)
@@ -9330,6 +9360,11 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9330 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", 9360 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9331 rc); 9361 rc);
9332 9362
9363 /* Remove all currently configured VLANs */
9364 rc = bnx2x_del_all_vlans(bp);
9365 if (rc < 0)
9366 BNX2X_ERR("Failed to delete all VLANs\n");
9367
9333 /* Disable LLH */ 9368 /* Disable LLH */
9334 if (!CHIP_IS_E1(bp)) 9369 if (!CHIP_IS_E1(bp))
9335 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 9370 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -9417,8 +9452,13 @@ unload_error:
9417 * function stop ramrod is sent, since as part of this ramrod FW access 9452 * function stop ramrod is sent, since as part of this ramrod FW access
9418 * PTP registers. 9453 * PTP registers.
9419 */ 9454 */
9420 if (bp->flags & PTP_SUPPORTED) 9455 if (bp->flags & PTP_SUPPORTED) {
9421 bnx2x_stop_ptp(bp); 9456 bnx2x_stop_ptp(bp);
9457 if (bp->ptp_clock) {
9458 ptp_clock_unregister(bp->ptp_clock);
9459 bp->ptp_clock = NULL;
9460 }
9461 }
9422 9462
9423 /* Disable HW interrupts, NAPI */ 9463 /* Disable HW interrupts, NAPI */
9424 bnx2x_netif_stop(bp, 1); 9464 bnx2x_netif_stop(bp, 1);
@@ -10359,6 +10399,9 @@ sp_rtnl_not_reset:
10359 &bp->sp_rtnl_state)) 10399 &bp->sp_rtnl_state))
10360 bnx2x_update_mng_version(bp); 10400 bnx2x_update_mng_version(bp);
10361 10401
10402 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10403 bnx2x_handle_update_svid_cmd(bp);
10404
10362 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, 10405 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10363 &bp->sp_rtnl_state)) { 10406 &bp->sp_rtnl_state)) {
10364 if (bnx2x_udp_port_update(bp)) { 10407 if (bnx2x_udp_port_update(bp)) {
@@ -11750,8 +11793,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11750 * If maximum allowed number of connections is zero - 11793 * If maximum allowed number of connections is zero -
11751 * disable the feature. 11794 * disable the feature.
11752 */ 11795 */
11753 if (!bp->cnic_eth_dev.max_fcoe_conn) 11796 if (!bp->cnic_eth_dev.max_fcoe_conn) {
11754 bp->flags |= NO_FCOE_FLAG; 11797 bp->flags |= NO_FCOE_FLAG;
11798 eth_zero_addr(bp->fip_mac);
11799 }
11755} 11800}
11756 11801
11757static void bnx2x_get_cnic_info(struct bnx2x *bp) 11802static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -12494,9 +12539,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
12494 12539
12495 bp->dump_preset_idx = 1; 12540 bp->dump_preset_idx = 1;
12496 12541
12497 if (CHIP_IS_E3B0(bp))
12498 bp->flags |= PTP_SUPPORTED;
12499
12500 return rc; 12542 return rc;
12501} 12543}
12502 12544
@@ -13024,13 +13066,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
13024 13066
13025int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) 13067int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
13026{ 13068{
13027 struct bnx2x_vlan_entry *vlan;
13028
13029 /* The hw forgot all entries after reload */
13030 list_for_each_entry(vlan, &bp->vlan_reg, link)
13031 vlan->hw = false;
13032 bp->vlan_cnt = 0;
13033
13034 /* Don't set rx mode here. Our caller will do it. */ 13069 /* Don't set rx mode here. Our caller will do it. */
13035 bnx2x_vlan_configure(bp, false); 13070 bnx2x_vlan_configure(bp, false);
13036 13071
@@ -13895,7 +13930,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13895 return -ENOTSUPP; 13930 return -ENOTSUPP;
13896} 13931}
13897 13932
13898static void bnx2x_register_phc(struct bnx2x *bp) 13933void bnx2x_register_phc(struct bnx2x *bp)
13899{ 13934{
13900 /* Fill the ptp_clock_info struct and register PTP clock*/ 13935 /* Fill the ptp_clock_info struct and register PTP clock*/
13901 bp->ptp_clock_info.owner = THIS_MODULE; 13936 bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14097,8 +14132,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
14097 dev->base_addr, bp->pdev->irq, dev->dev_addr); 14132 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14098 pcie_print_link_status(bp->pdev); 14133 pcie_print_link_status(bp->pdev);
14099 14134
14100 bnx2x_register_phc(bp);
14101
14102 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) 14135 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14103 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); 14136 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14104 14137
@@ -14131,11 +14164,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
14131 struct bnx2x *bp, 14164 struct bnx2x *bp,
14132 bool remove_netdev) 14165 bool remove_netdev)
14133{ 14166{
14134 if (bp->ptp_clock) {
14135 ptp_clock_unregister(bp->ptp_clock);
14136 bp->ptp_clock = NULL;
14137 }
14138
14139 /* Delete storage MAC address */ 14167 /* Delete storage MAC address */
14140 if (!NO_FCOE(bp)) { 14168 if (!NO_FCOE(bp)) {
14141 rtnl_lock(); 14169 rtnl_lock();
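
Editor's note: the bnx2x rework moves the SVID update out of the attention handler. The handler now only sets BNX2X_SP_RTNL_UPDATE_SVID, and the sp_rtnl task performs the switch-update ramrod in process context, where RAMROD_COMP_WAIT (sleep until firmware completion) and RAMROD_RETRY are safe; PTP clock registration is likewise moved to nic-load/unload so the clock lifetime matches PTP support. A sketch of the flag-deferral pattern; the bit helpers are simplified, non-atomic stand-ins for the kernel's set_bit()/test_and_clear_bit():

#include <stdbool.h>
#include <stdio.h>

enum { SP_RTNL_UPDATE_SVID };

static unsigned long sp_rtnl_state;	/* one bit per deferred action */

static void set_flag(int bit) { sp_rtnl_state |= 1UL << bit; }

static bool test_and_clear(int bit)
{
	bool was = sp_rtnl_state & (1UL << bit);
	sp_rtnl_state &= ~(1UL << bit);
	return was;
}

static void handle_update_svid(void)
{
	puts("SVID ramrod issued under rtnl, waiting for completion");
}

/* Interrupt-ish context: only record the request and kick the worker. */
static void attn_handler(void)
{
	set_flag(SP_RTNL_UPDATE_SVID);
	/* schedule_delayed_work(&bp->sp_rtnl_task, 0) in the real driver */
}

/* Process context (the sp_rtnl task): safe to sleep on ramrod completion. */
static void sp_rtnl_task(void)
{
	if (test_and_clear(SP_RTNL_UPDATE_SVID))
		handle_update_svid();
}

int main(void) { attn_handler(); sp_rtnl_task(); return 0; }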
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 0bf2fd470819..7a6e82db4231 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -265,6 +265,7 @@ enum {
265 BNX2X_ETH_MAC, 265 BNX2X_ETH_MAC,
266 BNX2X_ISCSI_ETH_MAC, 266 BNX2X_ISCSI_ETH_MAC,
267 BNX2X_NETQ_ETH_MAC, 267 BNX2X_NETQ_ETH_MAC,
268 BNX2X_VLAN,
268 BNX2X_DONT_CONSUME_CAM_CREDIT, 269 BNX2X_DONT_CONSUME_CAM_CREDIT,
269 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, 270 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
270}; 271};
@@ -272,7 +273,8 @@ enum {
272#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ 273#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
273 1 << BNX2X_ETH_MAC | \ 274 1 << BNX2X_ETH_MAC | \
274 1 << BNX2X_ISCSI_ETH_MAC | \ 275 1 << BNX2X_ISCSI_ETH_MAC | \
275 1 << BNX2X_NETQ_ETH_MAC) 276 1 << BNX2X_NETQ_ETH_MAC | \
277 1 << BNX2X_VLAN)
276#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ 278#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
277 ((flags) & BNX2X_VLAN_MAC_CMP_MASK) 279 ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
278 280
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 997775777dbe..adabbe94a259 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2727,6 +2727,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2727static int bnxt_run_loopback(struct bnxt *bp) 2727static int bnxt_run_loopback(struct bnxt *bp)
2728{ 2728{
2729 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; 2729 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
2730 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
2730 struct bnxt_cp_ring_info *cpr; 2731 struct bnxt_cp_ring_info *cpr;
2731 int pkt_size, i = 0; 2732 int pkt_size, i = 0;
2732 struct sk_buff *skb; 2733 struct sk_buff *skb;
@@ -2734,7 +2735,9 @@ static int bnxt_run_loopback(struct bnxt *bp)
2734 u8 *data; 2735 u8 *data;
2735 int rc; 2736 int rc;
2736 2737
2737 cpr = &txr->bnapi->cp_ring; 2738 cpr = &rxr->bnapi->cp_ring;
2739 if (bp->flags & BNXT_FLAG_CHIP_P5)
2740 cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
2738 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh); 2741 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
2739 skb = netdev_alloc_skb(bp->dev, pkt_size); 2742 skb = netdev_alloc_skb(bp->dev, pkt_size);
2740 if (!skb) 2743 if (!skb)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d9a208f7bb40..b126926ef7f5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -61,7 +61,8 @@
61#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 61#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
62 | MACB_BIT(ISR_RLE) \ 62 | MACB_BIT(ISR_RLE) \
63 | MACB_BIT(TXERR)) 63 | MACB_BIT(TXERR))
64#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) 64#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
65 | MACB_BIT(TXUBR))
65 66
66/* Max length of transmit frame must be a multiple of 8 bytes */ 67/* Max length of transmit frame must be a multiple of 8 bytes */
67#define MACB_TX_LEN_ALIGN 8 68#define MACB_TX_LEN_ALIGN 8
@@ -680,6 +681,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
680 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 681 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
681 desc_64 = macb_64b_desc(bp, desc); 682 desc_64 = macb_64b_desc(bp, desc);
682 desc_64->addrh = upper_32_bits(addr); 683 desc_64->addrh = upper_32_bits(addr);
684 /* The low bits of RX address contain the RX_USED bit, clearing
685 * of which allows packet RX. Make sure the high bits are also
686 * visible to HW at that point.
687 */
688 dma_wmb();
683 } 689 }
684#endif 690#endif
685 desc->addr = lower_32_bits(addr); 691 desc->addr = lower_32_bits(addr);
@@ -928,14 +934,19 @@ static void gem_rx_refill(struct macb_queue *queue)
928 934
929 if (entry == bp->rx_ring_size - 1) 935 if (entry == bp->rx_ring_size - 1)
930 paddr |= MACB_BIT(RX_WRAP); 936 paddr |= MACB_BIT(RX_WRAP);
931 macb_set_addr(bp, desc, paddr);
932 desc->ctrl = 0; 937 desc->ctrl = 0;
938 /* Setting addr clears RX_USED and allows reception,
939 * make sure ctrl is cleared first to avoid a race.
940 */
941 dma_wmb();
942 macb_set_addr(bp, desc, paddr);
933 943
934 /* properly align Ethernet header */ 944 /* properly align Ethernet header */
935 skb_reserve(skb, NET_IP_ALIGN); 945 skb_reserve(skb, NET_IP_ALIGN);
936 } else { 946 } else {
937 desc->addr &= ~MACB_BIT(RX_USED);
938 desc->ctrl = 0; 947 desc->ctrl = 0;
948 dma_wmb();
949 desc->addr &= ~MACB_BIT(RX_USED);
939 } 950 }
940 } 951 }
941 952
@@ -989,11 +1000,15 @@ static int gem_rx(struct macb_queue *queue, int budget)
989 1000
990 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 1001 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
991 addr = macb_get_addr(bp, desc); 1002 addr = macb_get_addr(bp, desc);
992 ctrl = desc->ctrl;
993 1003
994 if (!rxused) 1004 if (!rxused)
995 break; 1005 break;
996 1006
1007 /* Ensure ctrl is at least as up-to-date as rxused */
1008 dma_rmb();
1009
1010 ctrl = desc->ctrl;
1011
997 queue->rx_tail++; 1012 queue->rx_tail++;
998 count++; 1013 count++;
999 1014
@@ -1168,11 +1183,14 @@ static int macb_rx(struct macb_queue *queue, int budget)
1168 /* Make hw descriptor updates visible to CPU */ 1183 /* Make hw descriptor updates visible to CPU */
1169 rmb(); 1184 rmb();
1170 1185
1171 ctrl = desc->ctrl;
1172
1173 if (!(desc->addr & MACB_BIT(RX_USED))) 1186 if (!(desc->addr & MACB_BIT(RX_USED)))
1174 break; 1187 break;
1175 1188
1189 /* Ensure ctrl is at least as up-to-date as addr */
1190 dma_rmb();
1191
1192 ctrl = desc->ctrl;
1193
1176 if (ctrl & MACB_BIT(RX_SOF)) { 1194 if (ctrl & MACB_BIT(RX_SOF)) {
1177 if (first_frag != -1) 1195 if (first_frag != -1)
1178 discard_partial_frame(queue, first_frag, tail); 1196 discard_partial_frame(queue, first_frag, tail);
@@ -1312,6 +1330,21 @@ static void macb_hresp_error_task(unsigned long data)
1312 netif_tx_start_all_queues(dev); 1330 netif_tx_start_all_queues(dev);
1313} 1331}
1314 1332
1333static void macb_tx_restart(struct macb_queue *queue)
1334{
1335 unsigned int head = queue->tx_head;
1336 unsigned int tail = queue->tx_tail;
1337 struct macb *bp = queue->bp;
1338
1339 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1340 queue_writel(queue, ISR, MACB_BIT(TXUBR));
1341
1342 if (head == tail)
1343 return;
1344
1345 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1346}
1347
1315static irqreturn_t macb_interrupt(int irq, void *dev_id) 1348static irqreturn_t macb_interrupt(int irq, void *dev_id)
1316{ 1349{
1317 struct macb_queue *queue = dev_id; 1350 struct macb_queue *queue = dev_id;
@@ -1369,6 +1402,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1369 if (status & MACB_BIT(TCOMP)) 1402 if (status & MACB_BIT(TCOMP))
1370 macb_tx_interrupt(queue); 1403 macb_tx_interrupt(queue);
1371 1404
1405 if (status & MACB_BIT(TXUBR))
1406 macb_tx_restart(queue);
1407
1372 /* Link change detection isn't possible with RMII, so we'll 1408 /* Link change detection isn't possible with RMII, so we'll
1373 * add that if/when we get our hands on a full-blown MII PHY. 1409 * add that if/when we get our hands on a full-blown MII PHY.
1374 */ 1410 */
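
Editor's note: the macb changes all order descriptor writes against the word that transfers ownership. On the refill side, ctrl must be visible before the addr write clears RX_USED (hence dma_wmb() before macb_set_addr()); on the receive side, ctrl must not be read until RX_USED has been observed set (hence dma_rmb()); and the new TXUBR handling restarts a transmitter that went idle with frames still queued. The ownership handshake can be modelled with C11 acquire/release atomics, which is what the sketch below does; it is an analogy for the DMA barriers, not the driver's code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RX_USED (1u << 0)

struct desc {
	_Atomic uint32_t addr;	/* low bit doubles as the ownership flag */
	uint32_t ctrl;
};

/* CPU side: publish a fresh buffer to the NIC. */
static void refill(struct desc *d, uint32_t paddr)
{
	d->ctrl = 0;
	/* dma_wmb() in the driver: ctrl must be visible before the
	 * ownership-carrying addr word hands the descriptor to hardware. */
	atomic_store_explicit(&d->addr, paddr & ~RX_USED, memory_order_release);
}

/* CPU side: consume a completed descriptor. */
static int poll_desc(struct desc *d, uint32_t *ctrl_out)
{
	uint32_t addr = atomic_load_explicit(&d->addr, memory_order_acquire);

	if (!(addr & RX_USED))
		return 0;	/* still owned by hardware */

	/* dma_rmb() in the driver: do not read ctrl before the RX_USED
	 * check, or a stale value may be observed. */
	*ctrl_out = d->ctrl;
	return 1;
}

int main(void)
{
	struct desc d = { .addr = RX_USED, .ctrl = 42 };
	uint32_t ctrl;
	printf("%d %u\n", poll_desc(&d, &ctrl), ctrl);
	refill(&d, 0x1000);
	return 0;
}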
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index cd5296b84229..a6dc47edc4cf 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -319,6 +319,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
319 desc_ptp = macb_ptp_desc(queue->bp, desc); 319 desc_ptp = macb_ptp_desc(queue->bp, desc);
320 tx_timestamp = &queue->tx_timestamps[head]; 320 tx_timestamp = &queue->tx_timestamps[head];
321 tx_timestamp->skb = skb; 321 tx_timestamp->skb = skb;
322 /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
323 dma_rmb();
322 tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1; 324 tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
323 tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2; 325 tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
324 /* move head */ 326 /* move head */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f152da1ce046..c62a0c830705 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1453,6 +1453,9 @@ struct cpl_tx_data {
1453#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S) 1453#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S)
1454#define T6_TX_FORCE_F T6_TX_FORCE_V(1U) 1454#define T6_TX_FORCE_F T6_TX_FORCE_V(1U)
1455 1455
1456#define TX_URG_S 16
1457#define TX_URG_V(x) ((x) << TX_URG_S)
1458
1456#define TX_SHOVE_S 14 1459#define TX_SHOVE_S 14
1457#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S) 1460#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)
1458 1461
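
Editor's note: TX_URG_S/TX_URG_V follow the Chelsio header convention for packing firmware-message fields: FIELD_S is the bit offset within the word and FIELD_V(x) shifts a value into position, so flag words are built by OR-ing _V() terms together. A tiny illustration:

#include <stdint.h>
#include <stdio.h>

#define TX_URG_S    16
#define TX_URG_V(x) ((x) << TX_URG_S)

#define TX_SHOVE_S    14
#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)

int main(void)
{
	/* Build a cpl_tx_data flags word with both URG and SHOVE set. */
	uint32_t flags = TX_URG_V(1U) | TX_SHOVE_V(1U);

	printf("flags = %#x\n", flags);	/* 0x14000 */
	return 0;
}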
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b52029e26d15..ad1779fc410e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -379,6 +379,9 @@ static void hns_ae_stop(struct hnae_handle *handle)
379 379
380 hns_ae_ring_enable_all(handle, 0); 380 hns_ae_ring_enable_all(handle, 0);
381 381
382 /* clean rx fbd. */
383 hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
384
382 (void)hns_mac_vm_config_bc_en(mac_cb, 0, false); 385 (void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
383} 386}
384 387
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index aaf72c055711..1790cdafd9b8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
67 struct mac_driver *drv = (struct mac_driver *)mac_drv; 67 struct mac_driver *drv = (struct mac_driver *)mac_drv;
68 68
69 /*enable GE rX/tX */ 69 /*enable GE rX/tX */
70 if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX)) 70 if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
71 dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1); 71 dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
72 72
73 if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX)) 73 if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
74 /* enable rx pcs */
75 dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
74 dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1); 76 dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
77 }
75} 78}
76 79
77static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode) 80static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
@@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
79 struct mac_driver *drv = (struct mac_driver *)mac_drv; 82 struct mac_driver *drv = (struct mac_driver *)mac_drv;
80 83
81 /*disable GE rX/tX */ 84 /*disable GE rX/tX */
82 if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX)) 85 if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
83 dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0); 86 dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
84 87
85 if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX)) 88 if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
89 /* disable rx pcs */
90 dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
86 dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0); 91 dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
92 }
87} 93}
88 94
89/* hns_gmac_get_en - get port enable 95/* hns_gmac_get_en - get port enable
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 3613e400e816..a97228c93831 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -778,6 +778,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
778 return rc; 778 return rc;
779} 779}
780 780
781static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
782{
783 if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
784 return;
785
786 phy_device_remove(mac_cb->phy_dev);
787 phy_device_free(mac_cb->phy_dev);
788
789 mac_cb->phy_dev = NULL;
790}
791
781#define MAC_MEDIA_TYPE_MAX_LEN 16 792#define MAC_MEDIA_TYPE_MAX_LEN 16
782 793
783static const struct { 794static const struct {
@@ -1117,7 +1128,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
1117 int max_port_num = hns_mac_get_max_port_num(dsaf_dev); 1128 int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
1118 1129
1119 for (i = 0; i < max_port_num; i++) { 1130 for (i = 0; i < max_port_num; i++) {
1131 if (!dsaf_dev->mac_cb[i])
1132 continue;
1133
1120 dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]); 1134 dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
1135 hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
1121 dsaf_dev->mac_cb[i] = NULL; 1136 dsaf_dev->mac_cb[i] = NULL;
1122 } 1137 }
1123} 1138}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index e557a4ef5996..3b9e74be5fbd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -935,6 +935,62 @@ static void hns_dsaf_tcam_mc_cfg(
935} 935}
936 936
937/** 937/**
 938 * hns_dsaf_tcam_uc_cfg_vague - config a unicast TCAM entry with a mask
 939 * @dsaf_dev: dsa fabric device struct pointer
 940 * @address: TCAM entry index
 941 * @tcam_data: match data; @tcam_mask: match mask; @tcam_uc: ucast config
 942 */
943static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
944 u32 address,
945 struct dsaf_tbl_tcam_data *tcam_data,
946 struct dsaf_tbl_tcam_data *tcam_mask,
947 struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
948{
949 spin_lock_bh(&dsaf_dev->tcam_lock);
950 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
951 hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
952 hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
953 hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
954 hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
955
 956 /* Restore Match Data */
957 tcam_mask->tbl_tcam_data_high = 0xffffffff;
958 tcam_mask->tbl_tcam_data_low = 0xffffffff;
959 hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
960
961 spin_unlock_bh(&dsaf_dev->tcam_lock);
962}
963
964/**
 965 * hns_dsaf_tcam_mc_cfg_vague - config a multicast TCAM entry with a mask
 966 * @dsaf_dev: dsa fabric device struct pointer
 967 * @address: TCAM entry index
 968 * @tcam_data: TCAM match data
 969 * @tcam_mask: TCAM match mask
 970 * @tcam_mcast: multicast forwarding config
 971 */
972static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
973 u32 address,
974 struct dsaf_tbl_tcam_data *tcam_data,
975 struct dsaf_tbl_tcam_data *tcam_mask,
976 struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
977{
978 spin_lock_bh(&dsaf_dev->tcam_lock);
979 hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
980 hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
981 hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
982 hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
983 hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
984
985 /*Restore Match Data*/
986 tcam_mask->tbl_tcam_data_high = 0xffffffff;
987 tcam_mask->tbl_tcam_data_low = 0xffffffff;
988 hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
989
990 spin_unlock_bh(&dsaf_dev->tcam_lock);
991}
992
993/**
938 * hns_dsaf_tcam_mc_invld - INT 994 * hns_dsaf_tcam_mc_invld - INT
939 * @dsaf_id: dsa fabric id 995 * @dsaf_id: dsa fabric id
940 * @address 996 * @address
@@ -1493,6 +1549,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
1493} 1549}
1494 1550
1495/** 1551/**
1552 * hns_dsaf_find_empty_mac_entry_reverse
 1553 * search the dsa fabric soft mac table for an empty entry, from the end
1554 * @dsaf_dev: dsa fabric device struct pointer
1555 */
1556static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
1557{
1558 struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
1559 struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
1560 int i;
1561
1562 soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
1563 for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
 1564 /* search all entries from end to start. */
1565 if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
1566 return i;
1567 soft_mac_entry--;
1568 }
1569 return DSAF_INVALID_ENTRY_IDX;
1570}
1571
1572/**
1496 * hns_dsaf_set_mac_key - set mac key 1573 * hns_dsaf_set_mac_key - set mac key
1497 * @dsaf_dev: dsa fabric device struct pointer 1574 * @dsaf_dev: dsa fabric device struct pointer
1498 * @mac_key: tcam key pointer 1575 * @mac_key: tcam key pointer
@@ -2166,9 +2243,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
2166 DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num); 2243 DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
2167 2244
2168 hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev, 2245 hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
2169 DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num); 2246 DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
2170 hw_stats->stp_drop += dsaf_read_dev(dsaf_dev, 2247 hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
2171 DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num); 2248 DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
2172 2249
2173 /* pfc pause frame statistics stored in dsaf inode*/ 2250 /* pfc pause frame statistics stored in dsaf inode*/
2174 if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) { 2251 if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
@@ -2285,237 +2362,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
2285 DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4); 2362 DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
2286 p[223 + i] = dsaf_read_dev(ddev, 2363 p[223 + i] = dsaf_read_dev(ddev,
2287 DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4); 2364 DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
2288 p[224 + i] = dsaf_read_dev(ddev, 2365 p[226 + i] = dsaf_read_dev(ddev,
2289 DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4); 2366 DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
2290 } 2367 }
2291 2368
2292 p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4); 2369 p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
2293 2370
2294 for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) { 2371 for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
2295 j = i * DSAF_COMM_CHN + port; 2372 j = i * DSAF_COMM_CHN + port;
2296 p[228 + i] = dsaf_read_dev(ddev, 2373 p[230 + i] = dsaf_read_dev(ddev,
2297 DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4); 2374 DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
2298 } 2375 }
2299 2376
2300 p[231] = dsaf_read_dev(ddev, 2377 p[233] = dsaf_read_dev(ddev,
2301 DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4); 2378 DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
2302 2379
2303 /* dsaf inode registers */ 2380 /* dsaf inode registers */
2304 for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) { 2381 for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
2305 j = i * DSAF_COMM_CHN + port; 2382 j = i * DSAF_COMM_CHN + port;
2306 p[232 + i] = dsaf_read_dev(ddev, 2383 p[234 + i] = dsaf_read_dev(ddev,
2307 DSAF_SBM_CFG_REG_0_REG + j * 0x80); 2384 DSAF_SBM_CFG_REG_0_REG + j * 0x80);
2308 p[235 + i] = dsaf_read_dev(ddev, 2385 p[237 + i] = dsaf_read_dev(ddev,
2309 DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80); 2386 DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
2310 p[238 + i] = dsaf_read_dev(ddev, 2387 p[240 + i] = dsaf_read_dev(ddev,
2311 DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80); 2388 DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
2312 p[241 + i] = dsaf_read_dev(ddev, 2389 p[243 + i] = dsaf_read_dev(ddev,
2313 DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80); 2390 DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
2314 p[244 + i] = dsaf_read_dev(ddev, 2391 p[246 + i] = dsaf_read_dev(ddev,
2315 DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80); 2392 DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
2316 p[245 + i] = dsaf_read_dev(ddev, 2393 p[249 + i] = dsaf_read_dev(ddev,
2317 DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80); 2394 DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
2318 p[248 + i] = dsaf_read_dev(ddev, 2395 p[252 + i] = dsaf_read_dev(ddev,
2319 DSAF_SBM_BP_CNT_0_0_REG + j * 0x80); 2396 DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
2320 p[251 + i] = dsaf_read_dev(ddev, 2397 p[255 + i] = dsaf_read_dev(ddev,
2321 DSAF_SBM_BP_CNT_1_0_REG + j * 0x80); 2398 DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
2322 p[254 + i] = dsaf_read_dev(ddev, 2399 p[258 + i] = dsaf_read_dev(ddev,
2323 DSAF_SBM_BP_CNT_2_0_REG + j * 0x80); 2400 DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
2324 p[257 + i] = dsaf_read_dev(ddev, 2401 p[261 + i] = dsaf_read_dev(ddev,
2325 DSAF_SBM_BP_CNT_3_0_REG + j * 0x80); 2402 DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
2326 p[260 + i] = dsaf_read_dev(ddev, 2403 p[264 + i] = dsaf_read_dev(ddev,
2327 DSAF_SBM_INER_ST_0_REG + j * 0x80); 2404 DSAF_SBM_INER_ST_0_REG + j * 0x80);
2328 p[263 + i] = dsaf_read_dev(ddev, 2405 p[267 + i] = dsaf_read_dev(ddev,
2329 DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80); 2406 DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
2330 p[266 + i] = dsaf_read_dev(ddev, 2407 p[270 + i] = dsaf_read_dev(ddev,
2331 DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80); 2408 DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
2332 p[269 + i] = dsaf_read_dev(ddev, 2409 p[273 + i] = dsaf_read_dev(ddev,
2333 DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80); 2410 DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
2334 p[272 + i] = dsaf_read_dev(ddev, 2411 p[276 + i] = dsaf_read_dev(ddev,
2335 DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80); 2412 DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
2336 p[275 + i] = dsaf_read_dev(ddev, 2413 p[279 + i] = dsaf_read_dev(ddev,
2337 DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80); 2414 DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
2338 p[278 + i] = dsaf_read_dev(ddev, 2415 p[282 + i] = dsaf_read_dev(ddev,
2339 DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80); 2416 DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
2340 p[281 + i] = dsaf_read_dev(ddev, 2417 p[285 + i] = dsaf_read_dev(ddev,
2341 DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80); 2418 DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
2342 p[284 + i] = dsaf_read_dev(ddev, 2419 p[288 + i] = dsaf_read_dev(ddev,
2343 DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80); 2420 DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
2344 p[287 + i] = dsaf_read_dev(ddev, 2421 p[291 + i] = dsaf_read_dev(ddev,
2345 DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80); 2422 DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
2346 p[290 + i] = dsaf_read_dev(ddev, 2423 p[294 + i] = dsaf_read_dev(ddev,
2347 DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80); 2424 DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
2348 p[293 + i] = dsaf_read_dev(ddev, 2425 p[297 + i] = dsaf_read_dev(ddev,
2349 DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80); 2426 DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
2350 p[296 + i] = dsaf_read_dev(ddev, 2427 p[300 + i] = dsaf_read_dev(ddev,
2351 DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80); 2428 DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
2352 p[299 + i] = dsaf_read_dev(ddev, 2429 p[303 + i] = dsaf_read_dev(ddev,
2353 DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80); 2430 DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
2354 p[302 + i] = dsaf_read_dev(ddev, 2431 p[306 + i] = dsaf_read_dev(ddev,
2355 DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80); 2432 DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
2356 p[305 + i] = dsaf_read_dev(ddev, 2433 p[309 + i] = dsaf_read_dev(ddev,
2357 DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80); 2434 DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
2358 p[308 + i] = dsaf_read_dev(ddev, 2435 p[312 + i] = dsaf_read_dev(ddev,
2359 DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80); 2436 DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
2360 } 2437 }
2361 2438
2362 /* dsaf onode registers */ 2439 /* dsaf onode registers */
2363 for (i = 0; i < DSAF_XOD_NUM; i++) { 2440 for (i = 0; i < DSAF_XOD_NUM; i++) {
2364 p[311 + i] = dsaf_read_dev(ddev, 2441 p[315 + i] = dsaf_read_dev(ddev,
2365 DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90); 2442 DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
2366 p[319 + i] = dsaf_read_dev(ddev, 2443 p[323 + i] = dsaf_read_dev(ddev,
2367 DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90); 2444 DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
2368 p[327 + i] = dsaf_read_dev(ddev, 2445 p[331 + i] = dsaf_read_dev(ddev,
2369 DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90); 2446 DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
2370 p[335 + i] = dsaf_read_dev(ddev, 2447 p[339 + i] = dsaf_read_dev(ddev,
2371 DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90); 2448 DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
2372 p[343 + i] = dsaf_read_dev(ddev, 2449 p[347 + i] = dsaf_read_dev(ddev,
2373 DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90); 2450 DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
2374 p[351 + i] = dsaf_read_dev(ddev, 2451 p[355 + i] = dsaf_read_dev(ddev,
2375 DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90); 2452 DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
2376 } 2453 }
2377 2454
2378 p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); 2455 p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
2379 p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90); 2456 p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
2380 p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90); 2457 p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
2381 2458
2382 for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) { 2459 for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
2383 j = i * DSAF_COMM_CHN + port; 2460 j = i * DSAF_COMM_CHN + port;
2384 p[362 + i] = dsaf_read_dev(ddev, 2461 p[366 + i] = dsaf_read_dev(ddev,
2385 DSAF_XOD_GNT_L_0_REG + j * 0x90); 2462 DSAF_XOD_GNT_L_0_REG + j * 0x90);
2386 p[365 + i] = dsaf_read_dev(ddev, 2463 p[369 + i] = dsaf_read_dev(ddev,
2387 DSAF_XOD_GNT_H_0_REG + j * 0x90); 2464 DSAF_XOD_GNT_H_0_REG + j * 0x90);
2388 p[368 + i] = dsaf_read_dev(ddev, 2465 p[372 + i] = dsaf_read_dev(ddev,
2389 DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90); 2466 DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
2390 p[371 + i] = dsaf_read_dev(ddev, 2467 p[375 + i] = dsaf_read_dev(ddev,
2391 DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90); 2468 DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
2392 p[374 + i] = dsaf_read_dev(ddev, 2469 p[378 + i] = dsaf_read_dev(ddev,
2393 DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90); 2470 DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
2394 p[377 + i] = dsaf_read_dev(ddev, 2471 p[381 + i] = dsaf_read_dev(ddev,
2395 DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90); 2472 DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
2396 p[380 + i] = dsaf_read_dev(ddev, 2473 p[384 + i] = dsaf_read_dev(ddev,
2397 DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90); 2474 DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
2398 p[383 + i] = dsaf_read_dev(ddev, 2475 p[387 + i] = dsaf_read_dev(ddev,
2399 DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90); 2476 DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
2400 p[386 + i] = dsaf_read_dev(ddev, 2477 p[390 + i] = dsaf_read_dev(ddev,
2401 DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90); 2478 DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
2402 p[389 + i] = dsaf_read_dev(ddev, 2479 p[393 + i] = dsaf_read_dev(ddev,
2403 DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90); 2480 DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
2404 } 2481 }
2405 2482
2406 p[392] = dsaf_read_dev(ddev, 2483 p[396] = dsaf_read_dev(ddev,
2407 DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90); 2484 DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
2408 p[393] = dsaf_read_dev(ddev, 2485 p[397] = dsaf_read_dev(ddev,
2409 DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90); 2486 DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
2410 p[394] = dsaf_read_dev(ddev, 2487 p[398] = dsaf_read_dev(ddev,
2411 DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90); 2488 DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
2412 p[395] = dsaf_read_dev(ddev, 2489 p[399] = dsaf_read_dev(ddev,
2413 DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90); 2490 DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
2414 p[396] = dsaf_read_dev(ddev, 2491 p[400] = dsaf_read_dev(ddev,
2415 DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90); 2492 DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
2416 p[397] = dsaf_read_dev(ddev, 2493 p[401] = dsaf_read_dev(ddev,
2417 DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90); 2494 DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
2418 p[398] = dsaf_read_dev(ddev, 2495 p[402] = dsaf_read_dev(ddev,
2419 DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90); 2496 DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
2420 p[399] = dsaf_read_dev(ddev, 2497 p[403] = dsaf_read_dev(ddev,
2421 DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90); 2498 DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
2422 p[400] = dsaf_read_dev(ddev, 2499 p[404] = dsaf_read_dev(ddev,
2423 DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90); 2500 DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
2424 p[401] = dsaf_read_dev(ddev, 2501 p[405] = dsaf_read_dev(ddev,
2425 DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90); 2502 DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
2426 p[402] = dsaf_read_dev(ddev, 2503 p[406] = dsaf_read_dev(ddev,
2427 DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90); 2504 DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
2428 p[403] = dsaf_read_dev(ddev, 2505 p[407] = dsaf_read_dev(ddev,
2429 DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90); 2506 DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
2430 p[404] = dsaf_read_dev(ddev, 2507 p[408] = dsaf_read_dev(ddev,
2431 DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90); 2508 DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
2432 2509
2433 /* dsaf voq registers */ 2510 /* dsaf voq registers */
2434 for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) { 2511 for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
2435 j = (i * DSAF_COMM_CHN + port) * 0x90; 2512 j = (i * DSAF_COMM_CHN + port) * 0x90;
2436 p[405 + i] = dsaf_read_dev(ddev, 2513 p[409 + i] = dsaf_read_dev(ddev,
2437 DSAF_VOQ_ECC_INVERT_EN_0_REG + j); 2514 DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
2438 p[408 + i] = dsaf_read_dev(ddev, 2515 p[412 + i] = dsaf_read_dev(ddev,
2439 DSAF_VOQ_SRAM_PKT_NUM_0_REG + j); 2516 DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
2440 p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j); 2517 p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
2441 p[414 + i] = dsaf_read_dev(ddev, 2518 p[418 + i] = dsaf_read_dev(ddev,
2442 DSAF_VOQ_OUT_PKT_NUM_0_REG + j); 2519 DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
2443 p[417 + i] = dsaf_read_dev(ddev, 2520 p[421 + i] = dsaf_read_dev(ddev,
2444 DSAF_VOQ_ECC_ERR_ADDR_0_REG + j); 2521 DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
2445 p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j); 2522 p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
2446 p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j); 2523 p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
2447 p[426 + i] = dsaf_read_dev(ddev, 2524 p[430 + i] = dsaf_read_dev(ddev,
2448 DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j); 2525 DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
2449 p[429 + i] = dsaf_read_dev(ddev, 2526 p[433 + i] = dsaf_read_dev(ddev,
2450 DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j); 2527 DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
2451 p[432 + i] = dsaf_read_dev(ddev, 2528 p[436 + i] = dsaf_read_dev(ddev,
2452 DSAF_VOQ_PPE_XOD_REQ_0_REG + j); 2529 DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
2453 p[435 + i] = dsaf_read_dev(ddev, 2530 p[439 + i] = dsaf_read_dev(ddev,
2454 DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j); 2531 DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
2455 p[438 + i] = dsaf_read_dev(ddev, 2532 p[442 + i] = dsaf_read_dev(ddev,
2456 DSAF_VOQ_BP_ALL_THRD_0_REG + j); 2533 DSAF_VOQ_BP_ALL_THRD_0_REG + j);
2457 } 2534 }
2458 2535
2459 /* dsaf tbl registers */ 2536 /* dsaf tbl registers */
2460 p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG); 2537 p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
2461 p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG); 2538 p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
2462 p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG); 2539 p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
2463 p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG); 2540 p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
2464 p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG); 2541 p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
2465 p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG); 2542 p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
2466 p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG); 2543 p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
2467 p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG); 2544 p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
2468 p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG); 2545 p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
2469 p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG); 2546 p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
2470 p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG); 2547 p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
2471 p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG); 2548 p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
2472 p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG); 2549 p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
2473 p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG); 2550 p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
2474 p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG); 2551 p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
2475 p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); 2552 p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
2476 p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); 2553 p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
2477 p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG); 2554 p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
2478 p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG); 2555 p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
2479 p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG); 2556 p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
2480 p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG); 2557 p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
2481 p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG); 2558 p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
2482 p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG); 2559 p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
2483 2560
2484 for (i = 0; i < DSAF_SW_PORT_NUM; i++) { 2561 for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
2485 j = i * 0x8; 2562 j = i * 0x8;
2486 p[464 + 2 * i] = dsaf_read_dev(ddev, 2563 p[468 + 2 * i] = dsaf_read_dev(ddev,
2487 DSAF_TBL_DA0_MIS_INFO1_0_REG + j); 2564 DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
2488 p[465 + 2 * i] = dsaf_read_dev(ddev, 2565 p[469 + 2 * i] = dsaf_read_dev(ddev,
2489 DSAF_TBL_DA0_MIS_INFO0_0_REG + j); 2566 DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
2490 } 2567 }
2491 2568
2492 p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG); 2569 p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
2493 p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG); 2570 p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
2494 p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG); 2571 p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
2495 p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG); 2572 p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
2496 p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG); 2573 p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
2497 p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG); 2574 p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
2498 p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG); 2575 p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
2499 p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG); 2576 p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
2500 p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG); 2577 p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
2501 p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG); 2578 p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
2502 p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG); 2579 p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
2503 p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG); 2580 p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
2504 2581
2505 /* dsaf other registers */ 2582 /* dsaf other registers */
2506 p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4); 2583 p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
2507 p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4); 2584 p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
2508 p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4); 2585 p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
2509 p[495] = dsaf_read_dev(ddev, 2586 p[499] = dsaf_read_dev(ddev,
2510 DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4); 2587 DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
2511 p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); 2588 p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
2512 p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); 2589 p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
2513 2590
2514 if (!is_ver1) 2591 if (!is_ver1)
2515 p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4); 2592 p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
2516 2593
2517 /* mark end of dsaf regs */ 2594 /* mark end of dsaf regs */
2518 for (i = 499; i < 504; i++) 2595 for (i = 503; i < 504; i++)
2519 p[i] = 0xdddddddd; 2596 p[i] = 0xdddddddd;
2520} 2597}
2521 2598
@@ -2673,58 +2750,156 @@ int hns_dsaf_get_regs_count(void)
2673 return DSAF_DUMP_REGS_NUM; 2750 return DSAF_DUMP_REGS_NUM;
2674} 2751}
2675 2752
2676/* Reserve the last TCAM entry for promisc support */ 2753static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
2677#define dsaf_promisc_tcam_entry(port) \
2678 (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
2679void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2680 u32 port, bool enable)
2681{ 2754{
2755 struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
2756 struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
2757 struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
2758 struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
2682 struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); 2759 struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
2683 struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; 2760 struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
2684 u16 entry_index; 2761 struct dsaf_drv_mac_single_dest_entry mask_entry;
2685 struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask; 2762 struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
2686 struct dsaf_tbl_tcam_mcast_cfg mac_data = {0}; 2763 struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
2764 u16 entry_index = DSAF_INVALID_ENTRY_IDX;
2765 struct dsaf_drv_tbl_tcam_key mac_key;
2766 struct hns_mac_cb *mac_cb;
2767 u8 addr[ETH_ALEN] = {0};
2768 u8 port_num;
2769 u16 mskid;
2770
2771 /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
2772 hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
2773 entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
2774 if (entry_index != DSAF_INVALID_ENTRY_IDX)
2775 return;
2776
 2777 /* put the promisc tcam entries at the end. */
2778 /* 1. set promisc unicast vague tcam entry. */
2779 entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
2780 if (entry_index == DSAF_INVALID_ENTRY_IDX) {
2781 dev_err(dsaf_dev->dev,
2782 "enable uc promisc failed (port:%#x)\n",
2783 port);
2784 return;
2785 }
2786
2787 mac_cb = dsaf_dev->mac_cb[port];
2788 (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
2789 tbl_tcam_ucast.tbl_ucast_out_port = port_num;
2687 2790
2688 if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev)) 2791 /* config uc vague table */
2792 hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
2793 &tbl_tcam_mask_uc, &tbl_tcam_ucast);
2794
2795 /* update software entry */
2796 soft_mac_entry = priv->soft_mac_tbl;
2797 soft_mac_entry += entry_index;
2798 soft_mac_entry->index = entry_index;
2799 soft_mac_entry->tcam_key.high.val = mac_key.high.val;
2800 soft_mac_entry->tcam_key.low.val = mac_key.low.val;
2801 /* step back to the START for mc. */
2802 soft_mac_entry = priv->soft_mac_tbl;
2803
2804 /* 2. set promisc multicast vague tcam entry. */
2805 entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
2806 if (entry_index == DSAF_INVALID_ENTRY_IDX) {
2807 dev_err(dsaf_dev->dev,
2808 "enable mc promisc failed (port:%#x)\n",
2809 port);
2689 return; 2810 return;
2811 }
2812
2813 memset(&mask_entry, 0x0, sizeof(mask_entry));
2814 memset(&mask_key, 0x0, sizeof(mask_key));
2815 memset(&temp_key, 0x0, sizeof(temp_key));
2816 mask_entry.addr[0] = 0x01;
2817 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
2818 port, mask_entry.addr);
2819 tbl_tcam_mcast.tbl_mcast_item_vld = 1;
2820 tbl_tcam_mcast.tbl_mcast_old_en = 0;
2690 2821
2691 /* find the tcam entry index for promisc */ 2822 if (port < DSAF_SERVICE_NW_NUM) {
2692 entry_index = dsaf_promisc_tcam_entry(port); 2823 mskid = port;
2693 2824 } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
2694 memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data)); 2825 mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
2695 memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
2696
2697 /* config key mask */
2698 if (enable) {
2699 dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
2700 DSAF_TBL_TCAM_KEY_PORT_M,
2701 DSAF_TBL_TCAM_KEY_PORT_S, port);
2702 dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
2703 DSAF_TBL_TCAM_KEY_PORT_M,
2704 DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
2705
2706 /* SUB_QID */
2707 dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
2708 DSAF_SERVICE_NW_NUM, true);
2709 mac_data.tbl_mcast_item_vld = true; /* item_vld bit */
2710 } else { 2826 } else {
2711 mac_data.tbl_mcast_item_vld = false; /* item_vld bit */ 2827 dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
2828 dsaf_dev->ae_dev.name, port,
2829 mask_key.high.val, mask_key.low.val);
2830 return;
2712 } 2831 }
2713 2832
2714 dev_dbg(dsaf_dev->dev, 2833 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
2715 "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n", 2834 mskid % 32, 1);
2716 dsaf_dev->ae_dev.name, tbl_tcam_data.high.val, 2835 memcpy(&temp_key, &mask_key, sizeof(mask_key));
2717 tbl_tcam_data.low.val, entry_index); 2836 hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
2837 (struct dsaf_tbl_tcam_data *)(&mask_key),
2838 &tbl_tcam_mcast);
2839
2840 /* update software entry */
2841 soft_mac_entry += entry_index;
2842 soft_mac_entry->index = entry_index;
2843 soft_mac_entry->tcam_key.high.val = temp_key.high.val;
2844 soft_mac_entry->tcam_key.low.val = temp_key.low.val;
2845}
2718 2846
2719 /* config promisc entry with mask */ 2847static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
2720 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, 2848{
2721 (struct dsaf_tbl_tcam_data *)&tbl_tcam_data, 2849 struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
2722 (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask, 2850 struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
2723 &mac_data); 2851 struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
2852 struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
2853 struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
2854 struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
2855 struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
2856 u16 entry_index = DSAF_INVALID_ENTRY_IDX;
2857 struct dsaf_drv_tbl_tcam_key mac_key;
2858 u8 addr[ETH_ALEN] = {0};
2724 2859
2725 /* config software entry */ 2860 /* 1. delete uc vague tcam entry. */
2861 /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
2862 hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
2863 entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
2864
2865 if (entry_index == DSAF_INVALID_ENTRY_IDX)
2866 return;
2867
2868 /* config uc vague table */
2869 hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
2870 &tbl_tcam_mask, &tbl_tcam_ucast);
2871 /* update soft management table. */
2872 soft_mac_entry = priv->soft_mac_tbl;
2873 soft_mac_entry += entry_index;
2874 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
2875 /* step back to the START for mc. */
2876 soft_mac_entry = priv->soft_mac_tbl;
2877
2878 /* 2. delete mc vague tcam entry. */
2879 addr[0] = 0x01;
2880 memset(&mac_key, 0x0, sizeof(mac_key));
2881 hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
2882 entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
2883
2884 if (entry_index == DSAF_INVALID_ENTRY_IDX)
2885 return;
2886
2887 /* config mc vague table */
2888 hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
2889 &tbl_tcam_mask, &tbl_tcam_mcast);
2890 /* update soft management table. */
2726 soft_mac_entry += entry_index; 2891 soft_mac_entry += entry_index;
2727 soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; 2892 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
2893}
2894
2895/* Reserve the last TCAM entry for promisc support */
2896void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2897 u32 port, bool enable)
2898{
2899 if (enable)
2900 set_promisc_tcam_enable(dsaf_dev, port);
2901 else
2902 set_promisc_tcam_disable(dsaf_dev, port);
2728} 2903}
2729 2904
2730int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) 2905int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
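
A note on the multicast mask arithmetic in set_promisc_tcam_enable() above: each port owns one bit in tbl_mcast_port_msk[], packed 32 bits per word, so the driver addresses word mskid / 32 and bit mskid % 32. Below is a minimal standalone sketch of that mapping; the values chosen for DSAF_SERVICE_NW_NUM and DSAF_BASE_INNER_PORT_NUM are illustrative assumptions, not taken from the driver headers.

/* Editor's sketch, not part of the patch: word/bit mapping used above. */
#include <stdio.h>

#define DSAF_SERVICE_NW_NUM      6    /* assumed for illustration */
#define DSAF_BASE_INNER_PORT_NUM 127  /* assumed for illustration */

int main(void)
{
	unsigned int ports[] = { 0, 5, 127, 130 };

	for (unsigned int i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
		unsigned int port = ports[i];
		unsigned int mskid;

		if (port < DSAF_SERVICE_NW_NUM)
			mskid = port;
		else if (port >= DSAF_BASE_INNER_PORT_NUM)
			mskid = port - DSAF_BASE_INNER_PORT_NUM +
				DSAF_SERVICE_NW_NUM;
		else
			continue; /* invalid range, rejected by the driver */

		/* bit (mskid % 32) of word (mskid / 32) gets set */
		printf("port %u -> word %u, bit %u\n",
		       port, mskid / 32, mskid % 32);
	}
	return 0;
}

Compiled with any C compiler, this prints the word/bit pair each port id lands on, which is exactly what dsaf_set_bit() receives in the patch.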
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 74d935d82cbc..b9733b0b8482 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -176,7 +176,7 @@
176#define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50 176#define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
177#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00 177#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
178#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50 178#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
-#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x1C00
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x103C
180#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00 180#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
181#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100 181#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
182#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50 182#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
@@ -404,11 +404,11 @@
404#define RCB_ECC_ERR_ADDR4_REG 0x460 404#define RCB_ECC_ERR_ADDR4_REG 0x460
405#define RCB_ECC_ERR_ADDR5_REG 0x464 405#define RCB_ECC_ERR_ADDR5_REG 0x464
406 406
-#define RCB_COM_SF_CFG_INTMASK_RING		0x480
-#define RCB_COM_SF_CFG_RING_STS			0x484
-#define RCB_COM_SF_CFG_RING			0x488
-#define RCB_COM_SF_CFG_INTMASK_BD		0x48C
-#define RCB_COM_SF_CFG_BD_RINT_STS		0x470
+#define RCB_COM_SF_CFG_INTMASK_RING		0x470
+#define RCB_COM_SF_CFG_RING_STS			0x474
+#define RCB_COM_SF_CFG_RING			0x478
+#define RCB_COM_SF_CFG_INTMASK_BD		0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS		0x480
412#define RCB_COM_RCB_RD_BD_BUSY 0x490 412#define RCB_COM_RCB_RD_BD_BUSY 0x490
413#define RCB_COM_RCB_FBD_CRT_EN 0x494 413#define RCB_COM_RCB_FBD_CRT_EN 0x494
414#define RCB_COM_AXI_WR_ERR_INTMASK 0x498 414#define RCB_COM_AXI_WR_ERR_INTMASK 0x498
@@ -534,6 +534,7 @@
534#define GMAC_LD_LINK_COUNTER_REG 0x01D0UL 534#define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
535#define GMAC_LOOP_REG 0x01DCUL 535#define GMAC_LOOP_REG 0x01DCUL
536#define GMAC_RECV_CONTROL_REG 0x01E0UL 536#define GMAC_RECV_CONTROL_REG 0x01E0UL
537#define GMAC_PCS_RX_EN_REG 0x01E4UL
537#define GMAC_VLAN_CODE_REG 0x01E8UL 538#define GMAC_VLAN_CODE_REG 0x01E8UL
538#define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL 539#define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
539#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL 540#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index c62378c07e70..5748d3f722f6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1188,6 +1188,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
1188 if (h->phy_if == PHY_INTERFACE_MODE_XGMII) 1188 if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1189 phy_dev->autoneg = false; 1189 phy_dev->autoneg = false;
1190 1190
1191 if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
1192 phy_stop(phy_dev);
1193
1191 return 0; 1194 return 0;
1192} 1195}
1193 1196
@@ -1283,6 +1286,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
1283 return cpu; 1286 return cpu;
1284} 1287}
1285 1288
1289static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
1290{
1291 int i;
1292
1293 for (i = 0; i < q_num * 2; i++) {
1294 if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
1295 irq_set_affinity_hint(priv->ring_data[i].ring->irq,
1296 NULL);
1297 free_irq(priv->ring_data[i].ring->irq,
1298 &priv->ring_data[i]);
1299 priv->ring_data[i].ring->irq_init_flag =
1300 RCB_IRQ_NOT_INITED;
1301 }
1302 }
1303}
1304
1286static int hns_nic_init_irq(struct hns_nic_priv *priv) 1305static int hns_nic_init_irq(struct hns_nic_priv *priv)
1287{ 1306{
1288 struct hnae_handle *h = priv->ae_handle; 1307 struct hnae_handle *h = priv->ae_handle;
@@ -1308,7 +1327,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
1308 if (ret) { 1327 if (ret) {
1309 netdev_err(priv->netdev, "request irq(%d) fail\n", 1328 netdev_err(priv->netdev, "request irq(%d) fail\n",
1310 rd->ring->irq); 1329 rd->ring->irq);
-			return ret;
+			goto out_free_irq;
1312 } 1331 }
1313 disable_irq(rd->ring->irq); 1332 disable_irq(rd->ring->irq);
1314 1333
@@ -1323,6 +1342,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
1323 } 1342 }
1324 1343
1325 return 0; 1344 return 0;
1345
1346out_free_irq:
1347 hns_nic_free_irq(h->q_num, priv);
1348 return ret;
1326} 1349}
1327 1350
1328static int hns_nic_net_up(struct net_device *ndev) 1351static int hns_nic_net_up(struct net_device *ndev)
@@ -1332,6 +1355,9 @@ static int hns_nic_net_up(struct net_device *ndev)
1332 int i, j; 1355 int i, j;
1333 int ret; 1356 int ret;
1334 1357
1358 if (!test_bit(NIC_STATE_DOWN, &priv->state))
1359 return 0;
1360
1335 ret = hns_nic_init_irq(priv); 1361 ret = hns_nic_init_irq(priv);
1336 if (ret != 0) { 1362 if (ret != 0) {
1337 netdev_err(ndev, "hns init irq failed! ret=%d\n", ret); 1363 netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
@@ -1367,6 +1393,7 @@ out_has_some_queues:
1367 for (j = i - 1; j >= 0; j--) 1393 for (j = i - 1; j >= 0; j--)
1368 hns_nic_ring_close(ndev, j); 1394 hns_nic_ring_close(ndev, j);
1369 1395
1396 hns_nic_free_irq(h->q_num, priv);
1370 set_bit(NIC_STATE_DOWN, &priv->state); 1397 set_bit(NIC_STATE_DOWN, &priv->state);
1371 1398
1372 return ret; 1399 return ret;
@@ -1484,11 +1511,19 @@ static int hns_nic_net_stop(struct net_device *ndev)
1484} 1511}
1485 1512
1486static void hns_tx_timeout_reset(struct hns_nic_priv *priv); 1513static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
1514#define HNS_TX_TIMEO_LIMIT (40 * HZ)
1487static void hns_nic_net_timeout(struct net_device *ndev) 1515static void hns_nic_net_timeout(struct net_device *ndev)
1488{ 1516{
1489 struct hns_nic_priv *priv = netdev_priv(ndev); 1517 struct hns_nic_priv *priv = netdev_priv(ndev);
1490 1518
-	hns_tx_timeout_reset(priv);
+	if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+		ndev->watchdog_timeo *= 2;
+		netdev_info(ndev, "watchdog_timo changed to %d.\n",
+			    ndev->watchdog_timeo);
+	} else {
+		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+		hns_tx_timeout_reset(priv);
+	}
1492} 1527}
1493 1528
1494static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, 1529static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -2051,11 +2086,11 @@ static void hns_nic_service_task(struct work_struct *work)
2051 = container_of(work, struct hns_nic_priv, service_task); 2086 = container_of(work, struct hns_nic_priv, service_task);
2052 struct hnae_handle *h = priv->ae_handle; 2087 struct hnae_handle *h = priv->ae_handle;
2053 2088
2089 hns_nic_reset_subtask(priv);
2054 hns_nic_update_link_status(priv->netdev); 2090 hns_nic_update_link_status(priv->netdev);
2055 h->dev->ops->update_led_status(h); 2091 h->dev->ops->update_led_status(h);
2056 hns_nic_update_stats(priv->netdev); 2092 hns_nic_update_stats(priv->netdev);
2057 2093
2058 hns_nic_reset_subtask(priv);
2059 hns_nic_service_event_complete(priv); 2094 hns_nic_service_event_complete(priv);
2060} 2095}
2061 2096
@@ -2341,7 +2376,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2341 ndev->min_mtu = MAC_MIN_MTU; 2376 ndev->min_mtu = MAC_MIN_MTU;
2342 switch (priv->enet_ver) { 2377 switch (priv->enet_ver) {
2343 case AE_VERSION_2: 2378 case AE_VERSION_2:
-		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
2345 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2380 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2346 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 2381 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2347 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; 2382 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
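
The hns_nic_net_timeout() change above turns a hard reset on every TX timeout into an exponential backoff: the watchdog interval doubles on each timeout until it reaches HNS_TX_TIMEO_LIMIT (40 * HZ), and only then is the default restored and a real reset scheduled. Below is a standalone sketch of that progression; the values of HZ and HNS_NIC_TX_TIMEOUT are assumptions for illustration, not the driver's.

/* Editor's sketch of the backoff behaviour in hns_nic_net_timeout(). */
#include <stdio.h>

#define HZ                 100          /* assumed */
#define HNS_NIC_TX_TIMEOUT (2 * HZ)     /* assumed default */
#define HNS_TX_TIMEO_LIMIT (40 * HZ)    /* from the patch */

int main(void)
{
	int timeo = HNS_NIC_TX_TIMEOUT;
	int event = 0;

	for (;;) {
		event++;
		if (timeo < HNS_TX_TIMEO_LIMIT) {
			timeo *= 2; /* back off, no reset yet */
			printf("timeout %d: watchdog_timeo -> %d jiffies\n",
			       event, timeo);
		} else {
			timeo = HNS_NIC_TX_TIMEOUT; /* restore and reset */
			printf("timeout %d: limit reached, resetting\n",
			       event);
			break;
		}
	}
	return 0;
}

With these assumed values the interval runs 200, 400, 800, ... jiffies until it crosses the 4000-jiffy limit, at which point the sketch (like the driver) falls back to a reset.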
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 14d00985f087..5ecbb1adcf3b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1936,8 +1936,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
1936static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 1936static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1937{ 1937{
1938 struct ibmvnic_rwi *rwi; 1938 struct ibmvnic_rwi *rwi;
1939 unsigned long flags;
1939 1940
-	mutex_lock(&adapter->rwi_lock);
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
1941 1942
1942 if (!list_empty(&adapter->rwi_list)) { 1943 if (!list_empty(&adapter->rwi_list)) {
1943 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 1944 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1947,7 +1948,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1947 rwi = NULL; 1948 rwi = NULL;
1948 } 1949 }
1949 1950
-	mutex_unlock(&adapter->rwi_lock);
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
1951 return rwi; 1952 return rwi;
1952} 1953}
1953 1954
@@ -2022,6 +2023,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2022 struct list_head *entry, *tmp_entry; 2023 struct list_head *entry, *tmp_entry;
2023 struct ibmvnic_rwi *rwi, *tmp; 2024 struct ibmvnic_rwi *rwi, *tmp;
2024 struct net_device *netdev = adapter->netdev; 2025 struct net_device *netdev = adapter->netdev;
2026 unsigned long flags;
2025 int ret; 2027 int ret;
2026 2028
2027 if (adapter->state == VNIC_REMOVING || 2029 if (adapter->state == VNIC_REMOVING ||
@@ -2038,21 +2040,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2038 goto err; 2040 goto err;
2039 } 2041 }
2040 2042
-	mutex_lock(&adapter->rwi_lock);
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
2042 2044
2043 list_for_each(entry, &adapter->rwi_list) { 2045 list_for_each(entry, &adapter->rwi_list) {
2044 tmp = list_entry(entry, struct ibmvnic_rwi, list); 2046 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2045 if (tmp->reset_reason == reason) { 2047 if (tmp->reset_reason == reason) {
2046 netdev_dbg(netdev, "Skipping matching reset\n"); 2048 netdev_dbg(netdev, "Skipping matching reset\n");
-			mutex_unlock(&adapter->rwi_lock);
+			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2048 ret = EBUSY; 2050 ret = EBUSY;
2049 goto err; 2051 goto err;
2050 } 2052 }
2051 } 2053 }
2052 2054
-	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2054 if (!rwi) { 2056 if (!rwi) {
-		mutex_unlock(&adapter->rwi_lock);
+		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2056 ibmvnic_close(netdev); 2058 ibmvnic_close(netdev);
2057 ret = ENOMEM; 2059 ret = ENOMEM;
2058 goto err; 2060 goto err;
@@ -2066,7 +2068,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2066 } 2068 }
2067 rwi->reset_reason = reason; 2069 rwi->reset_reason = reason;
2068 list_add_tail(&rwi->list, &adapter->rwi_list); 2070 list_add_tail(&rwi->list, &adapter->rwi_list);
-	mutex_unlock(&adapter->rwi_lock);
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2070 adapter->resetting = true; 2072 adapter->resetting = true;
2071 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); 2073 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2072 schedule_work(&adapter->ibmvnic_reset); 2074 schedule_work(&adapter->ibmvnic_reset);
@@ -4756,7 +4758,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4756 4758
4757 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4759 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4758 INIT_LIST_HEAD(&adapter->rwi_list); 4760 INIT_LIST_HEAD(&adapter->rwi_list);
-	mutex_init(&adapter->rwi_lock);
+	spin_lock_init(&adapter->rwi_lock);
4760 adapter->resetting = false; 4762 adapter->resetting = false;
4761 4763
4762 adapter->mac_change_pending = false; 4764 adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 99c4f8d331ce..f2018dbebfa5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
1075 struct tasklet_struct tasklet; 1075 struct tasklet_struct tasklet;
1076 enum vnic_state state; 1076 enum vnic_state state;
1077 enum ibmvnic_reset_reason reset_reason; 1077 enum ibmvnic_reset_reason reset_reason;
-	struct mutex rwi_lock;
+	spinlock_t rwi_lock;
1079 struct list_head rwi_list; 1079 struct list_head rwi_list;
1080 struct work_struct ibmvnic_reset; 1080 struct work_struct ibmvnic_reset;
1081 bool resetting; 1081 bool resetting;
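
The ibmvnic hunks above replace the rwi_lock mutex with a spinlock and switch the matching allocation from GFP_KERNEL to GFP_ATOMIC. That pairing is the standard conversion when a path may be entered from atomic context: a sleeping lock is forbidden there, and so is a sleeping allocation while the spinlock is held. A minimal kernel-style sketch of the resulting pattern follows; the names are illustrative, not the driver's.

/* Editor's sketch of the locking pattern the ibmvnic change adopts. */
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>

struct work_item {
	struct list_head list;
	int reason;
};

static LIST_HEAD(work_list);
static DEFINE_SPINLOCK(work_lock);

static int queue_work_item(int reason)
{
	struct work_item *item;
	unsigned long flags;

	spin_lock_irqsave(&work_lock, flags);
	/* GFP_ATOMIC: may not sleep while holding a spinlock */
	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item) {
		spin_unlock_irqrestore(&work_lock, flags);
		return -ENOMEM;
	}
	item->reason = reason;
	list_add_tail(&item->list, &work_list);
	spin_unlock_irqrestore(&work_lock, flags);
	return 0;
}

Because spin_lock_irqsave() also masks local interrupts, the same lock can safely be taken from interrupt handlers, which a mutex never can.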
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index fbb21ac06c98..0b3bcb73d4bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1546,17 +1546,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1546 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); 1546 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1547 1547
1548 /* Copy the address first, so that we avoid a possible race with 1548 /* Copy the address first, so that we avoid a possible race with
-	 * .set_rx_mode(). If we copy after changing the address in the filter
-	 * list, we might open ourselves to a narrow race window where
-	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
-	 * from passing.
+	 * .set_rx_mode().
+	 * - Remove old address from MAC filter
+	 * - Copy new address
+	 * - Add new address to MAC filter
 	 */
-	ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 	i40e_del_mac_filter(vsi, netdev->dev_addr);
-	i40e_add_mac_filter(vsi, addr->sa_data);
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+	i40e_add_mac_filter(vsi, netdev->dev_addr);
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
1560 if (vsi->type == I40E_VSI_MAIN) { 1560 if (vsi->type == I40E_VSI_MAIN) {
1561 i40e_status ret; 1561 i40e_status ret;
1562 1562
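
The i40e_set_mac() reordering above closes the race the old comment only worked around: the filter delete, the dev_addr update, and the filter add now all sit inside mac_filter_hash_lock, so a concurrent .set_rx_mode() can never observe the address and the filter list out of step. A sketch of the invariant, with illustrative names (swap_dev_addr is not an i40e function):

/* Editor's sketch: every step happens under the same lock the reader
 * (.set_rx_mode) takes, so no half-updated state is ever visible.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>

static void swap_dev_addr(struct net_device *netdev, spinlock_t *lock,
			  const u8 *new_addr)
{
	spin_lock_bh(lock);
	/* 1. remove the filter keyed on the old netdev->dev_addr */
	/* 2. publish the new address */
	ether_addr_copy(netdev->dev_addr, new_addr);
	/* 3. add the filter keyed on the new netdev->dev_addr */
	spin_unlock_bh(lock);
}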
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a0b1575468fc..a7e14e98889f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1559,24 +1559,6 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1559} 1559}
1560 1560
1561/** 1561/**
1562 * i40e_receive_skb - Send a completed packet up the stack
1563 * @rx_ring: rx ring in play
1564 * @skb: packet to send up
1565 * @vlan_tag: vlan tag for packet
1566 **/
1567void i40e_receive_skb(struct i40e_ring *rx_ring,
1568 struct sk_buff *skb, u16 vlan_tag)
1569{
1570 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1571
1572 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1573 (vlan_tag & VLAN_VID_MASK))
1574 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1575
1576 napi_gro_receive(&q_vector->napi, skb);
1577}
1578
1579/**
1580 * i40e_alloc_rx_buffers - Replace used receive buffers 1562 * i40e_alloc_rx_buffers - Replace used receive buffers
1581 * @rx_ring: ring to place buffers on 1563 * @rx_ring: ring to place buffers on
1582 * @cleaned_count: number of buffers to replace 1564 * @cleaned_count: number of buffers to replace
@@ -1793,8 +1775,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
1793 * other fields within the skb. 1775 * other fields within the skb.
1794 **/ 1776 **/
1795void i40e_process_skb_fields(struct i40e_ring *rx_ring, 1777void i40e_process_skb_fields(struct i40e_ring *rx_ring,
-			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
-			     u8 rx_ptype)
+			     union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1798{ 1779{
1799 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1780 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1800 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1781 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
@@ -1802,6 +1783,8 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1802 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK; 1783 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1803 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> 1784 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1804 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; 1785 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1786 u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1787 I40E_RXD_QW1_PTYPE_SHIFT;
1805 1788
1806 if (unlikely(tsynvalid)) 1789 if (unlikely(tsynvalid))
1807 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); 1790 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -1812,6 +1795,13 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1812 1795
1813 skb_record_rx_queue(skb, rx_ring->queue_index); 1796 skb_record_rx_queue(skb, rx_ring->queue_index);
1814 1797
1798 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1799 u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1800
1801 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1802 le16_to_cpu(vlan_tag));
1803 }
1804
1815 /* modifies the skb - consumes the enet header */ 1805 /* modifies the skb - consumes the enet header */
1816 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1806 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1817} 1807}
@@ -2350,8 +2340,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2350 struct i40e_rx_buffer *rx_buffer; 2340 struct i40e_rx_buffer *rx_buffer;
2351 union i40e_rx_desc *rx_desc; 2341 union i40e_rx_desc *rx_desc;
2352 unsigned int size; 2342 unsigned int size;
2353 u16 vlan_tag;
2354 u8 rx_ptype;
2355 u64 qword; 2343 u64 qword;
2356 2344
2357 /* return some buffers to hardware, one at a time is too slow */ 2345 /* return some buffers to hardware, one at a time is too slow */
@@ -2444,18 +2432,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2444 /* probably a little skewed due to removing CRC */ 2432 /* probably a little skewed due to removing CRC */
2445 total_rx_bytes += skb->len; 2433 total_rx_bytes += skb->len;
2446 2434
2447 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2448 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2449 I40E_RXD_QW1_PTYPE_SHIFT;
2450
2451 /* populate checksum, VLAN, and protocol */ 2435 /* populate checksum, VLAN, and protocol */
-		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+		i40e_process_skb_fields(rx_ring, rx_desc, skb);
2453
2454 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2455 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2456 2437
2457 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); 2438 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
-		i40e_receive_skb(rx_ring, skb, vlan_tag);
+		napi_gro_receive(&rx_ring->q_vector->napi, skb);
2459 skb = NULL; 2440 skb = NULL;
2460 2441
2461 /* update budget accounting */ 2442 /* update budget accounting */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 09809dffe399..8af0e99c6c0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -12,10 +12,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
12 union i40e_rx_desc *rx_desc, 12 union i40e_rx_desc *rx_desc,
13 u64 qw); 13 u64 qw);
14void i40e_process_skb_fields(struct i40e_ring *rx_ring, 14void i40e_process_skb_fields(struct i40e_ring *rx_ring,
-			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
-			     u8 rx_ptype);
+			     union i40e_rx_desc *rx_desc, struct sk_buff *skb);
17void i40e_receive_skb(struct i40e_ring *rx_ring,
18 struct sk_buff *skb, u16 vlan_tag);
19void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring); 16void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
20void i40e_update_rx_stats(struct i40e_ring *rx_ring, 17void i40e_update_rx_stats(struct i40e_ring *rx_ring,
21 unsigned int total_rx_bytes, 18 unsigned int total_rx_bytes,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 433c8e688c78..870cf654e436 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -634,8 +634,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
634 struct i40e_rx_buffer *bi; 634 struct i40e_rx_buffer *bi;
635 union i40e_rx_desc *rx_desc; 635 union i40e_rx_desc *rx_desc;
636 unsigned int size; 636 unsigned int size;
637 u16 vlan_tag;
638 u8 rx_ptype;
639 u64 qword; 637 u64 qword;
640 638
641 if (cleaned_count >= I40E_RX_BUFFER_WRITE) { 639 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
@@ -713,14 +711,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
713 total_rx_bytes += skb->len; 711 total_rx_bytes += skb->len;
714 total_rx_packets++; 712 total_rx_packets++;
715 713
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-			   I40E_RXD_QW1_PTYPE_SHIFT;
-		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
-		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
-			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
-		i40e_receive_skb(rx_ring, skb, vlan_tag);
+		i40e_process_skb_fields(rx_ring, rx_desc, skb);
+		napi_gro_receive(&rx_ring->q_vector->napi, skb);
724 } 716 }
725 717
726 i40e_finalize_xdp_rx(rx_ring, xdp_xmit); 718 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 5dacfc870259..345701af7749 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -700,7 +700,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
700 u8 num_tcs = adapter->hw_tcs; 700 u8 num_tcs = adapter->hw_tcs;
701 u32 reg_val; 701 u32 reg_val;
702 u32 queue; 702 u32 queue;
703 u32 word;
704 703
705 /* remove VLAN filters belonging to this VF */ 704 /* remove VLAN filters belonging to this VF */
706 ixgbe_clear_vf_vlans(adapter, vf); 705 ixgbe_clear_vf_vlans(adapter, vf);
@@ -758,6 +757,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
758 } 757 }
759 } 758 }
760 759
760 IXGBE_WRITE_FLUSH(hw);
761}
762
763static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
764{
765 struct ixgbe_hw *hw = &adapter->hw;
766 u32 word;
767
761 /* Clear VF's mailbox memory */ 768 /* Clear VF's mailbox memory */
762 for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) 769 for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
763 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); 770 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
@@ -831,6 +838,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
831 /* reset the filters for the device */ 838 /* reset the filters for the device */
832 ixgbe_vf_reset_event(adapter, vf); 839 ixgbe_vf_reset_event(adapter, vf);
833 840
841 ixgbe_vf_clear_mbx(adapter, vf);
842
834 /* set vf mac address */ 843 /* set vf mac address */
835 if (!is_zero_ether_addr(vf_mac)) 844 if (!is_zero_ether_addr(vf_mac))
836 ixgbe_set_vf_mac(adapter, vf, vf_mac); 845 ixgbe_set_vf_mac(adapter, vf, vf_mac);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 46a0f6b45d84..9d4568eb2297 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -408,7 +408,6 @@ struct mvneta_port {
408 struct mvneta_pcpu_stats __percpu *stats; 408 struct mvneta_pcpu_stats __percpu *stats;
409 409
410 int pkt_size; 410 int pkt_size;
411 unsigned int frag_size;
412 void __iomem *base; 411 void __iomem *base;
413 struct mvneta_rx_queue *rxqs; 412 struct mvneta_rx_queue *rxqs;
414 struct mvneta_tx_queue *txqs; 413 struct mvneta_tx_queue *txqs;
@@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
2905 if (!pp->bm_priv) { 2904 if (!pp->bm_priv) {
2906 /* Set Offset */ 2905 /* Set Offset */
2907 mvneta_rxq_offset_set(pp, rxq, 0); 2906 mvneta_rxq_offset_set(pp, rxq, 0);
-		mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
+		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+					PAGE_SIZE :
+					MVNETA_RX_BUF_SIZE(pp->pkt_size));
2909 mvneta_rxq_bm_disable(pp, rxq); 2910 mvneta_rxq_bm_disable(pp, rxq);
2910 mvneta_rxq_fill(pp, rxq, rxq->size); 2911 mvneta_rxq_fill(pp, rxq, rxq->size);
2911 } else { 2912 } else {
@@ -3760,7 +3761,6 @@ static int mvneta_open(struct net_device *dev)
3760 int ret; 3761 int ret;
3761 3762
3762 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 3763 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
3763 pp->frag_size = PAGE_SIZE;
3764 3764
3765 ret = mvneta_setup_rxqs(pp); 3765 ret = mvneta_setup_rxqs(pp);
3766 if (ret) 3766 if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 125ea99418df..f1dab0b55769 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4405,12 +4405,15 @@ static void mvpp2_phylink_validate(struct net_device *dev,
4405 case PHY_INTERFACE_MODE_10GKR: 4405 case PHY_INTERFACE_MODE_10GKR:
4406 case PHY_INTERFACE_MODE_XAUI: 4406 case PHY_INTERFACE_MODE_XAUI:
4407 case PHY_INTERFACE_MODE_NA: 4407 case PHY_INTERFACE_MODE_NA:
-		phylink_set(mask, 10000baseCR_Full);
-		phylink_set(mask, 10000baseSR_Full);
-		phylink_set(mask, 10000baseLR_Full);
-		phylink_set(mask, 10000baseLRM_Full);
-		phylink_set(mask, 10000baseER_Full);
-		phylink_set(mask, 10000baseKR_Full);
+		if (port->gop_id == 0) {
+			phylink_set(mask, 10000baseT_Full);
+			phylink_set(mask, 10000baseCR_Full);
+			phylink_set(mask, 10000baseSR_Full);
+			phylink_set(mask, 10000baseLR_Full);
+			phylink_set(mask, 10000baseLRM_Full);
+			phylink_set(mask, 10000baseER_Full);
+			phylink_set(mask, 10000baseKR_Full);
+		}
4414 /* Fall-through */ 4417 /* Fall-through */
4415 case PHY_INTERFACE_MODE_RGMII: 4418 case PHY_INTERFACE_MODE_RGMII:
4416 case PHY_INTERFACE_MODE_RGMII_ID: 4419 case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4421,7 +4424,6 @@ static void mvpp2_phylink_validate(struct net_device *dev,
4421 phylink_set(mask, 10baseT_Full); 4424 phylink_set(mask, 10baseT_Full);
4422 phylink_set(mask, 100baseT_Half); 4425 phylink_set(mask, 100baseT_Half);
4423 phylink_set(mask, 100baseT_Full); 4426 phylink_set(mask, 100baseT_Full);
4424 phylink_set(mask, 10000baseT_Full);
4425 /* Fall-through */ 4427 /* Fall-through */
4426 case PHY_INTERFACE_MODE_1000BASEX: 4428 case PHY_INTERFACE_MODE_1000BASEX:
4427 case PHY_INTERFACE_MODE_2500BASEX: 4429 case PHY_INTERFACE_MODE_2500BASEX:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6af587cf147f..6e101201dcbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1195,11 +1195,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
1195 struct ethtool_ts_info *info) 1195 struct ethtool_ts_info *info)
1196{ 1196{
1197 struct mlx5_core_dev *mdev = priv->mdev; 1197 struct mlx5_core_dev *mdev = priv->mdev;
1198 int ret;
1199
1200 ret = ethtool_op_get_ts_info(priv->netdev, info);
1201 if (ret)
1202 return ret;
1203 1198
1204 info->phc_index = mlx5_clock_get_ptp_index(mdev); 1199 info->phc_index = mlx5_clock_get_ptp_index(mdev);
1205 1200
@@ -1207,9 +1202,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
1207 info->phc_index == -1) 1202 info->phc_index == -1)
1208 return 0; 1203 return 0;
1209 1204
-	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1211 SOF_TIMESTAMPING_RX_HARDWARE | 1206 SOF_TIMESTAMPING_RX_HARDWARE |
1212 SOF_TIMESTAMPING_RAW_HARDWARE; 1207 SOF_TIMESTAMPING_RAW_HARDWARE;
1213 1208
1214 info->tx_types = BIT(HWTSTAMP_TX_OFF) | 1209 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1215 BIT(HWTSTAMP_TX_ON); 1210 BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cd6872a9e4dc..bc791404f2e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -130,6 +130,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
130 return !params->lro_en && frag_sz <= PAGE_SIZE; 130 return !params->lro_en && frag_sz <= PAGE_SIZE;
131} 131}
132 132
133#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
134 MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
133static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, 135static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
134 struct mlx5e_params *params) 136 struct mlx5e_params *params)
135{ 137{
@@ -140,6 +142,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
140 if (!mlx5e_rx_is_linear_skb(mdev, params)) 142 if (!mlx5e_rx_is_linear_skb(mdev, params))
141 return false; 143 return false;
142 144
145 if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
146 return false;
147
143 if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) 148 if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
144 return true; 149 return true;
145 150
@@ -1400,6 +1405,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1400 struct mlx5_core_dev *mdev = c->mdev; 1405 struct mlx5_core_dev *mdev = c->mdev;
1401 struct mlx5_rate_limit rl = {0}; 1406 struct mlx5_rate_limit rl = {0};
1402 1407
1408 cancel_work_sync(&sq->dim.work);
1403 mlx5e_destroy_sq(mdev, sq->sqn); 1409 mlx5e_destroy_sq(mdev, sq->sqn);
1404 if (sq->rate_limit) { 1410 if (sq->rate_limit) {
1405 rl.rate = sq->rate_limit; 1411 rl.rate = sq->rate_limit;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index ed1158b58798..3a177133e230 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -47,6 +47,7 @@
47 47
48#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ 48#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
49 max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) 49 max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
50#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
50 51
51static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; 52static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
52 53
@@ -586,8 +587,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
586 587
587 ASSERT_RTNL(); 588 ASSERT_RTNL();
588 589
-	if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
-	    !ether_addr_equal(e->h_dest, ha))
+	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
+	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
591 mlx5e_tc_encap_flows_del(priv, e); 592 mlx5e_tc_encap_flows_del(priv, e);
592 593
593 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { 594 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
@@ -1395,30 +1396,19 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
1395 netdev->features |= netdev->hw_features; 1396 netdev->features |= netdev->hw_features;
1396} 1397}
1397 1398
1398static int mlx5e_rep_get_default_num_channels(struct mlx5_eswitch_rep *rep,
1399 struct net_device *netdev)
1400{
1401 if (rep->vport == FDB_UPLINK_VPORT)
1402 return mlx5e_get_netdev_max_channels(netdev);
1403 else
1404 return 1;
1405}
1406
1407static int mlx5e_init_rep(struct mlx5_core_dev *mdev, 1399static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
1408 struct net_device *netdev, 1400 struct net_device *netdev,
1409 const struct mlx5e_profile *profile, 1401 const struct mlx5e_profile *profile,
1410 void *ppriv) 1402 void *ppriv)
1411{ 1403{
1412 struct mlx5e_priv *priv = netdev_priv(netdev); 1404 struct mlx5e_priv *priv = netdev_priv(netdev);
1413 struct mlx5e_rep_priv *rpriv = ppriv;
1414 int err; 1405 int err;
1415 1406
1416 err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); 1407 err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
1417 if (err) 1408 if (err)
1418 return err; 1409 return err;
1419 1410
-	priv->channels.params.num_channels =
-			mlx5e_rep_get_default_num_channels(rpriv->rep, netdev);
+	priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
1422 1412
1423 mlx5e_build_rep_params(netdev); 1413 mlx5e_build_rep_params(netdev);
1424 mlx5e_build_rep_netdev(netdev); 1414 mlx5e_build_rep_netdev(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index a75aad035593..cdce30a95c60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1190,7 +1190,7 @@ mpwrq_cqe_out:
1190int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) 1190int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1191{ 1191{
1192 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); 1192 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	struct mlx5e_xdpsq *xdpsq;
+	struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
1194 struct mlx5_cqe64 *cqe; 1194 struct mlx5_cqe64 *cqe;
1195 int work_done = 0; 1195 int work_done = 0;
1196 1196
@@ -1201,10 +1201,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1201 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); 1201 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
1202 1202
1203 cqe = mlx5_cqwq_get_cqe(&cq->wq); 1203 cqe = mlx5_cqwq_get_cqe(&cq->wq);
-	if (!cqe)
+	if (!cqe) {
+		if (unlikely(work_done))
+			goto out;
 		return 0;
-
-	xdpsq = &rq->xdpsq;
+	}
1208 1209
1209 do { 1210 do {
1210 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) { 1211 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
@@ -1219,6 +1220,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1219 rq->handle_rx_cqe(rq, cqe); 1220 rq->handle_rx_cqe(rq, cqe);
1220 } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); 1221 } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
1221 1222
1223out:
1222 if (xdpsq->doorbell) { 1224 if (xdpsq->doorbell) {
1223 mlx5e_xmit_xdp_doorbell(xdpsq); 1225 mlx5e_xmit_xdp_doorbell(xdpsq);
1224 xdpsq->doorbell = false; 1226 xdpsq->doorbell = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 3071a44e2f30..d3fe48ff9da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -75,7 +75,6 @@ static const struct counter_desc sw_stats_desc[] = {
75 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, 75 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
76 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) }, 76 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
77 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, 77 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
78 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
79 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, 78 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
80 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, 79 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
81 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, 80 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
@@ -199,7 +198,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
199 s->tx_nop += sq_stats->nop; 198 s->tx_nop += sq_stats->nop;
200 s->tx_queue_stopped += sq_stats->stopped; 199 s->tx_queue_stopped += sq_stats->stopped;
201 s->tx_queue_wake += sq_stats->wake; 200 s->tx_queue_wake += sq_stats->wake;
202 s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
203 s->tx_queue_dropped += sq_stats->dropped; 201 s->tx_queue_dropped += sq_stats->dropped;
204 s->tx_cqe_err += sq_stats->cqe_err; 202 s->tx_cqe_err += sq_stats->cqe_err;
205 s->tx_recover += sq_stats->recover; 203 s->tx_recover += sq_stats->recover;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 807e60582a6b..fe91ec06e3c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -87,7 +87,6 @@ struct mlx5e_sw_stats {
87 u64 tx_recover; 87 u64 tx_recover;
88 u64 tx_cqes; 88 u64 tx_cqes;
89 u64 tx_queue_wake; 89 u64 tx_queue_wake;
90 u64 tx_udp_seg_rem;
91 u64 tx_cqe_err; 90 u64 tx_cqe_err;
92 u64 tx_xdp_xmit; 91 u64 tx_xdp_xmit;
93 u64 tx_xdp_full; 92 u64 tx_xdp_full;
@@ -221,7 +220,6 @@ struct mlx5e_sq_stats {
221 u64 csum_partial_inner; 220 u64 csum_partial_inner;
222 u64 added_vlan_packets; 221 u64 added_vlan_packets;
223 u64 nop; 222 u64 nop;
224 u64 udp_seg_rem;
225#ifdef CONFIG_MLX5_EN_TLS 223#ifdef CONFIG_MLX5_EN_TLS
226 u64 tls_ooo; 224 u64 tls_ooo;
227 u64 tls_resync_bytes; 225 u64 tls_resync_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index c1a9120412b8..59255aeec5ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -903,9 +903,9 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
903 struct mlx5_flow_handle *rule; 903 struct mlx5_flow_handle *rule;
904 904
905 memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); 905 memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
-	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-	slow_attr->split_count = 0,
-	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,
+	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	slow_attr->split_count = 0;
+	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
909 909
910 rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); 910 rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
911 if (!IS_ERR(rule)) 911 if (!IS_ERR(rule))
@@ -920,6 +920,9 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
920 struct mlx5_esw_flow_attr *slow_attr) 920 struct mlx5_esw_flow_attr *slow_attr)
921{ 921{
922 memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); 922 memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
923 slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
924 slow_attr->split_count = 0;
925 slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
923 mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); 926 mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
924 flow->flags &= ~MLX5E_TC_FLOW_SLOW; 927 flow->flags &= ~MLX5E_TC_FLOW_SLOW;
925} 928}
@@ -941,11 +944,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
941 int err = 0, encap_err = 0; 944 int err = 0, encap_err = 0;
942 int out_index; 945 int out_index;
943 946
-	/* if prios are not supported, keep the old behaviour of using same prio
-	 * for all offloaded rules.
-	 */
-	if (!mlx5_eswitch_prios_supported(esw))
-		attr->prio = 1;
+	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+		return -EOPNOTSUPP;
+	}
949 951
950 if (attr->chain > max_chain) { 952 if (attr->chain > max_chain) {
951 NL_SET_ERR_MSG(extack, "Requested chain is out of supported range"); 953 NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
@@ -1163,10 +1165,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
1163 flow->rule[0] = rule; 1165 flow->rule[0] = rule;
1164 } 1166 }
1165 1167
-	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
-		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
-		mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
-	}
+	/* we know that the encap is valid */
+	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+	mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
1170} 1171}
1171 1172
1172static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) 1173static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
@@ -2653,8 +2654,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2653 NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range"); 2654 NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
2654 return -EOPNOTSUPP; 2655 return -EOPNOTSUPP;
2655 } 2656 }
-			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
+			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
2658 attr->dest_chain = dest_chain; 2658 attr->dest_chain = dest_chain;
2659 2659
2660 continue; 2660 continue;
@@ -2667,6 +2667,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2667 if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) 2667 if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
2668 return -EOPNOTSUPP; 2668 return -EOPNOTSUPP;
2669 2669
2670 if (attr->dest_chain) {
2671 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
2672 NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
2673 return -EOPNOTSUPP;
2674 }
2675 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2676 }
2677
2670 if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { 2678 if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
2671 NL_SET_ERR_MSG_MOD(extack, 2679 NL_SET_ERR_MSG_MOD(extack,
2672 "current firmware doesn't support split rule for port mirroring"); 2680 "current firmware doesn't support split rule for port mirroring");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f21277e636a3..79f122b45def 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -452,7 +452,7 @@ static void del_sw_hw_rule(struct fs_node *node)
452 452
453 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && 453 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
454 --fte->dests_size) { 454 --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
+		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
456 update_fte = true; 456 update_fte = true;
457 } 457 }
458out: 458out:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 281aeb1c2386..ddedf8ab5b64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -81,6 +81,7 @@ struct mlxsw_core {
81 struct mlxsw_core_port *ports; 81 struct mlxsw_core_port *ports;
82 unsigned int max_ports; 82 unsigned int max_ports;
83 bool reload_fail; 83 bool reload_fail;
84 bool fw_flash_in_progress;
84 unsigned long driver_priv[0]; 85 unsigned long driver_priv[0];
85 /* driver_priv has to be always the last item */ 86 /* driver_priv has to be always the last item */
86}; 87};
@@ -428,12 +429,16 @@ struct mlxsw_reg_trans {
428 struct rcu_head rcu; 429 struct rcu_head rcu;
429}; 430};
430 431
-#define MLXSW_EMAD_TIMEOUT_MS			200
+#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
+#define MLXSW_EMAD_TIMEOUT_MS			200
432 434
433static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) 435static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
434{ 436{
435 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); 437 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
436 438
439 if (trans->core->fw_flash_in_progress)
440 timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
441
437 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout); 442 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
438} 443}
439 444
@@ -1891,6 +1896,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
1891} 1896}
1892EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get); 1897EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
1893 1898
1899void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
1900{
1901 mlxsw_core->fw_flash_in_progress = true;
1902}
1903EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
1904
1905void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
1906{
1907 mlxsw_core->fw_flash_in_progress = false;
1908}
1909EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
1910
1894static int __init mlxsw_core_module_init(void) 1911static int __init mlxsw_core_module_init(void)
1895{ 1912{
1896 int err; 1913 int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index d811be8989b0..4e114f35ee0d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -294,6 +294,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
294 u64 *p_single_size, u64 *p_double_size, 294 u64 *p_single_size, u64 *p_double_size,
295 u64 *p_linear_size); 295 u64 *p_linear_size);
296 296
297void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
298void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
299
297bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, 300bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
298 enum mlxsw_res_id res_id); 301 enum mlxsw_res_id res_id);
299 302
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4197b29a9bda..9bfcb9c060a9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -316,8 +316,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
316 }, 316 },
317 .mlxsw_sp = mlxsw_sp 317 .mlxsw_sp = mlxsw_sp
318 }; 318 };
319 int err;
320
321 mlxsw_core_fw_flash_start(mlxsw_sp->core);
322 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
323 mlxsw_core_fw_flash_end(mlxsw_sp->core);
319 324
320 return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); 325 return err;
321} 326}
322 327
323static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) 328static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
@@ -3671,6 +3676,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
3671 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 3676 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
3672 /* NVE traps */ 3677 /* NVE traps */
3673 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), 3678 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
3679 MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
3674}; 3680};
3675 3681
3676static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 3682static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 3b1e8268df1c..0a31fff2516e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -1033,6 +1033,6 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
1033{ 1033{
1034 WARN_ON(mlxsw_sp->nve->num_nve_tunnels); 1034 WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
1035 rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht); 1035 rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
-	mlxsw_sp->nve = NULL;
 	kfree(mlxsw_sp->nve);
+	mlxsw_sp->nve = NULL;
1038} 1038}
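
The spectrum_nve ordering fix above is worth spelling out: the old code cleared mlxsw_sp->nve before freeing it, so kfree() received NULL, which is defined as a no-op, and the allocation leaked on every fini. Userspace free() behaves the same way for NULL, so the bug can be demonstrated with a minimal standalone sketch:

/* Editor's sketch of the leak pattern fixed above. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *obj = malloc(64);
	char *saved = obj; /* keep a handle so the sketch can clean up */

	/* buggy order: the allocation is now unreachable */
	obj = NULL;
	free(obj);      /* free(NULL) does nothing */

	/* fixed order: free first, then clear the dangling pointer */
	free(saved);
	saved = NULL;

	printf("freed, then cleared\n");
	return 0;
}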
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 6f18f4d3322a..451216dd7f6b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -60,6 +60,7 @@ enum {
60 MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, 60 MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
61 MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, 61 MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
62 MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1, 62 MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
63 MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
63 MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD, 64 MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
64 MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, 65 MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
65 MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, 66 MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e8ca98c070f6..20c9377e99cb 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -802,14 +802,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
802 u32 mac_addr_hi = 0; 802 u32 mac_addr_hi = 0;
803 u32 mac_addr_lo = 0; 803 u32 mac_addr_lo = 0;
804 u32 data; 804 u32 data;
805 int ret;
806 805
807 netdev = adapter->netdev; 806 netdev = adapter->netdev;
808 lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
809 ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
810 0, 1000, 20000, 100);
811 if (ret)
812 return ret;
813 807
814 /* setup auto duplex, and speed detection */ 808 /* setup auto duplex, and speed detection */
815 data = lan743x_csr_read(adapter, MAC_CR); 809 data = lan743x_csr_read(adapter, MAC_CR);
@@ -2719,8 +2713,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
2719 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, 2713 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
2720 "pci-%s", pci_name(adapter->pdev)); 2714 "pci-%s", pci_name(adapter->pdev));
2721 2715
-	/* set to internal PHY id */
-	adapter->mdiobus->phy_mask = ~(u32)BIT(1);
+	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
+		/* LAN7430 uses internal phy at address 1 */
+		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
2724 2719
2725 /* register mdiobus */ 2720 /* register mdiobus */
2726 ret = mdiobus_register(adapter->mdiobus); 2721 ret = mdiobus_register(adapter->mdiobus);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 4c1fb7e57888..7cde387e5ec6 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -808,7 +808,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
808 struct vxge_hw_device_date *fw_date = &hw_info->fw_date; 808 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
809 struct vxge_hw_device_version *flash_version = &hw_info->flash_version; 809 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
810 struct vxge_hw_device_date *flash_date = &hw_info->flash_date; 810 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
-	u64 data0, data1 = 0, steer_ctrl = 0;
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
812 enum vxge_hw_status status; 812 enum vxge_hw_status status;
813 813
814 status = vxge_hw_vpath_fw_api(vpath, 814 status = vxge_hw_vpath_fw_api(vpath,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c642fd84eb02..2cdbf29ecbe7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -343,13 +343,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
343 !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) 343 !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
344 return -EOPNOTSUPP; 344 return -EOPNOTSUPP;
345 345
-		/* We need to store TCP flags in the IPv4 key space, thus
-		 * we need to ensure we include a IPv4 key layer if we have
-		 * not done so already.
+		/* We need to store TCP flags in the either the IPv4 or IPv6 key
+		 * space, thus we need to ensure we include a IPv4/IPv6 key
+		 * layer if we have not done so already.
 		 */
-		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
-			key_layer |= NFP_FLOWER_LAYER_IPV4;
-			key_size += sizeof(struct nfp_flower_ipv4);
+		if (!key_basic)
+			return -EOPNOTSUPP;
+
+		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+			switch (key_basic->n_proto) {
+			case cpu_to_be16(ETH_P_IP):
+				key_layer |= NFP_FLOWER_LAYER_IPV4;
+				key_size += sizeof(struct nfp_flower_ipv4);
+				break;
+
+			case cpu_to_be16(ETH_P_IPV6):
+				key_layer |= NFP_FLOWER_LAYER_IPV6;
+				key_size += sizeof(struct nfp_flower_ipv6);
+				break;
+
+			default:
+				return -EOPNOTSUPP;
+			}
353 } 369 }
354 } 370 }
355 371
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 052b3d2c07a1..c662c6f5bee3 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
912 .ndo_validate_addr = eth_validate_addr, 912 .ndo_validate_addr = eth_validate_addr,
913}; 913};
914 914
-static void __init get_mac_address(struct net_device *dev)
+static void get_mac_address(struct net_device *dev)
916{ 916{
917 struct w90p910_ether *ether = netdev_priv(dev); 917 struct w90p910_ether *ether = netdev_priv(dev);
918 struct platform_device *pdev; 918 struct platform_device *pdev;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 7e120b58ac58..b13cfb449d8f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12837,8 +12837,9 @@ enum MFW_DRV_MSG_TYPE {
 	MFW_DRV_MSG_BW_UPDATE10,
 	MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
 	MFW_DRV_MSG_BW_UPDATE11,
-	MFW_DRV_MSG_OEM_CFG_UPDATE,
+	MFW_DRV_MSG_RESERVED,
 	MFW_DRV_MSG_GET_TLV_REQ,
+	MFW_DRV_MSG_OEM_CFG_UPDATE,
 	MFW_DRV_MSG_MAX
 };
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 504c8f71b61d..90afd514ffe1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2506,6 +2506,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
 		DP_NOTICE(cdev,
 			  "Unable to map frag - dropping packet\n");
+		rc = -ENOMEM;
 		goto err;
 	}
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 18e39e5e447b..5b0c32bdb016 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6415,7 +6415,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 		goto out;
 	}
 
-	if (status & LinkChg)
+	if (status & LinkChg && tp->dev->phydev)
 		phy_mac_interrupt(tp->dev->phydev);
 
 	if (unlikely(status & RxFIFOOver &&
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e821ccc8027b..0e0a0789c2ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4224,6 +4224,7 @@ int stmmac_dvr_probe(struct device *device,
 	priv->wq = create_singlethread_workqueue("stmmac_wq");
 	if (!priv->wq) {
 		dev_err(priv->device, "failed to create workqueue\n");
+		ret = -ENOMEM;
 		goto error_wq;
 	}
 
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 0ff5a403a8dc..b2ff903a9cb6 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
 static void ca8210_rx_done(struct cas_control *cas_ctl)
 {
 	u8 *buf;
-	u8 len;
+	unsigned int len;
 	struct work_priv_container *mlme_reset_wpc;
 	struct ca8210_priv *priv = cas_ctl->priv;
 
@@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
 	if (len > CA8210_SPI_BUF_SIZE) {
 		dev_crit(
 			&priv->spi->dev,
-			"Received packet len (%d) erroneously long\n",
+			"Received packet len (%u) erroneously long\n",
 			len
 		);
 		goto finish;
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 51b5198d5943..b6743f03dce0 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -492,7 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
 	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
 		return -EINVAL;
 
-	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
 			     info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
 			     hwsim_edge_policy, NULL))
 		return -EINVAL;
@@ -542,7 +542,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
 	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
 		return -EINVAL;
 
-	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
 			     info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
 			     hwsim_edge_policy, NULL))
 		return -EINVAL;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 54af2bde6682..51990002d495 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -315,11 +315,8 @@ static int mdio_bus_phy_restore(struct device *dev)
 	if (ret < 0)
 		return ret;
 
-	/* The PHY needs to renegotiate. */
-	phydev->link = 0;
-	phydev->state = PHY_UP;
-
-	phy_start_machine(phydev);
+	if (phydev->attached_dev && phydev->adjust_link)
+		phy_start_machine(phydev);
 
 	return 0;
 }
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 184c24baca15..d6916f787fce 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface)
 		return -EIO;
 	}
 
+	/* check if we have a valid interface */
+	if (if_num > 16) {
+		kfree(config_data);
+		return -EINVAL;
+	}
+
 	switch (config_data[if_num]) {
 	case 0x0:
 		result = 0;
@@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface,
 
 	/* Get the interface/port specification from either driver_info or from
 	 * the device itself */
-	if (id->driver_info)
+	if (id->driver_info) {
+		/* if_num is controlled by the device, driver_info is a 0 terminated
+		 * array. Make sure, the access is in bounds! */
+		for (i = 0; i <= if_num; ++i)
+			if (((u32 *)(id->driver_info))[i] == 0)
+				goto exit;
 		port_spec = ((u32 *)(id->driver_info))[if_num];
-	else
+	} else {
 		port_spec = hso_get_config_data(interface);
+		if (port_spec < 0)
+			goto exit;
+	}
 
 	/* Check if we need to switch to alt interfaces prior to port
 	 * configuration */
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3c8bdac78866..e96bc0c6140f 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2325,6 +2325,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
 
+	/* Added to support MAC address changes */
+	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
 	return 0;
 }
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 72a55b6b4211..c8872dd5ff5e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1117,6 +1117,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x1435, 0xd191, 4)},	/* Wistron NeWeb D19Q1 */
+	{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)},	/* Fibocom NL668 series */
 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
 	{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
 	{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
@@ -1229,6 +1230,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
 	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
 	{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f1b5201cc320..60dd1ec1665f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -129,6 +129,7 @@
 #define USB_UPS_CTRL		0xd800
 #define USB_POWER_CUT		0xd80a
 #define USB_MISC_0		0xd81a
+#define USB_MISC_1		0xd81f
 #define USB_AFE_CTRL2		0xd824
 #define USB_UPS_CFG		0xd842
 #define USB_UPS_FLAGS		0xd848
@@ -555,6 +556,7 @@ enum spd_duplex {
 
 /* MAC PASSTHRU */
 #define AD_MASK			0xfee0
+#define BND_MASK		0x0004
 #define EFUSE			0xcfdb
 #define PASS_THRU_MASK		0x1
 
@@ -1150,7 +1152,7 @@ out1:
 	return ret;
 }
 
-/* Devices containing RTL8153-AD can support a persistent
+/* Devices containing proper chips can support a persistent
  * host system provided MAC address.
  * Examples of this are Dell TB15 and Dell WD15 docks
  */
@@ -1165,13 +1167,23 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
 
 	/* test for -AD variant of RTL8153 */
 	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
-	if ((ocp_data & AD_MASK) != 0x1000)
-		return -ENODEV;
-
-	/* test for MAC address pass-through bit */
-	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
-	if ((ocp_data & PASS_THRU_MASK) != 1)
-		return -ENODEV;
+	if ((ocp_data & AD_MASK) == 0x1000) {
+		/* test for MAC address pass-through bit */
+		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+		if ((ocp_data & PASS_THRU_MASK) != 1) {
+			netif_dbg(tp, probe, tp->netdev,
+				  "No efuse for RTL8153-AD MAC pass through\n");
+			return -ENODEV;
+		}
+	} else {
+		/* test for RTL8153-BND */
+		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+		if ((ocp_data & BND_MASK) == 0) {
+			netif_dbg(tp, probe, tp->netdev,
+				  "Invalid variant for MAC pass through\n");
+			return -ENODEV;
+		}
+	}
 
 	/* returns _AUXMAC_#AABBCCDDEEFF# */
 	status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
@@ -1217,9 +1229,8 @@ static int set_ethernet_addr(struct r8152 *tp)
 	if (tp->version == RTL_VER_01) {
 		ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
 	} else {
-		/* if this is not an RTL8153-AD, no eFuse mac pass thru set,
-		 * or system doesn't provide valid _SB.AMAC this will be
-		 * be expected to non-zero
+		/* if device doesn't support MAC pass through this will
+		 * be expected to be non-zero
 		 */
 		ret = vendor_mac_passthru_addr_read(tp, &sa);
 		if (ret < 0)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 71c3b7b6b1ab..3d773634a8e2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -642,6 +642,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
 	rd->remote_port = port;
 	rd->remote_vni = vni;
 	rd->remote_ifindex = ifindex;
+	rd->offloaded = false;
 	return 1;
 }
 
@@ -3444,6 +3445,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_fdb *f = NULL;
+	bool unregister = false;
 	int err;
 
 	err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3469,12 +3471,11 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 	err = register_netdevice(dev);
 	if (err)
 		goto errout;
+	unregister = true;
 
 	err = rtnl_configure_link(dev, NULL);
-	if (err) {
-		unregister_netdevice(dev);
+	if (err)
 		goto errout;
-	}
 
 	/* notify default fdb entry */
 	if (f)
@@ -3483,9 +3484,16 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 
 	list_add(&vxlan->next, &vn->vxlan_list);
 	return 0;
+
 errout:
+	/* unregister_netdevice() destroys the default FDB entry with deletion
+	 * notification. But the addition notification was not sent yet, so
+	 * destroy the entry by hand here.
+	 */
 	if (f)
 		vxlan_fdb_destroy(vxlan, f, false, false);
+	if (unregister)
+		unregister_netdevice(dev);
 	return err;
 }
 
@@ -3722,7 +3730,6 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 	unsigned long old_age_interval;
 	struct vxlan_rdst old_dst;
 	struct vxlan_config conf;
-	struct vxlan_fdb *f = NULL;
 	int err;
 
 	err = vxlan_nl2conf(tb, data,
@@ -3753,20 +3760,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 					   true);
 
 	if (!vxlan_addr_any(&dst->remote_ip)) {
-		err = vxlan_fdb_create(vxlan, all_zeros_mac,
+		err = vxlan_fdb_update(vxlan, all_zeros_mac,
 				       &dst->remote_ip,
 				       NUD_REACHABLE | NUD_PERMANENT,
+				       NLM_F_APPEND | NLM_F_CREATE,
 				       vxlan->cfg.dst_port,
 				       dst->remote_vni,
 				       dst->remote_vni,
 				       dst->remote_ifindex,
-				       NTF_SELF, &f);
+				       NTF_SELF, false);
 		if (err) {
 			spin_unlock_bh(&vxlan->hash_lock);
 			return err;
 		}
-		vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
-				 RTM_NEWNEIGH, true);
 	}
 	spin_unlock_bh(&vxlan->hash_lock);
 }
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 22cbe9a2e646..399b501f3c3c 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2422,6 +2422,28 @@ static int ath10k_core_reset_rx_filter(struct ath10k *ar)
 	return 0;
 }
 
+static int ath10k_core_compat_services(struct ath10k *ar)
+{
+	struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+	/* all 10.x firmware versions support thermal throttling but don't
+	 * advertise the support via service flags so we have to hardcode
+	 * it here
+	 */
+	switch (fw_file->wmi_op_version) {
+	case ATH10K_FW_WMI_OP_VERSION_10_1:
+	case ATH10K_FW_WMI_OP_VERSION_10_2:
+	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 		      const struct ath10k_fw_components *fw)
 {
@@ -2621,6 +2643,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 		goto err_hif_stop;
 	}
 
+	status = ath10k_core_compat_services(ar);
+	if (status) {
+		ath10k_err(ar, "compat services failed: %d\n", status);
+		goto err_hif_stop;
+	}
+
 	/* Some firmware revisions do not properly set up hardware rx filter
 	 * registers.
 	 *
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 15964b374f68..02988fc378a1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -2578,8 +2578,9 @@ int ath10k_debug_register(struct ath10k *ar)
 	debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
 			    &fops_pktlog_filter);
 
-	debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
-			    &fops_quiet_period);
+	if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+		debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+				    &fops_quiet_period);
 
 	debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
 			    &fops_tpc_stats);
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index aa8978a8d751..fe35edcd3ec8 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -140,6 +140,9 @@ void ath10k_thermal_set_throttling(struct ath10k *ar)
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+		return;
+
 	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
 		return;
 
@@ -165,6 +168,9 @@ int ath10k_thermal_register(struct ath10k *ar)
 	struct device *hwmon_dev;
 	int ret;
 
+	if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+		return 0;
+
 	cdev = thermal_cooling_device_register("ath10k_thermal", ar,
 					       &ath10k_thermal_ops);
 
@@ -216,6 +222,9 @@ err_cooling_destroy:
 
 void ath10k_thermal_unregister(struct ath10k *ar)
 {
+	if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+		return;
+
 	sysfs_remove_link(&ar->dev->kobj, "cooling_device");
 	thermal_cooling_device_unregister(ar->thermal.cdev);
 }
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index bf8a4320c39c..e07e9907e355 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1564,6 +1564,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
 	SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
 	       WMI_SERVICE_SPOOF_MAC_SUPPORT,
 	       WMI_TLV_MAX_SERVICE);
+	SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
+	       WMI_SERVICE_THERM_THROT,
+	       WMI_TLV_MAX_SERVICE);
 }
 
 #undef SVCMAP
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 58e33ab9e0e9..66222eeaba4c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -205,6 +205,7 @@ enum wmi_service {
 	WMI_SERVICE_SPOOF_MAC_SUPPORT,
 	WMI_SERVICE_TX_DATA_ACK_RSSI,
 	WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+	WMI_SERVICE_THERM_THROT,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index c5168abe107c..1a9edd8244cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -885,6 +885,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 	int ret, i, j;
 	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
+	/*
+	 * This command is not supported on earlier firmware versions.
+	 * Unfortunately, we don't have a TLV API flag to rely on, so
+	 * rely on the major version which is in the first byte of
+	 * ucode_ver.
+	 */
+	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+		return 0;
+
 	ret = iwl_mvm_sar_get_wgds_table(mvm);
 	if (ret < 0) {
 		IWL_DEBUG_RADIO(mvm,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index e2addd8b878b..5d75c971004b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
 			    "Send delba to tid=%d, %pM\n",
 			    tid, rx_reor_tbl_ptr->ta);
 			mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
-			return;
+			goto exit;
 		}
 	}
+exit:
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 8e63d14c1e1c..5380fba652cc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
  * There could be holes in the buffer, which are skipped by the function.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 {
 	int pkt_to_send, i;
 	void *rx_tmp_ptr;
+	unsigned long flags;
 
 	pkt_to_send = (start_win > tbl->start_win) ?
 		      min((start_win - tbl->start_win), tbl->win_size) :
 		      tbl->win_size;
 
 	for (i = 0; i < pkt_to_send; ++i) {
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_tmp_ptr = NULL;
 		if (tbl->rx_reorder_ptr[i]) {
 			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 			tbl->rx_reorder_ptr[i] = NULL;
 		}
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (rx_tmp_ptr)
 			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 	}
 
 	tbl->start_win = start_win;
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
  * The start window is adjusted automatically when a hole is located.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 {
 	int i, j, xchg;
 	void *rx_tmp_ptr;
+	unsigned long flags;
 
 	for (i = 0; i < tbl->win_size; ++i) {
-		if (!tbl->rx_reorder_ptr[i])
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		if (!tbl->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			break;
+		}
 		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 		tbl->rx_reorder_ptr[i] = NULL;
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 		}
 	}
 	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
  *
  * The function stops the associated timer and dispatches all the
  * pending packets in the Rx reorder table before deletion.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 
 	del_timer_sync(&tbl->timer_context.timer);
 	tbl->timer_context.timer_is_set = false;
+
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_del(&tbl->list);
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
 	kfree(tbl->rx_reorder_ptr);
 	kfree(tbl);
 
@@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 /*
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl;
+	unsigned long flags;
 
-	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
-		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return tbl;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return NULL;
 }
@@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 		return;
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
-		if (!memcmp(tbl->ta, ta, ETH_ALEN))
+	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			mwifiex_del_rx_reorder_entry(priv, tbl);
+			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		}
+	}
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return;
@@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static int
 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
 	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+	struct mwifiex_private *priv = ctx->priv;
+	unsigned long flags;
 	int i;
 
-	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
-		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return i;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return -1;
 }
@@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
 	struct reorder_tmr_cnxt *ctx =
 		from_timer(ctx, t, timer);
 	int start_win, seq_num;
-	unsigned long flags;
 
 	ctx->timer_is_set = false;
-	spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
 	seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
-	if (seq_num < 0) {
-		spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+	if (seq_num < 0)
 		return;
-	}
 
 	mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
 	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
 	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
 						 start_win);
-	spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
 	 * If we get a TID, ta pair which is already present dispatch all the
 	 * the packets and move the window size until the ssn
 	 */
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (tbl) {
 		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	/* if !tbl then create one */
 	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
 	if (!new_node)
@@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
 	int prev_start_win, start_win, end_win, win_size;
 	u16 pkt_index;
 	bool init_window_shift = false;
-	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (!tbl) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (pkt_type != PKT_TYPE_BAR)
 			mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
 
 	if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
@@ -651,8 +666,6 @@ done:
 	if (!tbl->timer_context.timer_is_set ||
 	    prev_start_win != tbl->start_win)
 		mwifiex_11n_rxreorder_timer_restart(tbl);
-
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	return ret;
 }
 
@@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
 		    peer_mac, tid, initiator);
 
 	if (cleanup_rx_reorder_tbl) {
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 						     peer_mac);
 		if (!tbl) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
 			mwifiex_dbg(priv->adapter, EVENT,
 				    "event: TID, TA not found in table\n");
 			return;
 		}
 		mwifiex_del_rx_reorder_entry(priv, tbl);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	} else {
 		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
 		if (!ptx_tbl) {
@@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 	int tid, win_size;
 	struct mwifiex_rx_reorder_tbl *tbl;
 	uint16_t block_ack_param_set;
-	unsigned long flags;
 
 	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
@@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 		mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
 			    add_ba_rsp->peer_mac_addr, tid);
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 						     add_ba_rsp->peer_mac_addr);
 		if (tbl)
 			mwifiex_del_rx_reorder_entry(priv, tbl);
 
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return 0;
 	}
 
 	win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
 		   >> BLOCKACKPARAM_WINSIZE_POS;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 					     add_ba_rsp->peer_mac_addr);
 	if (tbl) {
@@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 		else
 			tbl->amsdu = false;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	mwifiex_dbg(priv->adapter, CMD,
 		    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
-				 &priv->rx_reorder_tbl_ptr, list)
+				 &priv->rx_reorder_tbl_ptr, list) {
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	}
 	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
 	int tlv_buf_left = len;
 	int ret;
 	u8 *tmp;
-	unsigned long flags;
 
 	mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
 			 event_buf, len);
@@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
 			    tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
 			    tlv_bitmap_len);
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_reor_tbl_ptr =
 			mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
 						       tlv_rxba->mac);
 		if (!rx_reor_tbl_ptr) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "Can not find rx_reorder_tbl!");
 			return;
 		}
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 		for (i = 0; i < tlv_bitmap_len; i++) {
 			for (j = 0 ; j < 8; j++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index a83c5afc256a..5ce85d5727e4 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
 		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 	}
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	if (!priv->ap_11n_enabled ||
 	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
 	     (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
 		ret = mwifiex_handle_uap_rx_forward(priv, skb);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return ret;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	/* Reorder and send to kernel */
 	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 7cbce03aa65b..aa426b838ffa 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -400,7 +400,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
 
 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
 		struct ieee80211_txq *txq = sta->txq[i];
-		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+		struct mt76_txq *mtxq;
+
+		if (!txq)
+			continue;
+
+		mtxq = (struct mt76_txq *)txq->drv_priv;
 
 		spin_lock_bh(&mtxq->hwq->lock);
 		mtxq->send_bar = mtxq->aggr && send_bar;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index f4122c8fdd97..ef9b502ce576 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 	if (rtl_c2h_fast_cmd(hw, skb)) {
 		rtl_c2h_content_parsing(hw, skb);
+		kfree_skb(skb);
 		return;
 	}
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a8303afa15f1..c914c24f880b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -903,7 +903,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-			BUG_ON(pull_to <= skb_headlen(skb));
+			BUG_ON(pull_to < skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index a90a9194ac4a..fed29de783e0 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1064,7 +1064,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
 		.regs		= aer_regs,
 	};
 
-	if (kfifo_in_spinlocked(&aer_recover_ring, &entry, sizeof(entry),
+	if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
 				 &aer_recover_ring_lock))
 		schedule_work(&aer_recover_work);
 	else
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 53d449076dee..ea87d739f534 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -191,7 +191,8 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
 	case PIN_CONFIG_BIAS_DISABLE:
 		dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
 
-		meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+		meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
+				       &bit);
 		ret = regmap_update_bits(pc->reg_pullen, reg,
 					 BIT(bit), 0);
 		if (ret)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index 6838b38555a1..1bfb0ae6b387 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -33,7 +33,7 @@ enum {
 	}
 
 
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
 	{					        \
 		.name = "gpio" #id,			\
 		.pins = gpio##id##_pins,		\
@@ -51,11 +51,12 @@ enum {
 			msm_mux_##f9			\
 		},				        \
 		.nfuncs = 10,				\
-		.ctl_reg = base + REG_SIZE * id,	\
-		.io_reg = base + 0x4 + REG_SIZE * id,	\
-		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,	\
-		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
-		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
+		.ctl_reg = REG_SIZE * id,		\
+		.io_reg = 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = 0x8 + REG_SIZE * id,	\
+		.intr_status_reg = 0xc + REG_SIZE * id,	\
+		.intr_target_reg = 0x8 + REG_SIZE * id,	\
+		.tile = _tile,				\
 		.mux_bit = 2,				\
 		.pull_bit = 0,				\
 		.drv_bit = 6,				\
@@ -82,6 +83,7 @@ enum {
 		.intr_cfg_reg = 0,			\
 		.intr_status_reg = 0,			\
 		.intr_target_reg = 0,			\
+		.tile = NORTH,				\
 		.mux_bit = -1,				\
 		.pull_bit = pull,			\
 		.drv_bit = drv,				\
@@ -1397,13 +1399,13 @@ static const struct msm_pingroup sdm660_groups[] = {
 	PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _),
 	PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _),
 	PINGROUP(113, SOUTH, _, _, _, _, _, _, _, _, _),
-	SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
-	SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
-	SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
-	SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
-	SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
-	SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
-	SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
+	SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+	SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x9b000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x9b000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x9b000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
 };
 
 static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 6624499eae72..4ada80317a3b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),	/* PH_EINT11 */
+		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),	/* PH_EINT11 */
 };
 
 static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index cd160f2ec75d..bcd30e2374f1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2364,7 +2364,7 @@ static int _bnx2fc_create(struct net_device *netdev,
 	if (!interface) {
 		printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
 		rc = -ENOMEM;
-		goto ifput_err;
+		goto netdev_err;
 	}
 
 	if (is_vlan_dev(netdev)) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b658b9a5eb1e..d0ecc729a90a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4886,10 +4886,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 		fcport->d_id = e->u.new_sess.id;
 		fcport->flags |= FCF_FABRIC_DEVICE;
 		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
-		if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+		if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
 			fcport->fc4_type = FC4_TYPE_FCP_SCSI;
 
-		if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+		if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
 			fcport->fc4_type = FC4_TYPE_OTHER;
 			fcport->fc4f_nvme = FC4_TYPE_NVME;
 		}
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
index a7a34e89c42d..3252efa422f9 100644
--- a/drivers/staging/media/sunxi/cedrus/Kconfig
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_SUNXI_CEDRUS
 	depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
 	depends on HAS_DMA
 	depends on OF
+	depends on MEDIA_CONTROLLER_REQUEST_API
 	select SUNXI_SRAM
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_MEM2MEM_DEV
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 32adbcbe6175..07520a2ce179 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -255,10 +255,10 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
 
 	res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
 	dev->base = devm_ioremap_resource(dev->dev, res);
-	if (!dev->base) {
+	if (IS_ERR(dev->base)) {
 		v4l2_err(&dev->v4l2_dev, "Failed to map registers\n");
 
-		ret = -ENOMEM;
+		ret = PTR_ERR(dev->base);
 		goto err_sram;
 	}
 
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index c4111a98f1a7..2d26ae80e202 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -424,7 +424,7 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
 	struct platform_device *pdev = data->pdev;
 	struct device *dev = &pdev->dev;
 
-	data->nr_sensors = 2;
+	data->nr_sensors = 1;
 
 	data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
 				    data->nr_sensors, GFP_KERNEL);
@@ -589,7 +589,7 @@ static int hisi_thermal_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	ret = platform_get_irq_byname(pdev, sensor->irq_name);
+	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 47623da0f91b..bbd73c5a4a4e 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -241,8 +241,8 @@ static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
 	sensor->t0 = TS1_T0_VAL1;
 
 	/* Retrieve fmt0 and put it on Hz */
-	sensor->fmt0 = ADJUST * readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET)
-		       & TS1_FMT0_MASK;
+	sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base +
+				 DTS_T0VALR1_OFFSET) & TS1_FMT0_MASK);
 
 	/* Retrieve ramp coefficient */
 	sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
@@ -532,6 +532,10 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
532 if (ret) 532 if (ret)
533 return ret; 533 return ret;
534 534
535 ret = stm_thermal_read_factory_settings(sensor);
536 if (ret)
537 goto thermal_unprepare;
538
535 ret = stm_thermal_calibration(sensor); 539 ret = stm_thermal_calibration(sensor);
536 if (ret) 540 if (ret)
537 goto thermal_unprepare; 541 goto thermal_unprepare;
@@ -636,10 +640,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
636 /* Populate sensor */ 640 /* Populate sensor */
637 sensor->base = base; 641 sensor->base = base;
638 642
639 ret = stm_thermal_read_factory_settings(sensor);
640 if (ret)
641 return ret;
642
643 sensor->clk = devm_clk_get(&pdev->dev, "pclk"); 643 sensor->clk = devm_clk_get(&pdev->dev, "pclk");
644 if (IS_ERR(sensor->clk)) { 644 if (IS_ERR(sensor->clk)) {
645 dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n", 645 dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f776b3eafb96..3f779d25ec0c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -552,30 +552,11 @@ static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
552 */ 552 */
553static void serial8250_clear_fifos(struct uart_8250_port *p) 553static void serial8250_clear_fifos(struct uart_8250_port *p)
554{ 554{
555 unsigned char fcr;
556 unsigned char clr_mask = UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT;
557
558 if (p->capabilities & UART_CAP_FIFO) { 555 if (p->capabilities & UART_CAP_FIFO) {
559 /* 556 serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
560 * Make sure to avoid changing FCR[7:3] and ENABLE_FIFO bits. 557 serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
561 * In case ENABLE_FIFO is not set, there is nothing to flush 558 UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
562 * so just return. Furthermore, on certain implementations of 559 serial_out(p, UART_FCR, 0);
563 * the 8250 core, the FCR[7:3] bits may only be changed under
564 * specific conditions and changing them if those conditions
565 * are not met can have nasty side effects. One such core is
566 * the 8250-omap present in TI AM335x.
567 */
568 fcr = serial_in(p, UART_FCR);
569
570 /* FIFO is not enabled, there's nothing to clear. */
571 if (!(fcr & UART_FCR_ENABLE_FIFO))
572 return;
573
574 fcr |= clr_mask;
575 serial_out(p, UART_FCR, fcr);
576
577 fcr &= ~clr_mask;
578 serial_out(p, UART_FCR, fcr);
579 } 560 }
580} 561}
581 562
@@ -1467,7 +1448,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
1467 * Enable previously disabled RX interrupts. 1448 * Enable previously disabled RX interrupts.
1468 */ 1449 */
1469 if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { 1450 if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
1470 serial8250_clear_fifos(p); 1451 serial8250_clear_and_reinit_fifos(p);
1471 1452
1472 p->ier |= UART_IER_RLSI | UART_IER_RDI; 1453 p->ier |= UART_IER_RLSI | UART_IER_RDI;
1473 serial_port_out(&p->port, UART_IER, p->ier); 1454 serial_port_out(&p->port, UART_IER, p->ier);
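
The serial8250_clear_fifos() revert restores the only FCR sequence that is safe on a plain 16550: FCR is a write-only register that shares its address with IIR, so the deleted read-modify-write variant was really reading the interrupt ID register and writing that back into FCR. A sketch of the restored three-write sequence, with the constraint spelled out (helper name hypothetical, body mirrors the restored driver code):

    /* FCR reads back as IIR on classic 8250/16550 parts, so the value
     * written must be reconstructed, never read-modify-written. */
    static void foo_clear_fifos(struct uart_8250_port *p)
    {
        serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
        serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
                   UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
        serial_out(p, UART_FCR, 0);    /* FIFOs stay off until re-init */
    }

Callers that need the FIFOs running again afterwards use serial8250_clear_and_reinit_fifos(), which is exactly what the RS485 hunk switches to.
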
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index c2493d011225..3c5169eb23f5 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -204,9 +204,11 @@ hv_uio_open(struct uio_info *info, struct inode *inode)
204 if (atomic_inc_return(&pdata->refcnt) != 1) 204 if (atomic_inc_return(&pdata->refcnt) != 1)
205 return 0; 205 return 0;
206 206
207 vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
208 vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
209
207 ret = vmbus_connect_ring(dev->channel, 210 ret = vmbus_connect_ring(dev->channel,
208 hv_uio_channel_cb, dev->channel); 211 hv_uio_channel_cb, dev->channel);
209
210 if (ret == 0) 212 if (ret == 0)
211 dev->channel->inbound.ring_buffer->interrupt_mask = 1; 213 dev->channel->inbound.ring_buffer->interrupt_mask = 1;
212 else 214 else
@@ -334,9 +336,6 @@ hv_uio_probe(struct hv_device *dev,
334 goto fail_close; 336 goto fail_close;
335 } 337 }
336 338
337 vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
338 vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
339
340 ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr); 339 ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
341 if (ret) 340 if (ret)
342 dev_notice(&dev->device, 341 dev_notice(&dev->device,
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 94aca1b5ac8a..01b5818a4be5 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1507,7 +1507,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1507 portsc_buf[port_index] = 0; 1507 portsc_buf[port_index] = 0;
1508 1508
1509 /* Bail out if a USB3 port has a new device in link training */ 1509 /* Bail out if a USB3 port has a new device in link training */
1510 if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) { 1510 if ((hcd->speed >= HCD_USB3) &&
1511 (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1511 bus_state->bus_suspended = 0; 1512 bus_state->bus_suspended = 0;
1512 spin_unlock_irqrestore(&xhci->lock, flags); 1513 spin_unlock_irqrestore(&xhci->lock, flags);
1513 xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); 1514 xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index c3515bad5dbb..011dd45f8718 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1863,6 +1863,8 @@ struct xhci_hcd {
1863 unsigned sw_lpm_support:1; 1863 unsigned sw_lpm_support:1;
1864 /* support xHCI 1.0 spec USB2 hardware LPM */ 1864 /* support xHCI 1.0 spec USB2 hardware LPM */
1865 unsigned hw_lpm_support:1; 1865 unsigned hw_lpm_support:1;
1866 /* Broken Suspend flag for SNPS Suspend resume issue */
1867 unsigned broken_suspend:1;
1866 /* cached usb2 extended protocol capabilities */ 1868 /* cached usb2 extended protocol capabilities */
1867 u32 *ext_caps; 1869 u32 *ext_caps;
1868 unsigned int num_ext_caps; 1870 unsigned int num_ext_caps;
@@ -1880,8 +1882,6 @@ struct xhci_hcd {
1880 void *dbc; 1882 void *dbc;
1881 /* platform-specific data -- must come last */ 1883 /* platform-specific data -- must come last */
1882 unsigned long priv[0] __aligned(sizeof(s64)); 1884 unsigned long priv[0] __aligned(sizeof(s64));
1883 /* Broken Suspend flag for SNPS Suspend resume issue */
1884 u8 broken_suspend;
1885}; 1885};
1886 1886
1887/* Platform specific overrides to generic XHCI hc_driver ops */ 1887/* Platform specific overrides to generic XHCI hc_driver ops */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e24ff16d4147..1ce27f3ff7a7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1164,6 +1164,10 @@ static const struct usb_device_id option_ids[] = {
1164 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, 1164 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
1165 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), 1165 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
1166 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, 1166 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1167 { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
1168 .driver_info = NCTRL(0) | RSVD(1) },
1169 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
1170 .driver_info = NCTRL(0) },
1167 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 1171 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1168 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), 1172 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
1169 .driver_info = RSVD(1) }, 1173 .driver_info = RSVD(1) },
@@ -1328,6 +1332,7 @@ static const struct usb_device_id option_ids[] = {
1328 .driver_info = RSVD(4) }, 1332 .driver_info = RSVD(4) },
1329 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, 1333 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
1330 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, 1334 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
1335 { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
1331 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), 1336 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
1332 .driver_info = RSVD(4) }, 1337 .driver_info = RSVD(4) },
1333 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), 1338 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1531,6 +1536,7 @@ static const struct usb_device_id option_ids[] = {
1531 .driver_info = RSVD(2) }, 1536 .driver_info = RSVD(2) },
1532 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ 1537 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1533 .driver_info = RSVD(2) }, 1538 .driver_info = RSVD(2) },
1539 { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
1534 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, 1540 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1535 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, 1541 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1536 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, 1542 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1758,6 +1764,7 @@ static const struct usb_device_id option_ids[] = {
1758 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 1764 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1759 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), 1765 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1760 .driver_info = RSVD(5) | RSVD(6) }, 1766 .driver_info = RSVD(5) | RSVD(6) },
1767 { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
1761 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), 1768 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1762 .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, 1769 .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
1763 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), 1770 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1940,7 +1947,14 @@ static const struct usb_device_id option_ids[] = {
1940 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, 1947 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
1941 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, 1948 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
1942 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, 1949 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
1943 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */ 1950 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1951 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
1952 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
1953 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
1954 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
1955 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
1956 { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
1957 .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
1944 { } /* Terminating entry */ 1958 { } /* Terminating entry */
1945}; 1959};
1946MODULE_DEVICE_TABLE(usb, option_ids); 1960MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d919284f103b..02b699a66868 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -517,7 +517,13 @@ static void vhost_net_busy_poll(struct vhost_net *net,
517 struct socket *sock; 517 struct socket *sock;
518 struct vhost_virtqueue *vq = poll_rx ? tvq : rvq; 518 struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
519 519
520 mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); 520 /* Try to hold the vq mutex of the paired virtqueue. We can't
521 * use mutex_lock() here since we cannot guarantee a
522 * consistent lock ordering.
523 */
524 if (!mutex_trylock(&vq->mutex))
525 return;
526
521 vhost_disable_notify(&net->dev, vq); 527 vhost_disable_notify(&net->dev, vq);
522 sock = rvq->private_data; 528 sock = rvq->private_data;
523 529
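
The trylock above trades completeness for safety: busy polling may grab the RX and TX vq mutexes in either order depending on which side initiates, so committing to mutex_lock() could deadlock against the opposite path. Backing off is harmless because busy poll is only an optimization. A generic sketch of the pattern (names hypothetical):

    /* Lock a peer resource without establishing a global order: if the
     * peer lock is contended, skip the optimization rather than risk
     * an ABBA deadlock with a thread locking in the opposite order. */
    static bool foo_lock_pair(struct mutex *mine, struct mutex *peer)
    {
        mutex_lock(mine);
        if (!mutex_trylock(peer)) {
            mutex_unlock(mine);
            return false;    /* caller proceeds without the fast path */
        }
        return true;
    }
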
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 6b98d8e3a5bf..55e5aa662ad5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -295,11 +295,8 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
295{ 295{
296 int i; 296 int i;
297 297
298 for (i = 0; i < d->nvqs; ++i) { 298 for (i = 0; i < d->nvqs; ++i)
299 mutex_lock(&d->vqs[i]->mutex);
300 __vhost_vq_meta_reset(d->vqs[i]); 299 __vhost_vq_meta_reset(d->vqs[i]);
301 mutex_unlock(&d->vqs[i]->mutex);
302 }
303} 300}
304 301
305static void vhost_vq_reset(struct vhost_dev *dev, 302static void vhost_vq_reset(struct vhost_dev *dev,
@@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
895#define vhost_get_used(vq, x, ptr) \ 892#define vhost_get_used(vq, x, ptr) \
896 vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) 893 vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
897 894
895static void vhost_dev_lock_vqs(struct vhost_dev *d)
896{
897 int i = 0;
898 for (i = 0; i < d->nvqs; ++i)
899 mutex_lock_nested(&d->vqs[i]->mutex, i);
900}
901
902static void vhost_dev_unlock_vqs(struct vhost_dev *d)
903{
904 int i = 0;
905 for (i = 0; i < d->nvqs; ++i)
906 mutex_unlock(&d->vqs[i]->mutex);
907}
908
898static int vhost_new_umem_range(struct vhost_umem *umem, 909static int vhost_new_umem_range(struct vhost_umem *umem,
899 u64 start, u64 size, u64 end, 910 u64 start, u64 size, u64 end,
900 u64 userspace_addr, int perm) 911 u64 userspace_addr, int perm)
@@ -976,6 +987,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
976 int ret = 0; 987 int ret = 0;
977 988
978 mutex_lock(&dev->mutex); 989 mutex_lock(&dev->mutex);
990 vhost_dev_lock_vqs(dev);
979 switch (msg->type) { 991 switch (msg->type) {
980 case VHOST_IOTLB_UPDATE: 992 case VHOST_IOTLB_UPDATE:
981 if (!dev->iotlb) { 993 if (!dev->iotlb) {
@@ -1009,6 +1021,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1009 break; 1021 break;
1010 } 1022 }
1011 1023
1024 vhost_dev_unlock_vqs(dev);
1012 mutex_unlock(&dev->mutex); 1025 mutex_unlock(&dev->mutex);
1013 1026
1014 return ret; 1027 return ret;
@@ -2220,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2220 return -EFAULT; 2233 return -EFAULT;
2221 } 2234 }
2222 if (unlikely(vq->log_used)) { 2235 if (unlikely(vq->log_used)) {
2236 /* Make sure used idx is seen before log. */
2237 smp_wmb();
2223 /* Log used index update. */ 2238 /* Log used index update. */
2224 log_write(vq->log_base, 2239 log_write(vq->log_base,
2225 vq->log_addr + offsetof(struct vring_used, idx), 2240 vq->log_addr + offsetof(struct vring_used, idx),
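
Two ordering rules are enforced in the vhost.c hunks above. First, vhost_dev_lock_vqs() may hold several vq mutexes at once, and mutex_lock_nested(&m, i) passes the array index as the lockdep subclass so deliberate nesting of same-class locks is not flagged as recursion (lockdep supports only 8 subclasses, which bounds how many locks can be stacked this way). Second, the smp_wmb() makes the used-index store visible before the log entry that advertises it. A standalone sketch of the ordered-acquisition half, under those assumptions:

    #include <linux/mutex.h>

    #define FOO_NR_QUEUES 4
    static struct mutex foo_lock[FOO_NR_QUEUES];

    /* Always acquire in ascending index order; the index doubles as
     * the lockdep subclass for the intentionally nested acquisition. */
    static void foo_lock_all(void)
    {
        int i;

        for (i = 0; i < FOO_NR_QUEUES; i++)
            mutex_lock_nested(&foo_lock[i], i);
    }

    static void foo_unlock_all(void)
    {
        int i;

        for (i = 0; i < FOO_NR_QUEUES; i++)
            mutex_unlock(&foo_lock[i]);
    }
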
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 678b27063198..f9ef0673a083 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -562,7 +562,30 @@ static int pwm_backlight_probe(struct platform_device *pdev)
562 goto err_alloc; 562 goto err_alloc;
563 } 563 }
564 564
565 if (!data->levels) { 565 if (data->levels) {
566 /*
 567		 * For the DT case, data->levels is filled only when the
 568		 * brightness-levels property is defined. For the non-DT case,
 569		 * data->levels can come from platform data, though that is unusual.
570 */
571 for (i = 0; i <= data->max_brightness; i++) {
572 if (data->levels[i] > pb->scale)
573 pb->scale = data->levels[i];
574
575 pb->levels = data->levels;
576 }
577 } else if (!data->max_brightness) {
578 /*
579 * If no brightness levels are provided and max_brightness is
580 * not set, use the default brightness table. For the DT case,
 581		 * max_brightness is set to 0 when brightness-levels is not
582 * specified. For the non-DT case, max_brightness is usually
583 * set to some value.
584 */
585
586 /* Get the PWM period (in nanoseconds) */
587 pwm_get_state(pb->pwm, &state);
588
566 ret = pwm_backlight_brightness_default(&pdev->dev, data, 589 ret = pwm_backlight_brightness_default(&pdev->dev, data,
567 state.period); 590 state.period);
568 if (ret < 0) { 591 if (ret < 0) {
@@ -570,13 +593,19 @@ static int pwm_backlight_probe(struct platform_device *pdev)
570 "failed to setup default brightness table\n"); 593 "failed to setup default brightness table\n");
571 goto err_alloc; 594 goto err_alloc;
572 } 595 }
573 }
574 596
575 for (i = 0; i <= data->max_brightness; i++) { 597 for (i = 0; i <= data->max_brightness; i++) {
576 if (data->levels[i] > pb->scale) 598 if (data->levels[i] > pb->scale)
577 pb->scale = data->levels[i]; 599 pb->scale = data->levels[i];
578 600
579 pb->levels = data->levels; 601 pb->levels = data->levels;
602 }
603 } else {
604 /*
605 * That only happens for the non-DT case, where platform data
606 * sets the max_brightness value.
607 */
608 pb->scale = data->max_brightness;
580 } 609 }
581 610
582 pb->lth_brightness = data->lth_brightness * (state.period / pb->scale); 611 pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
diff --git a/fs/aio.c b/fs/aio.c
index 97f983592925..aac9659381d2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -45,6 +45,7 @@
45 45
46#include <asm/kmap_types.h> 46#include <asm/kmap_types.h>
47#include <linux/uaccess.h> 47#include <linux/uaccess.h>
48#include <linux/nospec.h>
48 49
49#include "internal.h" 50#include "internal.h"
50 51
@@ -1038,6 +1039,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1038 if (!table || id >= table->nr) 1039 if (!table || id >= table->nr)
1039 goto out; 1040 goto out;
1040 1041
1042 id = array_index_nospec(id, table->nr);
1041 ctx = rcu_dereference(table->table[id]); 1043 ctx = rcu_dereference(table->table[id]);
1042 if (ctx && ctx->user_id == ctx_id) { 1044 if (ctx && ctx->user_id == ctx_id) {
1043 if (percpu_ref_tryget_live(&ctx->users)) 1045 if (percpu_ref_tryget_live(&ctx->users))
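
The aio fix is the standard Spectre-v1 mitigation: the id >= table->nr branch can be mispredicted, so the CPU may speculatively load table->table[id] out of bounds anyway; array_index_nospec() clamps the index to 0 under that misspeculation so no attacker-chosen address is dereferenced. A minimal sketch of the pattern:

    #include <linux/nospec.h>

    /* Hypothetical table lookup: the bounds check covers the
     * architectural path, array_index_nospec() the speculative one. */
    static void *foo_lookup(void **table, unsigned long nr, unsigned long id)
    {
        if (id >= nr)
            return NULL;
        id = array_index_nospec(id, nr);    /* 0 if speculatively OOB */
        return table[id];
    }
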
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index b5ecd6f50360..4e9a7cc488da 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -563,8 +563,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
563 seq_puts(m, ",noacl"); 563 seq_puts(m, ",noacl");
564#endif 564#endif
565 565
566 if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) 566 if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
567 seq_puts(m, ",nocopyfrom"); 567 seq_puts(m, ",copyfrom");
568 568
569 if (fsopt->mds_namespace) 569 if (fsopt->mds_namespace)
570 seq_show_option(m, "mds_namespace", fsopt->mds_namespace); 570 seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index c005a5400f2e..79a265ba9200 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -42,7 +42,9 @@
42#define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */ 42#define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */
43#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */ 43#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */
44 44
45#define CEPH_MOUNT_OPT_DEFAULT CEPH_MOUNT_OPT_DCACHE 45#define CEPH_MOUNT_OPT_DEFAULT \
46 (CEPH_MOUNT_OPT_DCACHE | \
47 CEPH_MOUNT_OPT_NOCOPYFROM)
46 48
47#define ceph_set_mount_opt(fsc, opt) \ 49#define ceph_set_mount_opt(fsc, opt) \
48 (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt; 50 (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 47395b0c3b35..e909678afa2d 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1119,8 +1119,10 @@ static int fuse_permission(struct inode *inode, int mask)
1119 if (fc->default_permissions || 1119 if (fc->default_permissions ||
1120 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { 1120 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1121 struct fuse_inode *fi = get_fuse_inode(inode); 1121 struct fuse_inode *fi = get_fuse_inode(inode);
1122 u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
1122 1123
1123 if (time_before64(fi->i_time, get_jiffies_64())) { 1124 if (perm_mask & READ_ONCE(fi->inval_mask) ||
1125 time_before64(fi->i_time, get_jiffies_64())) {
1124 refreshed = true; 1126 refreshed = true;
1125 1127
1126 err = fuse_perm_getattr(inode, mask); 1128 err = fuse_perm_getattr(inode, mask);
@@ -1241,7 +1243,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
1241 1243
1242static int fuse_dir_release(struct inode *inode, struct file *file) 1244static int fuse_dir_release(struct inode *inode, struct file *file)
1243{ 1245{
1244 fuse_release_common(file, FUSE_RELEASEDIR); 1246 fuse_release_common(file, true);
1245 1247
1246 return 0; 1248 return 0;
1247} 1249}
@@ -1249,7 +1251,25 @@ static int fuse_dir_release(struct inode *inode, struct file *file)
1249static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, 1251static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
1250 int datasync) 1252 int datasync)
1251{ 1253{
1252 return fuse_fsync_common(file, start, end, datasync, 1); 1254 struct inode *inode = file->f_mapping->host;
1255 struct fuse_conn *fc = get_fuse_conn(inode);
1256 int err;
1257
1258 if (is_bad_inode(inode))
1259 return -EIO;
1260
1261 if (fc->no_fsyncdir)
1262 return 0;
1263
1264 inode_lock(inode);
1265 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
1266 if (err == -ENOSYS) {
1267 fc->no_fsyncdir = 1;
1268 err = 0;
1269 }
1270 inode_unlock(inode);
1271
1272 return err;
1253} 1273}
1254 1274
1255static long fuse_dir_ioctl(struct file *file, unsigned int cmd, 1275static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b52f9baaa3e7..ffaffe18352a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -89,12 +89,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
89 iput(req->misc.release.inode); 89 iput(req->misc.release.inode);
90} 90}
91 91
92static void fuse_file_put(struct fuse_file *ff, bool sync) 92static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
93{ 93{
94 if (refcount_dec_and_test(&ff->count)) { 94 if (refcount_dec_and_test(&ff->count)) {
95 struct fuse_req *req = ff->reserved_req; 95 struct fuse_req *req = ff->reserved_req;
96 96
97 if (ff->fc->no_open) { 97 if (ff->fc->no_open && !isdir) {
98 /* 98 /*
99 * Drop the release request when client does not 99 * Drop the release request when client does not
100 * implement 'open' 100 * implement 'open'
@@ -247,10 +247,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
247 req->in.args[0].value = inarg; 247 req->in.args[0].value = inarg;
248} 248}
249 249
250void fuse_release_common(struct file *file, int opcode) 250void fuse_release_common(struct file *file, bool isdir)
251{ 251{
252 struct fuse_file *ff = file->private_data; 252 struct fuse_file *ff = file->private_data;
253 struct fuse_req *req = ff->reserved_req; 253 struct fuse_req *req = ff->reserved_req;
254 int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
254 255
255 fuse_prepare_release(ff, file->f_flags, opcode); 256 fuse_prepare_release(ff, file->f_flags, opcode);
256 257
@@ -272,7 +273,7 @@ void fuse_release_common(struct file *file, int opcode)
272 * synchronous RELEASE is allowed (and desirable) in this case 273 * synchronous RELEASE is allowed (and desirable) in this case
273 * because the server can be trusted not to screw up. 274 * because the server can be trusted not to screw up.
274 */ 275 */
275 fuse_file_put(ff, ff->fc->destroy_req != NULL); 276 fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
276} 277}
277 278
278static int fuse_open(struct inode *inode, struct file *file) 279static int fuse_open(struct inode *inode, struct file *file)
@@ -288,7 +289,7 @@ static int fuse_release(struct inode *inode, struct file *file)
288 if (fc->writeback_cache) 289 if (fc->writeback_cache)
289 write_inode_now(inode, 1); 290 write_inode_now(inode, 1);
290 291
291 fuse_release_common(file, FUSE_RELEASE); 292 fuse_release_common(file, false);
292 293
293 /* return value is ignored by VFS */ 294 /* return value is ignored by VFS */
294 return 0; 295 return 0;
@@ -302,7 +303,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
302 * iput(NULL) is a no-op and since the refcount is 1 and everything's 303 * iput(NULL) is a no-op and since the refcount is 1 and everything's
303 * synchronous, we are fine with not doing igrab() here" 304 * synchronous, we are fine with not doing igrab() here"
304 */ 305 */
305 fuse_file_put(ff, true); 306 fuse_file_put(ff, true, false);
306} 307}
307EXPORT_SYMBOL_GPL(fuse_sync_release); 308EXPORT_SYMBOL_GPL(fuse_sync_release);
308 309
@@ -441,13 +442,30 @@ static int fuse_flush(struct file *file, fl_owner_t id)
441} 442}
442 443
443int fuse_fsync_common(struct file *file, loff_t start, loff_t end, 444int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
444 int datasync, int isdir) 445 int datasync, int opcode)
445{ 446{
446 struct inode *inode = file->f_mapping->host; 447 struct inode *inode = file->f_mapping->host;
447 struct fuse_conn *fc = get_fuse_conn(inode); 448 struct fuse_conn *fc = get_fuse_conn(inode);
448 struct fuse_file *ff = file->private_data; 449 struct fuse_file *ff = file->private_data;
449 FUSE_ARGS(args); 450 FUSE_ARGS(args);
450 struct fuse_fsync_in inarg; 451 struct fuse_fsync_in inarg;
452
453 memset(&inarg, 0, sizeof(inarg));
454 inarg.fh = ff->fh;
455 inarg.fsync_flags = datasync ? 1 : 0;
456 args.in.h.opcode = opcode;
457 args.in.h.nodeid = get_node_id(inode);
458 args.in.numargs = 1;
459 args.in.args[0].size = sizeof(inarg);
460 args.in.args[0].value = &inarg;
461 return fuse_simple_request(fc, &args);
462}
463
464static int fuse_fsync(struct file *file, loff_t start, loff_t end,
465 int datasync)
466{
467 struct inode *inode = file->f_mapping->host;
468 struct fuse_conn *fc = get_fuse_conn(inode);
451 int err; 469 int err;
452 470
453 if (is_bad_inode(inode)) 471 if (is_bad_inode(inode))
@@ -479,34 +497,18 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
479 if (err) 497 if (err)
480 goto out; 498 goto out;
481 499
482 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) 500 if (fc->no_fsync)
483 goto out; 501 goto out;
484 502
485 memset(&inarg, 0, sizeof(inarg)); 503 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
486 inarg.fh = ff->fh;
487 inarg.fsync_flags = datasync ? 1 : 0;
488 args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
489 args.in.h.nodeid = get_node_id(inode);
490 args.in.numargs = 1;
491 args.in.args[0].size = sizeof(inarg);
492 args.in.args[0].value = &inarg;
493 err = fuse_simple_request(fc, &args);
494 if (err == -ENOSYS) { 504 if (err == -ENOSYS) {
495 if (isdir) 505 fc->no_fsync = 1;
496 fc->no_fsyncdir = 1;
497 else
498 fc->no_fsync = 1;
499 err = 0; 506 err = 0;
500 } 507 }
501out: 508out:
502 inode_unlock(inode); 509 inode_unlock(inode);
503 return err;
504}
505 510
506static int fuse_fsync(struct file *file, loff_t start, loff_t end, 511 return err;
507 int datasync)
508{
509 return fuse_fsync_common(file, start, end, datasync, 0);
510} 512}
511 513
512void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, 514void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
@@ -807,7 +809,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
807 put_page(page); 809 put_page(page);
808 } 810 }
809 if (req->ff) 811 if (req->ff)
810 fuse_file_put(req->ff, false); 812 fuse_file_put(req->ff, false, false);
811} 813}
812 814
813static void fuse_send_readpages(struct fuse_req *req, struct file *file) 815static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1460,7 +1462,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
1460 __free_page(req->pages[i]); 1462 __free_page(req->pages[i]);
1461 1463
1462 if (req->ff) 1464 if (req->ff)
1463 fuse_file_put(req->ff, false); 1465 fuse_file_put(req->ff, false, false);
1464} 1466}
1465 1467
1466static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) 1468static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1619,7 +1621,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
1619 ff = __fuse_write_file_get(fc, fi); 1621 ff = __fuse_write_file_get(fc, fi);
1620 err = fuse_flush_times(inode, ff); 1622 err = fuse_flush_times(inode, ff);
1621 if (ff) 1623 if (ff)
1622 fuse_file_put(ff, 0); 1624 fuse_file_put(ff, false, false);
1623 1625
1624 return err; 1626 return err;
1625} 1627}
@@ -1940,7 +1942,7 @@ static int fuse_writepages(struct address_space *mapping,
1940 err = 0; 1942 err = 0;
1941 } 1943 }
1942 if (data.ff) 1944 if (data.ff)
1943 fuse_file_put(data.ff, false); 1945 fuse_file_put(data.ff, false, false);
1944 1946
1945 kfree(data.orig_pages); 1947 kfree(data.orig_pages);
1946out: 1948out:
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e9f712e81c7d..2f2c92e6f8cb 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -822,13 +822,13 @@ void fuse_sync_release(struct fuse_file *ff, int flags);
822/** 822/**
823 * Send RELEASE or RELEASEDIR request 823 * Send RELEASE or RELEASEDIR request
824 */ 824 */
825void fuse_release_common(struct file *file, int opcode); 825void fuse_release_common(struct file *file, bool isdir);
826 826
827/** 827/**
828 * Send FSYNC or FSYNCDIR request 828 * Send FSYNC or FSYNCDIR request
829 */ 829 */
830int fuse_fsync_common(struct file *file, loff_t start, loff_t end, 830int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
831 int datasync, int isdir); 831 int datasync, int opcode);
832 832
833/** 833/**
834 * Notify poll wakeup 834 * Notify poll wakeup
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 0b94b23b02d4..568abed20eb2 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -115,7 +115,7 @@ static void fuse_i_callback(struct rcu_head *head)
115static void fuse_destroy_inode(struct inode *inode) 115static void fuse_destroy_inode(struct inode *inode)
116{ 116{
117 struct fuse_inode *fi = get_fuse_inode(inode); 117 struct fuse_inode *fi = get_fuse_inode(inode);
118 if (S_ISREG(inode->i_mode)) { 118 if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) {
119 WARN_ON(!list_empty(&fi->write_files)); 119 WARN_ON(!list_empty(&fi->write_files));
120 WARN_ON(!list_empty(&fi->queued_writes)); 120 WARN_ON(!list_empty(&fi->queued_writes));
121 } 121 }
@@ -1068,6 +1068,7 @@ void fuse_dev_free(struct fuse_dev *fud)
1068 1068
1069 fuse_conn_put(fc); 1069 fuse_conn_put(fc);
1070 } 1070 }
1071 kfree(fud->pq.processing);
1071 kfree(fud); 1072 kfree(fud);
1072} 1073}
1073EXPORT_SYMBOL_GPL(fuse_dev_free); 1074EXPORT_SYMBOL_GPL(fuse_dev_free);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index c6289147c787..82c129bfe58d 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -651,6 +651,18 @@ static int ovl_symlink(struct inode *dir, struct dentry *dentry,
651 return ovl_create_object(dentry, S_IFLNK, 0, link); 651 return ovl_create_object(dentry, S_IFLNK, 0, link);
652} 652}
653 653
654static int ovl_set_link_redirect(struct dentry *dentry)
655{
656 const struct cred *old_cred;
657 int err;
658
659 old_cred = ovl_override_creds(dentry->d_sb);
660 err = ovl_set_redirect(dentry, false);
661 revert_creds(old_cred);
662
663 return err;
664}
665
654static int ovl_link(struct dentry *old, struct inode *newdir, 666static int ovl_link(struct dentry *old, struct inode *newdir,
655 struct dentry *new) 667 struct dentry *new)
656{ 668{
@@ -670,7 +682,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
670 goto out_drop_write; 682 goto out_drop_write;
671 683
672 if (ovl_is_metacopy_dentry(old)) { 684 if (ovl_is_metacopy_dentry(old)) {
673 err = ovl_set_redirect(old, false); 685 err = ovl_set_link_redirect(old);
674 if (err) 686 if (err)
675 goto out_drop_write; 687 goto out_drop_write;
676 } 688 }
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 8fa37cd7818a..54e5d17d7f3e 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -754,9 +754,8 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
754 goto out; 754 goto out;
755 } 755 }
756 756
757 /* Otherwise, get a connected non-upper dir or disconnected non-dir */ 757 /* Find origin.dentry again with ovl_acceptable() layer check */
758 if (d_is_dir(origin.dentry) && 758 if (d_is_dir(origin.dentry)) {
759 (origin.dentry->d_flags & DCACHE_DISCONNECTED)) {
760 dput(origin.dentry); 759 dput(origin.dentry);
761 origin.dentry = NULL; 760 origin.dentry = NULL;
762 err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack); 761 err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack);
@@ -769,6 +768,7 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
769 goto out_err; 768 goto out_err;
770 } 769 }
771 770
771 /* Get a connected non-upper dir or disconnected non-dir */
772 dentry = ovl_get_dentry(sb, NULL, &origin, index); 772 dentry = ovl_get_dentry(sb, NULL, &origin, index);
773 773
774out: 774out:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 6bcc9dedc342..3b7ed5d2279c 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -286,22 +286,13 @@ int ovl_permission(struct inode *inode, int mask)
286 if (err) 286 if (err)
287 return err; 287 return err;
288 288
289 /* No need to do any access on underlying for special files */ 289 old_cred = ovl_override_creds(inode->i_sb);
290 if (special_file(realinode->i_mode)) 290 if (!upperinode &&
291 return 0; 291 !special_file(realinode->i_mode) && mask & MAY_WRITE) {
292
293 /* No need to access underlying for execute */
294 mask &= ~MAY_EXEC;
295 if ((mask & (MAY_READ | MAY_WRITE)) == 0)
296 return 0;
297
298 /* Lower files get copied up, so turn write access into read */
299 if (!upperinode && mask & MAY_WRITE) {
300 mask &= ~(MAY_WRITE | MAY_APPEND); 292 mask &= ~(MAY_WRITE | MAY_APPEND);
293 /* Make sure mounter can read file for copy up later */
301 mask |= MAY_READ; 294 mask |= MAY_READ;
302 } 295 }
303
304 old_cred = ovl_override_creds(inode->i_sb);
305 err = inode_permission(realinode, mask); 296 err = inode_permission(realinode, mask);
306 revert_creds(old_cred); 297 revert_creds(old_cred);
307 298
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index cd58939dc977..7a85e609fc27 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1566,7 +1566,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1566 cond_resched(); 1566 cond_resched();
1567 1567
1568 BUG_ON(!vma_can_userfault(vma)); 1568 BUG_ON(!vma_can_userfault(vma));
1569 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1570 1569
1571 /* 1570 /*
1572 * Nothing to do: this vma is already registered into this 1571 * Nothing to do: this vma is already registered into this
@@ -1575,6 +1574,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1575 if (!vma->vm_userfaultfd_ctx.ctx) 1574 if (!vma->vm_userfaultfd_ctx.ctx)
1576 goto skip; 1575 goto skip;
1577 1576
1577 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1578
1578 if (vma->vm_start > start) 1579 if (vma->vm_start > start)
1579 start = vma->vm_start; 1580 start = vma->vm_start;
1580 vma_end = min(end, vma->vm_end); 1581 vma_end = min(end, vma->vm_end);
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 827e4d3bbc7a..8cc7b09c1bc7 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -16,6 +16,7 @@
16#define __ASM_GENERIC_FIXMAP_H 16#define __ASM_GENERIC_FIXMAP_H
17 17
18#include <linux/bug.h> 18#include <linux/bug.h>
19#include <linux/mm_types.h>
19 20
20#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 21#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
21#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) 22#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 537e9e7c6e6f..8c8544b375eb 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -854,7 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
854extern int bpf_jit_enable; 854extern int bpf_jit_enable;
855extern int bpf_jit_harden; 855extern int bpf_jit_harden;
856extern int bpf_jit_kallsyms; 856extern int bpf_jit_kallsyms;
857extern int bpf_jit_limit; 857extern long bpf_jit_limit;
858 858
859typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); 859typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
860 860
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 57fd376b1027..821b751485fb 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -593,11 +593,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
593}; 593};
594 594
595struct mlx5_ifc_flow_table_eswitch_cap_bits { 595struct mlx5_ifc_flow_table_eswitch_cap_bits {
596 u8 reserved_at_0[0x1c]; 596 u8 reserved_at_0[0x1a];
597 u8 fdb_multi_path_to_table[0x1];
598 u8 reserved_at_1d[0x1];
599 u8 multi_fdb_encap[0x1]; 597 u8 multi_fdb_encap[0x1];
600 u8 reserved_at_1f[0x1e1]; 598 u8 reserved_at_1b[0x1];
599 u8 fdb_multi_path_to_table[0x1];
600 u8 reserved_at_1d[0x3];
601
602 u8 reserved_at_20[0x1e0];
601 603
602 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; 604 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
603 605
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5ed8f6292a53..2c471a2c43fa 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -206,6 +206,11 @@ struct page {
206#endif 206#endif
207} _struct_page_alignment; 207} _struct_page_alignment;
208 208
209/*
210 * Used for sizing the vmemmap region on some architectures
211 */
212#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
213
209#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) 214#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
210#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) 215#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
211 216
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 847705a6d0ec..db023a92f3a4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
783static inline void memory_present(int nid, unsigned long start, unsigned long end) {} 783static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
784#endif 784#endif
785 785
786#if defined(CONFIG_SPARSEMEM)
787void memblocks_present(void);
788#else
789static inline void memblocks_present(void) {}
790#endif
791
786#ifdef CONFIG_HAVE_MEMORYLESS_NODES 792#ifdef CONFIG_HAVE_MEMORYLESS_NODES
787int local_memory_node(int node_id); 793int local_memory_node(int node_id);
788#else 794#else
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 01797cb4587e..a0dcc9b6a723 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -565,7 +565,7 @@ struct platform_device_id {
565/** 565/**
566 * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus 566 * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
567 * @phy_id: The result of 567 * @phy_id: The result of
568 * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask 568 * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
569 * for this PHY type 569 * for this PHY type
570 * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0 570 * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
571 * is used to terminate an array of struct mdio_device_id. 571 * is used to terminate an array of struct mdio_device_id.
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 4a520d3304a2..cf09ab37b45b 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
62} 62}
63#endif /* CONFIG_PROVE_LOCKING */ 63#endif /* CONFIG_PROVE_LOCKING */
64 64
65/*
66 * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
67 *
68 * @p: The pointer to read, prior to dereferencing
69 * @ss: The nfnetlink subsystem ID
70 *
71 * Return the value of the specified RCU-protected pointer, but omit
72 * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
73 */
74#define nfnl_dereference(p, ss) \
75 rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
76
77#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ 65#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
78 MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) 66 MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
79 67
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index b9626aa7e90c..3e2a80cc7b56 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -39,12 +39,13 @@ struct t10_pi_tuple {
39 39
40static inline u32 t10_pi_ref_tag(struct request *rq) 40static inline u32 t10_pi_ref_tag(struct request *rq)
41{ 41{
42 unsigned int shift = ilog2(queue_logical_block_size(rq->q));
43
42#ifdef CONFIG_BLK_DEV_INTEGRITY 44#ifdef CONFIG_BLK_DEV_INTEGRITY
43 return blk_rq_pos(rq) >> 45 if (rq->q->integrity.interval_exp)
44 (rq->q->integrity.interval_exp - 9) & 0xffffffff; 46 shift = rq->q->integrity.interval_exp;
45#else
46 return -1U;
47#endif 47#endif
48 return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
48} 49}
49 50
50extern const struct blk_integrity_profile t10_pi_type1_crc; 51extern const struct blk_integrity_profile t10_pi_type1_crc;
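
The rewritten t10_pi_ref_tag() is easiest to see with numbers: blk_rq_pos() counts 512-byte sectors (SECTOR_SHIFT == 9), and shifting by (shift - SECTOR_SHIFT) converts sectors into protection intervals. With 4096-byte logical blocks and no explicit integrity interval, shift = ilog2(4096) = 12, so one reference tag covers eight sectors:

    /* Worked example, values hypothetical: 4096-byte logical blocks. */
    unsigned int shift = 12;              /* ilog2(4096) */
    sector_t pos = 4096;                  /* request start, in 512B sectors */
    u32 ref = pos >> (shift - 9) & 0xffffffff;
    /* ref == 512: the request starts at protection interval 512 */

The rewrite also closes two holes visible in the old version: without CONFIG_BLK_DEV_INTEGRITY it returned -1U, and with integrity.interval_exp unset (zero) the old expression produced a negative shift.
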
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 564892e19f8c..f492e21c4aa2 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -554,6 +554,60 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
554} 554}
555 555
556/** 556/**
557 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
558 * @xa: XArray.
559 * @index: Index into array.
560 * @old: Old value to test against.
561 * @entry: New value to place in array.
562 * @gfp: Memory allocation flags.
563 *
564 * This function is like calling xa_cmpxchg() except it disables softirqs
565 * while holding the array lock.
566 *
567 * Context: Any context. Takes and releases the xa_lock while
568 * disabling softirqs. May sleep if the @gfp flags permit.
569 * Return: The old value at this index or xa_err() if an error happened.
570 */
571static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
572 void *old, void *entry, gfp_t gfp)
573{
574 void *curr;
575
576 xa_lock_bh(xa);
577 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
578 xa_unlock_bh(xa);
579
580 return curr;
581}
582
583/**
584 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
585 * @xa: XArray.
586 * @index: Index into array.
587 * @old: Old value to test against.
588 * @entry: New value to place in array.
589 * @gfp: Memory allocation flags.
590 *
591 * This function is like calling xa_cmpxchg() except it disables interrupts
592 * while holding the array lock.
593 *
594 * Context: Process context. Takes and releases the xa_lock while
595 * disabling interrupts. May sleep if the @gfp flags permit.
596 * Return: The old value at this index or xa_err() if an error happened.
597 */
598static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
599 void *old, void *entry, gfp_t gfp)
600{
601 void *curr;
602
603 xa_lock_irq(xa);
604 curr = __xa_cmpxchg(xa, index, old, entry, gfp);
605 xa_unlock_irq(xa);
606
607 return curr;
608}
609
610/**
557 * xa_insert() - Store this entry in the XArray unless another entry is 611 * xa_insert() - Store this entry in the XArray unless another entry is
558 * already present. 612 * already present.
559 * @xa: XArray. 613 * @xa: XArray.
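
The two new wrappers only change which flavor of xa_lock brackets __xa_cmpxchg(); the semantics are otherwise those of xa_cmpxchg(). A hedged usage sketch, assuming an XArray that is also updated from softirq context (names hypothetical):

    #include <linux/xarray.h>

    static DEFINE_XARRAY(foo_cache);

    /* Install @new at @idx only if the slot is still empty; taking the
     * lock with _bh keeps a softirq writer on this CPU from deadlocking
     * against us. */
    static int foo_publish(unsigned long idx, void *new)
    {
        void *old = xa_cmpxchg_bh(&foo_cache, idx, NULL, new, GFP_KERNEL);

        if (xa_is_err(old))
            return xa_err(old);      /* e.g. allocation failure */
        return old ? -EBUSY : 0;     /* non-NULL: somebody beat us */
    }
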
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
new file mode 100644
index 000000000000..d21f40edc09e
--- /dev/null
+++ b/include/media/mpeg2-ctrls.h
@@ -0,0 +1,86 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * These are the MPEG2 state controls for use with stateless MPEG-2
4 * codec drivers.
5 *
6 * It turns out that these structs are not stable yet and will undergo
7 * more changes. So keep them private until they are stable and ready to
8 * become part of the official public API.
9 */
10
11#ifndef _MPEG2_CTRLS_H_
12#define _MPEG2_CTRLS_H_
13
14#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
15#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
16
17/* enum v4l2_ctrl_type type values */
18#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
19#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
20
21#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
22#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
23#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
24#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
25
26struct v4l2_mpeg2_sequence {
27 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
28 __u16 horizontal_size;
29 __u16 vertical_size;
30 __u32 vbv_buffer_size;
31
32 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
33 __u8 profile_and_level_indication;
34 __u8 progressive_sequence;
35 __u8 chroma_format;
36 __u8 pad;
37};
38
39struct v4l2_mpeg2_picture {
40 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
41 __u8 picture_coding_type;
42
43 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
44 __u8 f_code[2][2];
45 __u8 intra_dc_precision;
46 __u8 picture_structure;
47 __u8 top_field_first;
48 __u8 frame_pred_frame_dct;
49 __u8 concealment_motion_vectors;
50 __u8 q_scale_type;
51 __u8 intra_vlc_format;
52 __u8 alternate_scan;
53 __u8 repeat_first_field;
54 __u8 progressive_frame;
55 __u8 pad;
56};
57
58struct v4l2_ctrl_mpeg2_slice_params {
59 __u32 bit_size;
60 __u32 data_bit_offset;
61
62 struct v4l2_mpeg2_sequence sequence;
63 struct v4l2_mpeg2_picture picture;
64
65 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
66 __u8 quantiser_scale_code;
67
68 __u8 backward_ref_index;
69 __u8 forward_ref_index;
70 __u8 pad;
71};
72
73struct v4l2_ctrl_mpeg2_quantization {
74 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
75 __u8 load_intra_quantiser_matrix;
76 __u8 load_non_intra_quantiser_matrix;
77 __u8 load_chroma_intra_quantiser_matrix;
78 __u8 load_chroma_non_intra_quantiser_matrix;
79
80 __u8 intra_quantiser_matrix[64];
81 __u8 non_intra_quantiser_matrix[64];
82 __u8 chroma_intra_quantiser_matrix[64];
83 __u8 chroma_non_intra_quantiser_matrix[64];
84};
85
86#endif
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 83ce0593b275..d63cf227b0ab 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -22,6 +22,12 @@
22#include <linux/videodev2.h> 22#include <linux/videodev2.h>
23#include <media/media-request.h> 23#include <media/media-request.h>
24 24
25/*
26 * Include the mpeg2 stateless codec compound control definitions.
27 * This will move to the public headers once this API is fully stable.
28 */
29#include <media/mpeg2-ctrls.h>
30
25/* forward references */ 31/* forward references */
26struct file; 32struct file;
27struct v4l2_ctrl_handler; 33struct v4l2_ctrl_handler;
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index e86981d615ae..4a737b2c610b 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -239,6 +239,7 @@ struct vb2_queue;
239 * @num_planes: number of planes in the buffer 239 * @num_planes: number of planes in the buffer
240 * on an internal driver queue. 240 * on an internal driver queue.
241 * @timestamp: frame timestamp in ns. 241 * @timestamp: frame timestamp in ns.
242 * @request: the request this buffer is associated with.
242 * @req_obj: used to bind this buffer to a request. This 243 * @req_obj: used to bind this buffer to a request. This
243 * request object has a refcount. 244 * request object has a refcount.
244 */ 245 */
@@ -249,6 +250,7 @@ struct vb2_buffer {
249 unsigned int memory; 250 unsigned int memory;
250 unsigned int num_planes; 251 unsigned int num_planes;
251 u64 timestamp; 252 u64 timestamp;
253 struct media_request *request;
252 struct media_request_object req_obj; 254 struct media_request_object req_obj;
253 255
254 /* private: internal use only 256 /* private: internal use only
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index db6b2218a2ad..cbcf35ce1b14 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -144,25 +144,6 @@ struct ip_tunnel {
144 bool ignore_df; 144 bool ignore_df;
145}; 145};
146 146
147#define TUNNEL_CSUM __cpu_to_be16(0x01)
148#define TUNNEL_ROUTING __cpu_to_be16(0x02)
149#define TUNNEL_KEY __cpu_to_be16(0x04)
150#define TUNNEL_SEQ __cpu_to_be16(0x08)
151#define TUNNEL_STRICT __cpu_to_be16(0x10)
152#define TUNNEL_REC __cpu_to_be16(0x20)
153#define TUNNEL_VERSION __cpu_to_be16(0x40)
154#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
155#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
156#define TUNNEL_OAM __cpu_to_be16(0x0200)
157#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
158#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
159#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
160#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
161#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
162
163#define TUNNEL_OPTIONS_PRESENT \
164 (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
165
166struct tnl_ptk_info { 147struct tnl_ptk_info {
167 __be16 flags; 148 __be16 flags;
168 __be16 proto; 149 __be16 proto;
diff --git a/include/net/sock.h b/include/net/sock.h
index df390a3e23fe..a6235c286ef9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2350,22 +2350,39 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2350void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags); 2350void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
2351 2351
2352/** 2352/**
2353 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped 2353 * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
2354 * @sk: socket sending this packet 2354 * @sk: socket sending this packet
2355 * @tsflags: timestamping flags to use 2355 * @tsflags: timestamping flags to use
2356 * @tx_flags: completed with instructions for time stamping 2356 * @tx_flags: completed with instructions for time stamping
2357 * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
2357 * 2358 *
2358 * Note: callers should take care of initial ``*tx_flags`` value (usually 0) 2359 * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
2359 */ 2360 */
2360static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags, 2361static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2361 __u8 *tx_flags) 2362 __u8 *tx_flags, __u32 *tskey)
2362{ 2363{
2363 if (unlikely(tsflags)) 2364 if (unlikely(tsflags)) {
2364 __sock_tx_timestamp(tsflags, tx_flags); 2365 __sock_tx_timestamp(tsflags, tx_flags);
2366 if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
2367 tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
2368 *tskey = sk->sk_tskey++;
2369 }
2365 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) 2370 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2366 *tx_flags |= SKBTX_WIFI_STATUS; 2371 *tx_flags |= SKBTX_WIFI_STATUS;
2367} 2372}
2368 2373
2374static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2375 __u8 *tx_flags)
2376{
2377 _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
2378}
2379
2380static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
2381{
2382 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2383 &skb_shinfo(skb)->tskey);
2384}
2385
2369/** 2386/**
2370 * sk_eat_skb - Release a skb if it is no longer needed 2387 * sk_eat_skb - Release a skb if it is no longer needed
2371 * @sk: socket to eat this skb from 2388 * @sk: socket to eat this skb from
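
skb_setup_tx_timestamp() exists so packet-owning send paths stop open-coding the pair of stores: it fills skb_shinfo(skb)->tx_flags and, when SOF_TIMESTAMPING_OPT_ID plus a TX record flag are set, consumes the next sk_tskey into skb_shinfo(skb)->tskey. A hedged fragment of a send path that has already attached the skb to its socket:

    /* skb->sk must be valid here (e.g. after skb_set_owner_w()), since
     * the helper reads and advances sk->sk_tskey through skb->sk. */
    skb_setup_tx_timestamp(skb, sockc.tsflags);

where sockc is the struct sockcm_cookie gathered from the sendmsg control messages.
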
diff --git a/include/net/tls.h b/include/net/tls.h
index bab5627ff5e3..3cbcd12303fd 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -76,6 +76,10 @@
76 * 76 *
77 * void (*unhash)(struct tls_device *device, struct sock *sk); 77 * void (*unhash)(struct tls_device *device, struct sock *sk);
78 * This function cleans listen state set by Inline TLS driver 78 * This function cleans listen state set by Inline TLS driver
79 *
80 * void (*release)(struct kref *kref);
81 * Release the registered device and allocated resources
82 * @kref: Number of reference to tls_device
79 */ 83 */
80struct tls_device { 84struct tls_device {
81 char name[TLS_DEVICE_NAME_MAX]; 85 char name[TLS_DEVICE_NAME_MAX];
@@ -83,6 +87,8 @@ struct tls_device {
83 int (*feature)(struct tls_device *device); 87 int (*feature)(struct tls_device *device);
84 int (*hash)(struct tls_device *device, struct sock *sk); 88 int (*hash)(struct tls_device *device, struct sock *sk);
85 void (*unhash)(struct tls_device *device, struct sock *sk); 89 void (*unhash)(struct tls_device *device, struct sock *sk);
90 void (*release)(struct kref *kref);
91 struct kref kref;
86}; 92};
87 93
88enum { 94enum {
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 38c232861a64..7298a53b9702 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1526,6 +1526,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1526 int (*func)(struct xfrm_state *, int, void*), void *); 1526 int (*func)(struct xfrm_state *, int, void*), void *);
1527void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net); 1527void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1528struct xfrm_state *xfrm_state_alloc(struct net *net); 1528struct xfrm_state *xfrm_state_alloc(struct net *net);
1529void xfrm_state_free(struct xfrm_state *x);
1529struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr, 1530struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1530 const xfrm_address_t *saddr, 1531 const xfrm_address_t *saddr,
1531 const struct flowi *fl, 1532 const struct flowi *fl,
diff --git a/include/uapi/asm-generic/Kbuild.asm b/include/uapi/asm-generic/Kbuild.asm
index 21381449d98a..355c4ac2c0b0 100644
--- a/include/uapi/asm-generic/Kbuild.asm
+++ b/include/uapi/asm-generic/Kbuild.asm
@@ -3,6 +3,7 @@
3# 3#
4mandatory-y += auxvec.h 4mandatory-y += auxvec.h
5mandatory-y += bitsperlong.h 5mandatory-y += bitsperlong.h
6mandatory-y += bpf_perf_event.h
6mandatory-y += byteorder.h 7mandatory-y += byteorder.h
7mandatory-y += errno.h 8mandatory-y += errno.h
8mandatory-y += fcntl.h 9mandatory-y += fcntl.h
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 8f08ff9bdea0..6fa38d001d84 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -141,7 +141,7 @@ struct blk_zone_range {
141 */ 141 */
142#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report) 142#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
143#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range) 143#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
144#define BLKGETZONESZ _IOW(0x12, 132, __u32) 144#define BLKGETZONESZ _IOR(0x12, 132, __u32)
145#define BLKGETNRZONES _IOW(0x12, 133, __u32) 145#define BLKGETNRZONES _IOR(0x12, 133, __u32)
146 146
147#endif /* _UAPI_BLKZONED_H */ 147#endif /* _UAPI_BLKZONED_H */
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 1b3d148c4560..7d9105533c7b 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -160,4 +160,24 @@ enum {
160}; 160};
161 161
162#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1) 162#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1)
163
164#define TUNNEL_CSUM __cpu_to_be16(0x01)
165#define TUNNEL_ROUTING __cpu_to_be16(0x02)
166#define TUNNEL_KEY __cpu_to_be16(0x04)
167#define TUNNEL_SEQ __cpu_to_be16(0x08)
168#define TUNNEL_STRICT __cpu_to_be16(0x10)
169#define TUNNEL_REC __cpu_to_be16(0x20)
170#define TUNNEL_VERSION __cpu_to_be16(0x40)
171#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
172#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
173#define TUNNEL_OAM __cpu_to_be16(0x0200)
174#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
175#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
176#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
177#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
178#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
179
180#define TUNNEL_OPTIONS_PRESENT \
181 (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
182
163#endif /* _UAPI_IF_TUNNEL_H_ */ 183#endif /* _UAPI_IF_TUNNEL_H_ */
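
Since these are plain __be16 bit masks, they compose with the usual bitwise operators. A small sketch (the helper name is invented for illustration):

    #include <linux/if_tunnel.h>

    /* nonzero iff the tunnel carries option metadata (geneve/vxlan/erspan) */
    static inline int tun_flags_have_options(__be16 tun_flags)
    {
        return !!(tun_flags & TUNNEL_OPTIONS_PRESENT);
    }

    /* e.g. tun_flags_have_options(TUNNEL_KEY | TUNNEL_GENEVE_OPT) == 1 */
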
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 48e8a225b985..f6052e70bf40 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -266,10 +266,14 @@ struct sockaddr_in {
266 266
267#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000) 267#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
268#define IN_MULTICAST(a) IN_CLASSD(a) 268#define IN_MULTICAST(a) IN_CLASSD(a)
269#define IN_MULTICAST_NET 0xF0000000 269#define IN_MULTICAST_NET 0xe0000000
270 270
271#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) 271#define IN_BADCLASS(a) (((long int) (a)) == (long int)0xffffffff)
272#define IN_BADCLASS(a) IN_EXPERIMENTAL((a)) 272#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
273
274#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
275#define IN_CLASSE_NET 0xffffffff
276#define IN_CLASSE_NSHIFT 0
273 277
274/* Address to accept any incoming messages. */ 278/* Address to accept any incoming messages. */
275#define INADDR_ANY ((unsigned long int) 0x00000000) 279#define INADDR_ANY ((unsigned long int) 0x00000000)
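
The net effect of the fix: IN_MULTICAST_NET now names the class-D prefix rather than class E, and class-E addresses get dedicated macros. A quick userspace sanity check, assuming the patched header is installed:

    #include <stdio.h>
    #include <linux/in.h>

    int main(void)
    {
        unsigned long a = 0xf0000001UL; /* 240.0.0.1, class E */

        printf("classe=%d multicast=%d\n",
               IN_CLASSE(a) ? 1 : 0, IN_MULTICAST(a) ? 1 : 0);
        return 0; /* prints classe=1 multicast=0 */
    }
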
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 3eb5a4c3d60a..ae366b87426a 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -752,6 +752,15 @@
752 752
753#define ABS_MISC 0x28 753#define ABS_MISC 0x28
754 754
755/*
756 * 0x2e is reserved and should not be used in input drivers.
 757 * It was used by HID as ABS_MISC+6, so userspace needs to detect
 758 * whether the next ABS_* event is genuine or just ABS_MISC + n.
 759 * We define ABS_RESERVED here so that userspace can rely on it to
 760 * detect the situation described above.
761 */
762#define ABS_RESERVED 0x2e
763
755#define ABS_MT_SLOT 0x2f /* MT slot being modified */ 764#define ABS_MT_SLOT 0x2f /* MT slot being modified */
756#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ 765#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
757#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ 766#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
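
With ABS_RESERVED exported, userspace can filter the ambiguous code explicitly. A hedged sketch (the helper name is invented):

    #include <linux/input-event-codes.h>

    /* skip the reserved slot that old HID code emitted as ABS_MISC+6 */
    static int abs_code_is_usable(unsigned int code)
    {
        return code <= ABS_MAX && code != ABS_RESERVED;
    }
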
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 97ff3c17ec4d..e5b39721c6e4 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -155,8 +155,8 @@ enum txtime_flags {
155}; 155};
156 156
157struct sock_txtime { 157struct sock_txtime {
158 clockid_t clockid; /* reference clockid */ 158 __kernel_clockid_t clockid;/* reference clockid */
159 __u32 flags; /* as defined by enum txtime_flags */ 159 __u32 flags; /* as defined by enum txtime_flags */
160}; 160};
161 161
162#endif /* _NET_TIMESTAMPING_H */ 162#endif /* _NET_TIMESTAMPING_H */
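
Switching clockid to __kernel_clockid_t only changes the exported type; userspace usage is unchanged. A minimal sketch of configuring SO_TXTIME with this struct (assuming SO_TXTIME is visible via the socket headers; the flag choice is illustrative):

    #include <string.h>
    #include <sys/socket.h>
    #include <time.h>
    #include <linux/net_tstamp.h>

    static int enable_txtime(int fd)
    {
        struct sock_txtime cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.clockid = CLOCK_TAI;              /* reference clockid */
        cfg.flags = SOF_TXTIME_REPORT_ERRORS; /* from enum txtime_flags */
        return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
    }
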
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 486ed1f0c0bc..0a4d73317759 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
155#define NETLINK_LIST_MEMBERSHIPS 9 155#define NETLINK_LIST_MEMBERSHIPS 9
156#define NETLINK_CAP_ACK 10 156#define NETLINK_CAP_ACK 10
157#define NETLINK_EXT_ACK 11 157#define NETLINK_EXT_ACK 11
158#define NETLINK_DUMP_STRICT_CHK 12 158#define NETLINK_GET_STRICT_CHK 12
159 159
160struct nl_pktinfo { 160struct nl_pktinfo {
161 __u32 group; 161 __u32 group;
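
The rename from NETLINK_DUMP_STRICT_CHK to NETLINK_GET_STRICT_CHK keeps the option value (12), so opting in from userspace is unchanged apart from the name:

    #include <sys/socket.h>
    #include <linux/netlink.h>

    static int enable_strict_checking(int nl_fd)
    {
        int one = 1;

        /* request strict validation of netlink get/dump requests */
        return setsockopt(nl_fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
                          &one, sizeof(one));
    }
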
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 998983a6e6b7..3dcfc6148f99 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -404,9 +404,6 @@ enum v4l2_mpeg_video_multi_slice_mode {
404#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) 404#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228)
405#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) 405#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229)
406 406
407#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
408#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
409
410#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) 407#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300)
411#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) 408#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301)
412#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302) 409#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302)
@@ -1097,69 +1094,4 @@ enum v4l2_detect_md_mode {
1097#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3) 1094#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3)
1098#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4) 1095#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4)
1099 1096
1100#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
1101#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
1102#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
1103#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
1104
1105struct v4l2_mpeg2_sequence {
1106 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
1107 __u16 horizontal_size;
1108 __u16 vertical_size;
1109 __u32 vbv_buffer_size;
1110
1111 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
1112 __u8 profile_and_level_indication;
1113 __u8 progressive_sequence;
1114 __u8 chroma_format;
1115 __u8 pad;
1116};
1117
1118struct v4l2_mpeg2_picture {
1119 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
1120 __u8 picture_coding_type;
1121
1122 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
1123 __u8 f_code[2][2];
1124 __u8 intra_dc_precision;
1125 __u8 picture_structure;
1126 __u8 top_field_first;
1127 __u8 frame_pred_frame_dct;
1128 __u8 concealment_motion_vectors;
1129 __u8 q_scale_type;
1130 __u8 intra_vlc_format;
1131 __u8 alternate_scan;
1132 __u8 repeat_first_field;
1133 __u8 progressive_frame;
1134 __u8 pad;
1135};
1136
1137struct v4l2_ctrl_mpeg2_slice_params {
1138 __u32 bit_size;
1139 __u32 data_bit_offset;
1140
1141 struct v4l2_mpeg2_sequence sequence;
1142 struct v4l2_mpeg2_picture picture;
1143
1144 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
1145 __u8 quantiser_scale_code;
1146
1147 __u8 backward_ref_index;
1148 __u8 forward_ref_index;
1149 __u8 pad;
1150};
1151
1152struct v4l2_ctrl_mpeg2_quantization {
1153 /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
1154 __u8 load_intra_quantiser_matrix;
1155 __u8 load_non_intra_quantiser_matrix;
1156 __u8 load_chroma_intra_quantiser_matrix;
1157 __u8 load_chroma_non_intra_quantiser_matrix;
1158
1159 __u8 intra_quantiser_matrix[64];
1160 __u8 non_intra_quantiser_matrix[64];
1161 __u8 chroma_intra_quantiser_matrix[64];
1162 __u8 chroma_non_intra_quantiser_matrix[64];
1163};
1164
1165#endif 1097#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index c8e8ff810190..2ba2ad0e23fb 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1622,8 +1622,6 @@ struct v4l2_ext_control {
1622 __u8 __user *p_u8; 1622 __u8 __user *p_u8;
1623 __u16 __user *p_u16; 1623 __u16 __user *p_u16;
1624 __u32 __user *p_u32; 1624 __u32 __user *p_u32;
1625 struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params;
1626 struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization;
1627 void __user *ptr; 1625 void __user *ptr;
1628 }; 1626 };
1629} __attribute__ ((packed)); 1627} __attribute__ ((packed));
@@ -1669,8 +1667,6 @@ enum v4l2_ctrl_type {
1669 V4L2_CTRL_TYPE_U8 = 0x0100, 1667 V4L2_CTRL_TYPE_U8 = 0x0100,
1670 V4L2_CTRL_TYPE_U16 = 0x0101, 1668 V4L2_CTRL_TYPE_U16 = 0x0101,
1671 V4L2_CTRL_TYPE_U32 = 0x0102, 1669 V4L2_CTRL_TYPE_U32 = 0x0102,
1672 V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103,
1673 V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104,
1674}; 1670};
1675 1671
1676/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ 1672/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
diff --git a/init/Kconfig b/init/Kconfig
index cf5b5a0dcbc2..ed9352513c32 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -515,8 +515,8 @@ config PSI_DEFAULT_DISABLED
515 depends on PSI 515 depends on PSI
516 help 516 help
517 If set, pressure stall information tracking will be disabled 517 If set, pressure stall information tracking will be disabled
 518 by default, but can be enabled by passing psi_enable=1 518 by default, but can be enabled by passing psi=1 on the
519 on the kernel commandline during boot. 519 kernel commandline during boot.
520 520
521endmenu # "CPU/Task time and stats accounting" 521endmenu # "CPU/Task time and stats accounting"
522 522
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5cdd8da0e7f2..38de580abcc2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -474,13 +474,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
474} 474}
475 475
476#ifdef CONFIG_BPF_JIT 476#ifdef CONFIG_BPF_JIT
477# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
478
479/* All BPF JIT sysctl knobs here. */ 477/* All BPF JIT sysctl knobs here. */
480int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON); 478int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
481int bpf_jit_harden __read_mostly; 479int bpf_jit_harden __read_mostly;
482int bpf_jit_kallsyms __read_mostly; 480int bpf_jit_kallsyms __read_mostly;
483int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT; 481long bpf_jit_limit __read_mostly;
484 482
485static __always_inline void 483static __always_inline void
486bpf_get_prog_addr_region(const struct bpf_prog *prog, 484bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -701,16 +699,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
701 699
702static atomic_long_t bpf_jit_current; 700static atomic_long_t bpf_jit_current;
703 701
702/* Can be overridden by an arch's JIT compiler if it has a custom,
703 * dedicated BPF backend memory area, or if neither of the two
 704 * below applies.
705 */
706u64 __weak bpf_jit_alloc_exec_limit(void)
707{
704#if defined(MODULES_VADDR) 708#if defined(MODULES_VADDR)
709 return MODULES_END - MODULES_VADDR;
710#else
711 return VMALLOC_END - VMALLOC_START;
712#endif
713}
714
705static int __init bpf_jit_charge_init(void) 715static int __init bpf_jit_charge_init(void)
706{ 716{
707 /* Only used as heuristic here to derive limit. */ 717 /* Only used as heuristic here to derive limit. */
708 bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2, 718 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
709 PAGE_SIZE), INT_MAX); 719 PAGE_SIZE), LONG_MAX);
710 return 0; 720 return 0;
711} 721}
712pure_initcall(bpf_jit_charge_init); 722pure_initcall(bpf_jit_charge_init);
713#endif
714 723
715static int bpf_jit_charge_modmem(u32 pages) 724static int bpf_jit_charge_modmem(u32 pages)
716{ 725{
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8b511a4fe84a..5b3c0a95244f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5381,9 +5381,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
5381 } 5381 }
5382 new_sl->next = env->explored_states[insn_idx]; 5382 new_sl->next = env->explored_states[insn_idx];
5383 env->explored_states[insn_idx] = new_sl; 5383 env->explored_states[insn_idx] = new_sl;
5384 /* connect new state to parentage chain */ 5384 /* connect new state to parentage chain. Current frame needs all
5385 for (i = 0; i < BPF_REG_FP; i++) 5385 * registers connected. Only r6 - r9 of the callers are alive (pushed
5386 cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i]; 5386 * to the stack implicitly by JITs) so in callers' frames connect just
5387 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
5388 * the state of the call instruction (with WRITTEN set), and r0 comes
5389 * from callee with its full parentage chain, anyway.
5390 */
5391 for (j = 0; j <= cur->curframe; j++)
5392 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
5393 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
5387 /* clear write marks in current state: the writes we did are not writes 5394 /* clear write marks in current state: the writes we did are not writes
5388 * our child did, so they don't screen off its reads from us. 5395 * our child did, so they don't screen off its reads from us.
5389 * (There are no read marks in current state, because reads always mark 5396 * (There are no read marks in current state, because reads always mark
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 22a12ab5a5e9..375c77e8d52f 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -309,7 +309,12 @@ int dma_direct_supported(struct device *dev, u64 mask)
309 309
310 min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT); 310 min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
311 311
312 return mask >= phys_to_dma(dev, min_mask); 312 /*
313 * This check needs to be against the actual bit mask value, so
314 * use __phys_to_dma() here so that the SME encryption mask isn't
315 * part of the check.
316 */
317 return mask >= __phys_to_dma(dev, min_mask);
313} 318}
314 319
315int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) 320int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 77734451cb05..e23eb9fc77aa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
5460 if (ops->flags & FTRACE_OPS_FL_ENABLED) 5460 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5461 ftrace_shutdown(ops, 0); 5461 ftrace_shutdown(ops, 0);
5462 ops->flags |= FTRACE_OPS_FL_DELETED; 5462 ops->flags |= FTRACE_OPS_FL_DELETED;
5463 ftrace_free_filter(ops);
5463 mutex_unlock(&ftrace_lock); 5464 mutex_unlock(&ftrace_lock);
5464} 5465}
5465 5466
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 84a65173b1e9..5574e862de8d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
570 } 570 }
571 } 571 }
572 572
573 kfree(op_stack);
574 kfree(inverts);
573 return prog; 575 return prog;
574out_free: 576out_free:
575 kfree(op_stack); 577 kfree(op_stack);
576 kfree(prog_stack);
577 kfree(inverts); 578 kfree(inverts);
579 kfree(prog_stack);
578 return ERR_PTR(ret); 580 return ERR_PTR(ret);
579} 581}
580 582
@@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call,
1718 err = process_preds(call, filter_string, *filterp, pe); 1720 err = process_preds(call, filter_string, *filterp, pe);
1719 if (err && set_str) 1721 if (err && set_str)
1720 append_filter_err(pe, *filterp); 1722 append_filter_err(pe, *filterp);
1723 create_filter_finish(pe);
1721 1724
1722 return err; 1725 return err;
1723} 1726}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 2152d1e530cb..cd12ecb66eb9 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str,
732 732
733 /* The filter is for the 'trigger' event, not the triggered event */ 733 /* The filter is for the 'trigger' event, not the triggered event */
734 ret = create_event_filter(file->event_call, filter_str, false, &filter); 734 ret = create_event_filter(file->event_call, filter_str, false, &filter);
735 if (ret) 735 /*
 736 goto out; 736 * If create_event_filter() fails, the filter still needs to be
 737 * freed, which the calling code will do with data->filter.
738 */
737 assign: 739 assign:
738 tmp = rcu_access_pointer(data->filter); 740 tmp = rcu_access_pointer(data->filter);
739 741
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1106bb6aa01e..14d51548bea6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -784,11 +784,11 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
784 while (radix_tree_is_internal_node(node)) { 784 while (radix_tree_is_internal_node(node)) {
785 unsigned offset; 785 unsigned offset;
786 786
787 if (node == RADIX_TREE_RETRY)
788 goto restart;
789 parent = entry_to_node(node); 787 parent = entry_to_node(node);
790 offset = radix_tree_descend(parent, &node, index); 788 offset = radix_tree_descend(parent, &node, index);
791 slot = parent->slots + offset; 789 slot = parent->slots + offset;
790 if (node == RADIX_TREE_RETRY)
791 goto restart;
792 if (parent->shift == 0) 792 if (parent->shift == 0)
793 break; 793 break;
794 } 794 }
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0598e86af8fc..4676c0a1eeca 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { }
28} while (0) 28} while (0)
29#endif 29#endif
30 30
31static void *xa_mk_index(unsigned long index)
32{
33 return xa_mk_value(index & LONG_MAX);
34}
35
31static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) 36static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
32{ 37{
33 return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp); 38 return xa_store(xa, index, xa_mk_index(index), gfp);
34} 39}
35 40
36static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) 41static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
37{ 42{
38 u32 id = 0; 43 u32 id = 0;
39 44
40 XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX), 45 XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
41 gfp) != 0); 46 gfp) != 0);
42 XA_BUG_ON(xa, id != index); 47 XA_BUG_ON(xa, id != index);
43} 48}
44 49
45static void xa_erase_index(struct xarray *xa, unsigned long index) 50static void xa_erase_index(struct xarray *xa, unsigned long index)
46{ 51{
47 XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX)); 52 XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
48 XA_BUG_ON(xa, xa_load(xa, index) != NULL); 53 XA_BUG_ON(xa, xa_load(xa, index) != NULL);
49} 54}
50 55
@@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa)
118 123
119 xas_set(&xas, 0); 124 xas_set(&xas, 0);
120 xas_for_each(&xas, entry, ULONG_MAX) { 125 xas_for_each(&xas, entry, ULONG_MAX) {
121 xas_store(&xas, xa_mk_value(xas.xa_index)); 126 xas_store(&xas, xa_mk_index(xas.xa_index));
122 } 127 }
123 xas_unlock(&xas); 128 xas_unlock(&xas);
124 129
@@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
196 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); 201 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
197 xa_set_mark(xa, index + 2, XA_MARK_1); 202 xa_set_mark(xa, index + 2, XA_MARK_1);
198 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); 203 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
199 xa_store_order(xa, index, order, xa_mk_value(index), 204 xa_store_order(xa, index, order, xa_mk_index(index),
200 GFP_KERNEL); 205 GFP_KERNEL);
201 for (i = base; i < next; i++) { 206 for (i = base; i < next; i++) {
202 XA_STATE(xas, xa, i); 207 XA_STATE(xas, xa, i);
@@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa)
405 xas_set(&xas, j); 410 xas_set(&xas, j);
406 do { 411 do {
407 xas_lock(&xas); 412 xas_lock(&xas);
408 xas_store(&xas, xa_mk_value(j)); 413 xas_store(&xas, xa_mk_index(j));
409 xas_unlock(&xas); 414 xas_unlock(&xas);
410 } while (xas_nomem(&xas, GFP_KERNEL)); 415 } while (xas_nomem(&xas, GFP_KERNEL));
411 } 416 }
@@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa)
423 xas_set(&xas, 0); 428 xas_set(&xas, 0);
424 j = i; 429 j = i;
425 xas_for_each(&xas, entry, ULONG_MAX) { 430 xas_for_each(&xas, entry, ULONG_MAX) {
426 XA_BUG_ON(xa, entry != xa_mk_value(j)); 431 XA_BUG_ON(xa, entry != xa_mk_index(j));
427 xas_store(&xas, NULL); 432 xas_store(&xas, NULL);
428 j++; 433 j++;
429 } 434 }
@@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
440 unsigned long min = index & ~((1UL << order) - 1); 445 unsigned long min = index & ~((1UL << order) - 1);
441 unsigned long max = min + (1UL << order); 446 unsigned long max = min + (1UL << order);
442 447
443 xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL); 448 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
444 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index)); 449 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
445 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index)); 450 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
446 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 451 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
447 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 452 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
448 453
449 xas_lock(&xas); 454 xas_lock(&xas);
450 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index)); 455 XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
451 xas_unlock(&xas); 456 xas_unlock(&xas);
452 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min)); 457 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
453 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min)); 458 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
454 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 459 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
455 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 460 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
456 461
@@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
471 xas_unlock(&xas); 476 xas_unlock(&xas);
472 XA_BUG_ON(xa, !xa_empty(xa)); 477 XA_BUG_ON(xa, !xa_empty(xa));
473} 478}
479
480static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
481 unsigned int order)
482{
483 XA_STATE(xas, xa, 0);
484 void *entry;
485 int n = 0;
486
487 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
488
489 xas_lock(&xas);
490 xas_for_each(&xas, entry, ULONG_MAX) {
491 XA_BUG_ON(xa, entry != xa_mk_index(index));
492 n++;
493 }
494 XA_BUG_ON(xa, n != 1);
495 xas_set(&xas, index + 1);
496 xas_for_each(&xas, entry, ULONG_MAX) {
497 XA_BUG_ON(xa, entry != xa_mk_index(index));
498 n++;
499 }
500 XA_BUG_ON(xa, n != 2);
501 xas_unlock(&xas);
502
503 xa_destroy(xa);
504}
474#endif 505#endif
475 506
476static noinline void check_multi_store(struct xarray *xa) 507static noinline void check_multi_store(struct xarray *xa)
@@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa)
523 554
524 for (i = 0; i < max_order; i++) { 555 for (i = 0; i < max_order; i++) {
525 for (j = 0; j < max_order; j++) { 556 for (j = 0; j < max_order; j++) {
526 xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL); 557 xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
527 xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL); 558 xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
528 559
529 for (k = 0; k < max_order; k++) { 560 for (k = 0; k < max_order; k++) {
530 void *entry = xa_load(xa, (1UL << k) - 1); 561 void *entry = xa_load(xa, (1UL << k) - 1);
531 if ((i < k) && (j < k)) 562 if ((i < k) && (j < k))
532 XA_BUG_ON(xa, entry != NULL); 563 XA_BUG_ON(xa, entry != NULL);
533 else 564 else
534 XA_BUG_ON(xa, entry != xa_mk_value(j)); 565 XA_BUG_ON(xa, entry != xa_mk_index(j));
535 } 566 }
536 567
537 xa_erase(xa, 0); 568 xa_erase(xa, 0);
@@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa)
545 check_multi_store_1(xa, (1UL << i) + 1, i); 576 check_multi_store_1(xa, (1UL << i) + 1, i);
546 } 577 }
547 check_multi_store_2(xa, 4095, 9); 578 check_multi_store_2(xa, 4095, 9);
579
580 for (i = 1; i < 20; i++) {
581 check_multi_store_3(xa, 0, i);
582 check_multi_store_3(xa, 1UL << i, i);
583 }
548#endif 584#endif
549} 585}
550 586
@@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void)
587 xa_destroy(&xa0); 623 xa_destroy(&xa0);
588 624
589 id = 0xfffffffeU; 625 id = 0xfffffffeU;
590 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), 626 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
591 GFP_KERNEL) != 0); 627 GFP_KERNEL) != 0);
592 XA_BUG_ON(&xa0, id != 0xfffffffeU); 628 XA_BUG_ON(&xa0, id != 0xfffffffeU);
593 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), 629 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
594 GFP_KERNEL) != 0); 630 GFP_KERNEL) != 0);
595 XA_BUG_ON(&xa0, id != 0xffffffffU); 631 XA_BUG_ON(&xa0, id != 0xffffffffU);
596 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), 632 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
597 GFP_KERNEL) != -ENOSPC); 633 GFP_KERNEL) != -ENOSPC);
598 XA_BUG_ON(&xa0, id != 0xffffffffU); 634 XA_BUG_ON(&xa0, id != 0xffffffffU);
599 xa_destroy(&xa0); 635 xa_destroy(&xa0);
636
637 id = 10;
638 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
639 GFP_KERNEL) != -ENOSPC);
640 XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
641 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
642 GFP_KERNEL) != -ENOSPC);
643 xa_erase_index(&xa0, 3);
644 XA_BUG_ON(&xa0, !xa_empty(&xa0));
600} 645}
601 646
602static noinline void __check_store_iter(struct xarray *xa, unsigned long start, 647static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -610,11 +655,11 @@ retry:
610 xas_lock(&xas); 655 xas_lock(&xas);
611 xas_for_each_conflict(&xas, entry) { 656 xas_for_each_conflict(&xas, entry) {
612 XA_BUG_ON(xa, !xa_is_value(entry)); 657 XA_BUG_ON(xa, !xa_is_value(entry));
613 XA_BUG_ON(xa, entry < xa_mk_value(start)); 658 XA_BUG_ON(xa, entry < xa_mk_index(start));
614 XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1)); 659 XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
615 count++; 660 count++;
616 } 661 }
617 xas_store(&xas, xa_mk_value(start)); 662 xas_store(&xas, xa_mk_index(start));
618 xas_unlock(&xas); 663 xas_unlock(&xas);
619 if (xas_nomem(&xas, GFP_KERNEL)) { 664 if (xas_nomem(&xas, GFP_KERNEL)) {
620 count = 0; 665 count = 0;
@@ -622,9 +667,9 @@ retry:
622 } 667 }
623 XA_BUG_ON(xa, xas_error(&xas)); 668 XA_BUG_ON(xa, xas_error(&xas));
624 XA_BUG_ON(xa, count != present); 669 XA_BUG_ON(xa, count != present);
625 XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start)); 670 XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
626 XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != 671 XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
627 xa_mk_value(start)); 672 xa_mk_index(start));
628 xa_erase_index(xa, start); 673 xa_erase_index(xa, start);
629} 674}
630 675
@@ -703,7 +748,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
703 for (j = 0; j < index; j++) { 748 for (j = 0; j < index; j++) {
704 XA_STATE(xas, xa, j + index); 749 XA_STATE(xas, xa, j + index);
705 xa_store_index(xa, index - 1, GFP_KERNEL); 750 xa_store_index(xa, index - 1, GFP_KERNEL);
706 xa_store_order(xa, index, i, xa_mk_value(index), 751 xa_store_order(xa, index, i, xa_mk_index(index),
707 GFP_KERNEL); 752 GFP_KERNEL);
708 rcu_read_lock(); 753 rcu_read_lock();
709 xas_for_each(&xas, entry, ULONG_MAX) { 754 xas_for_each(&xas, entry, ULONG_MAX) {
@@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa)
778 j = 0; 823 j = 0;
779 index = 0; 824 index = 0;
780 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { 825 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
781 XA_BUG_ON(xa, xa_mk_value(index) != entry); 826 XA_BUG_ON(xa, xa_mk_index(index) != entry);
782 XA_BUG_ON(xa, index != j++); 827 XA_BUG_ON(xa, index != j++);
783 } 828 }
784 } 829 }
@@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa)
786 xa_destroy(xa); 831 xa_destroy(xa);
787} 832}
788 833
834static noinline void check_find_3(struct xarray *xa)
835{
836 XA_STATE(xas, xa, 0);
837 unsigned long i, j, k;
838 void *entry;
839
840 for (i = 0; i < 100; i++) {
841 for (j = 0; j < 100; j++) {
842 for (k = 0; k < 100; k++) {
843 xas_set(&xas, j);
844 xas_for_each_marked(&xas, entry, k, XA_MARK_0)
845 ;
846 if (j > k)
847 XA_BUG_ON(xa,
848 xas.xa_node != XAS_RESTART);
849 }
850 }
851 xa_store_index(xa, i, GFP_KERNEL);
852 xa_set_mark(xa, i, XA_MARK_0);
853 }
854 xa_destroy(xa);
855}
856
789static noinline void check_find(struct xarray *xa) 857static noinline void check_find(struct xarray *xa)
790{ 858{
791 check_find_1(xa); 859 check_find_1(xa);
792 check_find_2(xa); 860 check_find_2(xa);
861 check_find_3(xa);
793 check_multi_find(xa); 862 check_multi_find(xa);
794 check_multi_find_2(xa); 863 check_multi_find_2(xa);
795} 864}
@@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa)
829 for (index = 0; index < (1UL << (order + 5)); 898 for (index = 0; index < (1UL << (order + 5));
830 index += (1UL << order)) { 899 index += (1UL << order)) {
831 xa_store_order(xa, index, order, 900 xa_store_order(xa, index, order,
832 xa_mk_value(index), GFP_KERNEL); 901 xa_mk_index(index), GFP_KERNEL);
833 XA_BUG_ON(xa, xa_load(xa, index) != 902 XA_BUG_ON(xa, xa_load(xa, index) !=
834 xa_mk_value(index)); 903 xa_mk_index(index));
835 XA_BUG_ON(xa, xa_find_entry(xa, 904 XA_BUG_ON(xa, xa_find_entry(xa,
836 xa_mk_value(index)) != index); 905 xa_mk_index(index)) != index);
837 } 906 }
838 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 907 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
839 xa_destroy(xa); 908 xa_destroy(xa);
@@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa)
844 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 913 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
845 xa_store_index(xa, ULONG_MAX, GFP_KERNEL); 914 xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
846 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 915 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
847 XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1); 916 XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
848 xa_erase_index(xa, ULONG_MAX); 917 xa_erase_index(xa, ULONG_MAX);
849 XA_BUG_ON(xa, !xa_empty(xa)); 918 XA_BUG_ON(xa, !xa_empty(xa));
850} 919}
@@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
864 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 933 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
865 XA_BUG_ON(xa, xas.xa_index != i); 934 XA_BUG_ON(xa, xas.xa_index != i);
866 if (i == 0 || i == idx) 935 if (i == 0 || i == idx)
867 XA_BUG_ON(xa, entry != xa_mk_value(i)); 936 XA_BUG_ON(xa, entry != xa_mk_index(i));
868 else 937 else
869 XA_BUG_ON(xa, entry != NULL); 938 XA_BUG_ON(xa, entry != NULL);
870 } 939 }
@@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
878 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 947 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
879 XA_BUG_ON(xa, xas.xa_index != i); 948 XA_BUG_ON(xa, xas.xa_index != i);
880 if (i == 0 || i == idx) 949 if (i == 0 || i == idx)
881 XA_BUG_ON(xa, entry != xa_mk_value(i)); 950 XA_BUG_ON(xa, entry != xa_mk_index(i));
882 else 951 else
883 XA_BUG_ON(xa, entry != NULL); 952 XA_BUG_ON(xa, entry != NULL);
884 } while (i > 0); 953 } while (i > 0);
@@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa)
909 do { 978 do {
910 void *entry = xas_prev(&xas); 979 void *entry = xas_prev(&xas);
911 i--; 980 i--;
912 XA_BUG_ON(xa, entry != xa_mk_value(i)); 981 XA_BUG_ON(xa, entry != xa_mk_index(i));
913 XA_BUG_ON(xa, i != xas.xa_index); 982 XA_BUG_ON(xa, i != xas.xa_index);
914 } while (i != 0); 983 } while (i != 0);
915 984
@@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa)
918 987
919 do { 988 do {
920 void *entry = xas_next(&xas); 989 void *entry = xas_next(&xas);
921 XA_BUG_ON(xa, entry != xa_mk_value(i)); 990 XA_BUG_ON(xa, entry != xa_mk_index(i));
922 XA_BUG_ON(xa, i != xas.xa_index); 991 XA_BUG_ON(xa, i != xas.xa_index);
923 i++; 992 i++;
924 } while (i < (1 << 16)); 993 } while (i < (1 << 16));
@@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa)
934 void *entry = xas_prev(&xas); 1003 void *entry = xas_prev(&xas);
935 i--; 1004 i--;
936 if ((i < (1 << 8)) || (i >= (1 << 15))) 1005 if ((i < (1 << 8)) || (i >= (1 << 15)))
937 XA_BUG_ON(xa, entry != xa_mk_value(i)); 1006 XA_BUG_ON(xa, entry != xa_mk_index(i));
938 else 1007 else
939 XA_BUG_ON(xa, entry != NULL); 1008 XA_BUG_ON(xa, entry != NULL);
940 XA_BUG_ON(xa, i != xas.xa_index); 1009 XA_BUG_ON(xa, i != xas.xa_index);
@@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa)
946 do { 1015 do {
947 void *entry = xas_next(&xas); 1016 void *entry = xas_next(&xas);
948 if ((i < (1 << 8)) || (i >= (1 << 15))) 1017 if ((i < (1 << 8)) || (i >= (1 << 15)))
949 XA_BUG_ON(xa, entry != xa_mk_value(i)); 1018 XA_BUG_ON(xa, entry != xa_mk_index(i));
950 else 1019 else
951 XA_BUG_ON(xa, entry != NULL); 1020 XA_BUG_ON(xa, entry != NULL);
952 XA_BUG_ON(xa, i != xas.xa_index); 1021 XA_BUG_ON(xa, i != xas.xa_index);
@@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa,
976 if (xas_error(&xas)) 1045 if (xas_error(&xas))
977 goto unlock; 1046 goto unlock;
978 for (i = 0; i < (1U << order); i++) { 1047 for (i = 0; i < (1U << order); i++) {
979 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i))); 1048 XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
980 xas_next(&xas); 1049 xas_next(&xas);
981 } 1050 }
982unlock: 1051unlock:
@@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa,
1031 if (xas_error(&xas)) 1100 if (xas_error(&xas))
1032 goto unlock; 1101 goto unlock;
1033 for (i = 0; i < (1UL << order); i++) { 1102 for (i = 0; i < (1UL << order); i++) {
1034 void *old = xas_store(&xas, xa_mk_value(base + i)); 1103 void *old = xas_store(&xas, xa_mk_index(base + i));
1035 if (xas.xa_index == index) 1104 if (xas.xa_index == index)
1036 XA_BUG_ON(xa, old != xa_mk_value(base + i)); 1105 XA_BUG_ON(xa, old != xa_mk_index(base + i));
1037 else 1106 else
1038 XA_BUG_ON(xa, old != NULL); 1107 XA_BUG_ON(xa, old != NULL);
1039 xas_next(&xas); 1108 xas_next(&xas);
@@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first,
1085 unsigned long last) 1154 unsigned long last)
1086{ 1155{
1087#ifdef CONFIG_XARRAY_MULTI 1156#ifdef CONFIG_XARRAY_MULTI
1088 xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL); 1157 xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
1089 1158
1090 XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first)); 1159 XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
1091 XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first)); 1160 XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
1092 XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); 1161 XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
1093 XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); 1162 XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
1094 1163
@@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa)
1195 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1264 XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
1196 rcu_read_unlock(); 1265 rcu_read_unlock();
1197 1266
1198 xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order), 1267 xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
1199 GFP_KERNEL); 1268 GFP_KERNEL);
1200 XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); 1269 XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
1201 1270
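
The new xa_mk_index() helper masks the index down to what a value entry can hold: xa_mk_value() accepts at most LONG_MAX, since entries are tagged by shifting the value left one bit. That aliasing is also why check_find_entry() now probes xa_mk_index(ULONG_MAX) rather than xa_mk_value(LONG_MAX). A hedged illustration:

    #include <linux/bug.h>
    #include <linux/xarray.h>

    /* assumes the xa_mk_index() helper defined in the test above */
    static void xa_mk_index_demo(void)
    {
        WARN_ON(xa_mk_index(ULONG_MAX) != xa_mk_value(LONG_MAX));
        WARN_ON(xa_to_value(xa_mk_index(5)) != 5);
    }
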
diff --git a/lib/xarray.c b/lib/xarray.c
index bbacca576593..5f3f9311de89 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1131,7 +1131,7 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1131 entry = xa_head(xas->xa); 1131 entry = xa_head(xas->xa);
1132 xas->xa_node = NULL; 1132 xas->xa_node = NULL;
1133 if (xas->xa_index > max_index(entry)) 1133 if (xas->xa_index > max_index(entry))
1134 goto bounds; 1134 goto out;
1135 if (!xa_is_node(entry)) { 1135 if (!xa_is_node(entry)) {
1136 if (xa_marked(xas->xa, mark)) 1136 if (xa_marked(xas->xa, mark))
1137 return entry; 1137 return entry;
@@ -1180,11 +1180,9 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1180 } 1180 }
1181 1181
1182out: 1182out:
1183 if (!max) 1183 if (xas->xa_index > max)
1184 goto max; 1184 goto max;
1185bounds: 1185 return set_bounds(xas);
1186 xas->xa_node = XAS_BOUNDS;
1187 return NULL;
1188max: 1186max:
1189 xas->xa_node = XAS_RESTART; 1187 xas->xa_node = XAS_RESTART;
1190 return NULL; 1188 return NULL;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 705a3e9cc910..a80832487981 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1248,10 +1248,11 @@ void free_huge_page(struct page *page)
1248 (struct hugepage_subpool *)page_private(page); 1248 (struct hugepage_subpool *)page_private(page);
1249 bool restore_reserve; 1249 bool restore_reserve;
1250 1250
1251 set_page_private(page, 0);
1252 page->mapping = NULL;
1253 VM_BUG_ON_PAGE(page_count(page), page); 1251 VM_BUG_ON_PAGE(page_count(page), page);
1254 VM_BUG_ON_PAGE(page_mapcount(page), page); 1252 VM_BUG_ON_PAGE(page_mapcount(page), page);
1253
1254 set_page_private(page, 0);
1255 page->mapping = NULL;
1255 restore_reserve = PagePrivate(page); 1256 restore_reserve = PagePrivate(page);
1256 ClearPagePrivate(page); 1257 ClearPagePrivate(page);
1257 1258
diff --git a/mm/memblock.c b/mm/memblock.c
index 9a2d5ae81ae1..81ae63ca78d0 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1727,7 +1727,7 @@ static int __init_memblock memblock_search(struct memblock_type *type, phys_addr
1727 return -1; 1727 return -1;
1728} 1728}
1729 1729
1730bool __init memblock_is_reserved(phys_addr_t addr) 1730bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1731{ 1731{
1732 return memblock_search(&memblock.reserved, addr) != -1; 1732 return memblock_search(&memblock.reserved, addr) != -1;
1733} 1733}
diff --git a/mm/shmem.c b/mm/shmem.c
index 921f80488bb3..5d07e0b1352f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -661,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping,
661{ 661{
662 void *old; 662 void *old;
663 663
664 xa_lock_irq(&mapping->i_pages); 664 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
665 old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
666 xa_unlock_irq(&mapping->i_pages);
667 if (old != radswap) 665 if (old != radswap)
668 return -ENOENT; 666 return -ENOENT;
669 free_swap_and_cache(radix_to_swp_entry(radswap)); 667 free_swap_and_cache(radix_to_swp_entry(radswap));
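
The shmem change is a straight substitution; per the xarray API, the open-coded locked sequence and the _irq helper are equivalent:

    /* before: explicit locking around the unlocked primitive */
    xa_lock_irq(&mapping->i_pages);
    old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
    xa_unlock_irq(&mapping->i_pages);

    /* after: the helper takes and releases the lock itself */
    old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
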
diff --git a/mm/sparse.c b/mm/sparse.c
index 33307fc05c4d..3abc8cc50201 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -240,6 +240,22 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
240} 240}
241 241
242/* 242/*
243 * Mark all memblocks as present using memory_present(). This is a
 244 * convenience function that is useful for a number of arches
 245 * to mark all of the system's memory as present during initialization.
246 */
247void __init memblocks_present(void)
248{
249 struct memblock_region *reg;
250
251 for_each_memblock(memory, reg) {
252 memory_present(memblock_get_region_node(reg),
253 memblock_region_memory_base_pfn(reg),
254 memblock_region_memory_end_pfn(reg));
255 }
256}
257
258/*
243 * Subtle, we encode the real pfn into the mem_map such that 259 * Subtle, we encode the real pfn into the mem_map such that
244 * the identity pfn - section_mem_map will return the actual 260 * the identity pfn - section_mem_map will return the actual
245 * physical page frame number. 261 * physical page frame number.
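
A hedged sketch of the arch-side cleanup memblocks_present() enables (the function name below is hypothetical):

    /* arch code that previously open-coded the loop can now do: */
    void __init hypothetical_arch_mem_init(void)
    {
        memblocks_present(); /* replaces a for_each_memblock() loop */
        sparse_init();
    }
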
diff --git a/net/can/raw.c b/net/can/raw.c
index 3aab7664933f..c70207537488 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -771,7 +771,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
771 if (err < 0) 771 if (err < 0)
772 goto free_skb; 772 goto free_skb;
773 773
774 sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags); 774 skb_setup_tx_timestamp(skb, sk->sk_tsflags);
775 775
776 skb->dev = dev; 776 skb->dev = dev;
777 skb->sk = sk; 777 skb->sk = sk;
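
skb_setup_tx_timestamp() replaces the open-coded sock_tx_timestamp() call here and in the raw IPv4/IPv6 paths below, so the socket's timestamping flags are applied to the skb. On the userspace side the request looks as usual (flag selection is illustrative):

    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    static int request_tx_timestamps(int fd)
    {
        int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
                    SOF_TIMESTAMPING_SOFTWARE |
                    SOF_TIMESTAMPING_OPT_ID;

        return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                          &flags, sizeof(flags));
    }
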
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2e8d91e54179..9f2840510e63 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -783,6 +783,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
783 /* Pass parameters to the BPF program */ 783 /* Pass parameters to the BPF program */
784 cb->qdisc_cb.flow_keys = &flow_keys; 784 cb->qdisc_cb.flow_keys = &flow_keys;
785 flow_keys.nhoff = nhoff; 785 flow_keys.nhoff = nhoff;
786 flow_keys.thoff = nhoff;
786 787
787 bpf_compute_data_pointers((struct sk_buff *)skb); 788 bpf_compute_data_pointers((struct sk_buff *)skb);
788 result = BPF_PROG_RUN(attached, skb); 789 result = BPF_PROG_RUN(attached, skb);
@@ -790,9 +791,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
790 /* Restore state */ 791 /* Restore state */
791 memcpy(cb, &cb_saved, sizeof(cb_saved)); 792 memcpy(cb, &cb_saved, sizeof(cb_saved));
792 793
794 flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
795 flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
796 flow_keys.nhoff, skb->len);
797
793 __skb_flow_bpf_to_target(&flow_keys, flow_dissector, 798 __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
794 target_container); 799 target_container);
795 key_control->thoff = min_t(u16, key_control->thoff, skb->len);
796 rcu_read_unlock(); 800 rcu_read_unlock();
797 return result == BPF_OK; 801 return result == BPF_OK;
798 } 802 }
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index 4b54e5f107c6..acf45ddbe924 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
84 for_each_possible_cpu(i) { 84 for_each_possible_cpu(i) {
85 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); 85 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
86 86
87 napi_disable(&cell->napi);
87 netif_napi_del(&cell->napi); 88 netif_napi_del(&cell->napi);
88 __skb_queue_purge(&cell->napi_skbs); 89 __skb_queue_purge(&cell->napi_skbs);
89 } 90 }
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8baa9ab01db6..fa384f775f1a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2629,11 +2629,16 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2629 2629
2630 ndm = nlmsg_data(nlh); 2630 ndm = nlmsg_data(nlh);
2631 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex || 2631 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
2632 ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) { 2632 ndm->ndm_state || ndm->ndm_type) {
2633 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request"); 2633 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2634 return -EINVAL; 2634 return -EINVAL;
2635 } 2635 }
2636 2636
2637 if (ndm->ndm_flags & ~NTF_PROXY) {
2638 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2639 return -EINVAL;
2640 }
2641
2637 err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX, 2642 err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
2638 nda_policy, extack); 2643 nda_policy, extack);
2639 } else { 2644 } else {
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 37b4667128a3..d67ec17f2cc8 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -28,6 +28,8 @@ static int two __maybe_unused = 2;
28static int min_sndbuf = SOCK_MIN_SNDBUF; 28static int min_sndbuf = SOCK_MIN_SNDBUF;
29static int min_rcvbuf = SOCK_MIN_RCVBUF; 29static int min_rcvbuf = SOCK_MIN_RCVBUF;
30static int max_skb_frags = MAX_SKB_FRAGS; 30static int max_skb_frags = MAX_SKB_FRAGS;
31static long long_one __maybe_unused = 1;
32static long long_max __maybe_unused = LONG_MAX;
31 33
32static int net_msg_warn; /* Unused, but still a sysctl */ 34static int net_msg_warn; /* Unused, but still a sysctl */
33 35
@@ -289,6 +291,17 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
289 291
290 return proc_dointvec_minmax(table, write, buffer, lenp, ppos); 292 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
291} 293}
294
295static int
296proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
297 void __user *buffer, size_t *lenp,
298 loff_t *ppos)
299{
300 if (!capable(CAP_SYS_ADMIN))
301 return -EPERM;
302
303 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
304}
292#endif 305#endif
293 306
294static struct ctl_table net_core_table[] = { 307static struct ctl_table net_core_table[] = {
@@ -398,10 +411,11 @@ static struct ctl_table net_core_table[] = {
398 { 411 {
399 .procname = "bpf_jit_limit", 412 .procname = "bpf_jit_limit",
400 .data = &bpf_jit_limit, 413 .data = &bpf_jit_limit,
401 .maxlen = sizeof(int), 414 .maxlen = sizeof(long),
402 .mode = 0600, 415 .mode = 0600,
403 .proc_handler = proc_dointvec_minmax_bpf_restricted, 416 .proc_handler = proc_dolongvec_minmax_bpf_restricted,
404 .extra1 = &one, 417 .extra1 = &long_one,
418 .extra2 = &long_max,
405 }, 419 },
406#endif 420#endif
407 { 421 {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5b9b6d497f71..04ba321ae5ce 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -952,17 +952,18 @@ static int inet_abc_len(__be32 addr)
952{ 952{
953 int rc = -1; /* Something else, probably a multicast. */ 953 int rc = -1; /* Something else, probably a multicast. */
954 954
955 if (ipv4_is_zeronet(addr)) 955 if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
956 rc = 0; 956 rc = 0;
957 else { 957 else {
958 __u32 haddr = ntohl(addr); 958 __u32 haddr = ntohl(addr);
959
960 if (IN_CLASSA(haddr)) 959 if (IN_CLASSA(haddr))
961 rc = 8; 960 rc = 8;
962 else if (IN_CLASSB(haddr)) 961 else if (IN_CLASSB(haddr))
963 rc = 16; 962 rc = 16;
964 else if (IN_CLASSC(haddr)) 963 else if (IN_CLASSC(haddr))
965 rc = 24; 964 rc = 24;
965 else if (IN_CLASSE(haddr))
966 rc = 32;
966 } 967 }
967 968
968 return rc; 969 return rc;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 06ee4696703c..00ec819f949b 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -79,6 +79,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
79 if (unlikely(opt->optlen)) 79 if (unlikely(opt->optlen))
80 ip_forward_options(skb); 80 ip_forward_options(skb);
81 81
82 skb->tstamp = 0;
82 return dst_output(net, sk, skb); 83 return dst_output(net, sk, skb);
83} 84}
84 85
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index aa0b22697998..867be8f7f1fa 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -346,10 +346,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
346 struct net *net = container_of(qp->q.net, struct net, ipv4.frags); 346 struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
347 struct rb_node **rbn, *parent; 347 struct rb_node **rbn, *parent;
348 struct sk_buff *skb1, *prev_tail; 348 struct sk_buff *skb1, *prev_tail;
349 int ihl, end, skb1_run_end;
349 struct net_device *dev; 350 struct net_device *dev;
350 unsigned int fragsize; 351 unsigned int fragsize;
351 int flags, offset; 352 int flags, offset;
352 int ihl, end;
353 int err = -ENOENT; 353 int err = -ENOENT;
354 u8 ecn; 354 u8 ecn;
355 355
@@ -419,7 +419,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
419 * overlapping fragment, the entire datagram (and any constituent 419 * overlapping fragment, the entire datagram (and any constituent
420 * fragments) MUST be silently discarded. 420 * fragments) MUST be silently discarded.
421 * 421 *
422 * We do the same here for IPv4 (and increment an snmp counter). 422 * We do the same here for IPv4 (and increment an snmp counter) but
423 * we do not want to drop the whole queue in response to a duplicate
424 * fragment.
423 */ 425 */
424 426
425 err = -EINVAL; 427 err = -EINVAL;
@@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
444 do { 446 do {
445 parent = *rbn; 447 parent = *rbn;
446 skb1 = rb_to_skb(parent); 448 skb1 = rb_to_skb(parent);
449 skb1_run_end = skb1->ip_defrag_offset +
450 FRAG_CB(skb1)->frag_run_len;
447 if (end <= skb1->ip_defrag_offset) 451 if (end <= skb1->ip_defrag_offset)
448 rbn = &parent->rb_left; 452 rbn = &parent->rb_left;
449 else if (offset >= skb1->ip_defrag_offset + 453 else if (offset >= skb1_run_end)
450 FRAG_CB(skb1)->frag_run_len)
451 rbn = &parent->rb_right; 454 rbn = &parent->rb_right;
452 else /* Found an overlap with skb1. */ 455 else if (offset >= skb1->ip_defrag_offset &&
453 goto overlap; 456 end <= skb1_run_end)
457 goto err; /* No new data, potential duplicate */
458 else
459 goto overlap; /* Found an overlap */
454 } while (*rbn); 460 } while (*rbn);
455 /* Here we have parent properly set, and rbn pointing to 461 /* Here we have parent properly set, and rbn pointing to
456 * one of its NULL left/right children. Insert skb. 462 * one of its NULL left/right children. Insert skb.
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 208a5b4419c6..b9a9873c25c6 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -429,6 +429,8 @@ static int __init ic_defaults(void)
429 ic_netmask = htonl(IN_CLASSB_NET); 429 ic_netmask = htonl(IN_CLASSB_NET);
430 else if (IN_CLASSC(ntohl(ic_myaddr))) 430 else if (IN_CLASSC(ntohl(ic_myaddr)))
431 ic_netmask = htonl(IN_CLASSC_NET); 431 ic_netmask = htonl(IN_CLASSC_NET);
432 else if (IN_CLASSE(ntohl(ic_myaddr)))
433 ic_netmask = htonl(IN_CLASSE_NET);
432 else { 434 else {
433 pr_err("IP-Config: Unable to guess netmask for address %pI4\n", 435 pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
434 &ic_myaddr); 436 &ic_myaddr);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 75c654924532..ddbf8c9a1abb 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -69,6 +69,8 @@
69#include <net/nexthop.h> 69#include <net/nexthop.h>
70#include <net/switchdev.h> 70#include <net/switchdev.h>
71 71
72#include <linux/nospec.h>
73
72struct ipmr_rule { 74struct ipmr_rule {
73 struct fib_rule common; 75 struct fib_rule common;
74}; 76};
@@ -1612,6 +1614,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1612 return -EFAULT; 1614 return -EFAULT;
1613 if (vr.vifi >= mrt->maxvif) 1615 if (vr.vifi >= mrt->maxvif)
1614 return -EINVAL; 1616 return -EINVAL;
1617 vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
1615 read_lock(&mrt_lock); 1618 read_lock(&mrt_lock);
1616 vif = &mrt->vif_table[vr.vifi]; 1619 vif = &mrt->vif_table[vr.vifi];
1617 if (VIF_EXISTS(mrt, vr.vifi)) { 1620 if (VIF_EXISTS(mrt, vr.vifi)) {
@@ -1686,6 +1689,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1686 return -EFAULT; 1689 return -EFAULT;
1687 if (vr.vifi >= mrt->maxvif) 1690 if (vr.vifi >= mrt->maxvif)
1688 return -EINVAL; 1691 return -EINVAL;
1692 vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
1689 read_lock(&mrt_lock); 1693 read_lock(&mrt_lock);
1690 vif = &mrt->vif_table[vr.vifi]; 1694 vif = &mrt->vif_table[vr.vifi];
1691 if (VIF_EXISTS(mrt, vr.vifi)) { 1695 if (VIF_EXISTS(mrt, vr.vifi)) {
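
Both ioctl paths get the standard Spectre-v1 hardening: bounds-check, then clamp the index before the table access. The generic shape of the pattern (a sketch; the struct and names are invented):

    #include <linux/errno.h>
    #include <linux/nospec.h>

    struct slot { int value; };

    static int read_slot(const struct slot *table, unsigned int idx,
                         unsigned int max)
    {
        if (idx >= max)
            return -EINVAL;
        /* clamp idx so speculation cannot index past the bound */
        idx = array_index_nospec(idx, max);
        return table[idx].value;
    }
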
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 076f51646d26..c55a5432cf37 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -390,7 +390,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
390 390
391 skb->ip_summed = CHECKSUM_NONE; 391 skb->ip_summed = CHECKSUM_NONE;
392 392
393 sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); 393 skb_setup_tx_timestamp(skb, sockc->tsflags);
394 394
395 if (flags & MSG_CONFIRM) 395 if (flags & MSG_CONFIRM)
396 skb_set_dst_pending_confirm(skb, 1); 396 skb_set_dst_pending_confirm(skb, 1);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 703a8e801c5c..5f9fa0302b5a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -385,6 +385,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
385 } 385 }
386#endif 386#endif
387 387
388 skb->tstamp = 0;
388 return dst_output(net, sk, skb); 389 return dst_output(net, sk, skb);
389} 390}
390 391
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index 3965d5396b0a..ad1a9ccd4b44 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -15,7 +15,7 @@
15int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, 15int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
16 struct socket **sockp) 16 struct socket **sockp)
17{ 17{
18 struct sockaddr_in6 udp6_addr; 18 struct sockaddr_in6 udp6_addr = {};
19 int err; 19 int err;
20 struct socket *sock = NULL; 20 struct socket *sock = NULL;
21 21
@@ -58,6 +58,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
58 goto error; 58 goto error;
59 59
60 if (cfg->peer_udp_port) { 60 if (cfg->peer_udp_port) {
61 memset(&udp6_addr, 0, sizeof(udp6_addr));
61 udp6_addr.sin6_family = AF_INET6; 62 udp6_addr.sin6_family = AF_INET6;
62 memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6, 63 memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
63 sizeof(udp6_addr.sin6_addr)); 64 sizeof(udp6_addr.sin6_addr));
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 34b8a90e6be2..8276f1224f16 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -52,6 +52,8 @@
52#include <net/ip6_checksum.h> 52#include <net/ip6_checksum.h>
53#include <linux/netconf.h> 53#include <linux/netconf.h>
54 54
55#include <linux/nospec.h>
56
55struct ip6mr_rule { 57struct ip6mr_rule {
56 struct fib_rule common; 58 struct fib_rule common;
57}; 59};
@@ -1841,6 +1843,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1841 return -EFAULT; 1843 return -EFAULT;
1842 if (vr.mifi >= mrt->maxvif) 1844 if (vr.mifi >= mrt->maxvif)
1843 return -EINVAL; 1845 return -EINVAL;
1846 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1844 read_lock(&mrt_lock); 1847 read_lock(&mrt_lock);
1845 vif = &mrt->vif_table[vr.mifi]; 1848 vif = &mrt->vif_table[vr.mifi];
1846 if (VIF_EXISTS(mrt, vr.mifi)) { 1849 if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1915,6 +1918,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1915 return -EFAULT; 1918 return -EFAULT;
1916 if (vr.mifi >= mrt->maxvif) 1919 if (vr.mifi >= mrt->maxvif)
1917 return -EINVAL; 1920 return -EINVAL;
1921 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
1918 read_lock(&mrt_lock); 1922 read_lock(&mrt_lock);
1919 vif = &mrt->vif_table[vr.mifi]; 1923 vif = &mrt->vif_table[vr.mifi];
1920 if (VIF_EXISTS(mrt, vr.mifi)) { 1924 if (VIF_EXISTS(mrt, vr.mifi)) {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index aed7eb5c2123..5a426226c762 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -657,6 +657,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
657 657
658 skb->ip_summed = CHECKSUM_NONE; 658 skb->ip_summed = CHECKSUM_NONE;
659 659
660 skb_setup_tx_timestamp(skb, sockc->tsflags);
661
660 if (flags & MSG_CONFIRM) 662 if (flags & MSG_CONFIRM)
661 skb_set_dst_pending_confirm(skb, 1); 663 skb_set_dst_pending_confirm(skb, 1);
662 664
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e9652e623a31..4a6ff1482a9f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -7,6 +7,7 @@
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (c) 2016        Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1949,6 +1950,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 	WARN(local->open_count, "%s: open count remains %d\n",
 	     wiphy_name(local->hw.wiphy), local->open_count);

+	ieee80211_txq_teardown_flows(local);
+
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index ada8e16d52d2..87a729926734 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1264,7 +1264,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	rtnl_unlock();
 	ieee80211_led_exit(local);
 	ieee80211_wep_free(local);
-	ieee80211_txq_teardown_flows(local);
 fail_flows:
 	destroy_workqueue(local->workqueue);
 fail_workqueue:
@@ -1290,7 +1289,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
 	unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
-	ieee80211_txq_teardown_flows(local);

 	rtnl_lock();

diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index a794ca729000..3f0b96e1e02f 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -556,6 +556,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
 	}

 	ieee80211_led_tx(local);
+
+	if (skb_has_frag_list(skb)) {
+		kfree_skb_list(skb_shinfo(skb)->frag_list);
+		skb_shinfo(skb)->frag_list = NULL;
+	}
 }

 /*
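Freeing the frag_list here and clearing the pointer keeps the later release of the parent skb from walking the same chain again. A userspace sketch of the free-once-and-clear pattern (types are illustrative, not mac80211's):

/* Release an attached list exactly once and clear the pointer, so the
 * owner's destructor cannot free it a second time. */
#include <stdlib.h>

struct frag {
    struct frag *next;
};

static void free_frag_list(struct frag **head)
{
    struct frag *f = *head;

    while (f) {
        struct frag *next = f->next;

        free(f);
        f = next;
    }
    *head = NULL;   /* later teardown sees an empty list, not a stale one */
}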
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4eef55da0878..8da228da53ae 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -531,8 +531,8 @@ nla_put_failure:
 		ret = -EMSGSIZE;
 	} else {
 		cb->args[IPSET_CB_ARG0] = i;
+		ipset_nest_end(skb, atd);
 	}
-	ipset_nest_end(skb, atd);
out:
 	rcu_read_unlock();
 	return ret;
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index b6d0f6deea86..9cd180bda092 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -427,7 +427,7 @@ insert_tree(struct net *net,
 	count = 1;
 	rbconn->list.count = count;

-	rb_link_node(&rbconn->node, parent, rbnode);
+	rb_link_node_rcu(&rbconn->node, parent, rbnode);
 	rb_insert_color(&rbconn->node, root);
out_unlock:
 	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index a975efd6b8c3..9da303461069 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -115,12 +115,12 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
 /* TCP SACK sequence number adjustment */
 static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
 				      unsigned int protoff,
-				      struct tcphdr *tcph,
 				      struct nf_conn *ct,
 				      enum ip_conntrack_info ctinfo)
 {
-	unsigned int dir, optoff, optend;
+	struct tcphdr *tcph = (void *)skb->data + protoff;
 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+	unsigned int dir, optoff, optend;

 	optoff = protoff + sizeof(struct tcphdr);
 	optend = protoff + tcph->doff * 4;
@@ -128,6 +128,7 @@ static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
 	if (!skb_make_writable(skb, optend))
 		return 0;

+	tcph = (void *)skb->data + protoff;
 	dir = CTINFO2DIR(ctinfo);

 	while (optoff < optend) {
@@ -207,7 +208,7 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
 			 ntohl(newack));
 	tcph->ack_seq = newack;

-	res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+	res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
out:
 	spin_unlock_bh(&ct->lock);

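The point of this change is that skb_make_writable() can reallocate the packet data, so a tcph pointer computed earlier may dangle; it is therefore re-derived from skb->data + protoff after the call. A userspace analogue with realloc(), under the same assumption that any resizing call invalidates derived pointers:

/* Re-derive a header pointer after a call that may move the buffer. */
#include <stdlib.h>
#include <string.h>

struct buf {
    unsigned char *data;
    size_t len;
};

static int make_writable(struct buf *b, size_t need)
{
    if (need > b->len) {
        unsigned char *n = realloc(b->data, need);

        if (!n)
            return -1;
        memset(n + b->len, 0, need - b->len);
        b->data = n;    /* pointers into the old b->data now dangle */
        b->len = need;
    }
    return 0;
}

static int touch_header(struct buf *b, size_t protoff)
{
    unsigned char *hdr;

    if (make_writable(b, protoff + 20) < 0)   /* 20 = minimal TCP header */
        return -1;
    hdr = b->data + protoff;    /* derive only after the possible move */
    hdr[13] |= 0x10;            /* set the ACK flag byte, as an example */
    return 0;
}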
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index e2b196054dfc..2268b10a9dcf 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -117,7 +117,8 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 	dst = skb_dst(skb);
 	if (dst->xfrm)
 		dst = ((struct xfrm_dst *)dst)->route;
-	dst_hold(dst);
+	if (!dst_hold_safe(dst))
+		return -EHOSTUNREACH;

 	if (sk && !net_eq(net, sock_net(sk)))
 		sk = NULL;
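dst_hold_safe() only takes the reference if the entry is still live, letting the caller fail with -EHOSTUNREACH instead of resurrecting a dying dst. A C11 userspace sketch of the inc-not-zero idiom it relies on:

/* Take a reference only if the object has not already dropped to zero. */
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
    atomic_uint refcnt;   /* 0 means teardown has begun */
};

static bool obj_hold_safe(struct obj *o)
{
    unsigned int c = atomic_load(&o->refcnt);

    while (c != 0) {
        if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
            return true;    /* reference taken */
    }
    return false;           /* caller must bail out */
}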
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2e61aab6ed73..6e548d7c9f67 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1216,7 +1216,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
 		if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
 			goto nla_put_failure;

-		if (basechain->stats && nft_dump_stats(skb, basechain->stats))
+		if (rcu_access_pointer(basechain->stats) &&
+		    nft_dump_stats(skb, rcu_dereference(basechain->stats)))
 			goto nla_put_failure;
 	}

@@ -1392,7 +1393,8 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 	return newstats;
 }

-static void nft_chain_stats_replace(struct nft_base_chain *chain,
+static void nft_chain_stats_replace(struct net *net,
+				    struct nft_base_chain *chain,
 				    struct nft_stats __percpu *newstats)
 {
 	struct nft_stats __percpu *oldstats;
@@ -1400,8 +1402,9 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
 	if (newstats == NULL)
 		return;

-	if (chain->stats) {
-		oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
+	if (rcu_access_pointer(chain->stats)) {
+		oldstats = rcu_dereference_protected(chain->stats,
+					lockdep_commit_lock_is_held(net));
 		rcu_assign_pointer(chain->stats, newstats);
 		synchronize_rcu();
 		free_percpu(oldstats);
@@ -1439,9 +1442,10 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
 		struct nft_base_chain *basechain = nft_base_chain(chain);

 		module_put(basechain->type->owner);
-		free_percpu(basechain->stats);
-		if (basechain->stats)
+		if (rcu_access_pointer(basechain->stats)) {
 			static_branch_dec(&nft_counters_enabled);
+			free_percpu(rcu_dereference_raw(basechain->stats));
+		}
 		kfree(chain->name);
 		kfree(basechain);
 	} else {
@@ -1590,7 +1594,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 			kfree(basechain);
 			return PTR_ERR(stats);
 		}
-		basechain->stats = stats;
+		rcu_assign_pointer(basechain->stats, stats);
 		static_branch_inc(&nft_counters_enabled);
 	}

@@ -6180,7 +6184,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
 		return;

 	basechain = nft_base_chain(trans->ctx.chain);
-	nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+	nft_chain_stats_replace(trans->ctx.net, basechain,
+				nft_trans_chain_stats(trans));

 	switch (nft_trans_chain_policy(trans)) {
 	case NF_DROP:
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 3fbce3b9c5ec..a50500232b0a 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -101,7 +101,7 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
 	struct nft_stats *stats;

 	base_chain = nft_base_chain(chain);
-	if (!base_chain->stats)
+	if (!rcu_access_pointer(base_chain->stats))
 		return;

 	local_bh_disable();
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6bb9f3cde0b0..3c023d6120f6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1706,7 +1706,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 		nlk->flags &= ~NETLINK_F_EXT_ACK;
 		err = 0;
 		break;
-	case NETLINK_DUMP_STRICT_CHK:
+	case NETLINK_GET_STRICT_CHK:
 		if (val)
 			nlk->flags |= NETLINK_F_STRICT_CHK;
 		else
@@ -1806,7 +1806,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
 			return -EFAULT;
 		err = 0;
 		break;
-	case NETLINK_DUMP_STRICT_CHK:
+	case NETLINK_GET_STRICT_CHK:
 		if (len < sizeof(int))
 			return -EINVAL;
 		len = sizeof(int);
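Only the uapi name changes here: the option is renamed to NETLINK_GET_STRICT_CHK before the old NETLINK_DUMP_STRICT_CHK name ships in a release. A userspace sketch of enabling it, assuming headers new enough to carry the name:

/* Enable strict argument checking on a netlink socket. */
#include <sys/socket.h>
#include <linux/netlink.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
    int one = 1;

    if (fd < 0)
        return 1;
    if (setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
                   &one, sizeof(one)) < 0)
        perror("NETLINK_GET_STRICT_CHK");
    close(fd);
    return 0;
}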
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a74650e98f42..6655793765b2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1965,7 +1965,7 @@ retry:
 	skb->mark = sk->sk_mark;
 	skb->tstamp = sockc.transmit_time;

-	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+	skb_setup_tx_timestamp(skb, sockc.tsflags);

 	if (unlikely(extra_len == 4))
 		skb->no_fcs = 1;
@@ -2460,7 +2460,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 	skb->priority = po->sk.sk_priority;
 	skb->mark = po->sk.sk_mark;
 	skb->tstamp = sockc->transmit_time;
-	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+	skb_setup_tx_timestamp(skb, sockc->tsflags);
 	skb_zcopy_set_nouarg(skb, ph.raw);

 	skb_reserve(skb, hlen);
@@ -2898,7 +2898,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		goto out_free;
 	}

-	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+	skb_setup_tx_timestamp(skb, sockc.tsflags);

 	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
 	    !packet_extra_vlan_len_allowed(dev, skb)) {
diff --git a/net/rds/message.c b/net/rds/message.c
index 4b00b1152a5f..f139420ba1f6 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -308,16 +308,27 @@ out:
 /*
  * RDS ops use this to grab SG entries from the rm's sg pool.
  */
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+					  int *ret)
 {
 	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
 	struct scatterlist *sg_ret;

-	WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
-	WARN_ON(!nents);
+	if (WARN_ON(!ret))
+		return NULL;

-	if (rm->m_used_sgs + nents > rm->m_total_sgs)
+	if (nents <= 0) {
+		pr_warn("rds: alloc sgs failed! nents <= 0\n");
+		*ret = -EINVAL;
 		return NULL;
+	}
+
+	if (rm->m_used_sgs + nents > rm->m_total_sgs) {
+		pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
+			rm->m_total_sgs, rm->m_used_sgs, nents);
+		*ret = -ENOMEM;
+		return NULL;
+	}

 	sg_ret = &sg_first[rm->m_used_sgs];
 	sg_init_table(sg_ret, nents);
@@ -332,6 +343,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 	unsigned int i;
 	int num_sgs = ceil(total_len, PAGE_SIZE);
 	int extra_bytes = num_sgs * sizeof(struct scatterlist);
+	int ret;

 	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
 	if (!rm)
@@ -340,10 +352,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
 	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
-	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
 	if (!rm->data.op_sg) {
 		rds_message_put(rm);
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(ret);
 	}

 	for (i = 0; i < rm->data.op_nents; ++i) {
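rds_message_alloc_sgs() now reports why it failed through *ret, and rds_message_map_pages() forwards that reason as ERR_PTR(ret) instead of assuming -ENOMEM. A userspace sketch of the ERR_PTR convention (the kernel's include/linux/err.h encodes small negative errnos in the pointer value):

/* Propagate a callee-chosen errno through a pointer return. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_sgs(int nents, int *ret)
{
    static char pool[64];   /* placeholder for a real allocation */

    if (nents <= 0) {
        *ret = -EINVAL;     /* the reason travels through *ret */
        return NULL;
    }
    *ret = 0;
    return pool;
}

int main(void)
{
    int ret;
    void *sg = alloc_sgs(0, &ret);
    void *msg = sg ? sg : ERR_PTR(ret);

    if (IS_ERR(msg))
        printf("propagated errno: %ld\n", -PTR_ERR(msg));
    return 0;
}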
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 98237feb607a..182ab8430594 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -517,9 +517,10 @@ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
 	return tot_pages;
 }

-int rds_rdma_extra_size(struct rds_rdma_args *args)
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+			struct rds_iov_vector *iov)
 {
-	struct rds_iovec vec;
+	struct rds_iovec *vec;
 	struct rds_iovec __user *local_vec;
 	int tot_pages = 0;
 	unsigned int nr_pages;
@@ -530,13 +531,23 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
 	if (args->nr_local == 0)
 		return -EINVAL;

+	iov->iov = kcalloc(args->nr_local,
+			   sizeof(struct rds_iovec),
+			   GFP_KERNEL);
+	if (!iov->iov)
+		return -ENOMEM;
+
+	vec = &iov->iov[0];
+
+	if (copy_from_user(vec, local_vec, args->nr_local *
+			   sizeof(struct rds_iovec)))
+		return -EFAULT;
+	iov->len = args->nr_local;
+
 	/* figure out the number of pages in the vector */
-	for (i = 0; i < args->nr_local; i++) {
-		if (copy_from_user(&vec, &local_vec[i],
-				   sizeof(struct rds_iovec)))
-			return -EFAULT;
+	for (i = 0; i < args->nr_local; i++, vec++) {

-		nr_pages = rds_pages_in_vec(&vec);
+		nr_pages = rds_pages_in_vec(vec);
 		if (nr_pages == 0)
 			return -EINVAL;

@@ -558,15 +569,15 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
  * Extract all arguments and set up the rdma_op
  */
 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg)
+		       struct cmsghdr *cmsg,
+		       struct rds_iov_vector *vec)
 {
 	struct rds_rdma_args *args;
 	struct rm_rdma_op *op = &rm->rdma;
 	int nr_pages;
 	unsigned int nr_bytes;
 	struct page **pages = NULL;
-	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
-	int iov_size;
+	struct rds_iovec *iovs;
 	unsigned int i, j;
 	int ret = 0;

@@ -586,31 +597,23 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		goto out_ret;
 	}

-	/* Check whether to allocate the iovec area */
-	iov_size = args->nr_local * sizeof(struct rds_iovec);
-	if (args->nr_local > UIO_FASTIOV) {
-		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
-		if (!iovs) {
-			ret = -ENOMEM;
-			goto out_ret;
-		}
+	if (vec->len != args->nr_local) {
+		ret = -EINVAL;
+		goto out_ret;
 	}

-	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
-		ret = -EFAULT;
-		goto out;
-	}
+	iovs = vec->iov;

 	nr_pages = rds_rdma_pages(iovs, args->nr_local);
 	if (nr_pages < 0) {
 		ret = -EINVAL;
-		goto out;
+		goto out_ret;
 	}

 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 	if (!pages) {
 		ret = -ENOMEM;
-		goto out;
+		goto out_ret;
 	}

 	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
@@ -620,11 +623,9 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	op->op_active = 1;
 	op->op_recverr = rs->rs_recverr;
 	WARN_ON(!nr_pages);
-	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
-	if (!op->op_sg) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
+	if (!op->op_sg)
+		goto out_pages;

 	if (op->op_notify || op->op_recverr) {
 		/* We allocate an uninitialized notifier here, because
@@ -635,7 +636,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
 		if (!op->op_notifier) {
 			ret = -ENOMEM;
-			goto out;
+			goto out_pages;
 		}
 		op->op_notifier->n_user_token = args->user_token;
 		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
@@ -681,7 +682,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		 */
 		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
 		if (ret < 0)
-			goto out;
+			goto out_pages;
 		else
 			ret = 0;

@@ -714,13 +715,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			nr_bytes,
 			(unsigned int) args->remote_vec.bytes);
 		ret = -EINVAL;
-		goto out;
+		goto out_pages;
 	}
 	op->op_bytes = nr_bytes;

-out:
-	if (iovs != iovstack)
-		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
+out_pages:
 	kfree(pages);
out_ret:
 	if (ret)
@@ -838,11 +837,9 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
 	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
 	rm->atomic.op_active = 1;
 	rm->atomic.op_recverr = rs->rs_recverr;
-	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
-	if (!rm->atomic.op_sg) {
-		ret = -ENOMEM;
+	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
+	if (!rm->atomic.op_sg)
 		goto err;
-	}

 	/* verify 8 byte-aligned */
 	if (args->local_addr & 0x7) {
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 6bfaf05b63b2..02ec4a3b2799 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -386,6 +386,18 @@ static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
 	INIT_LIST_HEAD(&q->zcookie_head);
 }

+struct rds_iov_vector {
+	struct rds_iovec *iov;
+	int               len;
+};
+
+struct rds_iov_vector_arr {
+	struct rds_iov_vector *vec;
+	int                    len;
+	int                    indx;
+	int                    incr;
+};
+
 struct rds_message {
 	refcount_t m_refcount;
 	struct list_head m_sock_item;
@@ -827,7 +839,8 @@ rds_conn_connecting(struct rds_connection *conn)

 /* message.c */
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+					  int *ret);
 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
 			       bool zcopy);
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
@@ -904,13 +917,13 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
 void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_rdma_extra_size(struct rds_rdma_args *args);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-			  struct cmsghdr *cmsg);
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+			struct rds_iov_vector *iov);
 int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 			  struct cmsghdr *cmsg);
 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-			  struct cmsghdr *cmsg);
+			  struct cmsghdr *cmsg,
+			  struct rds_iov_vector *vec);
 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 			  struct cmsghdr *cmsg);
 void rds_rdma_free_op(struct rm_rdma_op *ro);
diff --git a/net/rds/send.c b/net/rds/send.c
index fe785ee819dd..3d822bad7de9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -876,13 +876,18 @@ out:
  * rds_message is getting to be quite complicated, and we'd like to allocate
  * it all in one go. This figures out how big it needs to be up front.
  */
-static int rds_rm_size(struct msghdr *msg, int num_sgs)
+static int rds_rm_size(struct msghdr *msg, int num_sgs,
+		       struct rds_iov_vector_arr *vct)
 {
 	struct cmsghdr *cmsg;
 	int size = 0;
 	int cmsg_groups = 0;
 	int retval;
 	bool zcopy_cookie = false;
+	struct rds_iov_vector *iov, *tmp_iov;
+
+	if (num_sgs < 0)
+		return -EINVAL;

 	for_each_cmsghdr(cmsg, msg) {
 		if (!CMSG_OK(msg, cmsg))
@@ -893,8 +898,24 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs)

 		switch (cmsg->cmsg_type) {
 		case RDS_CMSG_RDMA_ARGS:
+			if (vct->indx >= vct->len) {
+				vct->len += vct->incr;
+				tmp_iov =
+					krealloc(vct->vec,
+						 vct->len *
+						 sizeof(struct rds_iov_vector),
+						 GFP_KERNEL);
+				if (!tmp_iov) {
+					vct->len -= vct->incr;
+					return -ENOMEM;
+				}
+				vct->vec = tmp_iov;
+			}
+			iov = &vct->vec[vct->indx];
+			memset(iov, 0, sizeof(struct rds_iov_vector));
+			vct->indx++;
 			cmsg_groups |= 1;
-			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+			retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
 			if (retval < 0)
 				return retval;
 			size += retval;
@@ -951,10 +972,11 @@ static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
 }

 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
-			 struct msghdr *msg, int *allocated_mr)
+			 struct msghdr *msg, int *allocated_mr,
+			 struct rds_iov_vector_arr *vct)
 {
 	struct cmsghdr *cmsg;
-	int ret = 0;
+	int ret = 0, ind = 0;

 	for_each_cmsghdr(cmsg, msg) {
 		if (!CMSG_OK(msg, cmsg))
@@ -968,7 +990,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 		 */
 		switch (cmsg->cmsg_type) {
 		case RDS_CMSG_RDMA_ARGS:
-			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
+			if (ind >= vct->indx)
+				return -ENOMEM;
+			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
+			ind++;
 			break;

 		case RDS_CMSG_RDMA_DEST:
@@ -1084,6 +1109,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 		sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
 	int num_sgs = ceil(payload_len, PAGE_SIZE);
 	int namelen;
+	struct rds_iov_vector_arr vct;
+	int ind;
+
+	memset(&vct, 0, sizeof(vct));
+
+	/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
+	vct.incr = 1;

 	/* Mirror Linux UDP mirror of BSD error message compatibility */
 	/* XXX: Perhaps MSG_MORE someday */
@@ -1220,7 +1252,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
 	}
 	/* size of rm including all sgs */
-	ret = rds_rm_size(msg, num_sgs);
+	ret = rds_rm_size(msg, num_sgs, &vct);
 	if (ret < 0)
 		goto out;

@@ -1232,11 +1264,9 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)

 	/* Attach data to the rm */
 	if (payload_len) {
-		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
-		if (!rm->data.op_sg) {
-			ret = -ENOMEM;
+		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
+		if (!rm->data.op_sg)
 			goto out;
-		}
 		ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
 		if (ret)
 			goto out;
@@ -1270,7 +1300,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	rm->m_conn_path = cpath;

 	/* Parse any control messages the user may have included. */
-	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
 	if (ret) {
 		/* Trigger connection so that its ready for the next retry */
 		if (ret == -EAGAIN)
@@ -1348,9 +1378,18 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	if (ret)
 		goto out;
 	rds_message_put(rm);
+
+	for (ind = 0; ind < vct.indx; ind++)
+		kfree(vct.vec[ind].iov);
+	kfree(vct.vec);
+
 	return payload_len;

out:
+	for (ind = 0; ind < vct.indx; ind++)
+		kfree(vct.vec[ind].iov);
+	kfree(vct.vec);
+
 	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
 	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
 	 * or in any other way, we need to destroy the MR again */
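rds_rm_size() grows vct->vec with krealloc() and rolls vct->len back when the allocation fails, so the bookkeeping never points past what is actually allocated. A userspace sketch of the same grow-on-demand vector, with illustrative names:

/* Grow an array in fixed increments; on failure the tracked length is
 * left matching the real allocation, as the patch does with
 * vct->len -= vct->incr. */
#include <stdlib.h>

struct vec_arr {
    void **vec;
    int len;    /* allocated slots */
    int indx;   /* used slots */
    int incr;   /* growth step */
};

static void *vec_next_slot(struct vec_arr *v)
{
    if (v->indx >= v->len) {
        void **tmp = realloc(v->vec, (v->len + v->incr) * sizeof(*tmp));

        if (!tmp)
            return NULL;    /* v->len unchanged: state stays valid */
        v->vec = tmp;
        v->len += v->incr;
    }
    v->vec[v->indx] = NULL;
    return &v->vec[v->indx++];
}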
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 1eb2e2c31dd5..dad04e710493 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1372,10 +1372,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

 	if (fold) {
-		if (!tc_skip_sw(fold->flags))
-			rhashtable_remove_fast(&fold->mask->ht,
-					       &fold->ht_node,
-					       fold->mask->filter_ht_params);
+		rhashtable_remove_fast(&fold->mask->ht,
+				       &fold->ht_node,
+				       fold->mask->filter_ht_params);
 		if (!tc_skip_hw(fold->flags))
 			fl_hw_destroy_filter(tp, fold, NULL);
 	}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 6e27c62646e9..b9ed271b7ef7 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -101,6 +101,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
 	if (addr) {
 		addr->a.v6.sin6_family = AF_INET6;
 		addr->a.v6.sin6_port = 0;
+		addr->a.v6.sin6_flowinfo = 0;
 		addr->a.v6.sin6_addr = ifa->addr;
 		addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
 		addr->valid = 1;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 63f08b4e51d6..c4da4a78d369 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -147,8 +147,14 @@ static int smc_release(struct socket *sock)
 		sk->sk_shutdown |= SHUTDOWN_MASK;
 	}
 	if (smc->clcsock) {
+		if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+			/* wake up clcsock accept */
+			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+		}
+		mutex_lock(&smc->clcsock_release_lock);
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
+		mutex_unlock(&smc->clcsock_release_lock);
 	}
 	if (smc->use_fallback) {
 		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
@@ -205,6 +211,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
 	spin_lock_init(&smc->conn.send_lock);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
+	mutex_init(&smc->clcsock_release_lock);

 	return sk;
 }
@@ -824,7 +831,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 	struct socket *new_clcsock = NULL;
 	struct sock *lsk = &lsmc->sk;
 	struct sock *new_sk;
-	int rc;
+	int rc = -EINVAL;

 	release_sock(lsk);
 	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
@@ -837,7 +844,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 	}
 	*new_smc = smc_sk(new_sk);

-	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+	mutex_lock(&lsmc->clcsock_release_lock);
+	if (lsmc->clcsock)
+		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+	mutex_unlock(&lsmc->clcsock_release_lock);
 	lock_sock(lsk);
 	if (rc < 0)
 		lsk->sk_err = -rc;
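clcsock_release_lock makes smc_release() and smc_clcsock_accept() agree on the clcsock pointer: it is only dereferenced and only cleared under the mutex, and rc starts at -EINVAL for the case where release wins the race. A pthreads sketch of the pattern (names illustrative, not SMC's):

/* Dereference and clear a shared pointer only under the same mutex. */
#include <pthread.h>
#include <stddef.h>

struct conn {
    pthread_mutex_t release_lock;
    int *inner;     /* stands in for smc->clcsock */
};

static int conn_use(struct conn *c)
{
    int rc = -1;    /* mirrors rc = -EINVAL when inner is already gone */

    pthread_mutex_lock(&c->release_lock);
    if (c->inner)
        rc = *c->inner;   /* safe: release cannot run concurrently */
    pthread_mutex_unlock(&c->release_lock);
    return rc;
}

static void conn_release(struct conn *c)
{
    pthread_mutex_lock(&c->release_lock);
    c->inner = NULL;    /* users above now see NULL, not a freed object */
    pthread_mutex_unlock(&c->release_lock);
}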
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 08786ace6010..5721416d0605 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -219,6 +219,10 @@ struct smc_sock {				/* smc sock container */
 						 * started, waiting for unsent
 						 * data to be sent
 						 */
+	struct mutex            clcsock_release_lock;
+						/* protects clcsock of a listen
+						 * socket
+						 * */
 };

 static inline struct smc_sock *smc_sk(const struct sock *sk)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c6782aa47525..24cbddc44c88 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1952,6 +1952,7 @@ call_connect_status(struct rpc_task *task)
 		/* retry with existing socket, after a delay */
 		rpc_delay(task, 3*HZ);
 		/* fall through */
+	case -ENOTCONN:
 	case -EAGAIN:
 		/* Check for timeouts before looping back to call_bind */
 	case -ETIMEDOUT:
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ce927002862a..73547d17d3c6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -67,7 +67,6 @@
  */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
-static void	xprt_connect_status(struct rpc_task *task);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
@@ -680,7 +679,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
 	/* Try to schedule an autoclose RPC call */
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
 		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
-	xprt_wake_pending_tasks(xprt, -EAGAIN);
+	else if (xprt->snd_task)
+		rpc_wake_up_queued_task_set_status(&xprt->pending,
+				xprt->snd_task, -ENOTCONN);
 	spin_unlock_bh(&xprt->transport_lock);
 }
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
@@ -820,7 +821,7 @@ void xprt_connect(struct rpc_task *task)
 	if (!xprt_connected(xprt)) {
 		task->tk_timeout = task->tk_rqstp->rq_timeout;
 		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
-		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
+		rpc_sleep_on(&xprt->pending, task, NULL);

 		if (test_bit(XPRT_CLOSING, &xprt->state))
 			return;
@@ -839,34 +840,6 @@ void xprt_connect(struct rpc_task *task)
 	xprt_release_write(xprt, task);
 }

-static void xprt_connect_status(struct rpc_task *task)
-{
-	switch (task->tk_status) {
-	case 0:
-		dprintk("RPC: %5u xprt_connect_status: connection established\n",
-				task->tk_pid);
-		break;
-	case -ECONNREFUSED:
-	case -ECONNRESET:
-	case -ECONNABORTED:
-	case -ENETUNREACH:
-	case -EHOSTUNREACH:
-	case -EPIPE:
-	case -EAGAIN:
-		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
-		break;
-	case -ETIMEDOUT:
-		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
-				"out\n", task->tk_pid);
-		break;
-	default:
-		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
-				"server %s\n", task->tk_pid, -task->tk_status,
-				task->tk_rqstp->rq_xprt->servername);
-		task->tk_status = -EIO;
-	}
-}
-
enum xprt_xid_rb_cmp {
 	XID_RB_EQUAL,
 	XID_RB_LEFT,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 8a5e823e0b33..f0b3700cec95 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1217,6 +1217,8 @@ static void xs_reset_transport(struct sock_xprt *transport)

 	trace_rpc_socket_close(xprt, sock);
 	sock_release(sock);
+
+	xprt_disconnect_done(xprt);
 }

 /**
@@ -1237,8 +1239,6 @@ static void xs_close(struct rpc_xprt *xprt)

 	xs_reset_transport(transport);
 	xprt->reestablish_timeout = 0;
-
-	xprt_disconnect_done(xprt);
 }

static void xs_inject_disconnect(struct rpc_xprt *xprt)
@@ -1489,8 +1489,6 @@ static void xs_tcp_state_change(struct sock *sk)
 					&transport->sock_state))
 				xprt_clear_connecting(xprt);
 			clear_bit(XPRT_CLOSING, &xprt->state);
-			if (sk->sk_err)
-				xprt_wake_pending_tasks(xprt, -sk->sk_err);
 			/* Trigger the socket release */
 			xs_tcp_force_close(xprt);
 		}
@@ -2092,8 +2090,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
 	trace_rpc_socket_connect(xprt, sock, 0);
 	status = 0;
out:
-	xprt_unlock_connect(xprt, transport);
 	xprt_clear_connecting(xprt);
+	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
 }

@@ -2329,8 +2327,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 	}
 	status = -EAGAIN;
out:
-	xprt_unlock_connect(xprt, transport);
 	xprt_clear_connecting(xprt);
+	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
 }

diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 291d6bbe85f4..1217c90a363b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -889,7 +889,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
 	struct net *net = sock_net(sk);
 	struct tipc_member *mb = NULL;
 	u32 node, port;
@@ -903,7 +902,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
 	/* Block or return if destination link or member is congested */
 	rc = tipc_wait_for_cond(sock, &timeout,
 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
-				!tipc_group_cong(grp, node, port, blks, &mb));
+				tsk->group &&
+				!tipc_group_cong(tsk->group, node, port, blks,
+						 &mb));
 	if (unlikely(rc))
 		return rc;

@@ -933,7 +934,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct list_head *cong_links = &tsk->cong_links;
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
-	struct tipc_group *grp = tsk->group;
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_member *first = NULL;
 	struct tipc_member *mbr = NULL;
@@ -950,9 +950,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	type = msg_nametype(hdr);
 	inst = dest->addr.name.name.instance;
 	scope = msg_lookup_scope(hdr);
-	exclude = tipc_group_exclude(grp);

 	while (++lookups < 4) {
+		exclude = tipc_group_exclude(tsk->group);
+
 		first = NULL;

 		/* Look for a non-congested destination member, if any */
@@ -961,7 +962,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 					 &dstcnt, exclude, false))
 			return -EHOSTUNREACH;
 		tipc_dest_pop(&dsts, &node, &port);
-		cong = tipc_group_cong(grp, node, port, blks, &mbr);
+		cong = tipc_group_cong(tsk->group, node, port, blks,
+				       &mbr);
 		if (!cong)
 			break;
 		if (mbr == first)
@@ -980,7 +982,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	/* Block or return if destination link or member is congested */
 	rc = tipc_wait_for_cond(sock, &timeout,
 				!tipc_dest_find(cong_links, node, 0) &&
-				!tipc_group_cong(grp, node, port,
+				tsk->group &&
+				!tipc_group_cong(tsk->group, node, port,
 						 blks, &mbr));
 	if (unlikely(rc))
 		return rc;
@@ -1015,8 +1018,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 	struct sock *sk = sock->sk;
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
-	struct tipc_nlist *dsts = tipc_group_dests(grp);
+	struct tipc_nlist *dsts;
 	struct tipc_mc_method *method = &tsk->mc_method;
 	bool ack = method->mandatory && method->rcast;
 	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1025,15 +1027,17 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 	struct sk_buff_head pkts;
 	int rc = -EHOSTUNREACH;

-	if (!dsts->local && !dsts->remote)
-		return -EHOSTUNREACH;
-
 	/* Block or return if any destination link or member is congested */
-	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
-				!tipc_group_bc_cong(grp, blks));
+	rc = tipc_wait_for_cond(sock, &timeout,
+				!tsk->cong_link_cnt && tsk->group &&
+				!tipc_group_bc_cong(tsk->group, blks));
 	if (unlikely(rc))
 		return rc;

+	dsts = tipc_group_dests(tsk->group);
+	if (!dsts->local && !dsts->remote)
+		return -EHOSTUNREACH;
+
 	/* Complete message header */
 	if (dest) {
 		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
@@ -1045,7 +1049,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
 	msg_set_destport(hdr, 0);
 	msg_set_destnode(hdr, 0);
-	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

 	/* Avoid getting stuck with repeated forced replicasts */
 	msg_set_grp_bc_ack_req(hdr, ack);
@@ -2757,11 +2761,15 @@ void tipc_sk_reinit(struct net *net)
 	rhashtable_walk_start(&iter);

 	while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
-		spin_lock_bh(&tsk->sk.sk_lock.slock);
+		sock_hold(&tsk->sk);
+		rhashtable_walk_stop(&iter);
+		lock_sock(&tsk->sk);
 		msg = &tsk->phdr;
 		msg_set_prevnode(msg, tipc_own_addr(net));
 		msg_set_orignode(msg, tipc_own_addr(net));
-		spin_unlock_bh(&tsk->sk.sk_lock.slock);
+		release_sock(&tsk->sk);
+		rhashtable_walk_start(&iter);
+		sock_put(&tsk->sk);
 	}

 	rhashtable_walk_stop(&iter);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 10dc59ce9c82..4d85d71f16e2 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -245,10 +245,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
 		}

 		err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
-		if (err) {
-			kfree_skb(_skb);
+		if (err)
 			goto out;
-		}
 	}
 	err = 0;
out:
@@ -681,6 +679,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 	if (err)
 		goto err;

+	if (remote.proto != local.proto) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	/* Checking remote ip address */
 	rmcast = tipc_udp_is_mcast_addr(&remote);

diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 311cec8e533d..28887cf628b8 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -56,7 +56,7 @@ enum {
static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static LIST_HEAD(device_list);
-static DEFINE_MUTEX(device_mutex);
+static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;

@@ -538,11 +538,14 @@ static struct tls_context *create_ctx(struct sock *sk)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tls_context *ctx;

-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
 	if (!ctx)
 		return NULL;

 	icsk->icsk_ulp_data = ctx;
+	ctx->setsockopt = sk->sk_prot->setsockopt;
+	ctx->getsockopt = sk->sk_prot->getsockopt;
+	ctx->sk_proto_close = sk->sk_prot->close;
 	return ctx;
 }

@@ -552,7 +555,7 @@ static int tls_hw_prot(struct sock *sk)
 	struct tls_device *dev;
 	int rc = 0;

-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_for_each_entry(dev, &device_list, dev_list) {
 		if (dev->feature && dev->feature(dev)) {
 			ctx = create_ctx(sk);
@@ -570,7 +573,7 @@ static int tls_hw_prot(struct sock *sk)
 		}
 	}
out:
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);
 	return rc;
 }

@@ -579,12 +582,17 @@ static void tls_hw_unhash(struct sock *sk)
 	struct tls_context *ctx = tls_get_ctx(sk);
 	struct tls_device *dev;

-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_for_each_entry(dev, &device_list, dev_list) {
-		if (dev->unhash)
+		if (dev->unhash) {
+			kref_get(&dev->kref);
+			spin_unlock_bh(&device_spinlock);
 			dev->unhash(dev, sk);
+			kref_put(&dev->kref, dev->release);
+			spin_lock_bh(&device_spinlock);
+		}
 	}
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);
 	ctx->unhash(sk);
 }

@@ -595,12 +603,17 @@ static int tls_hw_hash(struct sock *sk)
 	int err;

 	err = ctx->hash(sk);
-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_for_each_entry(dev, &device_list, dev_list) {
-		if (dev->hash)
+		if (dev->hash) {
+			kref_get(&dev->kref);
+			spin_unlock_bh(&device_spinlock);
 			err |= dev->hash(dev, sk);
+			kref_put(&dev->kref, dev->release);
+			spin_lock_bh(&device_spinlock);
+		}
 	}
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);

 	if (err)
 		tls_hw_unhash(sk);
@@ -675,9 +688,6 @@ static int tls_init(struct sock *sk)
 		rc = -ENOMEM;
 		goto out;
 	}
-	ctx->setsockopt = sk->sk_prot->setsockopt;
-	ctx->getsockopt = sk->sk_prot->getsockopt;
-	ctx->sk_proto_close = sk->sk_prot->close;

 	/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
 	if (ip_ver == TLSV6 &&
@@ -699,17 +709,17 @@ out:

void tls_register_device(struct tls_device *device)
{
-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_add_tail(&device->dev_list, &device_list);
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
-	mutex_lock(&device_mutex);
+	spin_lock_bh(&device_spinlock);
 	list_del(&device->dev_list);
-	mutex_unlock(&device_mutex);
+	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

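With the list lock now a BH spinlock, the hash/unhash callbacks can no longer run under it; the hunks above pin the device with kref_get(), drop the lock around the callback, then re-take it. A userspace sketch of that pin, drop, call, reacquire shape, using C11 atomics in place of kref:

/* Hold a refcount across a callback invoked outside the list lock. */
#include <pthread.h>
#include <stdatomic.h>

struct dev {
    atomic_int ref;
    void (*cb)(struct dev *);
    void (*release)(struct dev *);
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void dev_call(struct dev *d)
{
    pthread_mutex_lock(&list_lock);
    if (d->cb) {
        atomic_fetch_add(&d->ref, 1);    /* kref_get */
        pthread_mutex_unlock(&list_lock);
        d->cb(d);                        /* runs without the lock held */
        if (atomic_fetch_sub(&d->ref, 1) == 1)
            d->release(d);               /* kref_put */
        pthread_mutex_lock(&list_lock);
    }
    pthread_mutex_unlock(&list_lock);
}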
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ab27a2872935..43a1dec08825 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -107,6 +107,7 @@
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
+#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
@@ -504,9 +505,13 @@ out:
static int __vsock_bind_stream(struct vsock_sock *vsk,
			       struct sockaddr_vm *addr)
{
-	static u32 port = LAST_RESERVED_PORT + 1;
+	static u32 port = 0;
 	struct sockaddr_vm new_addr;

+	if (!port)
+		port = LAST_RESERVED_PORT + 1 +
+			prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+
 	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

 	if (addr->svm_port == VMADDR_PORT_ANY) {
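Starting the ephemeral scan at a random offset above LAST_RESERVED_PORT makes local port numbers harder to guess than the old fixed LAST_RESERVED_PORT + 1 start. A userspace sketch; the toy PRNG stands in for the kernel's prandom_u32_max(), which returns a value in [0, n):

/* Randomize the first candidate for an ephemeral-port scan. */
#include <stdint.h>
#include <stdlib.h>

#define LAST_RESERVED_PORT 1023u

static uint32_t prandom_u32_max(uint32_t n)
{
    return (uint32_t)(((uint64_t)rand() * n) >> 31);   /* toy PRNG */
}

static uint32_t first_ephemeral_port(void)
{
    static uint32_t port;

    if (!port)
        port = LAST_RESERVED_PORT + 1 +
               prandom_u32_max(UINT32_MAX - LAST_RESERVED_PORT);
    return port;
}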
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index cb332adb84cd..c361ce782412 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -264,6 +264,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
}

static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+				      struct sockaddr_vm *dst,
+				      enum vmci_transport_packet_type type,
+				      u64 size,
+				      u64 mode,
+				      struct vmci_transport_waiting_info *wait,
+				      u16 proto,
+				      struct vmci_handle handle)
+{
+	struct vmci_transport_packet *pkt;
+	int err;
+
+	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+						mode, wait, proto, handle,
+						true);
+	kfree(pkt);
+
+	return err;
+}
+
+static int
vmci_transport_send_control_pkt(struct sock *sk,
				enum vmci_transport_packet_type type,
				u64 size,
@@ -272,9 +297,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
				u16 proto,
				struct vmci_handle handle)
{
-	struct vmci_transport_packet *pkt;
 	struct vsock_sock *vsk;
-	int err;

 	vsk = vsock_sk(sk);

@@ -284,17 +307,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
 	if (!vsock_addr_bound(&vsk->remote_addr))
 		return -EINVAL;

-	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
-	if (!pkt)
-		return -ENOMEM;
-
-	err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
-						&vsk->remote_addr, type, size,
-						mode, wait, proto, handle,
-						true);
-	kfree(pkt);
-
-	return err;
+	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+						     &vsk->remote_addr,
+						     type, size, mode,
+						     wait, proto, handle);
}

static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -312,12 +328,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
static int vmci_transport_send_reset(struct sock *sk,
				     struct vmci_transport_packet *pkt)
{
+	struct sockaddr_vm *dst_ptr;
+	struct sockaddr_vm dst;
+	struct vsock_sock *vsk;
+
 	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
 		return 0;
-	return vmci_transport_send_control_pkt(sk,
-					VMCI_TRANSPORT_PACKET_TYPE_RST,
-					0, 0, NULL, VSOCK_PROTO_INVALID,
-					VMCI_INVALID_HANDLE);
+
+	vsk = vsock_sk(sk);
+
+	if (!vsock_addr_bound(&vsk->local_addr))
+		return -EINVAL;
+
+	if (vsock_addr_bound(&vsk->remote_addr)) {
+		dst_ptr = &vsk->remote_addr;
+	} else {
+		vsock_addr_init(&dst, pkt->dg.src.context,
+				pkt->src_port);
+		dst_ptr = &dst;
+	}
+	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+				     VMCI_TRANSPORT_PACKET_TYPE_RST,
+				     0, 0, NULL, VSOCK_PROTO_INVALID,
+				     VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 10ec05589795..5e49492d5911 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -9152,8 +9152,10 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
9152 if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) { 9152 if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
9153 int r = validate_pae_over_nl80211(rdev, info); 9153 int r = validate_pae_over_nl80211(rdev, info);
9154 9154
9155 if (r < 0) 9155 if (r < 0) {
9156 kzfree(connkeys);
9156 return r; 9157 return r;
9158 }
9157 9159
9158 ibss.control_port_over_nl80211 = true; 9160 ibss.control_port_over_nl80211 = true;
9159 } 9161 }
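
The extra kzfree() plugs a connkeys leak on the new error path, and kzfree() rather than kfree() is used because the buffer can hold key material and should be wiped before the memory is returned. A minimal sketch of the pattern, with illustrative names:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_keys {
            u8 key[16];
    };

    static int example_validate(void)
    {
            return -EINVAL;                 /* illustrative failure */
    }

    static int example_join(void)
    {
            struct example_keys *keys;
            int r;

            keys = kzalloc(sizeof(*keys), GFP_KERNEL);
            if (!keys)
                    return -ENOMEM;

            r = example_validate();
            if (r < 0) {
                    kzfree(keys);           /* zeroes the key material, then frees */
                    return r;
            }

            kzfree(keys);
            return 0;
    }
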
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 6bc817359b58..b3b613660d44 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -315,6 +315,12 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
315 315
316 sp->xvec[sp->len++] = x; 316 sp->xvec[sp->len++] = x;
317 317
318 skb_dst_force(skb);
319 if (!skb_dst(skb)) {
320 XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
321 goto drop;
322 }
323
318lock: 324lock:
319 spin_lock(&x->lock); 325 spin_lock(&x->lock);
320 326
@@ -354,7 +360,6 @@ lock:
354 XFRM_SKB_CB(skb)->seq.input.low = seq; 360 XFRM_SKB_CB(skb)->seq.input.low = seq;
355 XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; 361 XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
356 362
357 skb_dst_force(skb);
358 dev_hold(skb->dev); 363 dev_hold(skb->dev);
359 364
360 if (crypto_done) 365 if (crypto_done)
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 757c4d11983b..9333153bafda 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -102,6 +102,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
102 skb_dst_force(skb); 102 skb_dst_force(skb);
103 if (!skb_dst(skb)) { 103 if (!skb_dst(skb)) {
104 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); 104 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
105 err = -EHOSTUNREACH;
105 goto error_nolock; 106 goto error_nolock;
106 } 107 }
107 108
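
The added line gives the no-route failure a real error code: xfrm_output_one()'s error path returns err, and previously this branch reached it with whatever value err already held. A runnable reduction of the rule that every jump to an error label carries an explicit error code:

    #include <errno.h>
    #include <stdio.h>

    /* Every branch to the error label must set err first; a bare goto
     * would report whatever stale value err happened to hold.
     */
    static int example_output(int have_route)
    {
            int err = 0;

            if (!have_route) {
                    err = -EHOSTUNREACH;
                    goto error;
            }
            return 0;
    error:
            return err;
    }

    int main(void)
    {
            printf("%d\n", example_output(0));      /* prints a negative errno */
            return 0;
    }
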
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index dc4a9f1fb941..23c92891758a 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -426,6 +426,12 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
426 module_put(mode->owner); 426 module_put(mode->owner);
427} 427}
428 428
429void xfrm_state_free(struct xfrm_state *x)
430{
431 kmem_cache_free(xfrm_state_cache, x);
432}
433EXPORT_SYMBOL(xfrm_state_free);
434
429static void xfrm_state_gc_destroy(struct xfrm_state *x) 435static void xfrm_state_gc_destroy(struct xfrm_state *x)
430{ 436{
431 tasklet_hrtimer_cancel(&x->mtimer); 437 tasklet_hrtimer_cancel(&x->mtimer);
@@ -452,7 +458,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
452 } 458 }
453 xfrm_dev_state_free(x); 459 xfrm_dev_state_free(x);
454 security_xfrm_state_free(x); 460 security_xfrm_state_free(x);
455 kmem_cache_free(xfrm_state_cache, x); 461 xfrm_state_free(x);
456} 462}
457 463
458static void xfrm_state_gc_task(struct work_struct *work) 464static void xfrm_state_gc_task(struct work_struct *work)
@@ -788,7 +794,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
788{ 794{
789 spin_lock_bh(&net->xfrm.xfrm_state_lock); 795 spin_lock_bh(&net->xfrm.xfrm_state_lock);
790 si->sadcnt = net->xfrm.state_num; 796 si->sadcnt = net->xfrm.state_num;
791 si->sadhcnt = net->xfrm.state_hmask; 797 si->sadhcnt = net->xfrm.state_hmask + 1;
792 si->sadhmcnt = xfrm_state_hashmax; 798 si->sadhmcnt = xfrm_state_hashmax;
793 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 799 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
794} 800}
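
struct xfrm_state lives in the dedicated xfrm_state_cache, so it must be released with kmem_cache_free(); the new exported xfrm_state_free() gives xfrm_add_acquire() a correct replacement for its kfree() calls, which were returning cache memory through the wrong allocator. A minimal sketch of the alloc/free pairing rule (illustrative names):

    #include <linux/init.h>
    #include <linux/slab.h>

    struct example_state {
            int dummy;
    };

    static struct kmem_cache *example_cache;

    static int __init example_init(void)
    {
            example_cache = kmem_cache_create("example_state",
                                              sizeof(struct example_state),
                                              0, 0, NULL);
            return example_cache ? 0 : -ENOMEM;
    }

    static void example_use(void)
    {
            struct example_state *x;

            x = kmem_cache_zalloc(example_cache, GFP_KERNEL);
            if (!x)
                    return;
            /* ... use x ... */
            kmem_cache_free(example_cache, x);      /* never plain kfree(x) */
    }
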
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c9a84e22f5d5..277c1c46fe94 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2288,13 +2288,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2288 2288
2289 } 2289 }
2290 2290
2291 kfree(x); 2291 xfrm_state_free(x);
2292 kfree(xp); 2292 kfree(xp);
2293 2293
2294 return 0; 2294 return 0;
2295 2295
2296free_state: 2296free_state:
2297 kfree(x); 2297 xfrm_state_free(x);
2298nomem: 2298nomem:
2299 return err; 2299 return err;
2300} 2300}
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 8081b6cf67d2..34414c6efad6 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -47,8 +47,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
47 $xs = "[0-9a-f ]"; # hex character or space 47 $xs = "[0-9a-f ]"; # hex character or space
48 $funcre = qr/^$x* <(.*)>:$/; 48 $funcre = qr/^$x* <(.*)>:$/;
49 if ($arch eq 'aarch64') { 49 if ($arch eq 'aarch64') {
50 #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]! 50 #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
51 $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o; 51 $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
52 } elsif ($arch eq 'arm') { 52 } elsif ($arch eq 'arm') {
53 #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64 53 #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
54 $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o; 54 $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 5056fb3b897d..e559c6294c39 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -168,6 +168,7 @@ class id_parser(object):
168 self.curline = 0 168 self.curline = 0
169 try: 169 try:
170 for line in fd: 170 for line in fd:
171 line = line.decode(locale.getpreferredencoding(False), errors='ignore')
171 self.curline += 1 172 self.curline += 1
172 if self.curline > maxlines: 173 if self.curline > maxlines:
173 break 174 break
@@ -249,12 +250,13 @@ if __name__ == '__main__':
249 250
250 try: 251 try:
251 if len(args.path) and args.path[0] == '-': 252 if len(args.path) and args.path[0] == '-':
252 parser.parse_lines(sys.stdin, args.maxlines, '-') 253 stdin = os.fdopen(sys.stdin.fileno(), 'rb')
254 parser.parse_lines(stdin, args.maxlines, '-')
253 else: 255 else:
254 if args.path: 256 if args.path:
255 for p in args.path: 257 for p in args.path:
256 if os.path.isfile(p): 258 if os.path.isfile(p):
257 parser.parse_lines(open(p), args.maxlines, p) 259 parser.parse_lines(open(p, 'rb'), args.maxlines, p)
258 elif os.path.isdir(p): 260 elif os.path.isdir(p):
259 scan_git_subtree(repo.head.reference.commit.tree, p) 261 scan_git_subtree(repo.head.reference.commit.tree, p)
260 else: 262 else:
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 8c9499867c91..7489cb7de6dc 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -580,9 +580,9 @@ void ima_update_policy(void)
580 ima_update_policy_flag(); 580 ima_update_policy_flag();
581} 581}
582 582
583/* Keep the enumeration in sync with the policy_tokens! */
583enum { 584enum {
584 Opt_err = -1, 585 Opt_measure, Opt_dont_measure,
585 Opt_measure = 1, Opt_dont_measure,
586 Opt_appraise, Opt_dont_appraise, 586 Opt_appraise, Opt_dont_appraise,
587 Opt_audit, Opt_hash, Opt_dont_hash, 587 Opt_audit, Opt_hash, Opt_dont_hash,
588 Opt_obj_user, Opt_obj_role, Opt_obj_type, 588 Opt_obj_user, Opt_obj_role, Opt_obj_type,
@@ -592,10 +592,10 @@ enum {
592 Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt, 592 Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
593 Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt, 593 Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
594 Opt_appraise_type, Opt_permit_directio, 594 Opt_appraise_type, Opt_permit_directio,
595 Opt_pcr 595 Opt_pcr, Opt_err
596}; 596};
597 597
598static match_table_t policy_tokens = { 598static const match_table_t policy_tokens = {
599 {Opt_measure, "measure"}, 599 {Opt_measure, "measure"},
600 {Opt_dont_measure, "dont_measure"}, 600 {Opt_dont_measure, "dont_measure"},
601 {Opt_appraise, "appraise"}, 601 {Opt_appraise, "appraise"},
@@ -1103,7 +1103,7 @@ void ima_policy_stop(struct seq_file *m, void *v)
1103{ 1103{
1104} 1104}
1105 1105
1106#define pt(token) policy_tokens[token + Opt_err].pattern 1106#define pt(token) policy_tokens[token].pattern
1107#define mt(token) mask_tokens[token] 1107#define mt(token) mask_tokens[token]
1108 1108
1109/* 1109/*
diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
index 783978842f13..70e65a2ff207 100644
--- a/security/keys/keyctl_pkey.c
+++ b/security/keys/keyctl_pkey.c
@@ -25,7 +25,7 @@ static void keyctl_pkey_params_free(struct kernel_pkey_params *params)
25} 25}
26 26
27enum { 27enum {
28 Opt_err = -1, 28 Opt_err,
29 Opt_enc, /* "enc=<encoding>" eg. "enc=oaep" */ 29 Opt_enc, /* "enc=<encoding>" eg. "enc=oaep" */
30 Opt_hash, /* "hash=<digest-name>" eg. "hash=sha1" */ 30 Opt_hash, /* "hash=<digest-name>" eg. "hash=sha1" */
31}; 31};
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index ff6789365a12..697bfc6c8192 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -711,7 +711,7 @@ static int key_unseal(struct trusted_key_payload *p,
711} 711}
712 712
713enum { 713enum {
714 Opt_err = -1, 714 Opt_err,
715 Opt_new, Opt_load, Opt_update, 715 Opt_new, Opt_load, Opt_update,
716 Opt_keyhandle, Opt_keyauth, Opt_blobauth, 716 Opt_keyhandle, Opt_keyauth, Opt_blobauth,
717 Opt_pcrinfo, Opt_pcrlock, Opt_migratable, 717 Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index 64c3cb0fb926..654a50319198 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -30,7 +30,7 @@ static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate,
30 int err; 30 int err;
31 31
32 err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST, 32 err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
33 FF400_SYNC_STATUS, &reg, sizeof(reg), 0); 33 FF400_CLOCK_CONFIG, &reg, sizeof(reg), 0);
34 if (err < 0) 34 if (err < 0)
35 return err; 35 return err;
36 data = le32_to_cpu(reg); 36 data = le32_to_cpu(reg);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8d75597028ee..15021c839372 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5520,6 +5520,9 @@ enum {
5520 ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, 5520 ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
5521 ALC295_FIXUP_HP_AUTO_MUTE, 5521 ALC295_FIXUP_HP_AUTO_MUTE,
5522 ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE, 5522 ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
5523 ALC294_FIXUP_ASUS_MIC,
5524 ALC294_FIXUP_ASUS_HEADSET_MIC,
5525 ALC294_FIXUP_ASUS_SPK,
5523}; 5526};
5524 5527
5525static const struct hda_fixup alc269_fixups[] = { 5528static const struct hda_fixup alc269_fixups[] = {
@@ -6392,6 +6395,8 @@ static const struct hda_fixup alc269_fixups[] = {
6392 [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = { 6395 [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
6393 .type = HDA_FIXUP_FUNC, 6396 .type = HDA_FIXUP_FUNC,
6394 .v.func = alc285_fixup_invalidate_dacs, 6397 .v.func = alc285_fixup_invalidate_dacs,
6398 .chained = true,
6399 .chain_id = ALC269_FIXUP_THINKPAD_ACPI
6395 }, 6400 },
6396 [ALC295_FIXUP_HP_AUTO_MUTE] = { 6401 [ALC295_FIXUP_HP_AUTO_MUTE] = {
6397 .type = HDA_FIXUP_FUNC, 6402 .type = HDA_FIXUP_FUNC,
@@ -6406,6 +6411,36 @@ static const struct hda_fixup alc269_fixups[] = {
6406 .chained = true, 6411 .chained = true,
6407 .chain_id = ALC269_FIXUP_HEADSET_MIC 6412 .chain_id = ALC269_FIXUP_HEADSET_MIC
6408 }, 6413 },
6414 [ALC294_FIXUP_ASUS_MIC] = {
6415 .type = HDA_FIXUP_PINS,
6416 .v.pins = (const struct hda_pintbl[]) {
6417 { 0x13, 0x90a60160 }, /* use as internal mic */
6418 { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
6419 { }
6420 },
6421 .chained = true,
6422 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6423 },
6424 [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
6425 .type = HDA_FIXUP_PINS,
6426 .v.pins = (const struct hda_pintbl[]) {
6427 { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
6428 { }
6429 },
6430 .chained = true,
6431 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6432 },
6433 [ALC294_FIXUP_ASUS_SPK] = {
6434 .type = HDA_FIXUP_VERBS,
6435 .v.verbs = (const struct hda_verb[]) {
6436 /* Set EAPD high */
6437 { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
6438 { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
6439 { }
6440 },
6441 .chained = true,
6442 .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
6443 },
6409}; 6444};
6410 6445
6411static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6446static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6548,6 +6583,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6548 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), 6583 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
6549 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), 6584 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
6550 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), 6585 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
6586 SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
6551 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 6587 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
6552 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 6588 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
6553 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 6589 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -7155,6 +7191,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7155 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 7191 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
7156 ALC292_STANDARD_PINS, 7192 ALC292_STANDARD_PINS,
7157 {0x13, 0x90a60140}), 7193 {0x13, 0x90a60140}),
7194 SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
7195 {0x14, 0x90170110},
7196 {0x1b, 0x90a70130},
7197 {0x21, 0x04211020}),
7198 SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
7199 {0x12, 0x90a60130},
7200 {0x17, 0x90170110},
7201 {0x21, 0x04211020}),
7158 SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 7202 SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
7159 ALC295_STANDARD_PINS, 7203 ALC295_STANDARD_PINS,
7160 {0x17, 0x21014020}, 7204 {0x17, 0x21014020},
@@ -7227,6 +7271,37 @@ static void alc269_fill_coef(struct hda_codec *codec)
7227 alc_update_coef_idx(codec, 0x4, 0, 1<<11); 7271 alc_update_coef_idx(codec, 0x4, 0, 1<<11);
7228} 7272}
7229 7273
7274static void alc294_hp_init(struct hda_codec *codec)
7275{
7276 struct alc_spec *spec = codec->spec;
7277 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
7278 int i, val;
7279
7280 if (!hp_pin)
7281 return;
7282
7283 snd_hda_codec_write(codec, hp_pin, 0,
7284 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
7285
7286 msleep(100);
7287
7288 snd_hda_codec_write(codec, hp_pin, 0,
7289 AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
7290
7291 alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
7292 alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
7293
7294 /* Wait for depop procedure finish */
7295 val = alc_read_coefex_idx(codec, 0x58, 0x01);
7296 for (i = 0; i < 20 && val & 0x0080; i++) {
7297 msleep(50);
7298 val = alc_read_coefex_idx(codec, 0x58, 0x01);
7299 }
7300 /* Set HP depop to auto mode */
7301 alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
7302 msleep(50);
7303}
7304
7230/* 7305/*
7231 */ 7306 */
7232static int patch_alc269(struct hda_codec *codec) 7307static int patch_alc269(struct hda_codec *codec)
@@ -7352,6 +7427,7 @@ static int patch_alc269(struct hda_codec *codec)
7352 spec->codec_variant = ALC269_TYPE_ALC294; 7427 spec->codec_variant = ALC269_TYPE_ALC294;
7353 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ 7428 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
7354 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ 7429 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
7430 alc294_hp_init(codec);
7355 break; 7431 break;
7356 case 0x10ec0300: 7432 case 0x10ec0300:
7357 spec->codec_variant = ALC269_TYPE_ALC300; 7433 spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7363,6 +7439,7 @@ static int patch_alc269(struct hda_codec *codec)
7363 spec->codec_variant = ALC269_TYPE_ALC700; 7439 spec->codec_variant = ALC269_TYPE_ALC700;
7364 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ 7440 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
7365 alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ 7441 alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
7442 alc294_hp_init(codec);
7366 break; 7443 break;
7367 7444
7368 } 7445 }
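
alc294_hp_init() mutes the headphone pin, starts the depop procedure in manual mode, and then polls a coefficient busy bit, giving the hardware at most 20 x 50 ms before forcing depop back to auto mode. The bounded-poll idiom reduced to a skeleton; read_busy() stands in for the alc_read_coefex_idx() read:

    #include <linux/delay.h>

    /* Poll a busy flag with an upper bound so wedged hardware cannot
     * stall the caller forever (here: up to 20 tries, 50 ms apart).
     */
    static void example_wait_idle(int (*read_busy)(void))
    {
            int i, val = read_busy();

            for (i = 0; i < 20 && (val & 0x0080); i++) {
                    msleep(50);
                    val = read_busy();
            }
    }
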
diff --git a/tools/include/uapi/linux/netlink.h b/tools/include/uapi/linux/netlink.h
index 486ed1f0c0bc..0a4d73317759 100644
--- a/tools/include/uapi/linux/netlink.h
+++ b/tools/include/uapi/linux/netlink.h
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
155#define NETLINK_LIST_MEMBERSHIPS 9 155#define NETLINK_LIST_MEMBERSHIPS 9
156#define NETLINK_CAP_ACK 10 156#define NETLINK_CAP_ACK 10
157#define NETLINK_EXT_ACK 11 157#define NETLINK_EXT_ACK 11
158#define NETLINK_DUMP_STRICT_CHK 12 158#define NETLINK_GET_STRICT_CHK 12
159 159
160struct nl_pktinfo { 160struct nl_pktinfo {
161 __u32 group; 161 __u32 group;
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index acf1afa01c5b..397d6b612502 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -7,6 +7,7 @@ LDLIBS+= -lpthread -lurcu
7TARGETS = main idr-test multiorder xarray 7TARGETS = main idr-test multiorder xarray
8CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o 8CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
9OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ 9OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
10 regression4.o \
10 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o 11 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
11 12
12ifndef SHIFT 13ifndef SHIFT
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index 77a44c54998f..7a22d6e3732e 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -308,6 +308,7 @@ int main(int argc, char **argv)
308 regression1_test(); 308 regression1_test();
309 regression2_test(); 309 regression2_test();
310 regression3_test(); 310 regression3_test();
311 regression4_test();
311 iteration_test(0, 10 + 90 * long_run); 312 iteration_test(0, 10 + 90 * long_run);
312 iteration_test(7, 10 + 90 * long_run); 313 iteration_test(7, 10 + 90 * long_run);
313 single_thread_tests(long_run); 314 single_thread_tests(long_run);
diff --git a/tools/testing/radix-tree/regression.h b/tools/testing/radix-tree/regression.h
index 3c8a1584e9ee..135145af18b7 100644
--- a/tools/testing/radix-tree/regression.h
+++ b/tools/testing/radix-tree/regression.h
@@ -5,5 +5,6 @@
5void regression1_test(void); 5void regression1_test(void);
6void regression2_test(void); 6void regression2_test(void);
7void regression3_test(void); 7void regression3_test(void);
8void regression4_test(void);
8 9
9#endif 10#endif
diff --git a/tools/testing/radix-tree/regression4.c b/tools/testing/radix-tree/regression4.c
new file mode 100644
index 000000000000..cf4e5aba6b08
--- /dev/null
+++ b/tools/testing/radix-tree/regression4.c
@@ -0,0 +1,79 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
3#include <linux/gfp.h>
4#include <linux/slab.h>
5#include <linux/radix-tree.h>
6#include <linux/rcupdate.h>
7#include <stdlib.h>
8#include <pthread.h>
9#include <stdio.h>
10#include <assert.h>
11
12#include "regression.h"
13
14static pthread_barrier_t worker_barrier;
15static int obj0, obj1;
16static RADIX_TREE(mt_tree, GFP_KERNEL);
17
18static void *reader_fn(void *arg)
19{
20 int i;
21 void *entry;
22
23 rcu_register_thread();
24 pthread_barrier_wait(&worker_barrier);
25
26 for (i = 0; i < 1000000; i++) {
27 rcu_read_lock();
28 entry = radix_tree_lookup(&mt_tree, 0);
29 rcu_read_unlock();
30 if (entry != &obj0) {
31 printf("iteration %d bad entry = %p\n", i, entry);
32 abort();
33 }
34 }
35
36 rcu_unregister_thread();
37
38 return NULL;
39}
40
41static void *writer_fn(void *arg)
42{
43 int i;
44
45 rcu_register_thread();
46 pthread_barrier_wait(&worker_barrier);
47
48 for (i = 0; i < 1000000; i++) {
49 radix_tree_insert(&mt_tree, 1, &obj1);
50 radix_tree_delete(&mt_tree, 1);
51 }
52
53 rcu_unregister_thread();
54
55 return NULL;
56}
57
58void regression4_test(void)
59{
60 pthread_t reader, writer;
61
62 printv(1, "regression test 4 starting\n");
63
64 radix_tree_insert(&mt_tree, 0, &obj0);
65 pthread_barrier_init(&worker_barrier, NULL, 2);
66
67 if (pthread_create(&reader, NULL, reader_fn, NULL) ||
68 pthread_create(&writer, NULL, writer_fn, NULL)) {
69 perror("pthread_create");
70 exit(1);
71 }
72
73 if (pthread_join(reader, NULL) || pthread_join(writer, NULL)) {
74 perror("pthread_join");
75 exit(1);
76 }
77
78 printv(1, "regression test 4 passed\n");
79}
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/bpf_flow.c
index b9798f558ca7..284660f5aa95 100644
--- a/tools/testing/selftests/bpf/bpf_flow.c
+++ b/tools/testing/selftests/bpf/bpf_flow.c
@@ -70,18 +70,18 @@ static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
70{ 70{
71 void *data_end = (void *)(long)skb->data_end; 71 void *data_end = (void *)(long)skb->data_end;
72 void *data = (void *)(long)skb->data; 72 void *data = (void *)(long)skb->data;
73 __u16 nhoff = skb->flow_keys->nhoff; 73 __u16 thoff = skb->flow_keys->thoff;
74 __u8 *hdr; 74 __u8 *hdr;
75 75
76 /* Verifies this variable offset does not overflow */ 76 /* Verifies this variable offset does not overflow */
77 if (nhoff > (USHRT_MAX - hdr_size)) 77 if (thoff > (USHRT_MAX - hdr_size))
78 return NULL; 78 return NULL;
79 79
80 hdr = data + nhoff; 80 hdr = data + thoff;
81 if (hdr + hdr_size <= data_end) 81 if (hdr + hdr_size <= data_end)
82 return hdr; 82 return hdr;
83 83
84 if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size)) 84 if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
85 return NULL; 85 return NULL;
86 86
87 return buffer; 87 return buffer;
@@ -158,13 +158,13 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
158 /* Only inspect standard GRE packets with version 0 */ 158 /* Only inspect standard GRE packets with version 0 */
159 return BPF_OK; 159 return BPF_OK;
160 160
161 keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */ 161 keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
162 if (GRE_IS_CSUM(gre->flags)) 162 if (GRE_IS_CSUM(gre->flags))
163 keys->nhoff += 4; /* Step over chksum and Padding */ 163 keys->thoff += 4; /* Step over chksum and Padding */
164 if (GRE_IS_KEY(gre->flags)) 164 if (GRE_IS_KEY(gre->flags))
165 keys->nhoff += 4; /* Step over key */ 165 keys->thoff += 4; /* Step over key */
166 if (GRE_IS_SEQ(gre->flags)) 166 if (GRE_IS_SEQ(gre->flags))
167 keys->nhoff += 4; /* Step over sequence number */ 167 keys->thoff += 4; /* Step over sequence number */
168 168
169 keys->is_encap = true; 169 keys->is_encap = true;
170 170
@@ -174,7 +174,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
174 if (!eth) 174 if (!eth)
175 return BPF_DROP; 175 return BPF_DROP;
176 176
177 keys->nhoff += sizeof(*eth); 177 keys->thoff += sizeof(*eth);
178 178
179 return parse_eth_proto(skb, eth->h_proto); 179 return parse_eth_proto(skb, eth->h_proto);
180 } else { 180 } else {
@@ -191,7 +191,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
191 if ((__u8 *)tcp + (tcp->doff << 2) > data_end) 191 if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
192 return BPF_DROP; 192 return BPF_DROP;
193 193
194 keys->thoff = keys->nhoff;
195 keys->sport = tcp->source; 194 keys->sport = tcp->source;
196 keys->dport = tcp->dest; 195 keys->dport = tcp->dest;
197 return BPF_OK; 196 return BPF_OK;
@@ -201,7 +200,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
201 if (!udp) 200 if (!udp)
202 return BPF_DROP; 201 return BPF_DROP;
203 202
204 keys->thoff = keys->nhoff;
205 keys->sport = udp->source; 203 keys->sport = udp->source;
206 keys->dport = udp->dest; 204 keys->dport = udp->dest;
207 return BPF_OK; 205 return BPF_OK;
@@ -252,8 +250,8 @@ PROG(IP)(struct __sk_buff *skb)
252 keys->ipv4_src = iph->saddr; 250 keys->ipv4_src = iph->saddr;
253 keys->ipv4_dst = iph->daddr; 251 keys->ipv4_dst = iph->daddr;
254 252
255 keys->nhoff += iph->ihl << 2; 253 keys->thoff += iph->ihl << 2;
256 if (data + keys->nhoff > data_end) 254 if (data + keys->thoff > data_end)
257 return BPF_DROP; 255 return BPF_DROP;
258 256
259 if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) { 257 if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
@@ -285,7 +283,7 @@ PROG(IPV6)(struct __sk_buff *skb)
285 keys->addr_proto = ETH_P_IPV6; 283 keys->addr_proto = ETH_P_IPV6;
286 memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr)); 284 memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
287 285
288 keys->nhoff += sizeof(struct ipv6hdr); 286 keys->thoff += sizeof(struct ipv6hdr);
289 287
290 return parse_ipv6_proto(skb, ip6h->nexthdr); 288 return parse_ipv6_proto(skb, ip6h->nexthdr);
291} 289}
@@ -301,7 +299,7 @@ PROG(IPV6OP)(struct __sk_buff *skb)
301 /* hlen is in 8-octets and does not include the first 8 bytes 299 /* hlen is in 8-octets and does not include the first 8 bytes
302 * of the header 300 * of the header
303 */ 301 */
304 skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3; 302 skb->flow_keys->thoff += (1 + ip6h->hdrlen) << 3;
305 303
306 return parse_ipv6_proto(skb, ip6h->nexthdr); 304 return parse_ipv6_proto(skb, ip6h->nexthdr);
307} 305}
@@ -315,7 +313,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
315 if (!fragh) 313 if (!fragh)
316 return BPF_DROP; 314 return BPF_DROP;
317 315
318 keys->nhoff += sizeof(*fragh); 316 keys->thoff += sizeof(*fragh);
319 keys->is_frag = true; 317 keys->is_frag = true;
320 if (!(fragh->frag_off & bpf_htons(IP6_OFFSET))) 318 if (!(fragh->frag_off & bpf_htons(IP6_OFFSET)))
321 keys->is_first_frag = true; 319 keys->is_first_frag = true;
@@ -341,7 +339,7 @@ PROG(VLAN)(struct __sk_buff *skb)
341 __be16 proto; 339 __be16 proto;
342 340
343 /* Peek back to see if single or double-tagging */ 341 /* Peek back to see if single or double-tagging */
344 if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto, 342 if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
345 sizeof(proto))) 343 sizeof(proto)))
346 return BPF_DROP; 344 return BPF_DROP;
347 345
@@ -354,14 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
354 if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) 352 if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
355 return BPF_DROP; 353 return BPF_DROP;
356 354
357 keys->nhoff += sizeof(*vlan); 355 keys->thoff += sizeof(*vlan);
358 } 356 }
359 357
360 vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); 358 vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
361 if (!vlan) 359 if (!vlan)
362 return BPF_DROP; 360 return BPF_DROP;
363 361
364 keys->nhoff += sizeof(*vlan); 362 keys->thoff += sizeof(*vlan);
365 /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ 363 /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
366 if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || 364 if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
367 vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) 365 vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index a08c67c8767e..c3b799c1ee97 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -14099,6 +14099,33 @@ static struct bpf_test tests[] = {
14099 .errstr_unpriv = "R1 leaks addr", 14099 .errstr_unpriv = "R1 leaks addr",
14100 .result = REJECT, 14100 .result = REJECT,
14101 }, 14101 },
14102 "calls: cross frame pruning",
14103 .insns = {
14104 /* r8 = !!random();
14105 * call pruner()
14106 * if (r8)
14107 * do something bad;
14108 */
14109 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14110 BPF_FUNC_get_prandom_u32),
14111 BPF_MOV64_IMM(BPF_REG_8, 0),
14112 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
14113 BPF_MOV64_IMM(BPF_REG_8, 1),
14114 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
14115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
14116 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
14117 BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
14118 BPF_MOV64_IMM(BPF_REG_0, 0),
14119 BPF_EXIT_INSN(),
14120 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
14121 BPF_EXIT_INSN(),
14122 },
14123 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14124 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14125 .result_unpriv = REJECT,
14126 .errstr = "!read_ok",
14127 .result = REJECT,
14128 },
14102}; 14129};
14103 14130
14104static int probe_filter_length(const struct bpf_insn *fp) 14131static int probe_filter_length(const struct bpf_insn *fp)
@@ -14124,7 +14151,7 @@ static int create_map(uint32_t type, uint32_t size_key,
14124 return fd; 14151 return fd;
14125} 14152}
14126 14153
14127static int create_prog_dummy1(enum bpf_map_type prog_type) 14154static int create_prog_dummy1(enum bpf_prog_type prog_type)
14128{ 14155{
14129 struct bpf_insn prog[] = { 14156 struct bpf_insn prog[] = {
14130 BPF_MOV64_IMM(BPF_REG_0, 42), 14157 BPF_MOV64_IMM(BPF_REG_0, 42),
@@ -14135,7 +14162,7 @@ static int create_prog_dummy1(enum bpf_map_type prog_type)
14135 ARRAY_SIZE(prog), "GPL", 0, NULL, 0); 14162 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14136} 14163}
14137 14164
14138static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx) 14165static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
14139{ 14166{
14140 struct bpf_insn prog[] = { 14167 struct bpf_insn prog[] = {
14141 BPF_MOV64_IMM(BPF_REG_3, idx), 14168 BPF_MOV64_IMM(BPF_REG_3, idx),
@@ -14150,7 +14177,7 @@ static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
14150 ARRAY_SIZE(prog), "GPL", 0, NULL, 0); 14177 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14151} 14178}
14152 14179
14153static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem, 14180static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
14154 int p1key) 14181 int p1key)
14155{ 14182{
14156 int p2key = 1; 14183 int p2key = 1;
@@ -14221,7 +14248,7 @@ static int create_cgroup_storage(bool percpu)
14221 14248
14222static char bpf_vlog[UINT_MAX >> 8]; 14249static char bpf_vlog[UINT_MAX >> 8];
14223 14250
14224static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type, 14251static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
14225 struct bpf_insn *prog, int *map_fds) 14252 struct bpf_insn *prog, int *map_fds)
14226{ 14253{
14227 int *fixup_map_hash_8b = test->fixup_map_hash_8b; 14254 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
@@ -14350,7 +14377,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
14350 do { 14377 do {
14351 prog[*fixup_map_stacktrace].imm = map_fds[12]; 14378 prog[*fixup_map_stacktrace].imm = map_fds[12];
14352 fixup_map_stacktrace++; 14379 fixup_map_stacktrace++;
14353 } while (fixup_map_stacktrace); 14380 } while (*fixup_map_stacktrace);
14354 } 14381 }
14355} 14382}
14356 14383
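
The last hunk fixes a loop that tested the pointer fixup_map_stacktrace, which is never NULL inside the loop, instead of the sentinel it points at, so the fixup walk could run past the end of its array. A runnable reduction of the corrected walk:

    #include <stdio.h>

    int main(void)
    {
            int fixups[] = { 3, 7, 0 };     /* 0-terminated offset list */
            int *fixup = fixups;

            do {
                    printf("patching offset %d\n", *fixup);
                    fixup++;
            } while (*fixup);               /* was: while (fixup) */

            return 0;
    }
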
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 9543a4c2f9be..f8f3e90700c0 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -9,6 +9,7 @@ TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh \
9TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh 9TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
10TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh 10TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
11TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh 11TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
12TEST_PROGS += test_vxlan_fdb_changelink.sh
12TEST_PROGS_EXTENDED := in_netns.sh 13TEST_PROGS_EXTENDED := in_netns.sh
13TEST_GEN_FILES = socket 14TEST_GEN_FILES = socket
14TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any 15TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
diff --git a/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
new file mode 100755
index 000000000000..2d442cdab11e
--- /dev/null
+++ b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
@@ -0,0 +1,29 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# Check FDB default-remote handling across "ip link set".
5
6check_remotes()
7{
8 local what=$1; shift
9 local N=$(bridge fdb sh dev vx | grep 00:00:00:00:00:00 | wc -l)
10
11 echo -ne "expected two remotes after $what\t"
12 if [[ $N != 2 ]]; then
13 echo "[FAIL]"
14 EXIT_STATUS=1
15 else
16 echo "[ OK ]"
17 fi
18}
19
20ip link add name vx up type vxlan id 2000 dstport 4789
21bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.20 self permanent
22bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.30 self permanent
23check_remotes "fdb append"
24
25ip link set dev vx type vxlan remote 192.0.2.30
26check_remotes "link set"
27
28ip link del dev vx
29exit $EXIT_STATUS
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index e1473234968d..c9a2abf8be1b 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -2731,9 +2731,14 @@ TEST(syscall_restart)
2731 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2731 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
2732 ASSERT_EQ(true, WIFSTOPPED(status)); 2732 ASSERT_EQ(true, WIFSTOPPED(status));
2733 ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); 2733 ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
2734 /* Verify signal delivery came from parent now. */
2735 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 2734 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
2736 EXPECT_EQ(getpid(), info.si_pid); 2735 /*
2736 * There is no siginfo on SIGSTOP any more, so we can't verify
2737 * signal delivery came from parent now (getpid() == info.si_pid).
2738 * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com
2739 * At least verify the SIGSTOP via PTRACE_GETSIGINFO.
2740 */
2741 EXPECT_EQ(SIGSTOP, info.si_signo);
2737 2742
2738 /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */ 2743 /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
2739 ASSERT_EQ(0, kill(child_pid, SIGCONT)); 2744 ASSERT_EQ(0, kill(child_pid, SIGCONT));
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index fb22bccfbc8a..7ef45a4a3cba 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -23,6 +23,10 @@
23#define PAGE_MASK (~(PAGE_SIZE-1)) 23#define PAGE_MASK (~(PAGE_SIZE-1))
24#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK) 24#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
25 25
26/* generic data direction definitions */
27#define READ 0
28#define WRITE 1
29
26typedef unsigned long long phys_addr_t; 30typedef unsigned long long phys_addr_t;
27typedef unsigned long long dma_addr_t; 31typedef unsigned long long dma_addr_t;
28typedef size_t __kernel_size_t; 32typedef size_t __kernel_size_t;
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 3710342cf6ad..6855cce3e528 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -175,10 +175,14 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
175{ 175{
176 struct kvm_coalesced_mmio_dev *dev, *tmp; 176 struct kvm_coalesced_mmio_dev *dev, *tmp;
177 177
178 if (zone->pio != 1 && zone->pio != 0)
179 return -EINVAL;
180
178 mutex_lock(&kvm->slots_lock); 181 mutex_lock(&kvm->slots_lock);
179 182
180 list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) 183 list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
181 if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) { 184 if (zone->pio == dev->zone.pio &&
185 coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
182 kvm_io_bus_unregister_dev(kvm, 186 kvm_io_bus_unregister_dev(kvm,
183 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev); 187 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
184 kvm_iodevice_destructor(&dev->dev); 188 kvm_iodevice_destructor(&dev->dev);