aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/filesystems/overlayfs.txt16
-rw-r--r--MAINTAINERS5
-rw-r--r--Makefile5
-rw-r--r--arch/arc/boot/dts/hsdk.dts45
-rw-r--r--arch/arc/configs/hsdk_defconfig3
-rw-r--r--arch/arc/include/asm/cmpxchg.h14
-rw-r--r--arch/arc/mm/fault.c9
-rw-r--r--arch/arc/mm/tlb.c13
-rw-r--r--arch/arm64/Makefile1
-rw-r--r--arch/arm64/include/asm/arch_timer.h8
-rw-r--r--arch/arm64/include/asm/smp.h6
-rw-r--r--arch/arm64/include/asm/smp_plat.h5
-rw-r--r--arch/arm64/include/asm/thread_info.h2
-rw-r--r--arch/arm64/kernel/cpufeature.c1
-rw-r--r--arch/nds32/include/asm/bitfield.h2
-rw-r--r--arch/nds32/include/asm/fpu.h2
-rw-r--r--arch/nds32/include/asm/fpuemu.h12
-rw-r--r--arch/nds32/include/asm/syscalls.h2
-rw-r--r--arch/nds32/include/uapi/asm/fp_udfiex_crtl.h16
-rw-r--r--arch/nds32/include/uapi/asm/sigcontext.h24
-rw-r--r--arch/nds32/include/uapi/asm/udftrap.h13
-rw-r--r--arch/nds32/include/uapi/asm/unistd.h4
-rw-r--r--arch/nds32/kernel/fpu.c15
-rw-r--r--arch/nds32/kernel/sys_nds32.c26
-rw-r--r--arch/nds32/math-emu/Makefile4
-rw-r--r--arch/nds32/math-emu/fd2si.c30
-rw-r--r--arch/nds32/math-emu/fd2siz.c30
-rw-r--r--arch/nds32/math-emu/fd2ui.c30
-rw-r--r--arch/nds32/math-emu/fd2uiz.c30
-rw-r--r--arch/nds32/math-emu/fpuemu.c57
-rw-r--r--arch/nds32/math-emu/fs2si.c29
-rw-r--r--arch/nds32/math-emu/fs2siz.c29
-rw-r--r--arch/nds32/math-emu/fs2ui.c29
-rw-r--r--arch/nds32/math-emu/fs2uiz.c30
-rw-r--r--arch/nds32/math-emu/fsi2d.c22
-rw-r--r--arch/nds32/math-emu/fsi2s.c22
-rw-r--r--arch/nds32/math-emu/fui2d.c22
-rw-r--r--arch/nds32/math-emu/fui2s.c22
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/configs/712_defconfig1
-rw-r--r--arch/parisc/configs/a500_defconfig1
-rw-r--r--arch/parisc/configs/b180_defconfig1
-rw-r--r--arch/parisc/configs/c3000_defconfig1
-rw-r--r--arch/parisc/configs/c8000_defconfig1
-rw-r--r--arch/parisc/configs/default_defconfig1
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig1
-rw-r--r--arch/parisc/include/asm/special_insns.h24
-rw-r--r--arch/parisc/kernel/alternative.c3
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S21
-rw-r--r--arch/parisc/math-emu/cnv_float.h8
-rw-r--r--arch/sparc/kernel/mdesc.c2
-rw-r--r--arch/sparc/kernel/perf_event.c4
-rw-r--r--arch/sparc/mm/ultra.S4
-rw-r--r--arch/x86/kernel/cpu/Makefile5
-rw-r--r--arch/x86/kernel/cpu/intel_epb.c22
-rw-r--r--arch/x86/lib/insn-eval.c47
-rw-r--r--arch/x86/power/cpu.c10
-rw-r--r--arch/x86/power/hibernate.c33
-rw-r--r--arch/xtensa/kernel/setup.c3
-rw-r--r--block/bfq-cgroup.c6
-rw-r--r--block/blk-cgroup.c2
-rw-r--r--block/blk-core.c13
-rw-r--r--block/blk-mq-sched.c30
-rw-r--r--block/blk-mq-sched.h1
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk.h10
-rw-r--r--block/elevator.c2
-rw-r--r--crypto/hmac.c4
-rw-r--r--crypto/jitterentropy-kcapi.c2
-rw-r--r--drivers/block/aoe/aoeblk.c16
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c1
-rw-r--r--drivers/block/rsxx/core.c1
-rw-r--r--drivers/dma-buf/udmabuf.c1
-rw-r--r--drivers/dma/dma-jz4780.c32
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c3
-rw-r--r--drivers/dma/fsl-qdma.c4
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c4
-rw-r--r--drivers/dma/sprd-dma.c49
-rw-r--r--drivers/dma/tegra210-adma.c57
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c2
-rw-r--r--drivers/fpga/dfl.c22
-rw-r--r--drivers/fpga/stratix10-soc.c6
-rw-r--r--drivers/fpga/zynqmp-fpga.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c12
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c31
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_component.c8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c6
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.h8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h10
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c4
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c14
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c13
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c38
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c49
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c25
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c56
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c38
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c51
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/hwmon/hwmon.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c37
-rw-r--r--drivers/i2c/busses/i2c-xiic.c5
-rw-r--r--drivers/infiniband/core/device.c49
-rw-r--r--drivers/infiniband/core/rdma_core.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c30
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c2
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c1
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c1
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c1
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c9
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c18
-rw-r--r--drivers/memstick/core/mspro_block.c13
-rw-r--r--drivers/misc/genwqe/card_dev.c2
-rw-r--r--drivers/misc/genwqe/card_utils.c4
-rw-r--r--drivers/misc/habanalabs/context.c6
-rw-r--r--drivers/misc/habanalabs/debugfs.c65
-rw-r--r--drivers/misc/habanalabs/device.c2
-rw-r--r--drivers/misc/habanalabs/goya/goya.c3
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h1
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c31
-rw-r--r--drivers/misc/habanalabs/habanalabs.h2
-rw-r--r--drivers/misc/habanalabs/memory.c6
-rw-r--r--drivers/misc/habanalabs/mmu.c8
-rw-r--r--drivers/misc/lkdtm/bugs.c23
-rw-r--r--drivers/misc/lkdtm/core.c6
-rw-r--r--drivers/misc/lkdtm/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm/usercopy.c10
-rw-r--r--drivers/mmc/core/queue.c2
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-tegra.c2
-rw-r--r--drivers/mmc/host/sdhci.c24
-rw-r--r--drivers/mmc/host/sdhci_am654.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c32
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c15
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c2
-rw-r--r--drivers/net/phy/phylink.c13
-rw-r--r--drivers/net/phy/sfp.c24
-rw-r--r--drivers/nvme/host/core.c3
-rw-r--r--drivers/nvme/host/pci.c6
-rw-r--r--drivers/nvme/host/rdma.c152
-rw-r--r--drivers/nvme/host/tcp.c57
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c1
-rw-r--r--drivers/parisc/ccio-dma.c6
-rw-r--r--drivers/parisc/sba_iommu.c5
-rw-r--r--drivers/parport/share.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c22
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c32
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c6
-rw-r--r--drivers/scsi/ufs/ufshcd.c3
-rw-r--r--drivers/vhost/net.c41
-rw-r--r--drivers/vhost/scsi.c21
-rw-r--r--drivers/vhost/vhost.c20
-rw-r--r--drivers/vhost/vhost.h5
-rw-r--r--drivers/vhost/vsock.c28
-rw-r--r--drivers/virtio/Kconfig8
-rw-r--r--drivers/w1/slaves/w1_ds2408.c2
-rw-r--r--fs/adfs/adfs.h14
-rw-r--r--fs/adfs/dir.c137
-rw-r--r--fs/adfs/dir_f.c43
-rw-r--r--fs/adfs/dir_fplus.c24
-rw-r--r--fs/fuse/file.c43
-rw-r--r--fs/gfs2/glock.c4
-rw-r--r--fs/gfs2/incore.h2
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/lops.c33
-rw-r--r--fs/gfs2/main.c1
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/nfs/nfs4proc.c32
-rw-r--r--fs/overlayfs/file.c9
-rw-r--r--fs/overlayfs/inode.c48
-rw-r--r--fs/overlayfs/namei.c8
-rw-r--r--fs/overlayfs/overlayfs.h3
-rw-r--r--fs/overlayfs/ovl_entry.h6
-rw-r--r--fs/overlayfs/super.c169
-rw-r--r--fs/overlayfs/util.c12
-rw-r--r--fs/pstore/platform.c7
-rw-r--r--fs/pstore/ram.c36
-rw-r--r--fs/xfs/scrub/ialloc.c3
-rw-r--r--fs/xfs/xfs_log.c11
-rw-r--r--include/drm/drm_modeset_helper_vtables.h8
-rw-r--r--include/linux/cgroup-defs.h3
-rw-r--r--include/linux/cpu.h4
-rw-r--r--include/linux/dsa/sja1105.h12
-rw-r--r--include/linux/mm.h11
-rw-r--r--include/linux/rcupdate.h6
-rw-r--r--include/linux/suspend.h31
-rw-r--r--include/math-emu/op-2.h17
-rw-r--r--include/math-emu/op-common.h11
-rw-r--r--include/net/ip6_fib.h3
-rw-r--r--include/net/tls.h4
-rw-r--r--include/rdma/ib_verbs.h1
-rw-r--r--include/uapi/linux/fuse.h7
-rw-r--r--include/uapi/misc/habanalabs.h22
-rw-r--r--init/Kconfig17
-rw-r--r--kernel/Makefile4
-rw-r--r--kernel/cgroup/cgroup.c33
-rw-r--r--kernel/cpu.c4
-rwxr-xr-xkernel/gen_kheaders.sh (renamed from kernel/gen_ikh_data.sh)17
-rw-r--r--kernel/kheaders.c40
-rw-r--r--kernel/power/hibernate.c9
-rw-r--r--kernel/power/suspend.c6
-rw-r--r--kernel/signal.c11
-rw-r--r--lib/lockref.c3
-rw-r--r--lib/test_firmware.c14
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/ethtool.c5
-rw-r--r--net/core/fib_rules.c6
-rw-r--r--net/core/pktgen.c11
-rw-r--r--net/dsa/tag_sja1105.c10
-rw-r--r--net/ipv4/route.c24
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv6/raw.c25
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/rds/ib.c2
-rw-r--r--net/rds/ib_rdma.c10
-rw-r--r--net/rds/ib_recv.c3
-rw-r--r--net/sctp/sm_make_chunk.c13
-rw-r--r--net/sctp/sm_sideeffect.c5
-rw-r--r--net/sunrpc/clnt.c30
-rw-r--r--net/sunrpc/xprtrdma/verbs.c3
-rw-r--r--net/tls/tls_device.c26
-rw-r--r--samples/pidfd/pidfd-metadata.c4
-rw-r--r--scripts/Kbuild.include7
-rwxr-xr-xscripts/checkstack.pl2
-rw-r--r--scripts/kconfig/tests/err_recursive_inc/expected_stderr6
-rw-r--r--scripts/package/Makefile2
-rw-r--r--tools/testing/selftests/cgroup/test_core.c7
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c4
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_broadcast.sh5
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c4
-rw-r--r--tools/testing/selftests/vm/Makefile6
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c2
-rw-r--r--tools/virtio/linux/kernel.h2
278 files changed, 2756 insertions, 1166 deletions
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index eef7d9d259e8..1da2f1668f08 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -336,8 +336,20 @@ the copied layers will fail the verification of the lower root file handle.
336Non-standard behavior 336Non-standard behavior
337--------------------- 337---------------------
338 338
339Overlayfs can now act as a POSIX compliant filesystem with the following 339Current version of overlayfs can act as a mostly POSIX compliant
340features turned on: 340filesystem.
341
342This is the list of cases that overlayfs doesn't currently handle:
343
344a) POSIX mandates updating st_atime for reads. This is currently not
345done in the case when the file resides on a lower layer.
346
347b) If a file residing on a lower layer is opened for read-only and then
348memory mapped with MAP_SHARED, then subsequent changes to the file are not
349reflected in the memory mapping.
350
351The following options allow overlayfs to act more like a standards
352compliant filesystem:
341 353
3421) "redirect_dir" 3541) "redirect_dir"
343 355
diff --git a/MAINTAINERS b/MAINTAINERS
index a6954776a37e..57f496cff999 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13057,7 +13057,6 @@ F: Documentation/devicetree/bindings/net/qcom,dwmac.txt
13057 13057
13058QUALCOMM GENERIC INTERFACE I2C DRIVER 13058QUALCOMM GENERIC INTERFACE I2C DRIVER
13059M: Alok Chauhan <alokc@codeaurora.org> 13059M: Alok Chauhan <alokc@codeaurora.org>
13060M: Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
13061L: linux-i2c@vger.kernel.org 13060L: linux-i2c@vger.kernel.org
13062L: linux-arm-msm@vger.kernel.org 13061L: linux-arm-msm@vger.kernel.org
13063S: Supported 13062S: Supported
@@ -14995,7 +14994,7 @@ S: Odd Fixes
14995F: drivers/net/ethernet/adaptec/starfire* 14994F: drivers/net/ethernet/adaptec/starfire*
14996 14995
14997STEC S1220 SKD DRIVER 14996STEC S1220 SKD DRIVER
14998M: Bart Van Assche <bart.vanassche@wdc.com> 14997M: Damien Le Moal <Damien.LeMoal@wdc.com>
14999L: linux-block@vger.kernel.org 14998L: linux-block@vger.kernel.org
15000S: Maintained 14999S: Maintained
15001F: drivers/block/skd*[ch] 15000F: drivers/block/skd*[ch]
@@ -17312,7 +17311,7 @@ F: Documentation/ABI/stable/sysfs-hypervisor-xen
17312F: Documentation/ABI/testing/sysfs-hypervisor-xen 17311F: Documentation/ABI/testing/sysfs-hypervisor-xen
17313 17312
17314XEN NETWORK BACKEND DRIVER 17313XEN NETWORK BACKEND DRIVER
17315M: Wei Liu <wei.liu2@citrix.com> 17314M: Wei Liu <wei.liu@kernel.org>
17316M: Paul Durrant <paul.durrant@citrix.com> 17315M: Paul Durrant <paul.durrant@citrix.com>
17317L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 17316L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
17318L: netdev@vger.kernel.org 17317L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 004d67a4405f..d27e1326cc03 100644
--- a/Makefile
+++ b/Makefile
@@ -1228,9 +1228,8 @@ kselftest-clean:
1228PHONY += kselftest-merge 1228PHONY += kselftest-merge
1229kselftest-merge: 1229kselftest-merge:
1230 $(if $(wildcard $(objtree)/.config),, $(error No .config exists, config your kernel first!)) 1230 $(if $(wildcard $(objtree)/.config),, $(error No .config exists, config your kernel first!))
1231 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \ 1231 $(Q)find $(srctree)/tools/testing/selftests -name config | \
1232 -m $(objtree)/.config \ 1232 xargs $(srctree)/scripts/kconfig/merge_config.sh -m $(objtree)/.config
1233 $(srctree)/tools/testing/selftests/*/config
1234 +$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig 1233 +$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
1235 1234
1236# --------------------------------------------------------------------------- 1235# ---------------------------------------------------------------------------
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 7425bb0f2d1b..acfbed41b020 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -11,7 +11,6 @@
11 */ 11 */
12/dts-v1/; 12/dts-v1/;
13 13
14#include <dt-bindings/net/ti-dp83867.h>
15#include <dt-bindings/reset/snps,hsdk-reset.h> 14#include <dt-bindings/reset/snps,hsdk-reset.h>
16 15
17/ { 16/ {
@@ -167,6 +166,24 @@
167 #clock-cells = <0>; 166 #clock-cells = <0>;
168 }; 167 };
169 168
169 gpu_core_clk: gpu-core-clk {
170 compatible = "fixed-clock";
171 clock-frequency = <400000000>;
172 #clock-cells = <0>;
173 };
174
175 gpu_dma_clk: gpu-dma-clk {
176 compatible = "fixed-clock";
177 clock-frequency = <400000000>;
178 #clock-cells = <0>;
179 };
180
181 gpu_cfg_clk: gpu-cfg-clk {
182 compatible = "fixed-clock";
183 clock-frequency = <200000000>;
184 #clock-cells = <0>;
185 };
186
170 dmac_core_clk: dmac-core-clk { 187 dmac_core_clk: dmac-core-clk {
171 compatible = "fixed-clock"; 188 compatible = "fixed-clock";
172 clock-frequency = <400000000>; 189 clock-frequency = <400000000>;
@@ -187,6 +204,7 @@
187 interrupt-names = "macirq"; 204 interrupt-names = "macirq";
188 phy-mode = "rgmii"; 205 phy-mode = "rgmii";
189 snps,pbl = <32>; 206 snps,pbl = <32>;
207 snps,multicast-filter-bins = <256>;
190 clocks = <&gmacclk>; 208 clocks = <&gmacclk>;
191 clock-names = "stmmaceth"; 209 clock-names = "stmmaceth";
192 phy-handle = <&phy0>; 210 phy-handle = <&phy0>;
@@ -195,15 +213,15 @@
195 mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ 213 mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
196 dma-coherent; 214 dma-coherent;
197 215
216 tx-fifo-depth = <4096>;
217 rx-fifo-depth = <4096>;
218
198 mdio { 219 mdio {
199 #address-cells = <1>; 220 #address-cells = <1>;
200 #size-cells = <0>; 221 #size-cells = <0>;
201 compatible = "snps,dwmac-mdio"; 222 compatible = "snps,dwmac-mdio";
202 phy0: ethernet-phy@0 { 223 phy0: ethernet-phy@0 {
203 reg = <0>; 224 reg = <0>;
204 ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
205 ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
206 ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
207 }; 225 };
208 }; 226 };
209 }; 227 };
@@ -237,6 +255,14 @@
237 dma-coherent; 255 dma-coherent;
238 }; 256 };
239 257
258 creg_gpio: gpio@14b0 {
259 compatible = "snps,creg-gpio-hsdk";
260 reg = <0x14b0 0x4>;
261 gpio-controller;
262 #gpio-cells = <2>;
263 ngpios = <2>;
264 };
265
240 gpio: gpio@3000 { 266 gpio: gpio@3000 {
241 compatible = "snps,dw-apb-gpio"; 267 compatible = "snps,dw-apb-gpio";
242 reg = <0x3000 0x20>; 268 reg = <0x3000 0x20>;
@@ -252,6 +278,17 @@
252 }; 278 };
253 }; 279 };
254 280
281 gpu_3d: gpu@90000 {
282 compatible = "vivante,gc";
283 reg = <0x90000 0x4000>;
284 clocks = <&gpu_dma_clk>,
285 <&gpu_cfg_clk>,
286 <&gpu_core_clk>,
287 <&gpu_core_clk>;
288 clock-names = "bus", "reg", "core", "shader";
289 interrupts = <28>;
290 };
291
255 dmac: dmac@80000 { 292 dmac: dmac@80000 {
256 compatible = "snps,axi-dma-1.01a"; 293 compatible = "snps,axi-dma-1.01a";
257 reg = <0x80000 0x400>; 294 reg = <0x80000 0x400>;
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 0e5fd29ed238..c8fb5d60c53f 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -49,10 +49,12 @@ CONFIG_SERIAL_OF_PLATFORM=y
49CONFIG_GPIOLIB=y 49CONFIG_GPIOLIB=y
50CONFIG_GPIO_SYSFS=y 50CONFIG_GPIO_SYSFS=y
51CONFIG_GPIO_DWAPB=y 51CONFIG_GPIO_DWAPB=y
52CONFIG_GPIO_SNPS_CREG=y
52# CONFIG_HWMON is not set 53# CONFIG_HWMON is not set
53CONFIG_DRM=y 54CONFIG_DRM=y
54# CONFIG_DRM_FBDEV_EMULATION is not set 55# CONFIG_DRM_FBDEV_EMULATION is not set
55CONFIG_DRM_UDL=y 56CONFIG_DRM_UDL=y
57CONFIG_DRM_ETNAVIV=y
56CONFIG_FB=y 58CONFIG_FB=y
57CONFIG_FRAMEBUFFER_CONSOLE=y 59CONFIG_FRAMEBUFFER_CONSOLE=y
58CONFIG_USB_EHCI_HCD=y 60CONFIG_USB_EHCI_HCD=y
@@ -64,7 +66,6 @@ CONFIG_MMC=y
64CONFIG_MMC_SDHCI=y 66CONFIG_MMC_SDHCI=y
65CONFIG_MMC_SDHCI_PLTFM=y 67CONFIG_MMC_SDHCI_PLTFM=y
66CONFIG_MMC_DW=y 68CONFIG_MMC_DW=y
67# CONFIG_IOMMU_SUPPORT is not set
68CONFIG_EXT3_FS=y 69CONFIG_EXT3_FS=y
69CONFIG_VFAT_FS=y 70CONFIG_VFAT_FS=y
70CONFIG_TMPFS=y 71CONFIG_TMPFS=y
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index d819de1c5d10..3ea4112c8302 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
92 92
93#endif /* CONFIG_ARC_HAS_LLSC */ 93#endif /* CONFIG_ARC_HAS_LLSC */
94 94
95#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \ 95#define cmpxchg(ptr, o, n) ({ \
96 (unsigned long)(o), (unsigned long)(n))) 96 (typeof(*(ptr)))__cmpxchg((ptr), \
97 (unsigned long)(o), \
98 (unsigned long)(n)); \
99})
97 100
98/* 101/*
99 * atomic_cmpxchg is same as cmpxchg 102 * atomic_cmpxchg is same as cmpxchg
@@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
198 return __xchg_bad_pointer(); 201 return __xchg_bad_pointer();
199} 202}
200 203
201#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \ 204#define xchg(ptr, with) ({ \
202 sizeof(*(ptr)))) 205 (typeof(*(ptr)))__xchg((unsigned long)(with), \
206 (ptr), \
207 sizeof(*(ptr))); \
208})
203 209
204#endif /* CONFIG_ARC_PLAT_EZNPS */ 210#endif /* CONFIG_ARC_PLAT_EZNPS */
205 211
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 8df1638259f3..6836095251ed 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -66,7 +66,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
66 struct vm_area_struct *vma = NULL; 66 struct vm_area_struct *vma = NULL;
67 struct task_struct *tsk = current; 67 struct task_struct *tsk = current;
68 struct mm_struct *mm = tsk->mm; 68 struct mm_struct *mm = tsk->mm;
69 int si_code = 0; 69 int si_code = SEGV_MAPERR;
70 int ret; 70 int ret;
71 vm_fault_t fault; 71 vm_fault_t fault;
72 int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ 72 int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
@@ -81,16 +81,14 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
81 * only copy the information from the master page table, 81 * only copy the information from the master page table,
82 * nothing more. 82 * nothing more.
83 */ 83 */
84 if (address >= VMALLOC_START) { 84 if (address >= VMALLOC_START && !user_mode(regs)) {
85 ret = handle_kernel_vaddr_fault(address); 85 ret = handle_kernel_vaddr_fault(address);
86 if (unlikely(ret)) 86 if (unlikely(ret))
87 goto bad_area_nosemaphore; 87 goto no_context;
88 else 88 else
89 return; 89 return;
90 } 90 }
91 91
92 si_code = SEGV_MAPERR;
93
94 /* 92 /*
95 * If we're in an interrupt or have no user 93 * If we're in an interrupt or have no user
96 * context, we must not take the fault.. 94 * context, we must not take the fault..
@@ -198,7 +196,6 @@ good_area:
198bad_area: 196bad_area:
199 up_read(&mm->mmap_sem); 197 up_read(&mm->mmap_sem);
200 198
201bad_area_nosemaphore:
202 /* User mode accesses just cause a SIGSEGV */ 199 /* User mode accesses just cause a SIGSEGV */
203 if (user_mode(regs)) { 200 if (user_mode(regs)) {
204 tsk->thread.fault_address = address; 201 tsk->thread.fault_address = address;
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 4097764fea23..fa18c00b0cfd 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -911,9 +911,11 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
911 struct pt_regs *regs) 911 struct pt_regs *regs)
912{ 912{
913 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; 913 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
914 unsigned int pd0[mmu->ways];
915 unsigned long flags; 914 unsigned long flags;
916 int set; 915 int set, n_ways = mmu->ways;
916
917 n_ways = min(n_ways, 4);
918 BUG_ON(mmu->ways > 4);
917 919
918 local_irq_save(flags); 920 local_irq_save(flags);
919 921
@@ -921,9 +923,10 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
921 for (set = 0; set < mmu->sets; set++) { 923 for (set = 0; set < mmu->sets; set++) {
922 924
923 int is_valid, way; 925 int is_valid, way;
926 unsigned int pd0[4];
924 927
925 /* read out all the ways of current set */ 928 /* read out all the ways of current set */
926 for (way = 0, is_valid = 0; way < mmu->ways; way++) { 929 for (way = 0, is_valid = 0; way < n_ways; way++) {
927 write_aux_reg(ARC_REG_TLBINDEX, 930 write_aux_reg(ARC_REG_TLBINDEX,
928 SET_WAY_TO_IDX(mmu, set, way)); 931 SET_WAY_TO_IDX(mmu, set, way));
929 write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead); 932 write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
@@ -937,14 +940,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
937 continue; 940 continue;
938 941
939 /* Scan the set for duplicate ways: needs a nested loop */ 942 /* Scan the set for duplicate ways: needs a nested loop */
940 for (way = 0; way < mmu->ways - 1; way++) { 943 for (way = 0; way < n_ways - 1; way++) {
941 944
942 int n; 945 int n;
943 946
944 if (!pd0[way]) 947 if (!pd0[way])
945 continue; 948 continue;
946 949
947 for (n = way + 1; n < mmu->ways; n++) { 950 for (n = way + 1; n < n_ways; n++) {
948 if (pd0[way] != pd0[n]) 951 if (pd0[way] != pd0[n])
949 continue; 952 continue;
950 953
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index b025304bde46..8fbd583b18e1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -51,6 +51,7 @@ endif
51 51
52KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) 52KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
53KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 53KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
54KBUILD_CFLAGS += -Wno-psabi
54KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) 55KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
55 56
56KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) 57KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b7bca1ae09e6..50b3ab7ded4f 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -193,7 +193,7 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
193 : "=r" (tmp) : "r" (_val)); \ 193 : "=r" (tmp) : "r" (_val)); \
194} while (0) 194} while (0)
195 195
196static inline u64 __arch_counter_get_cntpct_stable(void) 196static __always_inline u64 __arch_counter_get_cntpct_stable(void)
197{ 197{
198 u64 cnt; 198 u64 cnt;
199 199
@@ -203,7 +203,7 @@ static inline u64 __arch_counter_get_cntpct_stable(void)
203 return cnt; 203 return cnt;
204} 204}
205 205
206static inline u64 __arch_counter_get_cntpct(void) 206static __always_inline u64 __arch_counter_get_cntpct(void)
207{ 207{
208 u64 cnt; 208 u64 cnt;
209 209
@@ -213,7 +213,7 @@ static inline u64 __arch_counter_get_cntpct(void)
213 return cnt; 213 return cnt;
214} 214}
215 215
216static inline u64 __arch_counter_get_cntvct_stable(void) 216static __always_inline u64 __arch_counter_get_cntvct_stable(void)
217{ 217{
218 u64 cnt; 218 u64 cnt;
219 219
@@ -223,7 +223,7 @@ static inline u64 __arch_counter_get_cntvct_stable(void)
223 return cnt; 223 return cnt;
224} 224}
225 225
226static inline u64 __arch_counter_get_cntvct(void) 226static __always_inline u64 __arch_counter_get_cntvct(void)
227{ 227{
228 u64 cnt; 228 u64 cnt;
229 229
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 18553f399e08..eae2d6c01262 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -53,6 +53,12 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
53 */ 53 */
54#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number)) 54#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
55 55
56/*
57 * Logical CPU mapping.
58 */
59extern u64 __cpu_logical_map[NR_CPUS];
60#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
61
56struct seq_file; 62struct seq_file;
57 63
58/* 64/*
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index af58dcdefb21..7a495403a18a 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -37,11 +37,6 @@ static inline u32 mpidr_hash_size(void)
37} 37}
38 38
39/* 39/*
40 * Logical CPU mapping.
41 */
42extern u64 __cpu_logical_map[NR_CPUS];
43#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
44/*
45 * Retrieve logical cpu index corresponding to a given MPIDR.Aff* 40 * Retrieve logical cpu index corresponding to a given MPIDR.Aff*
46 * - mpidr: MPIDR.Aff* bits to be used for the look-up 41 * - mpidr: MPIDR.Aff* bits to be used for the look-up
47 * 42 *
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index eb3ef73e07cf..f1d032be628a 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -75,7 +75,7 @@ void arch_release_task_struct(struct task_struct *tsk);
75 * TIF_SYSCALL_TRACE - syscall trace active 75 * TIF_SYSCALL_TRACE - syscall trace active
76 * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace 76 * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
77 * TIF_SYSCALL_AUDIT - syscall auditing 77 * TIF_SYSCALL_AUDIT - syscall auditing
78 * TIF_SECOMP - syscall secure computing 78 * TIF_SECCOMP - syscall secure computing
79 * TIF_SIGPENDING - signal pending 79 * TIF_SIGPENDING - signal pending
80 * TIF_NEED_RESCHED - rescheduling necessary 80 * TIF_NEED_RESCHED - rescheduling necessary
81 * TIF_NOTIFY_RESUME - callback before returning to user 81 * TIF_NOTIFY_RESUME - callback before returning to user
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ca27e08e3d8a..80babf451519 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -830,6 +830,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
830 830
831 read_sysreg_case(SYS_ID_AA64PFR0_EL1); 831 read_sysreg_case(SYS_ID_AA64PFR0_EL1);
832 read_sysreg_case(SYS_ID_AA64PFR1_EL1); 832 read_sysreg_case(SYS_ID_AA64PFR1_EL1);
833 read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
833 read_sysreg_case(SYS_ID_AA64DFR0_EL1); 834 read_sysreg_case(SYS_ID_AA64DFR0_EL1);
834 read_sysreg_case(SYS_ID_AA64DFR1_EL1); 835 read_sysreg_case(SYS_ID_AA64DFR1_EL1);
835 read_sysreg_case(SYS_ID_AA64MMFR0_EL1); 836 read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
index e75212c76b20..b02a58e71f80 100644
--- a/arch/nds32/include/asm/bitfield.h
+++ b/arch/nds32/include/asm/bitfield.h
@@ -937,7 +937,7 @@
937#define FPCSR_mskDNIT ( 0x1 << FPCSR_offDNIT ) 937#define FPCSR_mskDNIT ( 0x1 << FPCSR_offDNIT )
938#define FPCSR_mskRIT ( 0x1 << FPCSR_offRIT ) 938#define FPCSR_mskRIT ( 0x1 << FPCSR_offRIT )
939#define FPCSR_mskALL (FPCSR_mskIVO | FPCSR_mskDBZ | FPCSR_mskOVF | FPCSR_mskUDF | FPCSR_mskIEX) 939#define FPCSR_mskALL (FPCSR_mskIVO | FPCSR_mskDBZ | FPCSR_mskOVF | FPCSR_mskUDF | FPCSR_mskIEX)
940#define FPCSR_mskALLE_NO_UDFE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskIEXE) 940#define FPCSR_mskALLE_NO_UDF_IEXE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE)
941#define FPCSR_mskALLE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskUDFE | FPCSR_mskIEXE) 941#define FPCSR_mskALLE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskUDFE | FPCSR_mskIEXE)
942#define FPCSR_mskALLT (FPCSR_mskIVOT | FPCSR_mskDBZT | FPCSR_mskOVFT | FPCSR_mskUDFT | FPCSR_mskIEXT |FPCSR_mskDNIT | FPCSR_mskRIT) 942#define FPCSR_mskALLT (FPCSR_mskIVOT | FPCSR_mskDBZT | FPCSR_mskOVFT | FPCSR_mskUDFT | FPCSR_mskIEXT |FPCSR_mskDNIT | FPCSR_mskRIT)
943 943
diff --git a/arch/nds32/include/asm/fpu.h b/arch/nds32/include/asm/fpu.h
index 019f1bcfc5ee..8294ed4aaa2c 100644
--- a/arch/nds32/include/asm/fpu.h
+++ b/arch/nds32/include/asm/fpu.h
@@ -36,7 +36,7 @@ extern int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu);
36 * enabled by default and kerenl will re-execute it by fpu emulator 36 * enabled by default and kerenl will re-execute it by fpu emulator
37 * when getting underflow exception. 37 * when getting underflow exception.
38 */ 38 */
39#define FPCSR_INIT FPCSR_mskUDFE 39#define FPCSR_INIT (FPCSR_mskUDFE | FPCSR_mskIEXE)
40#else 40#else
41#define FPCSR_INIT 0x0UL 41#define FPCSR_INIT 0x0UL
42#endif 42#endif
diff --git a/arch/nds32/include/asm/fpuemu.h b/arch/nds32/include/asm/fpuemu.h
index c4bd0c7faa75..63e7ef5f7969 100644
--- a/arch/nds32/include/asm/fpuemu.h
+++ b/arch/nds32/include/asm/fpuemu.h
@@ -13,6 +13,12 @@ void fsubs(void *ft, void *fa, void *fb);
13void fmuls(void *ft, void *fa, void *fb); 13void fmuls(void *ft, void *fa, void *fb);
14void fdivs(void *ft, void *fa, void *fb); 14void fdivs(void *ft, void *fa, void *fb);
15void fs2d(void *ft, void *fa); 15void fs2d(void *ft, void *fa);
16void fs2si(void *ft, void *fa);
17void fs2si_z(void *ft, void *fa);
18void fs2ui(void *ft, void *fa);
19void fs2ui_z(void *ft, void *fa);
20void fsi2s(void *ft, void *fa);
21void fui2s(void *ft, void *fa);
16void fsqrts(void *ft, void *fa); 22void fsqrts(void *ft, void *fa);
17void fnegs(void *ft, void *fa); 23void fnegs(void *ft, void *fa);
18int fcmps(void *ft, void *fa, void *fb, int cop); 24int fcmps(void *ft, void *fa, void *fb, int cop);
@@ -26,6 +32,12 @@ void fmuld(void *ft, void *fa, void *fb);
26void fdivd(void *ft, void *fa, void *fb); 32void fdivd(void *ft, void *fa, void *fb);
27void fsqrtd(void *ft, void *fa); 33void fsqrtd(void *ft, void *fa);
28void fd2s(void *ft, void *fa); 34void fd2s(void *ft, void *fa);
35void fd2si(void *ft, void *fa);
36void fd2si_z(void *ft, void *fa);
37void fd2ui(void *ft, void *fa);
38void fd2ui_z(void *ft, void *fa);
39void fsi2d(void *ft, void *fa);
40void fui2d(void *ft, void *fa);
29void fnegd(void *ft, void *fa); 41void fnegd(void *ft, void *fa);
30int fcmpd(void *ft, void *fa, void *fb, int cop); 42int fcmpd(void *ft, void *fa, void *fb, int cop);
31 43
diff --git a/arch/nds32/include/asm/syscalls.h b/arch/nds32/include/asm/syscalls.h
index f3b16f602cb5..4e7216082a67 100644
--- a/arch/nds32/include/asm/syscalls.h
+++ b/arch/nds32/include/asm/syscalls.h
@@ -7,7 +7,7 @@
7asmlinkage long sys_cacheflush(unsigned long addr, unsigned long len, unsigned int op); 7asmlinkage long sys_cacheflush(unsigned long addr, unsigned long len, unsigned int op);
8asmlinkage long sys_fadvise64_64_wrapper(int fd, int advice, loff_t offset, loff_t len); 8asmlinkage long sys_fadvise64_64_wrapper(int fd, int advice, loff_t offset, loff_t len);
9asmlinkage long sys_rt_sigreturn_wrapper(void); 9asmlinkage long sys_rt_sigreturn_wrapper(void);
10asmlinkage long sys_udftrap(int option); 10asmlinkage long sys_fp_udfiex_crtl(int cmd, int act);
11 11
12#include <asm-generic/syscalls.h> 12#include <asm-generic/syscalls.h>
13 13
diff --git a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
new file mode 100644
index 000000000000..d54a5d6c6538
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
@@ -0,0 +1,16 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2005-2019 Andes Technology Corporation */
3#ifndef _FP_UDF_IEX_CRTL_H
4#define _FP_UDF_IEX_CRTL_H
5
6/*
7 * The cmd list of sys_fp_udfiex_crtl()
8 */
9/* Disable UDF or IEX trap based on the content of parameter act */
10#define DISABLE_UDF_IEX_TRAP 0
11/* Enable UDF or IEX trap based on the content of parameter act */
12#define ENABLE_UDF_IEX_TRAP 1
13/* Get current status of UDF and IEX trap */
14#define GET_UDF_IEX_TRAP 2
15
16#endif /* _FP_UDF_IEX_CRTL_H */
diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h
index 628ff6b75825..dc89af7ddcc3 100644
--- a/arch/nds32/include/uapi/asm/sigcontext.h
+++ b/arch/nds32/include/uapi/asm/sigcontext.h
@@ -13,14 +13,24 @@ struct fpu_struct {
13 unsigned long long fd_regs[32]; 13 unsigned long long fd_regs[32];
14 unsigned long fpcsr; 14 unsigned long fpcsr;
15 /* 15 /*
16 * UDF_trap is used to recognize whether underflow trap is enabled 16 * When CONFIG_SUPPORT_DENORMAL_ARITHMETIC is defined, kernel prevents
17 * or not. When UDF_trap == 1, this process will be traped and then 17 * hardware from treating the denormalized output as an underflow case
18 * get a SIGFPE signal when encountering an underflow exception. 18 * and rounding it to a normal number. Hence kernel enables the UDF and
19 * UDF_trap is only modified through setfputrap syscall. Therefore, 19 * IEX trap in the fpcsr register to step in the calculation.
20 * UDF_trap needn't be saved or loaded to context in each context 20 * However, the UDF and IEX trap enable bit in $fpcsr also lose
21 * switch. 21 * their use.
22 *
23 * UDF_IEX_trap replaces the feature of UDF and IEX trap enable bit in
24 * $fpcsr to control the trap of underflow and inexact. The bit filed
25 * of UDF_IEX_trap is the same as $fpcsr, 10th bit is used to enable UDF
26 * exception trapping and 11th bit is used to enable IEX exception
27 * trapping.
28 *
29 * UDF_IEX_trap is only modified through fp_udfiex_crtl syscall.
30 * Therefore, UDF_IEX_trap needn't be saved and restored in each
31 * context switch.
22 */ 32 */
23 unsigned long UDF_trap; 33 unsigned long UDF_IEX_trap;
24}; 34};
25 35
26struct zol_struct { 36struct zol_struct {
diff --git a/arch/nds32/include/uapi/asm/udftrap.h b/arch/nds32/include/uapi/asm/udftrap.h
deleted file mode 100644
index 433f79d679c0..000000000000
--- a/arch/nds32/include/uapi/asm/udftrap.h
+++ /dev/null
@@ -1,13 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2005-2018 Andes Technology Corporation */
3#ifndef _ASM_SETFPUTRAP
4#define _ASM_SETFPUTRAP
5
6/*
7 * Options for setfputrap system call
8 */
9#define DISABLE_UDFTRAP 0 /* disable underflow exception trap */
10#define ENABLE_UDFTRAP 1 /* enable undeflos exception trap */
11#define GET_UDFTRAP 2 /* only get undeflos exception trap status */
12
13#endif /* _ASM_CACHECTL */
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index c691735017ed..a0b2f7b9c0f2 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -11,6 +11,6 @@
11 11
12/* Additional NDS32 specific syscalls. */ 12/* Additional NDS32 specific syscalls. */
13#define __NR_cacheflush (__NR_arch_specific_syscall) 13#define __NR_cacheflush (__NR_arch_specific_syscall)
14#define __NR_udftrap (__NR_arch_specific_syscall + 1) 14#define __NR_fp_udfiex_crtl (__NR_arch_specific_syscall + 1)
15__SYSCALL(__NR_cacheflush, sys_cacheflush) 15__SYSCALL(__NR_cacheflush, sys_cacheflush)
16__SYSCALL(__NR_udftrap, sys_udftrap) 16__SYSCALL(__NR_fp_udfiex_crtl, sys_fp_udfiex_crtl)
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
index fddd40c7a16f..cf0b8760f261 100644
--- a/arch/nds32/kernel/fpu.c
+++ b/arch/nds32/kernel/fpu.c
@@ -14,7 +14,7 @@ const struct fpu_struct init_fpuregs = {
14 .fd_regs = {[0 ... 31] = sNAN64}, 14 .fd_regs = {[0 ... 31] = sNAN64},
15 .fpcsr = FPCSR_INIT, 15 .fpcsr = FPCSR_INIT,
16#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC) 16#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
17 .UDF_trap = 0 17 .UDF_IEX_trap = 0
18#endif 18#endif
19}; 19};
20 20
@@ -178,7 +178,7 @@ inline void do_fpu_context_switch(struct pt_regs *regs)
178 /* First time FPU user. */ 178 /* First time FPU user. */
179 load_fpu(&init_fpuregs); 179 load_fpu(&init_fpuregs);
180#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC) 180#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
181 current->thread.fpu.UDF_trap = init_fpuregs.UDF_trap; 181 current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;
182#endif 182#endif
183 set_used_math(); 183 set_used_math();
184 } 184 }
@@ -206,7 +206,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
206 unsigned int fpcsr; 206 unsigned int fpcsr;
207 int si_code = 0, si_signo = SIGFPE; 207 int si_code = 0, si_signo = SIGFPE;
208#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC) 208#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
209 unsigned long redo_except = FPCSR_mskDNIT|FPCSR_mskUDFT; 209 unsigned long redo_except = FPCSR_mskDNIT|FPCSR_mskUDFT|FPCSR_mskIEXT;
210#else 210#else
211 unsigned long redo_except = FPCSR_mskDNIT; 211 unsigned long redo_except = FPCSR_mskDNIT;
212#endif 212#endif
@@ -215,21 +215,18 @@ inline void handle_fpu_exception(struct pt_regs *regs)
215 fpcsr = current->thread.fpu.fpcsr; 215 fpcsr = current->thread.fpu.fpcsr;
216 216
217 if (fpcsr & redo_except) { 217 if (fpcsr & redo_except) {
218#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
219 if (fpcsr & FPCSR_mskUDFT)
220 current->thread.fpu.fpcsr &= ~FPCSR_mskIEX;
221#endif
222 si_signo = do_fpuemu(regs, &current->thread.fpu); 218 si_signo = do_fpuemu(regs, &current->thread.fpu);
223 fpcsr = current->thread.fpu.fpcsr; 219 fpcsr = current->thread.fpu.fpcsr;
224 if (!si_signo) 220 if (!si_signo) {
221 current->thread.fpu.fpcsr &= ~(redo_except);
225 goto done; 222 goto done;
223 }
226 } else if (fpcsr & FPCSR_mskRIT) { 224 } else if (fpcsr & FPCSR_mskRIT) {
227 if (!user_mode(regs)) 225 if (!user_mode(regs))
228 do_exit(SIGILL); 226 do_exit(SIGILL);
229 si_signo = SIGILL; 227 si_signo = SIGILL;
230 } 228 }
231 229
232
233 switch (si_signo) { 230 switch (si_signo) {
234 case SIGFPE: 231 case SIGFPE:
235 fill_sigfpe_signo(fpcsr, &si_code); 232 fill_sigfpe_signo(fpcsr, &si_code);
diff --git a/arch/nds32/kernel/sys_nds32.c b/arch/nds32/kernel/sys_nds32.c
index 0835277636ce..cb2d1e219bb3 100644
--- a/arch/nds32/kernel/sys_nds32.c
+++ b/arch/nds32/kernel/sys_nds32.c
@@ -6,8 +6,8 @@
6 6
7#include <asm/cachectl.h> 7#include <asm/cachectl.h>
8#include <asm/proc-fns.h> 8#include <asm/proc-fns.h>
9#include <asm/udftrap.h>
10#include <asm/fpu.h> 9#include <asm/fpu.h>
10#include <asm/fp_udfiex_crtl.h>
11 11
12SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, 12SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
13 unsigned long, prot, unsigned long, flags, 13 unsigned long, prot, unsigned long, flags,
@@ -51,31 +51,33 @@ SYSCALL_DEFINE3(cacheflush, unsigned int, start, unsigned int, end, int, cache)
51 return 0; 51 return 0;
52} 52}
53 53
54SYSCALL_DEFINE1(udftrap, int, option) 54SYSCALL_DEFINE2(fp_udfiex_crtl, unsigned int, cmd, unsigned int, act)
55{ 55{
56#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC) 56#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
57 int old_udftrap; 57 int old_udf_iex;
58 58
59 if (!used_math()) { 59 if (!used_math()) {
60 load_fpu(&init_fpuregs); 60 load_fpu(&init_fpuregs);
61 current->thread.fpu.UDF_trap = init_fpuregs.UDF_trap; 61 current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;
62 set_used_math(); 62 set_used_math();
63 } 63 }
64 64
65 old_udftrap = current->thread.fpu.UDF_trap; 65 old_udf_iex = current->thread.fpu.UDF_IEX_trap;
66 switch (option) { 66 act &= (FPCSR_mskUDFE | FPCSR_mskIEXE);
67 case DISABLE_UDFTRAP: 67
68 current->thread.fpu.UDF_trap = 0; 68 switch (cmd) {
69 case DISABLE_UDF_IEX_TRAP:
70 current->thread.fpu.UDF_IEX_trap &= ~act;
69 break; 71 break;
70 case ENABLE_UDFTRAP: 72 case ENABLE_UDF_IEX_TRAP:
71 current->thread.fpu.UDF_trap = FPCSR_mskUDFE; 73 current->thread.fpu.UDF_IEX_trap |= act;
72 break; 74 break;
73 case GET_UDFTRAP: 75 case GET_UDF_IEX_TRAP:
74 break; 76 break;
75 default: 77 default:
76 return -EINVAL; 78 return -EINVAL;
77 } 79 }
78 return old_udftrap; 80 return old_udf_iex;
79#else 81#else
80 return -ENOTSUPP; 82 return -ENOTSUPP;
81#endif 83#endif
diff --git a/arch/nds32/math-emu/Makefile b/arch/nds32/math-emu/Makefile
index 14fa01f4574a..3bed7e5d5d05 100644
--- a/arch/nds32/math-emu/Makefile
+++ b/arch/nds32/math-emu/Makefile
@@ -5,4 +5,6 @@
5 5
6obj-y := fpuemu.o \ 6obj-y := fpuemu.o \
7 fdivd.o fmuld.o fsubd.o faddd.o fs2d.o fsqrtd.o fcmpd.o fnegs.o \ 7 fdivd.o fmuld.o fsubd.o faddd.o fs2d.o fsqrtd.o fcmpd.o fnegs.o \
8 fdivs.o fmuls.o fsubs.o fadds.o fd2s.o fsqrts.o fcmps.o fnegd.o 8 fd2si.o fd2ui.o fd2siz.o fd2uiz.o fsi2d.o fui2d.o \
9 fdivs.o fmuls.o fsubs.o fadds.o fd2s.o fsqrts.o fcmps.o fnegd.o \
10 fs2si.o fs2ui.o fs2siz.o fs2uiz.o fsi2s.o fui2s.o
diff --git a/arch/nds32/math-emu/fd2si.c b/arch/nds32/math-emu/fd2si.c
new file mode 100644
index 000000000000..fae3e16a0a10
--- /dev/null
+++ b/arch/nds32/math-emu/fd2si.c
@@ -0,0 +1,30 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/double.h>
8
9void fd2si(void *ft, void *fa)
10{
11 int r;
12
13 FP_DECL_D(A);
14 FP_DECL_EX;
15
16 FP_UNPACK_DP(A, fa);
17
18 if (A_c == FP_CLS_INF) {
19 *(int *)ft = (A_s == 0) ? 0x7fffffff : 0x80000000;
20 __FPU_FPCSR |= FP_EX_INVALID;
21 } else if (A_c == FP_CLS_NAN) {
22 *(int *)ft = 0xffffffff;
23 __FPU_FPCSR |= FP_EX_INVALID;
24 } else {
25 FP_TO_INT_ROUND_D(r, A, 32, 1);
26 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
27 *(int *)ft = r;
28 }
29
30}
diff --git a/arch/nds32/math-emu/fd2siz.c b/arch/nds32/math-emu/fd2siz.c
new file mode 100644
index 000000000000..92fe6774f112
--- /dev/null
+++ b/arch/nds32/math-emu/fd2siz.c
@@ -0,0 +1,30 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/double.h>
8
9void fd2si_z(void *ft, void *fa)
10{
11 int r;
12
13 FP_DECL_D(A);
14 FP_DECL_EX;
15
16 FP_UNPACK_DP(A, fa);
17
18 if (A_c == FP_CLS_INF) {
19 *(int *)ft = (A_s == 0) ? 0x7fffffff : 0x80000000;
20 __FPU_FPCSR |= FP_EX_INVALID;
21 } else if (A_c == FP_CLS_NAN) {
22 *(int *)ft = 0xffffffff;
23 __FPU_FPCSR |= FP_EX_INVALID;
24 } else {
25 FP_TO_INT_D(r, A, 32, 1);
26 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
27 *(int *)ft = r;
28 }
29
30}
diff --git a/arch/nds32/math-emu/fd2ui.c b/arch/nds32/math-emu/fd2ui.c
new file mode 100644
index 000000000000..a0423b699aa4
--- /dev/null
+++ b/arch/nds32/math-emu/fd2ui.c
@@ -0,0 +1,30 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/double.h>
8
9void fd2ui(void *ft, void *fa)
10{
11 unsigned int r;
12
13 FP_DECL_D(A);
14 FP_DECL_EX;
15
16 FP_UNPACK_DP(A, fa);
17
18 if (A_c == FP_CLS_INF) {
19 *(unsigned int *)ft = (A_s == 0) ? 0xffffffff : 0x00000000;
20 __FPU_FPCSR |= FP_EX_INVALID;
21 } else if (A_c == FP_CLS_NAN) {
22 *(unsigned int *)ft = 0xffffffff;
23 __FPU_FPCSR |= FP_EX_INVALID;
24 } else {
25 FP_TO_INT_ROUND_D(r, A, 32, 0);
26 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
27 *(unsigned int *)ft = r;
28 }
29
30}
diff --git a/arch/nds32/math-emu/fd2uiz.c b/arch/nds32/math-emu/fd2uiz.c
new file mode 100644
index 000000000000..8ae17cfce90d
--- /dev/null
+++ b/arch/nds32/math-emu/fd2uiz.c
@@ -0,0 +1,30 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/double.h>
8
9void fd2ui_z(void *ft, void *fa)
10{
11 unsigned int r;
12
13 FP_DECL_D(A);
14 FP_DECL_EX;
15
16 FP_UNPACK_DP(A, fa);
17
18 if (A_c == FP_CLS_INF) {
19 *(unsigned int *)ft = (A_s == 0) ? 0xffffffff : 0x00000000;
20 __FPU_FPCSR |= FP_EX_INVALID;
21 } else if (A_c == FP_CLS_NAN) {
22 *(unsigned int *)ft = 0xffffffff;
23 __FPU_FPCSR |= FP_EX_INVALID;
24 } else {
25 FP_TO_INT_D(r, A, 32, 0);
26 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
27 *(unsigned int *)ft = r;
28 }
29
30}
diff --git a/arch/nds32/math-emu/fpuemu.c b/arch/nds32/math-emu/fpuemu.c
index 75cf1643fa78..46558a15c0dc 100644
--- a/arch/nds32/math-emu/fpuemu.c
+++ b/arch/nds32/math-emu/fpuemu.c
@@ -113,6 +113,30 @@ static int fpu_emu(struct fpu_struct *fpu_reg, unsigned long insn)
113 func.b = fs2d; 113 func.b = fs2d;
114 ftype = S1D; 114 ftype = S1D;
115 break; 115 break;
116 case fs2si_op:
117 func.b = fs2si;
118 ftype = S1S;
119 break;
120 case fs2si_z_op:
121 func.b = fs2si_z;
122 ftype = S1S;
123 break;
124 case fs2ui_op:
125 func.b = fs2ui;
126 ftype = S1S;
127 break;
128 case fs2ui_z_op:
129 func.b = fs2ui_z;
130 ftype = S1S;
131 break;
132 case fsi2s_op:
133 func.b = fsi2s;
134 ftype = S1S;
135 break;
136 case fui2s_op:
137 func.b = fui2s;
138 ftype = S1S;
139 break;
116 case fsqrts_op: 140 case fsqrts_op:
117 func.b = fsqrts; 141 func.b = fsqrts;
118 ftype = S1S; 142 ftype = S1S;
@@ -182,6 +206,30 @@ static int fpu_emu(struct fpu_struct *fpu_reg, unsigned long insn)
182 func.b = fd2s; 206 func.b = fd2s;
183 ftype = D1S; 207 ftype = D1S;
184 break; 208 break;
209 case fd2si_op:
210 func.b = fd2si;
211 ftype = D1S;
212 break;
213 case fd2si_z_op:
214 func.b = fd2si_z;
215 ftype = D1S;
216 break;
217 case fd2ui_op:
218 func.b = fd2ui;
219 ftype = D1S;
220 break;
221 case fd2ui_z_op:
222 func.b = fd2ui_z;
223 ftype = D1S;
224 break;
225 case fsi2d_op:
226 func.b = fsi2d;
227 ftype = D1S;
228 break;
229 case fui2d_op:
230 func.b = fui2d;
231 ftype = D1S;
232 break;
185 case fsqrtd_op: 233 case fsqrtd_op:
186 func.b = fsqrtd; 234 func.b = fsqrtd;
187 ftype = D1D; 235 ftype = D1D;
@@ -305,16 +353,16 @@ static int fpu_emu(struct fpu_struct *fpu_reg, unsigned long insn)
305 * If an exception is required, generate a tidy SIGFPE exception. 353 * If an exception is required, generate a tidy SIGFPE exception.
306 */ 354 */
307#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC) 355#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
308 if (((fpu_reg->fpcsr << 5) & fpu_reg->fpcsr & FPCSR_mskALLE_NO_UDFE) || 356 if (((fpu_reg->fpcsr << 5) & fpu_reg->fpcsr & FPCSR_mskALLE_NO_UDF_IEXE)
309 ((fpu_reg->fpcsr & FPCSR_mskUDF) && (fpu_reg->UDF_trap))) 357 || ((fpu_reg->fpcsr << 5) & (fpu_reg->UDF_IEX_trap))) {
310#else 358#else
311 if ((fpu_reg->fpcsr << 5) & fpu_reg->fpcsr & FPCSR_mskALLE) 359 if ((fpu_reg->fpcsr << 5) & fpu_reg->fpcsr & FPCSR_mskALLE) {
312#endif 360#endif
313 return SIGFPE; 361 return SIGFPE;
362 }
314 return 0; 363 return 0;
315} 364}
316 365
317
318int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu) 366int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu)
319{ 367{
320 unsigned long insn = 0, addr = regs->ipc; 368 unsigned long insn = 0, addr = regs->ipc;
@@ -336,6 +384,7 @@ int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu)
336 384
337 if (NDS32Insn_OPCODE(insn) != cop0_op) 385 if (NDS32Insn_OPCODE(insn) != cop0_op)
338 return SIGILL; 386 return SIGILL;
387
339 switch (NDS32Insn_OPCODE_COP0(insn)) { 388 switch (NDS32Insn_OPCODE_COP0(insn)) {
340 case fs1_op: 389 case fs1_op:
341 case fs2_op: 390 case fs2_op:
diff --git a/arch/nds32/math-emu/fs2si.c b/arch/nds32/math-emu/fs2si.c
new file mode 100644
index 000000000000..b4931d60980e
--- /dev/null
+++ b/arch/nds32/math-emu/fs2si.c
@@ -0,0 +1,29 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/single.h>
8
9void fs2si(void *ft, void *fa)
10{
11 int r;
12
13 FP_DECL_S(A);
14 FP_DECL_EX;
15
16 FP_UNPACK_SP(A, fa);
17
18 if (A_c == FP_CLS_INF) {
19 *(int *)ft = (A_s == 0) ? 0x7fffffff : 0x80000000;
20 __FPU_FPCSR |= FP_EX_INVALID;
21 } else if (A_c == FP_CLS_NAN) {
22 *(int *)ft = 0xffffffff;
23 __FPU_FPCSR |= FP_EX_INVALID;
24 } else {
25 FP_TO_INT_ROUND_S(r, A, 32, 1);
26 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
27 *(int *)ft = r;
28 }
29}
diff --git a/arch/nds32/math-emu/fs2siz.c b/arch/nds32/math-emu/fs2siz.c
new file mode 100644
index 000000000000..1c2b99ce3e38
--- /dev/null
+++ b/arch/nds32/math-emu/fs2siz.c
@@ -0,0 +1,29 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/single.h>
8
9void fs2si_z(void *ft, void *fa)
10{
11 int r;
12
13 FP_DECL_S(A);
14 FP_DECL_EX;
15
16 FP_UNPACK_SP(A, fa);
17
18 if (A_c == FP_CLS_INF) {
19 *(int *)ft = (A_s == 0) ? 0x7fffffff : 0x80000000;
20 __FPU_FPCSR |= FP_EX_INVALID;
21 } else if (A_c == FP_CLS_NAN) {
22 *(int *)ft = 0xffffffff;
23 __FPU_FPCSR |= FP_EX_INVALID;
24 } else {
25 FP_TO_INT_S(r, A, 32, 1);
26 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
27 *(int *)ft = r;
28 }
29}
diff --git a/arch/nds32/math-emu/fs2ui.c b/arch/nds32/math-emu/fs2ui.c
new file mode 100644
index 000000000000..c337f0384d06
--- /dev/null
+++ b/arch/nds32/math-emu/fs2ui.c
@@ -0,0 +1,29 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/single.h>
8
9void fs2ui(void *ft, void *fa)
10{
11 unsigned int r;
12
13 FP_DECL_S(A);
14 FP_DECL_EX;
15
16 FP_UNPACK_SP(A, fa);
17
18 if (A_c == FP_CLS_INF) {
19 *(unsigned int *)ft = (A_s == 0) ? 0xffffffff : 0x00000000;
20 __FPU_FPCSR |= FP_EX_INVALID;
21 } else if (A_c == FP_CLS_NAN) {
22 *(unsigned int *)ft = 0xffffffff;
23 __FPU_FPCSR |= FP_EX_INVALID;
24 } else {
25 FP_TO_INT_ROUND_S(r, A, 32, 0);
26 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
27 *(unsigned int *)ft = r;
28 }
29}
diff --git a/arch/nds32/math-emu/fs2uiz.c b/arch/nds32/math-emu/fs2uiz.c
new file mode 100644
index 000000000000..22c5e4768044
--- /dev/null
+++ b/arch/nds32/math-emu/fs2uiz.c
@@ -0,0 +1,30 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/single.h>
8
9void fs2ui_z(void *ft, void *fa)
10{
11 unsigned int r;
12
13 FP_DECL_S(A);
14 FP_DECL_EX;
15
16 FP_UNPACK_SP(A, fa);
17
18 if (A_c == FP_CLS_INF) {
19 *(unsigned int *)ft = (A_s == 0) ? 0xffffffff : 0x00000000;
20 __FPU_FPCSR |= FP_EX_INVALID;
21 } else if (A_c == FP_CLS_NAN) {
22 *(unsigned int *)ft = 0xffffffff;
23 __FPU_FPCSR |= FP_EX_INVALID;
24 } else {
25 FP_TO_INT_S(r, A, 32, 0);
26 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
27 *(unsigned int *)ft = r;
28 }
29
30}
diff --git a/arch/nds32/math-emu/fsi2d.c b/arch/nds32/math-emu/fsi2d.c
new file mode 100644
index 000000000000..6b04cec0c5c5
--- /dev/null
+++ b/arch/nds32/math-emu/fsi2d.c
@@ -0,0 +1,22 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/double.h>
8
9void fsi2d(void *ft, void *fa)
10{
11 int a = *(int *)fa;
12
13 FP_DECL_D(R);
14 FP_DECL_EX;
15
16 FP_FROM_INT_D(R, a, 32, int);
17
18 FP_PACK_DP(ft, R);
19
20 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
21
22}
diff --git a/arch/nds32/math-emu/fsi2s.c b/arch/nds32/math-emu/fsi2s.c
new file mode 100644
index 000000000000..689864a5df90
--- /dev/null
+++ b/arch/nds32/math-emu/fsi2s.c
@@ -0,0 +1,22 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/single.h>
8
9void fsi2s(void *ft, void *fa)
10{
11 int a = *(int *)fa;
12
13 FP_DECL_S(R);
14 FP_DECL_EX;
15
16 FP_FROM_INT_S(R, a, 32, int);
17
18 FP_PACK_SP(ft, R);
19
20 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
21
22}
diff --git a/arch/nds32/math-emu/fui2d.c b/arch/nds32/math-emu/fui2d.c
new file mode 100644
index 000000000000..9689d33a8d50
--- /dev/null
+++ b/arch/nds32/math-emu/fui2d.c
@@ -0,0 +1,22 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/double.h>
8
9void fui2d(void *ft, void *fa)
10{
11 unsigned int a = *(unsigned int *)fa;
12
13 FP_DECL_D(R);
14 FP_DECL_EX;
15
16 FP_FROM_INT_D(R, a, 32, int);
17
18 FP_PACK_DP(ft, R);
19
20 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
21
22}
diff --git a/arch/nds32/math-emu/fui2s.c b/arch/nds32/math-emu/fui2s.c
new file mode 100644
index 000000000000..f70f0762547d
--- /dev/null
+++ b/arch/nds32/math-emu/fui2s.c
@@ -0,0 +1,22 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2005-2019 Andes Technology Corporation
3#include <linux/uaccess.h>
4
5#include <asm/sfp-machine.h>
6#include <math-emu/soft-fp.h>
7#include <math-emu/single.h>
8
9void fui2s(void *ft, void *fa)
10{
11 unsigned int a = *(unsigned int *)fa;
12
13 FP_DECL_S(R);
14 FP_DECL_EX;
15
16 FP_FROM_INT_S(R, a, 32, int);
17
18 FP_PACK_SP(ft, R);
19
20 __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
21
22}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 09407ed1aacd..4860efa91d7b 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -36,7 +36,6 @@ config PARISC
36 select GENERIC_STRNCPY_FROM_USER 36 select GENERIC_STRNCPY_FROM_USER
37 select SYSCTL_ARCH_UNALIGN_ALLOW 37 select SYSCTL_ARCH_UNALIGN_ALLOW
38 select SYSCTL_EXCEPTION_TRACE 38 select SYSCTL_EXCEPTION_TRACE
39 select ARCH_DISCARD_MEMBLOCK
40 select HAVE_MOD_ARCH_SPECIFIC 39 select HAVE_MOD_ARCH_SPECIFIC
41 select VIRT_TO_BUS 40 select VIRT_TO_BUS
42 select MODULES_USE_ELF_RELA 41 select MODULES_USE_ELF_RELA
@@ -195,7 +194,8 @@ config PREFETCH
195 194
196config MLONGCALLS 195config MLONGCALLS
197 bool "Enable the -mlong-calls compiler option for big kernels" 196 bool "Enable the -mlong-calls compiler option for big kernels"
198 default y 197 default y if !MODULES || UBSAN || FTRACE
198 default n
199 depends on PA8X00 199 depends on PA8X00
200 help 200 help
201 If you configure the kernel to include many drivers built-in instead 201 If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
index ccc109761f44..d3e3d94e90c3 100644
--- a/arch/parisc/configs/712_defconfig
+++ b/arch/parisc/configs/712_defconfig
@@ -34,7 +34,6 @@ CONFIG_INET_DIAG=m
34CONFIG_NETFILTER=y 34CONFIG_NETFILTER=y
35CONFIG_LLC2=m 35CONFIG_LLC2=m
36CONFIG_NET_PKTGEN=m 36CONFIG_NET_PKTGEN=m
37CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
38CONFIG_DEVTMPFS=y 37CONFIG_DEVTMPFS=y
39CONFIG_DEVTMPFS_MOUNT=y 38CONFIG_DEVTMPFS_MOUNT=y
40# CONFIG_STANDALONE is not set 39# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index 5acb93dcaabf..a8859496b0b9 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -70,7 +70,6 @@ CONFIG_IP_DCCP=m
70# CONFIG_IP_DCCP_CCID3 is not set 70# CONFIG_IP_DCCP_CCID3 is not set
71CONFIG_LLC2=m 71CONFIG_LLC2=m
72CONFIG_NET_PKTGEN=m 72CONFIG_NET_PKTGEN=m
73CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
74CONFIG_DEVTMPFS=y 73CONFIG_DEVTMPFS=y
75CONFIG_DEVTMPFS_MOUNT=y 74CONFIG_DEVTMPFS_MOUNT=y
76# CONFIG_STANDALONE is not set 75# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
index 83ffd161aec5..0cae9664bf67 100644
--- a/arch/parisc/configs/b180_defconfig
+++ b/arch/parisc/configs/b180_defconfig
@@ -24,7 +24,6 @@ CONFIG_INET=y
24CONFIG_IP_MULTICAST=y 24CONFIG_IP_MULTICAST=y
25CONFIG_IP_PNP=y 25CONFIG_IP_PNP=y
26CONFIG_IP_PNP_BOOTP=y 26CONFIG_IP_PNP_BOOTP=y
27CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
28CONFIG_DEVTMPFS=y 27CONFIG_DEVTMPFS=y
29CONFIG_DEVTMPFS_MOUNT=y 28CONFIG_DEVTMPFS_MOUNT=y
30# CONFIG_PREVENT_FIRMWARE_BUILD is not set 29# CONFIG_PREVENT_FIRMWARE_BUILD is not set
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 8d41a73bd71b..6c29b841735c 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -32,7 +32,6 @@ CONFIG_INET6_IPCOMP=m
32CONFIG_IPV6_TUNNEL=m 32CONFIG_IPV6_TUNNEL=m
33CONFIG_NETFILTER=y 33CONFIG_NETFILTER=y
34CONFIG_NET_PKTGEN=m 34CONFIG_NET_PKTGEN=m
35CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
36CONFIG_DEVTMPFS=y 35CONFIG_DEVTMPFS=y
37CONFIG_DEVTMPFS_MOUNT=y 36CONFIG_DEVTMPFS_MOUNT=y
38# CONFIG_STANDALONE is not set 37# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 900b00084953..507f0644fcf8 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -57,7 +57,6 @@ CONFIG_IP_DCCP=m
57CONFIG_TIPC=m 57CONFIG_TIPC=m
58CONFIG_LLC2=m 58CONFIG_LLC2=m
59CONFIG_DNS_RESOLVER=y 59CONFIG_DNS_RESOLVER=y
60CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
61CONFIG_DEVTMPFS=y 60CONFIG_DEVTMPFS=y
62CONFIG_DEVTMPFS_MOUNT=y 61CONFIG_DEVTMPFS_MOUNT=y
63# CONFIG_STANDALONE is not set 62# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
index 52c9050a7c5c..6a91cc2623e8 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/default_defconfig
@@ -44,7 +44,6 @@ CONFIG_INET6_AH=y
44CONFIG_INET6_ESP=y 44CONFIG_INET6_ESP=y
45CONFIG_INET6_IPCOMP=y 45CONFIG_INET6_IPCOMP=y
46CONFIG_LLC2=m 46CONFIG_LLC2=m
47CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
48CONFIG_DEVTMPFS=y 47CONFIG_DEVTMPFS=y
49CONFIG_DEVTMPFS_MOUNT=y 48CONFIG_DEVTMPFS_MOUNT=y
50# CONFIG_STANDALONE is not set 49# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index a8f9bbef0975..18b072a47a10 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -47,7 +47,6 @@ CONFIG_INET_ESP=m
47CONFIG_INET_DIAG=m 47CONFIG_INET_DIAG=m
48CONFIG_LLC2=m 48CONFIG_LLC2=m
49# CONFIG_WIRELESS is not set 49# CONFIG_WIRELESS is not set
50CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
51CONFIG_DEVTMPFS=y 50CONFIG_DEVTMPFS=y
52CONFIG_DEVTMPFS_MOUNT=y 51CONFIG_DEVTMPFS_MOUNT=y
53# CONFIG_STANDALONE is not set 52# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index 3d4dd68e181b..a303ae9a77f4 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -2,6 +2,30 @@
2#ifndef __PARISC_SPECIAL_INSNS_H 2#ifndef __PARISC_SPECIAL_INSNS_H
3#define __PARISC_SPECIAL_INSNS_H 3#define __PARISC_SPECIAL_INSNS_H
4 4
5#define lpa(va) ({ \
6 unsigned long pa; \
7 __asm__ __volatile__( \
8 "copy %%r0,%0\n\t" \
9 "lpa %%r0(%1),%0" \
10 : "=r" (pa) \
11 : "r" (va) \
12 : "memory" \
13 ); \
14 pa; \
15})
16
17#define lpa_user(va) ({ \
18 unsigned long pa; \
19 __asm__ __volatile__( \
20 "copy %%r0,%0\n\t" \
21 "lpa %%r0(%%sr3,%1),%0" \
22 : "=r" (pa) \
23 : "r" (va) \
24 : "memory" \
25 ); \
26 pa; \
27})
28
5#define mfctl(reg) ({ \ 29#define mfctl(reg) ({ \
6 unsigned long cr; \ 30 unsigned long cr; \
7 __asm__ __volatile__( \ 31 __asm__ __volatile__( \
diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
index bf2274e01a96..ca1f5ca0540a 100644
--- a/arch/parisc/kernel/alternative.c
+++ b/arch/parisc/kernel/alternative.c
@@ -56,7 +56,8 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
56 * time IO-PDIR is changed in Ike/Astro. 56 * time IO-PDIR is changed in Ike/Astro.
57 */ 57 */
58 if ((cond & ALT_COND_NO_IOC_FDC) && 58 if ((cond & ALT_COND_NO_IOC_FDC) &&
59 (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)) 59 ((boot_cpu_data.cpu_type <= pcxw_) ||
60 (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)))
60 continue; 61 continue;
61 62
62 /* Want to replace pdtlb by a pdtlb,l instruction? */ 63 /* Want to replace pdtlb by a pdtlb,l instruction? */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index c3b1b9c24ede..cd33b4feacb1 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -35,6 +35,15 @@ OUTPUT_FORMAT("elf64-hppa-linux")
35OUTPUT_ARCH(hppa:hppa2.0w) 35OUTPUT_ARCH(hppa:hppa2.0w)
36#endif 36#endif
37 37
38#define EXIT_TEXT_SECTIONS() .exit.text : { EXIT_TEXT }
39#if !defined(CONFIG_64BIT) || defined(CONFIG_MLONGCALLS)
40#define MLONGCALL_KEEP(x)
41#define MLONGCALL_DISCARD(x) x
42#else
43#define MLONGCALL_KEEP(x) x
44#define MLONGCALL_DISCARD(x)
45#endif
46
38ENTRY(parisc_kernel_start) 47ENTRY(parisc_kernel_start)
39#ifndef CONFIG_64BIT 48#ifndef CONFIG_64BIT
40jiffies = jiffies_64 + 4; 49jiffies = jiffies_64 + 4;
@@ -47,15 +56,11 @@ SECTIONS
47 56
48 __init_begin = .; 57 __init_begin = .;
49 HEAD_TEXT_SECTION 58 HEAD_TEXT_SECTION
50 INIT_TEXT_SECTION(8) 59 MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
51 60
52 . = ALIGN(PAGE_SIZE); 61 . = ALIGN(PAGE_SIZE);
53 INIT_DATA_SECTION(PAGE_SIZE) 62 INIT_DATA_SECTION(PAGE_SIZE)
54 /* we have to discard exit text and such at runtime, not link time */ 63 MLONGCALL_DISCARD(EXIT_TEXT_SECTIONS())
55 .exit.text :
56 {
57 EXIT_TEXT
58 }
59 .exit.data : 64 .exit.data :
60 { 65 {
61 EXIT_DATA 66 EXIT_DATA
@@ -73,11 +78,12 @@ SECTIONS
73 78
74 _text = .; /* Text and read-only data */ 79 _text = .; /* Text and read-only data */
75 _stext = .; 80 _stext = .;
81 MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
76 .text ALIGN(PAGE_SIZE) : { 82 .text ALIGN(PAGE_SIZE) : {
77 TEXT_TEXT 83 TEXT_TEXT
84 LOCK_TEXT
78 SCHED_TEXT 85 SCHED_TEXT
79 CPUIDLE_TEXT 86 CPUIDLE_TEXT
80 LOCK_TEXT
81 KPROBES_TEXT 87 KPROBES_TEXT
82 IRQENTRY_TEXT 88 IRQENTRY_TEXT
83 SOFTIRQENTRY_TEXT 89 SOFTIRQENTRY_TEXT
@@ -92,6 +98,7 @@ SECTIONS
92 *(.lock.text) /* out-of-line lock text */ 98 *(.lock.text) /* out-of-line lock text */
93 *(.gnu.warning) 99 *(.gnu.warning)
94 } 100 }
101 MLONGCALL_KEEP(EXIT_TEXT_SECTIONS())
95 . = ALIGN(PAGE_SIZE); 102 . = ALIGN(PAGE_SIZE);
96 _etext = .; 103 _etext = .;
97 /* End of text section */ 104 /* End of text section */
diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h
index bfcd834f42d0..ef783a383c5a 100644
--- a/arch/parisc/math-emu/cnv_float.h
+++ b/arch/parisc/math-emu/cnv_float.h
@@ -47,19 +47,19 @@
47 ((exponent < (SGL_P - 1)) ? \ 47 ((exponent < (SGL_P - 1)) ? \
48 (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE) 48 (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE)
49 49
50#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH) 50#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0)
51 51
52#define Sgl_roundnearest_from_int(int_value,sgl_value) \ 52#define Sgl_roundnearest_from_int(int_value,sgl_value) \
53 if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \ 53 if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \
54 if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \ 54 if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \
55 Sall(sgl_value)++ 55 Sall(sgl_value)++
56 56
57#define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \ 57#define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \
58 ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB)) 58 (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB))
59 59
60#define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \ 60#define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \
61 if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \ 61 if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \
62 if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \ 62 if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \
63 Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++ 63 Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
64 64
65#define Dint_isinexact_to_dbl(dint_value) \ 65#define Dint_isinexact_to_dbl(dint_value) \
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 9a26b442f820..8e645ddac58e 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -356,6 +356,8 @@ static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
356 356
357 node_info->vdev_port.id = *idp; 357 node_info->vdev_port.id = *idp;
358 node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL); 358 node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
359 if (!node_info->vdev_port.name)
360 return -1;
359 node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp; 361 node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
360 362
361 return 0; 363 return 0;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6de7c684c29f..a58ae9c42803 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -891,6 +891,10 @@ static int sparc_perf_event_set_period(struct perf_event *event,
891 s64 period = hwc->sample_period; 891 s64 period = hwc->sample_period;
892 int ret = 0; 892 int ret = 0;
893 893
894 /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
895 if (unlikely(period != hwc->last_period))
896 left = period - (hwc->last_period - left);
897
894 if (unlikely(left <= -period)) { 898 if (unlikely(left <= -period)) {
895 left = period; 899 left = period;
896 local64_set(&hwc->period_left, left); 900 local64_set(&hwc->period_left, left);
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index d245f89d1395..d220b6848746 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -587,7 +587,7 @@ xcall_flush_tlb_kernel_range: /* 44 insns */
587 sub %g7, %g1, %g3 587 sub %g7, %g1, %g3
588 srlx %g3, 18, %g2 588 srlx %g3, 18, %g2
589 brnz,pn %g2, 2f 589 brnz,pn %g2, 2f
590 add %g2, 1, %g2 590 sethi %hi(PAGE_SIZE), %g2
591 sub %g3, %g2, %g3 591 sub %g3, %g2, %g3
592 or %g1, 0x20, %g1 ! Nucleus 592 or %g1, 0x20, %g1 ! Nucleus
5931: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP 5931: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -751,7 +751,7 @@ __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
751 sub %g7, %g1, %g3 751 sub %g7, %g1, %g3
752 srlx %g3, 18, %g2 752 srlx %g3, 18, %g2
753 brnz,pn %g2, 2f 753 brnz,pn %g2, 2f
754 add %g2, 1, %g2 754 sethi %hi(PAGE_SIZE), %g2
755 sub %g3, %g2, %g3 755 sub %g3, %g2, %g3
756 or %g1, 0x20, %g1 ! Nucleus 756 or %g1, 0x20, %g1 ! Nucleus
7571: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP 7571: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 1796d2bdcaaa..5102bf7c8192 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,7 +28,10 @@ obj-y += cpuid-deps.o
28obj-$(CONFIG_PROC_FS) += proc.o 28obj-$(CONFIG_PROC_FS) += proc.o
29obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o 29obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
30 30
31obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o intel_epb.o 31ifdef CONFIG_CPU_SUP_INTEL
32obj-y += intel.o intel_pconfig.o
33obj-$(CONFIG_PM) += intel_epb.o
34endif
32obj-$(CONFIG_CPU_SUP_AMD) += amd.o 35obj-$(CONFIG_CPU_SUP_AMD) += amd.o
33obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o 36obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o
34obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o 37obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
index ebb14a26f117..f4dd73396f28 100644
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -97,7 +97,6 @@ static void intel_epb_restore(void)
97 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); 97 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
98} 98}
99 99
100#ifdef CONFIG_PM
101static struct syscore_ops intel_epb_syscore_ops = { 100static struct syscore_ops intel_epb_syscore_ops = {
102 .suspend = intel_epb_save, 101 .suspend = intel_epb_save,
103 .resume = intel_epb_restore, 102 .resume = intel_epb_restore,
@@ -194,25 +193,6 @@ static int intel_epb_offline(unsigned int cpu)
194 return 0; 193 return 0;
195} 194}
196 195
197static inline void register_intel_ebp_syscore_ops(void)
198{
199 register_syscore_ops(&intel_epb_syscore_ops);
200}
201#else /* !CONFIG_PM */
202static int intel_epb_online(unsigned int cpu)
203{
204 intel_epb_restore();
205 return 0;
206}
207
208static int intel_epb_offline(unsigned int cpu)
209{
210 return intel_epb_save();
211}
212
213static inline void register_intel_ebp_syscore_ops(void) {}
214#endif
215
216static __init int intel_epb_init(void) 196static __init int intel_epb_init(void)
217{ 197{
218 int ret; 198 int ret;
@@ -226,7 +206,7 @@ static __init int intel_epb_init(void)
226 if (ret < 0) 206 if (ret < 0)
227 goto err_out_online; 207 goto err_out_online;
228 208
229 register_intel_ebp_syscore_ops(); 209 register_syscore_ops(&intel_epb_syscore_ops);
230 return 0; 210 return 0;
231 211
232err_out_online: 212err_out_online:
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index cf00ab6c6621..306c3a0902ba 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -557,7 +557,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
557} 557}
558 558
559/** 559/**
560 * get_desc() - Obtain pointer to a segment descriptor 560 * get_desc() - Obtain contents of a segment descriptor
561 * @out: Segment descriptor contents on success
561 * @sel: Segment selector 562 * @sel: Segment selector
562 * 563 *
563 * Given a segment selector, obtain a pointer to the segment descriptor. 564 * Given a segment selector, obtain a pointer to the segment descriptor.
@@ -565,18 +566,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
565 * 566 *
566 * Returns: 567 * Returns:
567 * 568 *
568 * Pointer to segment descriptor on success. 569 * True on success, false on failure.
569 * 570 *
570 * NULL on error. 571 * NULL on error.
571 */ 572 */
572static struct desc_struct *get_desc(unsigned short sel) 573static bool get_desc(struct desc_struct *out, unsigned short sel)
573{ 574{
574 struct desc_ptr gdt_desc = {0, 0}; 575 struct desc_ptr gdt_desc = {0, 0};
575 unsigned long desc_base; 576 unsigned long desc_base;
576 577
577#ifdef CONFIG_MODIFY_LDT_SYSCALL 578#ifdef CONFIG_MODIFY_LDT_SYSCALL
578 if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { 579 if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
579 struct desc_struct *desc = NULL; 580 bool success = false;
580 struct ldt_struct *ldt; 581 struct ldt_struct *ldt;
581 582
582 /* Bits [15:3] contain the index of the desired entry. */ 583 /* Bits [15:3] contain the index of the desired entry. */
@@ -584,12 +585,14 @@ static struct desc_struct *get_desc(unsigned short sel)
584 585
585 mutex_lock(&current->active_mm->context.lock); 586 mutex_lock(&current->active_mm->context.lock);
586 ldt = current->active_mm->context.ldt; 587 ldt = current->active_mm->context.ldt;
587 if (ldt && sel < ldt->nr_entries) 588 if (ldt && sel < ldt->nr_entries) {
588 desc = &ldt->entries[sel]; 589 *out = ldt->entries[sel];
590 success = true;
591 }
589 592
590 mutex_unlock(&current->active_mm->context.lock); 593 mutex_unlock(&current->active_mm->context.lock);
591 594
592 return desc; 595 return success;
593 } 596 }
594#endif 597#endif
595 native_store_gdt(&gdt_desc); 598 native_store_gdt(&gdt_desc);
@@ -604,9 +607,10 @@ static struct desc_struct *get_desc(unsigned short sel)
604 desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); 607 desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
605 608
606 if (desc_base > gdt_desc.size) 609 if (desc_base > gdt_desc.size)
607 return NULL; 610 return false;
608 611
609 return (struct desc_struct *)(gdt_desc.address + desc_base); 612 *out = *(struct desc_struct *)(gdt_desc.address + desc_base);
613 return true;
610} 614}
611 615
612/** 616/**
@@ -628,7 +632,7 @@ static struct desc_struct *get_desc(unsigned short sel)
628 */ 632 */
629unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) 633unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
630{ 634{
631 struct desc_struct *desc; 635 struct desc_struct desc;
632 short sel; 636 short sel;
633 637
634 sel = get_segment_selector(regs, seg_reg_idx); 638 sel = get_segment_selector(regs, seg_reg_idx);
@@ -666,11 +670,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
666 if (!sel) 670 if (!sel)
667 return -1L; 671 return -1L;
668 672
669 desc = get_desc(sel); 673 if (!get_desc(&desc, sel))
670 if (!desc)
671 return -1L; 674 return -1L;
672 675
673 return get_desc_base(desc); 676 return get_desc_base(&desc);
674} 677}
675 678
676/** 679/**
@@ -692,7 +695,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
692 */ 695 */
693static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) 696static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
694{ 697{
695 struct desc_struct *desc; 698 struct desc_struct desc;
696 unsigned long limit; 699 unsigned long limit;
697 short sel; 700 short sel;
698 701
@@ -706,8 +709,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
706 if (!sel) 709 if (!sel)
707 return 0; 710 return 0;
708 711
709 desc = get_desc(sel); 712 if (!get_desc(&desc, sel))
710 if (!desc)
711 return 0; 713 return 0;
712 714
713 /* 715 /*
@@ -716,8 +718,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
716 * not tested when checking the segment limits. In practice, 718 * not tested when checking the segment limits. In practice,
717 * this means that the segment ends in (limit << 12) + 0xfff. 719 * this means that the segment ends in (limit << 12) + 0xfff.
718 */ 720 */
719 limit = get_desc_limit(desc); 721 limit = get_desc_limit(&desc);
720 if (desc->g) 722 if (desc.g)
721 limit = (limit << 12) + 0xfff; 723 limit = (limit << 12) + 0xfff;
722 724
723 return limit; 725 return limit;
@@ -741,7 +743,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
741 */ 743 */
742int insn_get_code_seg_params(struct pt_regs *regs) 744int insn_get_code_seg_params(struct pt_regs *regs)
743{ 745{
744 struct desc_struct *desc; 746 struct desc_struct desc;
745 short sel; 747 short sel;
746 748
747 if (v8086_mode(regs)) 749 if (v8086_mode(regs))
@@ -752,8 +754,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
752 if (sel < 0) 754 if (sel < 0)
753 return sel; 755 return sel;
754 756
755 desc = get_desc(sel); 757 if (!get_desc(&desc, sel))
756 if (!desc)
757 return -EINVAL; 758 return -EINVAL;
758 759
759 /* 760 /*
@@ -761,10 +762,10 @@ int insn_get_code_seg_params(struct pt_regs *regs)
761 * determines whether a segment contains data or code. If this is a data 762 * determines whether a segment contains data or code. If this is a data
762 * segment, return error. 763 * segment, return error.
763 */ 764 */
764 if (!(desc->type & BIT(3))) 765 if (!(desc.type & BIT(3)))
765 return -EINVAL; 766 return -EINVAL;
766 767
767 switch ((desc->l << 1) | desc->d) { 768 switch ((desc.l << 1) | desc.d) {
768 case 0: /* 769 case 0: /*
769 * Legacy mode. CS.L=0, CS.D=0. Address and operand size are 770 * Legacy mode. CS.L=0, CS.D=0. Address and operand size are
770 * both 16-bit. 771 * both 16-bit.
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0bc07a9bc27b..24b079e94bc2 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -298,7 +298,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
298 * address in its instruction pointer may not be possible to resolve 298 * address in its instruction pointer may not be possible to resolve
299 * any more at that point (the page tables used by it previously may 299 * any more at that point (the page tables used by it previously may
300 * have been overwritten by hibernate image data). 300 * have been overwritten by hibernate image data).
301 *
302 * First, make sure that we wake up all the potentially disabled SMT
303 * threads which have been initially brought up and then put into
304 * mwait/cpuidle sleep.
305 * Those will be put to proper (not interfering with hibernation
306 * resume) sleep afterwards, and the resumed kernel will decide itself
307 * what to do with them.
301 */ 308 */
309 ret = cpuhp_smt_enable();
310 if (ret)
311 return ret;
302 smp_ops.play_dead = resume_play_dead; 312 smp_ops.play_dead = resume_play_dead;
303 ret = disable_nonboot_cpus(); 313 ret = disable_nonboot_cpus();
304 smp_ops.play_dead = play_dead; 314 smp_ops.play_dead = play_dead;
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 4845b8c7be7f..fc413717a45f 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -11,6 +11,7 @@
11#include <linux/suspend.h> 11#include <linux/suspend.h>
12#include <linux/scatterlist.h> 12#include <linux/scatterlist.h>
13#include <linux/kdebug.h> 13#include <linux/kdebug.h>
14#include <linux/cpu.h>
14 15
15#include <crypto/hash.h> 16#include <crypto/hash.h>
16 17
@@ -245,3 +246,35 @@ out:
245 __flush_tlb_all(); 246 __flush_tlb_all();
246 return 0; 247 return 0;
247} 248}
249
250int arch_resume_nosmt(void)
251{
252 int ret = 0;
253 /*
254 * We reached this while coming out of hibernation. This means
255 * that SMT siblings are sleeping in hlt, as mwait is not safe
256 * against control transition during resume (see comment in
257 * hibernate_resume_nonboot_cpu_disable()).
258 *
259 * If the resumed kernel has SMT disabled, we have to take all the
260 * SMT siblings out of hlt, and offline them again so that they
261 * end up in mwait proper.
262 *
263 * Called with hotplug disabled.
264 */
265 cpu_hotplug_enable();
266 if (cpu_smt_control == CPU_SMT_DISABLED ||
267 cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
268 enum cpuhp_smt_control old = cpu_smt_control;
269
270 ret = cpuhp_smt_enable();
271 if (ret)
272 goto out;
273 ret = cpuhp_smt_disable(old);
274 if (ret)
275 goto out;
276 }
277out:
278 cpu_hotplug_disable();
279 return ret;
280}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index c0ec24349421..176cb46bcf12 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -310,7 +310,8 @@ extern char _SecondaryResetVector_text_start;
310extern char _SecondaryResetVector_text_end; 310extern char _SecondaryResetVector_text_end;
311#endif 311#endif
312 312
313static inline int mem_reserve(unsigned long start, unsigned long end) 313static inline int __init_memblock mem_reserve(unsigned long start,
314 unsigned long end)
314{ 315{
315 return memblock_reserve(start, end - start); 316 return memblock_reserve(start, end - start);
316} 317}
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index b3796a40a61a..59f46904cb11 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -1046,7 +1046,8 @@ struct blkcg_policy blkcg_policy_bfq = {
1046struct cftype bfq_blkcg_legacy_files[] = { 1046struct cftype bfq_blkcg_legacy_files[] = {
1047 { 1047 {
1048 .name = "bfq.weight", 1048 .name = "bfq.weight",
1049 .flags = CFTYPE_NOT_ON_ROOT, 1049 .link_name = "weight",
1050 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
1050 .seq_show = bfq_io_show_weight, 1051 .seq_show = bfq_io_show_weight,
1051 .write_u64 = bfq_io_set_weight_legacy, 1052 .write_u64 = bfq_io_set_weight_legacy,
1052 }, 1053 },
@@ -1166,7 +1167,8 @@ struct cftype bfq_blkcg_legacy_files[] = {
1166struct cftype bfq_blkg_files[] = { 1167struct cftype bfq_blkg_files[] = {
1167 { 1168 {
1168 .name = "bfq.weight", 1169 .name = "bfq.weight",
1169 .flags = CFTYPE_NOT_ON_ROOT, 1170 .link_name = "weight",
1171 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
1170 .seq_show = bfq_io_show_weight, 1172 .seq_show = bfq_io_show_weight,
1171 .write = bfq_io_set_weight, 1173 .write = bfq_io_set_weight,
1172 }, 1174 },
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b97b479e4f64..1f7127b03490 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -881,7 +881,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
881 blkg_free(new_blkg); 881 blkg_free(new_blkg);
882 } else { 882 } else {
883 blkg = blkg_create(pos, q, new_blkg); 883 blkg = blkg_create(pos, q, new_blkg);
884 if (unlikely(IS_ERR(blkg))) { 884 if (IS_ERR(blkg)) {
885 ret = PTR_ERR(blkg); 885 ret = PTR_ERR(blkg);
886 goto fail_unlock; 886 goto fail_unlock;
887 } 887 }
diff --git a/block/blk-core.c b/block/blk-core.c
index ee1b35fe8572..8340f69670d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -320,6 +320,19 @@ void blk_cleanup_queue(struct request_queue *q)
320 if (queue_is_mq(q)) 320 if (queue_is_mq(q))
321 blk_mq_exit_queue(q); 321 blk_mq_exit_queue(q);
322 322
323 /*
324 * In theory, request pool of sched_tags belongs to request queue.
325 * However, the current implementation requires tag_set for freeing
326 * requests, so free the pool now.
327 *
328 * Queue has become frozen, there can't be any in-queue requests, so
329 * it is safe to free requests now.
330 */
331 mutex_lock(&q->sysfs_lock);
332 if (q->elevator)
333 blk_mq_sched_free_requests(q);
334 mutex_unlock(&q->sysfs_lock);
335
323 percpu_ref_exit(&q->q_usage_counter); 336 percpu_ref_exit(&q->q_usage_counter);
324 337
325 /* @q is and will stay empty, shutdown and put */ 338 /* @q is and will stay empty, shutdown and put */
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 74c6bb871f7e..500cb04901cc 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,14 +475,18 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
475 return ret; 475 return ret;
476} 476}
477 477
478/* called in queue's release handler, tagset has gone away */
478static void blk_mq_sched_tags_teardown(struct request_queue *q) 479static void blk_mq_sched_tags_teardown(struct request_queue *q)
479{ 480{
480 struct blk_mq_tag_set *set = q->tag_set;
481 struct blk_mq_hw_ctx *hctx; 481 struct blk_mq_hw_ctx *hctx;
482 int i; 482 int i;
483 483
484 queue_for_each_hw_ctx(q, hctx, i) 484 queue_for_each_hw_ctx(q, hctx, i) {
485 blk_mq_sched_free_tags(set, hctx, i); 485 if (hctx->sched_tags) {
486 blk_mq_free_rq_map(hctx->sched_tags);
487 hctx->sched_tags = NULL;
488 }
489 }
486} 490}
487 491
488int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) 492int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
@@ -523,6 +527,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
523 ret = e->ops.init_hctx(hctx, i); 527 ret = e->ops.init_hctx(hctx, i);
524 if (ret) { 528 if (ret) {
525 eq = q->elevator; 529 eq = q->elevator;
530 blk_mq_sched_free_requests(q);
526 blk_mq_exit_sched(q, eq); 531 blk_mq_exit_sched(q, eq);
527 kobject_put(&eq->kobj); 532 kobject_put(&eq->kobj);
528 return ret; 533 return ret;
@@ -534,11 +539,30 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
534 return 0; 539 return 0;
535 540
536err: 541err:
542 blk_mq_sched_free_requests(q);
537 blk_mq_sched_tags_teardown(q); 543 blk_mq_sched_tags_teardown(q);
538 q->elevator = NULL; 544 q->elevator = NULL;
539 return ret; 545 return ret;
540} 546}
541 547
548/*
549 * called in either blk_queue_cleanup or elevator_switch, tagset
550 * is required for freeing requests
551 */
552void blk_mq_sched_free_requests(struct request_queue *q)
553{
554 struct blk_mq_hw_ctx *hctx;
555 int i;
556
557 lockdep_assert_held(&q->sysfs_lock);
558 WARN_ON(!q->elevator);
559
560 queue_for_each_hw_ctx(q, hctx, i) {
561 if (hctx->sched_tags)
562 blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
563 }
564}
565
542void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) 566void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
543{ 567{
544 struct blk_mq_hw_ctx *hctx; 568 struct blk_mq_hw_ctx *hctx;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index c7bdb52367ac..3cf92cbbd8ac 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -28,6 +28,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
28 28
29int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); 29int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
30void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); 30void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
31void blk_mq_sched_free_requests(struct request_queue *q);
31 32
32static inline bool 33static inline bool
33blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) 34blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 75b5281cc577..977c659dcd18 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -850,7 +850,7 @@ static void blk_exit_queue(struct request_queue *q)
850 */ 850 */
851 if (q->elevator) { 851 if (q->elevator) {
852 ioc_clear_queue(q); 852 ioc_clear_queue(q);
853 elevator_exit(q, q->elevator); 853 __elevator_exit(q, q->elevator);
854 q->elevator = NULL; 854 q->elevator = NULL;
855 } 855 }
856 856
diff --git a/block/blk.h b/block/blk.h
index 91b3581b7c7a..7814aa207153 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
6#include <linux/blk-mq.h> 6#include <linux/blk-mq.h>
7#include <xen/xen.h> 7#include <xen/xen.h>
8#include "blk-mq.h" 8#include "blk-mq.h"
9#include "blk-mq-sched.h"
9 10
10/* Max future timer expiry for timeouts */ 11/* Max future timer expiry for timeouts */
11#define BLK_MAX_TIMEOUT (5 * HZ) 12#define BLK_MAX_TIMEOUT (5 * HZ)
@@ -176,10 +177,17 @@ void blk_insert_flush(struct request *rq);
176int elevator_init_mq(struct request_queue *q); 177int elevator_init_mq(struct request_queue *q);
177int elevator_switch_mq(struct request_queue *q, 178int elevator_switch_mq(struct request_queue *q,
178 struct elevator_type *new_e); 179 struct elevator_type *new_e);
179void elevator_exit(struct request_queue *, struct elevator_queue *); 180void __elevator_exit(struct request_queue *, struct elevator_queue *);
180int elv_register_queue(struct request_queue *q); 181int elv_register_queue(struct request_queue *q);
181void elv_unregister_queue(struct request_queue *q); 182void elv_unregister_queue(struct request_queue *q);
182 183
184static inline void elevator_exit(struct request_queue *q,
185 struct elevator_queue *e)
186{
187 blk_mq_sched_free_requests(q);
188 __elevator_exit(q, e);
189}
190
183struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); 191struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
184 192
185#ifdef CONFIG_FAIL_IO_TIMEOUT 193#ifdef CONFIG_FAIL_IO_TIMEOUT
diff --git a/block/elevator.c b/block/elevator.c
index ec55d5fc0b3e..2f17d66d0e61 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -178,7 +178,7 @@ static void elevator_release(struct kobject *kobj)
178 kfree(e); 178 kfree(e);
179} 179}
180 180
181void elevator_exit(struct request_queue *q, struct elevator_queue *e) 181void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
182{ 182{
183 mutex_lock(&e->sysfs_lock); 183 mutex_lock(&e->sysfs_lock);
184 if (e->type->ops.exit_sched) 184 if (e->type->ops.exit_sched)
diff --git a/crypto/hmac.c b/crypto/hmac.c
index f03cb32147cc..8b2a212eb0ad 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -152,8 +152,10 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
152 152
153 parent->descsize = sizeof(struct shash_desc) + 153 parent->descsize = sizeof(struct shash_desc) +
154 crypto_shash_descsize(hash); 154 crypto_shash_descsize(hash);
155 if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE)) 155 if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE)) {
156 crypto_free_shash(hash);
156 return -EINVAL; 157 return -EINVAL;
158 }
157 159
158 ctx->hash = hash; 160 ctx->hash = hash;
159 return 0; 161 return 0;
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 6ea1a270b8dc..787dccca3715 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -198,7 +198,7 @@ static void __exit jent_mod_exit(void)
198 crypto_unregister_rng(&jent_alg); 198 crypto_unregister_rng(&jent_alg);
199} 199}
200 200
201subsys_initcall(jent_mod_init); 201module_init(jent_mod_init);
202module_exit(jent_mod_exit); 202module_exit(jent_mod_exit);
203 203
204MODULE_LICENSE("Dual BSD/GPL"); 204MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index e2c6aae2d636..bd19f8af950b 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -196,7 +196,6 @@ static const struct file_operations aoe_debugfs_fops = {
196static void 196static void
197aoedisk_add_debugfs(struct aoedev *d) 197aoedisk_add_debugfs(struct aoedev *d)
198{ 198{
199 struct dentry *entry;
200 char *p; 199 char *p;
201 200
202 if (aoe_debugfs_dir == NULL) 201 if (aoe_debugfs_dir == NULL)
@@ -207,15 +206,8 @@ aoedisk_add_debugfs(struct aoedev *d)
207 else 206 else
208 p++; 207 p++;
209 BUG_ON(*p == '\0'); 208 BUG_ON(*p == '\0');
210 entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d, 209 d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
211 &aoe_debugfs_fops); 210 &aoe_debugfs_fops);
212 if (IS_ERR_OR_NULL(entry)) {
213 pr_info("aoe: cannot create debugfs file for %s\n",
214 d->gd->disk_name);
215 return;
216 }
217 BUG_ON(d->debugfs);
218 d->debugfs = entry;
219} 211}
220void 212void
221aoedisk_rm_debugfs(struct aoedev *d) 213aoedisk_rm_debugfs(struct aoedev *d)
@@ -472,10 +464,6 @@ aoeblk_init(void)
472 if (buf_pool_cache == NULL) 464 if (buf_pool_cache == NULL)
473 return -ENOMEM; 465 return -ENOMEM;
474 aoe_debugfs_dir = debugfs_create_dir("aoe", NULL); 466 aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
475 if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
476 pr_info("aoe: cannot create debugfs directory\n");
477 aoe_debugfs_dir = NULL;
478 }
479 return 0; 467 return 0;
480} 468}
481 469
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index bacfdac7161c..a14b09ab3a41 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3676,6 +3676,7 @@ skip_create_disk:
3676 blk_queue_physical_block_size(dd->queue, 4096); 3676 blk_queue_physical_block_size(dd->queue, 4096);
3677 blk_queue_max_hw_sectors(dd->queue, 0xffff); 3677 blk_queue_max_hw_sectors(dd->queue, 0xffff);
3678 blk_queue_max_segment_size(dd->queue, 0x400000); 3678 blk_queue_max_segment_size(dd->queue, 0x400000);
3679 dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
3679 blk_queue_io_min(dd->queue, 4096); 3680 blk_queue_io_min(dd->queue, 4096);
3680 3681
3681 /* Set the capacity of the device in 512 byte sectors. */ 3682 /* Set the capacity of the device in 512 byte sectors. */
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index de9b2d2f8654..76b73ddf8fd7 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -767,7 +767,6 @@ static int rsxx_pci_probe(struct pci_dev *dev,
767 goto failed_enable; 767 goto failed_enable;
768 768
769 pci_set_master(dev); 769 pci_set_master(dev);
770 dma_set_max_seg_size(&dev->dev, RSXX_HW_BLK_SIZE);
771 770
772 st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64)); 771 st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
773 if (st) { 772 if (st) {
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cd57747286f2..9635897458a0 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -77,6 +77,7 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
77 struct sg_table *sg, 77 struct sg_table *sg,
78 enum dma_data_direction direction) 78 enum dma_data_direction direction)
79{ 79{
80 dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
80 sg_free_table(sg); 81 sg_free_table(sg);
81 kfree(sg); 82 kfree(sg);
82} 83}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7204fdeff6c5..263bee76ef0d 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -662,10 +662,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
662 return status; 662 return status;
663} 663}
664 664
665static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma, 665static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
666 struct jz4780_dma_chan *jzchan) 666 struct jz4780_dma_chan *jzchan)
667{ 667{
668 uint32_t dcs; 668 uint32_t dcs;
669 bool ack = true;
669 670
670 spin_lock(&jzchan->vchan.lock); 671 spin_lock(&jzchan->vchan.lock);
671 672
@@ -688,12 +689,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
688 if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) { 689 if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
689 if (jzchan->desc->type == DMA_CYCLIC) { 690 if (jzchan->desc->type == DMA_CYCLIC) {
690 vchan_cyclic_callback(&jzchan->desc->vdesc); 691 vchan_cyclic_callback(&jzchan->desc->vdesc);
691 } else { 692
693 jz4780_dma_begin(jzchan);
694 } else if (dcs & JZ_DMA_DCS_TT) {
692 vchan_cookie_complete(&jzchan->desc->vdesc); 695 vchan_cookie_complete(&jzchan->desc->vdesc);
693 jzchan->desc = NULL; 696 jzchan->desc = NULL;
694 }
695 697
696 jz4780_dma_begin(jzchan); 698 jz4780_dma_begin(jzchan);
699 } else {
700 /* False positive - continue the transfer */
701 ack = false;
702 jz4780_dma_chn_writel(jzdma, jzchan->id,
703 JZ_DMA_REG_DCS,
704 JZ_DMA_DCS_CTE);
705 }
697 } 706 }
698 } else { 707 } else {
699 dev_err(&jzchan->vchan.chan.dev->device, 708 dev_err(&jzchan->vchan.chan.dev->device,
@@ -701,21 +710,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
701 } 710 }
702 711
703 spin_unlock(&jzchan->vchan.lock); 712 spin_unlock(&jzchan->vchan.lock);
713
714 return ack;
704} 715}
705 716
706static irqreturn_t jz4780_dma_irq_handler(int irq, void *data) 717static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
707{ 718{
708 struct jz4780_dma_dev *jzdma = data; 719 struct jz4780_dma_dev *jzdma = data;
720 unsigned int nb_channels = jzdma->soc_data->nb_channels;
709 uint32_t pending, dmac; 721 uint32_t pending, dmac;
710 int i; 722 int i;
711 723
712 pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP); 724 pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
713 725
714 for (i = 0; i < jzdma->soc_data->nb_channels; i++) { 726 for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
715 if (!(pending & (1<<i))) 727 if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
716 continue; 728 pending &= ~BIT(i);
717
718 jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
719 } 729 }
720 730
721 /* Clear halt and address error status of all channels. */ 731 /* Clear halt and address error status of all channels. */
@@ -724,7 +734,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
724 jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac); 734 jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
725 735
726 /* Clear interrupt pending status. */ 736 /* Clear interrupt pending status. */
727 jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0); 737 jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
728 738
729 return IRQ_HANDLED; 739 return IRQ_HANDLED;
730} 740}
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index b2ac1d2c5b86..a1ce307c502f 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
512 return vchan_tx_prep(&chan->vc, &first->vd, flags); 512 return vchan_tx_prep(&chan->vc, &first->vd, flags);
513 513
514err_desc_get: 514err_desc_get:
515 axi_desc_put(first); 515 if (first)
516 axi_desc_put(first);
516 return NULL; 517 return NULL;
517} 518}
518 519
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index aa1d0ae3d207..60b062c3647b 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -701,10 +701,8 @@ static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
701 701
702 intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR); 702 intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
703 703
704 if (intr) { 704 if (intr)
705 dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n"); 705 dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
706 return IRQ_NONE;
707 }
708 706
709 qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); 707 qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
710 return IRQ_HANDLED; 708 return IRQ_HANDLED;
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 814853842e29..723b11c190b3 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -225,7 +225,7 @@ static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
225 mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); 225 mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
226 mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); 226 mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
227 227
228 return mtk_cqdma_poll_engine_done(pc, false); 228 return mtk_cqdma_poll_engine_done(pc, true);
229} 229}
230 230
231static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, 231static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
@@ -671,7 +671,7 @@ static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
671 mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); 671 mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
672 672
673 /* wait for the completion of flush operation */ 673 /* wait for the completion of flush operation */
674 if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0) 674 if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
675 dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n"); 675 dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
676 676
677 /* clear the flush bit and interrupt flag */ 677 /* clear the flush bit and interrupt flag */
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 48431e2da987..baac476c8622 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -62,6 +62,8 @@
62/* SPRD_DMA_GLB_2STAGE_GRP register definition */ 62/* SPRD_DMA_GLB_2STAGE_GRP register definition */
63#define SPRD_DMA_GLB_2STAGE_EN BIT(24) 63#define SPRD_DMA_GLB_2STAGE_EN BIT(24)
64#define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20) 64#define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20)
65#define SPRD_DMA_GLB_DEST_INT BIT(22)
66#define SPRD_DMA_GLB_SRC_INT BIT(20)
65#define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19) 67#define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19)
66#define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18) 68#define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18)
67#define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17) 69#define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17)
@@ -135,6 +137,7 @@
135/* define DMA channel mode & trigger mode mask */ 137/* define DMA channel mode & trigger mode mask */
136#define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0) 138#define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0)
137#define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0) 139#define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0)
140#define SPRD_DMA_INT_TYPE_MASK GENMASK(7, 0)
138 141
139/* define the DMA transfer step type */ 142/* define the DMA transfer step type */
140#define SPRD_DMA_NONE_STEP 0 143#define SPRD_DMA_NONE_STEP 0
@@ -190,6 +193,7 @@ struct sprd_dma_chn {
190 u32 dev_id; 193 u32 dev_id;
191 enum sprd_dma_chn_mode chn_mode; 194 enum sprd_dma_chn_mode chn_mode;
192 enum sprd_dma_trg_mode trg_mode; 195 enum sprd_dma_trg_mode trg_mode;
196 enum sprd_dma_int_type int_type;
193 struct sprd_dma_desc *cur_desc; 197 struct sprd_dma_desc *cur_desc;
194}; 198};
195 199
@@ -429,6 +433,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
429 val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; 433 val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
430 val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; 434 val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
431 val |= SPRD_DMA_GLB_2STAGE_EN; 435 val |= SPRD_DMA_GLB_2STAGE_EN;
436 if (schan->int_type != SPRD_DMA_NO_INT)
437 val |= SPRD_DMA_GLB_SRC_INT;
438
432 sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val); 439 sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
433 break; 440 break;
434 441
@@ -436,6 +443,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
436 val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; 443 val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
437 val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; 444 val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
438 val |= SPRD_DMA_GLB_2STAGE_EN; 445 val |= SPRD_DMA_GLB_2STAGE_EN;
446 if (schan->int_type != SPRD_DMA_NO_INT)
447 val |= SPRD_DMA_GLB_SRC_INT;
448
439 sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val); 449 sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
440 break; 450 break;
441 451
@@ -443,6 +453,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
443 val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & 453 val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
444 SPRD_DMA_GLB_DEST_CHN_MASK; 454 SPRD_DMA_GLB_DEST_CHN_MASK;
445 val |= SPRD_DMA_GLB_2STAGE_EN; 455 val |= SPRD_DMA_GLB_2STAGE_EN;
456 if (schan->int_type != SPRD_DMA_NO_INT)
457 val |= SPRD_DMA_GLB_DEST_INT;
458
446 sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val); 459 sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
447 break; 460 break;
448 461
@@ -450,6 +463,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
450 val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & 463 val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
451 SPRD_DMA_GLB_DEST_CHN_MASK; 464 SPRD_DMA_GLB_DEST_CHN_MASK;
452 val |= SPRD_DMA_GLB_2STAGE_EN; 465 val |= SPRD_DMA_GLB_2STAGE_EN;
466 if (schan->int_type != SPRD_DMA_NO_INT)
467 val |= SPRD_DMA_GLB_DEST_INT;
468
453 sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val); 469 sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
454 break; 470 break;
455 471
@@ -510,7 +526,9 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
510 sprd_dma_set_uid(schan); 526 sprd_dma_set_uid(schan);
511 sprd_dma_enable_chn(schan); 527 sprd_dma_enable_chn(schan);
512 528
513 if (schan->dev_id == SPRD_DMA_SOFTWARE_UID) 529 if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
530 schan->chn_mode != SPRD_DMA_DST_CHN0 &&
531 schan->chn_mode != SPRD_DMA_DST_CHN1)
514 sprd_dma_soft_request(schan); 532 sprd_dma_soft_request(schan);
515} 533}
516 534
@@ -552,12 +570,17 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
552 schan = &sdev->channels[i]; 570 schan = &sdev->channels[i];
553 571
554 spin_lock(&schan->vc.lock); 572 spin_lock(&schan->vc.lock);
573
574 sdesc = schan->cur_desc;
575 if (!sdesc) {
576 spin_unlock(&schan->vc.lock);
577 return IRQ_HANDLED;
578 }
579
555 int_type = sprd_dma_get_int_type(schan); 580 int_type = sprd_dma_get_int_type(schan);
556 req_type = sprd_dma_get_req_type(schan); 581 req_type = sprd_dma_get_req_type(schan);
557 sprd_dma_clear_int(schan); 582 sprd_dma_clear_int(schan);
558 583
559 sdesc = schan->cur_desc;
560
561 /* cyclic mode schedule callback */ 584 /* cyclic mode schedule callback */
562 cyclic = schan->linklist.phy_addr ? true : false; 585 cyclic = schan->linklist.phy_addr ? true : false;
563 if (cyclic == true) { 586 if (cyclic == true) {
@@ -625,7 +648,7 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
625 else 648 else
626 pos = 0; 649 pos = 0;
627 } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) { 650 } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
628 struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); 651 struct sprd_dma_desc *sdesc = schan->cur_desc;
629 652
630 if (sdesc->dir == DMA_DEV_TO_MEM) 653 if (sdesc->dir == DMA_DEV_TO_MEM)
631 pos = sprd_dma_get_dst_addr(schan); 654 pos = sprd_dma_get_dst_addr(schan);
@@ -771,7 +794,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
771 temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK; 794 temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
772 hw->frg_len = temp; 795 hw->frg_len = temp;
773 796
774 hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK; 797 hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
775 hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK; 798 hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
776 799
777 temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET; 800 temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
@@ -904,6 +927,16 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
904 schan->linklist.virt_addr = 0; 927 schan->linklist.virt_addr = 0;
905 } 928 }
906 929
930 /*
931 * Set channel mode, interrupt mode and trigger mode for 2-stage
932 * transfer.
933 */
934 schan->chn_mode =
935 (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
936 schan->trg_mode =
937 (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
938 schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;
939
907 sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT); 940 sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
908 if (!sdesc) 941 if (!sdesc)
909 return NULL; 942 return NULL;
@@ -937,12 +970,6 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
937 } 970 }
938 } 971 }
939 972
940 /* Set channel mode and trigger mode for 2-stage transfer */
941 schan->chn_mode =
942 (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
943 schan->trg_mode =
944 (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
945
946 ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, 973 ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
947 dir, flags, slave_cfg); 974 dir, flags, slave_cfg);
948 if (ret) { 975 if (ret) {
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index d51550dd91c7..2805853e963f 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -42,10 +42,14 @@
42#define ADMA_CH_CONFIG_MAX_BUFS 8 42#define ADMA_CH_CONFIG_MAX_BUFS 8
43 43
44#define ADMA_CH_FIFO_CTRL 0x2c 44#define ADMA_CH_FIFO_CTRL 0x2c
45#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24) 45#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0xf) << 24)
46#define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16) 46#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0xf) << 16)
47#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT 8 47#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
48#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT 0 48#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0xf)
49#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0x1f) << 24)
50#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0x1f) << 16)
51#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0x1f) << 8)
52#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0x1f)
49 53
50#define ADMA_CH_LOWER_SRC_ADDR 0x34 54#define ADMA_CH_LOWER_SRC_ADDR 0x34
51#define ADMA_CH_LOWER_TRG_ADDR 0x3c 55#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@@ -60,8 +64,15 @@
60 64
61#define TEGRA_ADMA_BURST_COMPLETE_TIME 20 65#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
62 66
63#define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \ 67#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
64 ADMA_CH_FIFO_CTRL_STARV_THRES(1)) 68 TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
69 TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
70 TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
71
72#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
73 TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
74 TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
75 TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
65 76
66#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift) 77#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
67 78
@@ -73,7 +84,8 @@ struct tegra_adma;
73 * @global_int_clear: Register offset of DMA global interrupt clear. 84 * @global_int_clear: Register offset of DMA global interrupt clear.
74 * @ch_req_tx_shift: Register offset for AHUB transmit channel select. 85 * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
75 * @ch_req_rx_shift: Register offset for AHUB receive channel select. 86 * @ch_req_rx_shift: Register offset for AHUB receive channel select.
76 * @ch_base_offset: Reister offset of DMA channel registers. 87 * @ch_base_offset: Register offset of DMA channel registers.
88 * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
77 * @ch_req_mask: Mask for Tx or Rx channel select. 89 * @ch_req_mask: Mask for Tx or Rx channel select.
78 * @ch_req_max: Maximum number of Tx or Rx channels available. 90 * @ch_req_max: Maximum number of Tx or Rx channels available.
79 * @ch_reg_size: Size of DMA channel register space. 91 * @ch_reg_size: Size of DMA channel register space.
@@ -86,6 +98,7 @@ struct tegra_adma_chip_data {
86 unsigned int ch_req_tx_shift; 98 unsigned int ch_req_tx_shift;
87 unsigned int ch_req_rx_shift; 99 unsigned int ch_req_rx_shift;
88 unsigned int ch_base_offset; 100 unsigned int ch_base_offset;
101 unsigned int ch_fifo_ctrl;
89 unsigned int ch_req_mask; 102 unsigned int ch_req_mask;
90 unsigned int ch_req_max; 103 unsigned int ch_req_max;
91 unsigned int ch_reg_size; 104 unsigned int ch_reg_size;
@@ -589,7 +602,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
589 ADMA_CH_CTRL_FLOWCTRL_EN; 602 ADMA_CH_CTRL_FLOWCTRL_EN;
590 ch_regs->config |= cdata->adma_get_burst_config(burst_size); 603 ch_regs->config |= cdata->adma_get_burst_config(burst_size);
591 ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); 604 ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
592 ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT; 605 ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
593 ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK; 606 ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
594 607
595 return tegra_adma_request_alloc(tdc, direction); 608 return tegra_adma_request_alloc(tdc, direction);
@@ -773,6 +786,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
773 .ch_req_tx_shift = 28, 786 .ch_req_tx_shift = 28,
774 .ch_req_rx_shift = 24, 787 .ch_req_rx_shift = 24,
775 .ch_base_offset = 0, 788 .ch_base_offset = 0,
789 .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT,
776 .ch_req_mask = 0xf, 790 .ch_req_mask = 0xf,
777 .ch_req_max = 10, 791 .ch_req_max = 10,
778 .ch_reg_size = 0x80, 792 .ch_reg_size = 0x80,
@@ -786,6 +800,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
786 .ch_req_tx_shift = 27, 800 .ch_req_tx_shift = 27,
787 .ch_req_rx_shift = 22, 801 .ch_req_rx_shift = 22,
788 .ch_base_offset = 0x10000, 802 .ch_base_offset = 0x10000,
803 .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT,
789 .ch_req_mask = 0x1f, 804 .ch_req_mask = 0x1f,
790 .ch_req_max = 20, 805 .ch_req_max = 20,
791 .ch_reg_size = 0x100, 806 .ch_reg_size = 0x100,
@@ -834,16 +849,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
834 return PTR_ERR(tdma->ahub_clk); 849 return PTR_ERR(tdma->ahub_clk);
835 } 850 }
836 851
837 pm_runtime_enable(&pdev->dev);
838
839 ret = pm_runtime_get_sync(&pdev->dev);
840 if (ret < 0)
841 goto rpm_disable;
842
843 ret = tegra_adma_init(tdma);
844 if (ret)
845 goto rpm_put;
846
847 INIT_LIST_HEAD(&tdma->dma_dev.channels); 852 INIT_LIST_HEAD(&tdma->dma_dev.channels);
848 for (i = 0; i < tdma->nr_channels; i++) { 853 for (i = 0; i < tdma->nr_channels; i++) {
849 struct tegra_adma_chan *tdc = &tdma->channels[i]; 854 struct tegra_adma_chan *tdc = &tdma->channels[i];
@@ -862,6 +867,16 @@ static int tegra_adma_probe(struct platform_device *pdev)
862 tdc->tdma = tdma; 867 tdc->tdma = tdma;
863 } 868 }
864 869
870 pm_runtime_enable(&pdev->dev);
871
872 ret = pm_runtime_get_sync(&pdev->dev);
873 if (ret < 0)
874 goto rpm_disable;
875
876 ret = tegra_adma_init(tdma);
877 if (ret)
878 goto rpm_put;
879
865 dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); 880 dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
866 dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); 881 dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
867 dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); 882 dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
@@ -905,13 +920,13 @@ static int tegra_adma_probe(struct platform_device *pdev)
905 920
906dma_remove: 921dma_remove:
907 dma_async_device_unregister(&tdma->dma_dev); 922 dma_async_device_unregister(&tdma->dma_dev);
908irq_dispose:
909 while (--i >= 0)
910 irq_dispose_mapping(tdma->channels[i].irq);
911rpm_put: 923rpm_put:
912 pm_runtime_put_sync(&pdev->dev); 924 pm_runtime_put_sync(&pdev->dev);
913rpm_disable: 925rpm_disable:
914 pm_runtime_disable(&pdev->dev); 926 pm_runtime_disable(&pdev->dev);
927irq_dispose:
928 while (--i >= 0)
929 irq_dispose_mapping(tdma->channels[i].irq);
915 930
916 return ret; 931 return ret;
917} 932}
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index c438722bf4e1..dcd80b088c7b 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -399,7 +399,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
399 region->pages[0], 0, 399 region->pages[0], 0,
400 region->length, 400 region->length,
401 DMA_BIDIRECTIONAL); 401 DMA_BIDIRECTIONAL);
402 if (dma_mapping_error(&pdata->dev->dev, region->iova)) { 402 if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
403 dev_err(&pdata->dev->dev, "failed to map for dma\n"); 403 dev_err(&pdata->dev->dev, "failed to map for dma\n");
404 ret = -EFAULT; 404 ret = -EFAULT;
405 goto unpin_pages; 405 goto unpin_pages;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 2c09e502e721..4b66aaa32b5a 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -40,6 +40,13 @@ enum dfl_fpga_devt_type {
40 DFL_FPGA_DEVT_MAX, 40 DFL_FPGA_DEVT_MAX,
41}; 41};
42 42
43static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];
44
45static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
46 "dfl-fme-pdata",
47 "dfl-port-pdata",
48};
49
43/** 50/**
44 * dfl_dev_info - dfl feature device information. 51 * dfl_dev_info - dfl feature device information.
45 * @name: name string of the feature platform device. 52 * @name: name string of the feature platform device.
@@ -315,7 +322,7 @@ static void dfl_chardev_uinit(void)
315 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) 322 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
316 if (MAJOR(dfl_chrdevs[i].devt)) { 323 if (MAJOR(dfl_chrdevs[i].devt)) {
317 unregister_chrdev_region(dfl_chrdevs[i].devt, 324 unregister_chrdev_region(dfl_chrdevs[i].devt,
318 MINORMASK); 325 MINORMASK + 1);
319 dfl_chrdevs[i].devt = MKDEV(0, 0); 326 dfl_chrdevs[i].devt = MKDEV(0, 0);
320 } 327 }
321} 328}
@@ -325,8 +332,8 @@ static int dfl_chardev_init(void)
325 int i, ret; 332 int i, ret;
326 333
327 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) { 334 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
328 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK, 335 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
329 dfl_chrdevs[i].name); 336 MINORMASK + 1, dfl_chrdevs[i].name);
330 if (ret) 337 if (ret)
331 goto exit; 338 goto exit;
332 } 339 }
@@ -443,11 +450,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
443 struct platform_device *fdev = binfo->feature_dev; 450 struct platform_device *fdev = binfo->feature_dev;
444 struct dfl_feature_platform_data *pdata; 451 struct dfl_feature_platform_data *pdata;
445 struct dfl_feature_info *finfo, *p; 452 struct dfl_feature_info *finfo, *p;
453 enum dfl_id_type type;
446 int ret, index = 0; 454 int ret, index = 0;
447 455
448 if (!fdev) 456 if (!fdev)
449 return 0; 457 return 0;
450 458
459 type = feature_dev_id_type(fdev);
460 if (WARN_ON_ONCE(type >= DFL_ID_MAX))
461 return -EINVAL;
462
451 /* 463 /*
452 * we do not need to care for the memory which is associated with 464 * we do not need to care for the memory which is associated with
453 * the platform device. After calling platform_device_unregister(), 465 * the platform device. After calling platform_device_unregister(),
@@ -463,6 +475,8 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
463 pdata->num = binfo->feature_num; 475 pdata->num = binfo->feature_num;
464 pdata->dfl_cdev = binfo->cdev; 476 pdata->dfl_cdev = binfo->cdev;
465 mutex_init(&pdata->lock); 477 mutex_init(&pdata->lock);
478 lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
479 dfl_pdata_key_strings[type]);
466 480
467 /* 481 /*
468 * the count should be initialized to 0 to make sure 482 * the count should be initialized to 0 to make sure
@@ -497,7 +511,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
497 511
498 ret = platform_device_add(binfo->feature_dev); 512 ret = platform_device_add(binfo->feature_dev);
499 if (!ret) { 513 if (!ret) {
500 if (feature_dev_id_type(binfo->feature_dev) == PORT_ID) 514 if (type == PORT_ID)
501 dfl_fpga_cdev_add_port_dev(binfo->cdev, 515 dfl_fpga_cdev_add_port_dev(binfo->cdev,
502 binfo->feature_dev); 516 binfo->feature_dev);
503 else 517 else
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 13851b3d1c56..215d33789c74 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -507,12 +507,16 @@ static int __init s10_init(void)
507 if (!fw_np) 507 if (!fw_np)
508 return -ENODEV; 508 return -ENODEV;
509 509
510 of_node_get(fw_np);
510 np = of_find_matching_node(fw_np, s10_of_match); 511 np = of_find_matching_node(fw_np, s10_of_match);
511 if (!np) 512 if (!np) {
513 of_node_put(fw_np);
512 return -ENODEV; 514 return -ENODEV;
515 }
513 516
514 of_node_put(np); 517 of_node_put(np);
515 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); 518 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
519 of_node_put(fw_np);
516 if (ret) 520 if (ret)
517 return ret; 521 return ret;
518 522
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index f7cbaadf49ab..b8a88d21d038 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -47,7 +47,7 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
47 char *kbuf; 47 char *kbuf;
48 int ret; 48 int ret;
49 49
50 if (!eemi_ops || !eemi_ops->fpga_load) 50 if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
51 return -ENXIO; 51 return -ENXIO;
52 52
53 priv = mgr->priv; 53 priv = mgr->priv;
@@ -81,7 +81,7 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
81 const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); 81 const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
82 u32 status; 82 u32 status;
83 83
84 if (!eemi_ops || !eemi_ops->fpga_get_status) 84 if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
85 return FPGA_MGR_STATE_UNKNOWN; 85 return FPGA_MGR_STATE_UNKNOWN;
86 86
87 eemi_ops->fpga_get_status(&status); 87 eemi_ops->fpga_get_status(&status);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cc8ad3831982..f4ac632a87b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1589,6 +1589,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1589{ 1589{
1590 int r = 0; 1590 int r = 0;
1591 int i; 1591 int i;
1592 uint32_t smu_version;
1592 1593
1593 if (adev->asic_type >= CHIP_VEGA10) { 1594 if (adev->asic_type >= CHIP_VEGA10) {
1594 for (i = 0; i < adev->num_ip_blocks; i++) { 1595 for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1614,16 +1615,9 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1614 } 1615 }
1615 } 1616 }
1616 } 1617 }
1618 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
1617 1619
1618 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { 1620 return r;
1619 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1620 if (r) {
1621 pr_err("firmware loading failed\n");
1622 return r;
1623 }
1624 }
1625
1626 return 0;
1627} 1621}
1628 1622
1629/** 1623/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 34471dbaa872..039cfa2ec89d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2490,6 +2490,21 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2490 2490
2491} 2491}
2492 2492
2493int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
2494{
2495 int r = -EINVAL;
2496
2497 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
2498 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
2499 if (r) {
2500 pr_err("smu firmware loading failed\n");
2501 return r;
2502 }
2503 *smu_version = adev->pm.fw_version;
2504 }
2505 return r;
2506}
2507
2493int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 2508int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2494{ 2509{
2495 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 2510 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index f21a7716b90e..7ff0e7621fff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -34,6 +34,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
34int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); 34int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
35void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); 35void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
36void amdgpu_pm_print_power_states(struct amdgpu_device *adev); 36void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
37int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
37void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); 38void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
38void amdgpu_dpm_thermal_work_handler(struct work_struct *work); 39void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
39void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); 40void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c021b114c8a4..f7189e22f6b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1072,7 +1072,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1072int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) 1072int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
1073{ 1073{
1074 struct amdgpu_device *adev = ring->adev; 1074 struct amdgpu_device *adev = ring->adev;
1075 uint32_t rptr = amdgpu_ring_get_rptr(ring); 1075 uint32_t rptr;
1076 unsigned i; 1076 unsigned i;
1077 int r, timeout = adev->usec_timeout; 1077 int r, timeout = adev->usec_timeout;
1078 1078
@@ -1084,6 +1084,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
1084 if (r) 1084 if (r)
1085 return r; 1085 return r;
1086 1086
1087 rptr = amdgpu_ring_get_rptr(ring);
1088
1087 amdgpu_ring_write(ring, VCE_CMD_END); 1089 amdgpu_ring_write(ring, VCE_CMD_END);
1088 amdgpu_ring_commit(ring); 1090 amdgpu_ring_commit(ring);
1089 1091
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ba67d1023264..b610e3b30d95 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -28,6 +28,7 @@
28#include "soc15.h" 28#include "soc15.h"
29#include "soc15d.h" 29#include "soc15d.h"
30#include "amdgpu_atomfirmware.h" 30#include "amdgpu_atomfirmware.h"
31#include "amdgpu_pm.h"
31 32
32#include "gc/gc_9_0_offset.h" 33#include "gc/gc_9_0_offset.h"
33#include "gc/gc_9_0_sh_mask.h" 34#include "gc/gc_9_0_sh_mask.h"
@@ -96,6 +97,7 @@ MODULE_FIRMWARE("amdgpu/raven2_me.bin");
96MODULE_FIRMWARE("amdgpu/raven2_mec.bin"); 97MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
97MODULE_FIRMWARE("amdgpu/raven2_mec2.bin"); 98MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
98MODULE_FIRMWARE("amdgpu/raven2_rlc.bin"); 99MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
100MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
99 101
100static const struct soc15_reg_golden golden_settings_gc_9_0[] = 102static const struct soc15_reg_golden golden_settings_gc_9_0[] =
101{ 103{
@@ -588,7 +590,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
588 case CHIP_RAVEN: 590 case CHIP_RAVEN:
589 if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) 591 if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
590 break; 592 break;
591 if ((adev->gfx.rlc_fw_version < 531) || 593 if ((adev->gfx.rlc_fw_version != 106 &&
594 adev->gfx.rlc_fw_version < 531) ||
592 (adev->gfx.rlc_fw_version == 53815) || 595 (adev->gfx.rlc_fw_version == 53815) ||
593 (adev->gfx.rlc_feature_version < 1) || 596 (adev->gfx.rlc_feature_version < 1) ||
594 !adev->gfx.rlc.is_rlc_v2_1) 597 !adev->gfx.rlc.is_rlc_v2_1)
@@ -612,6 +615,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
612 unsigned int i = 0; 615 unsigned int i = 0;
613 uint16_t version_major; 616 uint16_t version_major;
614 uint16_t version_minor; 617 uint16_t version_minor;
618 uint32_t smu_version;
615 619
616 DRM_DEBUG("\n"); 620 DRM_DEBUG("\n");
617 621
@@ -682,6 +686,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
682 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) || 686 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
683 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF)))) 687 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
684 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name); 688 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
689 else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
690 (smu_version >= 0x41e2b))
691 /**
692 *SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
693 */
694 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
685 else 695 else
686 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); 696 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
687 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 697 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bcb1a93c0b4c..ab7c5c3004ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4232,8 +4232,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
4232 struct drm_plane_state *old_state = 4232 struct drm_plane_state *old_state =
4233 drm_atomic_get_old_plane_state(new_state->state, plane); 4233 drm_atomic_get_old_plane_state(new_state->state, plane);
4234 4234
4235 if (plane->state->fb != new_state->fb) 4235 swap(plane->state->fb, new_state->fb);
4236 drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
4237 4236
4238 plane->state->src_x = new_state->src_x; 4237 plane->state->src_x = new_state->src_x;
4239 plane->state->src_y = new_state->src_y; 4238 plane->state->src_y = new_state->src_y;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 6cd6497c6fc2..f1d326caf69e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -92,6 +92,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
92 hwmgr_set_user_specify_caps(hwmgr); 92 hwmgr_set_user_specify_caps(hwmgr);
93 hwmgr->fan_ctrl_is_in_default_mode = true; 93 hwmgr->fan_ctrl_is_in_default_mode = true;
94 hwmgr_init_workload_prority(hwmgr); 94 hwmgr_init_workload_prority(hwmgr);
95 hwmgr->gfxoff_state_changed_by_workload = false;
95 96
96 switch (hwmgr->chip_family) { 97 switch (hwmgr->chip_family) {
97 case AMDGPU_FAMILY_CI: 98 case AMDGPU_FAMILY_CI:
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 9a595f7525e6..e32ae9d3373c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1258,21 +1258,46 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
1258 return size; 1258 return size;
1259} 1259}
1260 1260
1261static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
1262{
1263 struct amdgpu_device *adev = hwmgr->adev;
1264 if ((adev->asic_type == CHIP_RAVEN) &&
1265 (adev->rev_id != 0x15d8) &&
1266 (hwmgr->smu_version >= 0x41e2b))
1267 return true;
1268 else
1269 return false;
1270}
1271
1261static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) 1272static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
1262{ 1273{
1263 int workload_type = 0; 1274 int workload_type = 0;
1275 int result = 0;
1264 1276
1265 if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) { 1277 if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
1266 pr_err("Invalid power profile mode %ld\n", input[size]); 1278 pr_err("Invalid power profile mode %ld\n", input[size]);
1267 return -EINVAL; 1279 return -EINVAL;
1268 } 1280 }
1269 hwmgr->power_profile_mode = input[size]; 1281 if (hwmgr->power_profile_mode == input[size])
1282 return 0;
1270 1283
1271 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1284 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1272 workload_type = 1285 workload_type =
1273 conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode); 1286 conv_power_profile_to_pplib_workload(input[size]);
1274 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify, 1287 if (workload_type &&
1288 smu10_is_raven1_refresh(hwmgr) &&
1289 !hwmgr->gfxoff_state_changed_by_workload) {
1290 smu10_gfx_off_control(hwmgr, false);
1291 hwmgr->gfxoff_state_changed_by_workload = true;
1292 }
1293 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
1275 1 << workload_type); 1294 1 << workload_type);
1295 if (!result)
1296 hwmgr->power_profile_mode = input[size];
1297 if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
1298 smu10_gfx_off_control(hwmgr, true);
1299 hwmgr->gfxoff_state_changed_by_workload = false;
1300 }
1276 1301
1277 return 0; 1302 return 0;
1278} 1303}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bac3d85e3b82..c92999aac07c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -782,6 +782,7 @@ struct pp_hwmgr {
782 uint32_t workload_mask; 782 uint32_t workload_mask;
783 uint32_t workload_prority[Workload_Policy_Max]; 783 uint32_t workload_prority[Workload_Policy_Max];
784 uint32_t workload_setting[Workload_Policy_Max]; 784 uint32_t workload_setting[Workload_Policy_Max];
785 bool gfxoff_state_changed_by_workload;
785}; 786};
786 787
787int hwmgr_early_init(struct pp_hwmgr *hwmgr); 788int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 031e5f305a3c..6bab816ed8e7 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -245,7 +245,7 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
245 seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]); 245 seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
246} 246}
247 247
248static struct komeda_component_funcs d71_layer_funcs = { 248static const struct komeda_component_funcs d71_layer_funcs = {
249 .update = d71_layer_update, 249 .update = d71_layer_update,
250 .disable = d71_layer_disable, 250 .disable = d71_layer_disable,
251 .dump_register = d71_layer_dump, 251 .dump_register = d71_layer_dump,
@@ -391,7 +391,7 @@ static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
391 seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]); 391 seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
392} 392}
393 393
394static struct komeda_component_funcs d71_compiz_funcs = { 394static const struct komeda_component_funcs d71_compiz_funcs = {
395 .update = d71_compiz_update, 395 .update = d71_compiz_update,
396 .disable = d71_component_disable, 396 .disable = d71_component_disable,
397 .dump_register = d71_compiz_dump, 397 .dump_register = d71_compiz_dump,
@@ -467,7 +467,7 @@ static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
467 seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]); 467 seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
468} 468}
469 469
470static struct komeda_component_funcs d71_improc_funcs = { 470static const struct komeda_component_funcs d71_improc_funcs = {
471 .update = d71_improc_update, 471 .update = d71_improc_update,
472 .disable = d71_component_disable, 472 .disable = d71_component_disable,
473 .dump_register = d71_improc_dump, 473 .dump_register = d71_improc_dump,
@@ -580,7 +580,7 @@ static void d71_timing_ctrlr_dump(struct komeda_component *c,
580 seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]); 580 seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
581} 581}
582 582
583static struct komeda_component_funcs d71_timing_ctrlr_funcs = { 583static const struct komeda_component_funcs d71_timing_ctrlr_funcs = {
584 .update = d71_timing_ctrlr_update, 584 .update = d71_timing_ctrlr_update,
585 .disable = d71_timing_ctrlr_disable, 585 .disable = d71_timing_ctrlr_disable,
586 .dump_register = d71_timing_ctrlr_dump, 586 .dump_register = d71_timing_ctrlr_dump,
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index 34506ef7ad40..3a7248d42376 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -502,7 +502,7 @@ static void d71_init_fmt_tbl(struct komeda_dev *mdev)
502 table->n_formats = ARRAY_SIZE(d71_format_caps_table); 502 table->n_formats = ARRAY_SIZE(d71_format_caps_table);
503} 503}
504 504
505static struct komeda_dev_funcs d71_chip_funcs = { 505static const struct komeda_dev_funcs d71_chip_funcs = {
506 .init_format_table = d71_init_fmt_tbl, 506 .init_format_table = d71_init_fmt_tbl,
507 .enum_resources = d71_enum_resources, 507 .enum_resources = d71_enum_resources,
508 .cleanup = d71_cleanup, 508 .cleanup = d71_cleanup,
@@ -514,7 +514,7 @@ static struct komeda_dev_funcs d71_chip_funcs = {
514 .flush = d71_flush, 514 .flush = d71_flush,
515}; 515};
516 516
517struct komeda_dev_funcs * 517const struct komeda_dev_funcs *
518d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip) 518d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
519{ 519{
520 chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID); 520 chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 62fad59f5a6a..284ce079d8c4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -350,7 +350,7 @@ static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
350 return true; 350 return true;
351} 351}
352 352
353static struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = { 353static const struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
354 .atomic_check = komeda_crtc_atomic_check, 354 .atomic_check = komeda_crtc_atomic_check,
355 .atomic_flush = komeda_crtc_atomic_flush, 355 .atomic_flush = komeda_crtc_atomic_flush,
356 .atomic_enable = komeda_crtc_atomic_enable, 356 .atomic_enable = komeda_crtc_atomic_enable,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index ca3599e4a4d3..b67030a9f056 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
8#include <linux/of_device.h> 8#include <linux/of_device.h>
9#include <linux/of_graph.h> 9#include <linux/of_graph.h>
10#include <linux/platform_device.h> 10#include <linux/platform_device.h>
11#include <linux/dma-mapping.h>
11#ifdef CONFIG_DEBUG_FS 12#ifdef CONFIG_DEBUG_FS
12#include <linux/debugfs.h> 13#include <linux/debugfs.h>
13#include <linux/seq_file.h> 14#include <linux/seq_file.h>
@@ -249,6 +250,9 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
249 goto err_cleanup; 250 goto err_cleanup;
250 } 251 }
251 252
253 dev->dma_parms = &mdev->dma_parms;
254 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
255
252 err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group); 256 err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
253 if (err) { 257 if (err) {
254 DRM_ERROR("create sysfs group failed.\n"); 258 DRM_ERROR("create sysfs group failed.\n");
@@ -269,7 +273,7 @@ err_cleanup:
269void komeda_dev_destroy(struct komeda_dev *mdev) 273void komeda_dev_destroy(struct komeda_dev *mdev)
270{ 274{
271 struct device *dev = mdev->dev; 275 struct device *dev = mdev->dev;
272 struct komeda_dev_funcs *funcs = mdev->funcs; 276 const struct komeda_dev_funcs *funcs = mdev->funcs;
273 int i; 277 int i;
274 278
275 sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group); 279 sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 29e03c4e1ffc..973fd5e0eb98 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -60,7 +60,7 @@ struct komeda_chip_info {
60 60
61struct komeda_product_data { 61struct komeda_product_data {
62 u32 product_id; 62 u32 product_id;
63 struct komeda_dev_funcs *(*identify)(u32 __iomem *reg, 63 const struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
64 struct komeda_chip_info *info); 64 struct komeda_chip_info *info);
65}; 65};
66 66
@@ -149,6 +149,8 @@ struct komeda_dev {
149 struct device *dev; 149 struct device *dev;
150 /** @reg_base: the base address of komeda io space */ 150 /** @reg_base: the base address of komeda io space */
151 u32 __iomem *reg_base; 151 u32 __iomem *reg_base;
152 /** @dma_parms: the dma parameters of komeda */
153 struct device_dma_parameters dma_parms;
152 154
153 /** @chip: the basic chip information */ 155 /** @chip: the basic chip information */
154 struct komeda_chip_info chip; 156 struct komeda_chip_info chip;
@@ -173,7 +175,7 @@ struct komeda_dev {
173 struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES]; 175 struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
174 176
175 /** @funcs: chip funcs to access to HW */ 177 /** @funcs: chip funcs to access to HW */
176 struct komeda_dev_funcs *funcs; 178 const struct komeda_dev_funcs *funcs;
177 /** 179 /**
178 * @chip_data: 180 * @chip_data:
179 * 181 *
@@ -192,7 +194,7 @@ komeda_product_match(struct komeda_dev *mdev, u32 target)
192 return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target; 194 return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
193} 195}
194 196
195struct komeda_dev_funcs * 197const struct komeda_dev_funcs *
196d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip); 198d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
197 199
198struct komeda_dev *komeda_dev_create(struct device *dev); 200struct komeda_dev *komeda_dev_create(struct device *dev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index c379439c6194..a130b62fa6d1 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -12,7 +12,7 @@
12/** komeda_pipeline_add - Add a pipeline to &komeda_dev */ 12/** komeda_pipeline_add - Add a pipeline to &komeda_dev */
13struct komeda_pipeline * 13struct komeda_pipeline *
14komeda_pipeline_add(struct komeda_dev *mdev, size_t size, 14komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
15 struct komeda_pipeline_funcs *funcs) 15 const struct komeda_pipeline_funcs *funcs)
16{ 16{
17 struct komeda_pipeline *pipe; 17 struct komeda_pipeline *pipe;
18 18
@@ -130,7 +130,7 @@ komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id)
130struct komeda_component * 130struct komeda_component *
131komeda_component_add(struct komeda_pipeline *pipe, 131komeda_component_add(struct komeda_pipeline *pipe,
132 size_t comp_sz, u32 id, u32 hw_id, 132 size_t comp_sz, u32 id, u32 hw_id,
133 struct komeda_component_funcs *funcs, 133 const struct komeda_component_funcs *funcs,
134 u8 max_active_inputs, u32 supported_inputs, 134 u8 max_active_inputs, u32 supported_inputs,
135 u8 max_active_outputs, u32 __iomem *reg, 135 u8 max_active_outputs, u32 __iomem *reg,
136 const char *name_fmt, ...) 136 const char *name_fmt, ...)
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index b1f813a349a4..bae8a32b81a6 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -124,7 +124,7 @@ struct komeda_component {
124 /** 124 /**
125 * @funcs: chip functions to access HW 125 * @funcs: chip functions to access HW
126 */ 126 */
127 struct komeda_component_funcs *funcs; 127 const struct komeda_component_funcs *funcs;
128}; 128};
129 129
130/** 130/**
@@ -346,8 +346,8 @@ struct komeda_pipeline {
346 struct komeda_improc *improc; 346 struct komeda_improc *improc;
347 /** @ctrlr: timing controller */ 347 /** @ctrlr: timing controller */
348 struct komeda_timing_ctrlr *ctrlr; 348 struct komeda_timing_ctrlr *ctrlr;
349 /** @funcs: chip pipeline functions */ 349 /** @funcs: chip private pipeline functions */
350 struct komeda_pipeline_funcs *funcs; /* private pipeline functions */ 350 const struct komeda_pipeline_funcs *funcs;
351 351
352 /** @of_node: pipeline dt node */ 352 /** @of_node: pipeline dt node */
353 struct device_node *of_node; 353 struct device_node *of_node;
@@ -397,7 +397,7 @@ struct komeda_pipeline_state {
397/* pipeline APIs */ 397/* pipeline APIs */
398struct komeda_pipeline * 398struct komeda_pipeline *
399komeda_pipeline_add(struct komeda_dev *mdev, size_t size, 399komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
400 struct komeda_pipeline_funcs *funcs); 400 const struct komeda_pipeline_funcs *funcs);
401void komeda_pipeline_destroy(struct komeda_dev *mdev, 401void komeda_pipeline_destroy(struct komeda_dev *mdev,
402 struct komeda_pipeline *pipe); 402 struct komeda_pipeline *pipe);
403int komeda_assemble_pipelines(struct komeda_dev *mdev); 403int komeda_assemble_pipelines(struct komeda_dev *mdev);
@@ -411,7 +411,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
411struct komeda_component * 411struct komeda_component *
412komeda_component_add(struct komeda_pipeline *pipe, 412komeda_component_add(struct komeda_pipeline *pipe,
413 size_t comp_sz, u32 id, u32 hw_id, 413 size_t comp_sz, u32 id, u32 hw_id,
414 struct komeda_component_funcs *funcs, 414 const struct komeda_component_funcs *funcs,
415 u8 max_active_inputs, u32 supported_inputs, 415 u8 max_active_inputs, u32 supported_inputs,
416 u8 max_active_outputs, u32 __iomem *reg, 416 u8 max_active_outputs, u32 __iomem *reg,
417 const char *name_fmt, ...); 417 const char *name_fmt, ...);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 07ed0cc1bc44..c97062bdd69b 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -55,7 +55,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
55 struct komeda_plane_state *kplane_st = to_kplane_st(state); 55 struct komeda_plane_state *kplane_st = to_kplane_st(state);
56 struct komeda_layer *layer = kplane->layer; 56 struct komeda_layer *layer = kplane->layer;
57 struct drm_crtc_state *crtc_st; 57 struct drm_crtc_state *crtc_st;
58 struct komeda_crtc *kcrtc;
59 struct komeda_crtc_state *kcrtc_st; 58 struct komeda_crtc_state *kcrtc_st;
60 struct komeda_data_flow_cfg dflow; 59 struct komeda_data_flow_cfg dflow;
61 int err; 60 int err;
@@ -64,7 +63,7 @@ komeda_plane_atomic_check(struct drm_plane *plane,
64 return 0; 63 return 0;
65 64
66 crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc); 65 crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
67 if (!crtc_st->enable) { 66 if (IS_ERR(crtc_st) || !crtc_st->enable) {
68 DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n"); 67 DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
69 return -EINVAL; 68 return -EINVAL;
70 } 69 }
@@ -73,7 +72,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
73 if (!crtc_st->active) 72 if (!crtc_st->active)
74 return 0; 73 return 0;
75 74
76 kcrtc = to_kcrtc(state->crtc);
77 kcrtc_st = to_kcrtc_st(crtc_st); 75 kcrtc_st = to_kcrtc_st(crtc_st);
78 76
79 err = komeda_plane_init_data_flow(state, &dflow); 77 err = komeda_plane_init_data_flow(state, &dflow);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 0b2b62f8fa3c..a3efa28436ea 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
186 clk_disable_unprepare(hdlcd->clk); 186 clk_disable_unprepare(hdlcd->clk);
187} 187}
188 188
189static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, 189static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
190 struct drm_crtc_state *state) 190 const struct drm_display_mode *mode)
191{ 191{
192 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); 192 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
193 struct drm_display_mode *mode = &state->adjusted_mode;
194 long rate, clk_rate = mode->clock * 1000; 193 long rate, clk_rate = mode->clock * 1000;
195 194
196 rate = clk_round_rate(hdlcd->clk, clk_rate); 195 rate = clk_round_rate(hdlcd->clk, clk_rate);
197 if (rate != clk_rate) { 196 /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
197 if (abs(rate - clk_rate) * 1000 > clk_rate) {
198 /* clock required by mode not supported by hardware */ 198 /* clock required by mode not supported by hardware */
199 return -EINVAL; 199 return MODE_NOCLOCK;
200 } 200 }
201 201
202 return 0; 202 return MODE_OK;
203} 203}
204 204
205static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, 205static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
220} 220}
221 221
222static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { 222static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
223 .atomic_check = hdlcd_crtc_atomic_check, 223 .mode_valid = hdlcd_crtc_mode_valid,
224 .atomic_begin = hdlcd_crtc_atomic_begin, 224 .atomic_begin = hdlcd_crtc_atomic_begin,
225 .atomic_enable = hdlcd_crtc_atomic_enable, 225 .atomic_enable = hdlcd_crtc_atomic_enable,
226 .atomic_disable = hdlcd_crtc_atomic_disable, 226 .atomic_disable = hdlcd_crtc_atomic_disable,
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 3ecf8ddc5130..af1992f06a1d 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -188,6 +188,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
188{ 188{
189 struct drm_device *drm = state->dev; 189 struct drm_device *drm = state->dev;
190 struct malidp_drm *malidp = drm->dev_private; 190 struct malidp_drm *malidp = drm->dev_private;
191 int loop = 5;
191 192
192 malidp->event = malidp->crtc.state->event; 193 malidp->event = malidp->crtc.state->event;
193 malidp->crtc.state->event = NULL; 194 malidp->crtc.state->event = NULL;
@@ -202,8 +203,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
202 drm_crtc_vblank_get(&malidp->crtc); 203 drm_crtc_vblank_get(&malidp->crtc);
203 204
204 /* only set config_valid if the CRTC is enabled */ 205 /* only set config_valid if the CRTC is enabled */
205 if (malidp_set_and_wait_config_valid(drm) < 0) 206 if (malidp_set_and_wait_config_valid(drm) < 0) {
207 /*
208 * make a loop around the second CVAL setting and
209 * try 5 times before giving up.
210 */
211 while (loop--) {
212 if (!malidp_set_and_wait_config_valid(drm))
213 break;
214 }
206 DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); 215 DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
216 }
217
207 } else if (malidp->event) { 218 } else if (malidp->event) {
208 /* CRTC inactive means vblank IRQ is disabled, send event directly */ 219 /* CRTC inactive means vblank IRQ is disabled, send event directly */
209 spin_lock_irq(&drm->event_lock); 220 spin_lock_irq(&drm->event_lock);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2e0cb4246cbd..22a5c617f670 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1607,15 +1607,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
1607 old_plane_state->crtc != new_plane_state->crtc) 1607 old_plane_state->crtc != new_plane_state->crtc)
1608 return -EINVAL; 1608 return -EINVAL;
1609 1609
1610 /*
1611 * FIXME: Since prepare_fb and cleanup_fb are always called on
1612 * the new_plane_state for async updates we need to block framebuffer
1613 * changes. This prevents use of a fb that's been cleaned up and
1614 * double cleanups from occuring.
1615 */
1616 if (old_plane_state->fb != new_plane_state->fb)
1617 return -EINVAL;
1618
1619 funcs = plane->helper_private; 1610 funcs = plane->helper_private;
1620 if (!funcs->atomic_async_update) 1611 if (!funcs->atomic_async_update)
1621 return -EINVAL; 1612 return -EINVAL;
@@ -1646,6 +1637,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
1646 * drm_atomic_async_check() succeeds. Async commits are not supposed to swap 1637 * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
1647 * the states like normal sync commits, but just do in-place changes on the 1638 * the states like normal sync commits, but just do in-place changes on the
1648 * current state. 1639 * current state.
1640 *
1641 * TODO: Implement full swap instead of doing in-place changes.
1649 */ 1642 */
1650void drm_atomic_helper_async_commit(struct drm_device *dev, 1643void drm_atomic_helper_async_commit(struct drm_device *dev,
1651 struct drm_atomic_state *state) 1644 struct drm_atomic_state *state)
@@ -1656,6 +1649,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
1656 int i; 1649 int i;
1657 1650
1658 for_each_new_plane_in_state(state, plane, plane_state, i) { 1651 for_each_new_plane_in_state(state, plane, plane_state, i) {
1652 struct drm_framebuffer *new_fb = plane_state->fb;
1653 struct drm_framebuffer *old_fb = plane->state->fb;
1654
1659 funcs = plane->helper_private; 1655 funcs = plane->helper_private;
1660 funcs->atomic_async_update(plane, plane_state); 1656 funcs->atomic_async_update(plane, plane_state);
1661 1657
@@ -1664,11 +1660,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
1664 * plane->state in-place, make sure at least common 1660 * plane->state in-place, make sure at least common
1665 * properties have been properly updated. 1661 * properties have been properly updated.
1666 */ 1662 */
1667 WARN_ON_ONCE(plane->state->fb != plane_state->fb); 1663 WARN_ON_ONCE(plane->state->fb != new_fb);
1668 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x); 1664 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1669 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y); 1665 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1670 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x); 1666 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1671 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y); 1667 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1668
1669 /*
1670 * Make sure the FBs have been swapped so that cleanups in the
1671 * new_state performs a cleanup in the old FB.
1672 */
1673 WARN_ON_ONCE(plane_state->fb != old_fb);
1672 } 1674 }
1673} 1675}
1674EXPORT_SYMBOL(drm_atomic_helper_async_commit); 1676EXPORT_SYMBOL(drm_atomic_helper_async_commit);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 5cb59c0b4bbe..de5347725564 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2530,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
2530 0, 12, NULL}, 2530 0, 12, NULL},
2531 2531
2532 {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS, 2532 {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2533 0, 20, NULL}, 2533 0, 12, NULL},
2534}; 2534};
2535 2535
2536static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e) 2536static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 244ad1729764..53115bdae12b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
53 */ 53 */
54bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) 54bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
55{ 55{
56 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size 56 if (size == 0)
57 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { 57 return vgpu_gmadr_is_valid(vgpu, addr);
58 gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n", 58
59 addr, size); 59 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
60 return false; 60 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
61 } 61 return true;
62 return true; 62 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
63 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
64 return true;
65
66 gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
67 addr, size);
68 return false;
63} 69}
64 70
65/* translate a guest gmadr to host gmadr */ 71/* translate a guest gmadr to host gmadr */
@@ -942,7 +948,16 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
942 948
943 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY 949 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
944 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { 950 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
945 cur_pt_type = get_next_pt_type(e->type) + 1; 951 cur_pt_type = get_next_pt_type(e->type);
952
953 if (!gtt_type_is_pt(cur_pt_type) ||
954 !gtt_type_is_pt(cur_pt_type + 1)) {
955 WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
956 return -EINVAL;
957 }
958
959 cur_pt_type += 1;
960
946 if (ops->get_pfn(e) == 961 if (ops->get_pfn(e) ==
947 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) 962 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
948 return 0; 963 return 0;
@@ -1102,6 +1117,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
1102 1117
1103err_free_spt: 1118err_free_spt:
1104 ppgtt_free_spt(spt); 1119 ppgtt_free_spt(spt);
1120 spt = NULL;
1105err: 1121err:
1106 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 1122 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1107 spt, we->val64, we->type); 1123 spt, we->val64, we->type);
@@ -2183,7 +2199,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2183 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 2199 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2184 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; 2200 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
2185 unsigned long gma, gfn; 2201 unsigned long gma, gfn;
2186 struct intel_gvt_gtt_entry e, m; 2202 struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2203 struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2187 dma_addr_t dma_addr; 2204 dma_addr_t dma_addr;
2188 int ret; 2205 int ret;
2189 struct intel_gvt_partial_pte *partial_pte, *pos, *n; 2206 struct intel_gvt_partial_pte *partial_pte, *pos, *n;
@@ -2250,7 +2267,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2250 2267
2251 if (!partial_update && (ops->test_present(&e))) { 2268 if (!partial_update && (ops->test_present(&e))) {
2252 gfn = ops->get_pfn(&e); 2269 gfn = ops->get_pfn(&e);
2253 m = e; 2270 m.val64 = e.val64;
2271 m.type = e.type;
2254 2272
2255 /* one PTE update may be issued in multiple writes and the 2273 /* one PTE update may be issued in multiple writes and the
2256 * first write may not construct a valid gfn 2274 * first write may not construct a valid gfn
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index e09bd6e0cc4d..a6ade66349bd 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
464 _MMIO(0x2690), 464 _MMIO(0x2690),
465 _MMIO(0x2694), 465 _MMIO(0x2694),
466 _MMIO(0x2698), 466 _MMIO(0x2698),
467 _MMIO(0x2754),
468 _MMIO(0x28a0),
467 _MMIO(0x4de0), 469 _MMIO(0x4de0),
468 _MMIO(0x4de4), 470 _MMIO(0x4de4),
469 _MMIO(0x4dfc), 471 _MMIO(0x4dfc),
@@ -1690,8 +1692,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1690 bool enable_execlist; 1692 bool enable_execlist;
1691 int ret; 1693 int ret;
1692 1694
1695 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
1696 if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
1697 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
1693 write_vreg(vgpu, offset, p_data, bytes); 1698 write_vreg(vgpu, offset, p_data, bytes);
1694 1699
1700 if (data & _MASKED_BIT_ENABLE(1)) {
1701 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
1702 return 0;
1703 }
1704
1705 if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
1706 data & _MASKED_BIT_ENABLE(2)) {
1707 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
1708 return 0;
1709 }
1710
1695 /* when PPGTT mode enabled, we will check if guest has called 1711 /* when PPGTT mode enabled, we will check if guest has called
1696 * pvinfo, if not, we will treat this guest as non-gvtg-aware 1712 * pvinfo, if not, we will treat this guest as non-gvtg-aware
1697 * guest, and stop emulating its cfg space, mmio, gtt, etc. 1713 * guest, and stop emulating its cfg space, mmio, gtt, etc.
@@ -1773,6 +1789,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
1773 return 0; 1789 return 0;
1774} 1790}
1775 1791
1792static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
1793 unsigned int offset, void *p_data,
1794 unsigned int bytes)
1795{
1796 u32 data = *(u32 *)p_data;
1797
1798 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
1799 write_vreg(vgpu, offset, p_data, bytes);
1800
1801 if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
1802 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
1803
1804 return 0;
1805}
1806
1776#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \ 1807#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
1777 ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \ 1808 ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
1778 f, s, am, rm, d, r, w); \ 1809 f, s, am, rm, d, r, w); \
@@ -1893,7 +1924,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1893 MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1924 MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1894 MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1925 MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1895 MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1926 MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1896 MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1927 MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
1928 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1897 MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1929 MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1898 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); 1930 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
1899 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1931 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2997,7 +3029,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2997 MMIO_D(CSR_HTP_SKL, D_SKL_PLUS); 3029 MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
2998 MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS); 3030 MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
2999 3031
3000 MMIO_D(BDW_SCRATCH1, D_SKL_PLUS); 3032 MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3001 3033
3002 MMIO_D(SKL_DFSM, D_SKL_PLUS); 3034 MMIO_D(SKL_DFSM, D_SKL_PLUS);
3003 MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS); 3035 MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
@@ -3010,8 +3042,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
3010 MMIO_D(RPM_CONFIG0, D_SKL_PLUS); 3042 MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
3011 MMIO_D(_MMIO(0xd08), D_SKL_PLUS); 3043 MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
3012 MMIO_D(RC6_LOCATION, D_SKL_PLUS); 3044 MMIO_D(RC6_LOCATION, D_SKL_PLUS);
3013 MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK, 3045 MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
3014 NULL, NULL); 3046 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3015 MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, 3047 MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3016 NULL, NULL); 3048 NULL, NULL);
3017 3049
@@ -3030,7 +3062,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
3030 MMIO_D(_MMIO(0x46520), D_SKL_PLUS); 3062 MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
3031 3063
3032 MMIO_D(_MMIO(0xc403c), D_SKL_PLUS); 3064 MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
3033 MMIO_D(_MMIO(0xb004), D_SKL_PLUS); 3065 MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3034 MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); 3066 MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
3035 3067
3036 MMIO_D(_MMIO(0x65900), D_SKL_PLUS); 3068 MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
@@ -3059,7 +3091,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
3059 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); 3091 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
3060 3092
3061 MMIO_D(_MMIO(0x44500), D_SKL_PLUS); 3093 MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
3062 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); 3094#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
3095 MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3096 NULL, csfe_chicken1_mmio_write);
3097#undef CSFE_CHICKEN1_REG
3063 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, 3098 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3064 NULL, NULL); 3099 NULL, NULL);
3065 MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, 3100 MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3239,7 +3274,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
3239 MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT); 3274 MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
3240 MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); 3275 MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
3241 MMIO_D(GEN6_GFXPAUSE, D_BXT); 3276 MMIO_D(GEN6_GFXPAUSE, D_BXT);
3242 MMIO_D(GEN8_L3SQCREG1, D_BXT); 3277 MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
3243 3278
3244 MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); 3279 MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
3245 3280
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 33aaa14bfdde..5b66e14c5b7b 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -102,6 +102,8 @@
102#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88 102#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
103#define FORCEWAKE_ACK_HSW_REG 0x130044 103#define FORCEWAKE_ACK_HSW_REG 0x130044
104 104
105#define RB_HEAD_WRAP_CNT_MAX ((1 << 11) - 1)
106#define RB_HEAD_WRAP_CNT_OFF 21
105#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2)) 107#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
106#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3)) 108#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
107#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12)) 109#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 13632dba8b2a..0f919f0a43d4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -812,10 +812,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
812 void *src; 812 void *src;
813 unsigned long context_gpa, context_page_num; 813 unsigned long context_gpa, context_page_num;
814 int i; 814 int i;
815 struct drm_i915_private *dev_priv = gvt->dev_priv;
816 u32 ring_base;
817 u32 head, tail;
818 u16 wrap_count;
815 819
816 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id, 820 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
817 workload->ctx_desc.lrca); 821 workload->ctx_desc.lrca);
818 822
823 head = workload->rb_head;
824 tail = workload->rb_tail;
825 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
826
827 if (tail < head) {
828 if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
829 wrap_count = 0;
830 else
831 wrap_count += 1;
832 }
833
834 head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
835
836 ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
837 vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
838 vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
839
819 context_page_num = rq->engine->context_size; 840 context_page_num = rq->engine->context_size;
820 context_page_num = context_page_num >> PAGE_SHIFT; 841 context_page_num = context_page_num >> PAGE_SHIFT;
821 842
@@ -1415,6 +1436,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1415 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1436 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1416 u64 ring_context_gpa; 1437 u64 ring_context_gpa;
1417 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx; 1438 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1439 u32 guest_head;
1418 int ret; 1440 int ret;
1419 1441
1420 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, 1442 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1430,6 +1452,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1430 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + 1452 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1431 RING_CTX_OFF(ring_tail.val), &tail, 4); 1453 RING_CTX_OFF(ring_tail.val), &tail, 4);
1432 1454
1455 guest_head = head;
1456
1433 head &= RB_HEAD_OFF_MASK; 1457 head &= RB_HEAD_OFF_MASK;
1434 tail &= RB_TAIL_OFF_MASK; 1458 tail &= RB_TAIL_OFF_MASK;
1435 1459
@@ -1462,6 +1486,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1462 workload->ctx_desc = *desc; 1486 workload->ctx_desc = *desc;
1463 workload->ring_context_gpa = ring_context_gpa; 1487 workload->ring_context_gpa = ring_context_gpa;
1464 workload->rb_head = head; 1488 workload->rb_head = head;
1489 workload->guest_rb_head = guest_head;
1465 workload->rb_tail = tail; 1490 workload->rb_tail = tail;
1466 workload->rb_start = start; 1491 workload->rb_start = start;
1467 workload->rb_ctl = ctl; 1492 workload->rb_ctl = ctl;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 90c6756f5453..c50d14a9ce85 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
100 struct execlist_ctx_descriptor_format ctx_desc; 100 struct execlist_ctx_descriptor_format ctx_desc;
101 struct execlist_ring_context *ring_context; 101 struct execlist_ring_context *ring_context;
102 unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len; 102 unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
103 unsigned long guest_rb_head;
103 bool restore_inhibit; 104 bool restore_inhibit;
104 struct intel_vgpu_elsp_dwords elsp_dwords; 105 struct intel_vgpu_elsp_dwords elsp_dwords;
105 bool emulate_schedule_in; 106 bool emulate_schedule_in;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 249d35c12a75..2aa69d347ec4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7620,6 +7620,9 @@ enum {
7620 #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8) 7620 #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
7621 #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0) 7621 #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
7622 7622
7623#define GEN8_L3CNTLREG _MMIO(0x7034)
7624 #define GEN8_ERRDETBCTRL (1 << 9)
7625
7623#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) 7626#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
7624 #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11) 7627 #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
7625 7628
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 6decd432f4d3..841b8e515f4d 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -518,6 +518,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
518 struct drm_i915_private *i915 = engine->i915; 518 struct drm_i915_private *i915 = engine->i915;
519 struct i915_wa_list *wal = &engine->ctx_wa_list; 519 struct i915_wa_list *wal = &engine->ctx_wa_list;
520 520
521 /* WaDisableBankHangMode:icl */
522 wa_write(wal,
523 GEN8_L3CNTLREG,
524 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
525 GEN8_ERRDETBCTRL);
526
521 /* Wa_1604370585:icl (pre-prod) 527 /* Wa_1604370585:icl (pre-prod)
522 * Formerly known as WaPushConstantDereferenceHoldDisable 528 * Formerly known as WaPushConstantDereferenceHoldDisable
523 */ 529 */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index be13140967b4..b854f471e9e5 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -502,6 +502,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
502static void mdp5_plane_atomic_async_update(struct drm_plane *plane, 502static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
503 struct drm_plane_state *new_state) 503 struct drm_plane_state *new_state)
504{ 504{
505 struct drm_framebuffer *old_fb = plane->state->fb;
506
505 plane->state->src_x = new_state->src_x; 507 plane->state->src_x = new_state->src_x;
506 plane->state->src_y = new_state->src_y; 508 plane->state->src_y = new_state->src_y;
507 plane->state->crtc_x = new_state->crtc_x; 509 plane->state->crtc_x = new_state->crtc_x;
@@ -524,6 +526,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
524 526
525 *to_mdp5_plane_state(plane->state) = 527 *to_mdp5_plane_state(plane->state) =
526 *to_mdp5_plane_state(new_state); 528 *to_mdp5_plane_state(new_state);
529
530 new_state->fb = old_fb;
527} 531}
528 532
529static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { 533static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index ff0fa38aee72..54da9c6bc8d5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,12 +1,12 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NVKM_FIRMWARE_H__ 2#ifndef __NVKM_FIRMWARE_H__
3#define __NVKM_FIRMWARE_H__ 3#define __NVKM_FIRMWARE_H__
4 4#include <core/subdev.h>
5#include <core/device.h> 5
6 6int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
7int nvkm_firmware_get(struct nvkm_device *device, const char *fwname, 7 int min_version, int max_version,
8 const struct firmware **fw); 8 const struct firmware **);
9 9int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
10void nvkm_firmware_put(const struct firmware *fw); 10 const struct firmware **);
11 11void nvkm_firmware_put(const struct firmware *);
12#endif 12#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 058ff46b5f16..092acdec2c39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -24,7 +24,7 @@
24 24
25/** 25/**
26 * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory 26 * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
27 * @device device that will use that firmware 27 * @subdev subdevice that will use that firmware
28 * @fwname name of firmware file to load 28 * @fwname name of firmware file to load
29 * @fw firmware structure to load to 29 * @fw firmware structure to load to
30 * 30 *
@@ -32,9 +32,11 @@
32 * Firmware files released by NVIDIA will always follow this format. 32 * Firmware files released by NVIDIA will always follow this format.
33 */ 33 */
34int 34int
35nvkm_firmware_get(struct nvkm_device *device, const char *fwname, 35nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
36 const struct firmware **fw) 36 int min_version, int max_version,
37 const struct firmware **fw)
37{ 38{
39 struct nvkm_device *device = subdev->device;
38 char f[64]; 40 char f[64];
39 char cname[16]; 41 char cname[16];
40 int i; 42 int i;
@@ -48,8 +50,29 @@ nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
48 cname[i] = tolower(cname[i]); 50 cname[i] = tolower(cname[i]);
49 } 51 }
50 52
51 snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); 53 for (i = max_version; i >= min_version; i--) {
52 return request_firmware(fw, f, device->dev); 54 if (i != 0)
55 snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
56 else
57 snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
58
59 if (!firmware_request_nowarn(fw, f, device->dev)) {
60 nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
61 return i;
62 }
63
64 nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
65 }
66
67 nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
68 return -ENOENT;
69}
70
71int
72nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
73 const struct firmware **fw)
74{
75 return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
53} 76}
54 77
55/** 78/**
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 81a13cf9a292..c578deb5867a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2115,12 +2115,10 @@ int
2115gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname, 2115gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
2116 struct gf100_gr_fuc *fuc) 2116 struct gf100_gr_fuc *fuc)
2117{ 2117{
2118 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
2119 struct nvkm_device *device = subdev->device;
2120 const struct firmware *fw; 2118 const struct firmware *fw;
2121 int ret; 2119 int ret;
2122 2120
2123 ret = nvkm_firmware_get(device, fwname, &fw); 2121 ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
2124 if (ret) { 2122 if (ret) {
2125 ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret); 2123 ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
2126 if (ret) 2124 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
index 75dc06557877..dc80985cf093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
@@ -36,7 +36,7 @@ nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
36 void *blob; 36 void *blob;
37 int ret; 37 int ret;
38 38
39 ret = nvkm_firmware_get(subdev->device, name, &fw); 39 ret = nvkm_firmware_get(subdev, name, &fw);
40 if (ret) 40 if (ret)
41 return ERR_PTR(ret); 41 return ERR_PTR(ret);
42 if (fw->size < min_size) { 42 if (fw->size < min_size) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 1df09ed6fe6d..4fd4cfe459b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -229,6 +229,8 @@ struct acr_r352_lsf_wpr_header {
229struct ls_ucode_img_r352 { 229struct ls_ucode_img_r352 {
230 struct ls_ucode_img base; 230 struct ls_ucode_img base;
231 231
232 const struct acr_r352_lsf_func *func;
233
232 struct acr_r352_lsf_wpr_header wpr_header; 234 struct acr_r352_lsf_wpr_header wpr_header;
233 struct acr_r352_lsf_lsb_header lsb_header; 235 struct acr_r352_lsf_lsb_header lsb_header;
234}; 236};
@@ -243,6 +245,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
243 enum nvkm_secboot_falcon falcon_id) 245 enum nvkm_secboot_falcon falcon_id)
244{ 246{
245 const struct nvkm_subdev *subdev = acr->base.subdev; 247 const struct nvkm_subdev *subdev = acr->base.subdev;
248 const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
246 struct ls_ucode_img_r352 *img; 249 struct ls_ucode_img_r352 *img;
247 int ret; 250 int ret;
248 251
@@ -252,15 +255,16 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
252 255
253 img->base.falcon_id = falcon_id; 256 img->base.falcon_id = falcon_id;
254 257
255 ret = acr->func->ls_func[falcon_id]->load(sb, &img->base); 258 ret = func->load(sb, func->version_max, &img->base);
256 259 if (ret < 0) {
257 if (ret) {
258 kfree(img->base.ucode_data); 260 kfree(img->base.ucode_data);
259 kfree(img->base.sig); 261 kfree(img->base.sig);
260 kfree(img); 262 kfree(img);
261 return ERR_PTR(ret); 263 return ERR_PTR(ret);
262 } 264 }
263 265
266 img->func = func->version[ret];
267
264 /* Check that the signature size matches our expectations... */ 268 /* Check that the signature size matches our expectations... */
265 if (img->base.sig_size != sizeof(img->lsb_header.signature)) { 269 if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
266 nvkm_error(subdev, "invalid signature size for %s falcon!\n", 270 nvkm_error(subdev, "invalid signature size for %s falcon!\n",
@@ -302,8 +306,7 @@ acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
302 struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header; 306 struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
303 struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header; 307 struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
304 struct ls_ucode_img_desc *desc = &_img->ucode_desc; 308 struct ls_ucode_img_desc *desc = &_img->ucode_desc;
305 const struct acr_r352_ls_func *func = 309 const struct acr_r352_lsf_func *func = img->func;
306 acr->func->ls_func[_img->falcon_id];
307 310
308 /* Fill WPR header */ 311 /* Fill WPR header */
309 whdr->falcon_id = _img->falcon_id; 312 whdr->falcon_id = _img->falcon_id;
@@ -419,8 +422,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
419 422
420 /* Figure out how large we need gdesc to be. */ 423 /* Figure out how large we need gdesc to be. */
421 list_for_each_entry(_img, imgs, node) { 424 list_for_each_entry(_img, imgs, node) {
422 const struct acr_r352_ls_func *ls_func = 425 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
423 acr->func->ls_func[_img->falcon_id]; 426 const struct acr_r352_lsf_func *ls_func = img->func;
424 427
425 max_desc_size = max(max_desc_size, ls_func->bl_desc_size); 428 max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
426 } 429 }
@@ -433,8 +436,7 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
433 436
434 list_for_each_entry(_img, imgs, node) { 437 list_for_each_entry(_img, imgs, node) {
435 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); 438 struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
436 const struct acr_r352_ls_func *ls_func = 439 const struct acr_r352_lsf_func *ls_func = img->func;
437 acr->func->ls_func[_img->falcon_id];
438 440
439 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, 441 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
440 sizeof(img->wpr_header)); 442 sizeof(img->wpr_header));
@@ -1063,20 +1065,36 @@ acr_r352_dtor(struct nvkm_acr *_acr)
1063 kfree(acr); 1065 kfree(acr);
1064} 1066}
1065 1067
1068static const struct acr_r352_lsf_func
1069acr_r352_ls_fecs_func_0 = {
1070 .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
1071 .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
1072};
1073
1066const struct acr_r352_ls_func 1074const struct acr_r352_ls_func
1067acr_r352_ls_fecs_func = { 1075acr_r352_ls_fecs_func = {
1068 .load = acr_ls_ucode_load_fecs, 1076 .load = acr_ls_ucode_load_fecs,
1077 .version_max = 0,
1078 .version = {
1079 &acr_r352_ls_fecs_func_0,
1080 }
1081};
1082
1083static const struct acr_r352_lsf_func
1084acr_r352_ls_gpccs_func_0 = {
1069 .generate_bl_desc = acr_r352_generate_flcn_bl_desc, 1085 .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
1070 .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), 1086 .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
1087 /* GPCCS will be loaded using PRI */
1088 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
1071}; 1089};
1072 1090
1073const struct acr_r352_ls_func 1091const struct acr_r352_ls_func
1074acr_r352_ls_gpccs_func = { 1092acr_r352_ls_gpccs_func = {
1075 .load = acr_ls_ucode_load_gpccs, 1093 .load = acr_ls_ucode_load_gpccs,
1076 .generate_bl_desc = acr_r352_generate_flcn_bl_desc, 1094 .version_max = 0,
1077 .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), 1095 .version = {
1078 /* GPCCS will be loaded using PRI */ 1096 &acr_r352_ls_gpccs_func_0,
1079 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, 1097 }
1080}; 1098};
1081 1099
1082 1100
@@ -1150,12 +1168,20 @@ acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
1150 desc->argv = addr_args; 1168 desc->argv = addr_args;
1151} 1169}
1152 1170
1171static const struct acr_r352_lsf_func
1172acr_r352_ls_pmu_func_0 = {
1173 .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
1174 .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
1175};
1176
1153static const struct acr_r352_ls_func 1177static const struct acr_r352_ls_func
1154acr_r352_ls_pmu_func = { 1178acr_r352_ls_pmu_func = {
1155 .load = acr_ls_ucode_load_pmu, 1179 .load = acr_ls_ucode_load_pmu,
1156 .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
1157 .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
1158 .post_run = acr_ls_pmu_post_run, 1180 .post_run = acr_ls_pmu_post_run,
1181 .version_max = 0,
1182 .version = {
1183 &acr_r352_ls_pmu_func_0,
1184 }
1159}; 1185};
1160 1186
1161const struct acr_r352_func 1187const struct acr_r352_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
index 3d58ab871563..e516cab849dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -47,24 +47,34 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
47} 47}
48 48
49/** 49/**
50 * struct acr_r352_ls_func - manages a single LS firmware 50 * struct acr_r352_lsf_func - manages a specific LS firmware version
51 * 51 *
52 * @load: load the external firmware into a ls_ucode_img
53 * @generate_bl_desc: function called on a block of bl_desc_size to generate the 52 * @generate_bl_desc: function called on a block of bl_desc_size to generate the
54 * proper bootloader descriptor for this LS firmware 53 * proper bootloader descriptor for this LS firmware
55 * @bl_desc_size: size of the bootloader descriptor 54 * @bl_desc_size: size of the bootloader descriptor
56 * @post_run: hook called right after the ACR is executed
57 * @lhdr_flags: LS flags 55 * @lhdr_flags: LS flags
58 */ 56 */
59struct acr_r352_ls_func { 57struct acr_r352_lsf_func {
60 int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *);
61 void (*generate_bl_desc)(const struct nvkm_acr *, 58 void (*generate_bl_desc)(const struct nvkm_acr *,
62 const struct ls_ucode_img *, u64, void *); 59 const struct ls_ucode_img *, u64, void *);
63 u32 bl_desc_size; 60 u32 bl_desc_size;
64 int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
65 u32 lhdr_flags; 61 u32 lhdr_flags;
66}; 62};
67 63
64/**
65 * struct acr_r352_ls_func - manages a single LS falcon
66 *
67 * @load: load the external firmware into a ls_ucode_img
68 * @post_run: hook called right after the ACR is executed
69 */
70struct acr_r352_ls_func {
71 int (*load)(const struct nvkm_secboot *, int maxver,
72 struct ls_ucode_img *);
73 int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
74 int version_max;
75 const struct acr_r352_lsf_func *version[];
76};
77
68struct acr_r352; 78struct acr_r352;
69 79
70/** 80/**
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
index 14b36ef93628..f6b2d20d7fc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
@@ -66,20 +66,36 @@ acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
66 bl_desc->data_size = hdr->data_size; 66 bl_desc->data_size = hdr->data_size;
67} 67}
68 68
69static const struct acr_r352_lsf_func
70acr_r361_ls_fecs_func_0 = {
71 .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
72 .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
73};
74
69const struct acr_r352_ls_func 75const struct acr_r352_ls_func
70acr_r361_ls_fecs_func = { 76acr_r361_ls_fecs_func = {
71 .load = acr_ls_ucode_load_fecs, 77 .load = acr_ls_ucode_load_fecs,
78 .version_max = 0,
79 .version = {
80 &acr_r361_ls_fecs_func_0,
81 }
82};
83
84static const struct acr_r352_lsf_func
85acr_r361_ls_gpccs_func_0 = {
72 .generate_bl_desc = acr_r361_generate_flcn_bl_desc, 86 .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
73 .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), 87 .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
88 /* GPCCS will be loaded using PRI */
89 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
74}; 90};
75 91
76const struct acr_r352_ls_func 92const struct acr_r352_ls_func
77acr_r361_ls_gpccs_func = { 93acr_r361_ls_gpccs_func = {
78 .load = acr_ls_ucode_load_gpccs, 94 .load = acr_ls_ucode_load_gpccs,
79 .generate_bl_desc = acr_r361_generate_flcn_bl_desc, 95 .version_max = 0,
80 .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), 96 .version = {
81 /* GPCCS will be loaded using PRI */ 97 &acr_r361_ls_gpccs_func_0,
82 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, 98 }
83}; 99};
84 100
85struct acr_r361_pmu_bl_desc { 101struct acr_r361_pmu_bl_desc {
@@ -125,12 +141,20 @@ acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
125 desc->argv = addr_args; 141 desc->argv = addr_args;
126} 142}
127 143
144static const struct acr_r352_lsf_func
145acr_r361_ls_pmu_func_0 = {
146 .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
147 .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
148};
149
128const struct acr_r352_ls_func 150const struct acr_r352_ls_func
129acr_r361_ls_pmu_func = { 151acr_r361_ls_pmu_func = {
130 .load = acr_ls_ucode_load_pmu, 152 .load = acr_ls_ucode_load_pmu,
131 .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
132 .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
133 .post_run = acr_ls_pmu_post_run, 153 .post_run = acr_ls_pmu_post_run,
154 .version_max = 0,
155 .version = {
156 &acr_r361_ls_pmu_func_0,
157 }
134}; 158};
135 159
136static void 160static void
@@ -164,12 +188,20 @@ acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
164 desc->argv = 0x01000000; 188 desc->argv = 0x01000000;
165} 189}
166 190
167const struct acr_r352_ls_func 191const struct acr_r352_lsf_func
168acr_r361_ls_sec2_func = { 192acr_r361_ls_sec2_func_0 = {
169 .load = acr_ls_ucode_load_sec2,
170 .generate_bl_desc = acr_r361_generate_sec2_bl_desc, 193 .generate_bl_desc = acr_r361_generate_sec2_bl_desc,
171 .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), 194 .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
195};
196
197static const struct acr_r352_ls_func
198acr_r361_ls_sec2_func = {
199 .load = acr_ls_ucode_load_sec2,
172 .post_run = acr_ls_sec2_post_run, 200 .post_run = acr_ls_sec2_post_run,
201 .version_max = 0,
202 .version = {
203 &acr_r361_ls_sec2_func_0,
204 }
173}; 205};
174 206
175 207
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
index f9f978daadb9..38dec93779c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
@@ -67,6 +67,5 @@ void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
67extern const struct acr_r352_ls_func acr_r361_ls_fecs_func; 67extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
68extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func; 68extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
69extern const struct acr_r352_ls_func acr_r361_ls_pmu_func; 69extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
70extern const struct acr_r352_ls_func acr_r361_ls_sec2_func; 70extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
71
72#endif 71#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index 978ad0790367..472ced29da7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -22,6 +22,7 @@
22 22
23#include "acr_r367.h" 23#include "acr_r367.h"
24#include "acr_r361.h" 24#include "acr_r361.h"
25#include "acr_r370.h"
25 26
26#include <core/gpuobj.h> 27#include <core/gpuobj.h>
27 28
@@ -100,6 +101,8 @@ struct acr_r367_lsf_wpr_header {
100struct ls_ucode_img_r367 { 101struct ls_ucode_img_r367 {
101 struct ls_ucode_img base; 102 struct ls_ucode_img base;
102 103
104 const struct acr_r352_lsf_func *func;
105
103 struct acr_r367_lsf_wpr_header wpr_header; 106 struct acr_r367_lsf_wpr_header wpr_header;
104 struct acr_r367_lsf_lsb_header lsb_header; 107 struct acr_r367_lsf_lsb_header lsb_header;
105}; 108};
@@ -111,6 +114,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
111 enum nvkm_secboot_falcon falcon_id) 114 enum nvkm_secboot_falcon falcon_id)
112{ 115{
113 const struct nvkm_subdev *subdev = acr->base.subdev; 116 const struct nvkm_subdev *subdev = acr->base.subdev;
117 const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
114 struct ls_ucode_img_r367 *img; 118 struct ls_ucode_img_r367 *img;
115 int ret; 119 int ret;
116 120
@@ -120,14 +124,16 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
120 124
121 img->base.falcon_id = falcon_id; 125 img->base.falcon_id = falcon_id;
122 126
123 ret = acr->func->ls_func[falcon_id]->load(sb, &img->base); 127 ret = func->load(sb, func->version_max, &img->base);
124 if (ret) { 128 if (ret < 0) {
125 kfree(img->base.ucode_data); 129 kfree(img->base.ucode_data);
126 kfree(img->base.sig); 130 kfree(img->base.sig);
127 kfree(img); 131 kfree(img);
128 return ERR_PTR(ret); 132 return ERR_PTR(ret);
129 } 133 }
130 134
135 img->func = func->version[ret];
136
131 /* Check that the signature size matches our expectations... */ 137 /* Check that the signature size matches our expectations... */
132 if (img->base.sig_size != sizeof(img->lsb_header.signature)) { 138 if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
133 nvkm_error(subdev, "invalid signature size for %s falcon!\n", 139 nvkm_error(subdev, "invalid signature size for %s falcon!\n",
@@ -158,8 +164,7 @@ acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
158 struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header; 164 struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
159 struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header; 165 struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
160 struct ls_ucode_img_desc *desc = &_img->ucode_desc; 166 struct ls_ucode_img_desc *desc = &_img->ucode_desc;
161 const struct acr_r352_ls_func *func = 167 const struct acr_r352_lsf_func *func = img->func;
162 acr->func->ls_func[_img->falcon_id];
163 168
164 /* Fill WPR header */ 169 /* Fill WPR header */
165 whdr->falcon_id = _img->falcon_id; 170 whdr->falcon_id = _img->falcon_id;
@@ -269,8 +274,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
269 u8 *gdesc; 274 u8 *gdesc;
270 275
271 list_for_each_entry(_img, imgs, node) { 276 list_for_each_entry(_img, imgs, node) {
272 const struct acr_r352_ls_func *ls_func = 277 struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
273 acr->func->ls_func[_img->falcon_id]; 278 const struct acr_r352_lsf_func *ls_func = img->func;
274 279
275 max_desc_size = max(max_desc_size, ls_func->bl_desc_size); 280 max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
276 } 281 }
@@ -283,8 +288,7 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
283 288
284 list_for_each_entry(_img, imgs, node) { 289 list_for_each_entry(_img, imgs, node) {
285 struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); 290 struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
286 const struct acr_r352_ls_func *ls_func = 291 const struct acr_r352_lsf_func *ls_func = img->func;
287 acr->func->ls_func[_img->falcon_id];
288 292
289 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, 293 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
290 sizeof(img->wpr_header)); 294 sizeof(img->wpr_header));
@@ -378,6 +382,17 @@ acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
378 } 382 }
379} 383}
380 384
385static const struct acr_r352_ls_func
386acr_r367_ls_sec2_func = {
387 .load = acr_ls_ucode_load_sec2,
388 .post_run = acr_ls_sec2_post_run,
389 .version_max = 1,
390 .version = {
391 &acr_r361_ls_sec2_func_0,
392 &acr_r370_ls_sec2_func_0,
393 }
394};
395
381const struct acr_r352_func 396const struct acr_r352_func
382acr_r367_func = { 397acr_r367_func = {
383 .fixup_hs_desc = acr_r367_fixup_hs_desc, 398 .fixup_hs_desc = acr_r367_fixup_hs_desc,
@@ -391,7 +406,7 @@ acr_r367_func = {
391 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, 406 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
392 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, 407 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
393 [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, 408 [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
394 [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func, 409 [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
395 }, 410 },
396}; 411};
397 412
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
index 2f890dfae7fc..e821d0fd6217 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
@@ -49,20 +49,36 @@ acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
49 desc->data_size = pdesc->app_resident_data_size; 49 desc->data_size = pdesc->app_resident_data_size;
50} 50}
51 51
52static const struct acr_r352_lsf_func
53acr_r370_ls_fecs_func_0 = {
54 .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
55 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
56};
57
52const struct acr_r352_ls_func 58const struct acr_r352_ls_func
53acr_r370_ls_fecs_func = { 59acr_r370_ls_fecs_func = {
54 .load = acr_ls_ucode_load_fecs, 60 .load = acr_ls_ucode_load_fecs,
61 .version_max = 0,
62 .version = {
63 &acr_r370_ls_fecs_func_0,
64 }
65};
66
67static const struct acr_r352_lsf_func
68acr_r370_ls_gpccs_func_0 = {
55 .generate_bl_desc = acr_r370_generate_flcn_bl_desc, 69 .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
56 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), 70 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
71 /* GPCCS will be loaded using PRI */
72 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
57}; 73};
58 74
59const struct acr_r352_ls_func 75const struct acr_r352_ls_func
60acr_r370_ls_gpccs_func = { 76acr_r370_ls_gpccs_func = {
61 .load = acr_ls_ucode_load_gpccs, 77 .load = acr_ls_ucode_load_gpccs,
62 .generate_bl_desc = acr_r370_generate_flcn_bl_desc, 78 .version_max = 0,
63 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), 79 .version = {
64 /* GPCCS will be loaded using PRI */ 80 &acr_r370_ls_gpccs_func_0,
65 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, 81 }
66}; 82};
67 83
68static void 84static void
@@ -95,12 +111,20 @@ acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
95 desc->argv = 0x01000000; 111 desc->argv = 0x01000000;
96} 112}
97 113
114const struct acr_r352_lsf_func
115acr_r370_ls_sec2_func_0 = {
116 .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
117 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
118};
119
98const struct acr_r352_ls_func 120const struct acr_r352_ls_func
99acr_r370_ls_sec2_func = { 121acr_r370_ls_sec2_func = {
100 .load = acr_ls_ucode_load_sec2, 122 .load = acr_ls_ucode_load_sec2,
101 .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
102 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
103 .post_run = acr_ls_sec2_post_run, 123 .post_run = acr_ls_sec2_post_run,
124 .version_max = 0,
125 .version = {
126 &acr_r370_ls_sec2_func_0,
127 }
104}; 128};
105 129
106void 130void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
index 3426f86a15e4..2efed6f995ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
@@ -46,4 +46,5 @@ struct acr_r370_flcn_bl_desc {
46void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64); 46void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
47extern const struct acr_r352_ls_func acr_r370_ls_fecs_func; 47extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
48extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func; 48extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
49extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
49#endif 50#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
index 7bdef93cb7ae..8f0647766038 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -54,12 +54,20 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
54 desc->argv = addr_args; 54 desc->argv = addr_args;
55} 55}
56 56
57static const struct acr_r352_lsf_func
58acr_r375_ls_pmu_func_0 = {
59 .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
60 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
61};
62
57const struct acr_r352_ls_func 63const struct acr_r352_ls_func
58acr_r375_ls_pmu_func = { 64acr_r375_ls_pmu_func = {
59 .load = acr_ls_ucode_load_pmu, 65 .load = acr_ls_ucode_load_pmu,
60 .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
61 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
62 .post_run = acr_ls_pmu_post_run, 66 .post_run = acr_ls_pmu_post_run,
67 .version_max = 0,
68 .version = {
69 &acr_r375_ls_pmu_func_0,
70 }
63}; 71};
64 72
65const struct acr_r352_func 73const struct acr_r352_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
index 9b7c402594e8..d43f906da3a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -147,11 +147,15 @@ struct fw_bl_desc {
147 u32 data_size; 147 u32 data_size;
148}; 148};
149 149
150int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *); 150int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
151int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *); 151 struct ls_ucode_img *);
152int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *); 152int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
153 struct ls_ucode_img *);
154int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
155 struct ls_ucode_img *);
153int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); 156int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
154int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *); 157int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
158 struct ls_ucode_img *);
155int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); 159int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
156 160
157#endif 161#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index 1b0c793c0192..821d3b2bdb1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -90,30 +90,30 @@ ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
90 * blob. Also generate the corresponding ucode descriptor. 90 * blob. Also generate the corresponding ucode descriptor.
91 */ 91 */
92static int 92static int
93ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img, 93ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
94 const char *falcon_name) 94 struct ls_ucode_img *img, const char *falcon_name)
95{ 95{
96 const struct firmware *bl, *code, *data, *sig; 96 const struct firmware *bl, *code, *data, *sig;
97 char f[64]; 97 char f[64];
98 int ret; 98 int ret;
99 99
100 snprintf(f, sizeof(f), "gr/%s_bl", falcon_name); 100 snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
101 ret = nvkm_firmware_get(subdev->device, f, &bl); 101 ret = nvkm_firmware_get(subdev, f, &bl);
102 if (ret) 102 if (ret)
103 goto error; 103 goto error;
104 104
105 snprintf(f, sizeof(f), "gr/%s_inst", falcon_name); 105 snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
106 ret = nvkm_firmware_get(subdev->device, f, &code); 106 ret = nvkm_firmware_get(subdev, f, &code);
107 if (ret) 107 if (ret)
108 goto free_bl; 108 goto free_bl;
109 109
110 snprintf(f, sizeof(f), "gr/%s_data", falcon_name); 110 snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
111 ret = nvkm_firmware_get(subdev->device, f, &data); 111 ret = nvkm_firmware_get(subdev, f, &data);
112 if (ret) 112 if (ret)
113 goto free_inst; 113 goto free_inst;
114 114
115 snprintf(f, sizeof(f), "gr/%s_sig", falcon_name); 115 snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
116 ret = nvkm_firmware_get(subdev->device, f, &sig); 116 ret = nvkm_firmware_get(subdev, f, &sig);
117 if (ret) 117 if (ret)
118 goto free_data; 118 goto free_data;
119 119
@@ -146,13 +146,15 @@ error:
146} 146}
147 147
148int 148int
149acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img) 149acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
150 struct ls_ucode_img *img)
150{ 151{
151 return ls_ucode_img_load_gr(&sb->subdev, img, "fecs"); 152 return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
152} 153}
153 154
154int 155int
155acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img) 156acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
157 struct ls_ucode_img *img)
156{ 158{
157 return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs"); 159 return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
158} 160}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index 1e1f1c635cab..77c13b096a67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -39,32 +39,32 @@
39 */ 39 */
40static int 40static int
41acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name, 41acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
42 struct ls_ucode_img *img) 42 int maxver, struct ls_ucode_img *img)
43{ 43{
44 const struct firmware *image, *desc, *sig; 44 const struct firmware *image, *desc, *sig;
45 char f[64]; 45 char f[64];
46 int ret; 46 int ver, ret;
47 47
48 snprintf(f, sizeof(f), "%s/image", name); 48 snprintf(f, sizeof(f), "%s/image", name);
49 ret = nvkm_firmware_get(subdev->device, f, &image); 49 ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
50 if (ret) 50 if (ver < 0)
51 return ret; 51 return ver;
52 img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL); 52 img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
53 nvkm_firmware_put(image); 53 nvkm_firmware_put(image);
54 if (!img->ucode_data) 54 if (!img->ucode_data)
55 return -ENOMEM; 55 return -ENOMEM;
56 56
57 snprintf(f, sizeof(f), "%s/desc", name); 57 snprintf(f, sizeof(f), "%s/desc", name);
58 ret = nvkm_firmware_get(subdev->device, f, &desc); 58 ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
59 if (ret) 59 if (ret < 0)
60 return ret; 60 return ret;
61 memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc)); 61 memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
62 img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256); 62 img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
63 nvkm_firmware_put(desc); 63 nvkm_firmware_put(desc);
64 64
65 snprintf(f, sizeof(f), "%s/sig", name); 65 snprintf(f, sizeof(f), "%s/sig", name);
66 ret = nvkm_firmware_get(subdev->device, f, &sig); 66 ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
67 if (ret) 67 if (ret < 0)
68 return ret; 68 return ret;
69 img->sig_size = sig->size; 69 img->sig_size = sig->size;
70 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); 70 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
@@ -72,7 +72,7 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
72 if (!img->sig) 72 if (!img->sig)
73 return -ENOMEM; 73 return -ENOMEM;
74 74
75 return 0; 75 return ver;
76} 76}
77 77
78static int 78static int
@@ -99,12 +99,13 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
99} 99}
100 100
101int 101int
102acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img) 102acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
103 struct ls_ucode_img *img)
103{ 104{
104 struct nvkm_pmu *pmu = sb->subdev.device->pmu; 105 struct nvkm_pmu *pmu = sb->subdev.device->pmu;
105 int ret; 106 int ret;
106 107
107 ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img); 108 ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
108 if (ret) 109 if (ret)
109 return ret; 110 return ret;
110 111
@@ -136,14 +137,15 @@ acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
136} 137}
137 138
138int 139int
139acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img) 140acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
141 struct ls_ucode_img *img)
140{ 142{
141 struct nvkm_sec2 *sec = sb->subdev.device->sec2; 143 struct nvkm_sec2 *sec = sb->subdev.device->sec2;
142 int ret; 144 int ver, ret;
143 145
144 ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img); 146 ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
145 if (ret) 147 if (ver < 0)
146 return ret; 148 return ver;
147 149
148 /* Allocate the PMU queue corresponding to the FW version */ 150 /* Allocate the PMU queue corresponding to the FW version */
149 ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon, 151 ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
@@ -151,7 +153,7 @@ acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
151 if (ret) 153 if (ret)
152 return ret; 154 return ret;
153 155
154 return 0; 156 return ver;
155} 157}
156 158
157int 159int
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 447e96f9d259..12ed5265a90b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -916,29 +916,17 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
916 struct drm_plane_state *new_state) 916 struct drm_plane_state *new_state)
917{ 917{
918 struct vop *vop = to_vop(plane->state->crtc); 918 struct vop *vop = to_vop(plane->state->crtc);
919 struct drm_plane_state *plane_state; 919 struct drm_framebuffer *old_fb = plane->state->fb;
920 920
921 plane_state = plane->funcs->atomic_duplicate_state(plane); 921 plane->state->crtc_x = new_state->crtc_x;
922 plane_state->crtc_x = new_state->crtc_x; 922 plane->state->crtc_y = new_state->crtc_y;
923 plane_state->crtc_y = new_state->crtc_y; 923 plane->state->crtc_h = new_state->crtc_h;
924 plane_state->crtc_h = new_state->crtc_h; 924 plane->state->crtc_w = new_state->crtc_w;
925 plane_state->crtc_w = new_state->crtc_w; 925 plane->state->src_x = new_state->src_x;
926 plane_state->src_x = new_state->src_x; 926 plane->state->src_y = new_state->src_y;
927 plane_state->src_y = new_state->src_y; 927 plane->state->src_h = new_state->src_h;
928 plane_state->src_h = new_state->src_h; 928 plane->state->src_w = new_state->src_w;
929 plane_state->src_w = new_state->src_w; 929 swap(plane->state->fb, new_state->fb);
930
931 if (plane_state->fb != new_state->fb)
932 drm_atomic_set_fb_for_plane(plane_state, new_state->fb);
933
934 swap(plane_state, plane->state);
935
936 if (plane->state->fb && plane->state->fb != new_state->fb) {
937 drm_framebuffer_get(plane->state->fb);
938 WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
939 drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb);
940 set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
941 }
942 930
943 if (vop->is_enabled) { 931 if (vop->is_enabled) {
944 rockchip_drm_psr_inhibit_get_state(new_state->state); 932 rockchip_drm_psr_inhibit_get_state(new_state->state);
@@ -947,9 +935,22 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
947 vop_cfg_done(vop); 935 vop_cfg_done(vop);
948 spin_unlock(&vop->reg_lock); 936 spin_unlock(&vop->reg_lock);
949 rockchip_drm_psr_inhibit_put_state(new_state->state); 937 rockchip_drm_psr_inhibit_put_state(new_state->state);
950 }
951 938
952 plane->funcs->atomic_destroy_state(plane, plane_state); 939 /*
940 * A scanout can still be occurring, so we can't drop the
941 * reference to the old framebuffer. To solve this we get a
942 * reference to old_fb and set a worker to release it later.
943 * FIXME: if we perform 500 async_update calls before the
944 * vblank, then we can have 500 different framebuffers waiting
945 * to be released.
946 */
947 if (old_fb && plane->state->fb != old_fb) {
948 drm_framebuffer_get(old_fb);
949 WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
950 drm_flip_work_queue(&vop->fb_unref_work, old_fb);
951 set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
952 }
953 }
953} 954}
954 955
955static const struct drm_plane_helper_funcs plane_helper_funcs = { 956static const struct drm_plane_helper_funcs plane_helper_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4d918d3e4858..afc80b245ea3 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1025,7 +1025,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
1025{ 1025{
1026 struct vc4_plane_state *vc4_state, *new_vc4_state; 1026 struct vc4_plane_state *vc4_state, *new_vc4_state;
1027 1027
1028 drm_atomic_set_fb_for_plane(plane->state, state->fb); 1028 swap(plane->state->fb, state->fb);
1029 plane->state->crtc_x = state->crtc_x; 1029 plane->state->crtc_x = state->crtc_x;
1030 plane->state->crtc_y = state->crtc_y; 1030 plane->state->crtc_y = state->crtc_y;
1031 plane->state->crtc_w = state->crtc_w; 1031 plane->state->crtc_w = state->crtc_w;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 35d58736a3ed..05e120e01cb4 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -633,7 +633,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
633 if (err) 633 if (err)
634 goto free_hwmon; 634 goto free_hwmon;
635 635
636 if (dev && chip && chip->ops->read && 636 if (dev && dev->of_node && chip && chip->ops->read &&
637 chip->info[0]->type == hwmon_chip && 637 chip->info[0]->type == hwmon_chip &&
638 (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) { 638 (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
639 const struct hwmon_channel_info **info = chip->info; 639 const struct hwmon_channel_info **info = chip->info;
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index ef7ee90ee785..8470097907bc 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1217,7 +1217,8 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1217 const struct pmbus_driver_info *info, 1217 const struct pmbus_driver_info *info,
1218 const char *name, 1218 const char *name,
1219 int index, int page, 1219 int index, int page,
1220 const struct pmbus_sensor_attr *attr) 1220 const struct pmbus_sensor_attr *attr,
1221 bool paged)
1221{ 1222{
1222 struct pmbus_sensor *base; 1223 struct pmbus_sensor *base;
1223 bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */ 1224 bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */
@@ -1225,7 +1226,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1225 1226
1226 if (attr->label) { 1227 if (attr->label) {
1227 ret = pmbus_add_label(data, name, index, attr->label, 1228 ret = pmbus_add_label(data, name, index, attr->label,
1228 attr->paged ? page + 1 : 0); 1229 paged ? page + 1 : 0);
1229 if (ret) 1230 if (ret)
1230 return ret; 1231 return ret;
1231 } 1232 }
@@ -1258,6 +1259,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
1258 return 0; 1259 return 0;
1259} 1260}
1260 1261
1262static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info,
1263 const struct pmbus_sensor_attr *attr)
1264{
1265 int p;
1266
1267 if (attr->paged)
1268 return true;
1269
1270 /*
1271 * Some attributes may be present on more than one page despite
1272 * not being marked with the paged attribute. If that is the case,
1273 * then treat the sensor as being paged and add the page suffix to the
1274 * attribute name.
1275 * We don't just add the paged attribute to all such attributes, in
1276 * order to maintain the un-suffixed labels in the case where the
1277 * attribute is only on page 0.
1278 */
1279 for (p = 1; p < info->pages; p++) {
1280 if (info->func[p] & attr->func)
1281 return true;
1282 }
1283 return false;
1284}
1285
1261static int pmbus_add_sensor_attrs(struct i2c_client *client, 1286static int pmbus_add_sensor_attrs(struct i2c_client *client,
1262 struct pmbus_data *data, 1287 struct pmbus_data *data,
1263 const char *name, 1288 const char *name,
@@ -1271,14 +1296,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
1271 index = 1; 1296 index = 1;
1272 for (i = 0; i < nattrs; i++) { 1297 for (i = 0; i < nattrs; i++) {
1273 int page, pages; 1298 int page, pages;
1299 bool paged = pmbus_sensor_is_paged(info, attrs);
1274 1300
1275 pages = attrs->paged ? info->pages : 1; 1301 pages = paged ? info->pages : 1;
1276 for (page = 0; page < pages; page++) { 1302 for (page = 0; page < pages; page++) {
1277 if (!(info->func[page] & attrs->func)) 1303 if (!(info->func[page] & attrs->func))
1278 continue; 1304 continue;
1279 ret = pmbus_add_sensor_attrs_one(client, data, info, 1305 ret = pmbus_add_sensor_attrs_one(client, data, info,
1280 name, index, page, 1306 name, index, page,
1281 attrs); 1307 attrs, paged);
1282 if (ret) 1308 if (ret)
1283 return ret; 1309 return ret;
1284 index++; 1310 index++;
@@ -1942,11 +1968,14 @@ static ssize_t pmbus_set_samples(struct device *dev,
1942 long val; 1968 long val;
1943 struct i2c_client *client = to_i2c_client(dev->parent); 1969 struct i2c_client *client = to_i2c_client(dev->parent);
1944 struct pmbus_samples_reg *reg = to_samples_reg(devattr); 1970 struct pmbus_samples_reg *reg = to_samples_reg(devattr);
1971 struct pmbus_data *data = i2c_get_clientdata(client);
1945 1972
1946 if (kstrtol(buf, 0, &val) < 0) 1973 if (kstrtol(buf, 0, &val) < 0)
1947 return -EINVAL; 1974 return -EINVAL;
1948 1975
1976 mutex_lock(&data->update_lock);
1949 ret = _pmbus_write_word_data(client, reg->page, reg->attr->reg, val); 1977 ret = _pmbus_write_word_data(client, reg->page, reg->attr->reg, val);
1978 mutex_unlock(&data->update_lock);
1950 1979
1951 return ret ? : count; 1980 return ret ? : count;
1952} 1981}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 0fea7c54f788..37b3b9307d07 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -709,11 +709,16 @@ static const struct i2c_algorithm xiic_algorithm = {
709 .functionality = xiic_func, 709 .functionality = xiic_func,
710}; 710};
711 711
712static const struct i2c_adapter_quirks xiic_quirks = {
713 .max_read_len = 255,
714};
715
712static const struct i2c_adapter xiic_adapter = { 716static const struct i2c_adapter xiic_adapter = {
713 .owner = THIS_MODULE, 717 .owner = THIS_MODULE,
714 .name = DRIVER_NAME, 718 .name = DRIVER_NAME,
715 .class = I2C_CLASS_DEPRECATED, 719 .class = I2C_CLASS_DEPRECATED,
716 .algo = &xiic_algorithm, 720 .algo = &xiic_algorithm,
721 .quirks = &xiic_quirks,
717}; 722};
718 723
719 724
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 78dc07c6ac4b..29f7b15c81d9 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -409,27 +409,44 @@ static int rename_compat_devs(struct ib_device *device)
409 409
410int ib_device_rename(struct ib_device *ibdev, const char *name) 410int ib_device_rename(struct ib_device *ibdev, const char *name)
411{ 411{
412 unsigned long index;
413 void *client_data;
412 int ret; 414 int ret;
413 415
414 down_write(&devices_rwsem); 416 down_write(&devices_rwsem);
415 if (!strcmp(name, dev_name(&ibdev->dev))) { 417 if (!strcmp(name, dev_name(&ibdev->dev))) {
416 ret = 0; 418 up_write(&devices_rwsem);
417 goto out; 419 return 0;
418 } 420 }
419 421
420 if (__ib_device_get_by_name(name)) { 422 if (__ib_device_get_by_name(name)) {
421 ret = -EEXIST; 423 up_write(&devices_rwsem);
422 goto out; 424 return -EEXIST;
423 } 425 }
424 426
425 ret = device_rename(&ibdev->dev, name); 427 ret = device_rename(&ibdev->dev, name);
426 if (ret) 428 if (ret) {
427 goto out; 429 up_write(&devices_rwsem);
430 return ret;
431 }
432
428 strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX); 433 strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
429 ret = rename_compat_devs(ibdev); 434 ret = rename_compat_devs(ibdev);
430out: 435
431 up_write(&devices_rwsem); 436 downgrade_write(&devices_rwsem);
432 return ret; 437 down_read(&ibdev->client_data_rwsem);
438 xan_for_each_marked(&ibdev->client_data, index, client_data,
439 CLIENT_DATA_REGISTERED) {
440 struct ib_client *client = xa_load(&clients, index);
441
442 if (!client || !client->rename)
443 continue;
444
445 client->rename(ibdev, client_data);
446 }
447 up_read(&ibdev->client_data_rwsem);
448 up_read(&devices_rwsem);
449 return 0;
433} 450}
434 451
435static int alloc_name(struct ib_device *ibdev, const char *name) 452static int alloc_name(struct ib_device *ibdev, const char *name)
@@ -474,14 +491,15 @@ static void ib_device_release(struct device *device)
474 491
475 free_netdevs(dev); 492 free_netdevs(dev);
476 WARN_ON(refcount_read(&dev->refcount)); 493 WARN_ON(refcount_read(&dev->refcount));
477 ib_cache_release_one(dev); 494 if (dev->port_data) {
478 ib_security_release_port_pkey_list(dev); 495 ib_cache_release_one(dev);
479 xa_destroy(&dev->compat_devs); 496 ib_security_release_port_pkey_list(dev);
480 xa_destroy(&dev->client_data);
481 if (dev->port_data)
482 kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu, 497 kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
483 pdata[0]), 498 pdata[0]),
484 rcu_head); 499 rcu_head);
500 }
501 xa_destroy(&dev->compat_devs);
502 xa_destroy(&dev->client_data);
485 kfree_rcu(dev, rcu_head); 503 kfree_rcu(dev, rcu_head);
486} 504}
487 505
@@ -1935,6 +1953,9 @@ static void free_netdevs(struct ib_device *ib_dev)
1935 unsigned long flags; 1953 unsigned long flags;
1936 unsigned int port; 1954 unsigned int port;
1937 1955
1956 if (!ib_dev->port_data)
1957 return;
1958
1938 rdma_for_each_port (ib_dev, port) { 1959 rdma_for_each_port (ib_dev, port) {
1939 struct ib_port_data *pdata = &ib_dev->port_data[port]; 1960 struct ib_port_data *pdata = &ib_dev->port_data[port];
1940 struct net_device *ndev; 1961 struct net_device *ndev;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 5445323629b5..e63fbda25e1d 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -110,6 +110,8 @@ int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
110void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); 110void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
111void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); 111void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
112 112
113struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs);
114
113/* 115/*
114 * This is the runtime description of the uverbs API, used by the syscall 116 * This is the runtime description of the uverbs API, used by the syscall
115 * machinery to validate and dispatch calls. 117 * machinery to validate and dispatch calls.
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5a3a1780ceea..63fe14c7c68f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -174,6 +174,17 @@ static int uverbs_request_finish(struct uverbs_req_iter *iter)
174 return 0; 174 return 0;
175} 175}
176 176
177/*
178 * When calling a destroy function during an error unwind we need to pass in
179 * the udata that is sanitized of all user arguments. Ie from the driver
180 * perspective it looks like no udata was passed.
181 */
182struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
183{
184 attrs->driver_udata = (struct ib_udata){};
185 return &attrs->driver_udata;
186}
187
177static struct ib_uverbs_completion_event_file * 188static struct ib_uverbs_completion_event_file *
178_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs) 189_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
179{ 190{
@@ -441,7 +452,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
441 return uobj_alloc_commit(uobj, attrs); 452 return uobj_alloc_commit(uobj, attrs);
442 453
443err_copy: 454err_copy:
444 ib_dealloc_pd_user(pd, &attrs->driver_udata); 455 ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
445 pd = NULL; 456 pd = NULL;
446err_alloc: 457err_alloc:
447 kfree(pd); 458 kfree(pd);
@@ -644,7 +655,7 @@ err_copy:
644 } 655 }
645 656
646err_dealloc_xrcd: 657err_dealloc_xrcd:
647 ib_dealloc_xrcd(xrcd, &attrs->driver_udata); 658 ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs));
648 659
649err: 660err:
650 uobj_alloc_abort(&obj->uobject, attrs); 661 uobj_alloc_abort(&obj->uobject, attrs);
@@ -767,7 +778,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
767 return uobj_alloc_commit(uobj, attrs); 778 return uobj_alloc_commit(uobj, attrs);
768 779
769err_copy: 780err_copy:
770 ib_dereg_mr_user(mr, &attrs->driver_udata); 781 ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
771 782
772err_put: 783err_put:
773 uobj_put_obj_read(pd); 784 uobj_put_obj_read(pd);
@@ -1042,7 +1053,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
1042 return obj; 1053 return obj;
1043 1054
1044err_cb: 1055err_cb:
1045 ib_destroy_cq(cq); 1056 ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
1046 1057
1047err_file: 1058err_file:
1048 if (ev_file) 1059 if (ev_file)
@@ -1478,7 +1489,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
1478 1489
1479 return uobj_alloc_commit(&obj->uevent.uobject, attrs); 1490 return uobj_alloc_commit(&obj->uevent.uobject, attrs);
1480err_cb: 1491err_cb:
1481 ib_destroy_qp(qp); 1492 ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
1482 1493
1483err_put: 1494err_put:
1484 if (!IS_ERR(xrcd_uobj)) 1495 if (!IS_ERR(xrcd_uobj))
@@ -1611,7 +1622,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
1611 return uobj_alloc_commit(&obj->uevent.uobject, attrs); 1622 return uobj_alloc_commit(&obj->uevent.uobject, attrs);
1612 1623
1613err_destroy: 1624err_destroy:
1614 ib_destroy_qp(qp); 1625 ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
1615err_xrcd: 1626err_xrcd:
1616 uobj_put_read(xrcd_uobj); 1627 uobj_put_read(xrcd_uobj);
1617err_put: 1628err_put:
@@ -2453,7 +2464,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
2453 return uobj_alloc_commit(uobj, attrs); 2464 return uobj_alloc_commit(uobj, attrs);
2454 2465
2455err_copy: 2466err_copy:
2456 rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); 2467 rdma_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
2468 uverbs_get_cleared_udata(attrs));
2457 2469
2458err_put: 2470err_put:
2459 uobj_put_obj_read(pd); 2471 uobj_put_obj_read(pd);
@@ -2964,7 +2976,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
2964 return uobj_alloc_commit(&obj->uevent.uobject, attrs); 2976 return uobj_alloc_commit(&obj->uevent.uobject, attrs);
2965 2977
2966err_copy: 2978err_copy:
2967 ib_destroy_wq(wq, &attrs->driver_udata); 2979 ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
2968err_put_cq: 2980err_put_cq:
2969 uobj_put_obj_read(cq); 2981 uobj_put_obj_read(cq);
2970err_put_pd: 2982err_put_pd:
@@ -3464,7 +3476,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
3464 return uobj_alloc_commit(&obj->uevent.uobject, attrs); 3476 return uobj_alloc_commit(&obj->uevent.uobject, attrs);
3465 3477
3466err_copy: 3478err_copy:
3467 ib_destroy_srq_user(srq, &attrs->driver_udata); 3479 ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
3468 3480
3469err_free: 3481err_free:
3470 kfree(srq); 3482 kfree(srq);
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index db5c46a1bb2d..07ea4e3c4566 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -135,7 +135,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
135 135
136 return 0; 136 return 0;
137err_cq: 137err_cq:
138 ib_destroy_cq(cq); 138 ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
139 139
140err_event_file: 140err_event_file:
141 if (ev_file) 141 if (ev_file)
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 610d3b9f7654..997f7a3a558a 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -148,7 +148,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
148 return 0; 148 return 0;
149 149
150err_dereg: 150err_dereg:
151 ib_dereg_mr_user(mr, &attrs->driver_udata); 151 ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
152 152
153 return ret; 153 return ret;
154} 154}
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 6d6886c9009f..0fea5d63fdbe 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1728,7 +1728,6 @@ int efa_mmap(struct ib_ucontext *ibucontext,
1728 ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n"); 1728 ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
1729 return -EPERM; 1729 return -EPERM;
1730 } 1730 }
1731 vma->vm_flags &= ~VM_MAYEXEC;
1732 1731
1733 return __efa_mmap(dev, ucontext, vma, key, length); 1732 return __efa_mmap(dev, ucontext, vma, key, length);
1734} 1733}
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 310105d4e3de..4221a99ee7f4 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9850,6 +9850,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9850 9850
9851 /* disable the port */ 9851 /* disable the port */
9852 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 9852 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9853 cancel_work_sync(&ppd->freeze_work);
9853} 9854}
9854 9855
9855static inline int init_cpu_counters(struct hfi1_devdata *dd) 9856static inline int init_cpu_counters(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 0cd71ce7cc71..3592a9ec155e 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
324 u32 *tidlist = NULL; 324 u32 *tidlist = NULL;
325 struct tid_user_buf *tidbuf; 325 struct tid_user_buf *tidbuf;
326 326
327 if (!PAGE_ALIGNED(tinfo->vaddr))
328 return -EINVAL;
329
327 tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); 330 tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
328 if (!tidbuf) 331 if (!tidbuf)
329 return -ENOMEM; 332 return -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 1eb4105b2d22..a2b26a635baf 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1356,8 +1356,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
1356 rdi->dparms.props.max_cq = hfi1_max_cqs; 1356 rdi->dparms.props.max_cq = hfi1_max_cqs;
1357 rdi->dparms.props.max_ah = hfi1_max_ahs; 1357 rdi->dparms.props.max_ah = hfi1_max_ahs;
1358 rdi->dparms.props.max_cqe = hfi1_max_cqes; 1358 rdi->dparms.props.max_cqe = hfi1_max_cqes;
1359 rdi->dparms.props.max_mr = rdi->lkey_table.max;
1360 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1361 rdi->dparms.props.max_map_per_fmr = 32767; 1359 rdi->dparms.props.max_map_per_fmr = 32767;
1362 rdi->dparms.props.max_pd = hfi1_max_pds; 1360 rdi->dparms.props.max_pd = hfi1_max_pds;
1363 rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; 1361 rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 4c5d0f160c10..e068a02122f5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -899,6 +899,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
899 dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret); 899 dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
900 900
901 hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL); 901 hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
902 kfree(&free_mr->mr_free_pd->ibpd);
902} 903}
903 904
904static int hns_roce_db_init(struct hns_roce_dev *hr_dev) 905static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index e3ec79b8f7f5..6c8645033102 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -190,12 +190,12 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
190 u16 uid, phys_addr_t *addr, u32 *obj_id) 190 u16 uid, phys_addr_t *addr, u32 *obj_id)
191{ 191{
192 struct mlx5_core_dev *dev = dm->dev; 192 struct mlx5_core_dev *dev = dm->dev;
193 u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
194 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; 193 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
195 u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {}; 194 u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
196 unsigned long *block_map; 195 unsigned long *block_map;
197 u64 icm_start_addr; 196 u64 icm_start_addr;
198 u32 log_icm_size; 197 u32 log_icm_size;
198 u32 num_blocks;
199 u32 max_blocks; 199 u32 max_blocks;
200 u64 block_idx; 200 u64 block_idx;
201 void *sw_icm; 201 void *sw_icm;
@@ -224,6 +224,8 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
224 return -EINVAL; 224 return -EINVAL;
225 } 225 }
226 226
227 num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
228 MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
227 max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); 229 max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
228 spin_lock(&dm->lock); 230 spin_lock(&dm->lock);
229 block_idx = bitmap_find_next_zero_area(block_map, 231 block_idx = bitmap_find_next_zero_area(block_map,
@@ -266,13 +268,16 @@ int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
266 u16 uid, phys_addr_t addr, u32 obj_id) 268 u16 uid, phys_addr_t addr, u32 obj_id)
267{ 269{
268 struct mlx5_core_dev *dev = dm->dev; 270 struct mlx5_core_dev *dev = dm->dev;
269 u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
270 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; 271 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
271 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; 272 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
272 unsigned long *block_map; 273 unsigned long *block_map;
274 u32 num_blocks;
273 u64 start_idx; 275 u64 start_idx;
274 int err; 276 int err;
275 277
278 num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
279 MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
280
276 switch (type) { 281 switch (type) {
277 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 282 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
278 start_idx = 283 start_idx =
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index abac70ad5c7c..340290b883fe 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2344,7 +2344,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
2344 /* Allocation size must a multiple of the basic block size 2344 /* Allocation size must a multiple of the basic block size
2345 * and a power of 2. 2345 * and a power of 2.
2346 */ 2346 */
2347 act_size = roundup(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev)); 2347 act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
2348 act_size = roundup_pow_of_two(act_size); 2348 act_size = roundup_pow_of_two(act_size);
2349 2349
2350 dm->size = act_size; 2350 dm->size = act_size;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 5ff32d32c61c..2c4e569ce438 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1459,8 +1459,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
1459 rdi->dparms.props.max_cq = ib_qib_max_cqs; 1459 rdi->dparms.props.max_cq = ib_qib_max_cqs;
1460 rdi->dparms.props.max_cqe = ib_qib_max_cqes; 1460 rdi->dparms.props.max_cqe = ib_qib_max_cqes;
1461 rdi->dparms.props.max_ah = ib_qib_max_ahs; 1461 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1462 rdi->dparms.props.max_mr = rdi->lkey_table.max;
1463 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1464 rdi->dparms.props.max_map_per_fmr = 32767; 1462 rdi->dparms.props.max_map_per_fmr = 32767;
1465 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; 1463 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1466 rdi->dparms.props.max_qp_init_rd_atom = 255; 1464 rdi->dparms.props.max_qp_init_rd_atom = 255;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 54f3f9c27552..f48240f66b8f 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -96,6 +96,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
96 for (i = 0; i < rdi->lkey_table.max; i++) 96 for (i = 0; i < rdi->lkey_table.max; i++)
97 RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL); 97 RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
98 98
99 rdi->dparms.props.max_mr = rdi->lkey_table.max;
100 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
99 return 0; 101 return 0;
100} 102}
101 103
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 31a2e65e4906..c5a50614a6c6 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -594,7 +594,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
594 offset = qpt->incr | ((offset & 1) ^ 1); 594 offset = qpt->incr | ((offset & 1) ^ 1);
595 } 595 }
596 /* there can be no set bits in low-order QoS bits */ 596 /* there can be no set bits in low-order QoS bits */
597 WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); 597 WARN_ON(rdi->dparms.qos_shift > 1 &&
598 offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
598 qpn = mk_qpn(qpt, map, offset); 599 qpn = mk_qpn(qpt, map, offset);
599 } 600 }
600 601
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index be9ddcad8f28..4305da2c9037 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -148,6 +148,7 @@ MODULE_PARM_DESC(ch_count,
148 148
149static void srp_add_one(struct ib_device *device); 149static void srp_add_one(struct ib_device *device);
150static void srp_remove_one(struct ib_device *device, void *client_data); 150static void srp_remove_one(struct ib_device *device, void *client_data);
151static void srp_rename_dev(struct ib_device *device, void *client_data);
151static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc); 152static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
152static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc, 153static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
153 const char *opname); 154 const char *opname);
@@ -162,7 +163,8 @@ static struct workqueue_struct *srp_remove_wq;
162static struct ib_client srp_client = { 163static struct ib_client srp_client = {
163 .name = "srp", 164 .name = "srp",
164 .add = srp_add_one, 165 .add = srp_add_one,
165 .remove = srp_remove_one 166 .remove = srp_remove_one,
167 .rename = srp_rename_dev
166}; 168};
167 169
168static struct ib_sa_client srp_sa_client; 170static struct ib_sa_client srp_sa_client;
@@ -4112,6 +4114,20 @@ free_host:
4112 return NULL; 4114 return NULL;
4113} 4115}
4114 4116
4117static void srp_rename_dev(struct ib_device *device, void *client_data)
4118{
4119 struct srp_device *srp_dev = client_data;
4120 struct srp_host *host, *tmp_host;
4121
4122 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4123 char name[IB_DEVICE_NAME_MAX + 8];
4124
4125 snprintf(name, sizeof(name), "srp-%s-%d",
4126 dev_name(&device->dev), host->port);
4127 device_rename(&host->dev, name);
4128 }
4129}
4130
4115static void srp_add_one(struct ib_device *device) 4131static void srp_add_one(struct ib_device *device)
4116{ 4132{
4117 struct srp_device *srp_dev; 4133 struct srp_device *srp_dev;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index aba50ec98b4d..9545e87b6085 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -694,13 +694,13 @@ static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
694 694
695/*** Data transfer ***/ 695/*** Data transfer ***/
696 696
697static int mspro_block_issue_req(struct memstick_dev *card, bool chunk) 697static int mspro_block_issue_req(struct memstick_dev *card)
698{ 698{
699 struct mspro_block_data *msb = memstick_get_drvdata(card); 699 struct mspro_block_data *msb = memstick_get_drvdata(card);
700 u64 t_off; 700 u64 t_off;
701 unsigned int count; 701 unsigned int count;
702 702
703 while (chunk) { 703 while (true) {
704 msb->current_page = 0; 704 msb->current_page = 0;
705 msb->current_seg = 0; 705 msb->current_seg = 0;
706 msb->seg_count = blk_rq_map_sg(msb->block_req->q, 706 msb->seg_count = blk_rq_map_sg(msb->block_req->q,
@@ -709,6 +709,7 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
709 709
710 if (!msb->seg_count) { 710 if (!msb->seg_count) {
711 unsigned int bytes = blk_rq_cur_bytes(msb->block_req); 711 unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
712 bool chunk;
712 713
713 chunk = blk_update_request(msb->block_req, 714 chunk = blk_update_request(msb->block_req,
714 BLK_STS_RESOURCE, 715 BLK_STS_RESOURCE,
@@ -718,7 +719,7 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
718 __blk_mq_end_request(msb->block_req, 719 __blk_mq_end_request(msb->block_req,
719 BLK_STS_RESOURCE); 720 BLK_STS_RESOURCE);
720 msb->block_req = NULL; 721 msb->block_req = NULL;
721 break; 722 return -EAGAIN;
722 } 723 }
723 724
724 t_off = blk_rq_pos(msb->block_req); 725 t_off = blk_rq_pos(msb->block_req);
@@ -735,8 +736,6 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
735 memstick_new_req(card->host); 736 memstick_new_req(card->host);
736 return 0; 737 return 0;
737 } 738 }
738
739 return 1;
740} 739}
741 740
742static int mspro_block_complete_req(struct memstick_dev *card, int error) 741static int mspro_block_complete_req(struct memstick_dev *card, int error)
@@ -779,7 +778,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
779 chunk = blk_update_request(msb->block_req, 778 chunk = blk_update_request(msb->block_req,
780 errno_to_blk_status(error), t_len); 779 errno_to_blk_status(error), t_len);
781 if (chunk) { 780 if (chunk) {
782 error = mspro_block_issue_req(card, chunk); 781 error = mspro_block_issue_req(card);
783 if (!error) 782 if (!error)
784 goto out; 783 goto out;
785 } else { 784 } else {
@@ -849,7 +848,7 @@ static blk_status_t mspro_queue_rq(struct blk_mq_hw_ctx *hctx,
849 msb->block_req = bd->rq; 848 msb->block_req = bd->rq;
850 blk_mq_start_request(bd->rq); 849 blk_mq_start_request(bd->rq);
851 850
852 if (mspro_block_issue_req(card, true)) 851 if (mspro_block_issue_req(card))
853 msb->block_req = NULL; 852 msb->block_req = NULL;
854 853
855 spin_unlock_irq(&msb->q_lock); 854 spin_unlock_irq(&msb->q_lock);
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 7618b65aab34..3bc51f19c734 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -772,6 +772,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
772 772
773 if ((m->addr == 0x0) || (m->size == 0)) 773 if ((m->addr == 0x0) || (m->size == 0))
774 return -EINVAL; 774 return -EINVAL;
775 if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
776 return -EINVAL;
775 777
776 map_addr = (m->addr & PAGE_MASK); 778 map_addr = (m->addr & PAGE_MASK);
777 map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); 779 map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 0ddc28961524..2e1c4d2905e8 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -578,6 +578,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
578 /* determine space needed for page_list. */ 578 /* determine space needed for page_list. */
579 data = (unsigned long)uaddr; 579 data = (unsigned long)uaddr;
580 offs = offset_in_page(data); 580 offs = offset_in_page(data);
581 if (size > ULONG_MAX - PAGE_SIZE - offs) {
582 m->size = 0; /* mark unused and not added */
583 return -EINVAL;
584 }
581 m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); 585 m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
582 586
583 m->page_list = kcalloc(m->nr_pages, 587 m->page_list = kcalloc(m->nr_pages,
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 4804cdcf4c48..f4c92f110a72 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -26,6 +26,12 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
26 dma_fence_put(ctx->cs_pending[i]); 26 dma_fence_put(ctx->cs_pending[i]);
27 27
28 if (ctx->asid != HL_KERNEL_ASID_ID) { 28 if (ctx->asid != HL_KERNEL_ASID_ID) {
29 /*
30 * The engines are stopped as there is no executing CS, but the
31 * Coresight might be still working by accessing addresses
32 * related to the stopped engines. Hence stop it explicitly.
33 */
34 hdev->asic_funcs->halt_coresight(hdev);
29 hl_vm_ctx_fini(ctx); 35 hl_vm_ctx_fini(ctx);
30 hl_asid_free(hdev, ctx->asid); 36 hl_asid_free(hdev, ctx->asid);
31 } 37 }
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a4447699ff4e..ba418aaa404c 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -459,41 +459,31 @@ static ssize_t mmu_write(struct file *file, const char __user *buf,
459 struct hl_debugfs_entry *entry = s->private; 459 struct hl_debugfs_entry *entry = s->private;
460 struct hl_dbg_device_entry *dev_entry = entry->dev_entry; 460 struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
461 struct hl_device *hdev = dev_entry->hdev; 461 struct hl_device *hdev = dev_entry->hdev;
462 char kbuf[MMU_KBUF_SIZE], asid_kbuf[MMU_ASID_BUF_SIZE], 462 char kbuf[MMU_KBUF_SIZE];
463 addr_kbuf[MMU_ADDR_BUF_SIZE];
464 char *c; 463 char *c;
465 ssize_t rc; 464 ssize_t rc;
466 465
467 if (!hdev->mmu_enable) 466 if (!hdev->mmu_enable)
468 return count; 467 return count;
469 468
470 memset(kbuf, 0, sizeof(kbuf)); 469 if (count > sizeof(kbuf) - 1)
471 memset(asid_kbuf, 0, sizeof(asid_kbuf)); 470 goto err;
472 memset(addr_kbuf, 0, sizeof(addr_kbuf));
473
474 if (copy_from_user(kbuf, buf, count)) 471 if (copy_from_user(kbuf, buf, count))
475 goto err; 472 goto err;
476 473 kbuf[count] = 0;
477 kbuf[MMU_KBUF_SIZE - 1] = 0;
478 474
479 c = strchr(kbuf, ' '); 475 c = strchr(kbuf, ' ');
480 if (!c) 476 if (!c)
481 goto err; 477 goto err;
478 *c = '\0';
482 479
483 memcpy(asid_kbuf, kbuf, c - kbuf); 480 rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
484
485 rc = kstrtouint(asid_kbuf, 10, &dev_entry->mmu_asid);
486 if (rc) 481 if (rc)
487 goto err; 482 goto err;
488 483
489 c = strstr(kbuf, " 0x"); 484 if (strncmp(c+1, "0x", 2))
490 if (!c)
491 goto err; 485 goto err;
492 486 rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
493 c += 3;
494 memcpy(addr_kbuf, c, (kbuf + count) - c);
495
496 rc = kstrtoull(addr_kbuf, 16, &dev_entry->mmu_addr);
497 if (rc) 487 if (rc)
498 goto err; 488 goto err;
499 489
@@ -510,6 +500,7 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
510{ 500{
511 struct hl_ctx *ctx = hdev->user_ctx; 501 struct hl_ctx *ctx = hdev->user_ctx;
512 u64 hop_addr, hop_pte_addr, hop_pte; 502 u64 hop_addr, hop_pte_addr, hop_pte;
503 u64 offset_mask = HOP4_MASK | OFFSET_MASK;
513 int rc = 0; 504 int rc = 0;
514 505
515 if (!ctx) { 506 if (!ctx) {
@@ -552,12 +543,14 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
552 goto not_mapped; 543 goto not_mapped;
553 hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr); 544 hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
554 hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); 545 hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
546
547 offset_mask = OFFSET_MASK;
555 } 548 }
556 549
557 if (!(hop_pte & PAGE_PRESENT_MASK)) 550 if (!(hop_pte & PAGE_PRESENT_MASK))
558 goto not_mapped; 551 goto not_mapped;
559 552
560 *phys_addr = (hop_pte & PTE_PHYS_ADDR_MASK) | (virt_addr & OFFSET_MASK); 553 *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask);
561 554
562 goto out; 555 goto out;
563 556
@@ -600,10 +593,8 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
600 } 593 }
601 594
602 sprintf(tmp_buf, "0x%08x\n", val); 595 sprintf(tmp_buf, "0x%08x\n", val);
603 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, 596 return simple_read_from_buffer(buf, count, ppos, tmp_buf,
604 strlen(tmp_buf) + 1); 597 strlen(tmp_buf));
605
606 return rc;
607} 598}
608 599
609static ssize_t hl_data_write32(struct file *f, const char __user *buf, 600static ssize_t hl_data_write32(struct file *f, const char __user *buf,
@@ -645,7 +636,6 @@ static ssize_t hl_get_power_state(struct file *f, char __user *buf,
645 struct hl_dbg_device_entry *entry = file_inode(f)->i_private; 636 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
646 struct hl_device *hdev = entry->hdev; 637 struct hl_device *hdev = entry->hdev;
647 char tmp_buf[200]; 638 char tmp_buf[200];
648 ssize_t rc;
649 int i; 639 int i;
650 640
651 if (*ppos) 641 if (*ppos)
@@ -660,10 +650,8 @@ static ssize_t hl_get_power_state(struct file *f, char __user *buf,
660 650
661 sprintf(tmp_buf, 651 sprintf(tmp_buf,
662 "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i); 652 "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
663 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, 653 return simple_read_from_buffer(buf, count, ppos, tmp_buf,
664 strlen(tmp_buf) + 1); 654 strlen(tmp_buf));
665
666 return rc;
667} 655}
668 656
669static ssize_t hl_set_power_state(struct file *f, const char __user *buf, 657static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
@@ -716,8 +704,8 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
716 } 704 }
717 705
718 sprintf(tmp_buf, "0x%02x\n", val); 706 sprintf(tmp_buf, "0x%02x\n", val);
719 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, 707 rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
720 strlen(tmp_buf) + 1); 708 strlen(tmp_buf));
721 709
722 return rc; 710 return rc;
723} 711}
@@ -806,18 +794,9 @@ static ssize_t hl_led2_write(struct file *f, const char __user *buf,
806static ssize_t hl_device_read(struct file *f, char __user *buf, 794static ssize_t hl_device_read(struct file *f, char __user *buf,
807 size_t count, loff_t *ppos) 795 size_t count, loff_t *ppos)
808{ 796{
809 char tmp_buf[200]; 797 static const char *help =
810 ssize_t rc; 798 "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
811 799 return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
812 if (*ppos)
813 return 0;
814
815 sprintf(tmp_buf,
816 "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
817 rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
818 strlen(tmp_buf) + 1);
819
820 return rc;
821} 800}
822 801
823static ssize_t hl_device_write(struct file *f, const char __user *buf, 802static ssize_t hl_device_write(struct file *f, const char __user *buf,
@@ -825,7 +804,7 @@ static ssize_t hl_device_write(struct file *f, const char __user *buf,
825{ 804{
826 struct hl_dbg_device_entry *entry = file_inode(f)->i_private; 805 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
827 struct hl_device *hdev = entry->hdev; 806 struct hl_device *hdev = entry->hdev;
828 char data[30]; 807 char data[30] = {0};
829 808
830 /* don't allow partial writes */ 809 /* don't allow partial writes */
831 if (*ppos != 0) 810 if (*ppos != 0)
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 91a9e47a3482..0b19d3eefb98 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -231,6 +231,7 @@ static int device_early_init(struct hl_device *hdev)
231 231
232 mutex_init(&hdev->fd_open_cnt_lock); 232 mutex_init(&hdev->fd_open_cnt_lock);
233 mutex_init(&hdev->send_cpu_message_lock); 233 mutex_init(&hdev->send_cpu_message_lock);
234 mutex_init(&hdev->mmu_cache_lock);
234 INIT_LIST_HEAD(&hdev->hw_queues_mirror_list); 235 INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
235 spin_lock_init(&hdev->hw_queues_mirror_lock); 236 spin_lock_init(&hdev->hw_queues_mirror_lock);
236 atomic_set(&hdev->in_reset, 0); 237 atomic_set(&hdev->in_reset, 0);
@@ -260,6 +261,7 @@ early_fini:
260 */ 261 */
261static void device_early_fini(struct hl_device *hdev) 262static void device_early_fini(struct hl_device *hdev)
262{ 263{
264 mutex_destroy(&hdev->mmu_cache_lock);
263 mutex_destroy(&hdev->send_cpu_message_lock); 265 mutex_destroy(&hdev->send_cpu_message_lock);
264 266
265 hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); 267 hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index a582e29c1ee4..02d116b01a1a 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -4819,7 +4819,8 @@ static const struct hl_asic_funcs goya_funcs = {
4819 .set_dram_bar_base = goya_set_ddr_bar_base, 4819 .set_dram_bar_base = goya_set_ddr_bar_base,
4820 .init_iatu = goya_init_iatu, 4820 .init_iatu = goya_init_iatu,
4821 .rreg = hl_rreg, 4821 .rreg = hl_rreg,
4822 .wreg = hl_wreg 4822 .wreg = hl_wreg,
4823 .halt_coresight = goya_halt_coresight
4823}; 4824};
4824 4825
4825/* 4826/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 14e216cb3668..c83cab0d641e 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -202,6 +202,7 @@ void goya_add_device_attr(struct hl_device *hdev,
202 struct attribute_group *dev_attr_grp); 202 struct attribute_group *dev_attr_grp);
203int goya_armcp_info_get(struct hl_device *hdev); 203int goya_armcp_info_get(struct hl_device *hdev);
204int goya_debug_coresight(struct hl_device *hdev, void *data); 204int goya_debug_coresight(struct hl_device *hdev, void *data);
205void goya_halt_coresight(struct hl_device *hdev);
205 206
206void goya_mmu_prepare(struct hl_device *hdev, u32 asid); 207void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
207int goya_mmu_clear_pgt_range(struct hl_device *hdev); 208int goya_mmu_clear_pgt_range(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 1ac951f52d1e..d7ec7ad84cc6 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -425,8 +425,18 @@ static int goya_config_etr(struct hl_device *hdev,
425 WREG32(base_reg + 0x28, 0); 425 WREG32(base_reg + 0x28, 0);
426 WREG32(base_reg + 0x304, 0); 426 WREG32(base_reg + 0x304, 0);
427 427
428 if (params->output_size >= sizeof(u32)) 428 if (params->output_size >= sizeof(u64)) {
429 *(u32 *) params->output = RREG32(base_reg + 0x18); 429 u32 rwp, rwphi;
430
431 /*
432 * The trace buffer address is 40 bits wide. The end of
433 * the buffer is set in the RWP register (lower 32
434 * bits), and in the RWPHI register (upper 8 bits).
435 */
436 rwp = RREG32(base_reg + 0x18);
437 rwphi = RREG32(base_reg + 0x3c) & 0xff;
438 *(u64 *) params->output = ((u64) rwphi << 32) | rwp;
439 }
430 } 440 }
431 441
432 return 0; 442 return 0;
@@ -626,3 +636,20 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
626 636
627 return rc; 637 return rc;
628} 638}
639
640void goya_halt_coresight(struct hl_device *hdev)
641{
642 struct hl_debug_params params = {};
643 int i, rc;
644
645 for (i = GOYA_ETF_FIRST ; i <= GOYA_ETF_LAST ; i++) {
646 params.reg_idx = i;
647 rc = goya_config_etf(hdev, &params);
648 if (rc)
649 dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
650 }
651
652 rc = goya_config_etr(hdev, &params);
653 if (rc)
654 dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
655}
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 71243b319920..adef7d9d7488 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -501,6 +501,7 @@ enum hl_pll_frequency {
501 * @init_iatu: Initialize the iATU unit inside the PCI controller. 501 * @init_iatu: Initialize the iATU unit inside the PCI controller.
502 * @rreg: Read a register. Needed for simulator support. 502 * @rreg: Read a register. Needed for simulator support.
503 * @wreg: Write a register. Needed for simulator support. 503 * @wreg: Write a register. Needed for simulator support.
504 * @halt_coresight: stop the ETF and ETR traces.
504 */ 505 */
505struct hl_asic_funcs { 506struct hl_asic_funcs {
506 int (*early_init)(struct hl_device *hdev); 507 int (*early_init)(struct hl_device *hdev);
@@ -578,6 +579,7 @@ struct hl_asic_funcs {
578 int (*init_iatu)(struct hl_device *hdev); 579 int (*init_iatu)(struct hl_device *hdev);
579 u32 (*rreg)(struct hl_device *hdev, u32 reg); 580 u32 (*rreg)(struct hl_device *hdev, u32 reg);
580 void (*wreg)(struct hl_device *hdev, u32 reg, u32 val); 581 void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
582 void (*halt_coresight)(struct hl_device *hdev);
581}; 583};
582 584
583 585
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index d67d24c13efd..693877e37fd8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -675,11 +675,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
675 675
676 total_npages += npages; 676 total_npages += npages;
677 677
678 if (first) {
679 first = false;
680 dma_addr &= PAGE_MASK_2MB;
681 }
682
683 if ((npages % PGS_IN_2MB_PAGE) || 678 if ((npages % PGS_IN_2MB_PAGE) ||
684 (dma_addr & (PAGE_SIZE_2MB - 1))) 679 (dma_addr & (PAGE_SIZE_2MB - 1)))
685 is_huge_page_opt = false; 680 is_huge_page_opt = false;
@@ -704,7 +699,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
704 phys_pg_pack->total_size = total_npages * page_size; 699 phys_pg_pack->total_size = total_npages * page_size;
705 700
706 j = 0; 701 j = 0;
707 first = true;
708 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { 702 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
709 npages = get_sg_info(sg, &dma_addr); 703 npages = get_sg_info(sg, &dma_addr);
710 704
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 533d9315b6fb..10aee3141444 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -404,15 +404,12 @@ int hl_mmu_init(struct hl_device *hdev)
404 404
405 /* MMU H/W init was already done in device hw_init() */ 405 /* MMU H/W init was already done in device hw_init() */
406 406
407 mutex_init(&hdev->mmu_cache_lock);
408
409 hdev->mmu_pgt_pool = 407 hdev->mmu_pgt_pool =
410 gen_pool_create(__ffs(prop->mmu_hop_table_size), -1); 408 gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
411 409
412 if (!hdev->mmu_pgt_pool) { 410 if (!hdev->mmu_pgt_pool) {
413 dev_err(hdev->dev, "Failed to create page gen pool\n"); 411 dev_err(hdev->dev, "Failed to create page gen pool\n");
414 rc = -ENOMEM; 412 return -ENOMEM;
415 goto err_pool_create;
416 } 413 }
417 414
418 rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr + 415 rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
@@ -436,8 +433,6 @@ int hl_mmu_init(struct hl_device *hdev)
436 433
437err_pool_add: 434err_pool_add:
438 gen_pool_destroy(hdev->mmu_pgt_pool); 435 gen_pool_destroy(hdev->mmu_pgt_pool);
439err_pool_create:
440 mutex_destroy(&hdev->mmu_cache_lock);
441 436
442 return rc; 437 return rc;
443} 438}
@@ -459,7 +454,6 @@ void hl_mmu_fini(struct hl_device *hdev)
459 454
460 kvfree(hdev->mmu_shadow_hop0); 455 kvfree(hdev->mmu_shadow_hop0);
461 gen_pool_destroy(hdev->mmu_pgt_pool); 456 gen_pool_destroy(hdev->mmu_pgt_pool);
462 mutex_destroy(&hdev->mmu_cache_lock);
463 457
464 /* MMU H/W fini will be done in device hw_fini() */ 458 /* MMU H/W fini will be done in device hw_fini() */
465} 459}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 7eebbdfbcacd..17f839dee976 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -32,12 +32,20 @@ static int recur_count = REC_NUM_DEFAULT;
32 32
33static DEFINE_SPINLOCK(lock_me_up); 33static DEFINE_SPINLOCK(lock_me_up);
34 34
35static int recursive_loop(int remaining) 35/*
36 * Make sure compiler does not optimize this function or stack frame away:
37 * - function marked noinline
38 * - stack variables are marked volatile
39 * - stack variables are written (memset()) and read (pr_info())
40 * - function has external effects (pr_info())
41 * */
42static int noinline recursive_loop(int remaining)
36{ 43{
37 char buf[REC_STACK_SIZE]; 44 volatile char buf[REC_STACK_SIZE];
38 45
39 /* Make sure compiler does not optimize this away. */ 46 memset((void *)buf, remaining & 0xFF, sizeof(buf));
40 memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE); 47 pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
48 recur_count);
41 if (!remaining) 49 if (!remaining)
42 return 0; 50 return 0;
43 else 51 else
@@ -81,9 +89,12 @@ void lkdtm_LOOP(void)
81 ; 89 ;
82} 90}
83 91
84void lkdtm_OVERFLOW(void) 92void lkdtm_EXHAUST_STACK(void)
85{ 93{
86 (void) recursive_loop(recur_count); 94 pr_info("Calling function with %d frame size to depth %d ...\n",
95 REC_STACK_SIZE, recur_count);
96 recursive_loop(recur_count);
97 pr_info("FAIL: survived without exhausting stack?!\n");
87} 98}
88 99
89static noinline void __lkdtm_CORRUPT_STACK(void *stack) 100static noinline void __lkdtm_CORRUPT_STACK(void *stack)
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 1972dad966f5..8a1428d4f138 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -106,12 +106,12 @@ static const struct crashtype crashtypes[] = {
106 CRASHTYPE(WARNING), 106 CRASHTYPE(WARNING),
107 CRASHTYPE(EXCEPTION), 107 CRASHTYPE(EXCEPTION),
108 CRASHTYPE(LOOP), 108 CRASHTYPE(LOOP),
109 CRASHTYPE(OVERFLOW), 109 CRASHTYPE(EXHAUST_STACK),
110 CRASHTYPE(CORRUPT_STACK),
111 CRASHTYPE(CORRUPT_STACK_STRONG),
110 CRASHTYPE(CORRUPT_LIST_ADD), 112 CRASHTYPE(CORRUPT_LIST_ADD),
111 CRASHTYPE(CORRUPT_LIST_DEL), 113 CRASHTYPE(CORRUPT_LIST_DEL),
112 CRASHTYPE(CORRUPT_USER_DS), 114 CRASHTYPE(CORRUPT_USER_DS),
113 CRASHTYPE(CORRUPT_STACK),
114 CRASHTYPE(CORRUPT_STACK_STRONG),
115 CRASHTYPE(STACK_GUARD_PAGE_LEADING), 115 CRASHTYPE(STACK_GUARD_PAGE_LEADING),
116 CRASHTYPE(STACK_GUARD_PAGE_TRAILING), 116 CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
117 CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), 117 CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index b69ee004a3f7..23dc565b4307 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -13,7 +13,7 @@ void lkdtm_BUG(void);
13void lkdtm_WARNING(void); 13void lkdtm_WARNING(void);
14void lkdtm_EXCEPTION(void); 14void lkdtm_EXCEPTION(void);
15void lkdtm_LOOP(void); 15void lkdtm_LOOP(void);
16void lkdtm_OVERFLOW(void); 16void lkdtm_EXHAUST_STACK(void);
17void lkdtm_CORRUPT_STACK(void); 17void lkdtm_CORRUPT_STACK(void);
18void lkdtm_CORRUPT_STACK_STRONG(void); 18void lkdtm_CORRUPT_STACK_STRONG(void);
19void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void); 19void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index d5a0e7f1813b..e172719dd86d 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -324,14 +324,16 @@ free_user:
324 324
325void lkdtm_USERCOPY_KERNEL_DS(void) 325void lkdtm_USERCOPY_KERNEL_DS(void)
326{ 326{
327 char __user *user_ptr = (char __user *)ERR_PTR(-EINVAL); 327 char __user *user_ptr =
328 (char __user *)(0xFUL << (sizeof(unsigned long) * 8 - 4));
328 mm_segment_t old_fs = get_fs(); 329 mm_segment_t old_fs = get_fs();
329 char buf[10] = {0}; 330 char buf[10] = {0};
330 331
331 pr_info("attempting copy_to_user on unmapped kernel address\n"); 332 pr_info("attempting copy_to_user() to noncanonical address: %px\n",
333 user_ptr);
332 set_fs(KERNEL_DS); 334 set_fs(KERNEL_DS);
333 if (copy_to_user(user_ptr, buf, sizeof(buf))) 335 if (copy_to_user(user_ptr, buf, sizeof(buf)) == 0)
334 pr_info("copy_to_user un unmapped kernel address failed\n"); 336 pr_err("copy_to_user() to noncanonical address succeeded!?\n");
335 set_fs(old_fs); 337 set_fs(old_fs);
336} 338}
337 339
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index b5b9c6142f08..92900a095796 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -377,6 +377,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
377 blk_queue_max_segment_size(mq->queue, 377 blk_queue_max_segment_size(mq->queue,
378 round_down(host->max_seg_size, block_size)); 378 round_down(host->max_seg_size, block_size));
379 379
380 dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
381
380 INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); 382 INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
381 INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); 383 INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
382 384
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c5a8af4ca76b..5582561586b4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -859,6 +859,9 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
859 if (WARN_ON(!host) || WARN_ON(!host->cmd)) 859 if (WARN_ON(!host) || WARN_ON(!host->cmd))
860 return IRQ_NONE; 860 return IRQ_NONE;
861 861
862 /* ack all raised interrupts */
863 writel(status, host->regs + SD_EMMC_STATUS);
864
862 cmd = host->cmd; 865 cmd = host->cmd;
863 data = cmd->data; 866 data = cmd->data;
864 cmd->error = 0; 867 cmd->error = 0;
@@ -905,9 +908,6 @@ out:
905 if (ret == IRQ_HANDLED) 908 if (ret == IRQ_HANDLED)
906 meson_mmc_request_done(host->mmc, cmd->mrq); 909 meson_mmc_request_done(host->mmc, cmd->mrq);
907 910
908 /* ack all raised interrupts */
909 writel(status, host->regs + SD_EMMC_STATUS);
910
911 return ret; 911 return ret;
912} 912}
913 913
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 1e85fb7f1025..781a3e106d9a 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -856,7 +856,7 @@ static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
856 } 856 }
857 857
858 if (!first_fail) { 858 if (!first_fail) {
859 WARN_ON("no edge detected, continue with hw tuned delay.\n"); 859 WARN(1, "no edge detected, continue with hw tuned delay.\n");
860 } else if (first_pass) { 860 } else if (first_pass) {
861 /* set tap location at fixed tap relative to the first edge */ 861 /* set tap location at fixed tap relative to the first edge */
862 edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2; 862 edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d128708924e4..59acf8e3331e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2133,6 +2133,17 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2133} 2133}
2134EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2134EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2135 2135
2136static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2137{
2138 struct sdhci_host *host = mmc_priv(mmc);
2139 unsigned long flags;
2140
2141 spin_lock_irqsave(&host->lock, flags);
2142 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2143 sdhci_enable_sdio_irq_nolock(host, true);
2144 spin_unlock_irqrestore(&host->lock, flags);
2145}
2146
2136int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2147int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2137 struct mmc_ios *ios) 2148 struct mmc_ios *ios)
2138{ 2149{
@@ -2581,6 +2592,7 @@ static const struct mmc_host_ops sdhci_ops = {
2581 .get_ro = sdhci_get_ro, 2592 .get_ro = sdhci_get_ro,
2582 .hw_reset = sdhci_hw_reset, 2593 .hw_reset = sdhci_hw_reset,
2583 .enable_sdio_irq = sdhci_enable_sdio_irq, 2594 .enable_sdio_irq = sdhci_enable_sdio_irq,
2595 .ack_sdio_irq = sdhci_ack_sdio_irq,
2584 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2596 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2585 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2597 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2586 .execute_tuning = sdhci_execute_tuning, 2598 .execute_tuning = sdhci_execute_tuning,
@@ -3083,8 +3095,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
3083 if ((intmask & SDHCI_INT_CARD_INT) && 3095 if ((intmask & SDHCI_INT_CARD_INT) &&
3084 (host->ier & SDHCI_INT_CARD_INT)) { 3096 (host->ier & SDHCI_INT_CARD_INT)) {
3085 sdhci_enable_sdio_irq_nolock(host, false); 3097 sdhci_enable_sdio_irq_nolock(host, false);
3086 host->thread_isr |= SDHCI_INT_CARD_INT; 3098 sdio_signal_irq(host->mmc);
3087 result = IRQ_WAKE_THREAD;
3088 } 3099 }
3089 3100
3090 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3101 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
@@ -3156,15 +3167,6 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3156 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3167 mmc_detect_change(mmc, msecs_to_jiffies(200));
3157 } 3168 }
3158 3169
3159 if (isr & SDHCI_INT_CARD_INT) {
3160 sdio_run_irqs(host->mmc);
3161
3162 spin_lock_irqsave(&host->lock, flags);
3163 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3164 sdhci_enable_sdio_irq_nolock(host, true);
3165 spin_unlock_irqrestore(&host->lock, flags);
3166 }
3167
3168 return IRQ_HANDLED; 3170 return IRQ_HANDLED;
3169} 3171}
3170 3172
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index a91c0b45c48d..3222ea4d584d 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -231,7 +231,7 @@ static int sdhci_am654_init(struct sdhci_host *host)
231 ctl_cfg_2 = SLOTTYPE_EMBEDDED; 231 ctl_cfg_2 = SLOTTYPE_EMBEDDED;
232 232
233 regmap_update_bits(sdhci_am654->base, CTL_CFG_2, 233 regmap_update_bits(sdhci_am654->base, CTL_CFG_2,
234 ctl_cfg_2, SLOTTYPE_MASK); 234 SLOTTYPE_MASK, ctl_cfg_2);
235 235
236 return sdhci_add_host(host); 236 return sdhci_add_host(host);
237} 237}
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 130b91cb0f8a..84cb7d2aacdf 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -842,8 +842,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
842 if (mrq->cmd->error || (mrq->data && mrq->data->error)) 842 if (mrq->cmd->error || (mrq->data && mrq->data->error))
843 tmio_mmc_abort_dma(host); 843 tmio_mmc_abort_dma(host);
844 844
845 /* SCC error means retune, but executed command was still successful */
845 if (host->check_scc_error && host->check_scc_error(host)) 846 if (host->check_scc_error && host->check_scc_error(host))
846 mrq->cmd->error = -EILSEQ; 847 mmc_retune_needed(host->mmc);
847 848
848 /* If SET_BLOCK_COUNT, continue with main command */ 849 /* If SET_BLOCK_COUNT, continue with main command */
849 if (host->mrq && !mrq->cmd->error) { 850 if (host->mrq && !mrq->cmd->error) {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 42da3f1bff5b..063c7a671b41 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1388,7 +1388,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
1388 int err; 1388 int err;
1389 1389
1390 if (!vid) 1390 if (!vid)
1391 return -EINVAL; 1391 return -EOPNOTSUPP;
1392 1392
1393 entry->vid = vid - 1; 1393 entry->vid = vid - 1;
1394 entry->valid = false; 1394 entry->valid = false;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 0663b78a2f6c..1c3959efebc4 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -652,16 +652,6 @@ static int sja1105_speed[] = {
652 [SJA1105_SPEED_1000MBPS] = 1000, 652 [SJA1105_SPEED_1000MBPS] = 1000,
653}; 653};
654 654
655static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps)
656{
657 int i;
658
659 for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++)
660 if (sja1105_speed[i] == speed_mbps)
661 return i;
662 return -EINVAL;
663}
664
665/* Set link speed and enable/disable traffic I/O in the MAC configuration 655/* Set link speed and enable/disable traffic I/O in the MAC configuration
666 * for a specific port. 656 * for a specific port.
667 * 657 *
@@ -684,8 +674,21 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
684 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 674 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
685 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 675 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
686 676
687 speed = sja1105_get_speed_cfg(speed_mbps); 677 switch (speed_mbps) {
688 if (speed_mbps && speed < 0) { 678 case 0:
679 /* No speed update requested */
680 speed = SJA1105_SPEED_AUTO;
681 break;
682 case 10:
683 speed = SJA1105_SPEED_10MBPS;
684 break;
685 case 100:
686 speed = SJA1105_SPEED_100MBPS;
687 break;
688 case 1000:
689 speed = SJA1105_SPEED_1000MBPS;
690 break;
691 default:
689 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); 692 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
690 return -EINVAL; 693 return -EINVAL;
691 } 694 }
@@ -695,10 +698,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
695 * and we no longer need to store it in the static config (already told 698 * and we no longer need to store it in the static config (already told
696 * hardware we want auto during upload phase). 699 * hardware we want auto during upload phase).
697 */ 700 */
698 if (speed_mbps) 701 mac[port].speed = speed;
699 mac[port].speed = speed;
700 else
701 mac[port].speed = SJA1105_SPEED_AUTO;
702 702
703 /* On P/Q/R/S, one can read from the device via the MAC reconfiguration 703 /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
704 * tables. On E/T, MAC reconfig tables are not readable, only writable. 704 * tables. On E/T, MAC reconfig tables are not readable, only writable.
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index c38020dcbd3a..52646855495e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -332,13 +332,13 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
332{ 332{
333 u32 val; 333 u32 val;
334 int err = 0; 334 int err = 0;
335 bool is_locked;
336 335
337 is_locked = hw_atl_sem_ram_get(self); 336 err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
338 if (!is_locked) { 337 val, val == 1U,
339 err = -ETIME; 338 10U, 100000U);
339 if (err < 0)
340 goto err_exit; 340 goto err_exit;
341 } 341
342 if (IS_CHIP_FEATURE(REVISION_B1)) { 342 if (IS_CHIP_FEATURE(REVISION_B1)) {
343 u32 offset = 0; 343 u32 offset = 0;
344 344
@@ -350,8 +350,8 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
350 /* 1000 times by 10us = 10ms */ 350 /* 1000 times by 10us = 10ms */
351 err = readx_poll_timeout_atomic(hw_atl_scrpad12_get, 351 err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
352 self, val, 352 self, val,
353 (val & 0xF0000000) == 353 (val & 0xF0000000) !=
354 0x80000000, 354 0x80000000,
355 10U, 10000U); 355 10U, 10000U);
356 } 356 }
357 } else { 357 } else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index f1cea9bea27f..da726489e3c8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -381,7 +381,7 @@ static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
381 err = readx_poll_timeout_atomic(aq_fw2x_state2_get, 381 err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
382 self, val, 382 self, val,
383 val & HW_ATL_FW2X_CTRL_SLEEP_PROXY, 383 val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
384 1U, 10000U); 384 1U, 100000U);
385 385
386err_exit: 386err_exit:
387 return err; 387 return err;
@@ -401,6 +401,8 @@ static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
401 401
402 msg = (struct fw2x_msg_wol *)rpc; 402 msg = (struct fw2x_msg_wol *)rpc;
403 403
404 memset(msg, 0, sizeof(*msg));
405
404 msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL; 406 msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
405 msg->magic_packet_enabled = true; 407 msg->magic_packet_enabled = true;
406 memcpy(msg->hw_addr, mac, ETH_ALEN); 408 memcpy(msg->hw_addr, mac, ETH_ALEN);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index d1df0a44f93c..717fccc2efba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -335,6 +335,7 @@ static int __lb_setup(struct net_device *ndev,
335static int __lb_up(struct net_device *ndev, 335static int __lb_up(struct net_device *ndev,
336 enum hnae_loop loop_mode) 336 enum hnae_loop loop_mode)
337{ 337{
338#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
338 struct hns_nic_priv *priv = netdev_priv(ndev); 339 struct hns_nic_priv *priv = netdev_priv(ndev);
339 struct hnae_handle *h = priv->ae_handle; 340 struct hnae_handle *h = priv->ae_handle;
340 int speed, duplex; 341 int speed, duplex;
@@ -361,6 +362,9 @@ static int __lb_up(struct net_device *ndev,
361 362
362 h->dev->ops->adjust_link(h, speed, duplex); 363 h->dev->ops->adjust_link(h, speed, duplex);
363 364
365 /* wait adjust link done and phy ready */
366 msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
367
364 return 0; 368 return 0;
365} 369}
366 370
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7a67e23a2c2b..d8e5241097a9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1304,8 +1304,8 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
1304 int i; 1304 int i;
1305 1305
1306 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) 1306 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
1307 memcpy(data + i * ETH_GSTRING_LEN, 1307 strscpy(data + i * ETH_GSTRING_LEN,
1308 &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); 1308 mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
1309 } 1309 }
1310} 1310}
1311 1311
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 96b53ff68c96..6cfffb64cd51 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1772,6 +1772,7 @@ static void mtk_poll_controller(struct net_device *dev)
1772 1772
1773static int mtk_start_dma(struct mtk_eth *eth) 1773static int mtk_start_dma(struct mtk_eth *eth)
1774{ 1774{
1775 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
1775 int err; 1776 int err;
1776 1777
1777 err = mtk_dma_init(eth); 1778 err = mtk_dma_init(eth);
@@ -1788,7 +1789,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
1788 MTK_QDMA_GLO_CFG); 1789 MTK_QDMA_GLO_CFG);
1789 1790
1790 mtk_w32(eth, 1791 mtk_w32(eth,
1791 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | 1792 MTK_RX_DMA_EN | rx_2b_offset |
1792 MTK_RX_BT_32DWORDS | MTK_MULTI_EN, 1793 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
1793 MTK_PDMA_GLO_CFG); 1794 MTK_PDMA_GLO_CFG);
1794 1795
@@ -2292,13 +2293,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2292 2293
2293 switch (cmd->cmd) { 2294 switch (cmd->cmd) {
2294 case ETHTOOL_GRXRINGS: 2295 case ETHTOOL_GRXRINGS:
2295 if (dev->features & NETIF_F_LRO) { 2296 if (dev->hw_features & NETIF_F_LRO) {
2296 cmd->data = MTK_MAX_RX_RING_NUM; 2297 cmd->data = MTK_MAX_RX_RING_NUM;
2297 ret = 0; 2298 ret = 0;
2298 } 2299 }
2299 break; 2300 break;
2300 case ETHTOOL_GRXCLSRLCNT: 2301 case ETHTOOL_GRXCLSRLCNT:
2301 if (dev->features & NETIF_F_LRO) { 2302 if (dev->hw_features & NETIF_F_LRO) {
2302 struct mtk_mac *mac = netdev_priv(dev); 2303 struct mtk_mac *mac = netdev_priv(dev);
2303 2304
2304 cmd->rule_cnt = mac->hwlro_ip_cnt; 2305 cmd->rule_cnt = mac->hwlro_ip_cnt;
@@ -2306,11 +2307,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2306 } 2307 }
2307 break; 2308 break;
2308 case ETHTOOL_GRXCLSRULE: 2309 case ETHTOOL_GRXCLSRULE:
2309 if (dev->features & NETIF_F_LRO) 2310 if (dev->hw_features & NETIF_F_LRO)
2310 ret = mtk_hwlro_get_fdir_entry(dev, cmd); 2311 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2311 break; 2312 break;
2312 case ETHTOOL_GRXCLSRLALL: 2313 case ETHTOOL_GRXCLSRLALL:
2313 if (dev->features & NETIF_F_LRO) 2314 if (dev->hw_features & NETIF_F_LRO)
2314 ret = mtk_hwlro_get_fdir_all(dev, cmd, 2315 ret = mtk_hwlro_get_fdir_all(dev, cmd,
2315 rule_locs); 2316 rule_locs);
2316 break; 2317 break;
@@ -2327,11 +2328,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2327 2328
2328 switch (cmd->cmd) { 2329 switch (cmd->cmd) {
2329 case ETHTOOL_SRXCLSRLINS: 2330 case ETHTOOL_SRXCLSRLINS:
2330 if (dev->features & NETIF_F_LRO) 2331 if (dev->hw_features & NETIF_F_LRO)
2331 ret = mtk_hwlro_add_ipaddr(dev, cmd); 2332 ret = mtk_hwlro_add_ipaddr(dev, cmd);
2332 break; 2333 break;
2333 case ETHTOOL_SRXCLSRLDEL: 2334 case ETHTOOL_SRXCLSRLDEL:
2334 if (dev->features & NETIF_F_LRO) 2335 if (dev->hw_features & NETIF_F_LRO)
2335 ret = mtk_hwlro_del_ipaddr(dev, cmd); 2336 ret = mtk_hwlro_del_ipaddr(dev, cmd);
2336 break; 2337 break;
2337 default: 2338 default:
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index a4a7ec0d2531..6d1c9ebae7cc 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -643,7 +643,7 @@ void cpsw_get_ringparam(struct net_device *ndev,
643 struct cpsw_common *cpsw = priv->cpsw; 643 struct cpsw_common *cpsw = priv->cpsw;
644 644
645 /* not supported */ 645 /* not supported */
646 ering->tx_max_pending = 0; 646 ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
647 ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); 647 ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
648 ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES; 648 ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
649 ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); 649 ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index cf38c392b9b6..1c96bed5a7c4 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -107,7 +107,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
107} 107}
108 108
109#define IPVLAN_FEATURES \ 109#define IPVLAN_FEATURES \
110 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 110 (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
111 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ 111 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
112 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ 112 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
113 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) 113 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9044b95d2afe..4c0616ba314d 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1073,6 +1073,7 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
1073int phylink_ethtool_ksettings_set(struct phylink *pl, 1073int phylink_ethtool_ksettings_set(struct phylink *pl,
1074 const struct ethtool_link_ksettings *kset) 1074 const struct ethtool_link_ksettings *kset)
1075{ 1075{
1076 __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
1076 struct ethtool_link_ksettings our_kset; 1077 struct ethtool_link_ksettings our_kset;
1077 struct phylink_link_state config; 1078 struct phylink_link_state config;
1078 int ret; 1079 int ret;
@@ -1083,11 +1084,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
1083 kset->base.autoneg != AUTONEG_ENABLE) 1084 kset->base.autoneg != AUTONEG_ENABLE)
1084 return -EINVAL; 1085 return -EINVAL;
1085 1086
1087 linkmode_copy(support, pl->supported);
1086 config = pl->link_config; 1088 config = pl->link_config;
1087 1089
1088 /* Mask out unsupported advertisements */ 1090 /* Mask out unsupported advertisements */
1089 linkmode_and(config.advertising, kset->link_modes.advertising, 1091 linkmode_and(config.advertising, kset->link_modes.advertising,
1090 pl->supported); 1092 support);
1091 1093
1092 /* FIXME: should we reject autoneg if phy/mac does not support it? */ 1094 /* FIXME: should we reject autoneg if phy/mac does not support it? */
1093 if (kset->base.autoneg == AUTONEG_DISABLE) { 1095 if (kset->base.autoneg == AUTONEG_DISABLE) {
@@ -1097,7 +1099,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
1097 * duplex. 1099 * duplex.
1098 */ 1100 */
1099 s = phy_lookup_setting(kset->base.speed, kset->base.duplex, 1101 s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
1100 pl->supported, false); 1102 support, false);
1101 if (!s) 1103 if (!s)
1102 return -EINVAL; 1104 return -EINVAL;
1103 1105
@@ -1126,7 +1128,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
1126 __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); 1128 __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
1127 } 1129 }
1128 1130
1129 if (phylink_validate(pl, pl->supported, &config)) 1131 if (phylink_validate(pl, support, &config))
1130 return -EINVAL; 1132 return -EINVAL;
1131 1133
1132 /* If autonegotiation is enabled, we must have an advertisement */ 1134 /* If autonegotiation is enabled, we must have an advertisement */
@@ -1576,6 +1578,7 @@ static int phylink_sfp_module_insert(void *upstream,
1576{ 1578{
1577 struct phylink *pl = upstream; 1579 struct phylink *pl = upstream;
1578 __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, }; 1580 __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
1581 __ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
1579 struct phylink_link_state config; 1582 struct phylink_link_state config;
1580 phy_interface_t iface; 1583 phy_interface_t iface;
1581 int ret = 0; 1584 int ret = 0;
@@ -1603,6 +1606,8 @@ static int phylink_sfp_module_insert(void *upstream,
1603 return ret; 1606 return ret;
1604 } 1607 }
1605 1608
1609 linkmode_copy(support1, support);
1610
1606 iface = sfp_select_interface(pl->sfp_bus, id, config.advertising); 1611 iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
1607 if (iface == PHY_INTERFACE_MODE_NA) { 1612 if (iface == PHY_INTERFACE_MODE_NA) {
1608 netdev_err(pl->netdev, 1613 netdev_err(pl->netdev,
@@ -1612,7 +1617,7 @@ static int phylink_sfp_module_insert(void *upstream,
1612 } 1617 }
1613 1618
1614 config.interface = iface; 1619 config.interface = iface;
1615 ret = phylink_validate(pl, support, &config); 1620 ret = phylink_validate(pl, support1, &config);
1616 if (ret) { 1621 if (ret) {
1617 netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n", 1622 netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
1618 phylink_an_mode_str(MLO_AN_INBAND), 1623 phylink_an_mode_str(MLO_AN_INBAND),
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index d4635c2178d1..71812be0ac64 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -281,6 +281,7 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
281{ 281{
282 struct i2c_msg msgs[2]; 282 struct i2c_msg msgs[2];
283 u8 bus_addr = a2 ? 0x51 : 0x50; 283 u8 bus_addr = a2 ? 0x51 : 0x50;
284 size_t this_len;
284 int ret; 285 int ret;
285 286
286 msgs[0].addr = bus_addr; 287 msgs[0].addr = bus_addr;
@@ -292,11 +293,26 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
292 msgs[1].len = len; 293 msgs[1].len = len;
293 msgs[1].buf = buf; 294 msgs[1].buf = buf;
294 295
295 ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs)); 296 while (len) {
296 if (ret < 0) 297 this_len = len;
297 return ret; 298 if (this_len > 16)
299 this_len = 16;
298 300
299 return ret == ARRAY_SIZE(msgs) ? len : 0; 301 msgs[1].len = this_len;
302
303 ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
304 if (ret < 0)
305 return ret;
306
307 if (ret != ARRAY_SIZE(msgs))
308 break;
309
310 msgs[1].buf += this_len;
311 dev_addr += this_len;
312 len -= this_len;
313 }
314
315 return msgs[1].buf - (u8 *)buf;
300} 316}
301 317
302static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, 318static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1b7c2afd84cb..120fb593d1da 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3400,7 +3400,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
3400{ 3400{
3401 struct nvme_ns *ns; 3401 struct nvme_ns *ns;
3402 __le32 *ns_list; 3402 __le32 *ns_list;
3403 unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); 3403 unsigned i, j, nsid, prev = 0;
3404 unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
3404 int ret = 0; 3405 int ret = 0;
3405 3406
3406 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3407 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f562154551ce..524d6bd6d095 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2513,6 +2513,12 @@ static void nvme_reset_work(struct work_struct *work)
2513 */ 2513 */
2514 dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; 2514 dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
2515 dev->ctrl.max_segments = NVME_MAX_SEGS; 2515 dev->ctrl.max_segments = NVME_MAX_SEGS;
2516
2517 /*
2518 * Don't limit the IOMMU merged segment size.
2519 */
2520 dma_set_max_seg_size(dev->dev, 0xffffffff);
2521
2516 mutex_unlock(&dev->shutdown_lock); 2522 mutex_unlock(&dev->shutdown_lock);
2517 2523
2518 /* 2524 /*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f383146e7d0f..97f668a39ae1 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -213,6 +213,11 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
213 if (!ring) 213 if (!ring)
214 return NULL; 214 return NULL;
215 215
216 /*
217 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
218 * lifetime. It's safe, since any chage in the underlying RDMA device
219 * will issue error recovery and queue re-creation.
220 */
216 for (i = 0; i < ib_queue_size; i++) { 221 for (i = 0; i < ib_queue_size; i++) {
217 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir)) 222 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
218 goto out_free_ring; 223 goto out_free_ring;
@@ -274,14 +279,9 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
274static void nvme_rdma_exit_request(struct blk_mq_tag_set *set, 279static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
275 struct request *rq, unsigned int hctx_idx) 280 struct request *rq, unsigned int hctx_idx)
276{ 281{
277 struct nvme_rdma_ctrl *ctrl = set->driver_data;
278 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 282 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
279 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
280 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
281 struct nvme_rdma_device *dev = queue->device;
282 283
283 nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command), 284 kfree(req->sqe.data);
284 DMA_TO_DEVICE);
285} 285}
286 286
287static int nvme_rdma_init_request(struct blk_mq_tag_set *set, 287static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
@@ -292,15 +292,11 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
292 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 292 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
293 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; 293 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
294 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; 294 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
295 struct nvme_rdma_device *dev = queue->device;
296 struct ib_device *ibdev = dev->dev;
297 int ret;
298 295
299 nvme_req(rq)->ctrl = &ctrl->ctrl; 296 nvme_req(rq)->ctrl = &ctrl->ctrl;
300 ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), 297 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
301 DMA_TO_DEVICE); 298 if (!req->sqe.data)
302 if (ret) 299 return -ENOMEM;
303 return ret;
304 300
305 req->queue = queue; 301 req->queue = queue;
306 302
@@ -641,34 +637,16 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
641{ 637{
642 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 638 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
643 struct ib_device *ibdev = ctrl->device->dev; 639 struct ib_device *ibdev = ctrl->device->dev;
644 unsigned int nr_io_queues; 640 unsigned int nr_io_queues, nr_default_queues;
641 unsigned int nr_read_queues, nr_poll_queues;
645 int i, ret; 642 int i, ret;
646 643
647 nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); 644 nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
648 645 min(opts->nr_io_queues, num_online_cpus()));
649 /* 646 nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
650 * we map queues according to the device irq vectors for 647 min(opts->nr_write_queues, num_online_cpus()));
651 * optimal locality so we don't need more queues than 648 nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
652 * completion vectors. 649 nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
653 */
654 nr_io_queues = min_t(unsigned int, nr_io_queues,
655 ibdev->num_comp_vectors);
656
657 if (opts->nr_write_queues) {
658 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
659 min(opts->nr_write_queues, nr_io_queues);
660 nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
661 } else {
662 ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
663 }
664
665 ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
666
667 if (opts->nr_poll_queues) {
668 ctrl->io_queues[HCTX_TYPE_POLL] =
669 min(opts->nr_poll_queues, num_online_cpus());
670 nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
671 }
672 650
673 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 651 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
674 if (ret) 652 if (ret)
@@ -681,6 +659,34 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
681 dev_info(ctrl->ctrl.device, 659 dev_info(ctrl->ctrl.device,
682 "creating %d I/O queues.\n", nr_io_queues); 660 "creating %d I/O queues.\n", nr_io_queues);
683 661
662 if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
663 /*
664 * separate read/write queues
665 * hand out dedicated default queues only after we have
666 * sufficient read queues.
667 */
668 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
669 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
670 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
671 min(nr_default_queues, nr_io_queues);
672 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
673 } else {
674 /*
675 * shared read/write queues
676 * either no write queues were requested, or we don't have
677 * sufficient queue count to have dedicated default queues.
678 */
679 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
680 min(nr_read_queues, nr_io_queues);
681 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
682 }
683
684 if (opts->nr_poll_queues && nr_io_queues) {
685 /* map dedicated poll queues only if we have queues left */
686 ctrl->io_queues[HCTX_TYPE_POLL] =
687 min(nr_poll_queues, nr_io_queues);
688 }
689
684 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 690 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
685 ret = nvme_rdma_alloc_queue(ctrl, i, 691 ret = nvme_rdma_alloc_queue(ctrl, i,
686 ctrl->ctrl.sqsize + 1); 692 ctrl->ctrl.sqsize + 1);
@@ -769,6 +775,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
769 775
770 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev); 776 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
771 777
778 /*
779 * Bind the async event SQE DMA mapping to the admin queue lifetime.
780 * It's safe, since any chage in the underlying RDMA device will issue
781 * error recovery and queue re-creation.
782 */
772 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, 783 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
773 sizeof(struct nvme_command), DMA_TO_DEVICE); 784 sizeof(struct nvme_command), DMA_TO_DEVICE);
774 if (error) 785 if (error)
@@ -1709,12 +1720,20 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1709 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); 1720 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
1710 1721
1711 dev = queue->device->dev; 1722 dev = queue->device->dev;
1723
1724 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
1725 sizeof(struct nvme_command),
1726 DMA_TO_DEVICE);
1727 err = ib_dma_mapping_error(dev, req->sqe.dma);
1728 if (unlikely(err))
1729 return BLK_STS_RESOURCE;
1730
1712 ib_dma_sync_single_for_cpu(dev, sqe->dma, 1731 ib_dma_sync_single_for_cpu(dev, sqe->dma,
1713 sizeof(struct nvme_command), DMA_TO_DEVICE); 1732 sizeof(struct nvme_command), DMA_TO_DEVICE);
1714 1733
1715 ret = nvme_setup_cmd(ns, rq, c); 1734 ret = nvme_setup_cmd(ns, rq, c);
1716 if (ret) 1735 if (ret)
1717 return ret; 1736 goto unmap_qe;
1718 1737
1719 blk_mq_start_request(rq); 1738 blk_mq_start_request(rq);
1720 1739
@@ -1739,10 +1758,16 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1739 } 1758 }
1740 1759
1741 return BLK_STS_OK; 1760 return BLK_STS_OK;
1761
1742err: 1762err:
1743 if (err == -ENOMEM || err == -EAGAIN) 1763 if (err == -ENOMEM || err == -EAGAIN)
1744 return BLK_STS_RESOURCE; 1764 ret = BLK_STS_RESOURCE;
1745 return BLK_STS_IOERR; 1765 else
1766 ret = BLK_STS_IOERR;
1767unmap_qe:
1768 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
1769 DMA_TO_DEVICE);
1770 return ret;
1746} 1771}
1747 1772
1748static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx) 1773static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
@@ -1755,25 +1780,36 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
1755static void nvme_rdma_complete_rq(struct request *rq) 1780static void nvme_rdma_complete_rq(struct request *rq)
1756{ 1781{
1757 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1782 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1783 struct nvme_rdma_queue *queue = req->queue;
1784 struct ib_device *ibdev = queue->device->dev;
1758 1785
1759 nvme_rdma_unmap_data(req->queue, rq); 1786 nvme_rdma_unmap_data(queue, rq);
1787 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
1788 DMA_TO_DEVICE);
1760 nvme_complete_rq(rq); 1789 nvme_complete_rq(rq);
1761} 1790}
1762 1791
1763static int nvme_rdma_map_queues(struct blk_mq_tag_set *set) 1792static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
1764{ 1793{
1765 struct nvme_rdma_ctrl *ctrl = set->driver_data; 1794 struct nvme_rdma_ctrl *ctrl = set->driver_data;
1795 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
1766 1796
1767 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; 1797 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
1768 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1769 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1770 set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
1771 if (ctrl->ctrl.opts->nr_write_queues) {
1772 /* separate read/write queues */ 1798 /* separate read/write queues */
1799 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1800 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1801 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
1802 set->map[HCTX_TYPE_READ].nr_queues =
1803 ctrl->io_queues[HCTX_TYPE_READ];
1773 set->map[HCTX_TYPE_READ].queue_offset = 1804 set->map[HCTX_TYPE_READ].queue_offset =
1774 ctrl->io_queues[HCTX_TYPE_DEFAULT]; 1805 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1775 } else { 1806 } else {
1776 /* mixed read/write queues */ 1807 /* shared read/write queues */
1808 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1809 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1810 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
1811 set->map[HCTX_TYPE_READ].nr_queues =
1812 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1777 set->map[HCTX_TYPE_READ].queue_offset = 0; 1813 set->map[HCTX_TYPE_READ].queue_offset = 0;
1778 } 1814 }
1779 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], 1815 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1781,16 +1817,22 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
1781 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ], 1817 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
1782 ctrl->device->dev, 0); 1818 ctrl->device->dev, 0);
1783 1819
1784 if (ctrl->ctrl.opts->nr_poll_queues) { 1820 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
1821 /* map dedicated poll queues only if we have queues left */
1785 set->map[HCTX_TYPE_POLL].nr_queues = 1822 set->map[HCTX_TYPE_POLL].nr_queues =
1786 ctrl->io_queues[HCTX_TYPE_POLL]; 1823 ctrl->io_queues[HCTX_TYPE_POLL];
1787 set->map[HCTX_TYPE_POLL].queue_offset = 1824 set->map[HCTX_TYPE_POLL].queue_offset =
1788 ctrl->io_queues[HCTX_TYPE_DEFAULT]; 1825 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1789 if (ctrl->ctrl.opts->nr_write_queues) 1826 ctrl->io_queues[HCTX_TYPE_READ];
1790 set->map[HCTX_TYPE_POLL].queue_offset +=
1791 ctrl->io_queues[HCTX_TYPE_READ];
1792 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); 1827 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
1793 } 1828 }
1829
1830 dev_info(ctrl->ctrl.device,
1831 "mapped %d/%d/%d default/read/poll queues.\n",
1832 ctrl->io_queues[HCTX_TYPE_DEFAULT],
1833 ctrl->io_queues[HCTX_TYPE_READ],
1834 ctrl->io_queues[HCTX_TYPE_POLL]);
1835
1794 return 0; 1836 return 0;
1795} 1837}
1796 1838
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 2b107a1d152b..08a2501b9357 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -111,6 +111,7 @@ struct nvme_tcp_ctrl {
111 struct work_struct err_work; 111 struct work_struct err_work;
112 struct delayed_work connect_work; 112 struct delayed_work connect_work;
113 struct nvme_tcp_request async_req; 113 struct nvme_tcp_request async_req;
114 u32 io_queues[HCTX_MAX_TYPES];
114}; 115};
115 116
116static LIST_HEAD(nvme_tcp_ctrl_list); 117static LIST_HEAD(nvme_tcp_ctrl_list);
@@ -1564,6 +1565,35 @@ static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1564 return nr_io_queues; 1565 return nr_io_queues;
1565} 1566}
1566 1567
1568static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1569 unsigned int nr_io_queues)
1570{
1571 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1572 struct nvmf_ctrl_options *opts = nctrl->opts;
1573
1574 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1575 /*
1576 * separate read/write queues
1577 * hand out dedicated default queues only after we have
1578 * sufficient read queues.
1579 */
1580 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1581 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1582 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1583 min(opts->nr_write_queues, nr_io_queues);
1584 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1585 } else {
1586 /*
1587 * shared read/write queues
1588 * either no write queues were requested, or we don't have
1589 * sufficient queue count to have dedicated default queues.
1590 */
1591 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1592 min(opts->nr_io_queues, nr_io_queues);
1593 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1594 }
1595}
1596
1567static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) 1597static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1568{ 1598{
1569 unsigned int nr_io_queues; 1599 unsigned int nr_io_queues;
@@ -1581,6 +1611,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1581 dev_info(ctrl->device, 1611 dev_info(ctrl->device,
1582 "creating %d I/O queues.\n", nr_io_queues); 1612 "creating %d I/O queues.\n", nr_io_queues);
1583 1613
1614 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1615
1584 return __nvme_tcp_alloc_io_queues(ctrl); 1616 return __nvme_tcp_alloc_io_queues(ctrl);
1585} 1617}
1586 1618
@@ -2089,23 +2121,34 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2089static int nvme_tcp_map_queues(struct blk_mq_tag_set *set) 2121static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2090{ 2122{
2091 struct nvme_tcp_ctrl *ctrl = set->driver_data; 2123 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2124 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2092 2125
2093 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; 2126 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2094 set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
2095 if (ctrl->ctrl.opts->nr_write_queues) {
2096 /* separate read/write queues */ 2127 /* separate read/write queues */
2097 set->map[HCTX_TYPE_DEFAULT].nr_queues = 2128 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2098 ctrl->ctrl.opts->nr_write_queues; 2129 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2130 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2131 set->map[HCTX_TYPE_READ].nr_queues =
2132 ctrl->io_queues[HCTX_TYPE_READ];
2099 set->map[HCTX_TYPE_READ].queue_offset = 2133 set->map[HCTX_TYPE_READ].queue_offset =
2100 ctrl->ctrl.opts->nr_write_queues; 2134 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2101 } else { 2135 } else {
2102 /* mixed read/write queues */ 2136 /* shared read/write queues */
2103 set->map[HCTX_TYPE_DEFAULT].nr_queues = 2137 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2104 ctrl->ctrl.opts->nr_io_queues; 2138 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2139 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2140 set->map[HCTX_TYPE_READ].nr_queues =
2141 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2105 set->map[HCTX_TYPE_READ].queue_offset = 0; 2142 set->map[HCTX_TYPE_READ].queue_offset = 0;
2106 } 2143 }
2107 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 2144 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2108 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); 2145 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2146
2147 dev_info(ctrl->ctrl.device,
2148 "mapped %d/%d default/read queues.\n",
2149 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2150 ctrl->io_queues[HCTX_TYPE_READ]);
2151
2109 return 0; 2152 return 0;
2110} 2153}
2111 2154
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 3efc52f9c309..7a1cf6437a6a 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -293,6 +293,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
293 return 0; 293 return 0;
294 case nvme_cmd_write_zeroes: 294 case nvme_cmd_write_zeroes:
295 req->execute = nvmet_bdev_execute_write_zeroes; 295 req->execute = nvmet_bdev_execute_write_zeroes;
296 req->data_len = 0;
296 return 0; 297 return 0;
297 default: 298 default:
298 pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, 299 pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 121f7603a595..217f15aafa4a 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -562,14 +562,12 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
562 /* We currently only support kernel addresses */ 562 /* We currently only support kernel addresses */
563 BUG_ON(sid != KERNEL_SPACE); 563 BUG_ON(sid != KERNEL_SPACE);
564 564
565 mtsp(sid,1);
566
567 /* 565 /*
568 ** WORD 1 - low order word 566 ** WORD 1 - low order word
569 ** "hints" parm includes the VALID bit! 567 ** "hints" parm includes the VALID bit!
570 ** "dep" clobbers the physical address offset bits as well. 568 ** "dep" clobbers the physical address offset bits as well.
571 */ 569 */
572 pa = virt_to_phys(vba); 570 pa = lpa(vba);
573 asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints)); 571 asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
574 ((u32 *)pdir_ptr)[1] = (u32) pa; 572 ((u32 *)pdir_ptr)[1] = (u32) pa;
575 573
@@ -594,7 +592,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
594 ** Grab virtual index [0:11] 592 ** Grab virtual index [0:11]
595 ** Deposit virt_idx bits into I/O PDIR word 593 ** Deposit virt_idx bits into I/O PDIR word
596 */ 594 */
597 asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); 595 asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
598 asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci)); 596 asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
599 asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci)); 597 asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
600 598
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8a9ea9bd050c..296668caf7e5 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -569,11 +569,10 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
569 u64 pa; /* physical address */ 569 u64 pa; /* physical address */
570 register unsigned ci; /* coherent index */ 570 register unsigned ci; /* coherent index */
571 571
572 pa = virt_to_phys(vba); 572 pa = lpa(vba);
573 pa &= IOVP_MASK; 573 pa &= IOVP_MASK;
574 574
575 mtsp(sid,1); 575 asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
576 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
577 pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */ 576 pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
578 577
579 pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ 578 pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 5dc53d420ca8..7b4ee33c1935 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -895,6 +895,7 @@ parport_register_dev_model(struct parport *port, const char *name,
895 par_dev->devmodel = true; 895 par_dev->devmodel = true;
896 ret = device_register(&par_dev->dev); 896 ret = device_register(&par_dev->dev);
897 if (ret) { 897 if (ret) {
898 kfree(par_dev->state);
898 put_device(&par_dev->dev); 899 put_device(&par_dev->dev);
899 goto err_put_port; 900 goto err_put_port;
900 } 901 }
@@ -912,6 +913,7 @@ parport_register_dev_model(struct parport *port, const char *name,
912 spin_unlock(&port->physport->pardevice_lock); 913 spin_unlock(&port->physport->pardevice_lock);
913 pr_debug("%s: cannot grant exclusive access for device %s\n", 914 pr_debug("%s: cannot grant exclusive access for device %s\n",
914 port->name, name); 915 port->name, name);
916 kfree(par_dev->state);
915 device_unregister(&par_dev->dev); 917 device_unregister(&par_dev->dev);
916 goto err_put_port; 918 goto err_put_port;
917 } 919 }
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 009f2c0ec504..b1823d75dd35 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1274,16 +1274,20 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
1274 return 0; 1274 return 0;
1275} 1275}
1276 1276
1277static void qeth_osa_set_output_queues(struct qeth_card *card, bool single) 1277static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1278{ 1278{
1279 unsigned int count = single ? 1 : card->dev->num_tx_queues; 1279 unsigned int count = single ? 1 : card->dev->num_tx_queues;
1280 int rc;
1280 1281
1281 rtnl_lock(); 1282 rtnl_lock();
1282 netif_set_real_num_tx_queues(card->dev, count); 1283 rc = netif_set_real_num_tx_queues(card->dev, count);
1283 rtnl_unlock(); 1284 rtnl_unlock();
1284 1285
1286 if (rc)
1287 return rc;
1288
1285 if (card->qdio.no_out_queues == count) 1289 if (card->qdio.no_out_queues == count)
1286 return; 1290 return 0;
1287 1291
1288 if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) 1292 if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1289 qeth_free_qdio_queues(card); 1293 qeth_free_qdio_queues(card);
@@ -1293,12 +1297,14 @@ static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1293 1297
1294 card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE; 1298 card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
1295 card->qdio.no_out_queues = count; 1299 card->qdio.no_out_queues = count;
1300 return 0;
1296} 1301}
1297 1302
1298static int qeth_update_from_chp_desc(struct qeth_card *card) 1303static int qeth_update_from_chp_desc(struct qeth_card *card)
1299{ 1304{
1300 struct ccw_device *ccwdev; 1305 struct ccw_device *ccwdev;
1301 struct channel_path_desc_fmt0 *chp_dsc; 1306 struct channel_path_desc_fmt0 *chp_dsc;
1307 int rc = 0;
1302 1308
1303 QETH_DBF_TEXT(SETUP, 2, "chp_desc"); 1309 QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1304 1310
@@ -1311,12 +1317,12 @@ static int qeth_update_from_chp_desc(struct qeth_card *card)
1311 1317
1312 if (IS_OSD(card) || IS_OSX(card)) 1318 if (IS_OSD(card) || IS_OSX(card))
1313 /* CHPP field bit 6 == 1 -> single queue */ 1319 /* CHPP field bit 6 == 1 -> single queue */
1314 qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02); 1320 rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1315 1321
1316 kfree(chp_dsc); 1322 kfree(chp_dsc);
1317 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); 1323 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1318 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); 1324 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1319 return 0; 1325 return rc;
1320} 1326}
1321 1327
1322static void qeth_init_qdio_info(struct qeth_card *card) 1328static void qeth_init_qdio_info(struct qeth_card *card)
@@ -5597,8 +5603,12 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5597 dev->hw_features |= NETIF_F_SG; 5603 dev->hw_features |= NETIF_F_SG;
5598 dev->vlan_features |= NETIF_F_SG; 5604 dev->vlan_features |= NETIF_F_SG;
5599 if (IS_IQD(card)) { 5605 if (IS_IQD(card)) {
5600 netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
5601 dev->features |= NETIF_F_SG; 5606 dev->features |= NETIF_F_SG;
5607 if (netif_set_real_num_tx_queues(dev,
5608 QETH_IQD_MIN_TXQ)) {
5609 free_netdev(dev);
5610 return NULL;
5611 }
5602 } 5612 }
5603 } 5613 }
5604 5614
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 218801232ca2..ff8a6cd790b1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1680,7 +1680,7 @@ static void qeth_bridgeport_an_set_cb(void *priv,
1680 1680
1681 l2entry = (struct qdio_brinfo_entry_l2 *)entry; 1681 l2entry = (struct qdio_brinfo_entry_l2 *)entry;
1682 code = IPA_ADDR_CHANGE_CODE_MACADDR; 1682 code = IPA_ADDR_CHANGE_CODE_MACADDR;
1683 if (l2entry->addr_lnid.lnid) 1683 if (l2entry->addr_lnid.lnid < VLAN_N_VID)
1684 code |= IPA_ADDR_CHANGE_CODE_VLANID; 1684 code |= IPA_ADDR_CHANGE_CODE_VLANID;
1685 qeth_bridge_emit_host_event(card, anev_reg_unreg, code, 1685 qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
1686 (struct net_if_token *)&l2entry->nit, 1686 (struct net_if_token *)&l2entry->nit,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0271833da6a2..13bf3e2e9cea 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1888,13 +1888,20 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1888 1888
1889static int qeth_l3_get_cast_type(struct sk_buff *skb) 1889static int qeth_l3_get_cast_type(struct sk_buff *skb)
1890{ 1890{
1891 int ipv = qeth_get_ip_version(skb);
1891 struct neighbour *n = NULL; 1892 struct neighbour *n = NULL;
1892 struct dst_entry *dst; 1893 struct dst_entry *dst;
1893 1894
1894 rcu_read_lock(); 1895 rcu_read_lock();
1895 dst = skb_dst(skb); 1896 dst = skb_dst(skb);
1896 if (dst) 1897 if (dst) {
1897 n = dst_neigh_lookup_skb(dst, skb); 1898 struct rt6_info *rt = (struct rt6_info *) dst;
1899
1900 dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
1901 if (dst)
1902 n = dst_neigh_lookup_skb(dst, skb);
1903 }
1904
1898 if (n) { 1905 if (n) {
1899 int cast_type = n->type; 1906 int cast_type = n->type;
1900 1907
@@ -1909,8 +1916,10 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
1909 rcu_read_unlock(); 1916 rcu_read_unlock();
1910 1917
1911 /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ 1918 /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
1912 switch (qeth_get_ip_version(skb)) { 1919 switch (ipv) {
1913 case 4: 1920 case 4:
1921 if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
1922 return RTN_BROADCAST;
1914 return ipv4_is_multicast(ip_hdr(skb)->daddr) ? 1923 return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
1915 RTN_MULTICAST : RTN_UNICAST; 1924 RTN_MULTICAST : RTN_UNICAST;
1916 case 6: 1925 case 6:
@@ -1940,6 +1949,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
1940 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 1949 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
1941 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 1950 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
1942 struct qeth_card *card = queue->card; 1951 struct qeth_card *card = queue->card;
1952 struct dst_entry *dst;
1943 1953
1944 hdr->hdr.l3.length = data_len; 1954 hdr->hdr.l3.length = data_len;
1945 1955
@@ -1985,15 +1995,27 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
1985 } 1995 }
1986 1996
1987 rcu_read_lock(); 1997 rcu_read_lock();
1998 dst = skb_dst(skb);
1999
1988 if (ipv == 4) { 2000 if (ipv == 4) {
1989 struct rtable *rt = skb_rtable(skb); 2001 struct rtable *rt;
2002
2003 if (dst)
2004 dst = dst_check(dst, 0);
2005 rt = (struct rtable *) dst;
1990 2006
1991 *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ? 2007 *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
1992 rt_nexthop(rt, ip_hdr(skb)->daddr) : 2008 rt_nexthop(rt, ip_hdr(skb)->daddr) :
1993 ip_hdr(skb)->daddr; 2009 ip_hdr(skb)->daddr;
1994 } else { 2010 } else {
1995 /* IPv6 */ 2011 /* IPv6 */
1996 const struct rt6_info *rt = skb_rt6_info(skb); 2012 struct rt6_info *rt;
2013
2014 if (dst) {
2015 rt = (struct rt6_info *) dst;
2016 dst = dst_check(dst, rt6_get_cookie(rt));
2017 }
2018 rt = (struct rt6_info *) dst;
1997 2019
1998 if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) 2020 if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
1999 l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway; 2021 l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index d6be4e8f4a8f..8fd5ffc55792 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -4046,8 +4046,10 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4046 return -ETIMEDOUT; 4046 return -ETIMEDOUT;
4047 msecs_blocked = 4047 msecs_blocked =
4048 jiffies_to_msecs(jiffies - start_jiffies); 4048 jiffies_to_msecs(jiffies - start_jiffies);
4049 if (msecs_blocked >= timeout_msecs) 4049 if (msecs_blocked >= timeout_msecs) {
4050 return -ETIMEDOUT; 4050 rc = -ETIMEDOUT;
4051 goto out;
4052 }
4051 timeout_msecs -= msecs_blocked; 4053 timeout_msecs -= msecs_blocked;
4052 } 4054 }
4053 } 4055 }
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8c1c551f2b42..3fe3029617a8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1917,7 +1917,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1917 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); 1917 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1918 1918
1919 /* Get the descriptor */ 1919 /* Get the descriptor */
1920 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { 1920 if (hba->dev_cmd.query.descriptor &&
1921 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1921 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + 1922 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1922 GENERAL_UPIU_REQUEST_SIZE; 1923 GENERAL_UPIU_REQUEST_SIZE;
1923 u16 resp_len; 1924 u16 resp_len;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df51a35cf537..2d9df786a9d3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -604,12 +604,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
604 return iov_iter_count(iter); 604 return iov_iter_count(iter);
605} 605}
606 606
607static bool vhost_exceeds_weight(int pkts, int total_len)
608{
609 return total_len >= VHOST_NET_WEIGHT ||
610 pkts >= VHOST_NET_PKT_WEIGHT;
611}
612
613static int get_tx_bufs(struct vhost_net *net, 607static int get_tx_bufs(struct vhost_net *net,
614 struct vhost_net_virtqueue *nvq, 608 struct vhost_net_virtqueue *nvq,
615 struct msghdr *msg, 609 struct msghdr *msg,
@@ -779,7 +773,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
779 int sent_pkts = 0; 773 int sent_pkts = 0;
780 bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX); 774 bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
781 775
782 for (;;) { 776 do {
783 bool busyloop_intr = false; 777 bool busyloop_intr = false;
784 778
785 if (nvq->done_idx == VHOST_NET_BATCH) 779 if (nvq->done_idx == VHOST_NET_BATCH)
@@ -845,11 +839,7 @@ done:
845 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); 839 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
846 vq->heads[nvq->done_idx].len = 0; 840 vq->heads[nvq->done_idx].len = 0;
847 ++nvq->done_idx; 841 ++nvq->done_idx;
848 if (vhost_exceeds_weight(++sent_pkts, total_len)) { 842 } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
849 vhost_poll_queue(&vq->poll);
850 break;
851 }
852 }
853 843
854 vhost_tx_batch(net, nvq, sock, &msg); 844 vhost_tx_batch(net, nvq, sock, &msg);
855} 845}
@@ -874,7 +864,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
874 bool zcopy_used; 864 bool zcopy_used;
875 int sent_pkts = 0; 865 int sent_pkts = 0;
876 866
877 for (;;) { 867 do {
878 bool busyloop_intr; 868 bool busyloop_intr;
879 869
880 /* Release DMAs done buffers first */ 870 /* Release DMAs done buffers first */
@@ -951,11 +941,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
951 else 941 else
952 vhost_zerocopy_signal_used(net, vq); 942 vhost_zerocopy_signal_used(net, vq);
953 vhost_net_tx_packet(net); 943 vhost_net_tx_packet(net);
954 if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) { 944 } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
955 vhost_poll_queue(&vq->poll);
956 break;
957 }
958 }
959} 945}
960 946
961/* Expects to be always run from workqueue - which acts as 947/* Expects to be always run from workqueue - which acts as
@@ -1153,8 +1139,11 @@ static void handle_rx(struct vhost_net *net)
1153 vq->log : NULL; 1139 vq->log : NULL;
1154 mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); 1140 mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
1155 1141
1156 while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk, 1142 do {
1157 &busyloop_intr))) { 1143 sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
1144 &busyloop_intr);
1145 if (!sock_len)
1146 break;
1158 sock_len += sock_hlen; 1147 sock_len += sock_hlen;
1159 vhost_len = sock_len + vhost_hlen; 1148 vhost_len = sock_len + vhost_hlen;
1160 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, 1149 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1239,14 +1228,11 @@ static void handle_rx(struct vhost_net *net)
1239 vhost_log_write(vq, vq_log, log, vhost_len, 1228 vhost_log_write(vq, vq_log, log, vhost_len,
1240 vq->iov, in); 1229 vq->iov, in);
1241 total_len += vhost_len; 1230 total_len += vhost_len;
1242 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { 1231 } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
1243 vhost_poll_queue(&vq->poll); 1232
1244 goto out;
1245 }
1246 }
1247 if (unlikely(busyloop_intr)) 1233 if (unlikely(busyloop_intr))
1248 vhost_poll_queue(&vq->poll); 1234 vhost_poll_queue(&vq->poll);
1249 else 1235 else if (!sock_len)
1250 vhost_net_enable_vq(net, vq); 1236 vhost_net_enable_vq(net, vq);
1251out: 1237out:
1252 vhost_net_signal_used(nvq); 1238 vhost_net_signal_used(nvq);
@@ -1338,7 +1324,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
1338 vhost_net_buf_init(&n->vqs[i].rxq); 1324 vhost_net_buf_init(&n->vqs[i].rxq);
1339 } 1325 }
1340 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, 1326 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
1341 UIO_MAXIOV + VHOST_NET_BATCH); 1327 UIO_MAXIOV + VHOST_NET_BATCH,
1328 VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
1342 1329
1343 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); 1330 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
1344 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); 1331 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c090d177bd75..a9caf1bc3c3e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -57,6 +57,12 @@
57#define VHOST_SCSI_PREALLOC_UPAGES 2048 57#define VHOST_SCSI_PREALLOC_UPAGES 2048
58#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048 58#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
59 59
60/* Max number of requests before requeueing the job.
61 * Using this limit prevents one virtqueue from starving others with
62 * request.
63 */
64#define VHOST_SCSI_WEIGHT 256
65
60struct vhost_scsi_inflight { 66struct vhost_scsi_inflight {
61 /* Wait for the flush operation to finish */ 67 /* Wait for the flush operation to finish */
62 struct completion comp; 68 struct completion comp;
@@ -912,7 +918,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
912 struct iov_iter in_iter, prot_iter, data_iter; 918 struct iov_iter in_iter, prot_iter, data_iter;
913 u64 tag; 919 u64 tag;
914 u32 exp_data_len, data_direction; 920 u32 exp_data_len, data_direction;
915 int ret, prot_bytes; 921 int ret, prot_bytes, c = 0;
916 u16 lun; 922 u16 lun;
917 u8 task_attr; 923 u8 task_attr;
918 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); 924 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -932,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
932 938
933 vhost_disable_notify(&vs->dev, vq); 939 vhost_disable_notify(&vs->dev, vq);
934 940
935 for (;;) { 941 do {
936 ret = vhost_scsi_get_desc(vs, vq, &vc); 942 ret = vhost_scsi_get_desc(vs, vq, &vc);
937 if (ret) 943 if (ret)
938 goto err; 944 goto err;
@@ -1112,7 +1118,7 @@ err:
1112 break; 1118 break;
1113 else if (ret == -EIO) 1119 else if (ret == -EIO)
1114 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); 1120 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1115 } 1121 } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1116out: 1122out:
1117 mutex_unlock(&vq->mutex); 1123 mutex_unlock(&vq->mutex);
1118} 1124}
@@ -1171,7 +1177,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1171 } v_req; 1177 } v_req;
1172 struct vhost_scsi_ctx vc; 1178 struct vhost_scsi_ctx vc;
1173 size_t typ_size; 1179 size_t typ_size;
1174 int ret; 1180 int ret, c = 0;
1175 1181
1176 mutex_lock(&vq->mutex); 1182 mutex_lock(&vq->mutex);
1177 /* 1183 /*
@@ -1185,7 +1191,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1185 1191
1186 vhost_disable_notify(&vs->dev, vq); 1192 vhost_disable_notify(&vs->dev, vq);
1187 1193
1188 for (;;) { 1194 do {
1189 ret = vhost_scsi_get_desc(vs, vq, &vc); 1195 ret = vhost_scsi_get_desc(vs, vq, &vc);
1190 if (ret) 1196 if (ret)
1191 goto err; 1197 goto err;
@@ -1264,7 +1270,7 @@ err:
1264 break; 1270 break;
1265 else if (ret == -EIO) 1271 else if (ret == -EIO)
1266 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); 1272 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1267 } 1273 } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1268out: 1274out:
1269 mutex_unlock(&vq->mutex); 1275 mutex_unlock(&vq->mutex);
1270} 1276}
@@ -1621,7 +1627,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1621 vqs[i] = &vs->vqs[i].vq; 1627 vqs[i] = &vs->vqs[i].vq;
1622 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; 1628 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1623 } 1629 }
1624 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV); 1630 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
1631 VHOST_SCSI_WEIGHT, 0);
1625 1632
1626 vhost_scsi_init_inflight(vs, NULL); 1633 vhost_scsi_init_inflight(vs, NULL);
1627 1634
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1e3ed41ae1f3..3f3eac4bcc58 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -413,8 +413,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
413 vhost_vq_free_iovecs(dev->vqs[i]); 413 vhost_vq_free_iovecs(dev->vqs[i]);
414} 414}
415 415
416bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
417 int pkts, int total_len)
418{
419 struct vhost_dev *dev = vq->dev;
420
421 if ((dev->byte_weight && total_len >= dev->byte_weight) ||
422 pkts >= dev->weight) {
423 vhost_poll_queue(&vq->poll);
424 return true;
425 }
426
427 return false;
428}
429EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
430
416void vhost_dev_init(struct vhost_dev *dev, 431void vhost_dev_init(struct vhost_dev *dev,
417 struct vhost_virtqueue **vqs, int nvqs, int iov_limit) 432 struct vhost_virtqueue **vqs, int nvqs,
433 int iov_limit, int weight, int byte_weight)
418{ 434{
419 struct vhost_virtqueue *vq; 435 struct vhost_virtqueue *vq;
420 int i; 436 int i;
@@ -428,6 +444,8 @@ void vhost_dev_init(struct vhost_dev *dev,
428 dev->mm = NULL; 444 dev->mm = NULL;
429 dev->worker = NULL; 445 dev->worker = NULL;
430 dev->iov_limit = iov_limit; 446 dev->iov_limit = iov_limit;
447 dev->weight = weight;
448 dev->byte_weight = byte_weight;
431 init_llist_head(&dev->work_list); 449 init_llist_head(&dev->work_list);
432 init_waitqueue_head(&dev->wait); 450 init_waitqueue_head(&dev->wait);
433 INIT_LIST_HEAD(&dev->read_list); 451 INIT_LIST_HEAD(&dev->read_list);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 9490e7ddb340..27a78a9b8cc7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -171,10 +171,13 @@ struct vhost_dev {
171 struct list_head pending_list; 171 struct list_head pending_list;
172 wait_queue_head_t wait; 172 wait_queue_head_t wait;
173 int iov_limit; 173 int iov_limit;
174 int weight;
175 int byte_weight;
174}; 176};
175 177
178bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
176void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, 179void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
177 int nvqs, int iov_limit); 180 int nvqs, int iov_limit, int weight, int byte_weight);
178long vhost_dev_set_owner(struct vhost_dev *dev); 181long vhost_dev_set_owner(struct vhost_dev *dev);
179bool vhost_dev_has_owner(struct vhost_dev *dev); 182bool vhost_dev_has_owner(struct vhost_dev *dev);
180long vhost_dev_check_owner(struct vhost_dev *); 183long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bb5fc0e9fbc2..814bed72d793 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -21,6 +21,14 @@
21#include "vhost.h" 21#include "vhost.h"
22 22
23#define VHOST_VSOCK_DEFAULT_HOST_CID 2 23#define VHOST_VSOCK_DEFAULT_HOST_CID 2
24/* Max number of bytes transferred before requeueing the job.
25 * Using this limit prevents one virtqueue from starving others. */
26#define VHOST_VSOCK_WEIGHT 0x80000
27/* Max number of packets transferred before requeueing the job.
28 * Using this limit prevents one virtqueue from starving others with
29 * small pkts.
30 */
31#define VHOST_VSOCK_PKT_WEIGHT 256
24 32
25enum { 33enum {
26 VHOST_VSOCK_FEATURES = VHOST_FEATURES, 34 VHOST_VSOCK_FEATURES = VHOST_FEATURES,
@@ -78,6 +86,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
78 struct vhost_virtqueue *vq) 86 struct vhost_virtqueue *vq)
79{ 87{
80 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; 88 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
89 int pkts = 0, total_len = 0;
81 bool added = false; 90 bool added = false;
82 bool restart_tx = false; 91 bool restart_tx = false;
83 92
@@ -89,7 +98,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
89 /* Avoid further vmexits, we're already processing the virtqueue */ 98 /* Avoid further vmexits, we're already processing the virtqueue */
90 vhost_disable_notify(&vsock->dev, vq); 99 vhost_disable_notify(&vsock->dev, vq);
91 100
92 for (;;) { 101 do {
93 struct virtio_vsock_pkt *pkt; 102 struct virtio_vsock_pkt *pkt;
94 struct iov_iter iov_iter; 103 struct iov_iter iov_iter;
95 unsigned out, in; 104 unsigned out, in;
@@ -174,8 +183,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
174 */ 183 */
175 virtio_transport_deliver_tap_pkt(pkt); 184 virtio_transport_deliver_tap_pkt(pkt);
176 185
186 total_len += pkt->len;
177 virtio_transport_free_pkt(pkt); 187 virtio_transport_free_pkt(pkt);
178 } 188 } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
179 if (added) 189 if (added)
180 vhost_signal(&vsock->dev, vq); 190 vhost_signal(&vsock->dev, vq);
181 191
@@ -350,7 +360,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
350 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, 360 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
351 dev); 361 dev);
352 struct virtio_vsock_pkt *pkt; 362 struct virtio_vsock_pkt *pkt;
353 int head; 363 int head, pkts = 0, total_len = 0;
354 unsigned int out, in; 364 unsigned int out, in;
355 bool added = false; 365 bool added = false;
356 366
@@ -360,7 +370,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
360 goto out; 370 goto out;
361 371
362 vhost_disable_notify(&vsock->dev, vq); 372 vhost_disable_notify(&vsock->dev, vq);
363 for (;;) { 373 do {
364 u32 len; 374 u32 len;
365 375
366 if (!vhost_vsock_more_replies(vsock)) { 376 if (!vhost_vsock_more_replies(vsock)) {
@@ -401,9 +411,11 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
401 else 411 else
402 virtio_transport_free_pkt(pkt); 412 virtio_transport_free_pkt(pkt);
403 413
404 vhost_add_used(vq, head, sizeof(pkt->hdr) + len); 414 len += sizeof(pkt->hdr);
415 vhost_add_used(vq, head, len);
416 total_len += len;
405 added = true; 417 added = true;
406 } 418 } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
407 419
408no_more_replies: 420no_more_replies:
409 if (added) 421 if (added)
@@ -531,7 +543,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
531 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; 543 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
532 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; 544 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
533 545
534 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV); 546 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
547 UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
548 VHOST_VSOCK_WEIGHT);
535 549
536 file->private_data = vsock; 550 file->private_data = vsock;
537 spin_lock_init(&vsock->send_pkt_list_lock); 551 spin_lock_init(&vsock->send_pkt_list_lock);
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 9aea44ed54c7..023fc3bc01c6 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -63,12 +63,12 @@ config VIRTIO_INPUT
63 63
64 If unsure, say M. 64 If unsure, say M.
65 65
66 config VIRTIO_MMIO 66config VIRTIO_MMIO
67 tristate "Platform bus driver for memory mapped virtio devices" 67 tristate "Platform bus driver for memory mapped virtio devices"
68 depends on HAS_IOMEM && HAS_DMA 68 depends on HAS_IOMEM && HAS_DMA
69 select VIRTIO 69 select VIRTIO
70 ---help--- 70 ---help---
71 This drivers provides support for memory mapped virtio 71 This drivers provides support for memory mapped virtio
72 platform device driver. 72 platform device driver.
73 73
74 If unsure, say N. 74 If unsure, say N.
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index 92e8f0755b9a..edf0bc98012c 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -138,7 +138,7 @@ static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
138 W1_F29_REG_CONTROL_AND_STATUS, buf); 138 W1_F29_REG_CONTROL_AND_STATUS, buf);
139} 139}
140 140
141#ifdef fCONFIG_W1_SLAVE_DS2408_READBACK 141#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
142static bool optional_read_back_valid(struct w1_slave *sl, u8 expected) 142static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
143{ 143{
144 u8 w1_buf[3]; 144 u8 w1_buf[3];
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index c76db75f02aa..804c6a77c5db 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -113,19 +113,6 @@ struct object_info {
113 __u16 filetype; 113 __u16 filetype;
114}; 114};
115 115
116/* RISC OS 12-bit filetype converts to ,xyz hex filename suffix */
117static inline int append_filetype_suffix(char *buf, __u16 filetype)
118{
119 if (filetype == 0xffff) /* no explicit 12-bit file type was set */
120 return 0;
121
122 *buf++ = ',';
123 *buf++ = hex_asc_lo(filetype >> 8);
124 *buf++ = hex_asc_lo(filetype >> 4);
125 *buf++ = hex_asc_lo(filetype >> 0);
126 return 4;
127}
128
129struct adfs_dir_ops { 116struct adfs_dir_ops {
130 int (*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir); 117 int (*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir);
131 int (*setpos)(struct adfs_dir *dir, unsigned int fpos); 118 int (*setpos)(struct adfs_dir *dir, unsigned int fpos);
@@ -172,6 +159,7 @@ extern const struct dentry_operations adfs_dentry_operations;
172extern const struct adfs_dir_ops adfs_f_dir_ops; 159extern const struct adfs_dir_ops adfs_f_dir_ops;
173extern const struct adfs_dir_ops adfs_fplus_dir_ops; 160extern const struct adfs_dir_ops adfs_fplus_dir_ops;
174 161
162void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj);
175extern int adfs_dir_update(struct super_block *sb, struct object_info *obj, 163extern int adfs_dir_update(struct super_block *sb, struct object_info *obj,
176 int wait); 164 int wait);
177 165
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index e18eff854e1a..fe39310c1a0a 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -16,6 +16,50 @@
16 */ 16 */
17static DEFINE_RWLOCK(adfs_dir_lock); 17static DEFINE_RWLOCK(adfs_dir_lock);
18 18
19void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj)
20{
21 unsigned int dots, i;
22
23 /*
24 * RISC OS allows the use of '/' in directory entry names, so we need
25 * to fix these up. '/' is typically used for FAT compatibility to
26 * represent '.', so do the same conversion here. In any case, '.'
27 * will never be in a RISC OS name since it is used as the pathname
28 * separator. Handle the case where we may generate a '.' or '..'
29 * name, replacing the first character with '^' (the RISC OS "parent
30 * directory" character.)
31 */
32 for (i = dots = 0; i < obj->name_len; i++)
33 if (obj->name[i] == '/') {
34 obj->name[i] = '.';
35 dots++;
36 }
37
38 if (obj->name_len <= 2 && dots == obj->name_len)
39 obj->name[0] = '^';
40
41 obj->filetype = -1;
42
43 /*
44 * object is a file and is filetyped and timestamped?
45 * RISC OS 12-bit filetype is stored in load_address[19:8]
46 */
47 if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
48 (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
49 obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
50
51 /* optionally append the ,xyz hex filetype suffix */
52 if (ADFS_SB(dir->sb)->s_ftsuffix) {
53 __u16 filetype = obj->filetype;
54
55 obj->name[obj->name_len++] = ',';
56 obj->name[obj->name_len++] = hex_asc_lo(filetype >> 8);
57 obj->name[obj->name_len++] = hex_asc_lo(filetype >> 4);
58 obj->name[obj->name_len++] = hex_asc_lo(filetype >> 0);
59 }
60 }
61}
62
19static int 63static int
20adfs_readdir(struct file *file, struct dir_context *ctx) 64adfs_readdir(struct file *file, struct dir_context *ctx)
21{ 65{
@@ -100,37 +144,36 @@ out:
100 return ret; 144 return ret;
101} 145}
102 146
103static int 147static unsigned char adfs_tolower(unsigned char c)
104adfs_match(const struct qstr *name, struct object_info *obj)
105{ 148{
106 int i; 149 if (c >= 'A' && c <= 'Z')
107 150 c += 'a' - 'A';
108 if (name->len != obj->name_len) 151 return c;
109 return 0; 152}
110 153
111 for (i = 0; i < name->len; i++) { 154static int __adfs_compare(const unsigned char *qstr, u32 qlen,
112 char c1, c2; 155 const char *str, u32 len)
156{
157 u32 i;
113 158
114 c1 = name->name[i]; 159 if (qlen != len)
115 c2 = obj->name[i]; 160 return 1;
116 161
117 if (c1 >= 'A' && c1 <= 'Z') 162 for (i = 0; i < qlen; i++)
118 c1 += 'a' - 'A'; 163 if (adfs_tolower(qstr[i]) != adfs_tolower(str[i]))
119 if (c2 >= 'A' && c2 <= 'Z') 164 return 1;
120 c2 += 'a' - 'A';
121 165
122 if (c1 != c2) 166 return 0;
123 return 0;
124 }
125 return 1;
126} 167}
127 168
128static int 169static int adfs_dir_lookup_byname(struct inode *inode, const struct qstr *qstr,
129adfs_dir_lookup_byname(struct inode *inode, const struct qstr *name, struct object_info *obj) 170 struct object_info *obj)
130{ 171{
131 struct super_block *sb = inode->i_sb; 172 struct super_block *sb = inode->i_sb;
132 const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir; 173 const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
174 const unsigned char *name;
133 struct adfs_dir dir; 175 struct adfs_dir dir;
176 u32 name_len;
134 int ret; 177 int ret;
135 178
136 ret = ops->read(sb, inode->i_ino, inode->i_size, &dir); 179 ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
@@ -153,8 +196,10 @@ adfs_dir_lookup_byname(struct inode *inode, const struct qstr *name, struct obje
153 goto unlock_out; 196 goto unlock_out;
154 197
155 ret = -ENOENT; 198 ret = -ENOENT;
199 name = qstr->name;
200 name_len = qstr->len;
156 while (ops->getnext(&dir, obj) == 0) { 201 while (ops->getnext(&dir, obj) == 0) {
157 if (adfs_match(name, obj)) { 202 if (!__adfs_compare(name, name_len, obj->name, obj->name_len)) {
158 ret = 0; 203 ret = 0;
159 break; 204 break;
160 } 205 }
@@ -179,30 +224,18 @@ const struct file_operations adfs_dir_operations = {
179static int 224static int
180adfs_hash(const struct dentry *parent, struct qstr *qstr) 225adfs_hash(const struct dentry *parent, struct qstr *qstr)
181{ 226{
182 const unsigned int name_len = ADFS_SB(parent->d_sb)->s_namelen;
183 const unsigned char *name; 227 const unsigned char *name;
184 unsigned long hash; 228 unsigned long hash;
185 int i; 229 u32 len;
186 230
187 if (qstr->len < name_len) 231 if (qstr->len > ADFS_SB(parent->d_sb)->s_namelen)
188 return 0; 232 return -ENAMETOOLONG;
189 233
190 /* 234 len = qstr->len;
191 * Truncate the name in place, avoids
192 * having to define a compare function.
193 */
194 qstr->len = i = name_len;
195 name = qstr->name; 235 name = qstr->name;
196 hash = init_name_hash(parent); 236 hash = init_name_hash(parent);
197 while (i--) { 237 while (len--)
198 char c; 238 hash = partial_name_hash(adfs_tolower(*name++), hash);
199
200 c = *name++;
201 if (c >= 'A' && c <= 'Z')
202 c += 'a' - 'A';
203
204 hash = partial_name_hash(c, hash);
205 }
206 qstr->hash = end_name_hash(hash); 239 qstr->hash = end_name_hash(hash);
207 240
208 return 0; 241 return 0;
@@ -212,30 +245,10 @@ adfs_hash(const struct dentry *parent, struct qstr *qstr)
212 * Compare two names, taking note of the name length 245 * Compare two names, taking note of the name length
213 * requirements of the underlying filesystem. 246 * requirements of the underlying filesystem.
214 */ 247 */
215static int 248static int adfs_compare(const struct dentry *dentry, unsigned int len,
216adfs_compare(const struct dentry *dentry, 249 const char *str, const struct qstr *qstr)
217 unsigned int len, const char *str, const struct qstr *name)
218{ 250{
219 int i; 251 return __adfs_compare(qstr->name, qstr->len, str, len);
220
221 if (len != name->len)
222 return 1;
223
224 for (i = 0; i < name->len; i++) {
225 char a, b;
226
227 a = str[i];
228 b = name->name[i];
229
230 if (a >= 'A' && a <= 'Z')
231 a += 'a' - 'A';
232 if (b >= 'A' && b <= 'Z')
233 b += 'a' - 'A';
234
235 if (a != b)
236 return 1;
237 }
238 return 0;
239} 252}
240 253
241const struct dentry_operations adfs_dentry_operations = { 254const struct dentry_operations adfs_dentry_operations = {
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index 382c9d7ad375..693f69ed3de3 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -47,21 +47,6 @@ static inline void adfs_writeval(unsigned char *p, int len, unsigned int val)
47 } 47 }
48} 48}
49 49
50static inline int adfs_readname(char *buf, char *ptr, int maxlen)
51{
52 char *old_buf = buf;
53
54 while ((unsigned char)*ptr >= ' ' && maxlen--) {
55 if (*ptr == '/')
56 *buf++ = '.';
57 else
58 *buf++ = *ptr;
59 ptr++;
60 }
61
62 return buf - old_buf;
63}
64
65#define ror13(v) ((v >> 13) | (v << 19)) 50#define ror13(v) ((v >> 13) | (v << 19))
66 51
67#define dir_u8(idx) \ 52#define dir_u8(idx) \
@@ -216,29 +201,23 @@ static inline void
216adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj, 201adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj,
217 struct adfs_direntry *de) 202 struct adfs_direntry *de)
218{ 203{
219 obj->name_len = adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN); 204 unsigned int name_len;
205
206 for (name_len = 0; name_len < ADFS_F_NAME_LEN; name_len++) {
207 if (de->dirobname[name_len] < ' ')
208 break;
209
210 obj->name[name_len] = de->dirobname[name_len];
211 }
212
213 obj->name_len = name_len;
220 obj->file_id = adfs_readval(de->dirinddiscadd, 3); 214 obj->file_id = adfs_readval(de->dirinddiscadd, 3);
221 obj->loadaddr = adfs_readval(de->dirload, 4); 215 obj->loadaddr = adfs_readval(de->dirload, 4);
222 obj->execaddr = adfs_readval(de->direxec, 4); 216 obj->execaddr = adfs_readval(de->direxec, 4);
223 obj->size = adfs_readval(de->dirlen, 4); 217 obj->size = adfs_readval(de->dirlen, 4);
224 obj->attr = de->newdiratts; 218 obj->attr = de->newdiratts;
225 obj->filetype = -1;
226 219
227 /* 220 adfs_object_fixup(dir, obj);
228 * object is a file and is filetyped and timestamped?
229 * RISC OS 12-bit filetype is stored in load_address[19:8]
230 */
231 if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
232 (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
233 obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
234
235 /* optionally append the ,xyz hex filetype suffix */
236 if (ADFS_SB(dir->sb)->s_ftsuffix)
237 obj->name_len +=
238 append_filetype_suffix(
239 &obj->name[obj->name_len],
240 obj->filetype);
241 }
242} 221}
243 222
244/* 223/*
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index c92cfb638c18..97b9f28f459b 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -169,7 +169,7 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
169 (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data; 169 (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
170 struct adfs_bigdirentry bde; 170 struct adfs_bigdirentry bde;
171 unsigned int offset; 171 unsigned int offset;
172 int i, ret = -ENOENT; 172 int ret = -ENOENT;
173 173
174 if (dir->pos >= le32_to_cpu(h->bigdirentries)) 174 if (dir->pos >= le32_to_cpu(h->bigdirentries))
175 goto out; 175 goto out;
@@ -193,27 +193,7 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
193 offset += le32_to_cpu(bde.bigdirobnameptr); 193 offset += le32_to_cpu(bde.bigdirobnameptr);
194 194
195 dir_memcpy(dir, offset, obj->name, obj->name_len); 195 dir_memcpy(dir, offset, obj->name, obj->name_len);
196 for (i = 0; i < obj->name_len; i++) 196 adfs_object_fixup(dir, obj);
197 if (obj->name[i] == '/')
198 obj->name[i] = '.';
199
200 obj->filetype = -1;
201
202 /*
203 * object is a file and is filetyped and timestamped?
204 * RISC OS 12-bit filetype is stored in load_address[19:8]
205 */
206 if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
207 (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
208 obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
209
210 /* optionally append the ,xyz hex filetype suffix */
211 if (ADFS_SB(dir->sb)->s_ftsuffix)
212 obj->name_len +=
213 append_filetype_suffix(
214 &obj->name[obj->name_len],
215 obj->filetype);
216 }
217 197
218 dir->pos += 1; 198 dir->pos += 1;
219 ret = 0; 199 ret = 0;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 3959f08279e6..b8f9c83835d5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1377,10 +1377,17 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1377 if (err && !nbytes) 1377 if (err && !nbytes)
1378 break; 1378 break;
1379 1379
1380 if (write) 1380 if (write) {
1381 if (!capable(CAP_FSETID)) {
1382 struct fuse_write_in *inarg;
1383
1384 inarg = &req->misc.write.in;
1385 inarg->write_flags |= FUSE_WRITE_KILL_PRIV;
1386 }
1381 nres = fuse_send_write(req, io, pos, nbytes, owner); 1387 nres = fuse_send_write(req, io, pos, nbytes, owner);
1382 else 1388 } else {
1383 nres = fuse_send_read(req, io, pos, nbytes, owner); 1389 nres = fuse_send_read(req, io, pos, nbytes, owner);
1390 }
1384 1391
1385 if (!io->async) 1392 if (!io->async)
1386 fuse_release_user_pages(req, io->should_dirty); 1393 fuse_release_user_pages(req, io->should_dirty);
@@ -3014,6 +3021,16 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3014 return ret; 3021 return ret;
3015} 3022}
3016 3023
3024static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
3025{
3026 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
3027
3028 if (!err)
3029 fuse_sync_writes(inode);
3030
3031 return err;
3032}
3033
3017static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 3034static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
3018 loff_t length) 3035 loff_t length)
3019{ 3036{
@@ -3042,12 +3059,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
3042 inode_lock(inode); 3059 inode_lock(inode);
3043 if (mode & FALLOC_FL_PUNCH_HOLE) { 3060 if (mode & FALLOC_FL_PUNCH_HOLE) {
3044 loff_t endbyte = offset + length - 1; 3061 loff_t endbyte = offset + length - 1;
3045 err = filemap_write_and_wait_range(inode->i_mapping, 3062
3046 offset, endbyte); 3063 err = fuse_writeback_range(inode, offset, endbyte);
3047 if (err) 3064 if (err)
3048 goto out; 3065 goto out;
3049
3050 fuse_sync_writes(inode);
3051 } 3066 }
3052 } 3067 }
3053 3068
@@ -3055,7 +3070,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
3055 offset + length > i_size_read(inode)) { 3070 offset + length > i_size_read(inode)) {
3056 err = inode_newsize_ok(inode, offset + length); 3071 err = inode_newsize_ok(inode, offset + length);
3057 if (err) 3072 if (err)
3058 return err; 3073 goto out;
3059 } 3074 }
3060 3075
3061 if (!(mode & FALLOC_FL_KEEP_SIZE)) 3076 if (!(mode & FALLOC_FL_KEEP_SIZE))
@@ -3103,6 +3118,7 @@ static ssize_t fuse_copy_file_range(struct file *file_in, loff_t pos_in,
3103{ 3118{
3104 struct fuse_file *ff_in = file_in->private_data; 3119 struct fuse_file *ff_in = file_in->private_data;
3105 struct fuse_file *ff_out = file_out->private_data; 3120 struct fuse_file *ff_out = file_out->private_data;
3121 struct inode *inode_in = file_inode(file_in);
3106 struct inode *inode_out = file_inode(file_out); 3122 struct inode *inode_out = file_inode(file_out);
3107 struct fuse_inode *fi_out = get_fuse_inode(inode_out); 3123 struct fuse_inode *fi_out = get_fuse_inode(inode_out);
3108 struct fuse_conn *fc = ff_in->fc; 3124 struct fuse_conn *fc = ff_in->fc;
@@ -3126,15 +3142,20 @@ static ssize_t fuse_copy_file_range(struct file *file_in, loff_t pos_in,
3126 if (fc->no_copy_file_range) 3142 if (fc->no_copy_file_range)
3127 return -EOPNOTSUPP; 3143 return -EOPNOTSUPP;
3128 3144
3145 if (fc->writeback_cache) {
3146 inode_lock(inode_in);
3147 err = fuse_writeback_range(inode_in, pos_in, pos_in + len);
3148 inode_unlock(inode_in);
3149 if (err)
3150 return err;
3151 }
3152
3129 inode_lock(inode_out); 3153 inode_lock(inode_out);
3130 3154
3131 if (fc->writeback_cache) { 3155 if (fc->writeback_cache) {
3132 err = filemap_write_and_wait_range(inode_out->i_mapping, 3156 err = fuse_writeback_range(inode_out, pos_out, pos_out + len);
3133 pos_out, pos_out + len);
3134 if (err) 3157 if (err)
3135 goto out; 3158 goto out;
3136
3137 fuse_sync_writes(inode_out);
3138 } 3159 }
3139 3160
3140 if (is_unstable) 3161 if (is_unstable)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f0857c056df1..f1ebcb42cbf5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -137,7 +137,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
137{ 137{
138 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 138 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
139 139
140 BUG_ON(test_bit(GLF_REVOKES, &gl->gl_flags)); 140 BUG_ON(atomic_read(&gl->gl_revokes));
141 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); 141 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
142 smp_mb(); 142 smp_mb();
143 wake_up_glock(gl); 143 wake_up_glock(gl);
@@ -1798,7 +1798,7 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1798 state2str(gl->gl_target), 1798 state2str(gl->gl_target),
1799 state2str(gl->gl_demote_state), dtime, 1799 state2str(gl->gl_demote_state), dtime,
1800 atomic_read(&gl->gl_ail_count), 1800 atomic_read(&gl->gl_ail_count),
1801 test_bit(GLF_REVOKES, &gl->gl_flags) ? 1 : 0, 1801 atomic_read(&gl->gl_revokes),
1802 (int)gl->gl_lockref.count, gl->gl_hold_time); 1802 (int)gl->gl_lockref.count, gl->gl_hold_time);
1803 1803
1804 list_for_each_entry(gh, &gl->gl_holders, gh_list) 1804 list_for_each_entry(gh, &gl->gl_holders, gh_list)
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 8c4b334d1be4..c9af93ac6c73 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -342,7 +342,6 @@ enum {
342 GLF_OBJECT = 14, /* Used only for tracing */ 342 GLF_OBJECT = 14, /* Used only for tracing */
343 GLF_BLOCKING = 15, 343 GLF_BLOCKING = 15,
344 GLF_INODE_CREATING = 16, /* Inode creation occurring */ 344 GLF_INODE_CREATING = 16, /* Inode creation occurring */
345 GLF_REVOKES = 17, /* Glock has revokes in queue */
346}; 345};
347 346
348struct gfs2_glock { 347struct gfs2_glock {
@@ -372,6 +371,7 @@ struct gfs2_glock {
372 struct list_head gl_lru; 371 struct list_head gl_lru;
373 struct list_head gl_ail_list; 372 struct list_head gl_ail_list;
374 atomic_t gl_ail_count; 373 atomic_t gl_ail_count;
374 atomic_t gl_revokes;
375 struct delayed_work gl_work; 375 struct delayed_work gl_work;
376 union { 376 union {
377 /* For inode and iopen glocks only */ 377 /* For inode and iopen glocks only */
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 1360008c4ee9..c4c9700c366e 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -603,10 +603,8 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
603 gfs2_remove_from_ail(bd); /* drops ref on bh */ 603 gfs2_remove_from_ail(bd); /* drops ref on bh */
604 bd->bd_bh = NULL; 604 bd->bd_bh = NULL;
605 sdp->sd_log_num_revoke++; 605 sdp->sd_log_num_revoke++;
606 if (!test_bit(GLF_REVOKES, &gl->gl_flags)) { 606 if (atomic_inc_return(&gl->gl_revokes) == 1)
607 set_bit(GLF_REVOKES, &gl->gl_flags);
608 gfs2_glock_hold(gl); 607 gfs2_glock_hold(gl);
609 }
610 set_bit(GLF_LFLUSH, &gl->gl_flags); 608 set_bit(GLF_LFLUSH, &gl->gl_flags);
611 list_add(&bd->bd_list, &sdp->sd_log_revokes); 609 list_add(&bd->bd_list, &sdp->sd_log_revokes);
612} 610}
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index badb27ffc874..1921cda034fd 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -857,34 +857,19 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
857static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) 857static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
858{ 858{
859 struct list_head *head = &sdp->sd_log_revokes; 859 struct list_head *head = &sdp->sd_log_revokes;
860 struct gfs2_bufdata *bd, *tmp; 860 struct gfs2_bufdata *bd;
861 861 struct gfs2_glock *gl;
862 /*
863 * Glocks can be referenced repeatedly on the revoke list, but the list
864 * only holds one reference. All glocks on the list will have the
865 * GLF_REVOKES flag set initially.
866 */
867
868 list_for_each_entry_safe(bd, tmp, head, bd_list) {
869 struct gfs2_glock *gl = bd->bd_gl;
870 862
871 if (test_bit(GLF_REVOKES, &gl->gl_flags)) { 863 while (!list_empty(head)) {
872 /* Keep each glock on the list exactly once. */ 864 bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
873 clear_bit(GLF_REVOKES, &gl->gl_flags); 865 list_del_init(&bd->bd_list);
874 continue; 866 gl = bd->bd_gl;
867 if (atomic_dec_return(&gl->gl_revokes) == 0) {
868 clear_bit(GLF_LFLUSH, &gl->gl_flags);
869 gfs2_glock_queue_put(gl);
875 } 870 }
876 list_del(&bd->bd_list);
877 kmem_cache_free(gfs2_bufdata_cachep, bd);
878 }
879 list_for_each_entry_safe(bd, tmp, head, bd_list) {
880 struct gfs2_glock *gl = bd->bd_gl;
881
882 list_del(&bd->bd_list);
883 kmem_cache_free(gfs2_bufdata_cachep, bd); 871 kmem_cache_free(gfs2_bufdata_cachep, bd);
884 clear_bit(GLF_LFLUSH, &gl->gl_flags);
885 gfs2_glock_queue_put(gl);
886 } 872 }
887 /* the list is empty now */
888} 873}
889 874
890static void revoke_lo_before_scan(struct gfs2_jdesc *jd, 875static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 57761731be4f..a1a295b739fb 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -56,6 +56,7 @@ static void gfs2_init_glock_once(void *foo)
56 INIT_LIST_HEAD(&gl->gl_lru); 56 INIT_LIST_HEAD(&gl->gl_lru);
57 INIT_LIST_HEAD(&gl->gl_ail_list); 57 INIT_LIST_HEAD(&gl->gl_ail_list);
58 atomic_set(&gl->gl_ail_count, 0); 58 atomic_set(&gl->gl_ail_count, 0);
59 atomic_set(&gl->gl_revokes, 0);
59} 60}
60 61
61static void gfs2_init_gl_aspace_once(void *foo) 62static void gfs2_init_gl_aspace_once(void *foo)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2bac687d0ed9..b70cea5c8c59 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1474,7 +1474,7 @@ static void gfs2_final_release_pages(struct gfs2_inode *ip)
1474 truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0); 1474 truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
1475 truncate_inode_pages(&inode->i_data, 0); 1475 truncate_inode_pages(&inode->i_data, 0);
1476 1476
1477 if (!test_bit(GLF_REVOKES, &gl->gl_flags)) { 1477 if (atomic_read(&gl->gl_revokes) == 0) {
1478 clear_bit(GLF_LFLUSH, &gl->gl_flags); 1478 clear_bit(GLF_LFLUSH, &gl->gl_flags);
1479 clear_bit(GLF_DIRTY, &gl->gl_flags); 1479 clear_bit(GLF_DIRTY, &gl->gl_flags);
1480 } 1480 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c29cbef6b53f..e38f4af20950 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6932,7 +6932,6 @@ struct nfs4_lock_waiter {
6932 struct task_struct *task; 6932 struct task_struct *task;
6933 struct inode *inode; 6933 struct inode *inode;
6934 struct nfs_lowner *owner; 6934 struct nfs_lowner *owner;
6935 bool notified;
6936}; 6935};
6937 6936
6938static int 6937static int
@@ -6954,13 +6953,13 @@ nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, vo
6954 /* Make sure it's for the right inode */ 6953 /* Make sure it's for the right inode */
6955 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 6954 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
6956 return 0; 6955 return 0;
6957
6958 waiter->notified = true;
6959 } 6956 }
6960 6957
6961 /* override "private" so we can use default_wake_function */ 6958 /* override "private" so we can use default_wake_function */
6962 wait->private = waiter->task; 6959 wait->private = waiter->task;
6963 ret = autoremove_wake_function(wait, mode, flags, key); 6960 ret = woken_wake_function(wait, mode, flags, key);
6961 if (ret)
6962 list_del_init(&wait->entry);
6964 wait->private = waiter; 6963 wait->private = waiter;
6965 return ret; 6964 return ret;
6966} 6965}
@@ -6969,7 +6968,6 @@ static int
6969nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6968nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6970{ 6969{
6971 int status = -ERESTARTSYS; 6970 int status = -ERESTARTSYS;
6972 unsigned long flags;
6973 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 6971 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
6974 struct nfs_server *server = NFS_SERVER(state->inode); 6972 struct nfs_server *server = NFS_SERVER(state->inode);
6975 struct nfs_client *clp = server->nfs_client; 6973 struct nfs_client *clp = server->nfs_client;
@@ -6979,8 +6977,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6979 .s_dev = server->s_dev }; 6977 .s_dev = server->s_dev };
6980 struct nfs4_lock_waiter waiter = { .task = current, 6978 struct nfs4_lock_waiter waiter = { .task = current,
6981 .inode = state->inode, 6979 .inode = state->inode,
6982 .owner = &owner, 6980 .owner = &owner};
6983 .notified = false };
6984 wait_queue_entry_t wait; 6981 wait_queue_entry_t wait;
6985 6982
6986 /* Don't bother with waitqueue if we don't expect a callback */ 6983 /* Don't bother with waitqueue if we don't expect a callback */
@@ -6990,27 +6987,22 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6990 init_wait(&wait); 6987 init_wait(&wait);
6991 wait.private = &waiter; 6988 wait.private = &waiter;
6992 wait.func = nfs4_wake_lock_waiter; 6989 wait.func = nfs4_wake_lock_waiter;
6993 add_wait_queue(q, &wait);
6994 6990
6995 while(!signalled()) { 6991 while(!signalled()) {
6996 waiter.notified = false; 6992 add_wait_queue(q, &wait);
6997 status = nfs4_proc_setlk(state, cmd, request); 6993 status = nfs4_proc_setlk(state, cmd, request);
6998 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6994 if ((status != -EAGAIN) || IS_SETLK(cmd)) {
6995 finish_wait(q, &wait);
6999 break; 6996 break;
7000
7001 status = -ERESTARTSYS;
7002 spin_lock_irqsave(&q->lock, flags);
7003 if (waiter.notified) {
7004 spin_unlock_irqrestore(&q->lock, flags);
7005 continue;
7006 } 6997 }
7007 set_current_state(TASK_INTERRUPTIBLE);
7008 spin_unlock_irqrestore(&q->lock, flags);
7009 6998
7010 freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT); 6999 status = -ERESTARTSYS;
7000 freezer_do_not_count();
7001 wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
7002 freezer_count();
7003 finish_wait(q, &wait);
7011 } 7004 }
7012 7005
7013 finish_wait(q, &wait);
7014 return status; 7006 return status;
7015} 7007}
7016#else /* !CONFIG_NFS_V4_1 */ 7008#else /* !CONFIG_NFS_V4_1 */
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 540a8b845145..340a6ad45914 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -426,7 +426,8 @@ static unsigned int ovl_get_inode_flags(struct inode *inode)
426 return ovl_iflags; 426 return ovl_iflags;
427} 427}
428 428
429static long ovl_ioctl_set_flags(struct file *file, unsigned long arg) 429static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd,
430 unsigned long arg)
430{ 431{
431 long ret; 432 long ret;
432 struct inode *inode = file_inode(file); 433 struct inode *inode = file_inode(file);
@@ -456,7 +457,7 @@ static long ovl_ioctl_set_flags(struct file *file, unsigned long arg)
456 if (ret) 457 if (ret)
457 goto unlock; 458 goto unlock;
458 459
459 ret = ovl_real_ioctl(file, FS_IOC_SETFLAGS, arg); 460 ret = ovl_real_ioctl(file, cmd, arg);
460 461
461 ovl_copyflags(ovl_inode_real(inode), inode); 462 ovl_copyflags(ovl_inode_real(inode), inode);
462unlock: 463unlock:
@@ -474,11 +475,13 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
474 475
475 switch (cmd) { 476 switch (cmd) {
476 case FS_IOC_GETFLAGS: 477 case FS_IOC_GETFLAGS:
478 case FS_IOC_FSGETXATTR:
477 ret = ovl_real_ioctl(file, cmd, arg); 479 ret = ovl_real_ioctl(file, cmd, arg);
478 break; 480 break;
479 481
480 case FS_IOC_SETFLAGS: 482 case FS_IOC_SETFLAGS:
481 ret = ovl_ioctl_set_flags(file, arg); 483 case FS_IOC_FSSETXATTR:
484 ret = ovl_ioctl_set_flags(file, cmd, arg);
482 break; 485 break;
483 486
484 default: 487 default:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b48273e846ad..f7eba21effa5 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -777,6 +777,54 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
777 return inode; 777 return inode;
778} 778}
779 779
780bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
781{
782 struct inode *key = d_inode(dir);
783 struct inode *trap;
784 bool res;
785
786 trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
787 if (!trap)
788 return false;
789
790 res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
791 !ovl_inode_lower(trap);
792
793 iput(trap);
794 return res;
795}
796
797/*
798 * Create an inode cache entry for layer root dir, that will intentionally
799 * fail ovl_verify_inode(), so any lookup that will find some layer root
800 * will fail.
801 */
802struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
803{
804 struct inode *key = d_inode(dir);
805 struct inode *trap;
806
807 if (!d_is_dir(dir))
808 return ERR_PTR(-ENOTDIR);
809
810 trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
811 ovl_inode_set, key);
812 if (!trap)
813 return ERR_PTR(-ENOMEM);
814
815 if (!(trap->i_state & I_NEW)) {
816 /* Conflicting layer roots? */
817 iput(trap);
818 return ERR_PTR(-ELOOP);
819 }
820
821 trap->i_mode = S_IFDIR;
822 trap->i_flags = S_DEAD;
823 unlock_new_inode(trap);
824
825 return trap;
826}
827
780/* 828/*
781 * Does overlay inode need to be hashed by lower inode? 829 * Does overlay inode need to be hashed by lower inode?
782 */ 830 */
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index efd372312ef1..badf039267a2 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -18,6 +18,7 @@
18#include "overlayfs.h" 18#include "overlayfs.h"
19 19
20struct ovl_lookup_data { 20struct ovl_lookup_data {
21 struct super_block *sb;
21 struct qstr name; 22 struct qstr name;
22 bool is_dir; 23 bool is_dir;
23 bool opaque; 24 bool opaque;
@@ -244,6 +245,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
244 if (!d->metacopy || d->last) 245 if (!d->metacopy || d->last)
245 goto out; 246 goto out;
246 } else { 247 } else {
248 if (ovl_lookup_trap_inode(d->sb, this)) {
249 /* Caught in a trap of overlapping layers */
250 err = -ELOOP;
251 goto out_err;
252 }
253
247 if (last_element) 254 if (last_element)
248 d->is_dir = true; 255 d->is_dir = true;
249 if (d->last) 256 if (d->last)
@@ -819,6 +826,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
819 int err; 826 int err;
820 bool metacopy = false; 827 bool metacopy = false;
821 struct ovl_lookup_data d = { 828 struct ovl_lookup_data d = {
829 .sb = dentry->d_sb,
822 .name = dentry->d_name, 830 .name = dentry->d_name,
823 .is_dir = false, 831 .is_dir = false,
824 .opaque = false, 832 .opaque = false,
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index d26efed9f80a..cec40077b522 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -270,6 +270,7 @@ void ovl_clear_flag(unsigned long flag, struct inode *inode);
270bool ovl_test_flag(unsigned long flag, struct inode *inode); 270bool ovl_test_flag(unsigned long flag, struct inode *inode);
271bool ovl_inuse_trylock(struct dentry *dentry); 271bool ovl_inuse_trylock(struct dentry *dentry);
272void ovl_inuse_unlock(struct dentry *dentry); 272void ovl_inuse_unlock(struct dentry *dentry);
273bool ovl_is_inuse(struct dentry *dentry);
273bool ovl_need_index(struct dentry *dentry); 274bool ovl_need_index(struct dentry *dentry);
274int ovl_nlink_start(struct dentry *dentry); 275int ovl_nlink_start(struct dentry *dentry);
275void ovl_nlink_end(struct dentry *dentry); 276void ovl_nlink_end(struct dentry *dentry);
@@ -376,6 +377,8 @@ struct ovl_inode_params {
376struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev); 377struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
377struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real, 378struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
378 bool is_upper); 379 bool is_upper);
380bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir);
381struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir);
379struct inode *ovl_get_inode(struct super_block *sb, 382struct inode *ovl_get_inode(struct super_block *sb,
380 struct ovl_inode_params *oip); 383 struct ovl_inode_params *oip);
381static inline void ovl_copyattr(struct inode *from, struct inode *to) 384static inline void ovl_copyattr(struct inode *from, struct inode *to)
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index ec237035333a..6ed1ace8f8b3 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -29,6 +29,8 @@ struct ovl_sb {
29 29
30struct ovl_layer { 30struct ovl_layer {
31 struct vfsmount *mnt; 31 struct vfsmount *mnt;
32 /* Trap in ovl inode cache */
33 struct inode *trap;
32 struct ovl_sb *fs; 34 struct ovl_sb *fs;
33 /* Index of this layer in fs root (upper idx == 0) */ 35 /* Index of this layer in fs root (upper idx == 0) */
34 int idx; 36 int idx;
@@ -65,6 +67,10 @@ struct ovl_fs {
65 /* Did we take the inuse lock? */ 67 /* Did we take the inuse lock? */
66 bool upperdir_locked; 68 bool upperdir_locked;
67 bool workdir_locked; 69 bool workdir_locked;
70 /* Traps in ovl inode cache */
71 struct inode *upperdir_trap;
72 struct inode *workdir_trap;
73 struct inode *indexdir_trap;
68 /* Inode numbers in all layers do not use the high xino_bits */ 74 /* Inode numbers in all layers do not use the high xino_bits */
69 unsigned int xino_bits; 75 unsigned int xino_bits;
70}; 76};
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 5ec4fc2f5d7e..746ea36f3171 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -215,6 +215,9 @@ static void ovl_free_fs(struct ovl_fs *ofs)
215{ 215{
216 unsigned i; 216 unsigned i;
217 217
218 iput(ofs->indexdir_trap);
219 iput(ofs->workdir_trap);
220 iput(ofs->upperdir_trap);
218 dput(ofs->indexdir); 221 dput(ofs->indexdir);
219 dput(ofs->workdir); 222 dput(ofs->workdir);
220 if (ofs->workdir_locked) 223 if (ofs->workdir_locked)
@@ -223,8 +226,10 @@ static void ovl_free_fs(struct ovl_fs *ofs)
223 if (ofs->upperdir_locked) 226 if (ofs->upperdir_locked)
224 ovl_inuse_unlock(ofs->upper_mnt->mnt_root); 227 ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
225 mntput(ofs->upper_mnt); 228 mntput(ofs->upper_mnt);
226 for (i = 0; i < ofs->numlower; i++) 229 for (i = 0; i < ofs->numlower; i++) {
230 iput(ofs->lower_layers[i].trap);
227 mntput(ofs->lower_layers[i].mnt); 231 mntput(ofs->lower_layers[i].mnt);
232 }
228 for (i = 0; i < ofs->numlowerfs; i++) 233 for (i = 0; i < ofs->numlowerfs; i++)
229 free_anon_bdev(ofs->lower_fs[i].pseudo_dev); 234 free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
230 kfree(ofs->lower_layers); 235 kfree(ofs->lower_layers);
@@ -983,7 +988,26 @@ static const struct xattr_handler *ovl_xattr_handlers[] = {
983 NULL 988 NULL
984}; 989};
985 990
986static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) 991static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
992 struct inode **ptrap, const char *name)
993{
994 struct inode *trap;
995 int err;
996
997 trap = ovl_get_trap_inode(sb, dir);
998 err = PTR_ERR(trap);
999 if (IS_ERR(trap)) {
1000 if (err == -ELOOP)
1001 pr_err("overlayfs: conflicting %s path\n", name);
1002 return err;
1003 }
1004
1005 *ptrap = trap;
1006 return 0;
1007}
1008
1009static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
1010 struct path *upperpath)
987{ 1011{
988 struct vfsmount *upper_mnt; 1012 struct vfsmount *upper_mnt;
989 int err; 1013 int err;
@@ -1003,6 +1027,11 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
1003 if (err) 1027 if (err)
1004 goto out; 1028 goto out;
1005 1029
1030 err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap,
1031 "upperdir");
1032 if (err)
1033 goto out;
1034
1006 upper_mnt = clone_private_mount(upperpath); 1035 upper_mnt = clone_private_mount(upperpath);
1007 err = PTR_ERR(upper_mnt); 1036 err = PTR_ERR(upper_mnt);
1008 if (IS_ERR(upper_mnt)) { 1037 if (IS_ERR(upper_mnt)) {
@@ -1029,7 +1058,8 @@ out:
1029 return err; 1058 return err;
1030} 1059}
1031 1060
1032static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath) 1061static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
1062 struct path *workpath)
1033{ 1063{
1034 struct vfsmount *mnt = ofs->upper_mnt; 1064 struct vfsmount *mnt = ofs->upper_mnt;
1035 struct dentry *temp; 1065 struct dentry *temp;
@@ -1044,6 +1074,10 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
1044 if (!ofs->workdir) 1074 if (!ofs->workdir)
1045 goto out; 1075 goto out;
1046 1076
1077 err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir");
1078 if (err)
1079 goto out;
1080
1047 /* 1081 /*
1048 * Upper should support d_type, else whiteouts are visible. Given 1082 * Upper should support d_type, else whiteouts are visible. Given
1049 * workdir and upper are on same fs, we can do iterate_dir() on 1083 * workdir and upper are on same fs, we can do iterate_dir() on
@@ -1104,7 +1138,8 @@ out:
1104 return err; 1138 return err;
1105} 1139}
1106 1140
1107static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath) 1141static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
1142 struct path *upperpath)
1108{ 1143{
1109 int err; 1144 int err;
1110 struct path workpath = { }; 1145 struct path workpath = { };
@@ -1135,19 +1170,16 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
1135 pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); 1170 pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
1136 } 1171 }
1137 1172
1138 err = ovl_make_workdir(ofs, &workpath); 1173 err = ovl_make_workdir(sb, ofs, &workpath);
1139 if (err)
1140 goto out;
1141 1174
1142 err = 0;
1143out: 1175out:
1144 path_put(&workpath); 1176 path_put(&workpath);
1145 1177
1146 return err; 1178 return err;
1147} 1179}
1148 1180
1149static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe, 1181static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
1150 struct path *upperpath) 1182 struct ovl_entry *oe, struct path *upperpath)
1151{ 1183{
1152 struct vfsmount *mnt = ofs->upper_mnt; 1184 struct vfsmount *mnt = ofs->upper_mnt;
1153 int err; 1185 int err;
@@ -1166,6 +1198,11 @@ static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
1166 1198
1167 ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true); 1199 ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true);
1168 if (ofs->indexdir) { 1200 if (ofs->indexdir) {
1201 err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap,
1202 "indexdir");
1203 if (err)
1204 goto out;
1205
1169 /* 1206 /*
1170 * Verify upper root is exclusively associated with index dir. 1207 * Verify upper root is exclusively associated with index dir.
1171 * Older kernels stored upper fh in "trusted.overlay.origin" 1208 * Older kernels stored upper fh in "trusted.overlay.origin"
@@ -1253,8 +1290,8 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
1253 return ofs->numlowerfs; 1290 return ofs->numlowerfs;
1254} 1291}
1255 1292
1256static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack, 1293static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
1257 unsigned int numlower) 1294 struct path *stack, unsigned int numlower)
1258{ 1295{
1259 int err; 1296 int err;
1260 unsigned int i; 1297 unsigned int i;
@@ -1272,16 +1309,28 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
1272 1309
1273 for (i = 0; i < numlower; i++) { 1310 for (i = 0; i < numlower; i++) {
1274 struct vfsmount *mnt; 1311 struct vfsmount *mnt;
1312 struct inode *trap;
1275 int fsid; 1313 int fsid;
1276 1314
1277 err = fsid = ovl_get_fsid(ofs, &stack[i]); 1315 err = fsid = ovl_get_fsid(ofs, &stack[i]);
1278 if (err < 0) 1316 if (err < 0)
1279 goto out; 1317 goto out;
1280 1318
1319 err = -EBUSY;
1320 if (ovl_is_inuse(stack[i].dentry)) {
1321 pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
1322 goto out;
1323 }
1324
1325 err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
1326 if (err)
1327 goto out;
1328
1281 mnt = clone_private_mount(&stack[i]); 1329 mnt = clone_private_mount(&stack[i]);
1282 err = PTR_ERR(mnt); 1330 err = PTR_ERR(mnt);
1283 if (IS_ERR(mnt)) { 1331 if (IS_ERR(mnt)) {
1284 pr_err("overlayfs: failed to clone lowerpath\n"); 1332 pr_err("overlayfs: failed to clone lowerpath\n");
1333 iput(trap);
1285 goto out; 1334 goto out;
1286 } 1335 }
1287 1336
@@ -1291,6 +1340,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
1291 */ 1340 */
1292 mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME; 1341 mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
1293 1342
1343 ofs->lower_layers[ofs->numlower].trap = trap;
1294 ofs->lower_layers[ofs->numlower].mnt = mnt; 1344 ofs->lower_layers[ofs->numlower].mnt = mnt;
1295 ofs->lower_layers[ofs->numlower].idx = i + 1; 1345 ofs->lower_layers[ofs->numlower].idx = i + 1;
1296 ofs->lower_layers[ofs->numlower].fsid = fsid; 1346 ofs->lower_layers[ofs->numlower].fsid = fsid;
@@ -1385,7 +1435,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
1385 goto out_err; 1435 goto out_err;
1386 } 1436 }
1387 1437
1388 err = ovl_get_lower_layers(ofs, stack, numlower); 1438 err = ovl_get_lower_layers(sb, ofs, stack, numlower);
1389 if (err) 1439 if (err)
1390 goto out_err; 1440 goto out_err;
1391 1441
@@ -1417,6 +1467,85 @@ out_err:
1417 goto out; 1467 goto out;
1418} 1468}
1419 1469
1470/*
1471 * Check if this layer root is a descendant of:
1472 * - another layer of this overlayfs instance
1473 * - upper/work dir of any overlayfs instance
1474 * - a disconnected dentry (detached root)
1475 */
1476static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
1477 const char *name)
1478{
1479 struct dentry *next, *parent;
1480 bool is_root = false;
1481 int err = 0;
1482
1483 if (!dentry || dentry == dentry->d_sb->s_root)
1484 return 0;
1485
1486 next = dget(dentry);
1487 /* Walk back ancestors to fs root (inclusive) looking for traps */
1488 do {
1489 parent = dget_parent(next);
1490 is_root = (parent == next);
1491 if (ovl_is_inuse(parent)) {
1492 err = -EBUSY;
1493 pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
1494 name);
1495 } else if (ovl_lookup_trap_inode(sb, parent)) {
1496 err = -ELOOP;
1497 pr_err("overlayfs: overlapping %s path\n", name);
1498 }
1499 dput(next);
1500 next = parent;
1501 } while (!err && !is_root);
1502
1503 /* Did we really walk to fs root or found a detached root? */
1504 if (!err && next != dentry->d_sb->s_root) {
1505 err = -ESTALE;
1506 pr_err("overlayfs: disconnected %s path\n", name);
1507 }
1508
1509 dput(next);
1510
1511 return err;
1512}
1513
1514/*
1515 * Check if any of the layers or work dirs overlap.
1516 */
1517static int ovl_check_overlapping_layers(struct super_block *sb,
1518 struct ovl_fs *ofs)
1519{
1520 int i, err;
1521
1522 if (ofs->upper_mnt) {
1523 err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
1524 if (err)
1525 return err;
1526
1527 /*
1528 * Checking workbasedir avoids hitting ovl_is_inuse(parent) of
1529 * this instance and covers overlapping work and index dirs,
1530 * unless work or index dir have been moved since created inside
1531 * workbasedir. In that case, we already have their traps in
1532 * inode cache and we will catch that case on lookup.
1533 */
1534 err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
1535 if (err)
1536 return err;
1537 }
1538
1539 for (i = 0; i < ofs->numlower; i++) {
1540 err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
1541 "lowerdir");
1542 if (err)
1543 return err;
1544 }
1545
1546 return 0;
1547}
1548
1420static int ovl_fill_super(struct super_block *sb, void *data, int silent) 1549static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1421{ 1550{
1422 struct path upperpath = { }; 1551 struct path upperpath = { };
@@ -1456,17 +1585,20 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1456 if (ofs->config.xino != OVL_XINO_OFF) 1585 if (ofs->config.xino != OVL_XINO_OFF)
1457 ofs->xino_bits = BITS_PER_LONG - 32; 1586 ofs->xino_bits = BITS_PER_LONG - 32;
1458 1587
1588 /* alloc/destroy_inode needed for setting up traps in inode cache */
1589 sb->s_op = &ovl_super_operations;
1590
1459 if (ofs->config.upperdir) { 1591 if (ofs->config.upperdir) {
1460 if (!ofs->config.workdir) { 1592 if (!ofs->config.workdir) {
1461 pr_err("overlayfs: missing 'workdir'\n"); 1593 pr_err("overlayfs: missing 'workdir'\n");
1462 goto out_err; 1594 goto out_err;
1463 } 1595 }
1464 1596
1465 err = ovl_get_upper(ofs, &upperpath); 1597 err = ovl_get_upper(sb, ofs, &upperpath);
1466 if (err) 1598 if (err)
1467 goto out_err; 1599 goto out_err;
1468 1600
1469 err = ovl_get_workdir(ofs, &upperpath); 1601 err = ovl_get_workdir(sb, ofs, &upperpath);
1470 if (err) 1602 if (err)
1471 goto out_err; 1603 goto out_err;
1472 1604
@@ -1487,7 +1619,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1487 sb->s_flags |= SB_RDONLY; 1619 sb->s_flags |= SB_RDONLY;
1488 1620
1489 if (!(ovl_force_readonly(ofs)) && ofs->config.index) { 1621 if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
1490 err = ovl_get_indexdir(ofs, oe, &upperpath); 1622 err = ovl_get_indexdir(sb, ofs, oe, &upperpath);
1491 if (err) 1623 if (err)
1492 goto out_free_oe; 1624 goto out_free_oe;
1493 1625
@@ -1500,6 +1632,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1500 1632
1501 } 1633 }
1502 1634
1635 err = ovl_check_overlapping_layers(sb, ofs);
1636 if (err)
1637 goto out_free_oe;
1638
1503 /* Show index=off in /proc/mounts for forced r/o mount */ 1639 /* Show index=off in /proc/mounts for forced r/o mount */
1504 if (!ofs->indexdir) { 1640 if (!ofs->indexdir) {
1505 ofs->config.index = false; 1641 ofs->config.index = false;
@@ -1521,7 +1657,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1521 cap_lower(cred->cap_effective, CAP_SYS_RESOURCE); 1657 cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
1522 1658
1523 sb->s_magic = OVERLAYFS_SUPER_MAGIC; 1659 sb->s_magic = OVERLAYFS_SUPER_MAGIC;
1524 sb->s_op = &ovl_super_operations;
1525 sb->s_xattr = ovl_xattr_handlers; 1660 sb->s_xattr = ovl_xattr_handlers;
1526 sb->s_fs_info = ofs; 1661 sb->s_fs_info = ofs;
1527 sb->s_flags |= SB_POSIXACL; 1662 sb->s_flags |= SB_POSIXACL;
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 4035e640f402..e135064e87ad 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -652,6 +652,18 @@ void ovl_inuse_unlock(struct dentry *dentry)
652 } 652 }
653} 653}
654 654
655bool ovl_is_inuse(struct dentry *dentry)
656{
657 struct inode *inode = d_inode(dentry);
658 bool inuse;
659
660 spin_lock(&inode->i_lock);
661 inuse = (inode->i_state & I_OVL_INUSE);
662 spin_unlock(&inode->i_lock);
663
664 return inuse;
665}
666
655/* 667/*
656 * Does this overlay dentry need to be indexed on copy up? 668 * Does this overlay dentry need to be indexed on copy up?
657 */ 669 */
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 9f83686db0be..3d7024662d29 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -335,8 +335,10 @@ static void allocate_buf_for_compression(void)
335 335
336static void free_buf_for_compression(void) 336static void free_buf_for_compression(void)
337{ 337{
338 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) 338 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
339 crypto_free_comp(tfm); 339 crypto_free_comp(tfm);
340 tfm = NULL;
341 }
340 kfree(big_oops_buf); 342 kfree(big_oops_buf);
341 big_oops_buf = NULL; 343 big_oops_buf = NULL;
342 big_oops_buf_sz = 0; 344 big_oops_buf_sz = 0;
@@ -594,7 +596,8 @@ int pstore_register(struct pstore_info *psi)
594 return -EINVAL; 596 return -EINVAL;
595 } 597 }
596 598
597 allocate_buf_for_compression(); 599 if (psi->flags & PSTORE_FLAGS_DMESG)
600 allocate_buf_for_compression();
598 601
599 if (pstore_is_mounted()) 602 if (pstore_is_mounted())
600 pstore_get_records(0); 603 pstore_get_records(0);
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index ade964df5a68..5b7709894415 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -786,26 +786,36 @@ static int ramoops_probe(struct platform_device *pdev)
786 786
787 cxt->pstore.data = cxt; 787 cxt->pstore.data = cxt;
788 /* 788 /*
789 * Since bufsize is only used for dmesg crash dumps, it 789 * Prepare frontend flags based on which areas are initialized.
790 * must match the size of the dprz record (after PRZ header 790 * For ramoops_init_przs() cases, the "max count" variable tells
791 * and ECC bytes have been accounted for). 791 * if there are regions present. For ramoops_init_prz() cases,
792 * the single region size is how to check.
792 */ 793 */
793 cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size; 794 cxt->pstore.flags = 0;
794 cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL); 795 if (cxt->max_dump_cnt)
795 if (!cxt->pstore.buf) { 796 cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
796 pr_err("cannot allocate pstore crash dump buffer\n");
797 err = -ENOMEM;
798 goto fail_clear;
799 }
800
801 cxt->pstore.flags = PSTORE_FLAGS_DMESG;
802 if (cxt->console_size) 797 if (cxt->console_size)
803 cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE; 798 cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
804 if (cxt->ftrace_size) 799 if (cxt->max_ftrace_cnt)
805 cxt->pstore.flags |= PSTORE_FLAGS_FTRACE; 800 cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
806 if (cxt->pmsg_size) 801 if (cxt->pmsg_size)
807 cxt->pstore.flags |= PSTORE_FLAGS_PMSG; 802 cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
808 803
804 /*
805 * Since bufsize is only used for dmesg crash dumps, it
806 * must match the size of the dprz record (after PRZ header
807 * and ECC bytes have been accounted for).
808 */
809 if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
810 cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
811 cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
812 if (!cxt->pstore.buf) {
813 pr_err("cannot allocate pstore crash dump buffer\n");
814 err = -ENOMEM;
815 goto fail_clear;
816 }
817 }
818
809 err = pstore_register(&cxt->pstore); 819 err = pstore_register(&cxt->pstore);
810 if (err) { 820 if (err) {
811 pr_err("registering with pstore failed\n"); 821 pr_err("registering with pstore failed\n");
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 693eb51f5efb..9b47117180cb 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -252,7 +252,8 @@ xchk_iallocbt_check_cluster(
252 ir_holemask = (irec->ir_holemask & cluster_mask); 252 ir_holemask = (irec->ir_holemask & cluster_mask);
253 imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno); 253 imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
254 imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster); 254 imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
255 imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino); 255 imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
256 mp->m_sb.sb_inodelog;
256 257
257 if (imap.im_boffset != 0 && cluster_base != 0) { 258 if (imap.im_boffset != 0 && cluster_base != 0) {
258 ASSERT(imap.im_boffset == 0 || cluster_base == 0); 259 ASSERT(imap.im_boffset == 0 || cluster_base == 0);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 457ced3ee3e1..2466b0f5b6c4 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2069,7 +2069,7 @@ xlog_print_tic_res(
2069 2069
2070 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 2070 /* match with XLOG_REG_TYPE_* in xfs_log.h */
2071#define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str 2071#define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str
2072 static char *res_type_str[XLOG_REG_TYPE_MAX + 1] = { 2072 static char *res_type_str[] = {
2073 REG_TYPE_STR(BFORMAT, "bformat"), 2073 REG_TYPE_STR(BFORMAT, "bformat"),
2074 REG_TYPE_STR(BCHUNK, "bchunk"), 2074 REG_TYPE_STR(BCHUNK, "bchunk"),
2075 REG_TYPE_STR(EFI_FORMAT, "efi_format"), 2075 REG_TYPE_STR(EFI_FORMAT, "efi_format"),
@@ -2089,8 +2089,15 @@ xlog_print_tic_res(
2089 REG_TYPE_STR(UNMOUNT, "unmount"), 2089 REG_TYPE_STR(UNMOUNT, "unmount"),
2090 REG_TYPE_STR(COMMIT, "commit"), 2090 REG_TYPE_STR(COMMIT, "commit"),
2091 REG_TYPE_STR(TRANSHDR, "trans header"), 2091 REG_TYPE_STR(TRANSHDR, "trans header"),
2092 REG_TYPE_STR(ICREATE, "inode create") 2092 REG_TYPE_STR(ICREATE, "inode create"),
2093 REG_TYPE_STR(RUI_FORMAT, "rui_format"),
2094 REG_TYPE_STR(RUD_FORMAT, "rud_format"),
2095 REG_TYPE_STR(CUI_FORMAT, "cui_format"),
2096 REG_TYPE_STR(CUD_FORMAT, "cud_format"),
2097 REG_TYPE_STR(BUI_FORMAT, "bui_format"),
2098 REG_TYPE_STR(BUD_FORMAT, "bud_format"),
2093 }; 2099 };
2100 BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1);
2094#undef REG_TYPE_STR 2101#undef REG_TYPE_STR
2095 2102
2096 xfs_warn(mp, "ticket reservation summary:"); 2103 xfs_warn(mp, "ticket reservation summary:");
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index f9c94c2a1364..f7bbd0b0ecd1 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1185,6 +1185,14 @@ struct drm_plane_helper_funcs {
1185 * current one with the new plane configurations in the new 1185 * current one with the new plane configurations in the new
1186 * plane_state. 1186 * plane_state.
1187 * 1187 *
1188 * Drivers should also swap the framebuffers between current plane
1189 * state (&drm_plane.state) and new_state.
1190 * This is required since cleanup for async commits is performed on
1191 * the new state, rather than old state like for traditional commits.
1192 * Since we want to give up the reference on the current (old) fb
1193 * instead of our brand new one, swap them in the driver during the
1194 * async commit.
1195 *
1188 * FIXME: 1196 * FIXME:
1189 * - It only works for single plane updates 1197 * - It only works for single plane updates
1190 * - Async Pageflips are not supported yet 1198 * - Async Pageflips are not supported yet
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 11e215d7937e..d71b079bb021 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -106,6 +106,8 @@ enum {
106 CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ 106 CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
107 CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ 107 CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */
108 108
109 CFTYPE_SYMLINKED = (1 << 6), /* pointed to by symlink too */
110
109 /* internal flags, do not use outside cgroup core proper */ 111 /* internal flags, do not use outside cgroup core proper */
110 __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ 112 __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
111 __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ 113 __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
@@ -543,6 +545,7 @@ struct cftype {
543 * end of cftype array. 545 * end of cftype array.
544 */ 546 */
545 char name[MAX_CFTYPE_NAME]; 547 char name[MAX_CFTYPE_NAME];
548 char link_name[MAX_CFTYPE_NAME];
546 unsigned long private; 549 unsigned long private;
547 550
548 /* 551 /*
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 3813fe45effd..fcb1386bb0d4 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -201,10 +201,14 @@ enum cpuhp_smt_control {
201extern enum cpuhp_smt_control cpu_smt_control; 201extern enum cpuhp_smt_control cpu_smt_control;
202extern void cpu_smt_disable(bool force); 202extern void cpu_smt_disable(bool force);
203extern void cpu_smt_check_topology(void); 203extern void cpu_smt_check_topology(void);
204extern int cpuhp_smt_enable(void);
205extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
204#else 206#else
205# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED) 207# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
206static inline void cpu_smt_disable(bool force) { } 208static inline void cpu_smt_disable(bool force) { }
207static inline void cpu_smt_check_topology(void) { } 209static inline void cpu_smt_check_topology(void) { }
210static inline int cpuhp_smt_enable(void) { return 0; }
211static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
208#endif 212#endif
209 213
210/* 214/*
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 603a02e5a8cb..e46e18c47d41 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -20,18 +20,6 @@
20#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull 20#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull
21#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull 21#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull
22 22
23enum sja1105_frame_type {
24 SJA1105_FRAME_TYPE_NORMAL = 0,
25 SJA1105_FRAME_TYPE_LINK_LOCAL,
26};
27
28struct sja1105_skb_cb {
29 enum sja1105_frame_type type;
30};
31
32#define SJA1105_SKB_CB(skb) \
33 ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
34
35struct sja1105_port { 23struct sja1105_port {
36 struct dsa_port *dp; 24 struct dsa_port *dp;
37 int mgmt_slot; 25 int mgmt_slot;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0e8834ac32b7..dd0b5f4e1e45 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -99,6 +99,17 @@ extern int mmap_rnd_compat_bits __read_mostly;
99#include <asm/pgtable.h> 99#include <asm/pgtable.h>
100#include <asm/processor.h> 100#include <asm/processor.h>
101 101
102/*
103 * Architectures that support memory tagging (assigning tags to memory regions,
104 * embedding these tags into addresses that point to these memory regions, and
105 * checking that the memory and the pointer tags match on memory accesses)
106 * redefine this macro to strip tags from pointers.
 107 * It's defined as a no-op for architectures that don't support memory tagging.
108 */
109#ifndef untagged_addr
110#define untagged_addr(addr) (addr)
111#endif
112
102#ifndef __pa_symbol 113#ifndef __pa_symbol
103#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 114#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
104#endif 115#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 922bb6848813..b25d20822e75 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -56,14 +56,12 @@ void __rcu_read_unlock(void);
56 56
57static inline void __rcu_read_lock(void) 57static inline void __rcu_read_lock(void)
58{ 58{
59 if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) 59 preempt_disable();
60 preempt_disable();
61} 60}
62 61
63static inline void __rcu_read_unlock(void) 62static inline void __rcu_read_unlock(void)
64{ 63{
65 if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) 64 preempt_enable();
66 preempt_enable();
67} 65}
68 66
69static inline int rcu_preempt_depth(void) 67static inline int rcu_preempt_depth(void)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 4a2ffd678887..8594001e8be8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -227,11 +227,42 @@ static inline void pm_set_resume_via_firmware(void)
227 pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME; 227 pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME;
228} 228}
229 229
230/**
231 * pm_suspend_via_firmware - Check if platform firmware will suspend the system.
232 *
233 * To be called during system-wide power management transitions to sleep states
234 * or during the subsequent system-wide transitions back to the working state.
235 *
236 * Return 'true' if the platform firmware is going to be invoked at the end of
237 * the system-wide power management transition (to a sleep state) in progress in
238 * order to complete it, or if the platform firmware has been invoked in order
239 * to complete the last (or preceding) transition of the system to a sleep
240 * state.
241 *
242 * This matters if the caller needs or wants to carry out some special actions
243 * depending on whether or not control will be passed to the platform firmware
244 * subsequently (for example, the device may need to be reset before letting the
245 * platform firmware manipulate it, which is not necessary when the platform
246 * firmware is not going to be invoked) or when such special actions may have
247 * been carried out during the preceding transition of the system to a sleep
248 * state (as they may need to be taken into account).
249 */
230static inline bool pm_suspend_via_firmware(void) 250static inline bool pm_suspend_via_firmware(void)
231{ 251{
232 return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND); 252 return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND);
233} 253}
234 254
255/**
256 * pm_resume_via_firmware - Check if platform firmware has woken up the system.
257 *
258 * To be called during system-wide power management transitions from sleep
259 * states.
260 *
261 * Return 'true' if the platform firmware has passed control to the kernel at
262 * the beginning of the system-wide power management transition in progress, so
263 * the event that woke up the system from sleep has been handled by the platform
264 * firmware.
265 */
235static inline bool pm_resume_via_firmware(void) 266static inline bool pm_resume_via_firmware(void)
236{ 267{
237 return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME); 268 return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME);
diff --git a/include/math-emu/op-2.h b/include/math-emu/op-2.h
index 13a374f51a22..244522b02076 100644
--- a/include/math-emu/op-2.h
+++ b/include/math-emu/op-2.h
@@ -567,16 +567,13 @@
567 */ 567 */
568 568
569#define _FP_FRAC_ASSEMBLE_2(r, X, rsize) \ 569#define _FP_FRAC_ASSEMBLE_2(r, X, rsize) \
570 do { \ 570 (void) (((rsize) <= _FP_W_TYPE_SIZE) \
571 if (rsize <= _FP_W_TYPE_SIZE) \ 571 ? ({ (r) = X##_f0; }) \
572 r = X##_f0; \ 572 : ({ \
573 else \ 573 (r) = X##_f1; \
574 { \ 574 (r) <<= _FP_W_TYPE_SIZE; \
575 r = X##_f1; \ 575 (r) += X##_f0; \
576 r <<= _FP_W_TYPE_SIZE; \ 576 }))
577 r += X##_f0; \
578 } \
579 } while (0)
580 577
581#define _FP_FRAC_DISASSEMBLE_2(X, r, rsize) \ 578#define _FP_FRAC_DISASSEMBLE_2(X, r, rsize) \
582 do { \ 579 do { \
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index 6bdf8c61d221..f37d12877754 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -795,11 +795,12 @@ do { \
795 ur_ = (unsigned rtype) -r; \ 795 ur_ = (unsigned rtype) -r; \
796 else \ 796 else \
797 ur_ = (unsigned rtype) r; \ 797 ur_ = (unsigned rtype) r; \
798 if (rsize <= _FP_W_TYPE_SIZE) \ 798 (void) (((rsize) <= _FP_W_TYPE_SIZE) \
799 __FP_CLZ(X##_e, ur_); \ 799 ? ({ __FP_CLZ(X##_e, ur_); }) \
800 else \ 800 : ({ \
801 __FP_CLZ_2(X##_e, (_FP_W_TYPE)(ur_ >> _FP_W_TYPE_SIZE), \ 801 __FP_CLZ_2(X##_e, (_FP_W_TYPE)(ur_ >> _FP_W_TYPE_SIZE), \
802 (_FP_W_TYPE)ur_); \ 802 (_FP_W_TYPE)ur_); \
803 })); \
803 if (rsize < _FP_W_TYPE_SIZE) \ 804 if (rsize < _FP_W_TYPE_SIZE) \
804 X##_e -= (_FP_W_TYPE_SIZE - rsize); \ 805 X##_e -= (_FP_W_TYPE_SIZE - rsize); \
805 X##_e = rsize - X##_e - 1; \ 806 X##_e = rsize - X##_e - 1; \
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3fbc9894a39a..855b352b660f 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -259,8 +259,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
259 rcu_read_lock(); 259 rcu_read_lock();
260 260
261 from = rcu_dereference(rt->from); 261 from = rcu_dereference(rt->from);
262 if (from && (rt->rt6i_flags & RTF_PCPU || 262 if (from)
263 unlikely(!list_empty(&rt->rt6i_uncached))))
264 fib6_get_cookie_safe(from, &cookie); 263 fib6_get_cookie_safe(from, &cookie);
265 264
266 rcu_read_unlock(); 265 rcu_read_unlock();
diff --git a/include/net/tls.h b/include/net/tls.h
index 39ea62f0c1f6..4a55ce6a303f 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -209,6 +209,10 @@ struct tls_offload_context_tx {
209 (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \ 209 (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
210 TLS_DRIVER_STATE_SIZE) 210 TLS_DRIVER_STATE_SIZE)
211 211
212enum tls_context_flags {
213 TLS_RX_SYNC_RUNNING = 0,
214};
215
212struct cipher_context { 216struct cipher_context {
213 char *iv; 217 char *iv;
214 char *rec_seq; 218 char *rec_seq;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0742095355f2..54873085f2da 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2698,6 +2698,7 @@ struct ib_client {
2698 const char *name; 2698 const char *name;
2699 void (*add) (struct ib_device *); 2699 void (*add) (struct ib_device *);
2700 void (*remove)(struct ib_device *, void *client_data); 2700 void (*remove)(struct ib_device *, void *client_data);
2701 void (*rename)(struct ib_device *dev, void *client_data);
2701 2702
2702 /* Returns the net_dev belonging to this ib_client and matching the 2703 /* Returns the net_dev belonging to this ib_client and matching the
2703 * given parameters. 2704 * given parameters.
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 19fb55e3c73e..2971d29a42e4 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -130,6 +130,9 @@
130 * 7.30 130 * 7.30
131 * - add FUSE_EXPLICIT_INVAL_DATA 131 * - add FUSE_EXPLICIT_INVAL_DATA
132 * - add FUSE_IOCTL_COMPAT_X32 132 * - add FUSE_IOCTL_COMPAT_X32
133 *
134 * 7.31
135 * - add FUSE_WRITE_KILL_PRIV flag
133 */ 136 */
134 137
135#ifndef _LINUX_FUSE_H 138#ifndef _LINUX_FUSE_H
@@ -165,7 +168,7 @@
165#define FUSE_KERNEL_VERSION 7 168#define FUSE_KERNEL_VERSION 7
166 169
167/** Minor version number of this interface */ 170/** Minor version number of this interface */
168#define FUSE_KERNEL_MINOR_VERSION 30 171#define FUSE_KERNEL_MINOR_VERSION 31
169 172
170/** The node ID of the root inode */ 173/** The node ID of the root inode */
171#define FUSE_ROOT_ID 1 174#define FUSE_ROOT_ID 1
@@ -327,9 +330,11 @@ struct fuse_file_lock {
327 * 330 *
328 * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed 331 * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed
329 * FUSE_WRITE_LOCKOWNER: lock_owner field is valid 332 * FUSE_WRITE_LOCKOWNER: lock_owner field is valid
333 * FUSE_WRITE_KILL_PRIV: kill suid and sgid bits
330 */ 334 */
331#define FUSE_WRITE_CACHE (1 << 0) 335#define FUSE_WRITE_CACHE (1 << 0)
332#define FUSE_WRITE_LOCKOWNER (1 << 1) 336#define FUSE_WRITE_LOCKOWNER (1 << 1)
337#define FUSE_WRITE_KILL_PRIV (1 << 2)
333 338
334/** 339/**
335 * Read flags 340 * Read flags
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 8ac292cf4d00..204ab9b4ae67 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -413,6 +413,10 @@ struct hl_debug_params_spmu {
413#define HL_DEBUG_OP_SPMU 5 413#define HL_DEBUG_OP_SPMU 5
414/* Opcode for timestamp */ 414/* Opcode for timestamp */
415#define HL_DEBUG_OP_TIMESTAMP 6 415#define HL_DEBUG_OP_TIMESTAMP 6
416/* Opcode for setting the device into or out of debug mode. The enable
417 * variable should be 1 for enabling debug mode and 0 for disabling it
418 */
419#define HL_DEBUG_OP_SET_MODE 7
416 420
417struct hl_debug_args { 421struct hl_debug_args {
418 /* 422 /*
@@ -574,8 +578,22 @@ struct hl_debug_args {
574 * 578 *
575 * This IOCTL allows the user to get debug traces from the chip. 579 * This IOCTL allows the user to get debug traces from the chip.
576 * 580 *
577 * The user needs to provide the register index and essential data such as 581 * Before the user can send configuration requests of the various
578 * buffer address and size. 582 * debug/profile engines, it needs to set the device into debug mode.
 583 * This is because the debug/profile infrastructure is a shared component in the
584 * device and we can't allow multiple users to access it at the same time.
585 *
 586 * Once a user sets the device into debug mode, the driver won't allow other
587 * users to "work" with the device, i.e. open a FD. If there are multiple users
588 * opened on the device, the driver won't allow any user to debug the device.
589 *
590 * For each configuration request, the user needs to provide the register index
591 * and essential data such as buffer address and size.
592 *
593 * Once the user has finished using the debug/profile engines, he should
594 * set the device into non-debug mode, i.e. disable debug mode.
595 *
596 * The driver can decide to "kick out" the user if he abuses this interface.
579 * 597 *
580 */ 598 */
581#define HL_IOCTL_DEBUG \ 599#define HL_IOCTL_DEBUG \
diff --git a/init/Kconfig b/init/Kconfig
index 36894c9fb420..0e2344389501 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -580,15 +580,14 @@ config IKCONFIG_PROC
580 This option enables access to the kernel configuration file 580 This option enables access to the kernel configuration file
581 through /proc/config.gz. 581 through /proc/config.gz.
582 582
583config IKHEADERS_PROC 583config IKHEADERS
584 tristate "Enable kernel header artifacts through /proc/kheaders.tar.xz" 584 tristate "Enable kernel headers through /sys/kernel/kheaders.tar.xz"
585 depends on PROC_FS 585 depends on SYSFS
586 help 586 help
587 This option enables access to the kernel header and other artifacts that 587 This option enables access to the in-kernel headers that are generated during
588 are generated during the build process. These can be used to build eBPF 588 the build process. These can be used to build eBPF tracing programs,
589 tracing programs, or similar programs. If you build the headers as a 589 or similar programs. If you build the headers as a module, a module called
590 module, a module called kheaders.ko is built which can be loaded on-demand 590 kheaders.ko is built which can be loaded on-demand to get access to headers.
591 to get access to the headers.
592 591
593config LOG_BUF_SHIFT 592config LOG_BUF_SHIFT
594 int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" 593 int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
diff --git a/kernel/Makefile b/kernel/Makefile
index 33824f0385b3..a8d923b5481b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -71,7 +71,7 @@ obj-$(CONFIG_UTS_NS) += utsname.o
71obj-$(CONFIG_USER_NS) += user_namespace.o 71obj-$(CONFIG_USER_NS) += user_namespace.o
72obj-$(CONFIG_PID_NS) += pid_namespace.o 72obj-$(CONFIG_PID_NS) += pid_namespace.o
73obj-$(CONFIG_IKCONFIG) += configs.o 73obj-$(CONFIG_IKCONFIG) += configs.o
74obj-$(CONFIG_IKHEADERS_PROC) += kheaders.o 74obj-$(CONFIG_IKHEADERS) += kheaders.o
75obj-$(CONFIG_SMP) += stop_machine.o 75obj-$(CONFIG_SMP) += stop_machine.o
76obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o 76obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
77obj-$(CONFIG_AUDIT) += audit.o auditfilter.o 77obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
@@ -127,7 +127,7 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
127$(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz 127$(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
128 128
129quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz 129quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz
130cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_ikh_data.sh $@ 130cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@
131$(obj)/kheaders_data.tar.xz: FORCE 131$(obj)/kheaders_data.tar.xz: FORCE
132 $(call cmd,genikh) 132 $(call cmd,genikh)
133 133
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 426a0026225c..155048b0eca2 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1460,8 +1460,8 @@ struct cgroup *task_cgroup_from_root(struct task_struct *task,
1460 1460
1461static struct kernfs_syscall_ops cgroup_kf_syscall_ops; 1461static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
1462 1462
1463static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, 1463static char *cgroup_fill_name(struct cgroup *cgrp, const struct cftype *cft,
1464 char *buf) 1464 char *buf, bool write_link_name)
1465{ 1465{
1466 struct cgroup_subsys *ss = cft->ss; 1466 struct cgroup_subsys *ss = cft->ss;
1467 1467
@@ -1471,13 +1471,26 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1471 1471
1472 snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s", 1472 snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
1473 dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name, 1473 dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1474 cft->name); 1474 write_link_name ? cft->link_name : cft->name);
1475 } else { 1475 } else {
1476 strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX); 1476 strscpy(buf, write_link_name ? cft->link_name : cft->name,
1477 CGROUP_FILE_NAME_MAX);
1477 } 1478 }
1478 return buf; 1479 return buf;
1479} 1480}
1480 1481
1482static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1483 char *buf)
1484{
1485 return cgroup_fill_name(cgrp, cft, buf, false);
1486}
1487
1488static char *cgroup_link_name(struct cgroup *cgrp, const struct cftype *cft,
1489 char *buf)
1490{
1491 return cgroup_fill_name(cgrp, cft, buf, true);
1492}
1493
1481/** 1494/**
1482 * cgroup_file_mode - deduce file mode of a control file 1495 * cgroup_file_mode - deduce file mode of a control file
1483 * @cft: the control file in question 1496 * @cft: the control file in question
@@ -1636,6 +1649,9 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1636 } 1649 }
1637 1650
1638 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name)); 1651 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1652 if (cft->flags & CFTYPE_SYMLINKED)
1653 kernfs_remove_by_name(cgrp->kn,
1654 cgroup_link_name(cgrp, cft, name));
1639} 1655}
1640 1656
1641/** 1657/**
@@ -3821,6 +3837,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
3821{ 3837{
3822 char name[CGROUP_FILE_NAME_MAX]; 3838 char name[CGROUP_FILE_NAME_MAX];
3823 struct kernfs_node *kn; 3839 struct kernfs_node *kn;
3840 struct kernfs_node *kn_link;
3824 struct lock_class_key *key = NULL; 3841 struct lock_class_key *key = NULL;
3825 int ret; 3842 int ret;
3826 3843
@@ -3851,6 +3868,14 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
3851 spin_unlock_irq(&cgroup_file_kn_lock); 3868 spin_unlock_irq(&cgroup_file_kn_lock);
3852 } 3869 }
3853 3870
3871 if (cft->flags & CFTYPE_SYMLINKED) {
3872 kn_link = kernfs_create_link(cgrp->kn,
3873 cgroup_link_name(cgrp, cft, name),
3874 kn);
3875 if (IS_ERR(kn_link))
3876 return PTR_ERR(kn_link);
3877 }
3878
3854 return 0; 3879 return 0;
3855} 3880}
3856 3881
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f2ef10460698..077fde6fb953 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2061,7 +2061,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
2061 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2061 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2062} 2062}
2063 2063
2064static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) 2064int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2065{ 2065{
2066 int cpu, ret = 0; 2066 int cpu, ret = 0;
2067 2067
@@ -2093,7 +2093,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2093 return ret; 2093 return ret;
2094} 2094}
2095 2095
2096static int cpuhp_smt_enable(void) 2096int cpuhp_smt_enable(void)
2097{ 2097{
2098 int cpu, ret = 0; 2098 int cpu, ret = 0;
2099 2099
diff --git a/kernel/gen_ikh_data.sh b/kernel/gen_kheaders.sh
index 591a94f7b387..9a34e1d9bd7f 100755
--- a/kernel/gen_ikh_data.sh
+++ b/kernel/gen_kheaders.sh
@@ -2,7 +2,7 @@
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4# This script generates an archive consisting of kernel headers 4# This script generates an archive consisting of kernel headers
5# for CONFIG_IKHEADERS_PROC. 5# for CONFIG_IKHEADERS.
6set -e 6set -e
7spath="$(dirname "$(readlink -f "$0")")" 7spath="$(dirname "$(readlink -f "$0")")"
8kroot="$spath/.." 8kroot="$spath/.."
@@ -31,9 +31,8 @@ arch/$SRCARCH/include/
31 31
32# This block is useful for debugging the incremental builds. 32# This block is useful for debugging the incremental builds.
33# Uncomment it for debugging. 33# Uncomment it for debugging.
34# iter=1 34# if [ ! -f /tmp/iter ]; then iter=1; echo 1 > /tmp/iter;
35# if [ ! -f /tmp/iter ]; then echo 1 > /tmp/iter; 35# else iter=$(($(cat /tmp/iter) + 1)); echo $iter > /tmp/iter; fi
36# else; iter=$(($(cat /tmp/iter) + 1)); fi
37# find $src_file_list -type f | xargs ls -lR > /tmp/src-ls-$iter 36# find $src_file_list -type f | xargs ls -lR > /tmp/src-ls-$iter
38# find $obj_file_list -type f | xargs ls -lR > /tmp/obj-ls-$iter 37# find $obj_file_list -type f | xargs ls -lR > /tmp/obj-ls-$iter
39 38
@@ -43,10 +42,18 @@ arch/$SRCARCH/include/
43pushd $kroot > /dev/null 42pushd $kroot > /dev/null
44src_files_md5="$(find $src_file_list -type f | 43src_files_md5="$(find $src_file_list -type f |
45 grep -v "include/generated/compile.h" | 44 grep -v "include/generated/compile.h" |
45 grep -v "include/generated/autoconf.h" |
46 grep -v "include/config/auto.conf" |
47 grep -v "include/config/auto.conf.cmd" |
48 grep -v "include/config/tristate.conf" |
46 xargs ls -lR | md5sum | cut -d ' ' -f1)" 49 xargs ls -lR | md5sum | cut -d ' ' -f1)"
47popd > /dev/null 50popd > /dev/null
48obj_files_md5="$(find $obj_file_list -type f | 51obj_files_md5="$(find $obj_file_list -type f |
49 grep -v "include/generated/compile.h" | 52 grep -v "include/generated/compile.h" |
53 grep -v "include/generated/autoconf.h" |
54 grep -v "include/config/auto.conf" |
55 grep -v "include/config/auto.conf.cmd" |
56 grep -v "include/config/tristate.conf" |
50 xargs ls -lR | md5sum | cut -d ' ' -f1)" 57 xargs ls -lR | md5sum | cut -d ' ' -f1)"
51 58
52if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi 59if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi
@@ -82,7 +89,7 @@ find $cpio_dir -type f -print0 |
82 89
83tar -Jcf $tarfile -C $cpio_dir/ . > /dev/null 90tar -Jcf $tarfile -C $cpio_dir/ . > /dev/null
84 91
85echo "$src_files_md5" > kernel/kheaders.md5 92echo "$src_files_md5" > kernel/kheaders.md5
86echo "$obj_files_md5" >> kernel/kheaders.md5 93echo "$obj_files_md5" >> kernel/kheaders.md5
87echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5 94echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5
88 95
diff --git a/kernel/kheaders.c b/kernel/kheaders.c
index 70ae6052920d..8f69772af77b 100644
--- a/kernel/kheaders.c
+++ b/kernel/kheaders.c
@@ -8,9 +8,8 @@
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/proc_fs.h> 11#include <linux/kobject.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/uaccess.h>
14 13
15/* 14/*
16 * Define kernel_headers_data and kernel_headers_data_end, within which the 15 * Define kernel_headers_data and kernel_headers_data_end, within which the
@@ -31,39 +30,32 @@ extern char kernel_headers_data;
31extern char kernel_headers_data_end; 30extern char kernel_headers_data_end;
32 31
33static ssize_t 32static ssize_t
34ikheaders_read_current(struct file *file, char __user *buf, 33ikheaders_read(struct file *file, struct kobject *kobj,
35 size_t len, loff_t *offset) 34 struct bin_attribute *bin_attr,
35 char *buf, loff_t off, size_t len)
36{ 36{
37 return simple_read_from_buffer(buf, len, offset, 37 memcpy(buf, &kernel_headers_data + off, len);
38 &kernel_headers_data, 38 return len;
39 &kernel_headers_data_end -
40 &kernel_headers_data);
41} 39}
42 40
43static const struct file_operations ikheaders_file_ops = { 41static struct bin_attribute kheaders_attr __ro_after_init = {
44 .read = ikheaders_read_current, 42 .attr = {
45 .llseek = default_llseek, 43 .name = "kheaders.tar.xz",
44 .mode = 0444,
45 },
46 .read = &ikheaders_read,
46}; 47};
47 48
48static int __init ikheaders_init(void) 49static int __init ikheaders_init(void)
49{ 50{
50 struct proc_dir_entry *entry; 51 kheaders_attr.size = (&kernel_headers_data_end -
51 52 &kernel_headers_data);
52 /* create the current headers file */ 53 return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
53 entry = proc_create("kheaders.tar.xz", S_IRUGO, NULL,
54 &ikheaders_file_ops);
55 if (!entry)
56 return -ENOMEM;
57
58 proc_set_size(entry,
59 &kernel_headers_data_end -
60 &kernel_headers_data);
61 return 0;
62} 54}
63 55
64static void __exit ikheaders_cleanup(void) 56static void __exit ikheaders_cleanup(void)
65{ 57{
66 remove_proc_entry("kheaders.tar.xz", NULL); 58 sysfs_remove_bin_file(kernel_kobj, &kheaders_attr);
67} 59}
68 60
69module_init(ikheaders_init); 61module_init(ikheaders_init);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8fc054e5c501..cd7434e6000d 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -256,6 +256,11 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
256 (kps % 1000) / 10); 256 (kps % 1000) / 10);
257} 257}
258 258
259__weak int arch_resume_nosmt(void)
260{
261 return 0;
262}
263
259/** 264/**
260 * create_image - Create a hibernation image. 265 * create_image - Create a hibernation image.
261 * @platform_mode: Whether or not to use the platform driver. 266 * @platform_mode: Whether or not to use the platform driver.
@@ -323,6 +328,10 @@ static int create_image(int platform_mode)
323 Enable_cpus: 328 Enable_cpus:
324 suspend_enable_secondary_cpus(); 329 suspend_enable_secondary_cpus();
325 330
331 /* Allow architectures to do nosmt-specific post-resume dances */
332 if (!in_suspend)
333 error = arch_resume_nosmt();
334
326 Platform_finish: 335 Platform_finish:
327 platform_finish(platform_mode); 336 platform_finish(platform_mode);
328 337
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 3c57e206f3e8..9505101ed2bc 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -61,6 +61,12 @@ static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
61enum s2idle_states __read_mostly s2idle_state; 61enum s2idle_states __read_mostly s2idle_state;
62static DEFINE_RAW_SPINLOCK(s2idle_lock); 62static DEFINE_RAW_SPINLOCK(s2idle_lock);
63 63
64/**
65 * pm_suspend_via_s2idle - Check if suspend-to-idle is the default suspend.
66 *
67 * Return 'true' if suspend-to-idle has been selected as the default system
68 * suspend method.
69 */
64bool pm_suspend_via_s2idle(void) 70bool pm_suspend_via_s2idle(void)
65{ 71{
66 return mem_sleep_current == PM_SUSPEND_TO_IDLE; 72 return mem_sleep_current == PM_SUSPEND_TO_IDLE;
diff --git a/kernel/signal.c b/kernel/signal.c
index 328a01e1a2f0..d622eac9d169 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3621,12 +3621,11 @@ static struct pid *pidfd_to_pid(const struct file *file)
3621} 3621}
3622 3622
3623/** 3623/**
3624 * sys_pidfd_send_signal - send a signal to a process through a task file 3624 * sys_pidfd_send_signal - Signal a process through a pidfd
3625 * descriptor 3625 * @pidfd: file descriptor of the process
3626 * @pidfd: the file descriptor of the process 3626 * @sig: signal to send
3627 * @sig: signal to be sent 3627 * @info: signal info
3628 * @info: the signal info 3628 * @flags: future flags
3629 * @flags: future flags to be passed
3630 * 3629 *
3631 * The syscall currently only signals via PIDTYPE_PID which covers 3630 * The syscall currently only signals via PIDTYPE_PID which covers
3632 * kill(<positive-pid>, <signal>. It does not signal threads or process 3631 * kill(<positive-pid>, <signal>. It does not signal threads or process
diff --git a/lib/lockref.c b/lib/lockref.c
index 3d468b53d4c9..5b34bbd3eba8 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -9,6 +9,7 @@
9 * failure case. 9 * failure case.
10 */ 10 */
11#define CMPXCHG_LOOP(CODE, SUCCESS) do { \ 11#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
12 int retry = 100; \
12 struct lockref old; \ 13 struct lockref old; \
13 BUILD_BUG_ON(sizeof(old) != 8); \ 14 BUILD_BUG_ON(sizeof(old) != 8); \
14 old.lock_count = READ_ONCE(lockref->lock_count); \ 15 old.lock_count = READ_ONCE(lockref->lock_count); \
@@ -21,6 +22,8 @@
21 if (likely(old.lock_count == prev.lock_count)) { \ 22 if (likely(old.lock_count == prev.lock_count)) { \
22 SUCCESS; \ 23 SUCCESS; \
23 } \ 24 } \
25 if (!--retry) \
26 break; \
24 cpu_relax(); \ 27 cpu_relax(); \
25 } \ 28 } \
26} while (0) 29} while (0)
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 787c146eb485..83ea6c4e623c 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -224,30 +224,30 @@ static ssize_t config_show(struct device *dev,
224 224
225 mutex_lock(&test_fw_mutex); 225 mutex_lock(&test_fw_mutex);
226 226
227 len += snprintf(buf, PAGE_SIZE, 227 len += scnprintf(buf, PAGE_SIZE - len,
228 "Custom trigger configuration for: %s\n", 228 "Custom trigger configuration for: %s\n",
229 dev_name(dev)); 229 dev_name(dev));
230 230
231 if (test_fw_config->name) 231 if (test_fw_config->name)
232 len += snprintf(buf+len, PAGE_SIZE, 232 len += scnprintf(buf+len, PAGE_SIZE - len,
233 "name:\t%s\n", 233 "name:\t%s\n",
234 test_fw_config->name); 234 test_fw_config->name);
235 else 235 else
236 len += snprintf(buf+len, PAGE_SIZE, 236 len += scnprintf(buf+len, PAGE_SIZE - len,
237 "name:\tEMTPY\n"); 237 "name:\tEMTPY\n");
238 238
239 len += snprintf(buf+len, PAGE_SIZE, 239 len += scnprintf(buf+len, PAGE_SIZE - len,
240 "num_requests:\t%u\n", test_fw_config->num_requests); 240 "num_requests:\t%u\n", test_fw_config->num_requests);
241 241
242 len += snprintf(buf+len, PAGE_SIZE, 242 len += scnprintf(buf+len, PAGE_SIZE - len,
243 "send_uevent:\t\t%s\n", 243 "send_uevent:\t\t%s\n",
244 test_fw_config->send_uevent ? 244 test_fw_config->send_uevent ?
245 "FW_ACTION_HOTPLUG" : 245 "FW_ACTION_HOTPLUG" :
246 "FW_ACTION_NOHOTPLUG"); 246 "FW_ACTION_NOHOTPLUG");
247 len += snprintf(buf+len, PAGE_SIZE, 247 len += scnprintf(buf+len, PAGE_SIZE - len,
248 "sync_direct:\t\t%s\n", 248 "sync_direct:\t\t%s\n",
249 test_fw_config->sync_direct ? "true" : "false"); 249 test_fw_config->sync_direct ? "true" : "false");
250 len += snprintf(buf+len, PAGE_SIZE, 250 len += scnprintf(buf+len, PAGE_SIZE - len,
251 "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx); 251 "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
252 252
253 mutex_unlock(&test_fw_mutex); 253 mutex_unlock(&test_fw_mutex);
diff --git a/net/core/dev.c b/net/core/dev.c
index 140858d4a048..eb7fb6daa1ef 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5021,12 +5021,12 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5021 if (list_empty(head)) 5021 if (list_empty(head))
5022 return; 5022 return;
5023 if (pt_prev->list_func != NULL) 5023 if (pt_prev->list_func != NULL)
5024 pt_prev->list_func(head, pt_prev, orig_dev); 5024 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5025 ip_list_rcv, head, pt_prev, orig_dev);
5025 else 5026 else
5026 list_for_each_entry_safe(skb, next, head, list) { 5027 list_for_each_entry_safe(skb, next, head, list) {
5027 skb_list_del_init(skb); 5028 skb_list_del_init(skb);
5028 INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5029 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5029 skb->dev, pt_prev, orig_dev);
5030 } 5030 }
5031} 5031}
5032 5032
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6dadeff8d39a..d08b1e19ce9c 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1355,13 +1355,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1355 if (!regbuf) 1355 if (!regbuf)
1356 return -ENOMEM; 1356 return -ENOMEM;
1357 1357
1358 if (regs.len < reglen)
1359 reglen = regs.len;
1360
1358 ops->get_regs(dev, &regs, regbuf); 1361 ops->get_regs(dev, &regs, regbuf);
1359 1362
1360 ret = -EFAULT; 1363 ret = -EFAULT;
1361 if (copy_to_user(useraddr, &regs, sizeof(regs))) 1364 if (copy_to_user(useraddr, &regs, sizeof(regs)))
1362 goto out; 1365 goto out;
1363 useraddr += offsetof(struct ethtool_regs, data); 1366 useraddr += offsetof(struct ethtool_regs, data);
1364 if (regbuf && copy_to_user(useraddr, regbuf, regs.len)) 1367 if (copy_to_user(useraddr, regbuf, reglen))
1365 goto out; 1368 goto out;
1366 ret = 0; 1369 ret = 0;
1367 1370
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1d1a2483097d..dd220ce7ca7a 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -754,9 +754,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
754 if (err) 754 if (err)
755 goto errout; 755 goto errout;
756 756
757 if (rule_exists(ops, frh, tb, rule)) { 757 if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
758 if (nlh->nlmsg_flags & NLM_F_EXCL) 758 rule_exists(ops, frh, tb, rule)) {
759 err = -EEXIST; 759 err = -EEXIST;
760 goto errout_free; 760 goto errout_free;
761 } 761 }
762 762
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 99ddc69736b2..f975c5e2a369 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3059,7 +3059,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
3059{ 3059{
3060 while (thread_is_running(t)) { 3060 while (thread_is_running(t)) {
3061 3061
3062 /* note: 't' will still be around even after the unlock/lock
3063 * cycle because pktgen_thread threads are only cleared at
3064 * net exit
3065 */
3066 mutex_unlock(&pktgen_thread_lock);
3062 msleep_interruptible(100); 3067 msleep_interruptible(100);
3068 mutex_lock(&pktgen_thread_lock);
3063 3069
3064 if (signal_pending(current)) 3070 if (signal_pending(current))
3065 goto signal; 3071 goto signal;
@@ -3074,6 +3080,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
3074 struct pktgen_thread *t; 3080 struct pktgen_thread *t;
3075 int sig = 1; 3081 int sig = 1;
3076 3082
3083 /* prevent from racing with rmmod */
3084 if (!try_module_get(THIS_MODULE))
3085 return sig;
3086
3077 mutex_lock(&pktgen_thread_lock); 3087 mutex_lock(&pktgen_thread_lock);
3078 3088
3079 list_for_each_entry(t, &pn->pktgen_threads, th_list) { 3089 list_for_each_entry(t, &pn->pktgen_threads, th_list) {
@@ -3087,6 +3097,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
3087 t->control |= (T_STOP); 3097 t->control |= (T_STOP);
3088 3098
3089 mutex_unlock(&pktgen_thread_lock); 3099 mutex_unlock(&pktgen_thread_lock);
3100 module_put(THIS_MODULE);
3090 return sig; 3101 return sig;
3091} 3102}
3092 3103
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 969402c7dbf1..d43737e6c3fb 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -28,14 +28,10 @@ static inline bool sja1105_is_link_local(const struct sk_buff *skb)
28 */ 28 */
29static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev) 29static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
30{ 30{
31 if (sja1105_is_link_local(skb)) { 31 if (sja1105_is_link_local(skb))
32 SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_LINK_LOCAL;
33 return true; 32 return true;
34 } 33 if (!dsa_port_is_vlan_filtering(dev->dsa_ptr))
35 if (!dsa_port_is_vlan_filtering(dev->dsa_ptr)) {
36 SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_NORMAL;
37 return true; 34 return true;
38 }
39 return false; 35 return false;
40} 36}
41 37
@@ -84,7 +80,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
84 80
85 skb->offload_fwd_mark = 1; 81 skb->offload_fwd_mark = 1;
86 82
87 if (SJA1105_SKB_CB(skb)->type == SJA1105_FRAME_TYPE_LINK_LOCAL) { 83 if (sja1105_is_link_local(skb)) {
88 /* Management traffic path. Switch embeds the switch ID and 84 /* Management traffic path. Switch embeds the switch ID and
89 * port ID into bytes of the destination MAC, courtesy of 85 * port ID into bytes of the destination MAC, courtesy of
90 * the incl_srcpt options. 86 * the incl_srcpt options.
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cee640281e02..6cb7cff22db9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1981,7 +1981,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1981 u32 itag = 0; 1981 u32 itag = 0;
1982 struct rtable *rth; 1982 struct rtable *rth;
1983 struct flowi4 fl4; 1983 struct flowi4 fl4;
1984 bool do_cache; 1984 bool do_cache = true;
1985 1985
1986 /* IP on this device is disabled. */ 1986 /* IP on this device is disabled. */
1987 1987
@@ -2058,6 +2058,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2058 if (res->type == RTN_BROADCAST) { 2058 if (res->type == RTN_BROADCAST) {
2059 if (IN_DEV_BFORWARD(in_dev)) 2059 if (IN_DEV_BFORWARD(in_dev))
2060 goto make_route; 2060 goto make_route;
2061 /* not do cache if bc_forwarding is enabled */
2062 if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2063 do_cache = false;
2061 goto brd_input; 2064 goto brd_input;
2062 } 2065 }
2063 2066
@@ -2095,18 +2098,15 @@ brd_input:
2095 RT_CACHE_STAT_INC(in_brd); 2098 RT_CACHE_STAT_INC(in_brd);
2096 2099
2097local_input: 2100local_input:
2098 do_cache = false; 2101 do_cache &= res->fi && !itag;
2099 if (res->fi) { 2102 if (do_cache) {
2100 if (!itag) { 2103 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2101 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2102 2104
2103 rth = rcu_dereference(nhc->nhc_rth_input); 2105 rth = rcu_dereference(nhc->nhc_rth_input);
2104 if (rt_cache_valid(rth)) { 2106 if (rt_cache_valid(rth)) {
2105 skb_dst_set_noref(skb, &rth->dst); 2107 skb_dst_set_noref(skb, &rth->dst);
2106 err = 0; 2108 err = 0;
2107 goto out; 2109 goto out;
2108 }
2109 do_cache = true;
2110 } 2110 }
2111 } 2111 }
2112 2112
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 189144346cd4..7c6228fbf5dd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -533,8 +533,7 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
533 (inet->inet_dport != rmt_port && inet->inet_dport) || 533 (inet->inet_dport != rmt_port && inet->inet_dport) ||
534 (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || 534 (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
535 ipv6_only_sock(sk) || 535 ipv6_only_sock(sk) ||
536 (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && 536 !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
537 sk->sk_bound_dev_if != sdif))
538 return false; 537 return false;
539 if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif)) 538 if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
540 return false; 539 return false;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 703c8387f102..70693bc7ad9d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -779,6 +779,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
779 struct flowi6 fl6; 779 struct flowi6 fl6;
780 struct ipcm6_cookie ipc6; 780 struct ipcm6_cookie ipc6;
781 int addr_len = msg->msg_namelen; 781 int addr_len = msg->msg_namelen;
782 int hdrincl;
782 u16 proto; 783 u16 proto;
783 int err; 784 int err;
784 785
@@ -792,6 +793,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
792 if (msg->msg_flags & MSG_OOB) 793 if (msg->msg_flags & MSG_OOB)
793 return -EOPNOTSUPP; 794 return -EOPNOTSUPP;
794 795
796 /* hdrincl should be READ_ONCE(inet->hdrincl)
797 * but READ_ONCE() doesn't work with bit fields.
798 * Doing this indirectly yields the same result.
799 */
800 hdrincl = inet->hdrincl;
801 hdrincl = READ_ONCE(hdrincl);
802
795 /* 803 /*
796 * Get and verify the address. 804 * Get and verify the address.
797 */ 805 */
@@ -883,11 +891,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
883 opt = ipv6_fixup_options(&opt_space, opt); 891 opt = ipv6_fixup_options(&opt_space, opt);
884 892
885 fl6.flowi6_proto = proto; 893 fl6.flowi6_proto = proto;
886 rfv.msg = msg; 894
887 rfv.hlen = 0; 895 if (!hdrincl) {
888 err = rawv6_probe_proto_opt(&rfv, &fl6); 896 rfv.msg = msg;
889 if (err) 897 rfv.hlen = 0;
890 goto out; 898 err = rawv6_probe_proto_opt(&rfv, &fl6);
899 if (err)
900 goto out;
901 }
891 902
892 if (!ipv6_addr_any(daddr)) 903 if (!ipv6_addr_any(daddr))
893 fl6.daddr = *daddr; 904 fl6.daddr = *daddr;
@@ -904,7 +915,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
904 fl6.flowi6_oif = np->ucast_oif; 915 fl6.flowi6_oif = np->ucast_oif;
905 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 916 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
906 917
907 if (inet->hdrincl) 918 if (hdrincl)
908 fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH; 919 fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
909 920
910 if (ipc6.tclass < 0) 921 if (ipc6.tclass < 0)
@@ -927,7 +938,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
927 goto do_confirm; 938 goto do_confirm;
928 939
929back_from_confirm: 940back_from_confirm:
930 if (inet->hdrincl) 941 if (hdrincl)
931 err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, 942 err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
932 msg->msg_flags, &ipc6.sockc); 943 msg->msg_flags, &ipc6.sockc);
933 else { 944 else {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fc012e801459..a29d66da7394 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3008,8 +3008,8 @@ static int packet_release(struct socket *sock)
3008 3008
3009 synchronize_net(); 3009 synchronize_net();
3010 3010
3011 kfree(po->rollover);
3011 if (f) { 3012 if (f) {
3012 kfree(po->rollover);
3013 fanout_release_data(f); 3013 fanout_release_data(f);
3014 kfree(f); 3014 kfree(f);
3015 } 3015 }
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 2da9b75bad16..b8d581b779b2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -87,7 +87,7 @@ static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
87 87
88 spin_lock_irqsave(&rds_ibdev->spinlock, flags); 88 spin_lock_irqsave(&rds_ibdev->spinlock, flags);
89 list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node) 89 list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
90 rds_conn_drop(ic->conn); 90 rds_conn_path_drop(&ic->conn->c_path[0], true);
91 spin_unlock_irqrestore(&rds_ibdev->spinlock, flags); 91 spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
92} 92}
93 93
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index d664e9ade74d..0b347f46b2f4 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -428,12 +428,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
428 wait_clean_list_grace(); 428 wait_clean_list_grace();
429 429
430 list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail); 430 list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
431 if (ibmr_ret) 431 if (ibmr_ret) {
432 *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode); 432 *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
433 433 clean_nodes = clean_nodes->next;
434 }
434 /* more than one entry in llist nodes */ 435 /* more than one entry in llist nodes */
435 if (clean_nodes->next) 436 if (clean_nodes)
436 llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list); 437 llist_add_batch(clean_nodes, clean_tail,
438 &pool->clean_list);
437 439
438 } 440 }
439 441
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8946c89d7392..3cae88cbdaa0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -168,6 +168,7 @@ void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
168 list_del(&inc->ii_cache_entry); 168 list_del(&inc->ii_cache_entry);
169 WARN_ON(!list_empty(&inc->ii_frags)); 169 WARN_ON(!list_empty(&inc->ii_frags));
170 kmem_cache_free(rds_ib_incoming_slab, inc); 170 kmem_cache_free(rds_ib_incoming_slab, inc);
171 atomic_dec(&rds_ib_allocation);
171 } 172 }
172 173
173 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags); 174 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
@@ -1057,6 +1058,8 @@ out:
1057 1058
1058void rds_ib_recv_exit(void) 1059void rds_ib_recv_exit(void)
1059{ 1060{
1061 WARN_ON(atomic_read(&rds_ib_allocation));
1062
1060 kmem_cache_destroy(rds_ib_incoming_slab); 1063 kmem_cache_destroy(rds_ib_incoming_slab);
1061 kmem_cache_destroy(rds_ib_frag_slab); 1064 kmem_cache_destroy(rds_ib_frag_slab);
1062} 1065}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 92331e1195c1..f17908f5c4f3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2312,7 +2312,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2312 union sctp_addr addr; 2312 union sctp_addr addr;
2313 struct sctp_af *af; 2313 struct sctp_af *af;
2314 int src_match = 0; 2314 int src_match = 0;
2315 char *cookie;
2316 2315
2317 /* We must include the address that the INIT packet came from. 2316 /* We must include the address that the INIT packet came from.
2318 * This is the only address that matters for an INIT packet. 2317 * This is the only address that matters for an INIT packet.
@@ -2416,14 +2415,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2416 /* Peer Rwnd : Current calculated value of the peer's rwnd. */ 2415 /* Peer Rwnd : Current calculated value of the peer's rwnd. */
2417 asoc->peer.rwnd = asoc->peer.i.a_rwnd; 2416 asoc->peer.rwnd = asoc->peer.i.a_rwnd;
2418 2417
2419 /* Copy cookie in case we need to resend COOKIE-ECHO. */
2420 cookie = asoc->peer.cookie;
2421 if (cookie) {
2422 asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
2423 if (!asoc->peer.cookie)
2424 goto clean_up;
2425 }
2426
2427 /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily 2418 /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
2428 * high (for example, implementations MAY use the size of the receiver 2419 * high (for example, implementations MAY use the size of the receiver
2429 * advertised window). 2420 * advertised window).
@@ -2592,7 +2583,9 @@ do_addr_param:
2592 case SCTP_PARAM_STATE_COOKIE: 2583 case SCTP_PARAM_STATE_COOKIE:
2593 asoc->peer.cookie_len = 2584 asoc->peer.cookie_len =
2594 ntohs(param.p->length) - sizeof(struct sctp_paramhdr); 2585 ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
2595 asoc->peer.cookie = param.cookie->body; 2586 asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
2587 if (!asoc->peer.cookie)
2588 retval = 0;
2596 break; 2589 break;
2597 2590
2598 case SCTP_PARAM_HEARTBEAT_INFO: 2591 case SCTP_PARAM_HEARTBEAT_INFO:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9b50da548db2..a554d6d15d1b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -883,6 +883,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
883 asoc->rto_initial; 883 asoc->rto_initial;
884 } 884 }
885 885
886 if (sctp_state(asoc, ESTABLISHED)) {
887 kfree(asoc->peer.cookie);
888 asoc->peer.cookie = NULL;
889 }
890
886 if (sctp_state(asoc, ESTABLISHED) || 891 if (sctp_state(asoc, ESTABLISHED) ||
887 sctp_state(asoc, CLOSED) || 892 sctp_state(asoc, CLOSED) ||
888 sctp_state(asoc, SHUTDOWN_RECEIVED)) { 893 sctp_state(asoc, SHUTDOWN_RECEIVED)) {
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d6e57da56c94..627a87a71f8b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2288,13 +2288,13 @@ call_status(struct rpc_task *task)
2288 case -ECONNREFUSED: 2288 case -ECONNREFUSED:
2289 case -ECONNRESET: 2289 case -ECONNRESET:
2290 case -ECONNABORTED: 2290 case -ECONNABORTED:
2291 case -ENOTCONN:
2291 rpc_force_rebind(clnt); 2292 rpc_force_rebind(clnt);
2292 /* fall through */ 2293 /* fall through */
2293 case -EADDRINUSE: 2294 case -EADDRINUSE:
2294 rpc_delay(task, 3*HZ); 2295 rpc_delay(task, 3*HZ);
2295 /* fall through */ 2296 /* fall through */
2296 case -EPIPE: 2297 case -EPIPE:
2297 case -ENOTCONN:
2298 case -EAGAIN: 2298 case -EAGAIN:
2299 break; 2299 break;
2300 case -EIO: 2300 case -EIO:
@@ -2426,17 +2426,21 @@ call_decode(struct rpc_task *task)
2426 return; 2426 return;
2427 case -EAGAIN: 2427 case -EAGAIN:
2428 task->tk_status = 0; 2428 task->tk_status = 0;
2429 /* Note: rpc_decode_header() may have freed the RPC slot */ 2429 xdr_free_bvec(&req->rq_rcv_buf);
2430 if (task->tk_rqstp == req) { 2430 req->rq_reply_bytes_recvd = 0;
2431 xdr_free_bvec(&req->rq_rcv_buf); 2431 req->rq_rcv_buf.len = 0;
2432 req->rq_reply_bytes_recvd = 0; 2432 if (task->tk_client->cl_discrtry)
2433 req->rq_rcv_buf.len = 0; 2433 xprt_conditional_disconnect(req->rq_xprt,
2434 if (task->tk_client->cl_discrtry) 2434 req->rq_connect_cookie);
2435 xprt_conditional_disconnect(req->rq_xprt,
2436 req->rq_connect_cookie);
2437 }
2438 task->tk_action = call_encode; 2435 task->tk_action = call_encode;
2439 rpc_check_timeout(task); 2436 rpc_check_timeout(task);
2437 break;
2438 case -EKEYREJECTED:
2439 task->tk_action = call_reserve;
2440 rpc_check_timeout(task);
2441 rpcauth_invalcred(task);
2442 /* Ensure we obtain a new XID if we retry! */
2443 xprt_release(task);
2440 } 2444 }
2441} 2445}
2442 2446
@@ -2572,11 +2576,7 @@ out_msg_denied:
2572 break; 2576 break;
2573 task->tk_cred_retry--; 2577 task->tk_cred_retry--;
2574 trace_rpc__stale_creds(task); 2578 trace_rpc__stale_creds(task);
2575 rpcauth_invalcred(task); 2579 return -EKEYREJECTED;
2576 /* Ensure we obtain a new XID! */
2577 xprt_release(task);
2578 task->tk_action = call_reserve;
2579 return -EAGAIN;
2580 case rpc_autherr_badcred: 2580 case rpc_autherr_badcred:
2581 case rpc_autherr_badverf: 2581 case rpc_autherr_badverf:
2582 /* possibly garbled cred/verf? */ 2582 /* possibly garbled cred/verf? */
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index bef5eac8ab38..84bb37924540 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -810,8 +810,7 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
810{ 810{
811 struct rpcrdma_sendctx *sc; 811 struct rpcrdma_sendctx *sc;
812 812
813 sc = kzalloc(sizeof(*sc) + 813 sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges),
814 ia->ri_max_send_sges * sizeof(struct ib_sge),
815 GFP_KERNEL); 814 GFP_KERNEL);
816 if (!sc) 815 if (!sc)
817 return NULL; 816 return NULL;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b95c408fd771..1f9cf57d9754 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -550,11 +550,23 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
550 } 550 }
551} 551}
552 552
553static void tls_device_resync_rx(struct tls_context *tls_ctx,
554 struct sock *sk, u32 seq, u64 rcd_sn)
555{
556 struct net_device *netdev;
557
558 if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
559 return;
560 netdev = READ_ONCE(tls_ctx->netdev);
561 if (netdev)
562 netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
563 clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
564}
565
553void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn) 566void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
554{ 567{
555 struct tls_context *tls_ctx = tls_get_ctx(sk); 568 struct tls_context *tls_ctx = tls_get_ctx(sk);
556 struct tls_offload_context_rx *rx_ctx; 569 struct tls_offload_context_rx *rx_ctx;
557 struct net_device *netdev;
558 u32 is_req_pending; 570 u32 is_req_pending;
559 s64 resync_req; 571 s64 resync_req;
560 u32 req_seq; 572 u32 req_seq;
@@ -570,12 +582,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
570 if (unlikely(is_req_pending) && req_seq == seq && 582 if (unlikely(is_req_pending) && req_seq == seq &&
571 atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) { 583 atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
572 seq += TLS_HEADER_SIZE - 1; 584 seq += TLS_HEADER_SIZE - 1;
573 down_read(&device_offload_lock); 585 tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
574 netdev = tls_ctx->netdev;
575 if (netdev)
576 netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq,
577 rcd_sn);
578 up_read(&device_offload_lock);
579 } 586 }
580} 587}
581 588
@@ -977,7 +984,10 @@ static int tls_device_down(struct net_device *netdev)
977 if (ctx->rx_conf == TLS_HW) 984 if (ctx->rx_conf == TLS_HW)
978 netdev->tlsdev_ops->tls_dev_del(netdev, ctx, 985 netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
979 TLS_OFFLOAD_CTX_DIR_RX); 986 TLS_OFFLOAD_CTX_DIR_RX);
980 ctx->netdev = NULL; 987 WRITE_ONCE(ctx->netdev, NULL);
988 smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
989 while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
990 usleep_range(10, 200);
981 dev_put(netdev); 991 dev_put(netdev);
982 list_del_init(&ctx->list); 992 list_del_init(&ctx->list);
983 993
diff --git a/samples/pidfd/pidfd-metadata.c b/samples/pidfd/pidfd-metadata.c
index 640f5f757c57..14b454448429 100644
--- a/samples/pidfd/pidfd-metadata.c
+++ b/samples/pidfd/pidfd-metadata.c
@@ -21,6 +21,10 @@
21#define CLONE_PIDFD 0x00001000 21#define CLONE_PIDFD 0x00001000
22#endif 22#endif
23 23
24#ifndef __NR_pidfd_send_signal
25#define __NR_pidfd_send_signal -1
26#endif
27
24static int do_child(void *args) 28static int do_child(void *args)
25{ 29{
26 printf("%d\n", getpid()); 30 printf("%d\n", getpid());
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 85d758233483..f641bb0aa63f 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -74,8 +74,13 @@ endef
74# Usage: CROSS_COMPILE := $(call cc-cross-prefix, m68k-linux-gnu- m68k-linux-) 74# Usage: CROSS_COMPILE := $(call cc-cross-prefix, m68k-linux-gnu- m68k-linux-)
75# Return first <prefix> where a <prefix>gcc is found in PATH. 75# Return first <prefix> where a <prefix>gcc is found in PATH.
76# If no gcc found in PATH with listed prefixes return nothing 76# If no gcc found in PATH with listed prefixes return nothing
77#
78# Note: '2>/dev/null' is here to force Make to invoke a shell. Otherwise, it
79# would try to directly execute the shell builtin 'command'. This workaround
80# should be kept for a long time since this issue was fixed only after the
81# GNU Make 4.2.1 release.
77cc-cross-prefix = $(firstword $(foreach c, $(filter-out -%, $(1)), \ 82cc-cross-prefix = $(firstword $(foreach c, $(filter-out -%, $(1)), \
78 $(if $(shell which $(c)gcc), $(c)))) 83 $(if $(shell command -v $(c)gcc 2>/dev/null), $(c))))
79 84
80# output directory for tests below 85# output directory for tests below
81TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) 86TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 122aef5e4e14..371bd17a4983 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -46,7 +46,7 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
46 $x = "[0-9a-f]"; # hex character 46 $x = "[0-9a-f]"; # hex character
47 $xs = "[0-9a-f ]"; # hex character or space 47 $xs = "[0-9a-f ]"; # hex character or space
48 $funcre = qr/^$x* <(.*)>:$/; 48 $funcre = qr/^$x* <(.*)>:$/;
49 if ($arch eq 'aarch64') { 49 if ($arch =~ '^(aarch|arm)64$') {
50 #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]! 50 #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
51 #a110: d11643ff sub sp, sp, #0x590 51 #a110: d11643ff sub sp, sp, #0x590
52 $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o; 52 $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
diff --git a/scripts/kconfig/tests/err_recursive_inc/expected_stderr b/scripts/kconfig/tests/err_recursive_inc/expected_stderr
index 6b582eee2176..b070a31fdfeb 100644
--- a/scripts/kconfig/tests/err_recursive_inc/expected_stderr
+++ b/scripts/kconfig/tests/err_recursive_inc/expected_stderr
@@ -1,6 +1,6 @@
1Recursive inclusion detected. 1Recursive inclusion detected.
2Inclusion path: 2Inclusion path:
3 current file : Kconfig.inc1 3 current file : Kconfig.inc1
4 included from: Kconfig.inc3:1 4 included from: Kconfig.inc3:2
5 included from: Kconfig.inc2:3 5 included from: Kconfig.inc2:4
6 included from: Kconfig.inc1:4 6 included from: Kconfig.inc1:5
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 27b42d5b6c4f..ca7f46b562a4 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -104,7 +104,7 @@ clean-dirs += $(objtree)/snap/
104# --------------------------------------------------------------------------- 104# ---------------------------------------------------------------------------
105tar%pkg: FORCE 105tar%pkg: FORCE
106 $(MAKE) -f $(srctree)/Makefile 106 $(MAKE) -f $(srctree)/Makefile
107 $(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@ 107 +$(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@
108 108
109clean-dirs += $(objtree)/tar-install/ 109clean-dirs += $(objtree)/tar-install/
110 110
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index be59f9c34ea2..79053a4f4783 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -198,7 +198,7 @@ static int test_cgcore_no_internal_process_constraint_on_threads(const char *roo
198 char *parent = NULL, *child = NULL; 198 char *parent = NULL, *child = NULL;
199 199
200 if (cg_read_strstr(root, "cgroup.controllers", "cpu") || 200 if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
201 cg_read_strstr(root, "cgroup.subtree_control", "cpu")) { 201 cg_write(root, "cgroup.subtree_control", "+cpu")) {
202 ret = KSFT_SKIP; 202 ret = KSFT_SKIP;
203 goto cleanup; 203 goto cleanup;
204 } 204 }
@@ -376,6 +376,11 @@ int main(int argc, char *argv[])
376 376
377 if (cg_find_unified_root(root, sizeof(root))) 377 if (cg_find_unified_root(root, sizeof(root)))
378 ksft_exit_skip("cgroup v2 isn't mounted\n"); 378 ksft_exit_skip("cgroup v2 isn't mounted\n");
379
380 if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
381 if (cg_write(root, "cgroup.subtree_control", "+memory"))
382 ksft_exit_skip("Failed to set memory controller\n");
383
379 for (i = 0; i < ARRAY_SIZE(tests); i++) { 384 for (i = 0; i < ARRAY_SIZE(tests); i++) {
380 switch (tests[i].fn(root)) { 385 switch (tests[i].fn(root)) {
381 case KSFT_PASS: 386 case KSFT_PASS:
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index 6f339882a6ca..c19a97dd02d4 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -1205,6 +1205,10 @@ int main(int argc, char **argv)
1205 if (cg_read_strstr(root, "cgroup.controllers", "memory")) 1205 if (cg_read_strstr(root, "cgroup.controllers", "memory"))
1206 ksft_exit_skip("memory controller isn't available\n"); 1206 ksft_exit_skip("memory controller isn't available\n");
1207 1207
1208 if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
1209 if (cg_write(root, "cgroup.subtree_control", "+memory"))
1210 ksft_exit_skip("Failed to set memory controller\n");
1211
1208 for (i = 0; i < ARRAY_SIZE(tests); i++) { 1212 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1209 switch (tests[i].fn(root)) { 1213 switch (tests[i].fn(root)) {
1210 case KSFT_PASS: 1214 case KSFT_PASS:
diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh
index 9a678ece32b4..4eac0a06f451 100755
--- a/tools/testing/selftests/net/forwarding/router_broadcast.sh
+++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh
@@ -145,16 +145,19 @@ bc_forwarding_disable()
145{ 145{
146 sysctl_set net.ipv4.conf.all.bc_forwarding 0 146 sysctl_set net.ipv4.conf.all.bc_forwarding 0
147 sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0 147 sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0
148 sysctl_set net.ipv4.conf.$rp2.bc_forwarding 0
148} 149}
149 150
150bc_forwarding_enable() 151bc_forwarding_enable()
151{ 152{
152 sysctl_set net.ipv4.conf.all.bc_forwarding 1 153 sysctl_set net.ipv4.conf.all.bc_forwarding 1
153 sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1 154 sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1
155 sysctl_set net.ipv4.conf.$rp2.bc_forwarding 1
154} 156}
155 157
156bc_forwarding_restore() 158bc_forwarding_restore()
157{ 159{
160 sysctl_restore net.ipv4.conf.$rp2.bc_forwarding
158 sysctl_restore net.ipv4.conf.$rp1.bc_forwarding 161 sysctl_restore net.ipv4.conf.$rp1.bc_forwarding
159 sysctl_restore net.ipv4.conf.all.bc_forwarding 162 sysctl_restore net.ipv4.conf.all.bc_forwarding
160} 163}
@@ -171,7 +174,7 @@ ping_test_from()
171 log_info "ping $dip, expected reply from $from" 174 log_info "ping $dip, expected reply from $from"
172 ip vrf exec $(master_name_get $oif) \ 175 ip vrf exec $(master_name_get $oif) \
173 $PING -I $oif $dip -c 10 -i 0.1 -w $PING_TIMEOUT -b 2>&1 \ 176 $PING -I $oif $dip -c 10 -i 0.1 -w $PING_TIMEOUT -b 2>&1 \
174 | grep $from &> /dev/null 177 | grep "bytes from $from" > /dev/null
175 check_err_fail $fail $? 178 check_err_fail $fail $?
176} 179}
177 180
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index 5bae1792e3d6..104c75a33882 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -16,6 +16,10 @@
16 16
17#include "../kselftest.h" 17#include "../kselftest.h"
18 18
19#ifndef __NR_pidfd_send_signal
20#define __NR_pidfd_send_signal -1
21#endif
22
19static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info, 23static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
20 unsigned int flags) 24 unsigned int flags)
21{ 25{
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index e13eb6cc8901..9534dc2bc929 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,10 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Makefile for vm selftests 2# Makefile for vm selftests
3 3
4ifndef OUTPUT
5 OUTPUT := $(shell pwd)
6endif
7
8CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) 4CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
9LDLIBS = -lrt 5LDLIBS = -lrt
10TEST_GEN_FILES = compaction_test 6TEST_GEN_FILES = compaction_test
@@ -25,6 +21,8 @@ TEST_GEN_FILES += virtual_address_range
25 21
26TEST_PROGS := run_vmtests 22TEST_PROGS := run_vmtests
27 23
24TEST_FILES := test_vmalloc.sh
25
28KSFT_KHDR_INSTALL := 1 26KSFT_KHDR_INSTALL := 1
29include ../lib.mk 27include ../lib.mk
30 28
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 5d1db824f73a..b3e6497b080c 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -123,7 +123,7 @@ static void usage(void)
123 fprintf(stderr, "Supported <test type>: anon, hugetlb, " 123 fprintf(stderr, "Supported <test type>: anon, hugetlb, "
124 "hugetlb_shared, shmem\n\n"); 124 "hugetlb_shared, shmem\n\n");
125 fprintf(stderr, "Examples:\n\n"); 125 fprintf(stderr, "Examples:\n\n");
126 fprintf(stderr, examples); 126 fprintf(stderr, "%s", examples);
127 exit(1); 127 exit(1);
128} 128}
129 129
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 7ef45a4a3cba..6683b4a70b05 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -127,7 +127,7 @@ static inline void free_page(unsigned long addr)
127#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) 127#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
128#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) 128#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
129 129
130#define WARN_ON_ONCE(cond) ((cond) ? fprintf (stderr, "WARNING\n") : 0) 130#define WARN_ON_ONCE(cond) (unlikely(cond) ? fprintf (stderr, "WARNING\n") : 0)
131 131
132#define min(x, y) ({ \ 132#define min(x, y) ({ \
133 typeof(x) _min1 = (x); \ 133 typeof(x) _min1 = (x); \