author    David S. Miller <davem@davemloft.net>  2019-04-05 17:14:19 -0400
committer David S. Miller <davem@davemloft.net>  2019-04-05 17:14:19 -0400
commit    f83f7151950dd9e0f6b4a1a405bf5e55c5294e4d (patch)
tree      f8d9d8ee821fcc9f0a8e1a8679bc622219c70e3b
parent    8f4043f1253292495dbf9c8be0c1b07b4b9902b7 (diff)
parent    7f46774c6480174eb869a3c15167eafac467a6af (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor comment merge conflict in mlx5.

Staging driver has a fixup due to the skb->xmit_more changes
in 'net-next', but was removed in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  .mailmap | 4
-rw-r--r--  Documentation/bpf/btf.rst | 8
-rw-r--r--  Documentation/devicetree/bindings/hwmon/adc128d818.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-xscale.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-mtk.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-stu300.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-wmt.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-vt8500.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/serial/mtk-uart.txt | 1
-rw-r--r--  Documentation/filesystems/mount_api.txt | 367
-rw-r--r--  Documentation/i2c/busses/i2c-i801 | 1
-rw-r--r--  Documentation/networking/bpf_flow_dissector.rst | 126
-rw-r--r--  Documentation/networking/index.rst | 1
-rw-r--r--  Documentation/virtual/kvm/api.txt | 77
-rw-r--r--  Documentation/virtual/kvm/mmu.txt | 11
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  Makefile | 49
-rw-r--r--  arch/alpha/include/asm/Kbuild | 1
-rw-r--r--  arch/alpha/include/uapi/asm/kvm_para.h | 2
-rw-r--r--  arch/arc/include/asm/Kbuild | 1
-rw-r--r--  arch/arc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx6ull-pinfunc-snvs.h | 2
-rw-r--r--  arch/arm/boot/dts/ste-nomadik-nhk15.dts | 9
-rw-r--r--  arch/arm/configs/imx_v4_v5_defconfig | 3
-rw-r--r--  arch/arm/configs/imx_v6_v7_defconfig | 2
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 11
-rw-r--r--  arch/arm/include/asm/stage2_pgtable.h | 2
-rw-r--r--  arch/arm/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/uapi/asm/kvm_para.h | 2
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6q.c | 27
-rw-r--r--  arch/arm/mach-imx/mach-imx51.c | 1
-rw-r--r--  arch/arm64/Kconfig.platforms | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra186.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/renesas/r8a774c0.dtsi | 7
-rw-r--r--  arch/arm64/boot/dts/renesas/r8a77990.dtsi | 7
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 11
-rw-r--r--  arch/arm64/kernel/setup.c | 2
-rw-r--r--  arch/arm64/kvm/reset.c | 6
-rw-r--r--  arch/c6x/include/asm/Kbuild | 1
-rw-r--r--  arch/c6x/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/h8300/include/asm/Kbuild | 1
-rw-r--r--  arch/h8300/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/hexagon/include/asm/Kbuild | 1
-rw-r--r--  arch/hexagon/include/uapi/asm/kvm_para.h | 2
-rw-r--r--  arch/ia64/include/asm/Kbuild | 1
-rw-r--r--  arch/ia64/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/m68k/include/asm/Kbuild | 1
-rw-r--r--  arch/m68k/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/microblaze/include/asm/Kbuild | 1
-rw-r--r--  arch/microblaze/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/nios2/include/asm/Kbuild | 1
-rw-r--r--  arch/nios2/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/openrisc/include/asm/Kbuild | 1
-rw-r--r--  arch/openrisc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/parisc/include/asm/Kbuild | 1
-rw-r--r--  arch/parisc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/lib/memcmp_64.S | 17
-rw-r--r--  arch/powerpc/platforms/pseries/pseries_energy.c | 27
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 1
-rw-r--r--  arch/riscv/include/asm/fixmap.h | 2
-rw-r--r--  arch/riscv/include/asm/uaccess.h | 2
-rw-r--r--  arch/riscv/kernel/Makefile | 3
-rw-r--r--  arch/riscv/kernel/module.c | 2
-rw-r--r--  arch/riscv/kernel/setup.c | 8
-rw-r--r--  arch/riscv/mm/Makefile | 6
-rw-r--r--  arch/riscv/mm/init.c | 28
-rw-r--r--  arch/s390/include/asm/ap.h | 11
-rw-r--r--  arch/s390/include/asm/elf.h | 11
-rw-r--r--  arch/s390/include/asm/lowcore.h | 61
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_diag.c | 19
-rw-r--r--  arch/s390/kernel/smp.c | 3
-rw-r--r--  arch/s390/kernel/vtime.c | 19
-rw-r--r--  arch/sh/include/asm/Kbuild | 1
-rw-r--r--  arch/sh/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/sparc/include/asm/Kbuild | 1
-rw-r--r--  arch/sparc/include/uapi/asm/kvm_para.h | 2
-rw-r--r--  arch/unicore32/include/asm/Kbuild | 1
-rw-r--r--  arch/unicore32/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/x86/Kconfig | 8
-rw-r--r--  arch/x86/Makefile | 8
-rw-r--r--  arch/x86/boot/compressed/misc.h | 4
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 5
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 10
-rw-r--r--  arch/x86/include/asm/realmode.h | 6
-rw-r--r--  arch/x86/kernel/cpu/resctrl/monitor.c | 3
-rw-r--r--  arch/x86/kvm/hyperv.c | 9
-rw-r--r--  arch/x86/kvm/mmu.c | 54
-rw-r--r--  arch/x86/kvm/mmutrace.h | 4
-rw-r--r--  arch/x86/kvm/svm.c | 32
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 5
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 19
-rw-r--r--  arch/x86/kvm/vmx/vmx.h | 1
-rw-r--r--  arch/x86/kvm/x86.c | 59
-rw-r--r--  arch/x86/mm/mmap.c | 2
-rw-r--r--  arch/x86/platform/efi/quirks.c | 2
-rw-r--r--  arch/x86/realmode/init.c | 11
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 1
-rw-r--r--  arch/xtensa/include/uapi/asm/Kbuild | 1
-rw-r--r--  block/blk-flush.c | 4
-rw-r--r--  block/blk-mq.c | 16
-rw-r--r--  block/blk-mq.h | 9
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 6
-rw-r--r--  drivers/acpi/bus.c | 3
-rw-r--r--  drivers/acpi/cppc_acpi.c | 9
-rw-r--r--  drivers/android/binder.c | 3
-rw-r--r--  drivers/android/binder_alloc.c | 18
-rw-r--r--  drivers/ata/libata-zpodd.c | 34
-rw-r--r--  drivers/block/zram/zram_drv.c | 32
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 10
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r--  drivers/dma/stm32-mdma.c | 4
-rw-r--r--  drivers/gpio/gpio-adnp.c | 6
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 2
-rw-r--r--  drivers/gpio/gpio-exar.c | 2
-rw-r--r--  drivers/gpio/gpio-mockup.c | 10
-rw-r--r--  drivers/gpio/gpiolib-of.c | 17
-rw-r--r--  drivers/gpio/gpiolib.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 5
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_file.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 9
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 18
-rw-r--r--  drivers/gpu/drm/tegra/hub.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/vic.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 72
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 6
-rw-r--r--  drivers/gpu/drm/vkms/vkms_gem.c | 5
-rw-r--r--  drivers/hid/Kconfig | 1
-rw-r--r--  drivers/hid/hid-core.c | 6
-rw-r--r--  drivers/hid/hid-debug.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-input.c | 1
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 13
-rw-r--r--  drivers/hid/hid-quirks.c | 11
-rw-r--r--  drivers/hid/hid-steam.c | 26
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 4
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 1
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 2
-rw-r--r--  drivers/hwmon/occ/common.c | 6
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r--  drivers/iommu/amd_iommu.c | 9
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 7
-rw-r--r--  drivers/iommu/amd_iommu_types.h | 2
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 19
-rw-r--r--  drivers/iommu/iommu.c | 8
-rw-r--r--  drivers/leds/leds-pca9532.c | 8
-rw-r--r--  drivers/leds/trigger/ledtrig-netdev.c | 16
-rw-r--r--  drivers/mfd/Kconfig | 2
-rw-r--r--  drivers/mfd/sprd-sc27xx-spi.c | 42
-rw-r--r--  drivers/mfd/twl-core.c | 23
-rw-r--r--  drivers/misc/habanalabs/command_submission.c | 6
-rw-r--r--  drivers/misc/habanalabs/debugfs.c | 7
-rw-r--r--  drivers/misc/habanalabs/device.c | 71
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 65
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 21
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c | 5
-rw-r--r--  drivers/misc/habanalabs/memory.c | 38
-rw-r--r--  drivers/misc/habanalabs/mmu.c | 6
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 24
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 57
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 34
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 6
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 32
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vrf.c | 1
-rw-r--r--  drivers/nvme/host/multipath.c | 5
-rw-r--r--  drivers/nvme/host/tcp.c | 2
-rw-r--r--  drivers/nvme/target/core.c | 4
-rw-r--r--  drivers/nvme/target/io-cmd-file.c | 20
-rw-r--r--  drivers/pci/pci.h | 1
-rw-r--r--  drivers/pci/pcie/bw_notification.c | 23
-rw-r--r--  drivers/pci/probe.c | 2
-rw-r--r--  drivers/phy/allwinner/phy-sun4i-usb.c | 5
-rw-r--r--  drivers/s390/cio/chsc.c | 13
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c | 8
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 19
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 2
-rw-r--r--  drivers/s390/crypto/ap_queue.c | 26
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 30
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 17
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 21
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 4
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 7
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 39
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 12
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 6
-rw-r--r--  drivers/scsi/sd.c | 22
-rw-r--r--  drivers/soc/bcm/bcm2835-power.c | 49
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/axis-fifo/Kconfig | 1
-rw-r--r--  drivers/staging/comedi/comedidev.h | 2
-rw-r--r--  drivers/staging/comedi/drivers.c | 33
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c | 10
-rw-r--r--  drivers/staging/erofs/dir.c | 45
-rw-r--r--  drivers/staging/erofs/unzip_vle.c | 45
-rw-r--r--  drivers/staging/erofs/unzip_vle_lz4.c | 7
-rw-r--r--  drivers/staging/mt7621-dts/gbpc1.dts | 29
-rw-r--r--  drivers/staging/mt7621-dts/mt7621.dtsi | 73
-rw-r--r--  drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt | 48
-rw-r--r--  drivers/staging/mt7621-eth/Kconfig | 39
-rw-r--r--  drivers/staging/mt7621-eth/Makefile | 14
-rw-r--r--  drivers/staging/mt7621-eth/TODO | 13
-rw-r--r--  drivers/staging/mt7621-eth/ethtool.c | 250
-rw-r--r--  drivers/staging/mt7621-eth/ethtool.h | 15
-rw-r--r--  drivers/staging/mt7621-eth/gsw_mt7620.h | 277
-rw-r--r--  drivers/staging/mt7621-eth/gsw_mt7621.c | 297
-rw-r--r--  drivers/staging/mt7621-eth/mdio.c | 275
-rw-r--r--  drivers/staging/mt7621-eth/mdio.h | 27
-rw-r--r--  drivers/staging/mt7621-eth/mdio_mt7620.c | 173
-rw-r--r--  drivers/staging/mt7621-eth/mtk_eth_soc.c | 2178
-rw-r--r--  drivers/staging/mt7621-eth/mtk_eth_soc.h | 716
-rw-r--r--  drivers/staging/mt7621-eth/soc_mt7621.c | 161
-rw-r--r--  drivers/staging/mt7621-pci/Kconfig | 1
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c | 2
-rw-r--r--  drivers/staging/octeon/ethernet.c | 40
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h | 4
-rw-r--r--  drivers/staging/olpc_dcon/olpc_dcon_xo_1.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_xmit.c | 9
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_xmit.h | 2
-rw-r--r--  drivers/staging/rtl8712/rtl8712_cmd.c | 10
-rw-r--r--  drivers/staging/rtl8712/rtl8712_cmd.h | 2
-rw-r--r--  drivers/staging/rtl8723bs/core/rtw_xmit.c | 14
-rw-r--r--  drivers/staging/rtl8723bs/include/rtw_xmit.h | 2
-rw-r--r--  drivers/staging/rtlwifi/phydm/rtl_phydm.c | 2
-rw-r--r--  drivers/staging/rtlwifi/rtl8822be/fw.c | 2
-rw-r--r--  drivers/staging/speakup/speakup_soft.c | 16
-rw-r--r--  drivers/staging/speakup/spk_priv.h | 1
-rw-r--r--  drivers/staging/speakup/synth.c | 6
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 8
-rw-r--r--  drivers/staging/vt6655/device_main.c | 11
-rw-r--r--  drivers/tty/serial/ar933x_uart.c | 24
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 52
-rw-r--r--  drivers/tty/serial/kgdboc.c | 4
-rw-r--r--  drivers/tty/serial/max310x.c | 2
-rw-r--r--  drivers/tty/serial/mvebu-uart.c | 3
-rw-r--r--  drivers/tty/serial/mxs-auart.c | 4
-rw-r--r--  drivers/tty/serial/qcom_geni_serial.c | 2
-rw-r--r--  drivers/tty/serial/sc16is7xx.c | 12
-rw-r--r--  drivers/tty/serial/sh-sci.c | 12
-rw-r--r--  drivers/tty/tty_port.c | 10
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/common/common.c | 2
-rw-r--r--  drivers/usb/core/hcd.c | 3
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 6
-rw-r--r--  drivers/usb/gadget/udc/net2272.c | 1
-rw-r--r--  drivers/usb/gadget/udc/net2280.c | 8
-rw-r--r--  drivers/usb/host/u132-hcd.c | 3
-rw-r--r--  drivers/usb/host/xhci-dbgcap.c | 5
-rw-r--r--  drivers/usb/host/xhci-hub.c | 19
-rw-r--r--  drivers/usb/host/xhci-rcar.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 9
-rw-r--r--  drivers/usb/host/xhci.h | 8
-rw-r--r--  drivers/usb/misc/usb251xb.c | 4
-rw-r--r--  drivers/usb/mtu3/Kconfig | 1
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 4
-rw-r--r--  drivers/usb/serial/mos7720.c | 4
-rw-r--r--  drivers/usb/serial/option.c | 17
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 27
-rw-r--r--  drivers/usb/typec/tcpm/wcove.c | 9
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.c | 106
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.h | 15
-rw-r--r--  drivers/virt/vboxguest/vboxguest_linux.c | 26
-rw-r--r--  drivers/virt/vboxguest/vboxguest_utils.c | 32
-rw-r--r--  drivers/virt/vboxguest/vboxguest_version.h | 9
-rw-r--r--  drivers/virt/vboxguest/vmmdev.h | 8
-rw-r--r--  fs/afs/fsclient.c | 6
-rw-r--r--  fs/afs/yfsclient.c | 2
-rw-r--r--  fs/aio.c | 338
-rw-r--r--  fs/ceph/inode.c | 2
-rw-r--r--  fs/cifs/cifsfs.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 8
-rw-r--r--  fs/cifs/connect.c | 30
-rw-r--r--  fs/cifs/smb2file.c | 6
-rw-r--r--  fs/cifs/smb2ops.c | 68
-rw-r--r--  fs/cifs/smb2pdu.c | 49
-rw-r--r--  fs/cifs/smb2proto.h | 5
-rw-r--r--  fs/debugfs/inode.c | 13
-rw-r--r--  fs/fs_parser.c | 2
-rw-r--r--  fs/io_uring.c | 26
-rw-r--r--  fs/jffs2/readinode.c | 5
-rw-r--r--  fs/jffs2/super.c | 5
-rw-r--r--  fs/ocfs2/refcounttree.c | 42
-rw-r--r--  fs/open.c | 6
-rw-r--r--  fs/proc/kcore.c | 2
-rw-r--r--  fs/proc/proc_sysctl.c | 3
-rw-r--r--  fs/ubifs/super.c | 4
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 15
-rw-r--r--  fs/xfs/scrub/btree.c | 11
-rw-r--r--  fs/xfs/scrub/dabtree.c | 5
-rw-r--r--  fs/xfs/xfs_discard.c | 8
-rw-r--r--  fs/xfs/xfs_file.c | 27
-rw-r--r--  include/acpi/acoutput.h | 3
-rw-r--r--  include/acpi/platform/aclinux.h | 5
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/hugetlb.h | 8
-rw-r--r--  include/linux/kcore.h | 11
-rw-r--r--  include/linux/list.h | 2
-rw-r--r--  include/linux/mii.h | 2
-rw-r--r--  include/linux/mlx5/driver.h | 2
-rw-r--r--  include/linux/page-isolation.h | 10
-rw-r--r--  include/linux/platform_data/gpio/gpio-amd-fch.h | 2
-rw-r--r--  include/linux/sched/signal.h | 18
-rw-r--r--  include/linux/slab.h | 2
-rw-r--r--  include/linux/vbox_utils.h | 12
-rw-r--r--  include/net/ip.h | 2
-rw-r--r--  include/net/net_namespace.h | 1
-rw-r--r--  include/net/netns/hash.h | 10
-rw-r--r--  include/net/sch_generic.h | 44
-rw-r--r--  include/uapi/linux/Kbuild | 2
-rw-r--r--  include/uapi/linux/vbox_vmmdev_types.h | 60
-rw-r--r--  kernel/bpf/cpumap.c | 13
-rw-r--r--  kernel/bpf/inode.c | 32
-rw-r--r--  kernel/bpf/verifier.c | 5
-rw-r--r--  kernel/cpu.c | 20
-rw-r--r--  kernel/ptrace.c | 15
-rw-r--r--  kernel/signal.c | 13
-rw-r--r--  kernel/watchdog.c | 6
-rw-r--r--  lib/sbitmap.c | 11
-rw-r--r--  mm/compaction.c | 29
-rw-r--r--  mm/debug.c | 4
-rw-r--r--  mm/kasan/kasan.h | 5
-rw-r--r--  mm/memory.c | 11
-rw-r--r--  mm/memory_hotplug.c | 19
-rw-r--r--  mm/mempolicy.c | 40
-rw-r--r--  mm/migrate.c | 11
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/page_isolation.c | 51
-rw-r--r--  mm/slab.c | 2
-rw-r--r--  mm/slab.h | 3
-rw-r--r--  mm/slab_common.c | 2
-rw-r--r--  mm/slub.c | 5
-rw-r--r--  mm/sparse.c | 2
-rw-r--r--  net/8021q/vlan_dev.c | 26
-rw-r--r--  net/batman-adv/bat_v_elp.c | 6
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 16
-rw-r--r--  net/batman-adv/sysfs.c | 7
-rw-r--r--  net/batman-adv/translation-table.c | 32
-rw-r--r--  net/bridge/br_multicast.c | 3
-rw-r--r--  net/ceph/messenger.c | 8
-rw-r--r--  net/core/datagram.c | 2
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/ethtool.c | 46
-rw-r--r--  net/core/filter.c | 16
-rw-r--r--  net/core/flow_dissector.c | 4
-rw-r--r--  net/core/net_namespace.c | 1
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/dccp/feat.c | 7
-rw-r--r--  net/dsa/tag_qca.c | 10
-rw-r--r--  net/ipv4/ip_input.c | 7
-rw-r--r--  net/ipv4/ip_options.c | 4
-rw-r--r--  net/ipv4/tcp_dctcp.c | 36
-rw-r--r--  net/ipv4/tcp_ipv4.c | 3
-rw-r--r--  net/ipv6/ila/ila_xlat.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 4
-rw-r--r--  net/ipv6/ip6_tunnel.c | 4
-rw-r--r--  net/ipv6/sit.c | 4
-rw-r--r--  net/kcm/kcmsock.c | 16
-rw-r--r--  net/openvswitch/flow_netlink.c | 4
-rw-r--r--  net/rds/tcp.c | 2
-rw-r--r--  net/sched/act_sample.c | 10
-rw-r--r--  net/sched/cls_matchall.c | 5
-rw-r--r--  net/sched/sch_cake.c | 13
-rw-r--r--  net/sched/sch_cbq.c | 10
-rw-r--r--  net/sched/sch_drr.c | 16
-rw-r--r--  net/sched/sch_hfsc.c | 19
-rw-r--r--  net/sched/sch_htb.c | 22
-rw-r--r--  net/sched/sch_mq.c | 2
-rw-r--r--  net/sched/sch_mqprio.c | 3
-rw-r--r--  net/sched/sch_multiq.c | 10
-rw-r--r--  net/sched/sch_prio.c | 10
-rw-r--r--  net/sched/sch_qfq.c | 14
-rw-r--r--  net/sched/sch_red.c | 3
-rw-r--r--  net/sched/sch_sfb.c | 3
-rw-r--r--  net/sched/sch_taprio.c | 2
-rw-r--r--  net/sched/sch_tbf.c | 3
-rw-r--r--  net/sctp/protocol.c | 1
-rw-r--r--  net/tipc/netlink_compat.c | 24
-rw-r--r--  net/tls/tls_sw.c | 2
-rw-r--r--  scripts/Makefile.build | 7
-rwxr-xr-x  scripts/checkpatch.pl | 2
-rw-r--r--  scripts/coccinelle/free/put_device.cocci | 1
-rw-r--r--  scripts/coccinelle/misc/badty.cocci | 2
-rw-r--r--  scripts/kconfig/lxdialog/inputbox.c | 3
-rw-r--r--  scripts/kconfig/nconf.c | 2
-rw-r--r--  scripts/kconfig/nconf.gui.c | 3
-rw-r--r--  scripts/mod/modpost.c | 2
-rw-r--r--  security/Kconfig | 38
-rw-r--r--  security/yama/yama_lsm.c | 8
-rw-r--r--  sound/core/oss/pcm_oss.c | 43
-rw-r--r--  sound/core/pcm_native.c | 9
-rw-r--r--  sound/core/rawmidi.c | 2
-rw-r--r--  sound/core/seq/oss/seq_oss_synth.c | 7
-rw-r--r--  sound/pci/hda/patch_ca0132.c | 20
-rw-r--r--  sound/pci/hda/patch_realtek.c | 35
-rw-r--r--  tools/arch/alpha/include/uapi/asm/mman.h | 2
-rw-r--r--  tools/arch/mips/include/uapi/asm/mman.h | 2
-rw-r--r--  tools/arch/parisc/include/uapi/asm/mman.h | 2
-rw-r--r--  tools/arch/powerpc/include/uapi/asm/kvm.h | 2
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 1
-rw-r--r--  tools/arch/xtensa/include/uapi/asm/mman.h | 2
-rw-r--r--  tools/build/feature/test-libopencsd.c | 4
-rw-r--r--  tools/include/uapi/asm-generic/mman-common-tools.h | 23
-rw-r--r--  tools/include/uapi/asm-generic/mman-common.h | 4
-rw-r--r--  tools/include/uapi/asm-generic/mman.h | 2
-rw-r--r--  tools/include/uapi/asm-generic/unistd.h | 11
-rw-r--r--  tools/include/uapi/drm/i915_drm.h | 64
-rw-r--r--  tools/include/uapi/linux/fcntl.h | 1
-rw-r--r--  tools/include/uapi/linux/mman.h | 4
-rw-r--r--  tools/lib/bpf/Makefile | 7
-rw-r--r--  tools/lib/bpf/btf.c | 3
-rw-r--r--  tools/objtool/Makefile | 7
-rw-r--r--  tools/perf/Makefile.perf | 4
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 4
-rwxr-xr-x  tools/perf/check-headers.sh | 2
-rwxr-xr-x  tools/perf/scripts/python/exported-sql-viewer.py | 77
-rwxr-xr-x  tools/perf/trace/beauty/mmap_flags.sh | 14
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 1
-rw-r--r--  tools/perf/util/evlist.c | 29
-rw-r--r--  tools/perf/util/evlist.h | 2
-rw-r--r--  tools/perf/util/evsel.c | 72
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 20
-rw-r--r--  tools/perf/util/machine.c | 32
-rw-r--r--  tools/perf/util/pmu.c | 10
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 277
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/flow_dissector.c | 68
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_flow.c | 19
-rw-r--r--  tools/testing/selftests/bpf/test_btf.c | 47
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c | 38
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 4
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h | 1
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 16
-rw-r--r--  tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c | 35
-rw-r--r--  tools/testing/selftests/kvm/x86_64/state_test.c | 18
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/sample.json | 24
-rw-r--r--  virt/kvm/arm/hyp/vgic-v3-sr.c | 4
-rw-r--r--  virt/kvm/arm/mmu.c | 125
-rw-r--r--  virt/kvm/arm/vgic/vgic-its.c | 31
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c | 4
-rw-r--r--  virt/kvm/arm/vgic/vgic.c | 14
-rw-r--r--  virt/kvm/eventfd.c | 6
-rw-r--r--  virt/kvm/kvm_main.c | 3
527 files changed, 4631 insertions(+), 6928 deletions(-)
diff --git a/.mailmap b/.mailmap
index 37e1847c7988..ae2bcad06f4b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
+Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
@@ -224,3 +226,5 @@ Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 9a60a5d60e38..7313d354f20e 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
 for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
 
 The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
 
 Then in the struct memory layout, this member will occupy ``4`` bits starting
 from bits ``100 + 2 = 102``.
 
 Alternatively, the bitfield struct member can be the following to access the
 same bits as the above:
-
  * btf member bit offset 102,
  * btf member pointing to an int type,
  * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
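The bit arithmetic in the hunk above is easy to check in user space. A minimal
sketch (the BTF_INT_OFFSET()/BTF_INT_BITS() accessor macros mirror
include/uapi/linux/btf.h; the info word is the hypothetical bitfield from the
example, not data from a real BTF section):

    #include <stdio.h>
    #include <stdint.h>

    /* Accessor macros as defined in include/uapi/linux/btf.h. */
    #define BTF_INT_OFFSET(VAL)  (((VAL) & 0x00ff0000) >> 16)
    #define BTF_INT_BITS(VAL)    ((VAL) & 0x000000ff)

    int main(void)
    {
            /* Hypothetical info word: BTF_INT_OFFSET() = 2, BTF_INT_BITS() = 4. */
            uint32_t info = (2u << 16) | 4u;
            uint32_t member_bit_offset = 100;  /* btf member bit offset */

            /* Prints: 4 bits starting from bit 102 */
            printf("%u bits starting from bit %u\n", BTF_INT_BITS(info),
                   member_bit_offset + BTF_INT_OFFSET(info));
            return 0;
    }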
diff --git a/Documentation/devicetree/bindings/hwmon/adc128d818.txt b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
index 08bab0e94d25..d0ae46d7bac3 100644
--- a/Documentation/devicetree/bindings/hwmon/adc128d818.txt
+++ b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
@@ -26,7 +26,7 @@ Required node properties:
 
 Optional node properties:
 
- - ti,mode: Operation mode (see above).
+ - ti,mode: Operation mode (u8) (see above).
 
 
 Example (operation mode 2):
@@ -34,5 +34,5 @@ Example (operation mode 2):
 	adc128d818@1d {
 		compatible = "ti,adc128d818";
 		reg = <0x1d>;
-		ti,mode = <2>;
+		ti,mode = /bits/ 8 <2>;
 	};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
index dcc8390e0d24..dcc8390e0d24 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
index ee4c32454198..ee4c32454198 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
index bd81a482634f..bd81a482634f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
index 49df0053347a..49df0053347a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
index 94a425eaa6c7..94a425eaa6c7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index 742cb470595b..bcfb13194f16 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -16,6 +16,7 @@ Required properties:
16 * "mediatek,mt8127-uart" for MT8127 compatible UARTS 16 * "mediatek,mt8127-uart" for MT8127 compatible UARTS
17 * "mediatek,mt8135-uart" for MT8135 compatible UARTS 17 * "mediatek,mt8135-uart" for MT8135 compatible UARTS
18 * "mediatek,mt8173-uart" for MT8173 compatible UARTS 18 * "mediatek,mt8173-uart" for MT8173 compatible UARTS
19 * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
19 * "mediatek,mt6577-uart" for MT6577 and all of the above 20 * "mediatek,mt6577-uart" for MT6577 and all of the above
20 21
21- reg: The base address of the UART register bank. 22- reg: The base address of the UART register bank.
diff --git a/Documentation/filesystems/mount_api.txt b/Documentation/filesystems/mount_api.txt
index 944d1965e917..00ff0cfccfa7 100644
--- a/Documentation/filesystems/mount_api.txt
+++ b/Documentation/filesystems/mount_api.txt
@@ -12,11 +12,13 @@ CONTENTS
 
  (4) Filesystem context security.
 
- (5) VFS filesystem context operations.
+ (5) VFS filesystem context API.
 
- (6) Parameter description.
+ (6) Superblock creation helpers.
 
- (7) Parameter helper functions.
+ (7) Parameter description.
+
+ (8) Parameter helper functions.
 
 
 ========
@@ -41,12 +43,15 @@ The creation of new mounts is now to be done in a multistep process:
 
  (7) Destroy the context.
 
-To support this, the file_system_type struct gains a new field:
+To support this, the file_system_type struct gains two new fields:
 
 	int (*init_fs_context)(struct fs_context *fc);
+	const struct fs_parameter_description *parameters;
 
-which is invoked to set up the filesystem-specific parts of a filesystem
-context, including the additional space.
+The first is invoked to set up the filesystem-specific parts of a filesystem
+context, including the additional space, and the second points to the
+parameter description for validation at registration time and querying by a
+future system call.
 
 Note that security initialisation is done *after* the filesystem is called so
 that the namespaces may be adjusted first.
@@ -73,9 +78,9 @@ context. This is represented by the fs_context structure:
 	void			*s_fs_info;
 	unsigned int		sb_flags;
 	unsigned int		sb_flags_mask;
+	unsigned int		s_iflags;
+	unsigned int		lsm_flags;
 	enum fs_context_purpose	purpose:8;
-	bool			sloppy:1;
-	bool			silent:1;
 	...
 	};
 
@@ -141,6 +146,10 @@ The fs_context fields are as follows:
 
      Which bits SB_* flags are to be set/cleared in super_block::s_flags.
 
+ (*) unsigned int s_iflags
+
+     These will be bitwise-OR'd with s->s_iflags when a superblock is created.
+
  (*) enum fs_context_purpose
 
      This indicates the purpose for which the context is intended. The
@@ -150,17 +159,6 @@ The fs_context fields are as follows:
 	FS_CONTEXT_FOR_SUBMOUNT     -- New automatic submount of extant mount
 	FS_CONTEXT_FOR_RECONFIGURE  -- Change an existing mount
 
- (*) bool sloppy
- (*) bool silent
-
-     These are set if the sloppy or silent mount options are given.
-
-     [NOTE] sloppy is probably unnecessary when userspace passes over one
-     option at a time since the error can just be ignored if userspace deems it
-     to be unimportant.
-
-     [NOTE] silent is probably redundant with sb_flags & SB_SILENT.
-
 The mount context is created by calling vfs_new_fs_context() or
 vfs_dup_fs_context() and is destroyed with put_fs_context(). Note that the
 structure is not refcounted.
@@ -342,28 +340,47 @@ number of operations used by the new mount code for this purpose:
      It should return 0 on success or a negative error code on failure.
 
 
-=================================
-VFS FILESYSTEM CONTEXT OPERATIONS
-=================================
+==========================
+VFS FILESYSTEM CONTEXT API
+==========================
 
-There are four operations for creating a filesystem context and
-one for destroying a context:
+There are four operations for creating a filesystem context and one for
+destroying a context:
 
- (*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type,
-                                           struct dentry *reference,
-                                           unsigned int sb_flags,
-                                           unsigned int sb_flags_mask,
-                                           enum fs_context_purpose purpose);
+ (*) struct fs_context *fs_context_for_mount(
+	     struct file_system_type *fs_type,
+	     unsigned int sb_flags);
 
-     Create a filesystem context for a given filesystem type and purpose. This
-     allocates the filesystem context, sets the superblock flags, initialises
-     the security and calls fs_type->init_fs_context() to initialise the
-     filesystem private data.
+     Allocate a filesystem context for the purpose of setting up a new mount,
+     whether that be with a new superblock or sharing an existing one. This
+     sets the superblock flags, initialises the security and calls
+     fs_type->init_fs_context() to initialise the filesystem private data.
 
-     reference can be NULL or it may indicate the root dentry of a superblock
-     that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or
-     the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT).
-     This is provided as a source of namespace information.
+     fs_type specifies the filesystem type that will manage the context and
+     sb_flags presets the superblock flags stored therein.
+
+ (*) struct fs_context *fs_context_for_reconfigure(
+	     struct dentry *dentry,
+	     unsigned int sb_flags,
+	     unsigned int sb_flags_mask);
+
+     Allocate a filesystem context for the purpose of reconfiguring an
+     existing superblock. dentry provides a reference to the superblock to be
+     configured. sb_flags and sb_flags_mask indicate which superblock flags
+     need changing and to what.
+
+ (*) struct fs_context *fs_context_for_submount(
+	     struct file_system_type *fs_type,
+	     struct dentry *reference);
+
+     Allocate a filesystem context for the purpose of creating a new mount for
+     an automount point or other derived superblock. fs_type specifies the
+     filesystem type that will manage the context and the reference dentry
+     supplies the parameters. Namespaces are propagated from the reference
+     dentry's superblock also.
+
+     Note that it's not a requirement that the reference dentry be of the same
+     filesystem type as fs_type.
 
  (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc);
 
@@ -390,20 +407,6 @@ context pointer or a negative error code.
 For the remaining operations, if an error occurs, a negative error code will be
 returned.
 
- (*) int vfs_get_tree(struct fs_context *fc);
-
-     Get or create the mountable root and superblock, using the parameters in
-     the filesystem context to select/configure the superblock. This invokes
-     the ->validate() op and then the ->get_tree() op.
-
-     [NOTE] ->validate() could perhaps be rolled into ->get_tree() and
-     ->reconfigure().
-
- (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
-
-     Create a mount given the parameters in the specified filesystem context.
-     Note that this does not attach the mount to anything.
-
  (*) int vfs_parse_fs_param(struct fs_context *fc,
 			    struct fs_parameter *param);
 
@@ -432,17 +435,80 @@ returned.
      clear the pointer, but then becomes responsible for disposing of the
      object.
 
- (*) int vfs_parse_fs_string(struct fs_context *fc, char *key,
+ (*) int vfs_parse_fs_string(struct fs_context *fc, const char *key,
 			     const char *value, size_t v_size);
 
-     A wrapper around vfs_parse_fs_param() that just passes a constant string.
+     A wrapper around vfs_parse_fs_param() that copies the value string it is
+     passed.
 
  (*) int generic_parse_monolithic(struct fs_context *fc, void *data);
 
      Parse a sys_mount() data page, assuming the form to be a text list
      consisting of key[=val] options separated by commas. Each item in the
      list is passed to vfs_mount_option(). This is the default when the
-     ->parse_monolithic() operation is NULL.
+     ->parse_monolithic() method is NULL.
+
+ (*) int vfs_get_tree(struct fs_context *fc);
+
+     Get or create the mountable root and superblock, using the parameters in
+     the filesystem context to select/configure the superblock. This invokes
+     the ->get_tree() method.
+
+ (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
+
+     Create a mount given the parameters in the specified filesystem context.
+     Note that this does not attach the mount to anything.
+
+
+===========================
+SUPERBLOCK CREATION HELPERS
+===========================
+
+A number of VFS helpers are available for use by filesystems for the creation
+or looking up of superblocks.
+
+ (*) struct super_block *
+     sget_fc(struct fs_context *fc,
+	     int (*test)(struct super_block *sb, struct fs_context *fc),
+	     int (*set)(struct super_block *sb, struct fs_context *fc));
+
+     This is the core routine. If test is non-NULL, it searches for an
+     existing superblock matching the criteria held in the fs_context, using
+     the test function to match them. If no match is found, a new superblock
+     is created and the set function is called to set it up.
+
+     Prior to the set function being called, fc->s_fs_info will be transferred
+     to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns
+     success (ie. 0).
+
+The following helpers all wrap sget_fc():
+
+ (*) int vfs_get_super(struct fs_context *fc,
+		       enum vfs_get_super_keying keying,
+		       int (*fill_super)(struct super_block *sb,
+					 struct fs_context *fc))
+
+     This creates/looks up a deviceless superblock. The keying indicates how
+     many superblocks of this type may exist and in what manner they may be
+     shared:
+
+     (1) vfs_get_single_super
+
+	 Only one such superblock may exist in the system. Any further
+	 attempt to get a new superblock gets this one (and any parameter
+	 differences are ignored).
+
+     (2) vfs_get_keyed_super
+
+	 Multiple superblocks of this type may exist and they're keyed on
+	 their s_fs_info pointer (for example this may refer to a
+	 namespace).
+
+     (3) vfs_get_independent_super
+
+	 Multiple independent superblocks of this type may exist. This
+	 function never matches an existing one and always creates a new
+	 one.
 
 
 =====================
@@ -454,35 +520,22 @@ There's a core description struct that links everything together:
 
 	struct fs_parameter_description {
 		const char	name[16];
-		u8		nr_params;
-		u8		nr_alt_keys;
-		u8		nr_enums;
-		bool		ignore_unknown;
-		bool		no_source;
-		const char *const *keys;
-		const struct constant_table *alt_keys;
 		const struct fs_parameter_spec *specs;
 		const struct fs_parameter_enum *enums;
 	};
 
 For example:
 
-	enum afs_param {
+	enum {
 		Opt_autocell,
 		Opt_bar,
 		Opt_dyn,
 		Opt_foo,
 		Opt_source,
-		nr__afs_params
 	};
 
 	static const struct fs_parameter_description afs_fs_parameters = {
 		.name		= "kAFS",
-		.nr_params	= nr__afs_params,
-		.nr_alt_keys	= ARRAY_SIZE(afs_param_alt_keys),
-		.nr_enums	= ARRAY_SIZE(afs_param_enums),
-		.keys		= afs_param_keys,
-		.alt_keys	= afs_param_alt_keys,
 		.specs		= afs_param_specs,
 		.enums		= afs_param_enums,
 	};
@@ -494,28 +547,24 @@ The members are as follows:
      The name to be used in error messages generated by the parse helper
      functions.
 
- (2) u8 nr_params;
-
-     The number of discrete parameter identifiers. This indicates the number
-     of elements in the ->types[] array and also limits the values that may be
-     used in the values that the ->keys[] array maps to.
-
-     It is expected that, for example, two parameters that are related, say
-     "acl" and "noacl" with have the same ID, but will be flagged to indicate
-     that one is the inverse of the other. The value can then be picked out
-     from the parse result.
+ (2) const struct fs_parameter_specification *specs;
 
- (3) const struct fs_parameter_specification *specs;
+     Table of parameter specifications, terminated with a null entry, where the
+     entries are of type:
 
-     Table of parameter specifications, where the entries are of type:
-
-	struct fs_parameter_type {
-		enum fs_parameter_spec	type:8;
-		u8			flags;
-	};
+	struct fs_parameter_spec {
+		const char		*name;
+		u8			opt;
+		enum fs_parameter_type	type:8;
+		unsigned short		flags;
+	};
 
-     and the parameter identifier is the index to the array. 'type' indicates
-     the desired value type and must be one of:
+     The 'name' field is a string to match exactly to the parameter key (no
+     wildcards, patterns and no case-independence) and 'opt' is the value that
+     will be returned by the fs_parser() function in the case of a successful
+     match.
+
+     The 'type' field indicates the desired value type and must be one of:
 
 	TYPE NAME		EXPECTED VALUE		RESULT IN
 	=======================	=======================	=====================
@@ -525,85 +574,65 @@ The members are as follows:
 	fs_param_is_u32_octal	32-bit octal int	result->uint_32
 	fs_param_is_u32_hex	32-bit hex int		result->uint_32
 	fs_param_is_s32		32-bit signed int	result->int_32
+	fs_param_is_u64		64-bit unsigned int	result->uint_64
 	fs_param_is_enum	Enum value name		result->uint_32
 	fs_param_is_string	Arbitrary string	param->string
 	fs_param_is_blob	Binary blob		param->blob
 	fs_param_is_blockdev	Blockdev path		* Needs lookup
 	fs_param_is_path	Path			* Needs lookup
-	fs_param_is_fd		File descriptor		param->file
-
-     And each parameter can be qualified with 'flags':
-
-	fs_param_v_optional	The value is optional
-	fs_param_neg_with_no	If key name is prefixed with "no", it is false
-	fs_param_neg_with_empty	If value is "", it is false
-	fs_param_deprecated	The parameter is deprecated.
-
-     For example:
-
-	static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = {
-		[Opt_autocell]	= { fs_param_is flag },
-		[Opt_bar]	= { fs_param_is_enum },
-		[Opt_dyn]	= { fs_param_is flag },
-		[Opt_foo]	= { fs_param_is_bool, fs_param_neg_with_no },
-		[Opt_source]	= { fs_param_is_string },
-	};
+	fs_param_is_fd		File descriptor		result->int_32
 
      Note that if the value is of fs_param_is_bool type, fs_parse() will try
      to match any string value against "0", "1", "no", "yes", "false", "true".
 
-     [!] NOTE that the table must be sorted according to primary key name so
-     that ->keys[] is also sorted.
-
- (4) const char *const *keys;
-
-     Table of primary key names for the parameters. There must be one entry
-     per defined parameter. The table is optional if ->nr_params is 0. The
-     table is just an array of names e.g.:
+     Each parameter can also be qualified with 'flags':
 
-	static const char *const afs_param_keys[nr__afs_params] = {
-		[Opt_autocell]	= "autocell",
-		[Opt_bar]	= "bar",
-		[Opt_dyn]	= "dyn",
-		[Opt_foo]	= "foo",
-		[Opt_source]	= "source",
-	};
-
-     [!] NOTE that the table must be sorted such that the table can be searched
-     with bsearch() using strcmp(). This means that the Opt_* values must
-     correspond to the entries in this table.
-
- (5) const struct constant_table *alt_keys;
-     u8 nr_alt_keys;
-
-     Table of additional key names and their mappings to parameter ID plus the
-     number of elements in the table. This is optional. The table is just an
-     array of { name, integer } pairs, e.g.:
+	fs_param_v_optional	The value is optional
+	fs_param_neg_with_no	result->negated set if key is prefixed with "no"
+	fs_param_neg_with_empty	result->negated set if value is ""
+	fs_param_deprecated	The parameter is deprecated.
 
-	static const struct constant_table afs_param_keys[] = {
-		{ "baz",	Opt_bar },
-		{ "dynamic",	Opt_dyn },
+     These are wrapped with a number of convenience wrappers:
+
+	MACRO			SPECIFIES
+	=======================	===============================================
+	fsparam_flag()		fs_param_is_flag
+	fsparam_flag_no()	fs_param_is_flag, fs_param_neg_with_no
+	fsparam_bool()		fs_param_is_bool
+	fsparam_u32()		fs_param_is_u32
+	fsparam_u32oct()	fs_param_is_u32_octal
+	fsparam_u32hex()	fs_param_is_u32_hex
+	fsparam_s32()		fs_param_is_s32
+	fsparam_u64()		fs_param_is_u64
+	fsparam_enum()		fs_param_is_enum
+	fsparam_string()	fs_param_is_string
+	fsparam_blob()		fs_param_is_blob
+	fsparam_bdev()		fs_param_is_blockdev
+	fsparam_path()		fs_param_is_path
+	fsparam_fd()		fs_param_is_fd
+
+	all of which take two arguments, name string and option number - for
+	example:
+
+	static const struct fs_parameter_spec afs_param_specs[] = {
+		fsparam_flag	("autocell",	Opt_autocell),
+		fsparam_flag	("dyn",		Opt_dyn),
+		fsparam_string	("source",	Opt_source),
+		fsparam_flag_no	("foo",		Opt_foo),
+		{}
 	};
 
-     [!] NOTE that the table must be sorted such that strcmp() can be used with
-     bsearch() to search the entries.
-
-     The parameter ID can also be fs_param_key_removed to indicate that a
-     deprecated parameter has been removed and that an error will be given.
-     This differs from fs_param_deprecated where the parameter may still have
-     an effect.
-
-     Further, the behaviour of the parameter may differ when an alternate name
-     is used (for instance with NFS, "v3", "v4.2", etc. are alternate names).
+     An additional macro, __fsparam(), is provided that takes an additional
+     pair of arguments to specify the type and the flags for anything that
+     doesn't match one of the above macros.
 
  (6) const struct fs_parameter_enum *enums;
-     u8 nr_enums;
 
-     Table of enum value names to integer mappings and the number of elements
-     stored therein. This is of type:
+     Table of enum value names to integer mappings, terminated with a null
+     entry. This is of type:
 
 	struct fs_parameter_enum {
-		u8		param_id;
+		u8		opt;
 		char		name[14];
 		u8		value;
 	};
@@ -621,11 +650,6 @@ The members are as follows:
      try to look the value up in the enum table and the result will be stored
      in the parse result.
 
- (7) bool no_source;
-
-     If this is set, fs_parse() will ignore any "source" parameter and not
-     pass it to the filesystem.
-
 The parser should be pointed to by the parser pointer in the file_system_type
 struct as this will provide validation on registration (if
 CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from
@@ -650,9 +674,8 @@ process the parameters it is given.
 		int	value;
 	};
 
-     and it must be sorted such that it can be searched using bsearch() using
-     strcmp(). If a match is found, the corresponding value is returned. If a
-     match isn't found, the not_found value is returned instead.
+     If a match is found, the corresponding value is returned. If a match
+     isn't found, the not_found value is returned instead.
 
  (*) bool validate_constant_table(const struct constant_table *tbl,
 				  size_t tbl_size,
@@ -665,36 +688,36 @@ process the parameters it is given.
      should just be set to lie inside the low-to-high range.
 
      If all is good, true is returned. If the table is invalid, errors are
-     logged to dmesg, the stack is dumped and false is returned.
+     logged to dmesg and false is returned.
+
+ (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+
+     This performs some validation checks on a parameter description. It
+     returns true if the description is good and false if it is not. It will
+     log errors to dmesg if validation fails.
 
  (*) int fs_parse(struct fs_context *fc,
-		  const struct fs_param_parser *parser,
+		  const struct fs_parameter_description *desc,
 		  struct fs_parameter *param,
-		  struct fs_param_parse_result *result);
+		  struct fs_parse_result *result);
 
      This is the main interpreter of parameters. It uses the parameter
-     description (parser) to look up the name of the parameter to use and to
-     convert that to a parameter ID (stored in result->key).
+     description to look up a parameter by key name and to convert that to an
+     option number (which it returns).
 
      If successful, and if the parameter type indicates the result is a
      boolean, integer or enum type, the value is converted by this function and
-     the result stored in result->{boolean,int_32,uint_32}.
+     the result stored in result->{boolean,int_32,uint_32,uint_64}.
 
      If a match isn't initially made, the key is prefixed with "no" and no
      value is present then an attempt will be made to look up the key with the
      prefix removed. If this matches a parameter for which the type has flag
-     fs_param_neg_with_no set, then a match will be made and the value will be
-     set to false/0/NULL.
-
-     If the parameter is successfully matched and, optionally, parsed
-     correctly, 1 is returned. If the parameter isn't matched and
-     parser->ignore_unknown is set, then 0 is returned. Otherwise -EINVAL is
-     returned.
+     fs_param_neg_with_no set, then a match will be made and result->negated
+     will be set to true.
 
- (*) bool fs_validate_description(const struct fs_parameter_description *desc);
-
-     This is validates the parameter description. It returns true if the
-     description is good and false if it is not.
+     If the parameter isn't matched, -ENOPARAM will be returned; if the
+     parameter is matched, but the value is erroneous, -EINVAL will be
+     returned; otherwise the parameter's option number will be returned.
 
  (*) int fs_lookup_param(struct fs_context *fc,
 			 struct fs_parameter *value,
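For a sense of how the pieces in the mount_api.txt changes fit together, here
is a hedged sketch of a filesystem-side parameter table and parse handler in
the new style; the option names follow the document's own kAFS example, and
the switch bodies are illustrative placeholders rather than real kAFS code:

    #include <linux/fs_context.h>
    #include <linux/fs_parser.h>

    enum { Opt_autocell, Opt_dyn, Opt_foo, Opt_source };

    static const struct fs_parameter_spec afs_param_specs[] = {
            fsparam_flag    ("autocell", Opt_autocell),
            fsparam_flag    ("dyn",      Opt_dyn),
            fsparam_flag_no ("foo",      Opt_foo),
            fsparam_string  ("source",   Opt_source),
            {}
    };

    static const struct fs_parameter_description afs_fs_parameters = {
            .name  = "kAFS",
            .specs = afs_param_specs,
    };

    static int afs_parse_param(struct fs_context *fc, struct fs_parameter *param)
    {
            struct fs_parse_result result;
            int opt;

            /* Returns the option number, -ENOPARAM for an unknown key, or
             * -EINVAL for a recognised key with an erroneous value.
             */
            opt = fs_parse(fc, &afs_fs_parameters, param, &result);
            if (opt < 0)
                    return opt;

            switch (opt) {
            case Opt_foo:
                    /* "foo" vs "nofoo" is reported via result.negated. */
                    break;
            case Opt_source:
                    /* param->string holds the copied value string. */
                    break;
            }
            return 0;
    }

On the mount path the VFS drives this on the filesystem's behalf:
fs_context_for_mount() allocates the context, each option is fed through
vfs_parse_fs_param()/vfs_parse_fs_string() into ->parse_param(), and
vfs_get_tree() then creates or looks up the superblock.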
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d1ee484a787d..ee9984f35868 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@ Supported adapters:
   * Intel Cannon Lake (PCH)
   * Intel Cedar Fork (PCH)
   * Intel Ice Lake (PCH)
+  * Intel Comet Lake (PCH)
   Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst
new file mode 100644
index 000000000000..b375ae2ec2c4
--- /dev/null
+++ b/Documentation/networking/bpf_flow_dissector.rst
@@ -0,0 +1,126 @@
1.. SPDX-License-Identifier: GPL-2.0
2
3==================
4BPF Flow Dissector
5==================
6
7Overview
8========
9
10Flow dissector is a routine that parses metadata out of the packets. It's
11used in the various places in the networking subsystem (RFS, flow hash, etc).
12
13BPF flow dissector is an attempt to reimplement C-based flow dissector logic
14in BPF to gain all the benefits of BPF verifier (namely, limits on the
15number of instructions and tail calls).
16
17API
18===
19
20BPF flow dissector programs operate on an ``__sk_buff``. However, only a
21limited set of fields is allowed: ``data``, ``data_end`` and ``flow_keys``.
22``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
23and output arguments.
24
25The inputs are:
26 * ``nhoff`` - initial offset of the networking header
27 * ``thoff`` - initial offset of the transport header, initialized to nhoff
28 * ``n_proto`` - L3 protocol type, parsed out of L2 header
29
30The flow dissector BPF program should fill out the rest of the ``struct
31bpf_flow_keys`` fields. The input arguments ``nhoff/thoff/n_proto`` should
32also be adjusted accordingly.
33
34The return code of the BPF program is either BPF_OK to indicate successful
35dissection, or BPF_DROP to indicate a parsing error.
36
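
A minimal sketch of such a program under these constraints may help; the
include paths and the ``SEC()`` helper below follow the BPF selftests'
conventions and are assumptions here, and the actual IPv4 parsing is elided:

.. code:: c

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include "bpf_helpers.h"
    #include "bpf_endian.h"

    SEC("flow_dissector")
    int dissect(struct __sk_buff *skb)
    {
        struct bpf_flow_keys *keys = skb->flow_keys;

        /* Only data, data_end and flow_keys may be accessed. */
        if (keys->n_proto != bpf_htons(ETH_P_IP))
            return BPF_DROP;    /* unsupported L3 protocol */

        /* ... parse the IPv4 header at skb->data + keys->nhoff, then
         * fill keys->thoff, keys->ip_proto, addresses and ports ... */
        return BPF_OK;
    }

    char _license[] SEC("license") = "GPL";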
37__sk_buff->data
38===============
39
40In the VLAN-less case, this is what the initial state of the BPF flow
41dissector looks like::
42
43 +------+------+------------+-----------+
44 | DMAC | SMAC | ETHER_TYPE | L3_HEADER |
45 +------+------+------------+-----------+
46 ^
47 |
48 +-- flow dissector starts here
49
50
51.. code:: c
52
53  skb->data + flow_keys->nhoff points to the first byte of L3_HEADER
54 flow_keys->thoff = nhoff
55 flow_keys->n_proto = ETHER_TYPE
56
57In the case of VLAN, the flow dissector can be called with two different states.
58
59Pre-VLAN parsing::
60
61 +------+------+------+-----+-----------+-----------+
62 | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
63 +------+------+------+-----+-----------+-----------+
64 ^
65 |
66 +-- flow dissector starts here
67
68.. code:: c
69
70  skb->data + flow_keys->nhoff points to the first byte of TCI
71 flow_keys->thoff = nhoff
72 flow_keys->n_proto = TPID
73
74Please note that TPID can be 802.1AD and, hence, the BPF program would
75have to parse VLAN information twice for double-tagged packets.
76
77
78Post-VLAN parsing::
79
80 +------+------+------+-----+-----------+-----------+
81 | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
82 +------+------+------+-----+-----------+-----------+
83 ^
84 |
85 +-- flow dissector starts here
86
87.. code:: c
88
89  skb->data + flow_keys->nhoff points to the first byte of L3_HEADER
90 flow_keys->thoff = nhoff
91 flow_keys->n_proto = ETHER_TYPE
92
93In this case the VLAN information has been processed before the flow dissector
94and the BPF flow dissector is not required to handle it.
95
96
97The takeaway here is as follows: the BPF flow dissector program can be called
98with an optional VLAN header and should gracefully handle both cases: when a
99single or double VLAN tag is present and when it is not. The same program is
100called in all of these cases and has to be written carefully enough to
101handle each of them.
102
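
A sketch of how a program might normalize these two states before L3
parsing (a fragment from inside the dissector body; struct vlan_hdr comes
from linux/if_vlan.h and the unroll pragma is a common BPF loop idiom,
both assumptions here):

.. code:: c

    void *data = (void *)(long)skb->data;
    void *data_end = (void *)(long)skb->data_end;
    __be16 proto = keys->n_proto;
    int i;

    /* In the pre-VLAN state n_proto holds a TPID: strip up to two tags. */
    #pragma unroll
    for (i = 0; i < 2; i++) {
        if (proto == bpf_htons(ETH_P_8021Q) ||
            proto == bpf_htons(ETH_P_8021AD)) {
            struct vlan_hdr *vlan = data + keys->nhoff;

            if ((void *)(vlan + 1) > data_end)
                return BPF_DROP;
            proto = vlan->h_vlan_encapsulated_proto;
            keys->nhoff += sizeof(*vlan);
        }
    }
    keys->n_proto = proto;    /* an ETHER_TYPE in both states */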
103
104Reference Implementation
105========================
106
107See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
108implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
109for the loader. bpftool can be used to load a BPF flow dissector program as well.
110
111The reference implementation is organized as follows:
112 * ``jmp_table`` map that contains sub-programs for each supported L3 protocol
113 * ``_dissect`` routine - entry point; it parses the input ``n_proto`` and
114 does a ``bpf_tail_call`` to the appropriate L3 handler
115
116Since BPF at this point doesn't support looping (or any jumping back),
117jmp_table is used instead to handle multiple levels of encapsulation (and
118IPv6 options).
119
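
The dispatch itself can be sketched as follows (modelled loosely on the
selftest; the map definition style and the IP/IPV6 slot indices are
illustrative only):

.. code:: c

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include "bpf_helpers.h"
    #include "bpf_endian.h"

    enum { IP, IPV6 };    /* illustrative jmp_table slot indices */

    struct bpf_map_def SEC("maps") jmp_table = {
        .type = BPF_MAP_TYPE_PROG_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(__u32),
        .max_entries = 8,
    };

    SEC("flow_dissector")
    int _dissect(struct __sk_buff *skb)
    {
        struct bpf_flow_keys *keys = skb->flow_keys;

        switch (keys->n_proto) {
        case bpf_htons(ETH_P_IP):
            bpf_tail_call(skb, &jmp_table, IP);
            break;
        case bpf_htons(ETH_P_IPV6):
            bpf_tail_call(skb, &jmp_table, IPV6);
            break;
        }
        /* Either the tail call failed or the protocol is unsupported. */
        return BPF_DROP;
    }

    char _license[] SEC("license") = "GPL";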
120
121Current Limitations
122===================
123The BPF flow dissector doesn't support exporting all the metadata that the
124in-kernel C-based implementation can export. A notable example is single VLAN
125(802.1Q) and double VLAN (802.1AD) tags. Please refer to ``struct bpf_flow_keys``
126for the set of information that can currently be exported from the BPF context.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 5449149be496..984e68f9e026 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
9 netdev-FAQ 9 netdev-FAQ
10 af_xdp 10 af_xdp
11 batman-adv 11 batman-adv
12 bpf_flow_dissector
12 can 13 can
13 can_ucan_protocol 14 can_ucan_protocol
14 device_drivers/freescale/dpaa2/index 15 device_drivers/freescale/dpaa2/index
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7de9eee73fcd..67068c47c591 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -5,25 +5,32 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
5---------------------- 5----------------------
6 6
7The kvm API is a set of ioctls that are issued to control various aspects 7The kvm API is a set of ioctls that are issued to control various aspects
8of a virtual machine. The ioctls belong to three classes 8of a virtual machine. The ioctls belong to three classes:
9 9
10 - System ioctls: These query and set global attributes which affect the 10 - System ioctls: These query and set global attributes which affect the
11 whole kvm subsystem. In addition a system ioctl is used to create 11 whole kvm subsystem. In addition a system ioctl is used to create
12 virtual machines 12 virtual machines.
13 13
14 - VM ioctls: These query and set attributes that affect an entire virtual 14 - VM ioctls: These query and set attributes that affect an entire virtual
15 machine, for example memory layout. In addition a VM ioctl is used to 15 machine, for example memory layout. In addition a VM ioctl is used to
16 create virtual cpus (vcpus). 16 create virtual cpus (vcpus) and devices.
17 17
18 Only run VM ioctls from the same process (address space) that was used 18 VM ioctls must be issued from the same process (address space) that was
19 to create the VM. 19 used to create the VM.
20 20
21 - vcpu ioctls: These query and set attributes that control the operation 21 - vcpu ioctls: These query and set attributes that control the operation
22 of a single virtual cpu. 22 of a single virtual cpu.
23 23
24 Only run vcpu ioctls from the same thread that was used to create the 24 vcpu ioctls should be issued from the same thread that was used to create
25 vcpu. 25 the vcpu, except for asynchronous vcpu ioctls that are marked as such in
26 the documentation. Otherwise, the first ioctl after switching threads
27 could see a performance impact.
26 28
29 - device ioctls: These query and set attributes that control the operation
30 of a single device.
31
32 device ioctls must be issued from the same process (address space) that
33 was used to create the VM.
27 34
282. File descriptors 352. File descriptors
29------------------- 36-------------------
@@ -32,17 +39,34 @@ The kvm API is centered around file descriptors. An initial
32open("/dev/kvm") obtains a handle to the kvm subsystem; this handle 39open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
33can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this 40can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
34handle will create a VM file descriptor which can be used to issue VM 41handle will create a VM file descriptor which can be used to issue VM
35ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu 42ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
36and return a file descriptor pointing to it. Finally, ioctls on a vcpu 43create a virtual cpu or device and return a file descriptor pointing to
37fd can be used to control the vcpu, including the important task of 44the new resource. Finally, ioctls on a vcpu or device fd can be used
38actually running guest code. 45to control the vcpu or device. For vcpus, this includes the important
46task of actually running guest code.
39 47
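
To make the hierarchy concrete, a minimal userspace sketch (error handling
and the vcpu memory/register setup are omitted):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm  = open("/dev/kvm", O_RDWR);        /* system ioctls */
        int vm   = ioctl(kvm, KVM_CREATE_VM, 0);    /* VM ioctls */
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);   /* vcpu ioctls */

        /* ioctl(vcpu, KVM_RUN, 0) would run guest code once guest
         * memory and registers have been set up. */
        return 0;
    }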
40In general file descriptors can be migrated among processes by means 48In general file descriptors can be migrated among processes by means
41of fork() and the SCM_RIGHTS facility of unix domain socket. These 49of fork() and the SCM_RIGHTS facility of unix domain socket. These
42kinds of tricks are explicitly not supported by kvm. While they will 50kinds of tricks are explicitly not supported by kvm. While they will
43not cause harm to the host, their actual behavior is not guaranteed by 51not cause harm to the host, their actual behavior is not guaranteed by
44the API. The only supported use is one virtual machine per process, 52the API. See "General description" for details on the ioctl usage
45and one vcpu per thread. 53model that is supported by KVM.
54
55It is important to note that although VM ioctls may only be issued from
56the process that created the VM, a VM's lifecycle is associated with its
57file descriptor, not its creator (process). In other words, the VM and
58its resources, *including the associated address space*, are not freed
59until the last reference to the VM's file descriptor has been released.
60For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
61not be freed until both the parent (original) process and its child have
62put their references to the VM's file descriptor.
63
64Because a VM's resources are not freed until the last reference to its
65file descriptor is released, creating additional references to a VM via
66via fork(), dup(), etc... without careful consideration is strongly
67discouraged and may have unwanted side effects, e.g. memory allocated
68by and on behalf of the VM's process may not be freed/unaccounted when
69the VM is shut down.
46 70
47 71
48It is important to note that although VM ioctls may only be issued from 72It is important to note that although VM ioctls may only be issued from
@@ -515,11 +539,15 @@ c) KVM_INTERRUPT_SET_LEVEL
515Note that any value for 'irq' other than the ones stated above is invalid 539Note that any value for 'irq' other than the ones stated above is invalid
516and incurs unexpected behavior. 540and incurs unexpected behavior.
517 541
542This is an asynchronous vcpu ioctl and can be invoked from any thread.
543
518MIPS: 544MIPS:
519 545
520Queues an external interrupt to be injected into the virtual CPU. A negative 546Queues an external interrupt to be injected into the virtual CPU. A negative
521interrupt number dequeues the interrupt. 547interrupt number dequeues the interrupt.
522 548
549This is an asynchronous vcpu ioctl and can be invoked from any thread.
550
523 551
5244.17 KVM_DEBUG_GUEST 5524.17 KVM_DEBUG_GUEST
525 553
@@ -1086,14 +1114,12 @@ struct kvm_userspace_memory_region {
1086#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) 1114#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
1087#define KVM_MEM_READONLY (1UL << 1) 1115#define KVM_MEM_READONLY (1UL << 1)
1088 1116
1089This ioctl allows the user to create or modify a guest physical memory 1117This ioctl allows the user to create, modify or delete a guest physical
1090slot. When changing an existing slot, it may be moved in the guest 1118memory slot. Bits 0-15 of "slot" specify the slot id and this value
1091physical memory space, or its flags may be modified. It may not be 1119should be less than the maximum number of user memory slots supported per
1092resized. Slots may not overlap in guest physical address space. 1120VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
1093Bits 0-15 of "slot" specifies the slot id and this value should be 1121if this capability is supported by the architecture. Slots may not
1094less than the maximum number of user memory slots supported per VM. 1122overlap in guest physical address space.
1095The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
1096if this capability is supported by the architecture.
1097 1123
1098If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot" 1124If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
1099specify the address space which is being modified. They must be 1125specify the address space which is being modified. They must be
@@ -1102,6 +1128,10 @@ KVM_CAP_MULTI_ADDRESS_SPACE capability. Slots in separate address spaces
1102are unrelated; the restriction on overlapping slots only applies within 1128are unrelated; the restriction on overlapping slots only applies within
1103each address space. 1129each address space.
1104 1130
1131Deleting a slot is done by passing zero for memory_size. When changing
1132an existing slot, it may be moved in the guest physical memory space,
1133or its flags may be modified, but it may not be resized.
1134
1105Memory for the region is taken starting at the address denoted by the 1135Memory for the region is taken starting at the address denoted by the
1106field userspace_addr, which must point at user addressable memory for 1136field userspace_addr, which must point at user addressable memory for
1107the entire memory slot size. Any object may back this memory, including 1137the entire memory slot size. Any object may back this memory, including
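
As an illustration of the rules above, deleting a previously created slot
amounts to passing a zero memory_size (a sketch; vm_fd and the guest
physical address are placeholders):

    struct kvm_userspace_memory_region region = {
        .slot = 0,                    /* bits 0-15: slot id */
        .flags = 0,
        .guest_phys_addr = 0x100000,  /* where the slot was mapped */
        .memory_size = 0,             /* zero size deletes the slot */
        .userspace_addr = 0,
    };

    ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);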
@@ -2493,7 +2523,7 @@ KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
2493 machine checks needing further payload are not 2523 machine checks needing further payload are not
2494 supported by this ioctl) 2524 supported by this ioctl)
2495 2525
2496Note that the vcpu ioctl is asynchronous to vcpu execution. 2526This is an asynchronous vcpu ioctl and can be invoked from any thread.
2497 2527
24984.78 KVM_PPC_GET_HTAB_FD 25284.78 KVM_PPC_GET_HTAB_FD
2499 2529
@@ -3042,8 +3072,7 @@ KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
3042KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall 3072KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
3043KVM_S390_MCHK - machine check interrupt; parameters in .mchk 3073KVM_S390_MCHK - machine check interrupt; parameters in .mchk
3044 3074
3045 3075This is an asynchronous vcpu ioctl and can be invoked from any thread.
3046Note that the vcpu ioctl is asynchronous to vcpu execution.
3047 3076
30484.94 KVM_S390_GET_IRQ_STATE 30774.94 KVM_S390_GET_IRQ_STATE
3049 3078
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index f365102c80f5..2efe0efc516e 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -142,7 +142,7 @@ Shadow pages contain the following information:
142 If clear, this page corresponds to a guest page table denoted by the gfn 142 If clear, this page corresponds to a guest page table denoted by the gfn
143 field. 143 field.
144 role.quadrant: 144 role.quadrant:
145 When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit 145 When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit
146 sptes. That means a guest page table contains more ptes than the host, 146 sptes. That means a guest page table contains more ptes than the host,
147 so multiple shadow pages are needed to shadow one guest page. 147 so multiple shadow pages are needed to shadow one guest page.
148 For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the 148 For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
@@ -158,9 +158,9 @@ Shadow pages contain the following information:
158 The page is invalid and should not be used. It is a root page that is 158 The page is invalid and should not be used. It is a root page that is
159 currently pinned (by a cpu hardware register pointing to it); once it is 159 currently pinned (by a cpu hardware register pointing to it); once it is
160 unpinned it will be destroyed. 160 unpinned it will be destroyed.
161 role.cr4_pae: 161 role.gpte_is_8_bytes:
162 Contains the value of cr4.pae for which the page is valid (e.g. whether 162 Reflects the size of the guest PTE for which the page is valid, i.e. '1'
163 32-bit or 64-bit gptes are in use). 163 if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
164 role.nxe: 164 role.nxe:
165 Contains the value of efer.nxe for which the page is valid. 165 Contains the value of efer.nxe for which the page is valid.
166 role.cr0_wp: 166 role.cr0_wp:
@@ -173,6 +173,9 @@ Shadow pages contain the following information:
173 Contains the value of cr4.smap && !cr0.wp for which the page is valid 173 Contains the value of cr4.smap && !cr0.wp for which the page is valid
174 (pages for which this is true are different from other pages; see the 174 (pages for which this is true are different from other pages; see the
175 treatment of cr0.wp=0 below). 175 treatment of cr0.wp=0 below).
176 role.ept_sp:
177 This is a virtual flag to denote a shadowed nested EPT page. ept_sp
178 is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
176 role.smm: 179 role.smm:
177 Is 1 if the page is valid in system management mode. This field 180 Is 1 if the page is valid in system management mode. This field
178 determines which of the kvm_memslots array was used to build this 181 determines which of the kvm_memslots array was used to build this
diff --git a/MAINTAINERS b/MAINTAINERS
index c1e2f4070aa5..cdb088103b2e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2356,7 +2356,7 @@ F: arch/arm/mm/cache-uniphier.c
2356F: arch/arm64/boot/dts/socionext/uniphier* 2356F: arch/arm64/boot/dts/socionext/uniphier*
2357F: drivers/bus/uniphier-system-bus.c 2357F: drivers/bus/uniphier-system-bus.c
2358F: drivers/clk/uniphier/ 2358F: drivers/clk/uniphier/
2359F: drivers/dmaengine/uniphier-mdmac.c 2359F: drivers/dma/uniphier-mdmac.c
2360F: drivers/gpio/gpio-uniphier.c 2360F: drivers/gpio/gpio-uniphier.c
2361F: drivers/i2c/busses/i2c-uniphier* 2361F: drivers/i2c/busses/i2c-uniphier*
2362F: drivers/irqchip/irq-uniphier-aidet.c 2362F: drivers/irqchip/irq-uniphier-aidet.c
@@ -4132,7 +4132,7 @@ F: drivers/cpuidle/*
4132F: include/linux/cpuidle.h 4132F: include/linux/cpuidle.h
4133 4133
4134CRAMFS FILESYSTEM 4134CRAMFS FILESYSTEM
4135M: Nicolas Pitre <nico@linaro.org> 4135M: Nicolas Pitre <nico@fluxnic.net>
4136S: Maintained 4136S: Maintained
4137F: Documentation/filesystems/cramfs.txt 4137F: Documentation/filesystems/cramfs.txt
4138F: fs/cramfs/ 4138F: fs/cramfs/
@@ -5836,7 +5836,7 @@ L: netdev@vger.kernel.org
5836S: Maintained 5836S: Maintained
5837F: Documentation/ABI/testing/sysfs-bus-mdio 5837F: Documentation/ABI/testing/sysfs-bus-mdio
5838F: Documentation/devicetree/bindings/net/mdio* 5838F: Documentation/devicetree/bindings/net/mdio*
5839F: Documentation/networking/phy.txt 5839F: Documentation/networking/phy.rst
5840F: drivers/net/phy/ 5840F: drivers/net/phy/
5841F: drivers/of/of_mdio.c 5841F: drivers/of/of_mdio.c
5842F: drivers/of/of_net.c 5842F: drivers/of/of_net.c
@@ -6411,7 +6411,6 @@ L: linux-kernel@vger.kernel.org
6411T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core 6411T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
6412S: Maintained 6412S: Maintained
6413F: kernel/futex.c 6413F: kernel/futex.c
6414F: kernel/futex_compat.c
6415F: include/asm-generic/futex.h 6414F: include/asm-generic/futex.h
6416F: include/linux/futex.h 6415F: include/linux/futex.h
6417F: include/uapi/linux/futex.h 6416F: include/uapi/linux/futex.h
@@ -13976,7 +13975,7 @@ F: drivers/media/rc/serial_ir.c
13976SFC NETWORK DRIVER 13975SFC NETWORK DRIVER
13977M: Solarflare linux maintainers <linux-net-drivers@solarflare.com> 13976M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
13978M: Edward Cree <ecree@solarflare.com> 13977M: Edward Cree <ecree@solarflare.com>
13979M: Bert Kenward <bkenward@solarflare.com> 13978M: Martin Habets <mhabets@solarflare.com>
13980L: netdev@vger.kernel.org 13979L: netdev@vger.kernel.org
13981S: Supported 13980S: Supported
13982F: drivers/net/ethernet/sfc/ 13981F: drivers/net/ethernet/sfc/
diff --git a/Makefile b/Makefile
index c0a34064c574..026fbc450906 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 1 3PATCHLEVEL = 1
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc2 5EXTRAVERSION = -rc3
6NAME = Shy Crocodile 6NAME = Shy Crocodile
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -31,26 +31,12 @@ _all:
31# descending is started. They are now explicitly listed as the 31# descending is started. They are now explicitly listed as the
32# prepare rule. 32# prepare rule.
33 33
34# Ugly workaround for Debian make-kpkg: 34ifneq ($(sub_make_done),1)
35# make-kpkg directly includes the top Makefile of Linux kernel. In such a case,
36# skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but
37# displays warning to discourage such abusage.
38ifneq ($(word 2, $(MAKEFILE_LIST)),)
39$(warning Do not include top Makefile of Linux Kernel)
40sub-make-done := 1
41MAKEFLAGS += -rR
42endif
43
44ifneq ($(sub-make-done),1)
45 35
46# Do not use make's built-in rules and variables 36# Do not use make's built-in rules and variables
47# (this increases performance and avoids hard-to-debug behaviour) 37# (this increases performance and avoids hard-to-debug behaviour)
48MAKEFLAGS += -rR 38MAKEFLAGS += -rR
49 39
50# 'MAKEFLAGS += -rR' does not become immediately effective for old
51# GNU Make versions. Cancel implicit rules for this Makefile.
52$(lastword $(MAKEFILE_LIST)): ;
53
54# Avoid funny character set dependencies 40# Avoid funny character set dependencies
55unexport LC_ALL 41unexport LC_ALL
56LC_COLLATE=C 42LC_COLLATE=C
@@ -153,6 +139,7 @@ $(if $(KBUILD_OUTPUT),, \
153# 'sub-make' below. 139# 'sub-make' below.
154MAKEFLAGS += --include-dir=$(CURDIR) 140MAKEFLAGS += --include-dir=$(CURDIR)
155 141
142need-sub-make := 1
156else 143else
157 144
158# Do not print "Entering directory ..." at all for in-tree build. 145# Do not print "Entering directory ..." at all for in-tree build.
@@ -160,6 +147,18 @@ MAKEFLAGS += --no-print-directory
160 147
161endif # ifneq ($(KBUILD_OUTPUT),) 148endif # ifneq ($(KBUILD_OUTPUT),)
162 149
150ifneq ($(filter 3.%,$(MAKE_VERSION)),)
151# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
152# We need to invoke sub-make to avoid implicit rules in the top Makefile.
153need-sub-make := 1
154# Cancel implicit rules for this Makefile.
155$(lastword $(MAKEFILE_LIST)): ;
156endif
157
158export sub_make_done := 1
159
160ifeq ($(need-sub-make),1)
161
163PHONY += $(MAKECMDGOALS) sub-make 162PHONY += $(MAKECMDGOALS) sub-make
164 163
165$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make 164$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@@ -167,12 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
167 166
168# Invoke a second make in the output directory, passing relevant variables 167# Invoke a second make in the output directory, passing relevant variables
169sub-make: 168sub-make:
170 $(Q)$(MAKE) sub-make-done=1 \ 169 $(Q)$(MAKE) \
171 $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \ 170 $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
172 -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS)) 171 -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
173 172
174else # sub-make-done 173endif # need-sub-make
174endif # sub_make_done
175
175# We process the rest of the Makefile if this is the final invocation of make 176# We process the rest of the Makefile if this is the final invocation of make
177ifeq ($(need-sub-make),)
176 178
177# Do not print "Entering directory ...", 179# Do not print "Entering directory ...",
178# but we want to display it when entering to the output directory 180# but we want to display it when entering to the output directory
@@ -497,7 +499,8 @@ outputmakefile:
497ifneq ($(KBUILD_SRC),) 499ifneq ($(KBUILD_SRC),)
498 $(Q)ln -fsn $(srctree) source 500 $(Q)ln -fsn $(srctree) source
499 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) 501 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
500 $(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore 502 $(Q)test -e .gitignore || \
503 { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
501endif 504endif
502 505
503ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) 506ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
@@ -677,7 +680,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
677KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) 680KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
678 681
679ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 682ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
680KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) 683KBUILD_CFLAGS += -Os
681else 684else
682KBUILD_CFLAGS += -O2 685KBUILD_CFLAGS += -O2
683endif 686endif
@@ -950,9 +953,11 @@ mod_sign_cmd = true
950endif 953endif
951export mod_sign_cmd 954export mod_sign_cmd
952 955
956HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
957
953ifdef CONFIG_STACK_VALIDATION 958ifdef CONFIG_STACK_VALIDATION
954 has_libelf := $(call try-run,\ 959 has_libelf := $(call try-run,\
955 echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0) 960 echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
956 ifeq ($(has_libelf),1) 961 ifeq ($(has_libelf),1)
957 objtool_target := tools/objtool FORCE 962 objtool_target := tools/objtool FORCE
958 else 963 else
@@ -1757,7 +1762,7 @@ existing-targets := $(wildcard $(sort $(targets)))
1757 1762
1758endif # ifeq ($(config-targets),1) 1763endif # ifeq ($(config-targets),1)
1759endif # ifeq ($(mixed-targets),1) 1764endif # ifeq ($(mixed-targets),1)
1760endif # sub-make-done 1765endif # need-sub-make
1761 1766
1762PHONY += FORCE 1767PHONY += FORCE
1763FORCE: 1768FORCE:
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index dc0ab28baca1..70b783333965 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += exec.h
6generic-y += export.h 6generic-y += export.h
7generic-y += fb.h 7generic-y += fb.h
8generic-y += irq_work.h 8generic-y += irq_work.h
9generic-y += kvm_para.h
9generic-y += mcs_spinlock.h 10generic-y += mcs_spinlock.h
10generic-y += mm-arch-hooks.h 11generic-y += mm-arch-hooks.h
11generic-y += preempt.h 12generic-y += preempt.h
diff --git a/arch/alpha/include/uapi/asm/kvm_para.h b/arch/alpha/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/alpha/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/kvm_para.h>
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index b41f8881ecc8..decc306a3b52 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += hardirq.h
11generic-y += hw_irq.h 11generic-y += hw_irq.h
12generic-y += irq_regs.h 12generic-y += irq_regs.h
13generic-y += irq_work.h 13generic-y += irq_work.h
14generic-y += kvm_para.h
14generic-y += local.h 15generic-y += local.h
15generic-y += local64.h 16generic-y += local64.h
16generic-y += mcs_spinlock.h 17generic-y += mcs_spinlock.h
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 054ead960f98..850b4805e2d1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -596,6 +596,7 @@ config ARCH_DAVINCI
596 select HAVE_IDE 596 select HAVE_IDE
597 select PM_GENERIC_DOMAINS if PM 597 select PM_GENERIC_DOMAINS if PM
598 select PM_GENERIC_DOMAINS_OF if PM && OF 598 select PM_GENERIC_DOMAINS_OF if PM && OF
599 select REGMAP_MMIO
599 select RESET_CONTROLLER 600 select RESET_CONTROLLER
600 select SPARSE_IRQ 601 select SPARSE_IRQ
601 select USE_OF 602 select USE_OF
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 5641d162dfdb..28e7513ce617 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -93,7 +93,7 @@
93}; 93};
94 94
95&hdmi { 95&hdmi {
96 hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; 96 hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
97}; 97};
98 98
99&pwm { 99&pwm {
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index b715ab0fa1ff..e8d800fec637 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -114,9 +114,9 @@
114 reg = <2>; 114 reg = <2>;
115 }; 115 };
116 116
117 switch@0 { 117 switch@10 {
118 compatible = "qca,qca8334"; 118 compatible = "qca,qca8334";
119 reg = <0>; 119 reg = <10>;
120 120
121 switch_ports: ports { 121 switch_ports: ports {
122 #address-cells = <1>; 122 #address-cells = <1>;
@@ -125,7 +125,7 @@
125 ethphy0: port@0 { 125 ethphy0: port@0 {
126 reg = <0>; 126 reg = <0>;
127 label = "cpu"; 127 label = "cpu";
128 phy-mode = "rgmii"; 128 phy-mode = "rgmii-id";
129 ethernet = <&fec>; 129 ethernet = <&fec>;
130 130
131 fixed-link { 131 fixed-link {
diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
index 1d1b4bd0670f..a4217f564a53 100644
--- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
@@ -264,7 +264,7 @@
264 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 264 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
265 vmcc-supply = <&reg_sd3_vmmc>; 265 vmcc-supply = <&reg_sd3_vmmc>;
266 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; 266 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
267 bus-witdh = <4>; 267 bus-width = <4>;
268 no-1-8-v; 268 no-1-8-v;
269 status = "okay"; 269 status = "okay";
270}; 270};
@@ -275,7 +275,7 @@
275 pinctrl-1 = <&pinctrl_usdhc4_100mhz>; 275 pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
276 pinctrl-2 = <&pinctrl_usdhc4_200mhz>; 276 pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
277 vmcc-supply = <&reg_sd4_vmmc>; 277 vmcc-supply = <&reg_sd4_vmmc>;
278 bus-witdh = <8>; 278 bus-width = <8>;
279 no-1-8-v; 279 no-1-8-v;
280 non-removable; 280 non-removable;
281 status = "okay"; 281 status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 433bf09a1954..027df06c5dc7 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -91,6 +91,7 @@
91 pinctrl-0 = <&pinctrl_enet>; 91 pinctrl-0 = <&pinctrl_enet>;
92 phy-handle = <&ethphy>; 92 phy-handle = <&ethphy>;
93 phy-mode = "rgmii"; 93 phy-mode = "rgmii";
94 phy-reset-duration = <10>; /* in msecs */
94 phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; 95 phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
95 phy-supply = <&vdd_eth_io_reg>; 96 phy-supply = <&vdd_eth_io_reg>;
96 status = "disabled"; 97 status = "disabled";
diff --git a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
index f6fb6783c193..54cfe72295aa 100644
--- a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
+++ b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright (C) 2016 Freescale Semiconductor, Inc. 3 * Copyright (C) 2016 Freescale Semiconductor, Inc.
4 * Copyright (C) 2017 NXP 4 * Copyright (C) 2017 NXP
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 04066f9cb8a3..f2f6558a00f1 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -213,12 +213,13 @@
213 gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>; 213 gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
214 gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>; 214 gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
215 /* 215 /*
216 * It's not actually active high, but the frameworks assume 216 * This chipselect is active high. Just setting the flags
217 * the polarity of the passed-in GPIO is "normal" (active 217 * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings;
218 * high) then actively drives the line low to select the 218 * it will be ignored. Only the special "spi-cs-high" flag
219 * chip. 219 * really counts.
220 */ 220 */
221 cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; 221 cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
222 spi-cs-high;
222 num-chipselects = <1>; 223 num-chipselects = <1>;
223 224
224 /* 225 /*
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 8661dd9b064a..b37f8e675e40 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
170# CONFIG_IOMMU_SUPPORT is not set 170# CONFIG_IOMMU_SUPPORT is not set
171CONFIG_IIO=y 171CONFIG_IIO=y
172CONFIG_FSL_MX25_ADC=y 172CONFIG_FSL_MX25_ADC=y
173CONFIG_PWM=y
174CONFIG_PWM_IMX1=y
175CONFIG_PWM_IMX27=y
173CONFIG_EXT4_FS=y 176CONFIG_EXT4_FS=y
174# CONFIG_DNOTIFY is not set 177# CONFIG_DNOTIFY is not set
175CONFIG_VFAT_FS=y 178CONFIG_VFAT_FS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 5586a5074a96..50fb01d70b10 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -398,7 +398,7 @@ CONFIG_MAG3110=y
398CONFIG_MPL3115=y 398CONFIG_MPL3115=y
399CONFIG_PWM=y 399CONFIG_PWM=y
400CONFIG_PWM_FSL_FTM=y 400CONFIG_PWM_FSL_FTM=y
401CONFIG_PWM_IMX=y 401CONFIG_PWM_IMX27=y
402CONFIG_NVMEM_IMX_OCOTP=y 402CONFIG_NVMEM_IMX_OCOTP=y
403CONFIG_NVMEM_VF610_OCOTP=y 403CONFIG_NVMEM_VF610_OCOTP=y
404CONFIG_TEE=y 404CONFIG_TEE=y
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 2de96a180166..31de4ab93005 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
381 return ret; 381 return ret;
382} 382}
383 383
384static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
385 const void *data, unsigned long len)
386{
387 int srcu_idx = srcu_read_lock(&kvm->srcu);
388 int ret = kvm_write_guest(kvm, gpa, data, len);
389
390 srcu_read_unlock(&kvm->srcu, srcu_idx);
391
392 return ret;
393}
394
384static inline void *kvm_get_hyp_vector(void) 395static inline void *kvm_get_hyp_vector(void)
385{ 396{
386 switch(read_cpuid_part()) { 397 switch(read_cpuid_part()) {
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index de2089501b8b..9e11dce55e06 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
75 75
76#define S2_PMD_MASK PMD_MASK 76#define S2_PMD_MASK PMD_MASK
77#define S2_PMD_SIZE PMD_SIZE 77#define S2_PMD_SIZE PMD_SIZE
78#define S2_PUD_MASK PUD_MASK
79#define S2_PUD_SIZE PUD_SIZE
78 80
79static inline bool kvm_stage2_has_pmd(struct kvm *kvm) 81static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
80{ 82{
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 23b4464c0995..ce8573157774 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -3,3 +3,4 @@
3generated-y += unistd-common.h 3generated-y += unistd-common.h
4generated-y += unistd-oabi.h 4generated-y += unistd-oabi.h
5generated-y += unistd-eabi.h 5generated-y += unistd-eabi.h
6generic-y += kvm_para.h
diff --git a/arch/arm/include/uapi/asm/kvm_para.h b/arch/arm/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/arm/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index bfeb25aaf9a2..326e870d7123 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -16,30 +16,23 @@
16#include "cpuidle.h" 16#include "cpuidle.h"
17#include "hardware.h" 17#include "hardware.h"
18 18
19static atomic_t master = ATOMIC_INIT(0); 19static int num_idle_cpus = 0;
20static DEFINE_SPINLOCK(master_lock); 20static DEFINE_SPINLOCK(cpuidle_lock);
21 21
22static int imx6q_enter_wait(struct cpuidle_device *dev, 22static int imx6q_enter_wait(struct cpuidle_device *dev,
23 struct cpuidle_driver *drv, int index) 23 struct cpuidle_driver *drv, int index)
24{ 24{
25 if (atomic_inc_return(&master) == num_online_cpus()) { 25 spin_lock(&cpuidle_lock);
26 /* 26 if (++num_idle_cpus == num_online_cpus())
27 * With this lock, we prevent other cpu to exit and enter
28 * this function again and become the master.
29 */
30 if (!spin_trylock(&master_lock))
31 goto idle;
32 imx6_set_lpm(WAIT_UNCLOCKED); 27 imx6_set_lpm(WAIT_UNCLOCKED);
33 cpu_do_idle(); 28 spin_unlock(&cpuidle_lock);
34 imx6_set_lpm(WAIT_CLOCKED);
35 spin_unlock(&master_lock);
36 goto done;
37 }
38 29
39idle:
40 cpu_do_idle(); 30 cpu_do_idle();
41done: 31
42 atomic_dec(&master); 32 spin_lock(&cpuidle_lock);
33 if (num_idle_cpus-- == num_online_cpus())
34 imx6_set_lpm(WAIT_CLOCKED);
35 spin_unlock(&cpuidle_lock);
43 36
44 return index; 37 return index;
45} 38}
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index c7169c2f94c4..08c7892866c2 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
59 return; 59 return;
60 60
61 m4if_base = of_iomap(np, 0); 61 m4if_base = of_iomap(np, 0);
62 of_node_put(np);
62 if (!m4if_base) { 63 if (!m4if_base) {
63 pr_err("Unable to map M4IF registers\n"); 64 pr_err("Unable to map M4IF registers\n");
64 return; 65 return;
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 70498a033cf5..b5ca9c50876d 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -27,6 +27,7 @@ config ARCH_BCM2835
27 bool "Broadcom BCM2835 family" 27 bool "Broadcom BCM2835 family"
28 select TIMER_OF 28 select TIMER_OF
29 select GPIOLIB 29 select GPIOLIB
30 select MFD_CORE
30 select PINCTRL 31 select PINCTRL
31 select PINCTRL_BCM2835 32 select PINCTRL_BCM2835
32 select ARM_AMBA 33 select ARM_AMBA
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index bb2045be8814..97aeb946ed5e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -321,7 +321,6 @@
321 nvidia,default-trim = <0x9>; 321 nvidia,default-trim = <0x9>;
322 nvidia,dqs-trim = <63>; 322 nvidia,dqs-trim = <63>;
323 mmc-hs400-1_8v; 323 mmc-hs400-1_8v;
324 supports-cqe;
325 status = "disabled"; 324 status = "disabled";
326 }; 325 };
327 326
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 61a0afb74e63..1ea684af99c4 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -2,7 +2,7 @@
2/* 2/*
3 * Device Tree Source for the RZ/G2E (R8A774C0) SoC 3 * Device Tree Source for the RZ/G2E (R8A774C0) SoC
4 * 4 *
5 * Copyright (C) 2018 Renesas Electronics Corp. 5 * Copyright (C) 2018-2019 Renesas Electronics Corp.
6 */ 6 */
7 7
8#include <dt-bindings/clock/r8a774c0-cpg-mssr.h> 8#include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
@@ -1150,9 +1150,8 @@
1150 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, 1150 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
1151 <&scif_clk>; 1151 <&scif_clk>;
1152 clock-names = "fck", "brg_int", "scif_clk"; 1152 clock-names = "fck", "brg_int", "scif_clk";
1153 dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, 1153 dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
1154 <&dmac2 0x5b>, <&dmac2 0x5a>; 1154 dma-names = "tx", "rx";
1155 dma-names = "tx", "rx", "tx", "rx";
1156 power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; 1155 power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
1157 resets = <&cpg 202>; 1156 resets = <&cpg 202>;
1158 status = "disabled"; 1157 status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index a69faa60ea4d..d2ad665fe2d9 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -2,7 +2,7 @@
2/* 2/*
3 * Device Tree Source for the R-Car E3 (R8A77990) SoC 3 * Device Tree Source for the R-Car E3 (R8A77990) SoC
4 * 4 *
5 * Copyright (C) 2018 Renesas Electronics Corp. 5 * Copyright (C) 2018-2019 Renesas Electronics Corp.
6 */ 6 */
7 7
8#include <dt-bindings/clock/r8a77990-cpg-mssr.h> 8#include <dt-bindings/clock/r8a77990-cpg-mssr.h>
@@ -1067,9 +1067,8 @@
1067 <&cpg CPG_CORE R8A77990_CLK_S3D1C>, 1067 <&cpg CPG_CORE R8A77990_CLK_S3D1C>,
1068 <&scif_clk>; 1068 <&scif_clk>;
1069 clock-names = "fck", "brg_int", "scif_clk"; 1069 clock-names = "fck", "brg_int", "scif_clk";
1070 dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, 1070 dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
1071 <&dmac2 0x5b>, <&dmac2 0x5a>; 1071 dma-names = "tx", "rx";
1072 dma-names = "tx", "rx", "tx", "rx";
1073 power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; 1072 power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
1074 resets = <&cpg 202>; 1073 resets = <&cpg 202>;
1075 status = "disabled"; 1074 status = "disabled";
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b0742a16c6c9..ebeefcf835e8 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
445 return ret; 445 return ret;
446} 446}
447 447
448static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
449 const void *data, unsigned long len)
450{
451 int srcu_idx = srcu_read_lock(&kvm->srcu);
452 int ret = kvm_write_guest(kvm, gpa, data, len);
453
454 srcu_read_unlock(&kvm->srcu, srcu_idx);
455
456 return ret;
457}
458
448#ifdef CONFIG_KVM_INDIRECT_VECTORS 459#ifdef CONFIG_KVM_INDIRECT_VECTORS
449/* 460/*
450 * EL2 vectors can be mapped and rerouted in a number of ways, 461 * EL2 vectors can be mapped and rerouted in a number of ways,
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f8482fe5a190..413d566405d1 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
217 217
218 num_standard_resources = memblock.memory.cnt; 218 num_standard_resources = memblock.memory.cnt;
219 res_size = num_standard_resources * sizeof(*standard_resources); 219 res_size = num_standard_resources * sizeof(*standard_resources);
220 standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES); 220 standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
221 if (!standard_resources) 221 if (!standard_resources)
222 panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); 222 panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
223 223
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f16a5f8ff2b4..e2a0500cd7a2 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
123 int ret = -EINVAL; 123 int ret = -EINVAL;
124 bool loaded; 124 bool loaded;
125 125
126 /* Reset PMU outside of the non-preemptible section */
127 kvm_pmu_vcpu_reset(vcpu);
128
126 preempt_disable(); 129 preempt_disable();
127 loaded = (vcpu->cpu != -1); 130 loaded = (vcpu->cpu != -1);
128 if (loaded) 131 if (loaded)
@@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
170 vcpu->arch.reset_state.reset = false; 173 vcpu->arch.reset_state.reset = false;
171 } 174 }
172 175
173 /* Reset PMU */
174 kvm_pmu_vcpu_reset(vcpu);
175
176 /* Default workaround setup is enabled (if supported) */ 176 /* Default workaround setup is enabled (if supported) */
177 if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) 177 if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
178 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; 178 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 63b4a1705182..249c9f6f26dc 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += irq_work.h
19generic-y += kdebug.h 19generic-y += kdebug.h
20generic-y += kmap_types.h 20generic-y += kmap_types.h
21generic-y += kprobes.h 21generic-y += kprobes.h
22generic-y += kvm_para.h
22generic-y += local.h 23generic-y += local.h
23generic-y += mcs_spinlock.h 24generic-y += mcs_spinlock.h
24generic-y += mm-arch-hooks.h 25generic-y += mm-arch-hooks.h
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 3e7c8ecf151e..e3dead402e5f 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -23,6 +23,7 @@ generic-y += irq_work.h
23generic-y += kdebug.h 23generic-y += kdebug.h
24generic-y += kmap_types.h 24generic-y += kmap_types.h
25generic-y += kprobes.h 25generic-y += kprobes.h
26generic-y += kvm_para.h
26generic-y += linkage.h 27generic-y += linkage.h
27generic-y += local.h 28generic-y += local.h
28generic-y += local64.h 29generic-y += local64.h
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index b25fd42aa0f4..d046e8ccdf78 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += irq_work.h
19generic-y += kdebug.h 19generic-y += kdebug.h
20generic-y += kmap_types.h 20generic-y += kmap_types.h
21generic-y += kprobes.h 21generic-y += kprobes.h
22generic-y += kvm_para.h
22generic-y += local.h 23generic-y += local.h
23generic-y += local64.h 24generic-y += local64.h
24generic-y += mcs_spinlock.h 25generic-y += mcs_spinlock.h
diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/hexagon/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/kvm_para.h>
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 43e21fe3499c..11f191689c9e 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -2,6 +2,7 @@ generated-y += syscall_table.h
2generic-y += compat.h 2generic-y += compat.h
3generic-y += exec.h 3generic-y += exec.h
4generic-y += irq_work.h 4generic-y += irq_work.h
5generic-y += kvm_para.h
5generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
6generic-y += mm-arch-hooks.h 7generic-y += mm-arch-hooks.h
7generic-y += preempt.h 8generic-y += preempt.h
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 20018cb883a9..62a9522af51e 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generated-y += unistd_64.h generated-y += unistd_64.h
2generic-y += kvm_para.h
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 95f8f631c4df..2c359d9e80f6 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += irq_work.h
13generic-y += kdebug.h 13generic-y += kdebug.h
14generic-y += kmap_types.h 14generic-y += kmap_types.h
15generic-y += kprobes.h 15generic-y += kprobes.h
16generic-y += kvm_para.h
16generic-y += local.h 17generic-y += local.h
17generic-y += local64.h 18generic-y += local64.h
18generic-y += mcs_spinlock.h 19generic-y += mcs_spinlock.h
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 8a7ad40be463..7417847dc438 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generated-y += unistd_32.h generated-y += unistd_32.h
2generic-y += kvm_para.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 791cc8d54d0a..1a8285c3f693 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -17,6 +17,7 @@ generic-y += irq_work.h
17generic-y += kdebug.h 17generic-y += kdebug.h
18generic-y += kmap_types.h 18generic-y += kmap_types.h
19generic-y += kprobes.h 19generic-y += kprobes.h
20generic-y += kvm_para.h
20generic-y += linkage.h 21generic-y += linkage.h
21generic-y += local.h 22generic-y += local.h
22generic-y += local64.h 23generic-y += local64.h
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 3ce84fbb2678..13f59631c576 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -1,3 +1,2 @@
1generated-y += unistd_32.h 1generated-y += unistd_32.h
2generic-y += kvm_para.h
3generic-y += ucontext.h 2generic-y += ucontext.h
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 8fde4fa2c34f..88a667d12aaa 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -23,6 +23,7 @@ generic-y += irq_work.h
23generic-y += kdebug.h 23generic-y += kdebug.h
24generic-y += kmap_types.h 24generic-y += kmap_types.h
25generic-y += kprobes.h 25generic-y += kprobes.h
26generic-y += kvm_para.h
26generic-y += local.h 27generic-y += local.h
27generic-y += mcs_spinlock.h 28generic-y += mcs_spinlock.h
28generic-y += mm-arch-hooks.h 29generic-y += mm-arch-hooks.h
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 5a73e2956ac4..22aa97136c01 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += irq_work.h
20generic-y += kdebug.h 20generic-y += kdebug.h
21generic-y += kmap_types.h 21generic-y += kmap_types.h
22generic-y += kprobes.h 22generic-y += kprobes.h
23generic-y += kvm_para.h
23generic-y += local.h 24generic-y += local.h
24generic-y += mcs_spinlock.h 25generic-y += mcs_spinlock.h
25generic-y += mm-arch-hooks.h 26generic-y += mm-arch-hooks.h
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 6f49e77d82a2..9bcd0c903dbb 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += irq_regs.h
11generic-y += irq_work.h 11generic-y += irq_work.h
12generic-y += kdebug.h 12generic-y += kdebug.h
13generic-y += kprobes.h 13generic-y += kprobes.h
14generic-y += kvm_para.h
14generic-y += local.h 15generic-y += local.h
15generic-y += local64.h 16generic-y += local64.h
16generic-y += mcs_spinlock.h 17generic-y += mcs_spinlock.h
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index 22fdbd08cdc8..2bd5b392277c 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -1,3 +1,2 @@
1generated-y += unistd_32.h 1generated-y += unistd_32.h
2generated-y += unistd_64.h 2generated-y += unistd_64.h
3generic-y += kvm_para.h
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 844d8e774492..b7f6f6e0b6e8 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
215 beq .Lzero 215 beq .Lzero
216 216
217.Lcmp_rest_lt8bytes: 217.Lcmp_rest_lt8bytes:
218 /* Here we have only less than 8 bytes to compare with. at least s1 218 /*
219 * Address is aligned with 8 bytes. 219 * Here we have less than 8 bytes to compare. At least s1 is aligned to
220 * The next double words are load and shift right with appropriate 220 * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
221 * bits. 221 * page boundary, otherwise we might read past the end of the buffer and
222 * trigger a page fault. We use 4K as the conservative minimum page
223 * size. If we detect that case we go to the byte-by-byte loop.
224 *
225 * Otherwise the next double word is loaded from s1 and s2, and shifted
226 * right to compare the appropriate bits.
222 */ 227 */
228 clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
229 cmpdi r6,0xff8
230 bgt .Lshort
231
223 subfic r6,r5,8 232 subfic r6,r5,8
224 slwi r6,r6,3 233 slwi r6,r6,3
225 LD rA,0,r3 234 LD rA,0,r3
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index 6ed22127391b..921f12182f3e 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
77 77
78 ret = drc.drc_index_start + (thread_index * drc.sequential_inc); 78 ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
79 } else { 79 } else {
80 const __be32 *indexes; 80 u32 nr_drc_indexes, thread_drc_index;
81
82 indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
83 if (indexes == NULL)
84 goto err_of_node_put;
85 81
86 /* 82 /*
87 * The first element indexes[0] is the number of drc_indexes 83 * The first element of ibm,drc-indexes array is the
88 * returned in the list. Hence thread_index+1 will get the 84 * number of drc_indexes returned in the list. Hence
89 * drc_index corresponding to core number thread_index. 85 * thread_index+1 will get the drc_index corresponding
86 * to core number thread_index.
90 */ 87 */
91 ret = indexes[thread_index + 1]; 88 rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
89 0, &nr_drc_indexes);
90 if (rc)
91 goto err_of_node_put;
92
93 WARN_ON_ONCE(thread_index > nr_drc_indexes);
94 rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
95 thread_index + 1,
96 &thread_drc_index);
97 if (rc)
98 goto err_of_node_put;
99
100 ret = thread_drc_index;
92 } 101 }
93 102
94 rc = 0; 103 rc = 0;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index d97d52772789..452dcfd7e5dd 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
550 "UE", 550 "UE",
551 "SLB", 551 "SLB",
552 "ERAT", 552 "ERAT",
553 "Unknown",
553 "TLB", 554 "TLB",
554 "D-Cache", 555 "D-Cache",
555 "Unknown", 556 "Unknown",
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 57afe604b495..c207f6634b91 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -26,7 +26,7 @@ enum fixed_addresses {
26}; 26};
27 27
28#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) 28#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
29#define FIXADDR_TOP (PAGE_OFFSET) 29#define FIXADDR_TOP (VMALLOC_START)
30#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 30#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
31 31
32#define FIXMAP_PAGE_IO PAGE_KERNEL 32#define FIXMAP_PAGE_IO PAGE_KERNEL
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index a00168b980d2..fb53a8089e76 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -300,7 +300,7 @@ do { \
300 " .balign 4\n" \ 300 " .balign 4\n" \
301 "4:\n" \ 301 "4:\n" \
302 " li %0, %6\n" \ 302 " li %0, %6\n" \
303 " jump 2b, %1\n" \ 303 " jump 3b, %1\n" \
304 " .previous\n" \ 304 " .previous\n" \
305 " .section __ex_table,\"a\"\n" \ 305 " .section __ex_table,\"a\"\n" \
306 " .balign " RISCV_SZPTR "\n" \ 306 " .balign " RISCV_SZPTR "\n" \
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index f13f7f276639..598568168d35 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -4,7 +4,6 @@
4 4
5ifdef CONFIG_FTRACE 5ifdef CONFIG_FTRACE
6CFLAGS_REMOVE_ftrace.o = -pg 6CFLAGS_REMOVE_ftrace.o = -pg
7CFLAGS_REMOVE_setup.o = -pg
8endif 7endif
9 8
10extra-y += head.o 9extra-y += head.o
@@ -29,8 +28,6 @@ obj-y += vdso.o
29obj-y += cacheinfo.o 28obj-y += cacheinfo.o
30obj-y += vdso/ 29obj-y += vdso/
31 30
32CFLAGS_setup.o := -mcmodel=medany
33
34obj-$(CONFIG_FPU) += fpu.o 31obj-$(CONFIG_FPU) += fpu.o
35obj-$(CONFIG_SMP) += smpboot.o 32obj-$(CONFIG_SMP) += smpboot.o
36obj-$(CONFIG_SMP) += smp.o 33obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 7dd308129b40..2872edce894d 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
 {
 	s32 hi20;
 
-	if (IS_ENABLED(CMODEL_MEDLOW)) {
+	if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
 		pr_err(
 			"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
 			me->name, (long long)v, location);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index ecb654f6a79e..540a331d1376 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -48,14 +48,6 @@ struct screen_info screen_info = {
 };
 #endif
 
-unsigned long va_pa_offset;
-EXPORT_SYMBOL(va_pa_offset);
-unsigned long pfn_base;
-EXPORT_SYMBOL(pfn_base);
-
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
 unsigned long boot_cpu_hartid;
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index eb22ab49b3e0..b68aac701803 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -1,3 +1,9 @@
+
+CFLAGS_init.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_init.o = -pg
+endif
+
 obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index b379a75ac6a6..5fd8c922e1c2 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -25,6 +25,10 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+							__page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
 static void __init zone_sizes_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@@ -143,6 +147,11 @@ void __init setup_bootmem(void)
 	}
 }
 
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
@@ -172,6 +181,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 	}
 }
 
+/*
+ * setup_vm() is called from head.S with MMU-off.
+ *
+ * Following requirements should be honoured for setup_vm() to work
+ * correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ *    To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
+ *    so disable compiler instrumentation when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for init.o in mm/Makefile.
+ */
+
+#ifndef __riscv_cmodel_medany
+#error "setup_vm() is called from head.S before relocate so it should "
+	"not use absolute addressing."
+#endif
+
 asmlinkage void __init setup_vm(void)
 {
 	extern char _start;
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 1a6a7092d942..e94a0a28b5eb 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
 	return reg1;
 }
 
+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an ap bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void){};
+#endif
+
 #endif /* _ASM_S390_AP_H_ */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 7d22a474a040..f74639a05f0f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -252,11 +252,14 @@ do { \
 
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK	MMAP_RND_MASK
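As a quick sanity check on the figures in the revised comment: the masks are in units of 4KB pages, so the new 64-bit BRK_RND_MASK of 0x1fff corresponds to the stated 32MB, and the 31-bit mask of 0x7ff to 8MB. Throwaway user-space arithmetic, with the constants copied from the hunk:

#include <stdio.h>

int main(void)
{
	unsigned long page = 4096;

	/* masks count 4KB pages; +1 turns a mask into a range */
	printf("64-bit brk range: %lu MB\n", ((0x1fffUL + 1) * page) >> 20);
	printf("31-bit brk range: %lu MB\n", ((0x7ffUL + 1) * page) >> 20);
	return 0;
}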
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index cc0947e08b6f..5b9f10b1e55d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -91,52 +91,53 @@ struct lowcore {
 	__u64	hardirq_timer;			/* 0x02e8 */
 	__u64	softirq_timer;			/* 0x02f0 */
 	__u64	steal_timer;			/* 0x02f8 */
-	__u64	last_update_timer;		/* 0x0300 */
-	__u64	last_update_clock;		/* 0x0308 */
-	__u64	int_clock;			/* 0x0310 */
-	__u64	mcck_clock;			/* 0x0318 */
-	__u64	clock_comparator;		/* 0x0320 */
-	__u64	boot_clock[2];			/* 0x0328 */
+	__u64	avg_steal_timer;		/* 0x0300 */
+	__u64	last_update_timer;		/* 0x0308 */
+	__u64	last_update_clock;		/* 0x0310 */
+	__u64	int_clock;			/* 0x0318 */
+	__u64	mcck_clock;			/* 0x0320 */
+	__u64	clock_comparator;		/* 0x0328 */
+	__u64	boot_clock[2];			/* 0x0330 */
 
 	/* Current process. */
-	__u64	current_task;			/* 0x0338 */
-	__u64	kernel_stack;			/* 0x0340 */
+	__u64	current_task;			/* 0x0340 */
+	__u64	kernel_stack;			/* 0x0348 */
 
 	/* Interrupt, DAT-off and restart stack. */
-	__u64	async_stack;			/* 0x0348 */
-	__u64	nodat_stack;			/* 0x0350 */
-	__u64	restart_stack;			/* 0x0358 */
+	__u64	async_stack;			/* 0x0350 */
+	__u64	nodat_stack;			/* 0x0358 */
+	__u64	restart_stack;			/* 0x0360 */
 
 	/* Restart function and parameter. */
-	__u64	restart_fn;			/* 0x0360 */
-	__u64	restart_data;			/* 0x0368 */
-	__u64	restart_source;			/* 0x0370 */
+	__u64	restart_fn;			/* 0x0368 */
+	__u64	restart_data;			/* 0x0370 */
+	__u64	restart_source;			/* 0x0378 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0378 */
-	__u64	user_asce;			/* 0x0380 */
-	__u64	vdso_asce;			/* 0x0388 */
+	__u64	kernel_asce;			/* 0x0380 */
+	__u64	user_asce;			/* 0x0388 */
+	__u64	vdso_asce;			/* 0x0390 */
 
 	/*
 	 * The lpp and current_pid fields form a
 	 * 64-bit value that is set as program
 	 * parameter with the LPP instruction.
 	 */
-	__u32	lpp;				/* 0x0390 */
-	__u32	current_pid;			/* 0x0394 */
+	__u32	lpp;				/* 0x0398 */
+	__u32	current_pid;			/* 0x039c */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x0398 */
-	__u32	softirq_pending;		/* 0x039c */
-	__u32	preempt_count;			/* 0x03a0 */
-	__u32	spinlock_lockval;		/* 0x03a4 */
-	__u32	spinlock_index;			/* 0x03a8 */
-	__u32	fpu_flags;			/* 0x03ac */
-	__u64	percpu_offset;			/* 0x03b0 */
-	__u64	vdso_per_cpu_data;		/* 0x03b8 */
-	__u64	machine_flags;			/* 0x03c0 */
-	__u64	gmap;				/* 0x03c8 */
-	__u8	pad_0x03d0[0x0400-0x03d0];	/* 0x03d0 */
+	__u32	cpu_nr;				/* 0x03a0 */
+	__u32	softirq_pending;		/* 0x03a4 */
+	__u32	preempt_count;			/* 0x03a8 */
+	__u32	spinlock_lockval;		/* 0x03ac */
+	__u32	spinlock_index;			/* 0x03b0 */
+	__u32	fpu_flags;			/* 0x03b4 */
+	__u64	percpu_offset;			/* 0x03b8 */
+	__u64	vdso_per_cpu_data;		/* 0x03c0 */
+	__u64	machine_flags;			/* 0x03c8 */
+	__u64	gmap;				/* 0x03d0 */
+	__u8	pad_0x03d8[0x0400-0x03d8];	/* 0x03d8 */
 
 	/* br %r1 trampoline */
 	__u16	br_r1_trampoline;		/* 0x0400 */
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index c6fad208c2fa..b6854812d2ed 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
  */
 static int __hw_perf_event_init(struct perf_event *event)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	struct perf_event_attr *attr = &event->attr;
+	struct cpu_cf_events *cpuhw;
 	enum cpumf_ctr_set i;
 	int err = 0;
 
-	debug_sprintf_event(cf_diag_dbg, 5,
-			    "%s event %p cpu %d authorized %#x\n", __func__,
-			    event, event->cpu, cpuhw->info.auth_ctl);
+	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+			    event, event->cpu);
 
 	event->hw.config = attr->config;
 	event->hw.config_base = 0;
-	local64_set(&event->count, 0);
 
-	/* Add all authorized counter sets to config_base */
+	/* Add all authorized counter sets to config_base. The
+	 * hardware init function is either called per-cpu or just once
+	 * for all CPUs (event->cpu == -1). This depends on whether
+	 * counting is started for all CPUs or on a per-workload basis,
+	 * where the perf event moves from one CPU to another CPU.
+	 * Checking the authorization on any CPU is fine as the hardware
+	 * applies the same authorization settings to all CPUs.
+	 */
+	cpuhw = &get_cpu_var(cpu_cf_events);
 	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
 		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
 			event->hw.config_base |= cpumf_ctr_ctl[i];
+	put_cpu_var(cpu_cf_events);
 
 	/* No authorized counter sets, nothing to count/sample */
 	if (!event->hw.config_base) {
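The fix swaps this_cpu_ptr() for the get_cpu_var()/put_cpu_var() pair so the authorization bits are read with preemption disabled. A kernel-style sketch of that idiom; the per-CPU variable here (demo_counters) is hypothetical and compiles only in a kernel context, not something from this patch:

#include <linux/percpu.h>
#include <linux/types.h>

/* Sketch only: 'demo_counters' is an illustrative per-CPU variable. */
struct demo_counters {
	u64 auth_mask;
};
static DEFINE_PER_CPU(struct demo_counters, demo_counters);

static u64 read_local_auth_mask(void)
{
	struct demo_counters *c;
	u64 val;

	c = &get_cpu_var(demo_counters);	/* disables preemption */
	val = c->auth_mask;
	put_cpu_var(demo_counters);		/* re-enables preemption */

	return val;
}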
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3fe1c77c361b..bd197baf1dc3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	lc->percpu_offset = __per_cpu_offset[cpu];
 	lc->kernel_asce = S390_lowcore.kernel_asce;
 	lc->machine_flags = S390_lowcore.machine_flags;
-	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+	lc->user_timer = lc->system_timer =
+			lc->steal_timer = lc->avg_steal_timer = 0;
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	save_access_regs((unsigned int *) lc->access_regs_save_area);
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 98f850e00008..a69a0911ed0e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-	u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+	u64 timer, clock, user, guest, system, hardirq, softirq;
 
 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
 	if (softirq)
 		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
 
-	steal = S390_lowcore.steal_timer;
-	if ((s64) steal > 0) {
-		S390_lowcore.steal_timer = 0;
-		account_steal_time(cputime_to_nsecs(steal));
-	}
-
 	return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_flush(struct task_struct *tsk)
 {
+	u64 steal, avg_steal;
+
 	if (do_account_vtime(tsk))
 		virt_timer_expire();
+
+	steal = S390_lowcore.steal_timer;
+	avg_steal = S390_lowcore.avg_steal_timer / 2;
+	if ((s64) steal > 0) {
+		S390_lowcore.steal_timer = 0;
+		account_steal_time(steal);
+		avg_steal += steal;
+	}
+	S390_lowcore.avg_steal_timer = avg_steal;
 }
 
 /*
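The relocated accounting also starts keeping avg_steal_timer as an exponentially decaying average: each flush halves the stored value and folds in the fresh sample, i.e. avg_{n+1} = avg_n / 2 + steal_n. A minimal user-space model of just that recurrence, with illustrative names:

#include <stdint.h>
#include <stdio.h>

static uint64_t avg_steal_timer;

static void flush_steal(int64_t steal)
{
	uint64_t avg = avg_steal_timer / 2;	/* decay the history */

	if (steal > 0)
		avg += steal;			/* fold in the new sample */
	avg_steal_timer = avg;
}

int main(void)
{
	flush_steal(100);
	flush_steal(100);
	flush_steal(0);
	printf("avg=%llu\n", (unsigned long long)avg_steal_timer); /* 75 */
	return 0;
}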
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index a6ef3fee5f85..7bf2cb680d32 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index ecfbd40924dd..b8812c74c1de 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 
 generated-y += unistd_32.h
-generic-y += kvm_para.h
 generic-y += ucontext.h
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index b82f64e28f55..a22cfd5c0ee8 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += exec.h
 generic-y += export.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
diff --git a/arch/sparc/include/uapi/asm/kvm_para.h b/arch/sparc/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/sparc/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1d1544b6ca74..d77d953c04c1 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c1f9b3cf437c..5ad92419be19 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2217,14 +2217,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 	  If unsure, leave at the default value.
 
 config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs"
+	def_bool y
 	depends on SMP
-	---help---
-	  Say Y here to allow turning CPUs off and on. CPUs can be
-	  controlled through /sys/devices/system/cpu.
-	  ( Note: power management support will enable this option
-	    automatically on SMP systems. )
-	  Say N if you want to disable CPU hotplug.
 
 config BOOTPARAM_HOTPLUG_CPU0
 	bool "Set default setting of cpu0_hotpluggable"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d8b9d8ca4f8..a587805c6687 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -219,8 +219,12 @@ ifdef CONFIG_RETPOLINE
   # Additionally, avoid generating expensive indirect jumps which
   # are subject to retpolines for small number of switch cases.
   # clang turns off jump table generation by default when under
-  # retpoline builds, however, gcc does not for x86.
-  KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20)
+  # retpoline builds, however, gcc does not for x86. This has
+  # only been fixed starting from gcc stable version 8.4.0 and
+  # onwards, but not for older ones. See gcc bug #86952.
+  ifndef CONFIG_CC_IS_CLANG
+  KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+  endif
 endif
 
 archscripts: scripts_basic
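For context on the flag choice: a dense switch such as the toy function below is commonly lowered to a table of code pointers plus an indirect jump, which is exactly the construct retpoline builds try to avoid, and -fno-jump-tables forces a compare-and-branch lowering instead. Illustrative user-space code, not kernel code:

#include <stdio.h>

static int classify(int x)
{
	switch (x) {	/* dense cases invite a jump-table lowering */
	case 0: return 10;
	case 1: return 11;
	case 2: return 12;
	case 3: return 13;
	case 4: return 14;
	case 5: return 15;
	case 6: return 16;
	case 7: return 17;
	default: return -1;
	}
}

int main(void)
{
	printf("%d\n", classify(3));
	return 0;
}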
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index fd13655e0f9b..d2f184165934 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -120,8 +120,6 @@ static inline void console_init(void)
 
 void set_sev_encryption_mask(void);
 
-#endif
-
 /* acpi.c */
 #ifdef CONFIG_ACPI
 acpi_physical_address get_rsdp_addr(void);
@@ -135,3 +133,5 @@ int count_immovable_mem_regions(void);
 #else
 static inline int count_immovable_mem_regions(void) { return 0; }
 #endif
+
+#endif /* BOOT_COMPRESSED_MISC_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index ce95b8cbd229..0e56ff7e4848 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -112,8 +112,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 	 test_cpu_cap(c, bit))
 
 #define this_cpu_has(bit)						\
 	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
-	 x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+	 x86_this_cpu_test_bit(bit,					\
+			(unsigned long __percpu *)&cpu_info.x86_capability))
 
 /*
  * This macro is for detection of features which need kernel
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a5db4475e72d..159b5988292f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -253,14 +253,14 @@ struct kvm_mmu_memory_cache {
  * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
  * by indirect shadow page can not be more than 15 bits.
  *
- * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access,
+ * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
  * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
  */
 union kvm_mmu_page_role {
 	u32 word;
 	struct {
 		unsigned level:4;
-		unsigned cr4_pae:1;
+		unsigned gpte_is_8_bytes:1;
 		unsigned quadrant:2;
 		unsigned direct:1;
 		unsigned access:3;
@@ -350,6 +350,7 @@ struct kvm_mmu_page {
 };
 
 struct kvm_pio_request {
+	unsigned long linear_rip;
 	unsigned long count;
 	int in;
 	int port;
@@ -568,6 +569,7 @@ struct kvm_vcpu_arch {
 	bool tpr_access_reporting;
 	u64 ia32_xss;
 	u64 microcode_version;
+	u64 arch_capabilities;
 
 	/*
 	 * Paging state of the vcpu
@@ -1192,6 +1194,8 @@ struct kvm_x86_ops {
 	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
 				   uint16_t *vmcs_version);
 	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
+
+	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
@@ -1252,7 +1256,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 63b3393bd98e..c53682303c9c 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -77,7 +77,11 @@ static inline size_t real_mode_size_needed(void)
 	return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
 }
 
-void set_real_mode_mem(phys_addr_t mem, size_t size);
+static inline void set_real_mode_mem(phys_addr_t mem)
+{
+	real_mode_header = (struct real_mode_header *) __va(mem);
+}
+
 void reserve_real_mode(void);
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index f33f11f69078..1573a0a6b525 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -501,11 +501,8 @@ out_unlock:
 void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
 {
 	unsigned long delay = msecs_to_jiffies(delay_ms);
-	struct rdt_resource *r;
 	int cpu;
 
-	r = &rdt_resources_all[RDT_RESOURCE_L3];
-
 	cpu = cpumask_any(&dom->cpu_mask);
 	dom->cqm_work_cpu = cpu;
 
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 27c43525a05f..421899f6ad7b 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
 		new_config.enable = 0;
 	stimer->config.as_uint64 = new_config.as_uint64;
 
-	stimer_mark_pending(stimer, false);
+	if (stimer->config.enable)
+		stimer_mark_pending(stimer, false);
+
 	return 0;
 }
 
@@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
 		stimer->config.enable = 0;
 	else if (stimer->config.auto_enable)
 		stimer->config.enable = 1;
-	stimer_mark_pending(stimer, false);
+
+	if (stimer->config.enable)
+		stimer_mark_pending(stimer, false);
+
 	return 0;
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7837ab001d80..eee455a8a612 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -182,7 +182,7 @@ struct kvm_shadow_walk_iterator {
 
 static const union kvm_mmu_page_role mmu_base_role_mask = {
 	.cr0_wp = 1,
-	.cr4_pae = 1,
+	.gpte_is_8_bytes = 1,
 	.nxe = 1,
 	.smep_andnot_wp = 1,
 	.smap_andnot_wp = 1,
@@ -2205,6 +2205,7 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
+
 #define for_each_valid_sp(_kvm, _sp, _gfn)				\
 	hlist_for_each_entry(_sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@@ -2215,12 +2216,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	for_each_valid_sp(_kvm, _sp, _gfn)				\
 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
 
+static inline bool is_ept_sp(struct kvm_mmu_page *sp)
+{
+	return sp->role.cr0_wp && sp->role.smap_andnot_wp;
+}
+
 /* @sp->gfn should be write-protected at the call site */
 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			    struct list_head *invalid_list)
 {
-	if (sp->role.cr4_pae != !!is_pae(vcpu)
-	    || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+	if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
+	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return false;
 	}
@@ -2423,7 +2429,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	role.level = level;
 	role.direct = direct;
 	if (role.direct)
-		role.cr4_pae = 0;
+		role.gpte_is_8_bytes = true;
 	role.access = access;
 	if (!vcpu->arch.mmu->direct_map
 	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
@@ -4794,7 +4800,6 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
 
 	role.base.access = ACC_ALL;
 	role.base.nxe = !!is_nx(vcpu);
-	role.base.cr4_pae = !!is_pae(vcpu);
 	role.base.cr0_wp = is_write_protection(vcpu);
 	role.base.smm = is_smm(vcpu);
 	role.base.guest_mode = is_guest_mode(vcpu);
@@ -4815,6 +4820,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 	role.base.ad_disabled = (shadow_accessed_mask == 0);
 	role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
 	role.base.direct = true;
+	role.base.gpte_is_8_bytes = true;
 
 	return role;
 }
@@ -4879,6 +4885,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 	role.base.smap_andnot_wp = role.ext.cr4_smap &&
 		!is_write_protection(vcpu);
 	role.base.direct = !is_paging(vcpu);
+	role.base.gpte_is_8_bytes = !!is_pae(vcpu);
 
 	if (!is_long_mode(vcpu))
 		role.base.level = PT32E_ROOT_LEVEL;
@@ -4918,18 +4925,26 @@ static union kvm_mmu_role
 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
 				   bool execonly)
 {
-	union kvm_mmu_role role;
+	union kvm_mmu_role role = {0};
 
-	/* Base role is inherited from root_mmu */
-	role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
-	role.ext = kvm_calc_mmu_role_ext(vcpu);
+	/* SMM flag is inherited from root_mmu */
+	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
 
 	role.base.level = PT64_ROOT_4LEVEL;
+	role.base.gpte_is_8_bytes = true;
 	role.base.direct = false;
 	role.base.ad_disabled = !accessed_dirty;
 	role.base.guest_mode = true;
 	role.base.access = ACC_ALL;
 
+	/*
+	 * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
+	 * SMAP variation to denote shadow EPT entries.
+	 */
+	role.base.cr0_wp = true;
+	role.base.smap_andnot_wp = true;
+
+	role.ext = kvm_calc_mmu_role_ext(vcpu);
 	role.ext.execonly = execonly;
 
 	return role;
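The hunk relies on a flag pair that cannot occur for a real guest configuration (cr0_wp set together with smap_andnot_wp) and repurposes it as a tag for shadow-EPT pages, which is what the new is_ept_sp() helper tests for. A toy user-space model of that encoding, using an illustrative subset of the role bits:

#include <stdio.h>

struct role_bits {	/* illustrative subset, not KVM's full layout */
	unsigned cr0_wp:1;
	unsigned smap_andnot_wp:1;	/* semantically implies !cr0_wp */
};

static int is_ept(struct role_bits r)
{
	return r.cr0_wp && r.smap_andnot_wp;	/* impossible pair = tag */
}

int main(void)
{
	struct role_bits shadow = { .cr0_wp = 1, .smap_andnot_wp = 0 };
	struct role_bits ept    = { .cr0_wp = 1, .smap_andnot_wp = 1 };

	printf("shadow=%d ept=%d\n", is_ept(shadow), is_ept(ept));
	return 0;
}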
@@ -5179,7 +5194,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
 		 gpa, bytes, sp->role.word);
 
 	offset = offset_in_page(gpa);
-	pte_size = sp->role.cr4_pae ? 8 : 4;
+	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
 
 	/*
 	 * Sometimes, the OS only writes the last one bytes to update status
@@ -5203,7 +5218,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
 	page_offset = offset_in_page(gpa);
 	level = sp->role.level;
 	*nspte = 1;
-	if (!sp->role.cr4_pae) {
+	if (!sp->role.gpte_is_8_bytes) {
 		page_offset <<= 1;	/* 32->64 */
 		/*
 		 * A 32-bit pde maps 4MB while the shadow pdes map
@@ -5393,10 +5408,12 @@ emulate:
 	 * This can happen if a guest gets a page-fault on data access but the HW
 	 * table walker is not able to read the instruction page (e.g instruction
 	 * page is not present in memory). In those cases we simply restart the
-	 * guest.
+	 * guest, with the exception of AMD Erratum 1096 which is unrecoverable.
 	 */
-	if (unlikely(insn && !insn_len))
-		return 1;
+	if (unlikely(insn && !insn_len)) {
+		if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
+			return 1;
+	}
 
 	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
@@ -5509,7 +5526,9 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
 		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
 			if (flush && lock_flush_tlb) {
-				kvm_flush_remote_tlbs(kvm);
+				kvm_flush_remote_tlbs_with_address(kvm,
+						start_gfn,
+						iterator.gfn - start_gfn + 1);
 				flush = false;
 			}
 			cond_resched_lock(&kvm->mmu_lock);
@@ -5517,7 +5536,8 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	}
 
 	if (flush && lock_flush_tlb) {
-		kvm_flush_remote_tlbs(kvm);
+		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
+						   end_gfn - start_gfn + 1);
 		flush = false;
 	}
 
@@ -6011,7 +6031,7 @@ out:
 /*
  * Calculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 {
 	unsigned int nr_mmu_pages;
 	unsigned int nr_pages = 0;
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 9f6c855a0043..dd30dccd2ad5 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -29,10 +29,10 @@
 									\
 	role.word = __entry->role;					\
 									\
-	trace_seq_printf(p, "sp gfn %llx l%u%s q%u%s %s%s"		\
+	trace_seq_printf(p, "sp gfn %llx l%u %u-byte q%u%s %s%s"	\
 			 " %snxe %sad root %u %s%c",			\
 			 __entry->gfn, role.level,			\
-			 role.cr4_pae ? " pae" : "",			\
+			 role.gpte_is_8_bytes ? 8 : 4,			\
 			 role.quadrant,					\
 			 role.direct ? " direct" : "",			\
 			 access_str[role.access],			\
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b5b128a0a051..426039285fd1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -7098,6 +7098,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 	return -ENODEV;
 }
 
+static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+	bool is_user, smap;
+
+	is_user = svm_get_cpl(vcpu) == 3;
+	smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+
+	/*
+	 * Detect and work around Errata 1096 Fam_17h_00_0Fh.
+	 *
+	 * In a non-SEV guest, the hypervisor is able to read guest
+	 * memory to decode the instruction pointer when insn_len is zero,
+	 * so we return true to indicate that decoding is possible.
+	 *
+	 * But in an SEV guest, the guest memory is encrypted with the
+	 * guest-specific key and the hypervisor will not be able to decode
+	 * the instruction pointer, so we cannot work around the erratum.
+	 * Print the error and request to kill the guest.
+	 */
+	if (is_user && smap) {
+		if (!sev_guest(vcpu->kvm))
+			return true;
+
+		pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
+		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+	}
+
+	return false;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -7231,6 +7261,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
 	.nested_enable_evmcs = nested_enable_evmcs,
 	.nested_get_evmcs_version = nested_get_evmcs_version,
+
+	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index f24a2c225070..153e539c29c9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2585,6 +2585,11 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
 	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
 	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
 		return -EINVAL;
+
+	if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
+	    is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
+		return -EINVAL;
+
 	/*
 	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
 	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c73375e01ab8..ab432a930ae8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1683,12 +1683,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
 		break;
-	case MSR_IA32_ARCH_CAPABILITIES:
-		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
-			return 1;
-		msr_info->data = to_vmx(vcpu)->arch_capabilities;
-		break;
 	case MSR_IA32_SYSENTER_CS:
 		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
 		break;
@@ -1895,11 +1889,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
 					      MSR_TYPE_W);
 		break;
-	case MSR_IA32_ARCH_CAPABILITIES:
-		if (!msr_info->host_initiated)
-			return 1;
-		vmx->arch_capabilities = data;
-		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
 			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -4088,8 +4077,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		++vmx->nmsrs;
 	}
 
-	vmx->arch_capabilities = kvm_get_arch_capabilities();
-
 	vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
 
 	/* 22.2.1, 20.8.1 */
@@ -7409,6 +7396,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static __init int hardware_setup(void)
 {
 	unsigned long host_bndcfgs;
@@ -7711,6 +7703,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.set_nested_state = NULL,
 	.get_vmcs12_pages = NULL,
 	.nested_enable_evmcs = NULL,
+	.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
 };
 
 static void vmx_cleanup_l1d_flush(void)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 1554cb45b393..a1e00d0a2482 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -190,7 +190,6 @@ struct vcpu_vmx {
 	u64		      msr_guest_kernel_gs_base;
 #endif
 
-	u64		      arch_capabilities;
 	u64		      spec_ctrl;
 
 	u32 vm_entry_controls_shadow;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 65e4559eef2f..099b851dabaf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1125,7 +1125,7 @@ static u32 msrs_to_save[] = {
 #endif
 	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
 	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
-	MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES,
+	MSR_IA32_SPEC_CTRL,
 	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
 	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
 	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
@@ -1158,6 +1158,7 @@ static u32 emulated_msrs[] = {
 
 	MSR_IA32_TSC_ADJUST,
 	MSR_IA32_TSCDEADLINE,
+	MSR_IA32_ARCH_CAPABILITIES,
 	MSR_IA32_MISC_ENABLE,
 	MSR_IA32_MCG_STATUS,
 	MSR_IA32_MCG_CTL,
@@ -2443,6 +2444,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (msr_info->host_initiated)
 			vcpu->arch.microcode_version = data;
 		break;
+	case MSR_IA32_ARCH_CAPABILITIES:
+		if (!msr_info->host_initiated)
+			return 1;
+		vcpu->arch.arch_capabilities = data;
+		break;
 	case MSR_EFER:
 		return set_efer(vcpu, data);
 	case MSR_K7_HWCR:
@@ -2747,6 +2753,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = vcpu->arch.microcode_version;
 		break;
+	case MSR_IA32_ARCH_CAPABILITIES:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
+			return 1;
+		msr_info->data = vcpu->arch.arch_capabilities;
+		break;
 	case MSR_IA32_TSC:
 		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
 		break;
@@ -6523,14 +6535,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
+static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pio.count = 0;
+
+	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
+		return 1;
+
+	return kvm_skip_emulated_instruction(vcpu);
+}
+
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
 			    unsigned short port)
 {
 	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
 					    size, port, &val, 1);
-	/* do not return to emulator after return from userspace */
-	vcpu->arch.pio.count = 0;
+
+	if (!ret) {
+		vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
+		vcpu->arch.complete_userspace_io = complete_fast_pio_out;
+	}
 	return ret;
 }
 
@@ -6541,6 +6566,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
 	/* We should only ever be called with arch.pio.count equal to 1 */
 	BUG_ON(vcpu->arch.pio.count != 1);
 
+	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
+		vcpu->arch.pio.count = 0;
+		return 1;
+	}
+
 	/* For size less than 4 we merge, else we zero extend */
 	val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
 					: 0;
@@ -6553,7 +6583,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
 			   vcpu->arch.pio.port, &val, 1);
 	kvm_register_write(vcpu, VCPU_REGS_RAX, val);
 
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
@@ -6572,6 +6602,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
 		return ret;
 	}
 
+	vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
 	vcpu->arch.complete_userspace_io = complete_fast_pio_in;
 
 	return 0;
@@ -6579,16 +6610,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
 
 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
 {
-	int ret = kvm_skip_emulated_instruction(vcpu);
+	int ret;
 
-	/*
-	 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
-	 * KVM_EXIT_DEBUG here.
-	 */
 	if (in)
-		return kvm_fast_pio_in(vcpu, size, port) && ret;
+		ret = kvm_fast_pio_in(vcpu, size, port);
 	else
-		return kvm_fast_pio_out(vcpu, size, port) && ret;
+		ret = kvm_fast_pio_out(vcpu, size, port);
+	return ret && kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_fast_pio);
 
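Taken together, these hunks defer kvm_skip_emulated_instruction() until the port I/O actually completes, guarding the skip by remembering the linear RIP at the time the exit was queued. A rough user-space model of that record-then-verify pattern; vcpu_state and complete_fast_pio are illustrative names, not KVM's:

#include <stdio.h>

struct vcpu_state {	/* illustrative, not KVM's struct */
	unsigned long rip;
	unsigned long pio_linear_rip;
};

static int complete_fast_pio(struct vcpu_state *v)
{
	if (v->rip != v->pio_linear_rip)
		return 1;	/* guest moved on; do not skip */
	v->rip += 1;		/* stand-in for skipping the insn */
	return 0;
}

int main(void)
{
	struct vcpu_state v = { .rip = 0x1000 };
	int ret;

	v.pio_linear_rip = v.rip;	/* recorded when the exit is queued */
	/* ... userspace handles the exit; it may legitimately move rip ... */
	ret = complete_fast_pio(&v);
	printf("ret=%d rip=0x%lx\n", ret, v.rip);
	return 0;
}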
@@ -8733,6 +8761,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
 	kvm_vcpu_mtrr_init(vcpu);
 	vcpu_load(vcpu);
@@ -9429,13 +9458,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change)
 {
-	int nr_mmu_pages = 0;
-
 	if (!kvm->arch.n_requested_mmu_pages)
-		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
-
-	if (nr_mmu_pages)
-		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+		kvm_mmu_change_mmu_pages(kvm,
+				kvm_mmu_calculate_default_mmu_pages(kvm));
 
 	/*
 	 * Dirty logging tracks sptes in 4k granularity, meaning that large
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index db3165714521..dc726e07d8ba 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
 /* Can we access it for direct reading/writing? Must be RAM: */
 int valid_phys_addr_range(phys_addr_t addr, size_t count)
 {
-	return addr + count <= __pa(high_memory);
+	return addr + count - 1 <= __pa(high_memory - 1);
 }
 
 /* Can we access it through mmap? Must be a valid physical address: */
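The rewritten check compares the last requested byte against the last valid byte, so it never needs to translate high_memory itself, which points one past the end of the direct map. A small user-space model of why the inclusive form is the safer shape at a boundary, using a made-up 8-bit address space:

#include <stdint.h>
#include <stdio.h>

static int exclusive_form(uint8_t addr, uint8_t count, uint16_t end)
{
	return (uint16_t)(addr + count) <= end;	/* needs 'end' itself */
}

static int inclusive_form(uint8_t addr, uint8_t count, uint8_t last)
{
	return (uint8_t)(addr + count - 1) <= last; /* last valid byte */
}

int main(void)
{
	/* region covers the whole 8-bit space: 'end' (0x100) does not
	 * even fit in uint8_t, while 'last' (0xff) does */
	printf("%d %d\n", exclusive_form(0xf0, 0x10, 0x100),
	       inclusive_form(0xf0, 0x10, 0xff));
	return 0;
}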
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 458a0e2bcc57..a25a9fd987a9 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -449,7 +449,7 @@ void __init efi_free_boot_services(void)
 	 */
 	rm_size = real_mode_size_needed();
 	if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
-		set_real_mode_mem(start, rm_size);
+		set_real_mode_mem(start);
 		start += rm_size;
 		size -= rm_size;
 	}
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index d10105825d57..7dce39c8c034 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -15,15 +15,6 @@ u32 *trampoline_cr4_features;
 /* Hold the pgd entry used on booting additional CPUs */
 pgd_t trampoline_pgd_entry;
 
-void __init set_real_mode_mem(phys_addr_t mem, size_t size)
-{
-	void *base = __va(mem);
-
-	real_mode_header = (struct real_mode_header *) base;
-	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
-	       base, (unsigned long long)mem, size);
-}
-
 void __init reserve_real_mode(void)
 {
 	phys_addr_t mem;
@@ -42,7 +33,7 @@ void __init reserve_real_mode(void)
 	}
 
 	memblock_reserve(mem, size);
-	set_real_mode_mem(mem, size);
+	set_real_mode_mem(mem);
 }
 
 static void __init setup_real_mode(void)
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 42b6cb3d16f7..3843198e03d4 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index 8a7ad40be463..7417847dc438 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
 generated-y += unistd_32.h
-generic-y += kvm_para.h
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6e0f2d97fc6d..d95f94892015 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
 	} else {
-		blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+		blk_mq_put_driver_tag(flush_rq);
 		flush_rq->internal_tag = -1;
 	}
 
@@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 
 	if (q->elevator) {
 		WARN_ON(rq->tag < 0);
-		blk_mq_put_driver_tag_hctx(hctx, rq);
+		blk_mq_put_driver_tag(rq);
 	}
 
 	/*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 70b210a308c4..3ff3d7b49969 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 }
 
 /*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctx, dispatch list or elevator
+ * have pending work in this hardware queue.
  */
 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
@@ -1071,7 +1072,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
 	spin_lock(&hctx->dispatch_wait_lock);
-	list_del_init(&wait->entry);
+	if (!list_empty(&wait->entry)) {
+		struct sbitmap_queue *sbq;
+
+		list_del_init(&wait->entry);
+		sbq = &hctx->tags->bitmap_tags;
+		atomic_dec(&sbq->ws_active);
+	}
 	spin_unlock(&hctx->dispatch_wait_lock);
 
 	blk_mq_run_hw_queue(hctx, true);
@@ -1087,6 +1094,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 				 struct request *rq)
 {
+	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
 	struct wait_queue_head *wq;
 	wait_queue_entry_t *wait;
 	bool ret;
@@ -1109,7 +1117,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 	if (!list_empty_careful(&wait->entry))
 		return false;
 
-	wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+	wq = &bt_wait_ptr(sbq, hctx)->wait;
 
 	spin_lock_irq(&wq->lock);
 	spin_lock(&hctx->dispatch_wait_lock);
@@ -1119,6 +1127,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 		return false;
 	}
 
+	atomic_inc(&sbq->ws_active);
 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 	__add_wait_queue(wq, wait);
 
@@ -1139,6 +1148,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 	 * someone else gets the wakeup.
 	 */
 	list_del_init(&wait->entry);
+	atomic_dec(&sbq->ws_active);
 	spin_unlock(&hctx->dispatch_wait_lock);
 	spin_unlock_irq(&wq->lock);
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 0ed8e5a8729f..d704fc7766f4 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -224,15 +224,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
-					      struct request *rq)
-{
-	if (rq->tag == -1 || rq->internal_tag == -1)
-		return;
-
-	__blk_mq_put_driver_tag(hctx, rq);
-}
-
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
 	if (rq->tag == -1 || rq->internal_tag == -1)
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 62d3aa74277b..5e9d7348c16f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
 	ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-	/* Enable the requested GPE */
+	/* Clear the GPE status */
+	status = acpi_hw_clear_gpe(gpe_event_info);
+	if (ACPI_FAILURE(status))
+		return_ACPI_STATUS(status);
 
+	/* Enable the requested GPE */
 	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 6ecbbabf1233..eec263c9019e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
 
 	acpi_permanent_mmap = true;
 
-	/* Initialize debug output. Linux does not use ACPICA defaults */
-	acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
-
 #ifdef CONFIG_X86
 	/*
 	 * If the machine falls into the DMI check table,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1b207fca1420..d4244e7d0e38 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	cpc_read(cpunum, nominal_reg, &nom);
 	perf_caps->nominal_perf = nom;
 
-	cpc_read(cpunum, guaranteed_reg, &guaranteed);
-	perf_caps->guaranteed_perf = guaranteed;
+	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
+	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
+		perf_caps->guaranteed_perf = 0;
+	} else {
+		cpc_read(cpunum, guaranteed_reg, &guaranteed);
+		perf_caps->guaranteed_perf = guaranteed;
+	}
 
 	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
 	perf_caps->lowest_nonlinear_perf = min_nonlinear;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8685882da64c..4b9c7ca492e6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
 	size_t object_size = 0;
 
 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-	if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
 	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
 				      offset, read_size);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467670a0..195f120c4e8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+	mm = alloc->vma_vm_mm;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!down_write_trylock(&mm->mmap_sem))
+		goto err_down_write_mmap_sem_failed;
 	vma = binder_alloc_get_vma(alloc);
-	if (vma) {
-		if (!mmget_not_zero(alloc->vma_vm_mm))
-			goto err_mmget;
-		mm = alloc->vma_vm_mm;
-		if (!down_read_trylock(&mm->mmap_sem))
-			goto err_down_write_mmap_sem_failed;
-	}
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		zap_page_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
 	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index b3ed8f9953a8..173e6f2dd9af 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
 /* Per the spec, only slot type and drawer type ODD can be supported */
 static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 {
-	char buf[16];
+	char *buf;
 	unsigned int ret;
-	struct rm_feature_desc *desc = (void *)(buf + 8);
+	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
 	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
-			0, sizeof(buf),
+			0, 16,
 			0, 0, 0,
 	};
 
+	buf = kzalloc(16, GFP_KERNEL);
+	if (!buf)
+		return ODD_MECH_TYPE_UNSUPPORTED;
+	desc = (void *)(buf + 8);
+
 	ata_tf_init(dev, &tf);
 	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.command = ATA_CMD_PACKET;
 	tf.protocol = ATAPI_PROT_PIO;
-	tf.lbam = sizeof(buf);
+	tf.lbam = 16;
 
 	ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-				buf, sizeof(buf), 0);
-	if (ret)
+				buf, 16, 0);
+	if (ret) {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 
-	if (be16_to_cpu(desc->feature_code) != 3)
+	if (be16_to_cpu(desc->feature_code) != 3) {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 
-	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+		kfree(buf);
 		return ODD_MECH_TYPE_SLOT;
-	else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+	} else if (desc->mech_type == 1 && desc->load == 0 &&
+		   desc->eject == 1) {
+		kfree(buf);
 		return ODD_MECH_TYPE_DRAWER;
-	else
+	} else {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 }
 
 /* Test if ODD is zero power ready by sense code */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e7a5f1d1c314..399cad7daae7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
 	struct zram *zram = dev_to_zram(dev);
 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
 	int index;
-	char mode_buf[8];
-	ssize_t sz;
 
-	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-	if (sz <= 0)
-		return -EINVAL;
-
-	/* ignore trailing new line */
-	if (mode_buf[sz - 1] == '\n')
-		mode_buf[sz - 1] = 0x00;
-
-	if (strcmp(mode_buf, "all"))
+	if (!sysfs_streq(buf, "all"))
 		return -EINVAL;
 
 	down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
 	struct bio bio;
 	struct bio_vec bio_vec;
 	struct page *page;
-	ssize_t ret, sz;
-	char mode_buf[8];
-	int mode = -1;
+	ssize_t ret;
+	int mode;
 	unsigned long blk_idx = 0;
 
-	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-	if (sz <= 0)
-		return -EINVAL;
-
-	/* ignore trailing newline */
-	if (mode_buf[sz - 1] == '\n')
-		mode_buf[sz - 1] = 0x00;
-
-	if (!strcmp(mode_buf, "idle"))
+	if (sysfs_streq(buf, "idle"))
 		mode = IDLE_WRITEBACK;
-	else if (!strcmp(mode_buf, "huge"))
+	else if (sysfs_streq(buf, "huge"))
 		mode = HUGE_WRITEBACK;
-
-	if (mode == -1)
+	else
 		return -EINVAL;
 
 	down_read(&zram->init_lock);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72866a004f07..466ebd84ad17 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config XILINX_HWICAP
 
 config R3964
 	tristate "Siemens R3964 line discipline"
-	depends on TTY
+	depends on TTY && BROKEN
 	---help---
 	  This driver allows synchronous communication with devices using the
 	  Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e22f0dbaebb1..2986119dd31f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
 	if (ret)
 		return ret;
 
-	return cppc_perf.guaranteed_perf;
+	if (cppc_perf.guaranteed_perf)
+		return cppc_perf.guaranteed_perf;
+
+	return cppc_perf.nominal_perf;
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
@@ -2593,6 +2596,9 @@ static int __init intel_pstate_init(void)
 	const struct x86_cpu_id *id;
 	int rc;
 
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return -ENODEV;
+
 	if (no_load)
 		return -ENODEV;
 
@@ -2608,7 +2614,7 @@ static int __init intel_pstate_init(void)
 	} else {
 		id = x86_match_cpu(intel_pstate_cpu_ids);
 		if (!id) {
-			pr_info("CPU ID not supported\n");
+			pr_info("CPU model not supported\n");
 			return -ENODEV;
 		}
 
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 3f49427766b8..2b51e0718c9f 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 
 	clk_put(priv->clk);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-	kfree(priv);
 	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+	kfree(priv);
 
 	return 0;
 }
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 4e0eede599a8..ac0301b69593 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 
 	dmadev->nr_channels = nr_channels;
 	dmadev->nr_requests = nr_requests;
-	ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+	device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
 				       dmadev->ahb_addr_masks,
 				       count);
-	if (ret)
-		return ret;
 	dmadev->nr_ahb_addr_masks = count;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 91b90c0cea73..12acdac85820 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	if (err < 0)
 		goto out;
 
-	if (err & BIT(pos))
-		err = -EACCES;
+	if (value & BIT(pos)) {
+		err = -EPERM;
+		goto out;
+	}
 
 	err = 0;
 
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 854bce4fb9e7..217507002dbc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 
 	gpio->offset_timer =
 		devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+	if (!gpio->offset_timer)
+		return -ENOMEM;
 
 	return aspeed_gpio_setup_irqs(gpio, pdev);
 }
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 0ecd2369c2ca..a09d2f9ebacc 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
 	mutex_init(&exar_gpio->lock);
 
 	index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+	if (index < 0)
+		goto err_destroy;
 
 	sprintf(exar_gpio->name, "exar_gpio%d", index);
 	exar_gpio->gpio_chip.label = exar_gpio->name;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 154d959e8993..b6a4efce7c92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
 	struct gpio_mockup_chip *chip;
 	struct seq_file *sfile;
 	struct gpio_chip *gc;
+	int val, cnt;
 	char buf[3];
-	int val, rv;
 
 	if (*ppos != 0)
 		return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
 	gc = &chip->gc;
 
 	val = gpio_mockup_get(gc, priv->offset);
-	snprintf(buf, sizeof(buf), "%d\n", val);
+	cnt = snprintf(buf, sizeof(buf), "%d\n", val);
 
-	rv = copy_to_user(usr_buf, buf, sizeof(buf));
-	if (rv)
-		return rv;
-
-	return sizeof(buf) - 1;
+	return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
 }
 
 static ssize_t gpio_mockup_debugfs_write(struct file *file,
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8b9c3ab70f6e..6a3ec575a404 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
 	 * to determine if the flags should have inverted semantics.
 	 */
 	if (IS_ENABLED(CONFIG_SPI_MASTER) &&
-	    of_property_read_bool(np, "cs-gpios")) {
+	    of_property_read_bool(np, "cs-gpios") &&
+	    !strcmp(propname, "cs-gpios")) {
 		struct device_node *child;
 		u32 cs;
 		int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
 			 * conflict and the "spi-cs-high" flag will
 			 * take precedence.
 			 */
-			if (of_property_read_bool(np, "spi-cs-high")) {
+			if (of_property_read_bool(child, "spi-cs-high")) {
 				if (*flags & OF_GPIO_ACTIVE_LOW) {
 					pr_warn("%s GPIO handle specifies active low - ignored\n",
-						of_node_full_name(np));
+						of_node_full_name(child));
 					*flags &= ~OF_GPIO_ACTIVE_LOW;
 				}
 			} else {
 				if (!(*flags & OF_GPIO_ACTIVE_LOW))
 					pr_info("%s enforce active low on chipselect handle\n",
-						of_node_full_name(np));
+						of_node_full_name(child));
 				*flags |= OF_GPIO_ACTIVE_LOW;
 			}
 			break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
 	of_node_get(chip->of_node);
 
-	return of_gpiochip_scan_gpios(chip);
+	status = of_gpiochip_scan_gpios(chip);
+	if (status) {
+		of_node_put(chip->of_node);
+		gpiochip_remove_pin_ranges(chip);
+	}
+
+	return status;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 144af0733581..0495bf1d480a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 	}
 
 	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
-	return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+	return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_debounce);
 
@@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 	packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
 					  !transitory);
 	gpio = gpio_chip_hwgpio(desc);
-	rc = gpio_set_config(chip, gpio, packed);
+	rc = chip->set_config(chip, gpio, packed);
 	if (rc == -ENOTSUPP) {
 		dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
 			gpio);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4f8fb4ecde34..ac0d646a7b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3625,6 +3625,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
 	struct pci_dev *pdev = adev->pdev;
 	enum pci_bus_speed cur_speed;
 	enum pcie_link_width cur_width;
+	u32 ret = 1;
 
 	*speed = PCI_SPEED_UNKNOWN;
 	*width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3633,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
 	while (pdev) {
 		cur_speed = pcie_get_speed_cap(pdev);
 		cur_width = pcie_get_width_cap(pdev);
+		ret = pcie_bandwidth_available(adev->pdev, NULL,
+					       NULL, &cur_width);
+		if (!ret)
+			cur_width = PCIE_LNK_WIDTH_RESRV;
 
 		if (cur_speed != PCI_SPEED_UNKNOWN) {
 			if (*speed == PCI_SPEED_UNKNOWN)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d0309e8c9d12..a11db2b1a63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 	/* disable CG */
 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-	adev->gfx.rlc.funcs->reset(adev);
-
 	gfx_v9_0_init_pg(adev);
 
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index fb27783d7a54..81127f7d6ed1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5429,9 +5429,11 @@ static void get_freesync_config_for_crtc(
 	struct amdgpu_dm_connector *aconnector =
 			to_amdgpu_dm_connector(new_con_state->base.connector);
 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
+	int vrefresh = drm_mode_vrefresh(mode);
 
 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
-		aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+					vrefresh >= aconnector->min_vfreq &&
+					vrefresh <= aconnector->max_vfreq;
 
 	if (new_crtc_state->vrr_supported) {
 		new_crtc_state->stream->ignore_msa_timing_param = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4eba3c4800b6..ea18e9c2d8ce 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
 void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 {
 	struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+	struct dc_stream_state *stream = pipe_ctx->stream;
 
 	core_dc->hwss.blank_stream(pipe_ctx);
 
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		deallocate_mst_payload(pipe_ctx);
 
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+		dal_ddc_service_write_scdc_data(
+			stream->link->ddc, 0,
+			stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
 	core_dc->hwss.disable_stream(pipe_ctx, option);
 
 	disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 9aa7bec1b5fe..23b5b94a4939 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	 * MP0CLK DS
 	 */
 	data->registry_data.disallowed_features = 0xE0041C00;
+	/* ECC feature should be disabled on old SMUs */
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+	hwmgr->smu_version = smum_get_argument(hwmgr);
+	if (hwmgr->smu_version < 0x282100)
+		data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
 	data->registry_data.od_state_in_dc_support = 0;
 	data->registry_data.thermal_support = 1;
 	data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
 	data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
 	data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+	data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
 
 	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
 		data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 				"FCLK_DS",
 				"MP1CLK_DS",
 				"MP0CLK_DS",
-				"XGMI"};
+				"XGMI",
+				"ECC"};
 	static const char *output_title[] = {
 				"FEATURES",
 				"BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
 	struct vega20_single_dpm_table *dpm_table;
 	bool vblank_too_short = false;
 	bool disable_mclk_switching;
+	bool disable_fclk_switching;
 	uint32_t i, latency;
 
 	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
 	if (hwmgr->display_config->nb_pstate_switch_disable)
 		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
+	if ((disable_mclk_switching &&
+	    (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+	    hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+		disable_fclk_switching = true;
+	else
+		disable_fclk_switching = false;
+
 	/* fclk */
 	dpm_table = &(data->dpm_table.fclk_table);
 	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
 	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
 	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
 	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
-	if (hwmgr->display_config->nb_pstate_switch_disable)
+	if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
 		dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
 	/* vclk */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index a5bc758ae097..ac2a3118a0ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -80,6 +80,7 @@ enum {
 	GNLD_DS_MP1CLK,
 	GNLD_DS_MP0CLK,
 	GNLD_XGMI,
+	GNLD_ECC,
 
 	GNLD_FEATURES_MAX
 };
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 63d5cf691549..195c4ae67058 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -99,7 +99,7 @@
 #define FEATURE_DS_MP1CLK_BIT           30
 #define FEATURE_DS_MP0CLK_BIT           31
 #define FEATURE_XGMI_BIT                32
-#define FEATURE_SPARE_33_BIT            33
+#define FEATURE_ECC_BIT                 33
 #define FEATURE_SPARE_34_BIT            34
 #define FEATURE_SPARE_35_BIT            35
 #define FEATURE_SPARE_36_BIT            36
@@ -165,7 +165,8 @@
 #define FEATURE_DS_FCLK_MASK            (1 << FEATURE_DS_FCLK_BIT)
 #define FEATURE_DS_MP1CLK_MASK          (1 << FEATURE_DS_MP1CLK_BIT)
 #define FEATURE_DS_MP0CLK_MASK          (1 << FEATURE_DS_MP0CLK_BIT)
-#define FEATURE_XGMI_MASK               (1 << FEATURE_XGMI_BIT)
+#define FEATURE_XGMI_MASK               (1ULL << FEATURE_XGMI_BIT)
+#define FEATURE_ECC_MASK                (1ULL << FEATURE_ECC_BIT)
 
 #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
 #define DPM_OVERRIDE_DISABLE_UCLK_PID   0x00000002
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 381581b01d48..05bbc2b622fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
 	synchronize_srcu(&drm_unplug_srcu);
 
 	drm_dev_unregister(dev);
-
-	mutex_lock(&drm_global_mutex);
-	if (dev->open_count == 0)
-		drm_dev_put(dev);
-	mutex_unlock(&drm_global_mutex);
+	drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0e9349ff2d16..af2ab640cadb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 			best_depth = fmt->depth;
 		}
 	}
-	if (sizes.surface_depth != best_depth) {
+	if (sizes.surface_depth != best_depth && best_depth) {
 		DRM_INFO("requested bpp %d, scaled depth down to %d",
 			 sizes.surface_bpp, best_depth);
 		sizes.surface_depth = best_depth;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 83a5bbca6e7e..7caa3c7ed978 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	drm_close_helper(filp);
 
-	if (!--dev->open_count) {
+	if (!--dev->open_count)
 		drm_lastclose(dev);
-		if (drm_dev_is_unplugged(dev))
-			drm_put_dev(dev);
-	}
+
 	mutex_unlock(&drm_global_mutex);
 
 	drm_minor_release(minor);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 35b4ec3f7618..3592d04c33b2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	}
 
 	if (index_mode) {
-		if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+		if (guest_gma >= I915_GTT_PAGE_SIZE) {
 			ret = -EFAULT;
 			goto err;
 		}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 035479e273be..e3f9caa7839f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 /**
  * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
  * @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
  *
  * This function is used to trigger hotplug interrupt for vGPU
 *
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c857..5d887f7cc0d5 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -238,9 +238,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		default:
 			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
 		}
-
-		info->size = (((p.stride * p.height * p.bpp) / 8) +
-				(PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
 		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
 		if (ret)
@@ -262,14 +259,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 			info->x_hot = UINT_MAX;
 			info->y_hot = UINT_MAX;
 		}
-
-		info->size = (((info->stride * c.height * c.bpp) / 8)
-				+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else {
 		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
 		return -EINVAL;
 	}
 
+	info->size = (info->stride * info->height + PAGE_SIZE - 1)
+		     >> PAGE_SHIFT;
 	if (info->size == 0) {
 		gvt_vgpu_err("fb size is zero\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c7103dd2d8d5..cf133ef03873 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
 	}
 
 	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
 	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
 	return mm;
 }
 
@@ -1942,7 +1946,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-	atomic_dec(&mm->pincount);
+	atomic_dec_if_positive(&mm->pincount);
 }
 
 /**
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
 		if (ret)
 			return ret;
 
+		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 		list_move_tail(&mm->ppgtt_mm.lru_list,
 			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 	}
 
 	return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 	struct intel_vgpu_mm *mm;
 	struct list_head *pos, *n;
 
+	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
 	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 			continue;
 
 		list_del_init(&mm->ppgtt_mm.lru_list);
+		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
 		invalidate_ppgtt_mm(mm);
 		return 1;
 	}
+	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
 	return 0;
 }
 
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		}
 	}
 	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+	mutex_init(&gvt->gtt.ppgtt_mm_lock);
 	return 0;
 }
 
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
 		if (mm->type == INTEL_GVT_MM_PPGTT) {
+			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
 			list_del_init(&mm->ppgtt_mm.lru_list);
+			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
 			if (mm->ppgtt_mm.shadowed)
 				invalidate_ppgtt_mm(mm);
 		}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d8cb04cc946d..edb610dc5d86 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
 	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
+	struct mutex ppgtt_mm_lock;
 	struct list_head ppgtt_mm_lru_list_head;
 
 	struct page *scratch_page;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7d84cfb9051a..7902fb162d09 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
 	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
 	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+	{RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
 	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb8f936fdaa..05b953793316 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	int i = 0;
 
 	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-		return -1;
+		return -EINVAL;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
 		px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (workload->shadow)
 		return 0;
 
-	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-	if (ret < 0) {
-		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-		return ret;
-	}
-
 	/* pin shadow context by gvt even the shadow context will be pinned
 	 * when i915 alloc request. That is because gvt will update the guest
 	 * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct i915_request *rq;
 	int ring_id = workload->ring_id;
 	int ret;
 
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+	if (ret < 0) {
+		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+		goto err_req;
+	}
+
 	ret = intel_gvt_workload_req_alloc(workload);
 	if (ret)
 		goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	ret = prepare_workload(workload);
 out:
+	if (ret) {
+		/* We might still need to add request with
+		 * clean ctx to retire it properly..
+		 */
+		rq = fetch_and_zero(&workload->req);
+		i915_request_put(rq);
+	}
+
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 			      ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
 		goto out;
 	}
 
-	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+	if (!scheduler->current_vgpu->active ||
+	    list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
 		goto out;
 
 	/*
@@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 		intel_runtime_pm_put_unchecked(dev_priv);
 	}
 
-	if (ret && (vgpu_is_vm_unhealthy(ret))) {
-		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+	if (ret) {
+		if (vgpu_is_vm_unhealthy(ret))
+			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 		intel_vgpu_destroy_workload(workload);
 		return ERR_PTR(ret);
 	}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4..f6f6e5b78e97 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
 				       &ctx);
 		if (ret) {
-			ret = -EINTR;
+			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+				try_again = true;
+				continue;
+			}
 			break;
 		}
 		crtc = connector->state->crtc;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9adc7bb9e69c..a67a63b5aa84 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
 				 INTEL_DEVID(dev_priv) == 0x5915 || \
 				 INTEL_DEVID(dev_priv) == 0x591E)
 #define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
-				 INTEL_DEVID(dev_priv) == 0x87C0)
+				 INTEL_DEVID(dev_priv) == 0x87C0 || \
+				 INTEL_DEVID(dev_priv) == 0x87CA)
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
 				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 638a586469f9..047855dd8c6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
 #define GEN11_GT_VEBOX_VDBOX_DISABLE	_MMIO(0x9140)
 #define   GEN11_GT_VDBOX_DISABLE_MASK	0xff
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT	16
-#define   GEN11_GT_VEBOX_DISABLE_MASK	(0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define   GEN11_GT_VEBOX_DISABLE_MASK	(0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
 #define GEN11_EU_DISABLE _MMIO(0x9134)
 #define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
 #define TRANS_DDI_FUNC_CTL2(tran)	_MMIO_TRANS2(tran, \
 					     _TRANS_DDI_FUNC_CTL2_A)
 #define  PORT_SYNC_MODE_ENABLE			(1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) < 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK	(0x7 << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT	0
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f63..b9b0ea4e2404 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
 		struct i915_gem_context *ctx;
 
 		ctx = live_context(i915, file);
-		if (!ctx)
+		if (IS_ERR(ctx))
 			break;
 
 		/* We will need some GGTT space for the rq's context */
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2281ed3eb774..8a4ebcb6405c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
 	ret = drm_dev_register(drm, 0);
 	if (ret)
-		goto free_drm;
+		goto uninstall_irq;
 
 	drm_fbdev_generic_setup(drm, 32);
 
 	return 0;
 
+uninstall_irq:
+	drm_irq_uninstall(drm);
 free_drm:
 	drm_dev_put(drm);
 
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
 
 static void meson_drv_unbind(struct device *dev)
 {
-	struct drm_device *drm = dev_get_drvdata(dev);
-	struct meson_drm *priv = drm->dev_private;
+	struct meson_drm *priv = dev_get_drvdata(dev);
+	struct drm_device *drm = priv->drm;
 
 	if (priv->canvas) {
 		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
 	}
 
 	drm_dev_unregister(drm);
+	drm_irq_uninstall(drm);
 	drm_kms_helper_poll_fini(drm);
 	drm_mode_config_cleanup(drm);
 	drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index e28814f4ea6c..563953ec6ad0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 	DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
 	/* If sink max TMDS clock, we reject the mode */
-	if (mode->clock > connector->display_info.max_tmds_clock)
+	if (connector->display_info.max_tmds_clock &&
+	    mode->clock > connector->display_info.max_tmds_clock)
 		return MODE_BAD;
 
 	/* Check against non-VIC supported modes */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7d4c6073ea5..0d4ade9d4722 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
541 clk_disable(vop->hclk); 541 clk_disable(vop->hclk);
542} 542}
543 543
544static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
545{
546 if (win->phy->scl && win->phy->scl->ext) {
547 VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
548 VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
549 VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
550 VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
551 }
552
553 VOP_WIN_SET(vop, win, enable, 0);
554}
555
544static int vop_enable(struct drm_crtc *crtc) 556static int vop_enable(struct drm_crtc *crtc)
545{ 557{
546 struct vop *vop = to_vop(crtc); 558 struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
 		struct vop_win *vop_win = &vop->win[i];
 		const struct vop_win_data *win = vop_win->data;
 
-		VOP_WIN_SET(vop, win, enable, 0);
+		vop_win_disable(vop, win);
 	}
 	spin_unlock(&vop->reg_lock);
 
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
 
 	spin_lock(&vop->reg_lock);
 
-	VOP_WIN_SET(vop, win, enable, 0);
+	vop_win_disable(vop, win);
 
 	spin_unlock(&vop->reg_lock);
 }
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
 		int channel = i * 2 + 1;
 
 		VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
-		VOP_WIN_SET(vop, win, enable, 0);
+		vop_win_disable(vop, win);
 		VOP_WIN_SET(vop, win, gate, 1);
 	}
 
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index ba9b3cfb8c3d..b3436c2aed68 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 					      struct drm_plane_state *old_state)
 {
-	struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
 	struct tegra_plane *p = to_tegra_plane(plane);
+	struct tegra_dc *dc;
 	u32 value;
 
 	/* rien ne va plus */
 	if (!old_state || !old_state->crtc)
 		return;
 
+	dc = to_tegra_dc(old_state->crtc);
+
 	/*
 	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
 	 * on planes that are already disabled. Make sure we fallback to the
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 39bfed9623de..982ce37ecde1 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
 	if (vic->booted)
 		return 0;
 
+#ifdef CONFIG_IOMMU_API
 	if (vic->config->supports_sid) {
 		struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
 		u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
121 vic_writel(vic, value, VIC_THI_STREAMID1); 122 vic_writel(vic, value, VIC_THI_STREAMID1);
122 } 123 }
123 } 124 }
125#endif
124 126
125 /* setup clockgating registers */ 127 /* setup clockgating registers */
126 vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) | 128 vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
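
The vic.c hunk wraps the stream-ID programming in #ifdef CONFIG_IOMMU_API: on kernels built without the IOMMU core, declarations this block depends on (dev_iommu_fwspec_get() among them) are unavailable, so the block has to compile away rather than merely branch away at runtime. The shape, with a hypothetical helper:

#ifdef CONFIG_IOMMU_API
	/* Only compiled when the IOMMU core is present; a runtime
	 * 'if (supports_sid)' alone would still fail to build. */
	if (vic->config->supports_sid)
		vic_program_stream_ids(vic);	/* hypothetical helper */
#endif
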
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 66885c24590f..c1bd5e3d9e4a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -18,18 +18,19 @@
18#include "udl_connector.h" 18#include "udl_connector.h"
19#include "udl_drv.h" 19#include "udl_drv.h"
20 20
21static bool udl_get_edid_block(struct udl_device *udl, int block_idx, 21static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
22 u8 *buff) 22 size_t len)
23{ 23{
24 int ret, i; 24 int ret, i;
25 u8 *read_buff; 25 u8 *read_buff;
26 struct udl_device *udl = data;
26 27
27 read_buff = kmalloc(2, GFP_KERNEL); 28 read_buff = kmalloc(2, GFP_KERNEL);
28 if (!read_buff) 29 if (!read_buff)
29 return false; 30 return -1;
30 31
31 for (i = 0; i < EDID_LENGTH; i++) { 32 for (i = 0; i < len; i++) {
32 int bval = (i + block_idx * EDID_LENGTH) << 8; 33 int bval = (i + block * EDID_LENGTH) << 8;
33 ret = usb_control_msg(udl->udev, 34 ret = usb_control_msg(udl->udev,
34 usb_rcvctrlpipe(udl->udev, 0), 35 usb_rcvctrlpipe(udl->udev, 0),
35 (0x02), (0x80 | (0x02 << 5)), bval, 36 (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
37 if (ret < 1) { 38 if (ret < 1) {
38 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); 39 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
39 kfree(read_buff); 40 kfree(read_buff);
40 return false; 41 return -1;
41 } 42 }
42 buff[i] = read_buff[1]; 43 buf[i] = read_buff[1];
43 } 44 }
44 45
45 kfree(read_buff); 46 kfree(read_buff);
46 return true; 47 return 0;
47}
48
49static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
50 int *result_buff_size)
51{
52 int i, extensions;
53 u8 *block_buff = NULL, *buff_ptr;
54
55 block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
56 if (block_buff == NULL)
57 return false;
58
59 if (udl_get_edid_block(udl, 0, block_buff) &&
60 memchr_inv(block_buff, 0, EDID_LENGTH)) {
61 extensions = ((struct edid *)block_buff)->extensions;
62 if (extensions > 0) {
63 /* we have to read all extensions one by one */
64 *result_buff_size = EDID_LENGTH * (extensions + 1);
65 *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
66 buff_ptr = *result_buff;
67 if (buff_ptr == NULL) {
68 kfree(block_buff);
69 return false;
70 }
71 memcpy(buff_ptr, block_buff, EDID_LENGTH);
72 kfree(block_buff);
73 buff_ptr += EDID_LENGTH;
74 for (i = 1; i < extensions; ++i) {
75 if (udl_get_edid_block(udl, i, buff_ptr)) {
76 buff_ptr += EDID_LENGTH;
77 } else {
78 kfree(*result_buff);
79 *result_buff = NULL;
80 return false;
81 }
82 }
83 return true;
84 }
85 /* we have only base edid block */
86 *result_buff = block_buff;
87 *result_buff_size = EDID_LENGTH;
88 return true;
89 }
90
91 kfree(block_buff);
92
93 return false;
94} 48}
95 49
96static int udl_get_modes(struct drm_connector *connector) 50static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
122static enum drm_connector_status 76static enum drm_connector_status
123udl_detect(struct drm_connector *connector, bool force) 77udl_detect(struct drm_connector *connector, bool force)
124{ 78{
125 u8 *edid_buff = NULL;
126 int edid_buff_size = 0;
127 struct udl_device *udl = connector->dev->dev_private; 79 struct udl_device *udl = connector->dev->dev_private;
128 struct udl_drm_connector *udl_connector = 80 struct udl_drm_connector *udl_connector =
129 container_of(connector, 81 container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
136 udl_connector->edid = NULL; 88 udl_connector->edid = NULL;
137 } 89 }
138 90
139 91 udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
140 if (!udl_get_edid(udl, &edid_buff, &edid_buff_size)) 92 if (!udl_connector->edid)
141 return connector_status_disconnected; 93 return connector_status_disconnected;
142 94
143 udl_connector->edid = (struct edid *)edid_buff;
144
145 return connector_status_connected; 95 return connector_status_connected;
146} 96}
147 97
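
The udl rewrite above deletes a hand-rolled EDID walker in favor of drm_do_get_edid(), the DRM core helper that validates the base block, iterates extension blocks, and allocates the result, driven by a per-block read callback that returns 0 on success and nonzero on failure, exactly as the reworked udl_get_edid_block() now does. A minimal sketch of the contract (struct my_device and read_bytes() are hypothetical stand-ins for the transport; drm_do_get_edid() and EDID_LENGTH are the real DRM interfaces used in the hunk):

/* 'data' is the cookie passed as the last drm_do_get_edid() argument;
 * 'buf' receives 'len' bytes of EDID block number 'block'. */
static int my_get_edid_block(void *data, u8 *buf, unsigned int block,
			     size_t len)
{
	struct my_device *mydev = data;

	if (read_bytes(mydev, block * EDID_LENGTH, buf, len) < 0)
		return -EIO;	/* nonzero tells the helper to give up */
	return 0;
}

	/* in ->detect() / ->get_modes(): */
	struct edid *edid = drm_do_get_edid(connector, my_get_edid_block,
					    mydev);

Centralizing the block walking in the helper is what lets the driver drop udl_get_edid() and its error handling wholesale.
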
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5930facd6d2d..11a8f99ba18c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
191 ret = drm_gem_handle_create(file, &obj->base, handle); 191 ret = drm_gem_handle_create(file, &obj->base, handle);
192 drm_gem_object_put_unlocked(&obj->base); 192 drm_gem_object_put_unlocked(&obj->base);
193 if (ret) 193 if (ret)
194 goto err; 194 return ERR_PTR(ret);
195 195
196 return &obj->base; 196 return &obj->base;
197
198err:
199 __vgem_gem_destroy(obj);
200 return ERR_PTR(ret);
201} 197}
202 198
203static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 199static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 138b0bb325cf..69048e73377d 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
111 111
112 ret = drm_gem_handle_create(file, &obj->gem, handle); 112 ret = drm_gem_handle_create(file, &obj->gem, handle);
113 drm_gem_object_put_unlocked(&obj->gem); 113 drm_gem_object_put_unlocked(&obj->gem);
114 if (ret) { 114 if (ret)
115 drm_gem_object_release(&obj->gem);
116 kfree(obj);
117 return ERR_PTR(ret); 115 return ERR_PTR(ret);
118 }
119 116
120 return &obj->gem; 117 return &obj->gem;
121} 118}
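
The vgem and vkms hunks above fix the same reference-counting bug. The object is created with one reference; drm_gem_handle_create() either publishes it under a handle or fails without taking a reference of its own, and the unconditional drm_gem_object_put_unlocked() then drops the creation reference. On the failure path that put already freed the object, so the removed cleanup (__vgem_gem_destroy(), or drm_gem_object_release() plus kfree()) was a double free. The corrected flow is just:

	ret = drm_gem_handle_create(file, &obj->base, handle);
	drm_gem_object_put_unlocked(&obj->base);	/* drop creation ref */
	if (ret)
		return ERR_PTR(ret);	/* object is already gone */

	return &obj->base;	/* lifetime now owned by the handle */
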
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ca8d322b487..4ca0cdfa6b33 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,6 +150,7 @@ config HID_ASUS
150 tristate "Asus" 150 tristate "Asus"
151 depends on LEDS_CLASS 151 depends on LEDS_CLASS
152 depends on ASUS_WMI || ASUS_WMI=n 152 depends on ASUS_WMI || ASUS_WMI=n
153 select POWER_SUPPLY
153 ---help--- 154 ---help---
154 Support for Asus notebook built-in keyboard and touchpad via i2c, and 155 Support for Asus notebook built-in keyboard and touchpad via i2c, and
155 the Asus Republic of Gamers laptop keyboard special keys. 156 the Asus Republic of Gamers laptop keyboard special keys.
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9993b692598f..860e21ec6a49 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
1301u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1301u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1302 unsigned offset, unsigned n) 1302 unsigned offset, unsigned n)
1303{ 1303{
1304 if (n > 32) { 1304 if (n > 256) {
1305 hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", 1305 hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
1306 n, current->comm); 1306 n, current->comm);
1307 n = 32; 1307 n = 256;
1308 } 1308 }
1309 1309
1310 return __extract(report, offset, n); 1310 return __extract(report, offset, n);
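
hid_field_extract() warns and clamps when asked for an oversized field; the hunk raises that threshold from 32 to 256 bits. The warn-and-clamp idea is easiest to see in a 32-bit extractor; the runnable sketch below is a simplified illustration, not the kernel's updated 256-bit path, and it assumes the buffer extends a few bytes past the field, the way HID report buffers are padded:

#include <stdint.h>
#include <stdio.h>

/* Extract an n-bit little-endian field starting at bit 'offset'.
 * n is clamped to 32 because the result is returned in a uint32_t. */
static uint32_t extract_bits(const uint8_t *buf, unsigned int offset,
			     unsigned int n)
{
	uint64_t acc = 0;
	unsigned int i;

	if (n > 32)
		n = 32;	/* warn-and-clamp, as in hid_field_extract() */

	buf += offset >> 3;	/* skip to the first byte of the field */
	for (i = 0; i < 5; i++)	/* 5 bytes cover 32 bits at any shift */
		acc |= (uint64_t)buf[i] << (8 * i);

	return (uint32_t)((acc >> (offset & 7)) & ((1ULL << n) - 1));
}

int main(void)
{
	const uint8_t report[] = { 0x34, 0x12, 0xff, 0x00, 0x00, 0x00 };

	printf("0x%04x\n", extract_bits(report, 0, 16));	/* 0x1234 */
	return 0;
}
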
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ac9fda1b5a72..1384e57182af 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
1060 seq_printf(f, "\n\n"); 1060 seq_printf(f, "\n\n");
1061 1061
1062 /* dump parsed data and input mappings */ 1062 /* dump parsed data and input mappings */
1063 if (down_interruptible(&hdev->driver_input_lock))
1064 return 0;
1065
1063 hid_dump_device(hdev, f); 1066 hid_dump_device(hdev, f);
1064 seq_printf(f, "\n"); 1067 seq_printf(f, "\n");
1065 hid_dump_input_mapping(hdev, f); 1068 hid_dump_input_mapping(hdev, f);
1066 1069
1070 up(&hdev->driver_input_lock);
1071
1067 return 0; 1072 return 0;
1068} 1073}
1069 1074
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6d93f4ad037..adce58f24f76 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1083,6 +1083,7 @@
1083#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 1083#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
1084#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 1084#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
1085#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 1085#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
1086#define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e
1086 1087
1087#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 1088#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
1088#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 1089#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b10b1922c5bd..1fce0076e7dc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
998 case 0x1b8: map_key_clear(KEY_VIDEO); break; 998 case 0x1b8: map_key_clear(KEY_VIDEO); break;
999 case 0x1bc: map_key_clear(KEY_MESSENGER); break; 999 case 0x1bc: map_key_clear(KEY_MESSENGER); break;
1000 case 0x1bd: map_key_clear(KEY_INFO); break; 1000 case 0x1bd: map_key_clear(KEY_INFO); break;
1001 case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
1001 case 0x201: map_key_clear(KEY_NEW); break; 1002 case 0x201: map_key_clear(KEY_NEW); break;
1002 case 0x202: map_key_clear(KEY_OPEN); break; 1003 case 0x202: map_key_clear(KEY_OPEN); break;
1003 case 0x203: map_key_clear(KEY_CLOSE); break; 1004 case 0x203: map_key_clear(KEY_CLOSE); break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 15ed6177a7a3..199cc256e9d9 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
2111 kfree(data); 2111 kfree(data);
2112 return -ENOMEM; 2112 return -ENOMEM;
2113 } 2113 }
2114 data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
2115 if (!data->wq) {
2116 kfree(data->effect_ids);
2117 kfree(data);
2118 return -ENOMEM;
2119 }
2120
2114 data->hidpp = hidpp; 2121 data->hidpp = hidpp;
2115 data->feature_index = feature_index; 2122 data->feature_index = feature_index;
2116 data->version = version; 2123 data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
2155 /* ignore boost value at response.fap.params[2] */ 2162 /* ignore boost value at response.fap.params[2] */
2156 2163
2157 /* init the hardware command queue */ 2164 /* init the hardware command queue */
2158 data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
2159 atomic_set(&data->workqueue_size, 0); 2165 atomic_set(&data->workqueue_size, 0);
2160 2166
2161 /* initialize with zero autocenter to get wheel in usable state */ 2167 /* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
2608 input_report_rel(mydata->input, REL_Y, v); 2614 input_report_rel(mydata->input, REL_Y, v);
2609 2615
2610 v = hid_snto32(data[6], 8); 2616 v = hid_snto32(data[6], 8);
2611 hidpp_scroll_counter_handle_scroll( 2617 if (v != 0)
2612 &hidpp->vertical_wheel_counter, v); 2618 hidpp_scroll_counter_handle_scroll(
2619 &hidpp->vertical_wheel_counter, v);
2613 2620
2614 input_sync(mydata->input); 2621 input_sync(mydata->input);
2615 } 2622 }
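
In hidpp_ff_init() the workqueue is now created alongside the other allocations and, unlike before, its failure is handled: everything allocated so far is released in reverse order. A runnable sketch of the staged-allocation shape (all names hypothetical):

#include <stdlib.h>

struct workqueue { int dummy; };	/* opaque stand-in */

struct ff_data {
	int *effect_ids;
	struct workqueue *wq;
};

static struct ff_data *ff_init(size_t num_effects)
{
	struct ff_data *data = malloc(sizeof(*data));

	if (!data)
		return NULL;

	data->effect_ids = calloc(num_effects, sizeof(int));
	if (!data->effect_ids)
		goto free_data;

	/* Check every resource as it is acquired; on failure, free
	 * exactly what was already acquired, most recent first. */
	data->wq = malloc(sizeof(*data->wq));
	if (!data->wq)
		goto free_ids;

	return data;

free_ids:
	free(data->effect_ids);
free_data:
	free(data);
	return NULL;
}

int main(void)
{
	struct ff_data *d = ff_init(16);

	if (!d)
		return 1;
	free(d->wq);
	free(d->effect_ids);
	free(d);
	return 0;
}
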
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 953908f2267c..77ffba48cc73 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
715 { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, 715 { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
716 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, 716 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
717 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, 717 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
718 { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
719 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 718 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
720 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, 719 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
721 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, 720 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
855 { } 854 { }
856}; 855};
857 856
858/** 857/*
859 * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer 858 * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
860 * 859 *
861 * There are composite devices for which we want to ignore only a certain 860 * There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
996 if (hdev->product == 0x0401 && 995 if (hdev->product == 0x0401 &&
997 strncmp(hdev->name, "ELAN0800", 8) != 0) 996 strncmp(hdev->name, "ELAN0800", 8) != 0)
998 return true; 997 return true;
998 /* Same with product id 0x0400 */
999 if (hdev->product == 0x0400 &&
1000 strncmp(hdev->name, "QTEC0001", 8) != 0)
1001 return true;
999 break; 1002 break;
1000 } 1003 }
1001 1004
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
1042 } 1045 }
1043 1046
1044 if (bl_entry != NULL) 1047 if (bl_entry != NULL)
1045 dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n", 1048 dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
1046 bl_entry->driver_data, bl_entry->vendor, 1049 bl_entry->driver_data, bl_entry->vendor,
1047 bl_entry->product); 1050 bl_entry->product);
1048 1051
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
1209 quirks |= bl_entry->driver_data; 1212 quirks |= bl_entry->driver_data;
1210 1213
1211 if (quirks) 1214 if (quirks)
1212 dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n", 1215 dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
1213 quirks, hdev->vendor, hdev->product); 1216 quirks, hdev->vendor, hdev->product);
1214 return quirks; 1217 return quirks;
1215} 1218}
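
The two dbg_hid() changes switch the vendor/product format from %hx to %04x. The h length modifier buys nothing (varargs promote a u16 to int anyway), and the unpadded form prints 0x4b3 where device tables conventionally read 0x04b3; %04x restores the fixed four digits. In miniature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vendor = 0x04b3, product = 0x0018;

	printf("0x%hx:0x%hx\n", vendor, product);	/* 0x4b3:0x18    */
	printf("0x%04x:0x%04x\n", vendor, product);	/* 0x04b3:0x0018 */
	return 0;
}
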
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8141cadfca0e..8dae0f9b819e 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
499static int steam_register(struct steam_device *steam) 499static int steam_register(struct steam_device *steam)
500{ 500{
501 int ret; 501 int ret;
502 bool client_opened;
502 503
503 /* 504 /*
504 * This function can be called several times in a row with the 505 * This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
511 * Unlikely, but getting the serial could fail, and it is not so 512 * Unlikely, but getting the serial could fail, and it is not so
512 * important, so make up a serial number and go on. 513 * important, so make up a serial number and go on.
513 */ 514 */
515 mutex_lock(&steam->mutex);
514 if (steam_get_serial(steam) < 0) 516 if (steam_get_serial(steam) < 0)
515 strlcpy(steam->serial_no, "XXXXXXXXXX", 517 strlcpy(steam->serial_no, "XXXXXXXXXX",
516 sizeof(steam->serial_no)); 518 sizeof(steam->serial_no));
519 mutex_unlock(&steam->mutex);
517 520
518 hid_info(steam->hdev, "Steam Controller '%s' connected", 521 hid_info(steam->hdev, "Steam Controller '%s' connected",
519 steam->serial_no); 522 steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
528 } 531 }
529 532
530 mutex_lock(&steam->mutex); 533 mutex_lock(&steam->mutex);
531 if (!steam->client_opened) { 534 client_opened = steam->client_opened;
535 if (!client_opened)
532 steam_set_lizard_mode(steam, lizard_mode); 536 steam_set_lizard_mode(steam, lizard_mode);
537 mutex_unlock(&steam->mutex);
538
539 if (!client_opened)
533 ret = steam_input_register(steam); 540 ret = steam_input_register(steam);
534 } else { 541 else
535 ret = 0; 542 ret = 0;
536 }
537 mutex_unlock(&steam->mutex);
538 543
539 return ret; 544 return ret;
540} 545}
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
630{ 635{
631 struct steam_device *steam = hdev->driver_data; 636 struct steam_device *steam = hdev->driver_data;
632 637
638 unsigned long flags;
639 bool connected;
640
641 spin_lock_irqsave(&steam->lock, flags);
642 connected = steam->connected;
643 spin_unlock_irqrestore(&steam->lock, flags);
644
633 mutex_lock(&steam->mutex); 645 mutex_lock(&steam->mutex);
634 steam->client_opened = false; 646 steam->client_opened = false;
647 if (connected)
648 steam_set_lizard_mode(steam, lizard_mode);
635 mutex_unlock(&steam->mutex); 649 mutex_unlock(&steam->mutex);
636 650
637 if (steam->connected) { 651 if (connected)
638 steam_set_lizard_mode(steam, lizard_mode);
639 steam_input_register(steam); 652 steam_input_register(steam);
640 }
641} 653}
642 654
643static int steam_client_ll_raw_request(struct hid_device *hdev, 655static int steam_client_ll_raw_request(struct hid_device *hdev,
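
The hid-steam hunks narrow what runs under steam->mutex: state (client_opened under the mutex, connected under the spinlock) is snapshotted inside the lock, and steam_input_register(), which can sleep and take other locks, is called only after the unlock. The same shape as a runnable pthread sketch (names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t lock;
	bool client_opened;
};

/* Stand-in for steam_input_register(): may block, so it must not be
 * called with dev->lock held. */
static int register_input(struct dev *d)
{
	printf("registering input device\n");
	return 0;
}

static int dev_register(struct dev *d)
{
	bool client_opened;

	pthread_mutex_lock(&d->lock);
	client_opened = d->client_opened;	/* snapshot under lock */
	pthread_mutex_unlock(&d->lock);

	if (!client_opened)
		return register_input(d);	/* act outside the lock */
	return 0;
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, false };

	return dev_register(&d);
}

The snapshot can be stale by the time the action runs; the pattern trades that benign race for freedom from lock-ordering deadlocks.
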
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7710d9f957da..0187c9f8fc22 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
735 goto cleanup; 735 goto cleanup;
736 } 736 }
737 rc = usb_string(udev, 201, ver_ptr, ver_len); 737 rc = usb_string(udev, 201, ver_ptr, ver_len);
738 if (ver_ptr == NULL) {
739 rc = -ENOMEM;
740 goto cleanup;
741 }
742 if (rc == -EPIPE) { 738 if (rc == -EPIPE) {
743 *ver_ptr = '\0'; 739 *ver_ptr = '\0';
744 } else if (rc < 0) { 740 } else if (rc < 0) {
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90164fed08d3..4d1f24ee249c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
184 I2C_HID_QUIRK_NO_RUNTIME_PM }, 184 I2C_HID_QUIRK_NO_RUNTIME_PM },
185 { USB_VENDOR_ID_ELAN, HID_ANY_ID, 185 { USB_VENDOR_ID_ELAN, HID_ANY_ID,
186 I2C_HID_QUIRK_BOGUS_IRQ }, 186 I2C_HID_QUIRK_BOGUS_IRQ },
187 { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
188 I2C_HID_QUIRK_NO_RUNTIME_PM },
187 { 0, 0 } 189 { 0, 0 }
188}; 190};
189 191
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f929bfa9fcd..d0f1dfe2bcbb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
1759config SENSORS_W83773G 1759config SENSORS_W83773G
1760 tristate "Nuvoton W83773G" 1760 tristate "Nuvoton W83773G"
1761 depends on I2C 1761 depends on I2C
1762 select REGMAP_I2C
1762 help 1763 help
1763 If you say yes here you get support for the Nuvoton W83773G hardware 1764 If you say yes here you get support for the Nuvoton W83773G hardware
1764 monitoring chip. 1765 monitoring chip.
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e4f9f7ce92fa..f9abeeeead9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
640}; 640};
641 641
642static const u32 ntc_temp_config[] = { 642static const u32 ntc_temp_config[] = {
643 HWMON_T_INPUT, HWMON_T_TYPE, 643 HWMON_T_INPUT | HWMON_T_TYPE,
644 0 644 0
645}; 645};
646 646
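
The one-character ntc_thermistor fix is a whole bug class: each entry of a hwmon channel config array is one HWMON_T_* bitmask per channel, zero-terminated, so the comma in HWMON_T_INPUT, HWMON_T_TYPE silently declared two temperature channels (the second carrying only the type attribute) instead of one channel with both. Reduced to a runnable example:

#include <stdio.h>

#define T_INPUT	(1u << 0)
#define T_TYPE	(1u << 1)

/* One bitmask per channel, zero-terminated, hwmon-style. */
static const unsigned int wrong[] = { T_INPUT, T_TYPE, 0 };	/* 2 channels */
static const unsigned int right[] = { T_INPUT | T_TYPE, 0 };	/* 1 channel  */

static void describe(const char *name, const unsigned int *cfg)
{
	int n = 0;

	while (cfg[n])
		n++;
	printf("%s: %d channel(s), first mask 0x%x\n", name, n, cfg[0]);
}

int main(void)
{
	describe("wrong", wrong);	/* 2 channel(s), first mask 0x1 */
	describe("right", right);	/* 1 channel(s), first mask 0x3 */
	return 0;
}
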
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index b91a80abf724..4679acb4918e 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
890 s++; 890 s++;
891 } 891 }
892 } 892 }
893
894 s = (sensors->power.num_sensors * 4) + 1;
893 } else { 895 } else {
894 for (i = 0; i < sensors->power.num_sensors; ++i) { 896 for (i = 0; i < sensors->power.num_sensors; ++i) {
895 s = i + 1; 897 s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
918 show_power, NULL, 3, i); 920 show_power, NULL, 3, i);
919 attr++; 921 attr++;
920 } 922 }
921 }
922 923
923 if (sensors->caps.num_sensors >= 1) {
924 s = sensors->power.num_sensors + 1; 924 s = sensors->power.num_sensors + 1;
925 }
925 926
927 if (sensors->caps.num_sensors >= 1) {
926 snprintf(attr->name, sizeof(attr->name), "power%d_label", s); 928 snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
927 attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 929 attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
928 0, 0); 930 0, 0);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f2c681971201..f8979abb9a19 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
131 Cannon Lake (PCH) 131 Cannon Lake (PCH)
132 Cedar Fork (PCH) 132 Cedar Fork (PCH)
133 Ice Lake (PCH) 133 Ice Lake (PCH)
134 Comet Lake (PCH)
134 135
135 This driver can also be built as a module. If so, the module 136 This driver can also be built as a module. If so, the module
136 will be called i2c-i801. 137 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c91e145ef5a5..679c6c41f64b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
71 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes 71 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
72 * Cedar Fork (PCH) 0x18df 32 hard yes yes yes 72 * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
73 * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes 73 * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
74 * Comet Lake (PCH) 0x02a3 32 hard yes yes yes
74 * 75 *
75 * Features supported by this driver: 76 * Features supported by this driver:
76 * Software PEC no 77 * Software PEC no
@@ -240,6 +241,7 @@
240#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 241#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
241#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 242#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
242#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 243#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
244#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
243 245
244struct i801_mux_config { 246struct i801_mux_config {
245 char *gpio_chip; 247 char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
1038 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, 1040 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
1039 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, 1041 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
1040 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, 1042 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
1043 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
1041 { 0, } 1044 { 0, }
1042}; 1045};
1043 1046
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1534 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1537 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1535 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: 1538 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
1536 case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: 1539 case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
1540 case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
1537 priv->features |= FEATURE_I2C_BLOCK_READ; 1541 priv->features |= FEATURE_I2C_BLOCK_READ;
1538 priv->features |= FEATURE_IRQ; 1542 priv->features |= FEATURE_IRQ;
1539 priv->features |= FEATURE_SMBUS_PEC; 1543 priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 21cb088d6687..f7cdd2ab7f11 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3169,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
3169 return; 3169 return;
3170 3170
3171 list_for_each_entry(entry, &amd_iommu_unity_map, list) { 3171 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3172 int type, prot = 0;
3172 size_t length; 3173 size_t length;
3173 int prot = 0;
3174 3174
3175 if (devid < entry->devid_start || devid > entry->devid_end) 3175 if (devid < entry->devid_start || devid > entry->devid_end)
3176 continue; 3176 continue;
3177 3177
3178 type = IOMMU_RESV_DIRECT;
3178 length = entry->address_end - entry->address_start; 3179 length = entry->address_end - entry->address_start;
3179 if (entry->prot & IOMMU_PROT_IR) 3180 if (entry->prot & IOMMU_PROT_IR)
3180 prot |= IOMMU_READ; 3181 prot |= IOMMU_READ;
3181 if (entry->prot & IOMMU_PROT_IW) 3182 if (entry->prot & IOMMU_PROT_IW)
3182 prot |= IOMMU_WRITE; 3183 prot |= IOMMU_WRITE;
3184 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
3185 /* Exclusion range */
3186 type = IOMMU_RESV_RESERVED;
3183 3187
3184 region = iommu_alloc_resv_region(entry->address_start, 3188 region = iommu_alloc_resv_region(entry->address_start,
3185 length, prot, 3189 length, prot, type);
3186 IOMMU_RESV_DIRECT);
3187 if (!region) { 3190 if (!region) {
3188 dev_err(dev, "Out of memory allocating dm-regions\n"); 3191 dev_err(dev, "Out of memory allocating dm-regions\n");
3189 return; 3192 return;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index f773792d77fd..1b1378619fc9 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
2013 if (e == NULL) 2013 if (e == NULL)
2014 return -ENOMEM; 2014 return -ENOMEM;
2015 2015
2016 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2017 init_exclusion_range(m);
2018
2016 switch (m->type) { 2019 switch (m->type) {
2017 default: 2020 default:
2018 kfree(e); 2021 kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
2059 2062
2060 while (p < end) { 2063 while (p < end) {
2061 m = (struct ivmd_header *)p; 2064 m = (struct ivmd_header *)p;
2062 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2065 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2063 init_exclusion_range(m);
2064 else if (m->flags & IVMD_FLAG_UNITY_MAP)
2065 init_unity_map_range(m); 2066 init_unity_map_range(m);
2066 2067
2067 p += m->length; 2068 p += m->length;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index eae0741f72dc..87965e4d9647 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -374,6 +374,8 @@
374#define IOMMU_PROT_IR 0x01 374#define IOMMU_PROT_IR 0x01
375#define IOMMU_PROT_IW 0x02 375#define IOMMU_PROT_IW 0x02
376 376
377#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
378
377/* IOMMU capabilities */ 379/* IOMMU capabilities */
378#define IOMMU_CAP_IOTLB 24 380#define IOMMU_CAP_IOTLB 24
379#define IOMMU_CAP_NPCACHE 26 381#define IOMMU_CAP_NPCACHE 26
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f101afc315ab..9a8a8870e267 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -160,6 +160,14 @@
160 160
161#define ARM_V7S_TCR_PD1 BIT(5) 161#define ARM_V7S_TCR_PD1 BIT(5)
162 162
163#ifdef CONFIG_ZONE_DMA32
164#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
165#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
166#else
167#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
168#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
169#endif
170
163typedef u32 arm_v7s_iopte; 171typedef u32 arm_v7s_iopte;
164 172
165static bool selftest_running; 173static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
197 void *table = NULL; 205 void *table = NULL;
198 206
199 if (lvl == 1) 207 if (lvl == 1)
200 table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); 208 table = (void *)__get_free_pages(
209 __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
201 else if (lvl == 2) 210 else if (lvl == 2)
202 table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); 211 table = kmem_cache_zalloc(data->l2_tables, gfp);
203 phys = virt_to_phys(table); 212 phys = virt_to_phys(table);
204 if (phys != (arm_v7s_iopte)phys) 213 if (phys != (arm_v7s_iopte)phys) {
205 /* Doesn't fit in PTE */ 214 /* Doesn't fit in PTE */
215 dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
206 goto out_free; 216 goto out_free;
217 }
207 if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { 218 if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
208 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); 219 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
209 if (dma_mapping_error(dev, dma)) 220 if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
733 data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", 744 data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
734 ARM_V7S_TABLE_SIZE(2), 745 ARM_V7S_TABLE_SIZE(2),
735 ARM_V7S_TABLE_SIZE(2), 746 ARM_V7S_TABLE_SIZE(2),
736 SLAB_CACHE_DMA, NULL); 747 ARM_V7S_TABLE_SLAB_FLAGS, NULL);
737 if (!data->l2_tables) 748 if (!data->l2_tables)
738 goto out_free_data; 749 goto out_free_data;
739 750
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33a982e33716..109de67d5d72 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1105 1105
1106 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); 1106 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
1107 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { 1107 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
1108 dev_warn(dev,
1109 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1110 iommu_def_domain_type);
1111 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); 1108 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
1109 if (dom) {
1110 dev_warn(dev,
1111 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1112 iommu_def_domain_type);
1113 }
1112 } 1114 }
1113 1115
1114 group->default_domain = dom; 1116 group->default_domain = dom;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7fea18b0c15d..7cb4d685a1f1 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
513 const struct i2c_device_id *id) 513 const struct i2c_device_id *id)
514{ 514{
515 int devid; 515 int devid;
516 const struct of_device_id *of_id;
516 struct pca9532_data *data = i2c_get_clientdata(client); 517 struct pca9532_data *data = i2c_get_clientdata(client);
517 struct pca9532_platform_data *pca9532_pdata = 518 struct pca9532_platform_data *pca9532_pdata =
518 dev_get_platdata(&client->dev); 519 dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
528 dev_err(&client->dev, "no platform data\n"); 529 dev_err(&client->dev, "no platform data\n");
529 return -EINVAL; 530 return -EINVAL;
530 } 531 }
531 devid = (int)(uintptr_t)of_match_device( 532 of_id = of_match_device(of_pca9532_leds_match,
532 of_pca9532_leds_match, &client->dev)->data; 533 &client->dev);
534 if (unlikely(!of_id))
535 return -EINVAL;
536 devid = (int)(uintptr_t) of_id->data;
533 } else { 537 } else {
534 devid = id->driver_data; 538 devid = id->driver_data;
535 } 539 }
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 3dd3ed46d473..136f86a1627d 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
122 trigger_data->net_dev = NULL; 122 trigger_data->net_dev = NULL;
123 } 123 }
124 124
125 strncpy(trigger_data->device_name, buf, size); 125 memcpy(trigger_data->device_name, buf, size);
126 trigger_data->device_name[size] = 0;
126 if (size > 0 && trigger_data->device_name[size - 1] == '\n') 127 if (size > 0 && trigger_data->device_name[size - 1] == '\n')
127 trigger_data->device_name[size - 1] = 0; 128 trigger_data->device_name[size - 1] = 0;
128 129
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
301 container_of(nb, struct led_netdev_data, notifier); 302 container_of(nb, struct led_netdev_data, notifier);
302 303
303 if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE 304 if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
304 && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER 305 && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
305 && evt != NETDEV_CHANGENAME)
306 return NOTIFY_DONE; 306 return NOTIFY_DONE;
307 307
308 if (strcmp(dev->name, trigger_data->device_name)) 308 if (!(dev == trigger_data->net_dev ||
309 (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
309 return NOTIFY_DONE; 310 return NOTIFY_DONE;
310 311
311 cancel_delayed_work_sync(&trigger_data->work); 312 cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
320 dev_hold(dev); 321 dev_hold(dev);
321 trigger_data->net_dev = dev; 322 trigger_data->net_dev = dev;
322 break; 323 break;
323 case NETDEV_CHANGENAME:
324 case NETDEV_UNREGISTER: 324 case NETDEV_UNREGISTER:
325 if (trigger_data->net_dev) { 325 dev_put(trigger_data->net_dev);
326 dev_put(trigger_data->net_dev); 326 trigger_data->net_dev = NULL;
327 trigger_data->net_dev = NULL;
328 }
329 break; 327 break;
330 case NETDEV_UP: 328 case NETDEV_UP:
331 case NETDEV_CHANGE: 329 case NETDEV_CHANGE:
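
In device_name_store() above, strncpy() gives way to memcpy() plus an explicit terminator. The sysfs buffer is a counted byte range, not a C string, so strncpy(dst, buf, size) copies size bytes and terminates only if a NUL happens to fall inside them; writing dst[size] = 0 afterwards makes termination unconditional (the driver's earlier length check, outside this hunk, bounds size so dst has size + 1 bytes of room). Runnable sketch:

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 15

static char device_name[NAME_MAX_LEN + 1];	/* +1 for the terminator */

static int name_store(const char *buf, size_t size)
{
	if (size > NAME_MAX_LEN)
		return -1;

	memcpy(device_name, buf, size);
	device_name[size] = 0;			/* always terminated */
	if (size > 0 && device_name[size - 1] == '\n')
		device_name[size - 1] = 0;	/* strip sysfs newline */
	return 0;
}

int main(void)
{
	/* sysfs-style input: counted bytes, no NUL, trailing newline */
	name_store("eth0\n", 5);
	printf("name = \"%s\"\n", device_name);	/* name = "eth0" */
	return 0;
}
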
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0ce2d8dfc5f1..26ad6468d13a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
1246 1246
1247config MFD_SUN6I_PRCM 1247config MFD_SUN6I_PRCM
1248 bool "Allwinner A31 PRCM controller" 1248 bool "Allwinner A31 PRCM controller"
1249 depends on ARCH_SUNXI 1249 depends on ARCH_SUNXI || COMPILE_TEST
1250 select MFD_CORE 1250 select MFD_CORE
1251 help 1251 help
1252 Support for the PRCM (Power/Reset/Clock Management) unit available 1252 Support for the PRCM (Power/Reset/Clock Management) unit available
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..43ac71691fe4 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
53static const struct mfd_cell sprd_pmic_devs[] = { 53static const struct mfd_cell sprd_pmic_devs[] = {
54 { 54 {
55 .name = "sc27xx-wdt", 55 .name = "sc27xx-wdt",
56 .of_compatible = "sprd,sc27xx-wdt", 56 .of_compatible = "sprd,sc2731-wdt",
57 }, { 57 }, {
58 .name = "sc27xx-rtc", 58 .name = "sc27xx-rtc",
59 .of_compatible = "sprd,sc27xx-rtc", 59 .of_compatible = "sprd,sc2731-rtc",
60 }, { 60 }, {
61 .name = "sc27xx-charger", 61 .name = "sc27xx-charger",
62 .of_compatible = "sprd,sc27xx-charger", 62 .of_compatible = "sprd,sc2731-charger",
63 }, { 63 }, {
64 .name = "sc27xx-chg-timer", 64 .name = "sc27xx-chg-timer",
65 .of_compatible = "sprd,sc27xx-chg-timer", 65 .of_compatible = "sprd,sc2731-chg-timer",
66 }, { 66 }, {
67 .name = "sc27xx-fast-chg", 67 .name = "sc27xx-fast-chg",
68 .of_compatible = "sprd,sc27xx-fast-chg", 68 .of_compatible = "sprd,sc2731-fast-chg",
69 }, { 69 }, {
70 .name = "sc27xx-chg-wdt", 70 .name = "sc27xx-chg-wdt",
71 .of_compatible = "sprd,sc27xx-chg-wdt", 71 .of_compatible = "sprd,sc2731-chg-wdt",
72 }, { 72 }, {
73 .name = "sc27xx-typec", 73 .name = "sc27xx-typec",
74 .of_compatible = "sprd,sc27xx-typec", 74 .of_compatible = "sprd,sc2731-typec",
75 }, { 75 }, {
76 .name = "sc27xx-flash", 76 .name = "sc27xx-flash",
77 .of_compatible = "sprd,sc27xx-flash", 77 .of_compatible = "sprd,sc2731-flash",
78 }, { 78 }, {
79 .name = "sc27xx-eic", 79 .name = "sc27xx-eic",
80 .of_compatible = "sprd,sc27xx-eic", 80 .of_compatible = "sprd,sc2731-eic",
81 }, { 81 }, {
82 .name = "sc27xx-efuse", 82 .name = "sc27xx-efuse",
83 .of_compatible = "sprd,sc27xx-efuse", 83 .of_compatible = "sprd,sc2731-efuse",
84 }, { 84 }, {
85 .name = "sc27xx-thermal", 85 .name = "sc27xx-thermal",
86 .of_compatible = "sprd,sc27xx-thermal", 86 .of_compatible = "sprd,sc2731-thermal",
87 }, { 87 }, {
88 .name = "sc27xx-adc", 88 .name = "sc27xx-adc",
89 .of_compatible = "sprd,sc27xx-adc", 89 .of_compatible = "sprd,sc2731-adc",
90 }, { 90 }, {
91 .name = "sc27xx-audio-codec", 91 .name = "sc27xx-audio-codec",
92 .of_compatible = "sprd,sc27xx-audio-codec", 92 .of_compatible = "sprd,sc2731-audio-codec",
93 }, { 93 }, {
94 .name = "sc27xx-regulator", 94 .name = "sc27xx-regulator",
95 .of_compatible = "sprd,sc27xx-regulator", 95 .of_compatible = "sprd,sc2731-regulator",
96 }, { 96 }, {
97 .name = "sc27xx-vibrator", 97 .name = "sc27xx-vibrator",
98 .of_compatible = "sprd,sc27xx-vibrator", 98 .of_compatible = "sprd,sc2731-vibrator",
99 }, { 99 }, {
100 .name = "sc27xx-keypad-led", 100 .name = "sc27xx-keypad-led",
101 .of_compatible = "sprd,sc27xx-keypad-led", 101 .of_compatible = "sprd,sc2731-keypad-led",
102 }, { 102 }, {
103 .name = "sc27xx-bltc", 103 .name = "sc27xx-bltc",
104 .of_compatible = "sprd,sc27xx-bltc", 104 .of_compatible = "sprd,sc2731-bltc",
105 }, { 105 }, {
106 .name = "sc27xx-fgu", 106 .name = "sc27xx-fgu",
107 .of_compatible = "sprd,sc27xx-fgu", 107 .of_compatible = "sprd,sc2731-fgu",
108 }, { 108 }, {
109 .name = "sc27xx-7sreset", 109 .name = "sc27xx-7sreset",
110 .of_compatible = "sprd,sc27xx-7sreset", 110 .of_compatible = "sprd,sc2731-7sreset",
111 }, { 111 }, {
112 .name = "sc27xx-poweroff", 112 .name = "sc27xx-poweroff",
113 .of_compatible = "sprd,sc27xx-poweroff", 113 .of_compatible = "sprd,sc2731-poweroff",
114 }, { 114 }, {
115 .name = "sc27xx-syscon", 115 .name = "sc27xx-syscon",
116 .of_compatible = "sprd,sc27xx-syscon", 116 .of_compatible = "sprd,sc2731-syscon",
117 }, 117 },
118}; 118};
119 119
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016bc46d9..104477b512a2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ free:
1245 return status; 1245 return status;
1246} 1246}
1247 1247
1248static int __maybe_unused twl_suspend(struct device *dev)
1249{
1250 struct i2c_client *client = to_i2c_client(dev);
1251
1252 if (client->irq)
1253 disable_irq(client->irq);
1254
1255 return 0;
1256}
1257
1258static int __maybe_unused twl_resume(struct device *dev)
1259{
1260 struct i2c_client *client = to_i2c_client(dev);
1261
1262 if (client->irq)
1263 enable_irq(client->irq);
1264
1265 return 0;
1266}
1267
1268static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
1269
1248static const struct i2c_device_id twl_ids[] = { 1270static const struct i2c_device_id twl_ids[] = {
1249 { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ 1271 { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
1250 { "twl5030", 0 }, /* T2 updated */ 1272 { "twl5030", 0 }, /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
1262/* One Client Driver , 4 Clients */ 1284/* One Client Driver , 4 Clients */
1263static struct i2c_driver twl_driver = { 1285static struct i2c_driver twl_driver = {
1264 .driver.name = DRIVER_NAME, 1286 .driver.name = DRIVER_NAME,
1287 .driver.pm = &twl_dev_pm_ops,
1265 .id_table = twl_ids, 1288 .id_table = twl_ids,
1266 .probe = twl_probe, 1289 .probe = twl_probe,
1267 .remove = twl_remove, 1290 .remove = twl_remove,
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 3525236ed8d9..19c84214a7ea 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
179 179
180 /* We also need to update CI for internal queues */ 180 /* We also need to update CI for internal queues */
181 if (cs->submitted) { 181 if (cs->submitted) {
182 int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
183
184 WARN_ONCE((cs_cnt < 0),
185 "hl%d: error in CS active cnt %d\n",
186 hdev->id, cs_cnt);
187
182 hl_int_hw_queue_update_ci(cs); 188 hl_int_hw_queue_update_ci(cs);
183 189
184 spin_lock(&hdev->hw_queues_mirror_lock); 190 spin_lock(&hdev->hw_queues_mirror_lock);
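
The new cs_active_cnt accounting pairs with the hw_queue.c hunk further down: the counter is incremented when a command submission is handed to the hardware queues and decremented here when it is released, and the WARN_ONCE fires if the pairing ever breaks and the count goes negative. The invariant in a runnable C11 sketch:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_cnt;

static void submit(void)
{
	atomic_fetch_add(&active_cnt, 1);	/* CS reaches the queues */
}

static void release(void)
{
	/* fetch_sub returns the old value, so old - 1 is the new count,
	 * like the kernel's atomic_dec_return() */
	int cnt = atomic_fetch_sub(&active_cnt, 1) - 1;

	assert(cnt >= 0);	/* mirrors the WARN_ONCE */
	printf("active submissions: %d\n", cnt);
}

int main(void)
{
	submit();
	submit();
	release();	/* active submissions: 1 */
	release();	/* active submissions: 0 */
	return 0;
}
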
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a53c12aff6ad..974a87789bd8 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
232 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; 232 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
233 enum vm_type_t *vm_type; 233 enum vm_type_t *vm_type;
234 bool once = true; 234 bool once = true;
235 u64 j;
235 int i; 236 int i;
236 237
237 if (!dev_entry->hdev->mmu_enable) 238 if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
260 } else { 261 } else {
261 phys_pg_pack = hnode->ptr; 262 phys_pg_pack = hnode->ptr;
262 seq_printf(s, 263 seq_printf(s,
263 " 0x%-14llx %-10u %-4u\n", 264 " 0x%-14llx %-10llu %-4u\n",
264 hnode->vaddr, phys_pg_pack->total_size, 265 hnode->vaddr, phys_pg_pack->total_size,
265 phys_pg_pack->handle); 266 phys_pg_pack->handle);
266 } 267 }
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
282 phys_pg_pack->page_size); 283 phys_pg_pack->page_size);
283 seq_puts(s, " physical address\n"); 284 seq_puts(s, " physical address\n");
284 seq_puts(s, "---------------------\n"); 285 seq_puts(s, "---------------------\n");
285 for (i = 0 ; i < phys_pg_pack->npages ; i++) { 286 for (j = 0 ; j < phys_pg_pack->npages ; j++) {
286 seq_printf(s, " 0x%-14llx\n", 287 seq_printf(s, " 0x%-14llx\n",
287 phys_pg_pack->pages[i]); 288 phys_pg_pack->pages[j]);
288 } 289 }
289 } 290 }
290 spin_unlock(&vm->idr_lock); 291 spin_unlock(&vm->idr_lock);
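
The debugfs changes track the habanalabs.h hunk later in this patch, where npages and total_size widen to u64: the loop index becomes a u64 to match, and %-10u becomes %-10llu. An index narrower than its bound either truncates on assignment or wraps before reaching a count above UINT32_MAX, so the loop never terminates. A runnable illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t npages = UINT32_MAX + 2ULL;	/* needs more than 32 bits */
	uint32_t truncated = (uint32_t)npages;	/* silently becomes 1 */
	uint64_t j, steps = 0;

	printf("truncated copy: %" PRIu32 "\n", truncated);

	/* with a matching 64-bit index the bound is representable */
	for (j = 0; j < npages; j += UINT32_MAX)
		steps++;
	printf("steps: %" PRIu64 "\n", steps);	/* 2 */
	return 0;
}
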
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index de46aa6ed154..77d51be66c7e 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -11,6 +11,8 @@
11#include <linux/sched/signal.h> 11#include <linux/sched/signal.h>
12#include <linux/hwmon.h> 12#include <linux/hwmon.h>
13 13
14#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
15
14bool hl_device_disabled_or_in_reset(struct hl_device *hdev) 16bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
15{ 17{
16 if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) 18 if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
216 spin_lock_init(&hdev->hw_queues_mirror_lock); 218 spin_lock_init(&hdev->hw_queues_mirror_lock);
217 atomic_set(&hdev->in_reset, 0); 219 atomic_set(&hdev->in_reset, 0);
218 atomic_set(&hdev->fd_open_cnt, 0); 220 atomic_set(&hdev->fd_open_cnt, 0);
221 atomic_set(&hdev->cs_active_cnt, 0);
219 222
220 return 0; 223 return 0;
221 224
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
413 416
414 pci_save_state(hdev->pdev); 417 pci_save_state(hdev->pdev);
415 418
419 /* Block future CS/VM/JOB completion operations */
420 rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
421 if (rc) {
422 dev_err(hdev->dev, "Can't suspend while in reset\n");
423 return -EIO;
424 }
425
426 /* This blocks all other stuff that is not blocked by in_reset */
427 hdev->disabled = true;
428
429 /*
430 * Flush anyone that is inside the critical section of enqueue
431 * jobs to the H/W
432 */
433 hdev->asic_funcs->hw_queues_lock(hdev);
434 hdev->asic_funcs->hw_queues_unlock(hdev);
435
436 /* Flush processes that are sending message to CPU */
437 mutex_lock(&hdev->send_cpu_message_lock);
438 mutex_unlock(&hdev->send_cpu_message_lock);
439
416 rc = hdev->asic_funcs->suspend(hdev); 440 rc = hdev->asic_funcs->suspend(hdev);
417 if (rc) 441 if (rc)
418 dev_err(hdev->dev, 442 dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
440 464
441 pci_set_power_state(hdev->pdev, PCI_D0); 465 pci_set_power_state(hdev->pdev, PCI_D0);
442 pci_restore_state(hdev->pdev); 466 pci_restore_state(hdev->pdev);
443 rc = pci_enable_device(hdev->pdev); 467 rc = pci_enable_device_mem(hdev->pdev);
444 if (rc) { 468 if (rc) {
445 dev_err(hdev->dev, 469 dev_err(hdev->dev,
446 "Failed to enable PCI device in resume\n"); 470 "Failed to enable PCI device in resume\n");
447 return rc; 471 return rc;
448 } 472 }
449 473
474 pci_set_master(hdev->pdev);
475
450 rc = hdev->asic_funcs->resume(hdev); 476 rc = hdev->asic_funcs->resume(hdev);
451 if (rc) { 477 if (rc) {
452 dev_err(hdev->dev, 478 dev_err(hdev->dev, "Failed to resume device after suspend\n");
453 "Failed to enable PCI access from device CPU\n"); 479 goto disable_device;
454 return rc; 480 }
481
482
483 hdev->disabled = false;
484 atomic_set(&hdev->in_reset, 0);
485
486 rc = hl_device_reset(hdev, true, false);
487 if (rc) {
488 dev_err(hdev->dev, "Failed to reset device during resume\n");
489 goto disable_device;
455 } 490 }
456 491
457 return 0; 492 return 0;
493
494disable_device:
495 pci_clear_master(hdev->pdev);
496 pci_disable_device(hdev->pdev);
497
498 return rc;
458} 499}
459 500
460static void hl_device_hard_reset_pending(struct work_struct *work) 501static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
462 struct hl_device_reset_work *device_reset_work = 503 struct hl_device_reset_work *device_reset_work =
463 container_of(work, struct hl_device_reset_work, reset_work); 504 container_of(work, struct hl_device_reset_work, reset_work);
464 struct hl_device *hdev = device_reset_work->hdev; 505 struct hl_device *hdev = device_reset_work->hdev;
465 u16 pending_cnt = HL_PENDING_RESET_PER_SEC; 506 u16 pending_total, pending_cnt;
466 struct task_struct *task = NULL; 507 struct task_struct *task = NULL;
467 508
509 if (hdev->pldm)
510 pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
511 else
512 pending_total = HL_PENDING_RESET_PER_SEC;
513
514 pending_cnt = pending_total;
515
468 /* Flush all processes that are inside hl_open */ 516 /* Flush all processes that are inside hl_open */
469 mutex_lock(&hdev->fd_open_cnt_lock); 517 mutex_lock(&hdev->fd_open_cnt_lock);
470 518
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
489 } 537 }
490 } 538 }
491 539
540 pending_cnt = pending_total;
541
542 while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
543
544 pending_cnt--;
545
546 ssleep(1);
547 }
548
549 if (atomic_read(&hdev->fd_open_cnt))
550 dev_crit(hdev->dev,
551 "Going to hard reset with open user contexts\n");
552
492 mutex_unlock(&hdev->fd_open_cnt_lock); 553 mutex_unlock(&hdev->fd_open_cnt_lock);
493 554
494 hl_device_reset(hdev, true, true); 555 hl_device_reset(hdev, true, true);
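
hl_device_suspend() above uses an empty lock/unlock cycle as a flush: once hdev->disabled is set, taking and immediately dropping hw_queues_lock (and likewise send_cpu_message_lock) guarantees that every thread that entered the critical section before the flag flipped has left it. A runnable pthread/C11 sketch of the idiom (names illustrative); note the flag is checked inside the critical section, which is what makes the guarantee hold:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool disabled;

/* Submit path: the flag is checked inside the critical section. */
static void enqueue(int job)
{
	pthread_mutex_lock(&queue_lock);
	if (!atomic_load(&disabled))
		printf("queued job %d\n", job);
	pthread_mutex_unlock(&queue_lock);
}

/* Suspend path: set the flag, then take and drop the lock once.  The
 * empty cycle waits out any submitter already inside the critical
 * section; later submitters see the flag and bail. */
static void flush_submitters(void)
{
	atomic_store(&disabled, true);
	pthread_mutex_lock(&queue_lock);
	pthread_mutex_unlock(&queue_lock);
	/* no enqueue can be in flight past this point */
}

int main(void)
{
	enqueue(1);
	flush_submitters();
	enqueue(2);	/* refused: device disabled */
	return 0;
}
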
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 238dd57c541b..ea979ebd62fb 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
1201 return retval; 1201 return retval;
1202} 1202}
1203 1203
1204static void goya_resume_external_queues(struct hl_device *hdev)
1205{
1206 WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
1207 WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
1208 WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
1209 WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
1210 WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
1211}
1212
1213/* 1204/*
1214 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU 1205 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
1215 * 1206 *
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
2178 return retval; 2169 return retval;
2179} 2170}
2180 2171
2181static void goya_resume_internal_queues(struct hl_device *hdev)
2182{
2183 WREG32(mmMME_QM_GLBL_CFG1, 0);
2184 WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
2185
2186 WREG32(mmTPC0_QM_GLBL_CFG1, 0);
2187 WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
2188
2189 WREG32(mmTPC1_QM_GLBL_CFG1, 0);
2190 WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
2191
2192 WREG32(mmTPC2_QM_GLBL_CFG1, 0);
2193 WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
2194
2195 WREG32(mmTPC3_QM_GLBL_CFG1, 0);
2196 WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
2197
2198 WREG32(mmTPC4_QM_GLBL_CFG1, 0);
2199 WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
2200
2201 WREG32(mmTPC5_QM_GLBL_CFG1, 0);
2202 WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
2203
2204 WREG32(mmTPC6_QM_GLBL_CFG1, 0);
2205 WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
2206
2207 WREG32(mmTPC7_QM_GLBL_CFG1, 0);
2208 WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
2209}
2210
2211static void goya_dma_stall(struct hl_device *hdev) 2172static void goya_dma_stall(struct hl_device *hdev)
2212{ 2173{
2213 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); 2174 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
2905{ 2866{
2906 int rc; 2867 int rc;
2907 2868
2908 rc = goya_stop_internal_queues(hdev);
2909
2910 if (rc) {
2911 dev_err(hdev->dev, "failed to stop internal queues\n");
2912 return rc;
2913 }
2914
2915 rc = goya_stop_external_queues(hdev);
2916
2917 if (rc) {
2918 dev_err(hdev->dev, "failed to stop external queues\n");
2919 return rc;
2920 }
2921
2922 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); 2869 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2923 if (rc) 2870 if (rc)
2924 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); 2871 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
2928 2875
2929int goya_resume(struct hl_device *hdev) 2876int goya_resume(struct hl_device *hdev)
2930{ 2877{
2931 int rc; 2878 return goya_init_iatu(hdev);
2932
2933 goya_resume_external_queues(hdev);
2934 goya_resume_internal_queues(hdev);
2935
2936 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
2937 if (rc)
2938 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
2939 return rc;
2940} 2879}
2941 2880
2942static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, 2881static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3070 3009
3071 *dma_handle = hdev->asic_prop.sram_base_address; 3010 *dma_handle = hdev->asic_prop.sram_base_address;
3072 3011
3073 base = hdev->pcie_bar[SRAM_CFG_BAR_ID]; 3012 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
3074 3013
3075 switch (queue_id) { 3014 switch (queue_id) {
3076 case GOYA_QUEUE_ID_MME: 3015 case GOYA_QUEUE_ID_MME:
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a7c95e9f9b9a..a8ee52c880cd 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
793 * struct hl_vm_phys_pg_pack - physical page pack. 793 * struct hl_vm_phys_pg_pack - physical page pack.
794 * @vm_type: describes the type of the virtual area descriptor. 794 * @vm_type: describes the type of the virtual area descriptor.
795 * @pages: the physical page array. 795 * @pages: the physical page array.
796 * @npages: num physical pages in the pack.
797 * @total_size: total size of all the pages in this list.
796 * @mapping_cnt: number of shared mappings. 798 * @mapping_cnt: number of shared mappings.
797 * @asid: the context related to this list. 799 * @asid: the context related to this list.
798 * @npages: num physical pages in the pack.
799 * @page_size: size of each page in the pack. 800 * @page_size: size of each page in the pack.
800 * @total_size: total size of all the pages in this list.
801 * @flags: HL_MEM_* flags related to this list. 801 * @flags: HL_MEM_* flags related to this list.
802 * @handle: the provided handle related to this list. 802 * @handle: the provided handle related to this list.
803 * @offset: offset from the first page. 803 * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
807struct hl_vm_phys_pg_pack { 807struct hl_vm_phys_pg_pack {
808 enum vm_type_t vm_type; /* must be first */ 808 enum vm_type_t vm_type; /* must be first */
809 u64 *pages; 809 u64 *pages;
810 u64 npages;
811 u64 total_size;
810 atomic_t mapping_cnt; 812 atomic_t mapping_cnt;
811 u32 asid; 813 u32 asid;
812 u32 npages;
813 u32 page_size; 814 u32 page_size;
814 u32 total_size;
815 u32 flags; 815 u32 flags;
816 u32 handle; 816 u32 handle;
817 u32 offset; 817 u32 offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
1056 * @cb_pool_lock: protects the CB pool. 1056 * @cb_pool_lock: protects the CB pool.
1057 * @user_ctx: current user context executing. 1057 * @user_ctx: current user context executing.
1058 * @dram_used_mem: current DRAM memory consumption. 1058 * @dram_used_mem: current DRAM memory consumption.
1059 * @in_reset: is device in reset flow.
1060 * @curr_pll_profile: current PLL profile.
1061 * @fd_open_cnt: number of open user processes.
1062 * @timeout_jiffies: device CS timeout value. 1059 * @timeout_jiffies: device CS timeout value.
1063 * @max_power: the max power of the device, as configured by the sysadmin. This 1060 * @max_power: the max power of the device, as configured by the sysadmin. This
1064 * value is saved so in case of hard-reset, KMD will restore this 1061 * value is saved so in case of hard-reset, KMD will restore this
1065 * value and update the F/W after the re-initialization 1062 * value and update the F/W after the re-initialization
1063 * @in_reset: is device in reset flow.
1064 * @curr_pll_profile: current PLL profile.
1065 * @fd_open_cnt: number of open user processes.
1066 * @cs_active_cnt: number of active command submissions on this device (active
1067 * means already in H/W queues)
1066 * @major: habanalabs KMD major. 1068 * @major: habanalabs KMD major.
1067 * @high_pll: high PLL profile frequency. 1069 * @high_pll: high PLL profile frequency.
1068 * @soft_reset_cnt: number of soft reset since KMD loading. 1070 * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
1128 struct hl_ctx *user_ctx; 1130 struct hl_ctx *user_ctx;
1129 1131
1130 atomic64_t dram_used_mem; 1132 atomic64_t dram_used_mem;
1133 u64 timeout_jiffies;
1134 u64 max_power;
1131 atomic_t in_reset; 1135 atomic_t in_reset;
1132 atomic_t curr_pll_profile; 1136 atomic_t curr_pll_profile;
1133 atomic_t fd_open_cnt; 1137 atomic_t fd_open_cnt;
1134 u64 timeout_jiffies; 1138 atomic_t cs_active_cnt;
1135 u64 max_power;
1136 u32 major; 1139 u32 major;
1137 u32 high_pll; 1140 u32 high_pll;
1138 u32 soft_reset_cnt; 1141 u32 soft_reset_cnt;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 67bece26417c..ef3bb6951360 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
370 spin_unlock(&hdev->hw_queues_mirror_lock); 370 spin_unlock(&hdev->hw_queues_mirror_lock);
371 } 371 }
372 372
373 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) { 373 atomic_inc(&hdev->cs_active_cnt);
374
375 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
374 if (job->ext_queue) 376 if (job->ext_queue)
375 ext_hw_queue_schedule_job(job); 377 ext_hw_queue_schedule_job(job);
376 else 378 else
377 int_hw_queue_schedule_job(job); 379 int_hw_queue_schedule_job(job);
378 }
379 380
380 cs->submitted = true; 381 cs->submitted = true;
381 382
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 3a12fd1a5274..ce1fda40a8b8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 	struct hl_device *hdev = ctx->hdev;
 	struct hl_vm *vm = &hdev->vm;
 	struct hl_vm_phys_pg_pack *phys_pg_pack;
-	u64 paddr = 0;
-	u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
-	int handle, rc, i;
+	u64 paddr = 0, total_size, num_pgs, i;
+	u32 num_curr_pgs, page_size, page_shift;
+	int handle, rc;
 	bool contiguous;
 
 	num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
 		if (!paddr) {
 			dev_err(hdev->dev,
-				"failed to allocate %u huge contiguous pages\n",
+				"failed to allocate %llu huge contiguous pages\n",
 				num_pgs);
 			return -ENOMEM;
 		}
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 	phys_pg_pack->flags = args->flags;
 	phys_pg_pack->contiguous = contiguous;
 
-	phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
 	if (!phys_pg_pack->pages) {
 		rc = -ENOMEM;
 		goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
 		gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
 				page_size);
 
-	kfree(phys_pg_pack->pages);
+	kvfree(phys_pg_pack->pages);
 pages_arr_err:
 	kfree(phys_pg_pack);
 pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
 		struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
 	struct hl_vm *vm = &hdev->vm;
-	int i;
+	u64 i;
 
 	if (!phys_pg_pack->created_from_userptr) {
 		if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
 		}
 	}
 
-	kfree(phys_pg_pack->pages);
+	kvfree(phys_pg_pack->pages);
 	kfree(phys_pg_pack);
 }
 
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
  * - Return the start address of the virtual block
  */
 static u64 get_va_block(struct hl_device *hdev,
-		struct hl_va_range *va_range, u32 size, u64 hint_addr,
+		struct hl_va_range *va_range, u64 size, u64 hint_addr,
 		bool is_userptr)
 {
 	struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
 	}
 
 	if (!new_va_block) {
-		dev_err(hdev->dev, "no available va block for size %u\n", size);
+		dev_err(hdev->dev, "no available va block for size %llu\n",
+				size);
 		goto out;
 	}
 
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 	struct hl_vm_phys_pg_pack *phys_pg_pack;
 	struct scatterlist *sg;
 	dma_addr_t dma_addr;
-	u64 page_mask;
-	u32 npages, total_npages, page_size = PAGE_SIZE;
+	u64 page_mask, total_npages;
+	u32 npages, page_size = PAGE_SIZE;
 	bool first = true, is_huge_page_opt = true;
 	int rc, i, j;
 
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 
 	page_mask = ~(((u64) page_size) - 1);
 
-	phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+						GFP_KERNEL);
 	if (!phys_pg_pack->pages) {
 		rc = -ENOMEM;
 		goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
 		struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
 	struct hl_device *hdev = ctx->hdev;
-	u64 next_vaddr = vaddr, paddr;
+	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
 	u32 page_size = phys_pg_pack->page_size;
-	int i, rc = 0, mapped_pg_cnt = 0;
+	int rc = 0;
 
 	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
 		paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
 		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
 		if (rc) {
 			dev_err(hdev->dev,
-				"map failed for handle %u, npages: %d, mapped: %d",
+				"map failed for handle %u, npages: %llu, mapped: %llu",
 				phys_pg_pack->handle, phys_pg_pack->npages,
 				mapped_pg_cnt);
 			goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
 	struct hl_vm_hash_node *hnode = NULL;
 	struct hl_userptr *userptr = NULL;
 	enum vm_type_t *vm_type;
-	u64 next_vaddr;
+	u64 next_vaddr, i;
 	u32 page_size;
 	bool is_userptr;
-	int i, rc;
+	int rc;
 
 	/* protect from double entrance */
 	mutex_lock(&ctx->mem_hash_lock);
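The memory.c hunks above switch the page arrays from kcalloc() to kvmalloc_array(): user-controlled page counts can exceed what kmalloc comfortably serves, and kvmalloc falls back to vmalloc for large sizes while keeping the overflow-checked multiplication; frees are paired with kvfree() accordingly. A hedged userspace sketch of that overflow check (xmalloc_array is my own helper, not the kernel implementation):

    #include <stdint.h>
    #include <stdlib.h>

    /* like kvmalloc_array(): refuse n * size if the product overflows */
    static void *xmalloc_array(size_t n, size_t size)
    {
        if (size && n > SIZE_MAX / size)
            return NULL; /* overflow: mirror the kernel's NULL return */
        return malloc(n * size);
    }

    int main(void)
    {
        uint64_t *pages = xmalloc_array(1024, sizeof(uint64_t));
        if (!pages)
            return 1;
        /* ... use pages[] ..., then pair with free() the way
         * kvmalloc_array() pairs with kvfree() */
        free(pages);
        return 0;
    }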
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 2f2e99cb2743..3a5a2cec8305 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -832,7 +832,7 @@ err:
 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 {
 	struct hl_device *hdev = ctx->hdev;
-	u64 real_virt_addr;
+	u64 real_virt_addr, real_phys_addr;
 	u32 real_page_size, npages;
 	int i, rc, mapped_cnt = 0;
 
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 
 	npages = page_size / real_page_size;
 	real_virt_addr = virt_addr;
+	real_phys_addr = phys_addr;
 
 	for (i = 0 ; i < npages ; i++) {
-		rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+		rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
 				real_page_size);
 		if (rc)
 			goto err;
 
 		real_virt_addr += real_page_size;
+		real_phys_addr += real_page_size;
 		mapped_cnt++;
 	}
 
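The mmu.c fix matters: before it, every chunk of a large mapping was mapped to the same physical address, because only the virtual cursor advanced. A small standalone sketch of the corrected loop shape (map_one() is a made-up stand-in for _hl_mmu_map()):

    #include <stdint.h>
    #include <stdio.h>

    static int map_one(uint64_t va, uint64_t pa, uint32_t sz)
    {
        printf("map va=%#llx -> pa=%#llx (%u bytes)\n",
               (unsigned long long)va, (unsigned long long)pa, sz);
        return 0;
    }

    int main(void)
    {
        uint64_t va = 0x1000000, pa = 0x8000000;
        uint32_t page = 0x1000, total = 0x4000;

        for (uint32_t done = 0; done < total; done += page) {
            if (map_one(va, pa, page))
                return 1;
            va += page;
            pa += page; /* the line the fix adds: advance both cursors */
        }
        return 0;
    }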
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
 
 static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
 {
-	return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+	return sprintf(buf, "%*phC\n",
+		       slave->dev->addr_len,
+		       slave->perm_hwaddr);
 }
 static SLAVE_ATTR_RO(perm_hwaddr);
 
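The bonding change swaps "%pM" (a fixed six-byte MAC) for "%*phC" (a colon-separated hex dump of dev->addr_len bytes), so slaves with longer hardware addresses print in full. Outside the kernel's printk extensions the equivalent is an explicit loop; a sketch:

    #include <stdio.h>

    /* print addr_len bytes as colon-separated hex, like the kernel's %*phC */
    static void print_hwaddr(const unsigned char *addr, int addr_len)
    {
        for (int i = 0; i < addr_len; i++)
            printf("%02x%c", addr[i], i == addr_len - 1 ? '\n' : ':');
    }

    int main(void)
    {
        unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        print_hwaddr(mac, sizeof(mac));
        return 0;
    }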
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dce84a2a65c7..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 		return 0;
 
 	lane = mv88e6390x_serdes_get_lane(chip, port);
-	if (lane < 0)
+	if (lane < 0 && lane != -ENODEV)
 		return lane;
 
-	if (chip->ports[port].serdes_irq) {
-		err = mv88e6390_serdes_irq_disable(chip, port, lane);
+	if (lane >= 0) {
+		if (chip->ports[port].serdes_irq) {
+			err = mv88e6390_serdes_irq_disable(chip, port, lane);
+			if (err)
+				return err;
+		}
+
+		err = mv88e6390x_serdes_power(chip, port, false);
 		if (err)
 			return err;
 	}
 
-	err = mv88e6390x_serdes_power(chip, port, false);
-	if (err)
-		return err;
+	chip->ports[port].cmode = 0;
 
 	if (cmode) {
 		err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 		if (err)
 			return err;
 
+		chip->ports[port].cmode = cmode;
+
+		lane = mv88e6390x_serdes_get_lane(chip, port);
+		if (lane < 0)
+			return lane;
+
 		err = mv88e6390x_serdes_power(chip, port, true);
 		if (err)
 			return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
 		}
 	}
 
-	chip->ports[port].cmode = cmode;
-
 	return 0;
 }
 
470 478
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aa2be4807191..28eac9056211 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
 	struct nicvf_cq_poll *cq_poll = NULL;
 	union nic_mbx mbx = {};
 
-	cancel_delayed_work_sync(&nic->link_change_work);
-
 	/* wait till all queued set_rx_mode tasks completes */
-	drain_workqueue(nic->nicvf_rx_mode_wq);
+	if (nic->nicvf_rx_mode_wq) {
+		cancel_delayed_work_sync(&nic->link_change_work);
+		drain_workqueue(nic->nicvf_rx_mode_wq);
+	}
 
 	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
 	nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
 	struct nicvf_cq_poll *cq_poll = NULL;
 
 	/* wait till all queued set_rx_mode tasks completes if any */
-	drain_workqueue(nic->nicvf_rx_mode_wq);
+	if (nic->nicvf_rx_mode_wq)
+		drain_workqueue(nic->nicvf_rx_mode_wq);
 
 	netif_carrier_off(netdev);
 
@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
 	/* Send VF config done msg to PF */
 	nicvf_send_cfg_done(nic);
 
-	INIT_DELAYED_WORK(&nic->link_change_work,
-			  nicvf_link_status_check_task);
-	queue_delayed_work(nic->nicvf_rx_mode_wq,
-			   &nic->link_change_work, 0);
+	if (nic->nicvf_rx_mode_wq) {
+		INIT_DELAYED_WORK(&nic->link_change_work,
+				  nicvf_link_status_check_task);
+		queue_delayed_work(nic->nicvf_rx_mode_wq,
+				   &nic->link_change_work, 0);
+	}
 
 	return 0;
 cleanup:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
 	/* Check if page can be recycled */
 	if (page) {
 		ref_count = page_ref_count(page);
-		/* Check if this page has been used once i.e 'put_page'
-		 * called after packet transmission i.e internal ref_count
-		 * and page's ref_count are equal i.e page can be recycled.
+		/* This page can be recycled if internal ref_count and page's
+		 * ref_count are equal, indicating that the page has been used
+		 * once for packet transmission. For non-XDP mode, internal
+		 * ref_count is always '1'.
 		 */
-		if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
-			pgcache->ref_count--;
-		else
-			page = NULL;
-
-		/* In non-XDP mode, page's ref_count needs to be '1' for it
-		 * to be recycled.
-		 */
-		if (!rbdr->is_xdp && (ref_count != 1))
+		if (rbdr->is_xdp) {
+			if (ref_count == pgcache->ref_count)
+				pgcache->ref_count--;
+			else
+				page = NULL;
+		} else if (ref_count != 1) {
 			page = NULL;
+		}
 	}
 
 	if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
 	while (head < rbdr->pgcnt) {
 		pgcache = &rbdr->pgcache[head];
 		if (pgcache->page && page_ref_count(pgcache->page) != 0) {
-			if (!rbdr->is_xdp) {
-				put_page(pgcache->page);
-				continue;
-			}
-			page_ref_sub(pgcache->page, pgcache->ref_count - 1);
+			if (rbdr->is_xdp) {
+				page_ref_sub(pgcache->page,
+					     pgcache->ref_count - 1);
+			}
 			put_page(pgcache->page);
 		}
 		head++;
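The rewritten nicvf_alloc_page() branch reads more clearly as a predicate: in XDP mode the page is reusable when the driver's internal count matches the page ref count; otherwise the page must be down to a single reference. A sketch of that decision, with plain ints standing in for page_ref_count():

    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors the restructured recycle check in nicvf_alloc_page() */
    static bool page_recyclable(bool is_xdp, int page_refs, int internal_refs)
    {
        if (is_xdp)
            return page_refs == internal_refs;
        return page_refs == 1; /* non-XDP: internal count is always 1 */
    }

    int main(void)
    {
        printf("%d\n", page_recyclable(true, 3, 3));  /* 1: recycle */
        printf("%d\n", page_recyclable(false, 2, 1)); /* 0: still in flight */
        return 0;
    }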
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
 	ppmax = max;
 
 	/* pool size must be multiple of unsigned long */
-	bmap = BITS_TO_LONGS(ppmax);
+	bmap = ppmax / BITS_PER_TYPE(unsigned long);
+	if (!bmap)
+		return NULL;
+
 	ppmax = (bmap * sizeof(unsigned long)) << 3;
 
 	alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
 	if (reserve_factor) {
 		ppmax_pool = ppmax / reserve_factor;
 		pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+		if (!pool) {
+			ppmax_pool = 0;
+			reserve_factor = 0;
+		}
 
 		pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
 			 ndev->name, ppmax, ppmax_pool, pool_index_max);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
 /* free desc along with its attached buffer */
 static void hnae_free_desc(struct hnae_ring *ring)
 {
-	hnae_free_buffers(ring);
 	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
 			 ring->desc_num * sizeof(ring->desc[0]),
 			 ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
 /* fini ring, also free the buffer for the ring */
 static void hnae_fini_ring(struct hnae_ring *ring)
 {
+	if (is_rx_ring(ring))
+		hnae_free_buffers(ring);
+
 	hnae_free_desc(ring);
 	kfree(ring->desc_cb);
 	ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
 };
 
 struct hnae_queue {
-	void __iomem *io_base;
+	u8 __iomem *io_base;
 	phys_addr_t phy_base;
 	struct hnae_ae_dev *dev;	/* the device who use this queue */
 	struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
 static void hns_mac_param_get(struct mac_params *param,
 			      struct hns_mac_cb *mac_cb)
 {
-	param->vaddr = (void *)mac_cb->vaddr;
+	param->vaddr = mac_cb->vaddr;
 	param->mac_mode = hns_get_enet_interface(mac_cb);
 	ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
 	param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
 /*mac para struct ,mac get param from nic or dsaf when initialize*/
 struct mac_params {
 	char addr[ETH_ALEN];
-	void *vaddr; /*virtual address*/
+	u8 __iomem *vaddr; /*virtual address*/
 	struct device *dev;
 	u8 mac_id;
 	/**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
 	enum mac_mode mac_mode;
 	u8 mac_id;
 	struct hns_mac_cb *mac_cb;
-	void __iomem *io_base;
+	u8 __iomem *io_base;
 	unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
 	unsigned int virt_dev_num;
 	struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..61eea6ac846f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
 		       DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
 	dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
 		       DSAF_TBL_TCAM_KEY_PORT_S, port);
-
-	mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
 }
 
 /**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
 	/* default config dvc to 0 */
 	mac_data.tbl_ucast_dvc = 0;
 	mac_data.tbl_ucast_out_port = mac_entry->port_num;
-	tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-	tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+	tcam_data.tbl_tcam_data_high = mac_key.high.val;
+	tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
 	hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
 
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
 				     0xff,
 				     mc_mask);
 
-		mask_key.high.val = le32_to_cpu(mask_key.high.val);
-		mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
 		pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
 	}
 
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
 		dsaf_dev->ae_dev.name, mac_key.high.val,
 		mac_key.low.val, entry_index);
 
-	tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-	tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+	tcam_data.tbl_tcam_data_high = mac_key.high.val;
+	tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
 	/* config mc entry with mask */
 	hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
 		/* config key mask */
 		hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
 
-		mask_key.high.val = le32_to_cpu(mask_key.high.val);
-		mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
 		pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
 	}
 
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
 		soft_mac_entry += entry_index;
 		soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
 	} else { /* not zero, just del port, update */
-		tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-		tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+		tcam_data.tbl_tcam_data_high = mac_key.high.val;
+		tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
 		hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
 				     &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
 	return DSAF_DUMP_REGS_NUM;
 }
 
+static int hns_dsaf_get_port_id(u8 port)
+{
+	if (port < DSAF_SERVICE_NW_NUM)
+		return port;
+
+	if (port >= DSAF_BASE_INNER_PORT_NUM)
+		return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+	return -EINVAL;
+}
+
 static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
 	struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 	memset(&temp_key, 0x0, sizeof(temp_key));
 	mask_entry.addr[0] = 0x01;
 	hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
-			     port, mask_entry.addr);
+			     0xf, mask_entry.addr);
 	tbl_tcam_mcast.tbl_mcast_item_vld = 1;
 	tbl_tcam_mcast.tbl_mcast_old_en = 0;
 
-	if (port < DSAF_SERVICE_NW_NUM) {
-		mskid = port;
-	} else if (port >= DSAF_BASE_INNER_PORT_NUM) {
-		mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
-	} else {
+	/* set MAC port to handle multicast */
+	mskid = hns_dsaf_get_port_id(port);
+	if (mskid == -EINVAL) {
 		dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
 			dsaf_dev->ae_dev.name, port,
 			mask_key.high.val, mask_key.low.val);
 		return;
 	}
+	dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+		     mskid % 32, 1);
 
+	/* set pool bit map to handle multicast */
+	mskid = hns_dsaf_get_port_id(port_num);
+	if (mskid == -EINVAL) {
+		dev_err(dsaf_dev->dev,
+			"%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name, port_num,
+			mask_key.high.val, mask_key.low.val);
+		return;
+	}
 	dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
 		     mskid % 32, 1);
+
 	memcpy(&temp_key, &mask_key, sizeof(mask_key));
 	hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
 				   (struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
 			     u8 mac_id, u8 port_num);
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
 #endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
 		dsaf_set_field(origin, 1ull << 10, 10, en);
 		dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
 	} else {
-		u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+		u8 __iomem *base_addr = mac_cb->serdes_vaddr +
 				(mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
 		dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
 	}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
 	}
 }
 
-static void __iomem *
+static u8 __iomem *
 hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
 {
 	return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
 	dsaf_dev->ppe_common[comm_index] = NULL;
 }
 
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
-					int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+				      int ppe_idx)
 {
 	return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
 	struct hns_ppe_hw_stats hw_stats;
 
 	u8 index;	/* index in a ppe common device */
-	void __iomem *io_base;
+	u8 __iomem *io_base;
 	int virq;
 	u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
 	u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
 struct ppe_common_cb {
 	struct device *dev;
 	struct dsaf_device *dsaf_dev;
-	void __iomem *io_base;
+	u8 __iomem *io_base;
 
 	enum ppe_common_mode ppe_mode;
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
 		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
 	} else {
 		ring = &q->tx_ring;
-		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+		ring->io_base = ring_pair_cb->q.io_base +
 				HNS_RCB_TX_REG_OFFSET;
 		irq_idx = HNS_RCB_IRQ_IDX_TX;
 		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
 	}
 }
 
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
 {
 	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
 #define XGMAC_PAUSE_CTL_RSP_MODE_B	2
 #define XGMAC_PAUSE_CTL_TX_XOFF_B	3
 
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
 	writel(value, base + reg);
 }
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
 #define dsaf_set_bit(origin, shift, val) \
 	dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
 
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
 				      u32 shift, u32 val)
 {
 	u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
 #define dsaf_get_bit(origin, shift) \
 	dsaf_get_field((origin), (1ull << (shift)), (shift))
 
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
 				     u32 shift)
 {
 	u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
 	dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
 
 #define dsaf_write_b(addr, data)\
-	writeb((data), (__iomem unsigned char *)(addr))
+	writeb((data), (__iomem u8 *)(addr))
 #define dsaf_read_b(addr)\
-	readb((__iomem unsigned char *)(addr))
+	readb((__iomem u8 *)(addr))
 
 #define hns_mac_reg_read64(drv, offset) \
-	readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+	readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
 
 #endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
 	dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
 	dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
 	dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
-	dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+	dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
 }
 
 /**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index e37a0ca0db89..297b95c1b3c1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
 
 #define SERVICE_TIMER_HZ (1 * HZ)
 
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
 #define RCB_IRQ_NOT_INITED 0
 #define RCB_IRQ_INITED 1
 #define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
 	wmb(); /* commit all data before submit */
 	assert(skb->queue_mapping < priv->ae_handle->q_num);
 	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
-	ring->stats.tx_pkts++;
-	ring->stats.tx_bytes += skb->len;
 
 	return NETDEV_TX_OK;
 
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
 		/* issue prefetch for next Tx descriptor */
 		prefetch(&ring->desc_cb[ring->next_to_clean]);
 	}
+	/* update tx ring statistics. */
+	ring->stats.tx_pkts += pkts;
+	ring->stats.tx_bytes += bytes;
 
 	NETIF_TX_UNLOCK(ring);
 
@@ -2151,7 +2149,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
 					      hns_nic_tx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
-			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
 		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
 	}
 	for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2164,7 +2162,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
 					      hns_nic_rx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
-			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
 		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
 	}
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
 hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
 hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
 };
 
 struct hns_mdio_device {
-	void *vbase;		/* mdio reg base address */
+	u8 __iomem *vbase;	/* mdio reg base address */
 	struct regmap *subctrl_vbase;
 	struct hns_mdio_sc_reg sc_reg;
 };
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
 #define MDIO_SC_CLK_ST 0x531C
 #define MDIO_SC_RESET_ST 0x5A1C
 
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
-	u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-	writel_relaxed(value, reg_addr + reg);
+	writel_relaxed(value, base + reg);
 }
 
 #define MDIO_WRITE_REG(a, reg, value) \
 	mdio_write_reg((a)->vbase, (reg), (value))
 
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
 {
-	u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-	return readl_relaxed(reg_addr + reg);
+	return readl_relaxed(base + reg);
 }
 
 #define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
 
 #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
 
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
 			       u32 val)
 {
 	u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
 #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
 	mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
 
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
 {
 	u32 origin;
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 20c4e0835ba8..1de691e76b86 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1886,6 +1886,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
 	 */
 	adapter->state = VNIC_PROBED;
 
+	reinit_completion(&adapter->init_done);
 	rc = init_crq_queue(adapter);
 	if (rc) {
 		netdev_err(adapter->netdev,
@@ -4692,7 +4693,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
 	old_num_rx_queues = adapter->req_rx_queues;
 	old_num_tx_queues = adapter->req_tx_queues;
 
-	init_completion(&adapter->init_done);
+	reinit_completion(&adapter->init_done);
 	adapter->init_done_rc = 0;
 	ibmvnic_send_crq_init(adapter);
 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4747,7 +4748,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 
 	adapter->from_passive_init = false;
 
-	init_completion(&adapter->init_done);
 	adapter->init_done_rc = 0;
 	ibmvnic_send_crq_init(adapter);
 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4826,6 +4826,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
 	spin_lock_init(&adapter->rwi_lock);
+	init_completion(&adapter->init_done);
 	adapter->resetting = false;
 
 	adapter->mac_change_pending = false;
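The ibmvnic hunks move init_completion() to probe and use reinit_completion() on every reset path: re-initializing a completion that a waiter may already be sleeping on corrupts its wait queue. A userspace analog of the same lifetime rule built on pthreads (my own miniature "completion"; the kernel object is implemented differently):

    #include <pthread.h>
    #include <stdbool.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
    };

    /* probe time: set up the object exactly once */
    static void init_completion(struct completion *c)
    {
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = false;
    }

    /* reset time: only rearm the flag, never re-create lock/cond */
    static void reinit_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = false;
        pthread_mutex_unlock(&c->lock);
    }

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = true;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct completion c;

        init_completion(&c);   /* once, as in ibmvnic_probe() */
        complete(&c);
        wait_for_completion(&c);
        reinit_completion(&c); /* per reset, as in do_hard_reset() */
        return 0;
    }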
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e2fa112bed9a..2325cee76211 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
 	/* create driver workqueue */
 	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
 					  fm10k_driver_name);
+	if (!fm10k_workqueue)
+		return -ENOMEM;
 
 	fm10k_dbg_init();
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..d3cc3427caad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -790,6 +790,8 @@ struct i40e_vsi {
 
 	/* VSI specific handlers */
 	irqreturn_t (*irq_handler)(int irq, void *data);
+
+	unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
 } ____cacheline_internodealigned_in_smp;
 
 struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
 	return !!vsi->xdp_prog;
 }
 
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
-	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
-	int qid = ring->queue_index;
-
-	if (ring_is_xdp(ring))
-		qid -= ring->vsi->alloc_queue_pairs;
-
-	if (!xdp_on)
-		return NULL;
-
-	return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7874d0ec7fb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 		return -EOPNOTSUPP;
 
 	/* only magic packet is supported */
-	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
-	    | (wol->wolopts != WAKE_FILTER))
+	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EOPNOTSUPP;
 
 	/* is this a new value? */
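The i40e ethtool fix replaces a broken mix of && and | with a single mask test: any requested wake bit outside WAKE_MAGIC is rejected in one expression. The idiom in isolation (flag values copied from my reading of the ethtool uapi; treat them as illustrative):

    #include <stdio.h>

    #define WAKE_MAGIC  0x20
    #define WAKE_FILTER 0x80

    static int check_wol(unsigned int wolopts)
    {
        if (wolopts & ~WAKE_MAGIC) /* any unsupported bit set? */
            return -1;             /* -EOPNOTSUPP in the driver */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_wol(WAKE_MAGIC));               /*  0 */
        printf("%d\n", check_wol(WAKE_FILTER));              /* -1 */
        printf("%d\n", check_wol(WAKE_MAGIC | WAKE_FILTER)); /* -1 */
        return 0;
    }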
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..b1c265012c8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
 }
 
 /**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+	int qid = ring->queue_index;
+
+	if (ring_is_xdp(ring))
+		qid -= ring->vsi->alloc_queue_pairs;
+
+	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+		return NULL;
+
+	return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
+/**
  * i40e_configure_tx_ring - Configure a transmit ring context and rest
  * @ring: The Tx ring to configure
  *
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	hash_init(vsi->mac_filter_hash);
 	vsi->irqs_ready = false;
 
+	if (type == I40E_VSI_MAIN) {
+		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+		if (!vsi->af_xdp_zc_qps)
+			goto err_rings;
+	}
+
 	ret = i40e_set_num_rings_in_vsi(vsi);
 	if (ret)
 		goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 		goto unlock_pf;
 
 err_rings:
+	bitmap_free(vsi->af_xdp_zc_qps);
 	pf->next_vsi = i - 1;
 	kfree(vsi);
 unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+	bitmap_free(vsi->af_xdp_zc_qps);
 	i40e_vsi_free_arrays(vsi, true);
 	i40e_clear_rss_config_user(vsi);
 
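i40e now tracks which queue pairs have AF_XDP zero-copy enabled in a heap-allocated bitmap sized at runtime: bitmap_zalloc()/bitmap_free() above, set_bit()/clear_bit() at the umem enable/disable sites in i40e_xsk.c further down, test_bit() in i40e_xsk_umem(). A self-contained userspace sketch of that bitmap pattern (helper names mimic the kernel's but are local reimplementations):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* like bitmap_zalloc(): one zeroed bit per queue pair */
    static unsigned long *bitmap_zalloc(unsigned int nbits)
    {
        size_t longs = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
        return calloc(longs, sizeof(unsigned long));
    }

    static void set_bit(unsigned int n, unsigned long *map)
    {
        map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
    }

    static void clear_bit(unsigned int n, unsigned long *map)
    {
        map[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG));
    }

    static int test_bit(unsigned int n, const unsigned long *map)
    {
        return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        unsigned long *zc_qps = bitmap_zalloc(128);

        if (!zc_qps)
            return 1;
        set_bit(5, zc_qps);                  /* umem enabled on qid 5 */
        printf("%d\n", test_bit(5, zc_qps)); /* 1 */
        clear_bit(5, zc_qps);                /* umem disabled */
        printf("%d\n", test_bit(5, zc_qps)); /* 0 */
        free(zc_qps);                        /* kernel: bitmap_free() */
        return 0;
    }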
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..31575c0bb884 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
 	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-	struct timespec64 now;
+	struct timespec64 now, then;
 
+	then = ns_to_timespec64(delta);
 	mutex_lock(&pf->tmreg_lock);
 
 	i40e_ptp_read(pf, &now, NULL);
-	timespec64_add_ns(&now, delta);
+	now = timespec64_add(now, then);
 	i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
 	mutex_unlock(&pf->tmreg_lock);
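The PTP fix swaps timespec64_add_ns(), which takes an unsigned nanosecond count, for ns_to_timespec64() plus timespec64_add(), so a negative adjtime delta is no longer mangled. A sketch of signed-delta addition on plain struct timespec, with the normalization written out by hand (assumes the usual [0, 1e9) tv_nsec invariant and a 64-bit long for tv_nsec arithmetic):

    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    /* add a signed nanosecond delta, keeping tv_nsec in range */
    static struct timespec timespec_add_delta(struct timespec t, long long delta)
    {
        t.tv_sec += delta / NSEC_PER_SEC;
        t.tv_nsec += delta % NSEC_PER_SEC;
        if (t.tv_nsec >= NSEC_PER_SEC) {
            t.tv_sec++;
            t.tv_nsec -= NSEC_PER_SEC;
        } else if (t.tv_nsec < 0) {
            t.tv_sec--;
            t.tv_nsec += NSEC_PER_SEC;
        }
        return t;
    }

    int main(void)
    {
        struct timespec now = { .tv_sec = 100, .tv_nsec = 100 };

        now = timespec_add_delta(now, -200); /* negative deltas work */
        printf("%lld.%09ld\n", (long long)now.tv_sec, now.tv_nsec);
        return 0;
    }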
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 	if (err)
 		return err;
 
+	set_bit(qid, vsi->af_xdp_zc_qps);
+
 	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
 
 	if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
 		return err;
 	}
 
+	clear_bit(qid, vsi->af_xdp_zc_qps);
 	i40e_xsk_umem_dma_unmap(vsi, umem);
 
 	if (if_running) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0	0x00040000 /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1	0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC	0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT	0x00200000 /* PHY PM enable */
 #define E1000_CTRL_SDP0_DIR	0x00400000 /* SDP0 Data direction */
 #define E1000_CTRL_SDP1_DIR	0x00800000 /* SDP1 Data direction */
 #define E1000_CTRL_RST		0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 32d61d5a2706..acbb5b4f333d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8743,9 +8743,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl, status;
 	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
-	int retval = 0;
-#endif
+	bool wake;
 
 	rtnl_lock();
 	netif_device_detach(netdev);
@@ -8758,14 +8756,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	igb_clear_interrupt_scheme(adapter);
 	rtnl_unlock();
 
-#ifdef CONFIG_PM
-	if (!runtime) {
-		retval = pci_save_state(pdev);
-		if (retval)
-			return retval;
-	}
-#endif
-
 	status = rd32(E1000_STATUS);
 	if (status & E1000_STATUS_LU)
 		wufc &= ~E1000_WUFC_LNKC;
@@ -8782,10 +8772,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	}
 
 	ctrl = rd32(E1000_CTRL);
-	/* advertise wake from D3Cold */
-	#define E1000_CTRL_ADVD3WUC 0x00100000
-	/* phy power management enable */
-	#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
 	ctrl |= E1000_CTRL_ADVD3WUC;
 	wr32(E1000_CTRL, ctrl);
 
@@ -8799,12 +8785,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
 		wr32(E1000_WUFC, 0);
 	}
 
-	*enable_wake = wufc || adapter->en_mng_pt;
-	if (!*enable_wake)
+	wake = wufc || adapter->en_mng_pt;
+	if (!wake)
 		igb_power_down_link(adapter);
 	else
 		igb_power_up_link(adapter);
 
+	if (enable_wake)
+		*enable_wake = wake;
+
 	/* Release control of h/w to f/w. If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
@@ -8847,22 +8836,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
 
 static int __maybe_unused igb_suspend(struct device *dev)
 {
-	int retval;
-	bool wake;
-	struct pci_dev *pdev = to_pci_dev(dev);
-
-	retval = __igb_shutdown(pdev, &wake, 0);
-	if (retval)
-		return retval;
-
-	if (wake) {
-		pci_prepare_to_sleep(pdev);
-	} else {
-		pci_wake_from_d3(pdev, false);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return 0;
+	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 
 static int __maybe_unused igb_resume(struct device *dev)
@@ -8933,22 +8907,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
 
 static int __maybe_unused igb_runtime_suspend(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	int retval;
-	bool wake;
-
-	retval = __igb_shutdown(pdev, &wake, 1);
-	if (retval)
-		return retval;
-
-	if (wake) {
-		pci_prepare_to_sleep(pdev);
-	} else {
-		pci_wake_from_d3(pdev, false);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return 0;
+	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
 }
 
 static int __maybe_unused igb_runtime_resume(struct device *dev)
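The igb hunks above move the PCI sleep-state handling into the PM core and make the wake out-parameter optional: __igb_shutdown() now reports the wake decision only when the caller passes a non-NULL pointer. A minimal userspace sketch of that optional out-parameter pattern (names and logic are illustrative, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Optional out-parameter: callers that don't care pass NULL. */
    static int example_shutdown(bool *enable_wake, bool runtime)
    {
        bool wake = !runtime;           /* stand-in for the wufc/en_mng_pt test */

        if (enable_wake)                /* suspend paths may pass NULL */
            *enable_wake = wake;
        return 0;
    }

    int main(void)
    {
        bool wake = false;

        example_shutdown(&wake, false); /* shutdown path wants the result */
        example_shutdown(NULL, true);   /* runtime-suspend path does not */
        printf("wake=%d\n", wake);
        return 0;
    }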
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
 	struct pci_dev *pdev = adapter->pdev;
 	struct device *dev = &adapter->netdev->dev;
 	struct mii_bus *bus;
+	int err = -ENODEV;
 
-	adapter->mii_bus = devm_mdiobus_alloc(dev);
-	if (!adapter->mii_bus)
+	bus = devm_mdiobus_alloc(dev);
+	if (!bus)
 		return -ENOMEM;
 
-	bus = adapter->mii_bus;
-
 	switch (hw->device_id) {
 	/* C3000 SoCs */
 	case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
 	 */
 	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
 
-	return mdiobus_register(bus);
+	err = mdiobus_register(bus);
+	if (!err) {
+		adapter->mii_bus = bus;
+		return 0;
+	}
 
 ixgbe_no_mii_bus:
 	devm_mdiobus_free(dev, bus);
-	adapter->mii_bus = NULL;
-	return -ENODEV;
+	return err;
 }
 
 /**
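The ixgbe hunk publishes adapter->mii_bus only after mdiobus_register() succeeds, so no error path can leave a half-initialized bus pointer visible. A compilable sketch of the publish-on-success shape, with stand-in types rather than the real mdiobus API:

    #include <stdio.h>

    /* Stand-in types; not the kernel's mdiobus interfaces. */
    struct mii_bus { int id; };
    struct adapter { struct mii_bus *mii_bus; };

    static int register_bus(struct mii_bus *bus) { (void)bus; return 0; }
    static void free_bus(struct mii_bus *bus) { (void)bus; }

    static int bus_init(struct adapter *adapter, struct mii_bus *bus)
    {
        int err = register_bus(bus);

        if (!err) {
            adapter->mii_bus = bus;  /* published only after success */
            return 0;
        }
        free_bus(bus);               /* error path never published it */
        return err;
    }

    int main(void)
    {
        struct mii_bus bus = { 0 };
        struct adapter a = { 0 };

        printf("init=%d published=%d\n", bus_init(&a, &bus), a.mii_bus != NULL);
        return 0;
    }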
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
 	if (!eproto)
 		return -EINVAL;
 
-	if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
-		return -EOPNOTSUPP;
-
 	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index b0ce68feb0f3..633b117eb13e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
 	return err;
 }
 
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
+ * minimum speed value is 40Gbps
+ */
 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 {
 	u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 	int err;
 
 	err = mlx5e_port_linkspeed(priv->mdev, &speed);
-	if (err) {
-		mlx5_core_warn(priv->mdev, "cannot get port speed\n");
-		return 0;
-	}
+	if (err)
+		speed = SPEED_40000;
+	speed = max_t(u32, speed, SPEED_40000);
 
 	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
 
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-				 u32 xoff, unsigned int mtu)
+				 u32 xoff, unsigned int max_mtu)
 {
 	int i;
 
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 		}
 
 		if (port_buffer->buffer[i].size <
-		    (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
 			return -ENOMEM;
 
 		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
-		port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
+		port_buffer->buffer[i].xon =
+			port_buffer->buffer[i].xoff - max_mtu;
 	}
 
 	return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 
 /**
  * update_buffer_lossy - Update buffer configuration based on pfc
- * @mtu: device's MTU
+ * @max_mtu: netdev's max_mtu
  * @pfc_en: <input> current pfc configuration
  * @buffer: <input> current prio to buffer mapping
  * @xoff: <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  * @return: 0 if no error,
  *	    sets change to true if buffer configuration was modified.
  */
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
 			       u8 pfc_en, u8 *buffer, u32 xoff,
 			       struct mlx5e_port_buffer *port_buffer,
 			       bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
 	}
 
 	if (changed) {
-		err = update_xoff_threshold(port_buffer, xoff, mtu);
+		err = update_xoff_threshold(port_buffer, xoff, max_mtu);
 		if (err)
 			return err;
 
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
 	return 0;
 }
 
+#define MINIMUM_MAX_MTU 9216
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 				    u32 change, unsigned int mtu,
 				    struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 	bool update_prio2buffer = false;
 	u8 buffer[MLX5E_MAX_PRIORITY];
 	bool update_buffer = false;
+	unsigned int max_mtu;
 	u32 total_used = 0;
 	u8 curr_pfc_en;
 	int err;
 	int i;
 
 	mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+	max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
 
 	err = mlx5e_port_query_buffer(priv, &port_buffer);
 	if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
 	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
 		if (err)
 			return err;
 	}
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 		if (err)
 			return err;
 
-		err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+		err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
 					  &port_buffer, &update_buffer);
 		if (err)
 			return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 		if (err)
 			return err;
 
-		err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
-					  &port_buffer, &update_buffer);
+		err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+					  xoff, &port_buffer, &update_buffer);
 		if (err)
 			return err;
 	}
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 			return -EINVAL;
 
 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
 		if (err)
 			return err;
 	}
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 	/* Need to update buffer configuration if xoff value is changed */
 	if (!update_buffer && xoff != priv->dcbx.xoff) {
 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
 		if (err)
 			return err;
 	}
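The xoff comment near the top of this file encodes xoff = ((301 + 2.16 * len) * speed + 2.72 * MTU) in integer math scaled by 100, and the hunks above clamp speed to a 40 Gb/s floor and use a max_mtu floor of 9216 bytes. A worked example of the same arithmetic (units follow the driver: speed in Mb/s, cable length in meters):

    #include <stdio.h>

    /* Integer form of the xoff formula in the comment above; a worked
     * example, not the driver function itself. */
    static unsigned int calc_xoff(unsigned int speed, unsigned int cable_len,
                                  unsigned int max_mtu)
    {
        return (301 + 216 * cable_len / 100) * speed / 1000 + 272 * max_mtu / 100;
    }

    int main(void)
    {
        /* 40 Gb/s floor, 7 m cable, 9216-byte max_mtu floor */
        printf("xoff = %u bytes\n", calc_xoff(40000, 7, 9216)); /* 37707 */
        return 0;
    }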
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
 	if (err)
 		return err;
 
+	mutex_lock(&mdev->mlx5e_res.td.list_lock);
 	list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
 	return 0;
 }
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
 		       struct mlx5e_tir *tir)
 {
+	mutex_lock(&mdev->mlx5e_res.td.list_lock);
 	mlx5_core_destroy_tir(mdev, tir->tirn);
 	list_del(&tir->list);
+	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
 	}
 
 	INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+	mutex_init(&mdev->mlx5e_res.td.list_lock);
 
 	return 0;
 
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_tir *tir;
-	int err = -ENOMEM;
+	int err = 0;
 	u32 tirn = 0;
 	int inlen;
 	void *in;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
 	in = kvzalloc(inlen, GFP_KERNEL);
-	if (!in)
+	if (!in) {
+		err = -ENOMEM;
 		goto out;
+	}
 
 	if (enable_uc_lb)
 		MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 
 	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
+	mutex_lock(&mdev->mlx5e_res.td.list_lock);
 	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
 		tirn = tir->tirn;
 		err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
 	kvfree(in);
 	if (err)
 		netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
 	return err;
 }
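These en_common.c hunks put every tirs_list mutation and traversal under the new list_lock so a refresh cannot race an add or delete. The same guard-the-list discipline, rendered as a standalone pthread example (stand-in types, not mlx5 code):

    #include <pthread.h>
    #include <stdio.h>

    /* Every list access goes through the one mutex, as in the hunks above. */
    struct node { int tirn; struct node *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *tirs_list;

    static void tir_add(struct node *n)
    {
        pthread_mutex_lock(&list_lock);
        n->next = tirs_list;
        tirs_list = n;
        pthread_mutex_unlock(&list_lock);
    }

    static void tirs_refresh(void)
    {
        pthread_mutex_lock(&list_lock);  /* walk is serialized with add/del */
        for (struct node *n = tirs_list; n; n = n->next)
            printf("refresh tir %d\n", n->tirn);
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        struct node a = { .tirn = 1 }, b = { .tirn = 2 };

        tir_add(&a);
        tir_add(&b);
        tirs_refresh();
        return 0;
    }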
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a0987cc5fe4a..5efce4a3ff79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
 		      __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
-				    unsigned long *advertising_modes,
-				    u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+				    u32 eth_proto_cap, bool ext)
 {
 	unsigned long proto_cap = eth_proto_cap;
 	struct ptys2ethtool_config *table;
 	u32 max_size;
 	int proto;
 
-	mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+	table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+	max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+			 ARRAY_SIZE(ptys2legacy_ethtool_table);
+
 	for_each_set_bit(proto, &proto_cap, max_size)
 		bitmap_or(advertising_modes, advertising_modes,
 			  table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
 	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
 }
 
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
-			    u8 tx_pause, u8 rx_pause,
-			    struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+			    struct ethtool_link_ksettings *link_ksettings,
+			    bool ext)
 {
 	unsigned long *advertising = link_ksettings->link_modes.advertising;
-	ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+	ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
 
 	if (rx_pause)
 		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
 			      struct ethtool_link_ksettings *link_ksettings)
 {
 	unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
 
-	ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+	ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
 
 int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 	u8 an_disable_admin;
 	u8 an_status;
 	u8 connector_type;
+	bool admin_ext;
 	bool ext;
 	int err;
 
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 					      eth_proto_capability);
 	eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
 					     eth_proto_admin);
+	/* Fields: eth_proto_admin and ext_eth_proto_admin are
+	 * mutually exclusive. Hence try reading legacy advertising
+	 * when extended advertising is zero.
+	 * admin_ext indicates how eth_proto_admin should be
+	 * interpreted
+	 */
+	admin_ext = ext;
+	if (ext && !eth_proto_admin) {
+		eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+						     eth_proto_admin);
+		admin_ext = false;
+	}
+
 	eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
 					    eth_proto_oper);
 	eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
 
 	get_supported(mdev, eth_proto_cap, link_ksettings);
-	get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+	get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+			admin_ext);
 	get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
 
 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 
 #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
 
-	ext_requested = (link_ksettings->link_modes.advertising[0] >
-			MLX5E_PTYS_EXT);
+	ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+			MLX5E_PTYS_EXT ||
+			link_ksettings->link_modes.advertising[1]);
 	ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
-	/*when ptys_extended_ethernet is set legacy link modes are deprecated */
-	if (ext_requested != ext_supported)
-		return -EPROTONOSUPPORT;
+	ext_requested &= ext_supported;
 
 	speed = link_ksettings->base.speed;
 	ethtool2ptys_adver_func = ext_requested ?
 				  mlx5e_ethtool2ptys_ext_adver_link :
 				  mlx5e_ethtool2ptys_adver_link;
-	err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+	err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
 	if (err) {
 		netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
 			   __func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	if (!an_changes && link_modes == eproto.admin)
 		goto out;
 
-	mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+	mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
 	mlx5_toggle_port_link(mdev);
 
 out:
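In the set_link_ksettings hunk, extended modes become a request rather than a hard requirement: ext_requested is true when advertising word 0 overflows the legacy range or word 1 carries any bit, and it is then masked by device support instead of failing with -EPROTONOSUPPORT. A small sketch of that derivation (the threshold constant is illustrative, standing in for MLX5E_PTYS_EXT):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the 50G-bit boundary used by MLX5E_PTYS_EXT. */
    #define PTYS_EXT_THRESHOLD ((1ULL << 40) - 1)

    static bool want_ext(const uint64_t adv[2], bool ext_supported)
    {
        bool ext_requested = adv[0] > PTYS_EXT_THRESHOLD || adv[1];

        return ext_requested && ext_supported; /* ext_requested &= ext_supported */
    }

    int main(void)
    {
        uint64_t adv[2] = { 0, 1 };            /* a mode in the high word */

        printf("%d %d\n", want_ext(adv, true), want_ext(adv, false)); /* 1 0 */
        return 0;
    }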
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2fd425a7b156..ffc4a36551c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2161,6 +2161,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
 	return true;
 }
 
+struct ip_ttl_word {
+	__u8	ttl;
+	__u8	protocol;
+	__sum16	check;
+};
+
+struct ipv6_hoplimit_word {
+	__be16	payload_len;
+	__u8	nexthdr;
+	__u8	hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+	u32 mask, offset;
+	u8 htype;
+
+	htype = act->mangle.htype;
+	offset = act->mangle.offset;
+	mask = ~act->mangle.mask;
+	/* For IPv4 & IPv6 header check 4 byte word,
+	 * to determine that modified fields
+	 * are NOT ttl & hop_limit only.
+	 */
+	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+		struct ip_ttl_word *ttl_word =
+			(struct ip_ttl_word *)&mask;
+
+		if (offset != offsetof(struct iphdr, ttl) ||
+		    ttl_word->protocol ||
+		    ttl_word->check) {
+			return true;
+		}
+	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+		struct ipv6_hoplimit_word *hoplimit_word =
+			(struct ipv6_hoplimit_word *)&mask;
+
+		if (offset != offsetof(struct ipv6hdr, payload_len) ||
+		    hoplimit_word->payload_len ||
+		    hoplimit_word->nexthdr) {
+			return true;
+		}
+	}
+	return false;
+}
+
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 					  struct flow_action *flow_action,
 					  u32 actions,
@@ -2168,9 +2214,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 {
 	const struct flow_action_entry *act;
 	bool modify_ip_header;
-	u8 htype, ip_proto;
 	void *headers_v;
 	u16 ethertype;
+	u8 ip_proto;
 	int i;
 
 	if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2190,9 +2236,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 		    act->id != FLOW_ACTION_ADD)
 			continue;
 
-		htype = act->mangle.htype;
-		if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
-		    htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+		if (is_action_keys_supported(act)) {
 			modify_ip_header = true;
 			break;
 		}
@@ -2381,15 +2425,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
 	return 0;
 }
 
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
-				 struct ip_tunnel_key *b)
+struct encap_key {
+	struct ip_tunnel_key *ip_tun_key;
+	int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+				 struct encap_key *b)
 {
-	return memcmp(a, b, sizeof(*a));
+	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+	       a->tunnel_type != b->tunnel_type;
 }
 
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
 {
-	return jhash(key, sizeof(*key), 0);
+	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+		     key->tunnel_type);
 }
 
 
@@ -2420,7 +2471,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 	struct ip_tunnel_info *tun_info;
-	struct ip_tunnel_key *key;
+	struct encap_key key, e_key;
 	struct mlx5e_encap_entry *e;
 	unsigned short family;
 	uintptr_t hash_key;
@@ -2430,13 +2481,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	parse_attr = attr->parse_attr;
 	tun_info = &parse_attr->tun_info[out_index];
 	family = ip_tunnel_info_af(tun_info);
-	key = &tun_info->key;
+	key.ip_tun_key = &tun_info->key;
+	key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
 
-	hash_key = hash_encap_info(key);
+	hash_key = hash_encap_info(&key);
 
 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
 				   encap_hlist, hash_key) {
-		if (!cmp_encap_info(&e->tun_info.key, key)) {
+		e_key.ip_tun_key = &e->tun_info.key;
+		e_key.tunnel_type = e->tunnel_type;
+		if (!cmp_encap_info(&e_key, &key)) {
 			found = true;
 			break;
 		}
@@ -2717,7 +2771,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 
 	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
 	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
-		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
 					    parse_attr, hdrs, extack);
 		if (err)
 			return err;
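is_action_keys_supported() above rejects a pedit that touches anything besides TTL or hop limit by overlaying a small struct on the inverted 32-bit mangle mask and testing the sibling fields. The same trick in standalone form (little-endian layout assumed for brevity; field offsets mirror the IPv4 ttl/protocol/check word):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Overlay-the-mask trick; illustrative, not the driver code. */
    struct ip_ttl_word {
        uint8_t  ttl;
        uint8_t  protocol;
        uint16_t check;
    };

    static int touches_more_than_ttl(uint32_t mangle_mask)
    {
        uint32_t mask = ~mangle_mask;    /* set bits = fields being written */
        struct ip_ttl_word w;

        memcpy(&w, &mask, sizeof(w));    /* same idea as the driver's cast */
        return w.protocol || w.check;
    }

    int main(void)
    {
        printf("%d\n", touches_more_than_ttl(~0xffu));   /* ttl only: 0 */
        printf("%d\n", touches_more_than_ttl(~0xffffu)); /* +protocol: 1 */
        return 0;
    }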
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ecd2c747f726..8a67fd197b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
 	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
 				     in, nic_vport_context);
 
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
 	MLX5_SET(modify_esw_vport_context_in, in, opcode,
 		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
 {
 	int err;
 
+	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
 	err = esw_create_legacy_vepa_table(esw);
 	if (err)
 		return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
 
 	/* Star rule to forward all traffic to uplink vport */
 	memset(spec, 0, sizeof(*spec));
+	memset(&dest, 0, sizeof(dest));
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport.num = MLX5_VPORT_UPLINK;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 6c72f33f6d09..fe770cd2151c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1609,6 +1609,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
 {
 	int err;
 
+	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
 	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
 	err = esw_create_offloads_fdb_tables(esw, nvports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..8de64e88c670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 	void *cmd;
 	int ret;
 
+	rcu_read_lock();
+	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+	rcu_read_unlock();
+
+	if (!flow) {
+		WARN_ONCE(1, "Received NULL pointer for handle\n");
+		return -EINVAL;
+	}
+
 	buf = kzalloc(size, GFP_ATOMIC);
 	if (!buf)
 		return -ENOMEM;
 
 	cmd = (buf + 1);
 
-	rcu_read_lock();
-	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-	rcu_read_unlock();
 	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
 
 	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 	buf->complete = mlx_tls_kfree_complete;
 
 	ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+	if (ret < 0)
+		kfree(buf);
 
 	return ret;
 }
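The tls.c change validates the handle before any allocation and releases the command buffer when the send fails, closing both a NULL dereference and a memory leak. The control flow, reduced to a compilable sketch with stand-in lookup/send helpers:

    #include <stdio.h>
    #include <stdlib.h>

    /* lookup() and send_msg() stand in for idr_find() and the FPGA send. */
    static void *lookup(unsigned int handle) { return handle ? (void *)1 : NULL; }
    static int send_msg(void *buf) { (void)buf; return -1; /* simulated failure */ }

    static int resync_rx(unsigned int handle)
    {
        void *flow = lookup(handle);
        void *buf;
        int ret;

        if (!flow)
            return -1;       /* bail before allocating anything */

        buf = calloc(1, 64);
        if (!buf)
            return -2;

        ret = send_msg(buf);
        if (ret < 0)
            free(buf);       /* a failed send must not leak the buffer */
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", resync_rx(0), resync_rx(7)); /* -1 -1 */
        return 0;
    }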
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 70cc906a102b..76716419370d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
 			.size	= 8,
 			.limit	= 4
 		},
-		.mr_cache[16]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-		.mr_cache[17]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-		.mr_cache[18]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-		.mr_cache[19]	= {
-			.size	= 4,
-			.limit	= 2
-		},
-		.mr_cache[20]	= {
-			.size	= 4,
-			.limit	= 2
-		},
 	},
 };
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 6e2a6caec3fb..c56e31d9f8a4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
 
 	tmp_push_vlan_tci =
 		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
-		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
-		NFP_FL_PUSH_VLAN_CFI;
+		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
 	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
 }
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..0ed51e79db00 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
 #define NFP_FLOWER_LAYER2_GENEVE_OP	BIT(6)
 
 #define NFP_FLOWER_MASK_VLAN_PRIO	GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI	BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT	BIT(12)
 #define NFP_FLOWER_MASK_VLAN_VID	GENMASK(11, 0)
 
 #define NFP_FLOWER_MASK_MPLS_LB	GENMASK(31, 12)
@@ -82,7 +82,6 @@
 #define NFP_FL_OUT_FLAGS_TYPE_IDX	GENMASK(2, 0)
 
 #define NFP_FL_PUSH_VLAN_PRIO	GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI	BIT(12)
 #define NFP_FL_PUSH_VLAN_VID	GENMASK(11, 0)
 
 #define IPV6_FLOW_LABEL_MASK	cpu_to_be32(0x000fffff)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..9b8b843d0340 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
 
 		flow_rule_match_vlan(rule, &match);
 		/* Populate the tci field. */
-		if (match.key->vlan_id || match.key->vlan_priority) {
-			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-					     match.key->vlan_priority) |
-				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-					     match.key->vlan_id) |
-				  NFP_FLOWER_MASK_VLAN_CFI;
-			ext->tci = cpu_to_be16(tmp_tci);
-			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-					     match.mask->vlan_priority) |
-				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-					     match.mask->vlan_id) |
-				  NFP_FLOWER_MASK_VLAN_CFI;
-			msk->tci = cpu_to_be16(tmp_tci);
-		}
+		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+				      match.key->vlan_priority) |
+			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+				      match.key->vlan_id);
+		ext->tci = cpu_to_be16(tmp_tci);
+
+		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+				      match.mask->vlan_priority) |
+			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+				      match.mask->vlan_id);
+		msk->tci = cpu_to_be16(tmp_tci);
 	}
 }
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index c3ad083d36c6..08e9bfa95f9b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
 	ret = dev_queue_xmit(skb);
 	nfp_repr_inc_tx_stats(netdev, len, ret);
 
-	return ret;
+	return NETDEV_TX_OK;
 }
 
 static int nfp_repr_stop(struct net_device *netdev)
@@ -382,7 +382,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
 	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
 	netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
 
-	netdev->priv_flags |= IFF_NO_QUEUE;
+	netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
 	netdev->features |= NETIF_F_LLTX;
 
 	if (nfp_app_has_tc(app)) {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a8ca26c2ae0c..88eb9e05d2a1 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5460,7 +5460,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
 	tp->cp_cmd |= PktCntrDisable | INTT_1;
 	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
 
-	RTL_W16(tp, IntrMitigate, 0x5151);
+	RTL_W16(tp, IntrMitigate, 0x5100);
 
 	/* Work around for RxFIFO overflow. */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
 /* Specific functions used for Ring mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+					   int bfsize)
 {
-	p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-			<< ERDES1_BUFFER2_SIZE_SHIFT)
-		   & ERDES1_BUFFER2_SIZE_MASK);
+	if (bfsize == BUF_SIZE_16KiB)
+		p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+				<< ERDES1_BUFFER2_SIZE_SHIFT)
+			   & ERDES1_BUFFER2_SIZE_MASK);
 
 	if (end)
 		p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-	p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-			<< RDES1_BUFFER2_SIZE_SHIFT)
-		    & RDES1_BUFFER2_SIZE_MASK);
+	if (bfsize >= BUF_SIZE_2KiB) {
+		int bfsize2;
+
+		bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+		p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+				& RDES1_BUFFER2_SIZE_MASK);
+	}
 
 	if (end)
 		p->des1 |= cpu_to_le32(RDES1_END_RING);
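The ring-mode helpers above stop hard-coding buffer sizes: buffer 1 takes min(bfsize, per-buffer limit) and, for normal descriptors, buffer 2 takes the remainder capped at the same 2 KiB - 1 limit. A worked example of the split for a 3000-byte bfsize:

    #include <stdio.h>

    #define BUF_SIZE_2KiB 2048

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* Worked example of the normal-descriptor buffer split shown above. */
    int main(void)
    {
        int bfsize = 3000;
        int bfsize1 = min_int(bfsize, BUF_SIZE_2KiB - 1);
        int bfsize2 = min_int(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);

        /* prints buf1=2047 buf2=953 total=3000 */
        printf("buf1=%d buf2=%d total=%d\n", bfsize1, bfsize2, bfsize1 + bfsize2);
        return 0;
    }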
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				   int mode, int end)
+				   int mode, int end, int bfsize)
 {
 	dwmac4_set_rx_owner(p, disable_rx_ic);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				  int mode, int end)
+				  int mode, int end, int bfsize)
 {
 	dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	if (unlikely(rdes0 & RDES0_OWN))
 		return dma_own;
 
+	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+		stats->rx_length_errors++;
+		return discard_frame;
+	}
+
 	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
 		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
 			x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	 * It doesn't match with the information reported into the databook.
 	 * At any rate, we need to understand if the CSUM hw computation is ok
 	 * and report this info to the upper layers. */
-	ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-				 !!(rdes0 & RDES0_FRAME_TYPE),
-				 !!(rdes0 & ERDES0_RX_MAC_ADDR));
+	if (likely(ret == good_frame))
+		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+					 !!(rdes0 & RDES0_FRAME_TYPE),
+					 !!(rdes0 & ERDES0_RX_MAC_ADDR));
 
 	if (unlikely(rdes0 & RDES0_DRIBBLING))
 		x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				  int mode, int end)
+				  int mode, int end, int bfsize)
 {
+	int bfsize1;
+
 	p->des0 |= cpu_to_le32(RDES0_OWN);
-	p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ehn_desc_rx_set_on_chain(p);
 	else
-		ehn_desc_rx_set_on_ring(p, end);
+		ehn_desc_rx_set_on_ring(p, end, bfsize);
 
 	if (disable_rx_ic)
 		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
 	/* DMA RX descriptor ring initialization */
 	void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-			     int end);
+			     int end, int bfsize);
 	/* DMA TX descriptor ring initialization */
 	void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
 	/* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..b7dd4e3c760d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 		return dma_own;
 
 	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-		pr_warn("%s: Oversized frame spanned multiple buffers\n",
-			__func__);
 		stats->rx_length_errors++;
 		return discard_frame;
 	}
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-			       int end)
+			       int end, int bfsize)
 {
+	int bfsize1;
+
 	p->des0 |= cpu_to_le32(RDES0_OWN);
-	p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+	bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+	p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ndesc_rx_set_on_chain(p, end);
 	else
-		ndesc_rx_set_on_ring(p, end);
+		ndesc_rx_set_on_ring(p, end, bfsize);
 
 	if (disable_rx_ic)
 		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6a2e1031a62a..a26e36dbb5df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
 		if (priv->extend_desc)
 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
 					    priv->use_riwt, priv->mode,
-					    (i == DMA_RX_SIZE - 1));
+					    (i == DMA_RX_SIZE - 1),
+					    priv->dma_buf_sz);
 		else
 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
 					    priv->use_riwt, priv->mode,
-					    (i == DMA_RX_SIZE - 1));
+					    (i == DMA_RX_SIZE - 1),
+					    priv->dma_buf_sz);
 }
 
 /**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_channel *ch = &priv->channel[queue];
-	unsigned int entry = rx_q->cur_rx;
+	unsigned int next_entry = rx_q->cur_rx;
 	int coe = priv->hw->rx_csum;
-	unsigned int next_entry;
 	unsigned int count = 0;
 	bool xmac;
 
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
 	}
 	while (count < limit) {
-		int status;
+		int entry, status;
 		struct dma_desc *p;
 		struct dma_desc *np;
 
+		entry = next_entry;
+
 		if (priv->extend_desc)
 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
 		else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			 * ignored
 			 */
 			if (frame_len > priv->dma_buf_sz) {
-				netdev_err(priv->dev,
-					   "len %d larger than size (%d)\n",
-					   frame_len, priv->dma_buf_sz);
+				if (net_ratelimit())
+					netdev_err(priv->dev,
+						   "len %d larger than size (%d)\n",
+						   frame_len, priv->dma_buf_sz);
 				priv->dev->stats.rx_length_errors++;
-				break;
+				continue;
 			}
 
 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 					dev_warn(priv->device,
 						 "packet dropped\n");
 					priv->dev->stats.rx_dropped++;
-					break;
+					continue;
 				}
 
 				dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			} else {
 				skb = rx_q->rx_skbuff[entry];
 				if (unlikely(!skb)) {
-					netdev_err(priv->dev,
-						   "%s: Inconsistent Rx chain\n",
-						   priv->dev->name);
+					if (net_ratelimit())
+						netdev_err(priv->dev,
+							   "%s: Inconsistent Rx chain\n",
+							   priv->dev->name);
 					priv->dev->stats.rx_dropped++;
-					break;
+					continue;
 				}
 				prefetch(skb->data - NET_IP_ALIGN);
 				rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			priv->dev->stats.rx_packets++;
 			priv->dev->stats.rx_bytes += frame_len;
 		}
-		entry = next_entry;
 	}
 
 	stmmac_rx_refill(priv, queue);
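The stmmac_rx() rework latches entry from next_entry at the top of each pass and turns the error exits into continue, so a bad descriptor is consumed and the ring keeps advancing instead of stalling. The loop shape in miniature (illustrative values, not driver state):

    #include <stdio.h>

    /* Latch the cursor, advance it, then 'continue' on a bad slot. */
    int main(void)
    {
        int ring[8] = { 10, -1, 20, -1, 30, 40, 50, 60 }; /* -1 = bad */
        int next_entry = 0, count = 0, limit = 6;

        while (count < limit) {
            int entry = next_entry;      /* latched at loop top */

            count++;
            next_entry = (entry + 1) % 8;
            if (ring[entry] < 0)
                continue;                /* consume the slot, keep going */
            printf("rx %d\n", ring[entry]);
        }
        return 0;
    }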
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
 
 	wait_queue_head_t wait_drain;
 	bool destroy;
+	bool tx_disable; /* if true, do not wake up queue again */
 
 	/* Receive buffer allocated by us but manages by NetVSP */
 	void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9a022539d305..fdbeb7070d42 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
 
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->destroy = false;
+	net_device->tx_disable = false;
 
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
 	} else {
 		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-		if (netif_tx_queue_stopped(txq) &&
+		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
 		    (hv_get_avail_to_write_percent(&channel->outbound) >
 		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
 			netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
 	} else if (ret == -EAGAIN) {
 		netif_tx_stop_queue(txq);
 		ndev_ctx->eth_stats.stop_queue++;
-		if (atomic_read(&nvchan->queue_sends) < 1) {
+		if (atomic_read(&nvchan->queue_sends) < 1 &&
+		    !net_device->tx_disable) {
 			netif_tx_wake_queue(txq);
 			ndev_ctx->eth_stats.wake_queue++;
 			ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1a08679f90ce..06393b215102 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
 	rcu_read_unlock();
 }
 
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+			     struct net_device *ndev)
+{
+	nvscdev->tx_disable = false;
+	virt_wmb(); /* ensure queue wake up mechanism is on */
+
+	netif_tx_wake_all_queues(ndev);
+}
+
 static int netvsc_open(struct net_device *net)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
 	rdev = nvdev->extension;
 	if (!rdev->link_state) {
 		netif_carrier_on(net);
-		netif_tx_wake_all_queues(net);
+		netvsc_tx_enable(nvdev, net);
 	}
 
 	if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
 	}
 }
 
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+			      struct net_device *ndev)
+{
+	if (nvscdev) {
+		nvscdev->tx_disable = true;
+		virt_wmb(); /* ensure txq will not wake up after stop */
+	}
+
+	netif_tx_disable(ndev);
+}
+
 static int netvsc_close(struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
 	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 	int ret;
 
-	netif_tx_disable(net);
+	netvsc_tx_disable(nvdev, net);
 
 	/* No need to close rndis filter if it is removed already */
 	if (!nvdev)
@@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
 
 	/* If device was up (receiving) then shutdown */
 	if (netif_running(ndev)) {
-		netif_tx_disable(ndev);
+		netvsc_tx_disable(nvdev, ndev);
 
 		ret = rndis_filter_close(nvdev);
 		if (ret) {
@@ -1906,7 +1926,7 @@ static void netvsc_link_change(struct work_struct *w)
 		if (rdev->link_state) {
 			rdev->link_state = false;
 			netif_carrier_on(net);
-			netif_tx_wake_all_queues(net);
+			netvsc_tx_enable(net_device, net);
 		} else {
 			notify = true;
 		}
@@ -1916,7 +1936,7 @@ static void netvsc_link_change(struct work_struct *w)
1916 if (!rdev->link_state) { 1936 if (!rdev->link_state) {
1917 rdev->link_state = true; 1937 rdev->link_state = true;
1918 netif_carrier_off(net); 1938 netif_carrier_off(net);
1919 netif_tx_stop_all_queues(net); 1939 netvsc_tx_disable(net_device, net);
1920 } 1940 }
1921 kfree(event); 1941 kfree(event);
1922 break; 1942 break;
@@ -1925,7 +1945,7 @@ static void netvsc_link_change(struct work_struct *w)
1925 if (!rdev->link_state) { 1945 if (!rdev->link_state) {
1926 rdev->link_state = true; 1946 rdev->link_state = true;
1927 netif_carrier_off(net); 1947 netif_carrier_off(net);
1928 netif_tx_stop_all_queues(net); 1948 netvsc_tx_disable(net_device, net);
1929 event->event = RNDIS_STATUS_MEDIA_CONNECT; 1949 event->event = RNDIS_STATUS_MEDIA_CONNECT;
1930 spin_lock_irqsave(&ndev_ctx->lock, flags); 1950 spin_lock_irqsave(&ndev_ctx->lock, flags);
1931 list_add(&event->list, &ndev_ctx->reconfig_events); 1951 list_add(&event->list, &ndev_ctx->reconfig_events);
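
The hyperv hunks above close a race between netvsc_close()/netvsc_detach() and the send-completion path: a new tx_disable flag is published with virt_wmb() before the queues are stopped, and the completion path may only wake a queue once queue_sends has drained and the flag is still clear. Below is a minimal userspace model of that publish-then-check ordering, using C11 release/acquire atomics in place of virt_wmb(); all names are illustrative stand-ins for the driver's fields, not the driver itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tx_disable;          /* models net_device->tx_disable */
static atomic_int  queue_sends;         /* models nvchan->queue_sends */

/* Close path: publish the flag before the queues are stopped. */
static void tx_stop(void)
{
        atomic_store_explicit(&tx_disable, true, memory_order_release);
        /* ... netif_tx_disable() would follow here ... */
}

/* Completion path: wake only if sends drained AND tx is still enabled. */
static void send_complete(void)
{
        int remaining = atomic_fetch_sub(&queue_sends, 1) - 1;

        if (remaining < 1 &&
            !atomic_load_explicit(&tx_disable, memory_order_acquire))
                puts("wake queue");     /* ~ netif_tx_wake_queue() */
        else
                puts("queue stays stopped");
}

int main(void)
{
        atomic_store(&queue_sends, 1);
        tx_stop();                      /* close wins the race */
        send_complete();                /* prints "queue stays stopped" */
        return 0;
}

The release store pairs with the acquire load, so a completion that observes the drained counter also observes the disable flag and cannot re-wake a queue the close path just stopped.
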
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..9195f3476b1d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
1203 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1203 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1204 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1204 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1205 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 1205 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1206 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1206 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ 1207 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1207 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1208 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1208 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1209 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..6d1a1abbed27 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,7 @@ static void vrf_setup(struct net_device *dev)
1273 1273
1274 /* default to no qdisc; user can add if desired */ 1274 /* default to no qdisc; user can add if desired */
1275 dev->priv_flags |= IFF_NO_QUEUE; 1275 dev->priv_flags |= IFF_NO_QUEUE;
1276 dev->priv_flags |= IFF_NO_RX_HANDLER;
1276 1277
1277 dev->min_mtu = 0; 1278 dev->min_mtu = 0;
1278 dev->max_mtu = 0; 1279 dev->max_mtu = 0;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2839bb70badf..f0716f6ce41f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
404static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, 404static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
405 struct nvme_ns *ns) 405 struct nvme_ns *ns)
406{ 406{
407 enum nvme_ana_state old;
408
409 mutex_lock(&ns->head->lock); 407 mutex_lock(&ns->head->lock);
410 old = ns->ana_state;
411 ns->ana_grpid = le32_to_cpu(desc->grpid); 408 ns->ana_grpid = le32_to_cpu(desc->grpid);
412 ns->ana_state = desc->state; 409 ns->ana_state = desc->state;
413 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); 410 clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
414 411
415 if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old)) 412 if (nvme_state_is_live(ns->ana_state))
416 nvme_mpath_set_live(ns); 413 nvme_mpath_set_live(ns);
417 mutex_unlock(&ns->head->lock); 414 mutex_unlock(&ns->head->lock);
418} 415}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e7e08889865e..68c49dd67210 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
627 return ret; 627 return ret;
628} 628}
629 629
630static inline void nvme_tcp_end_request(struct request *rq, __le16 status) 630static inline void nvme_tcp_end_request(struct request *rq, u16 status)
631{ 631{
632 union nvme_result res = {}; 632 union nvme_result res = {};
633 633
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2d73b66e3686..b3e765a95af8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
509 509
510 ret = nvmet_p2pmem_ns_enable(ns); 510 ret = nvmet_p2pmem_ns_enable(ns);
511 if (ret) 511 if (ret)
512 goto out_unlock; 512 goto out_dev_disable;
513 513
514 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 514 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
515 nvmet_p2pmem_ns_add_p2p(ctrl, ns); 515 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
550out_dev_put: 550out_dev_put:
551 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 551 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
552 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); 552 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
553 553out_dev_disable:
554 nvmet_ns_dev_disable(ns); 554 nvmet_ns_dev_disable(ns);
555 goto out_unlock; 555 goto out_unlock;
556} 556}
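
The nvmet_ns_enable() fix above is the usual C goto-unwind idiom: each failure jumps to the label that undoes exactly the steps already completed, and the new out_dev_disable label ensures a p2pmem setup failure also tears the device back down instead of only dropping the lock. A generic sketch of the pattern, with hypothetical heap allocations standing in for the namespace setup steps:

#include <stdlib.h>

static int enable_ns(void)
{
        int ret = -1;
        void *dev, *p2p;

        dev = malloc(64);               /* step 1: open the device */
        if (!dev)
                goto out;

        p2p = malloc(64);               /* step 2: p2p memory setup */
        if (!p2p)
                goto out_dev_disable;   /* undo step 1, not step 2 */

        free(p2p);                      /* normal path */
        free(dev);
        return 0;

out_dev_disable:
        free(dev);                      /* pairs with step 1 */
out:
        return ret;
}

int main(void)
{
        return enable_ns();
}
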
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 3e43212d3c1c..bc6ebb51b0bf 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -75,11 +75,11 @@ err:
75 return ret; 75 return ret;
76} 76}
77 77
78static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter) 78static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
79{ 79{
80 bv->bv_page = sg_page_iter_page(iter); 80 bv->bv_page = sg_page(sg);
81 bv->bv_offset = iter->sg->offset; 81 bv->bv_offset = sg->offset;
82 bv->bv_len = PAGE_SIZE - iter->sg->offset; 82 bv->bv_len = sg->length;
83} 83}
84 84
85static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, 85static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
128 128
129static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) 129static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
130{ 130{
131 ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); 131 ssize_t nr_bvec = req->sg_cnt;
132 struct sg_page_iter sg_pg_iter;
133 unsigned long bv_cnt = 0; 132 unsigned long bv_cnt = 0;
134 bool is_sync = false; 133 bool is_sync = false;
135 size_t len = 0, total_len = 0; 134 size_t len = 0, total_len = 0;
136 ssize_t ret = 0; 135 ssize_t ret = 0;
137 loff_t pos; 136 loff_t pos;
138 137 int i;
138 struct scatterlist *sg;
139 139
140 if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC) 140 if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
141 is_sync = true; 141 is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
147 } 147 }
148 148
149 memset(&req->f.iocb, 0, sizeof(struct kiocb)); 149 memset(&req->f.iocb, 0, sizeof(struct kiocb));
150 for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) { 150 for_each_sg(req->sg, sg, req->sg_cnt, i) {
151 nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter); 151 nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
152 len += req->f.bvec[bv_cnt].bv_len; 152 len += req->f.bvec[bv_cnt].bv_len;
153 total_len += req->f.bvec[bv_cnt].bv_len; 153 total_len += req->f.bvec[bv_cnt].bv_len;
154 bv_cnt++; 154 bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
225 225
226static void nvmet_file_execute_rw(struct nvmet_req *req) 226static void nvmet_file_execute_rw(struct nvmet_req *req)
227{ 227{
228 ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); 228 ssize_t nr_bvec = req->sg_cnt;
229 229
230 if (!req->sg_cnt || !nr_bvec) { 230 if (!req->sg_cnt || !nr_bvec) {
231 nvmet_req_complete(req, 0); 231 nvmet_req_complete(req, 0);
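
The io-cmd-file.c change replaces per-page iteration (for_each_sg_page, one PAGE_SIZE-capped bio_vec per page) with per-element iteration (for_each_sg), so each bio_vec now covers a whole scatterlist entry of sg->length bytes and nr_bvec equals req->sg_cnt. A userspace sketch of the per-element walk; struct sg_ent and struct bvec are simplified stand-ins for the kernel types:

#include <stdio.h>

/* Stand-ins for struct scatterlist and struct bio_vec. */
struct sg_ent { void *page; unsigned int offset, length; };
struct bvec   { void *page; unsigned int offset, len; };

static void init_bvec(struct bvec *bv, const struct sg_ent *sg)
{
        bv->page   = sg->page;
        bv->offset = sg->offset;
        bv->len    = sg->length;   /* whole element, not capped at one page */
}

int main(void)
{
        struct sg_ent sgl[2] = {
                { (void *)0x1000,   0, 8192 },   /* spans two pages */
                { (void *)0x4000, 512, 3584 },
        };
        struct bvec bv[2];
        unsigned int i, total = 0;

        for (i = 0; i < 2; i++) {          /* models for_each_sg() */
                init_bvec(&bv[i], &sgl[i]);
                total += bv[i].len;
        }
        printf("2 bvecs, %u bytes\n", total);
        return 0;
}
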
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 224d88634115..d994839a3e24 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
273u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, 273u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
274 enum pcie_link_width *width); 274 enum pcie_link_width *width);
275void __pcie_print_link_status(struct pci_dev *dev, bool verbose); 275void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
276void pcie_report_downtraining(struct pci_dev *dev);
276 277
277/* Single Root I/O Virtualization */ 278/* Single Root I/O Virtualization */
278struct pci_sriov { 279struct pci_sriov {
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index d2eae3b7cc0f..4fa9e3523ee1 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
30{ 30{
31 u16 lnk_ctl; 31 u16 lnk_ctl;
32 32
33 pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
34
33 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); 35 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
34 lnk_ctl |= PCI_EXP_LNKCTL_LBMIE; 36 lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
35 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); 37 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
44 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); 46 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
45} 47}
46 48
47static irqreturn_t pcie_bw_notification_handler(int irq, void *context) 49static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
48{ 50{
49 struct pcie_device *srv = context; 51 struct pcie_device *srv = context;
50 struct pci_dev *port = srv->port; 52 struct pci_dev *port = srv->port;
51 struct pci_dev *dev;
52 u16 link_status, events; 53 u16 link_status, events;
53 int ret; 54 int ret;
54 55
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
58 if (ret != PCIBIOS_SUCCESSFUL || !events) 59 if (ret != PCIBIOS_SUCCESSFUL || !events)
59 return IRQ_NONE; 60 return IRQ_NONE;
60 61
62 pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
63 pcie_update_link_speed(port->subordinate, link_status);
64 return IRQ_WAKE_THREAD;
65}
66
67static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
68{
69 struct pcie_device *srv = context;
70 struct pci_dev *port = srv->port;
71 struct pci_dev *dev;
72
61 /* 73 /*
62 * Print status from downstream devices, not this root port or 74 * Print status from downstream devices, not this root port or
63 * downstream switch port. 75 * downstream switch port.
64 */ 76 */
65 down_read(&pci_bus_sem); 77 down_read(&pci_bus_sem);
66 list_for_each_entry(dev, &port->subordinate->devices, bus_list) 78 list_for_each_entry(dev, &port->subordinate->devices, bus_list)
67 __pcie_print_link_status(dev, false); 79 pcie_report_downtraining(dev);
68 up_read(&pci_bus_sem); 80 up_read(&pci_bus_sem);
69 81
70 pcie_update_link_speed(port->subordinate, link_status);
71 pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
72 return IRQ_HANDLED; 82 return IRQ_HANDLED;
73} 83}
74 84
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
80 if (!pcie_link_bandwidth_notification_supported(srv->port)) 90 if (!pcie_link_bandwidth_notification_supported(srv->port))
81 return -ENODEV; 91 return -ENODEV;
82 92
83 ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler, 93 ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
94 pcie_bw_notification_handler,
84 IRQF_SHARED, "PCIe BW notif", srv); 95 IRQF_SHARED, "PCIe BW notif", srv);
85 if (ret) 96 if (ret)
86 return ret; 97 return ret;
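
The bw_notification rework splits the handler the way request_threaded_irq() intends: the new hard handler (pcie_bw_notification_irq) acks the events and records the link speed in interrupt context, then returns IRQ_WAKE_THREAD so the slow part, which takes pci_bus_sem and prints, runs in the IRQ thread. A compact userspace model of that two-stage flow using pthreads; the condition variable stands in for the thread wakeup (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
static int pending;

/* "Hard" handler: runs with the line masked, must not sleep. */
static void hard_handler(void)
{
        /* ack the event and snapshot cheap hardware state here */
        pthread_mutex_lock(&lock);
        pending = 1;
        pthread_cond_signal(&kick);        /* ~ return IRQ_WAKE_THREAD */
        pthread_mutex_unlock(&lock);
}

/* Threaded handler: may sleep, take semaphores, print. */
static void *thread_fn(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!pending)
                pthread_cond_wait(&kick, &lock);
        pending = 0;
        pthread_mutex_unlock(&lock);
        puts("report downtraining on child devices");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, thread_fn, NULL);
        hard_handler();                    /* "interrupt" fires */
        pthread_join(t, NULL);
        return 0;
}
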
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2ec0df04e0dc..7e12d0163863 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2388 return dev; 2388 return dev;
2389} 2389}
2390 2390
2391static void pcie_report_downtraining(struct pci_dev *dev) 2391void pcie_report_downtraining(struct pci_dev *dev)
2392{ 2392{
2393 if (!pci_is_pcie(dev)) 2393 if (!pci_is_pcie(dev))
2394 return; 2394 return;
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 5163097b43df..4bbd9ede38c8 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
485 struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); 485 struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
486 int new_mode; 486 int new_mode;
487 487
488 if (phy->index != 0) 488 if (phy->index != 0) {
489 if (mode == PHY_MODE_USB_HOST)
490 return 0;
489 return -EINVAL; 491 return -EINVAL;
492 }
490 493
491 switch (mode) { 494 switch (mode) {
492 case PHY_MODE_USB_HOST: 495 case PHY_MODE_USB_HOST:
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4159c63a5fd2..a835b31aad99 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,6 +24,7 @@
24#include <asm/crw.h> 24#include <asm/crw.h>
25#include <asm/isc.h> 25#include <asm/isc.h>
26#include <asm/ebcdic.h> 26#include <asm/ebcdic.h>
27#include <asm/ap.h>
27 28
28#include "css.h" 29#include "css.h"
29#include "cio.h" 30#include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
586 " failed (rc=%d).\n", ret); 587 " failed (rc=%d).\n", ret);
587} 588}
588 589
590static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
591{
592 CIO_CRW_EVENT(3, "chsc: ap config changed\n");
593 if (sei_area->rs != 5)
594 return;
595
596 ap_bus_cfg_chg();
597}
598
589static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 599static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
590{ 600{
591 switch (sei_area->cc) { 601 switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
612 case 2: /* i/o resource accessibility */ 622 case 2: /* i/o resource accessibility */
613 chsc_process_sei_res_acc(sei_area); 623 chsc_process_sei_res_acc(sei_area);
614 break; 624 break;
625 case 3: /* ap config changed */
626 chsc_process_sei_ap_cfg_chg(sei_area);
627 break;
615 case 7: /* channel-path-availability information */ 628 case 7: /* channel-path-availability information */
616 chsc_process_sei_chp_avail(sei_area); 629 chsc_process_sei_chp_avail(sei_area);
617 break; 630 break;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..0b3b9de45c60 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
72{ 72{
73 struct vfio_ccw_private *private; 73 struct vfio_ccw_private *private;
74 struct irb *irb; 74 struct irb *irb;
75 bool is_final;
75 76
76 private = container_of(work, struct vfio_ccw_private, io_work); 77 private = container_of(work, struct vfio_ccw_private, io_work);
77 irb = &private->irb; 78 irb = &private->irb;
78 79
80 is_final = !(scsw_actl(&irb->scsw) &
81 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
79 if (scsw_is_solicited(&irb->scsw)) { 82 if (scsw_is_solicited(&irb->scsw)) {
80 cp_update_scsw(&private->cp, &irb->scsw); 83 cp_update_scsw(&private->cp, &irb->scsw);
81 cp_free(&private->cp); 84 if (is_final)
85 cp_free(&private->cp);
82 } 86 }
83 memcpy(private->io_region->irb_area, irb, sizeof(*irb)); 87 memcpy(private->io_region->irb_area, irb, sizeof(*irb));
84 88
85 if (private->io_trigger) 89 if (private->io_trigger)
86 eventfd_signal(private->io_trigger, 1); 90 eventfd_signal(private->io_trigger, 1);
87 91
88 if (private->mdev) 92 if (private->mdev && is_final)
89 private->state = VFIO_CCW_STATE_IDLE; 93 private->state = VFIO_CCW_STATE_IDLE;
90} 94}
91 95
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e15816ff1265..1546389d71db 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
810 struct ap_device *ap_dev = to_ap_dev(dev); 810 struct ap_device *ap_dev = to_ap_dev(dev);
811 struct ap_driver *ap_drv = ap_dev->drv; 811 struct ap_driver *ap_drv = ap_dev->drv;
812 812
813 /* prepare ap queue device removal */
813 if (is_queue_dev(dev)) 814 if (is_queue_dev(dev))
814 ap_queue_remove(to_ap_queue(dev)); 815 ap_queue_prepare_remove(to_ap_queue(dev));
816
817 /* driver's chance to clean up gracefully */
815 if (ap_drv->remove) 818 if (ap_drv->remove)
816 ap_drv->remove(ap_dev); 819 ap_drv->remove(ap_dev);
817 820
821 /* now do the ap queue device remove */
822 if (is_queue_dev(dev))
823 ap_queue_remove(to_ap_queue(dev));
824
818 /* Remove queue/card from list of active queues/cards */ 825 /* Remove queue/card from list of active queues/cards */
819 spin_lock_bh(&ap_list_lock); 826 spin_lock_bh(&ap_list_lock);
820 if (is_card_dev(dev)) 827 if (is_card_dev(dev))
@@ -861,6 +868,16 @@ void ap_bus_force_rescan(void)
861EXPORT_SYMBOL(ap_bus_force_rescan); 868EXPORT_SYMBOL(ap_bus_force_rescan);
862 869
863/* 870/*
871* A config change has happened, so force an ap bus rescan.
872*/
873void ap_bus_cfg_chg(void)
874{
875 AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
876
877 ap_bus_force_rescan();
878}
879
880/*
864 * hex2bitmap() - parse hex mask string and set bitmap. 881 * hex2bitmap() - parse hex mask string and set bitmap.
865 * Valid strings are "0x012345678" with at least one valid hex number. 882 * Valid strings are "0x012345678" with at least one valid hex number.
866 * Rest of the bitmap to the right is padded with 0. No spaces allowed 883 * Rest of the bitmap to the right is padded with 0. No spaces allowed
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d0059eae5d94..15a98a673c5c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,6 +91,7 @@ enum ap_state {
91 AP_STATE_WORKING, 91 AP_STATE_WORKING,
92 AP_STATE_QUEUE_FULL, 92 AP_STATE_QUEUE_FULL,
93 AP_STATE_SUSPEND_WAIT, 93 AP_STATE_SUSPEND_WAIT,
94 AP_STATE_REMOVE, /* about to be removed from driver */
94 AP_STATE_UNBOUND, /* momentarily not bound to a driver */ 95 AP_STATE_UNBOUND, /* momentarily not bound to a driver */
95 AP_STATE_BORKED, /* broken */ 96 AP_STATE_BORKED, /* broken */
96 NR_AP_STATES 97 NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
252 253
253void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); 254void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
254struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 255struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
256void ap_queue_prepare_remove(struct ap_queue *aq);
255void ap_queue_remove(struct ap_queue *aq); 257void ap_queue_remove(struct ap_queue *aq);
256void ap_queue_suspend(struct ap_device *ap_dev); 258void ap_queue_suspend(struct ap_device *ap_dev);
257void ap_queue_resume(struct ap_device *ap_dev); 259void ap_queue_resume(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba261210c6da..6a340f2c3556 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
420 [AP_EVENT_POLL] = ap_sm_suspend_read, 420 [AP_EVENT_POLL] = ap_sm_suspend_read,
421 [AP_EVENT_TIMEOUT] = ap_sm_nop, 421 [AP_EVENT_TIMEOUT] = ap_sm_nop,
422 }, 422 },
423 [AP_STATE_REMOVE] = {
424 [AP_EVENT_POLL] = ap_sm_nop,
425 [AP_EVENT_TIMEOUT] = ap_sm_nop,
426 },
423 [AP_STATE_UNBOUND] = { 427 [AP_STATE_UNBOUND] = {
424 [AP_EVENT_POLL] = ap_sm_nop, 428 [AP_EVENT_POLL] = ap_sm_nop,
425 [AP_EVENT_TIMEOUT] = ap_sm_nop, 429 [AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
740} 744}
741EXPORT_SYMBOL(ap_flush_queue); 745EXPORT_SYMBOL(ap_flush_queue);
742 746
743void ap_queue_remove(struct ap_queue *aq) 747void ap_queue_prepare_remove(struct ap_queue *aq)
744{ 748{
745 ap_flush_queue(aq); 749 spin_lock_bh(&aq->lock);
750 /* flush queue */
751 __ap_flush_queue(aq);
752 /* set REMOVE state to prevent new messages from being queued */
753 aq->state = AP_STATE_REMOVE;
746 del_timer_sync(&aq->timeout); 754 del_timer_sync(&aq->timeout);
755 spin_unlock_bh(&aq->lock);
756}
747 757
748 /* reset with zero, also clears irq registration */ 758void ap_queue_remove(struct ap_queue *aq)
759{
760 /*
761 * All messages have been flushed and the state is
762 * AP_STATE_REMOVE. Now reset with zero, which also
763 * clears the irq registration and moves the state
764 * to AP_STATE_UNBOUND to signal that this queue
765 * is not used by any driver currently.
766 */
749 spin_lock_bh(&aq->lock); 767 spin_lock_bh(&aq->lock);
750 ap_zapq(aq->qid); 768 ap_zapq(aq->qid);
751 aq->state = AP_STATE_UNBOUND; 769 aq->state = AP_STATE_UNBOUND;
752 spin_unlock_bh(&aq->lock); 770 spin_unlock_bh(&aq->lock);
753} 771}
754EXPORT_SYMBOL(ap_queue_remove);
755 772
756void ap_queue_reinit_state(struct ap_queue *aq) 773void ap_queue_reinit_state(struct ap_queue *aq)
757{ 774{
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
760 ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 777 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
761 spin_unlock_bh(&aq->lock); 778 spin_unlock_bh(&aq->lock);
762} 779}
763EXPORT_SYMBOL(ap_queue_reinit_state);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..689c2af7026a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
586 586
587static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 587static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
588 struct zcrypt_queue *zq, 588 struct zcrypt_queue *zq,
589 struct module **pmod,
589 unsigned int weight) 590 unsigned int weight)
590{ 591{
591 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 592 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
595 atomic_add(weight, &zc->load); 596 atomic_add(weight, &zc->load);
596 atomic_add(weight, &zq->load); 597 atomic_add(weight, &zq->load);
597 zq->request_count++; 598 zq->request_count++;
599 *pmod = zq->queue->ap_dev.drv->driver.owner;
598 return zq; 600 return zq;
599} 601}
600 602
601static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 603static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
602 struct zcrypt_queue *zq, 604 struct zcrypt_queue *zq,
605 struct module *mod,
603 unsigned int weight) 606 unsigned int weight)
604{ 607{
605 struct module *mod = zq->queue->ap_dev.drv->driver.owner;
606
607 zq->request_count--; 608 zq->request_count--;
608 atomic_sub(weight, &zc->load); 609 atomic_sub(weight, &zc->load);
609 atomic_sub(weight, &zq->load); 610 atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
653 unsigned int weight, pref_weight; 654 unsigned int weight, pref_weight;
654 unsigned int func_code; 655 unsigned int func_code;
655 int qid = 0, rc = -ENODEV; 656 int qid = 0, rc = -ENODEV;
657 struct module *mod;
656 658
657 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 659 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
658 660
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
706 pref_weight = weight; 708 pref_weight = weight;
707 } 709 }
708 } 710 }
709 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 711 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
710 spin_unlock(&zcrypt_list_lock); 712 spin_unlock(&zcrypt_list_lock);
711 713
712 if (!pref_zq) { 714 if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
718 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 720 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
719 721
720 spin_lock(&zcrypt_list_lock); 722 spin_lock(&zcrypt_list_lock);
721 zcrypt_drop_queue(pref_zc, pref_zq, weight); 723 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
722 spin_unlock(&zcrypt_list_lock); 724 spin_unlock(&zcrypt_list_lock);
723 725
724out: 726out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
735 unsigned int weight, pref_weight; 737 unsigned int weight, pref_weight;
736 unsigned int func_code; 738 unsigned int func_code;
737 int qid = 0, rc = -ENODEV; 739 int qid = 0, rc = -ENODEV;
740 struct module *mod;
738 741
739 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 742 trace_s390_zcrypt_req(crt, TP_ICARSACRT);
740 743
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
788 pref_weight = weight; 791 pref_weight = weight;
789 } 792 }
790 } 793 }
791 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 794 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
792 spin_unlock(&zcrypt_list_lock); 795 spin_unlock(&zcrypt_list_lock);
793 796
794 if (!pref_zq) { 797 if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
800 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 803 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
801 804
802 spin_lock(&zcrypt_list_lock); 805 spin_lock(&zcrypt_list_lock);
803 zcrypt_drop_queue(pref_zc, pref_zq, weight); 806 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
804 spin_unlock(&zcrypt_list_lock); 807 spin_unlock(&zcrypt_list_lock);
805 808
806out: 809out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
819 unsigned int func_code; 822 unsigned int func_code;
820 unsigned short *domain; 823 unsigned short *domain;
821 int qid = 0, rc = -ENODEV; 824 int qid = 0, rc = -ENODEV;
825 struct module *mod;
822 826
823 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 827 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
824 828
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
865 pref_weight = weight; 869 pref_weight = weight;
866 } 870 }
867 } 871 }
868 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 872 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
869 spin_unlock(&zcrypt_list_lock); 873 spin_unlock(&zcrypt_list_lock);
870 874
871 if (!pref_zq) { 875 if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
881 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 885 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
882 886
883 spin_lock(&zcrypt_list_lock); 887 spin_lock(&zcrypt_list_lock);
884 zcrypt_drop_queue(pref_zc, pref_zq, weight); 888 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
885 spin_unlock(&zcrypt_list_lock); 889 spin_unlock(&zcrypt_list_lock);
886 890
887out: 891out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
932 unsigned int func_code; 936 unsigned int func_code;
933 struct ap_message ap_msg; 937 struct ap_message ap_msg;
934 int qid = 0, rc = -ENODEV; 938 int qid = 0, rc = -ENODEV;
939 struct module *mod;
935 940
936 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 941 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
937 942
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
1000 pref_weight = weight; 1005 pref_weight = weight;
1001 } 1006 }
1002 } 1007 }
1003 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 1008 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
1004 spin_unlock(&zcrypt_list_lock); 1009 spin_unlock(&zcrypt_list_lock);
1005 1010
1006 if (!pref_zq) { 1011 if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
1012 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 1017 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
1013 1018
1014 spin_lock(&zcrypt_list_lock); 1019 spin_lock(&zcrypt_list_lock);
1015 zcrypt_drop_queue(pref_zc, pref_zq, weight); 1020 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
1016 spin_unlock(&zcrypt_list_lock); 1021 spin_unlock(&zcrypt_list_lock);
1017 1022
1018out_free: 1023out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
1033 struct ap_message ap_msg; 1038 struct ap_message ap_msg;
1034 unsigned int domain; 1039 unsigned int domain;
1035 int qid = 0, rc = -ENODEV; 1040 int qid = 0, rc = -ENODEV;
1041 struct module *mod;
1036 1042
1037 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 1043 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
1038 1044
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
1064 pref_weight = weight; 1070 pref_weight = weight;
1065 } 1071 }
1066 } 1072 }
1067 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 1073 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
1068 spin_unlock(&zcrypt_list_lock); 1074 spin_unlock(&zcrypt_list_lock);
1069 1075
1070 if (!pref_zq) { 1076 if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
1076 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 1082 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
1077 1083
1078 spin_lock(&zcrypt_list_lock); 1084 spin_lock(&zcrypt_list_lock);
1079 zcrypt_drop_queue(pref_zc, pref_zq, weight); 1085 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
1080 spin_unlock(&zcrypt_list_lock); 1086 spin_unlock(&zcrypt_list_lock);
1081 1087
1082out: 1088out:
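
All of the zcrypt hunks above fix one use-after-free pattern: zcrypt_drop_queue() used to re-read zq->queue->ap_dev.drv->driver.owner at put time, but the driver may have been unbound since zcrypt_pick_queue() took the module reference, leaving a dangling drv pointer. The fix captures the owner pointer at pick time and hands that same pointer back at drop time. The general rule: release a reference through the pointer you acquired it with, never through a fresh lookup. A userspace refcount stand-in:

#include <stdio.h>

struct module { int refcnt; };

static struct module *module_get(struct module *m)
{
        m->refcnt++;
        return m;          /* caller must keep this exact pointer */
}

static void module_put(struct module *m)
{
        m->refcnt--;
}

int main(void)
{
        struct module drv_owner = { 0 };
        struct module *mod = module_get(&drv_owner);   /* pick time */

        /* ... request runs; the driver binding could change here ... */

        module_put(mod);   /* drop time: use the saved pointer */
        printf("refcnt=%d\n", drv_owner.refcnt);
        return 0;
}
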
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 744a64680d5b..e8fc28dba8df 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
624 add_timer(&erp_action->timer); 624 add_timer(&erp_action->timer);
625} 625}
626 626
627void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
628 int clear, char *dbftag)
629{
630 unsigned long flags;
631 struct zfcp_port *port;
632
633 write_lock_irqsave(&adapter->erp_lock, flags);
634 read_lock(&adapter->port_list_lock);
635 list_for_each_entry(port, &adapter->port_list, list)
636 _zfcp_erp_port_forced_reopen(port, clear, dbftag);
637 read_unlock(&adapter->port_list_lock);
638 write_unlock_irqrestore(&adapter->erp_lock, flags);
639}
640
627static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, 641static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
628 int clear, char *dbftag) 642 int clear, char *dbftag)
629{ 643{
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
1341 struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); 1355 struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
1342 int lun_status; 1356 int lun_status;
1343 1357
1358 if (sdev->sdev_state == SDEV_DEL ||
1359 sdev->sdev_state == SDEV_CANCEL)
1360 continue;
1344 if (zsdev->port != port) 1361 if (zsdev->port != port)
1345 continue; 1362 continue;
1346 /* LUN under port of interest */ 1363 /* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3fce47b0b21b..c6acca521ffe 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
70 char *dbftag); 70 char *dbftag);
71extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); 71extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
72extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); 72extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
73extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
74 int clear, char *dbftag);
73extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); 75extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
74extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); 76extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
75extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); 77extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index db00b5e3abbe..33eddb02ee30 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
239 list_for_each_entry(port, &adapter->port_list, list) { 239 list_for_each_entry(port, &adapter->port_list, list) {
240 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) 240 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
241 zfcp_fc_test_link(port); 241 zfcp_fc_test_link(port);
242 if (!port->d_id)
243 zfcp_erp_port_reopen(port,
244 ZFCP_STATUS_COMMON_ERP_FAILED,
245 "fcrscn1");
246 } 242 }
247 read_unlock_irqrestore(&adapter->port_list_lock, flags); 243 read_unlock_irqrestore(&adapter->port_list_lock, flags);
248} 244}
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
250static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) 246static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
251{ 247{
252 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; 248 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
249 struct zfcp_adapter *adapter = fsf_req->adapter;
253 struct fc_els_rscn *head; 250 struct fc_els_rscn *head;
254 struct fc_els_rscn_page *page; 251 struct fc_els_rscn_page *page;
255 u16 i; 252 u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
263 no_entries = be16_to_cpu(head->rscn_plen) / 260 no_entries = be16_to_cpu(head->rscn_plen) /
264 sizeof(struct fc_els_rscn_page); 261 sizeof(struct fc_els_rscn_page);
265 262
263 if (no_entries > 1) {
264 /* handle failed ports */
265 unsigned long flags;
266 struct zfcp_port *port;
267
268 read_lock_irqsave(&adapter->port_list_lock, flags);
269 list_for_each_entry(port, &adapter->port_list, list) {
270 if (port->d_id)
271 continue;
272 zfcp_erp_port_reopen(port,
273 ZFCP_STATUS_COMMON_ERP_FAILED,
274 "fcrscn1");
275 }
276 read_unlock_irqrestore(&adapter->port_list_lock, flags);
277 }
278
266 for (i = 1; i < no_entries; i++) { 279 for (i = 1; i < no_entries; i++) {
267 /* skip head and start with 1st element */ 280 /* skip head and start with 1st element */
268 page++; 281 page++;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f4f6a07c5222..221d0dfb8493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
368 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 368 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
369 int ret = SUCCESS, fc_ret; 369 int ret = SUCCESS, fc_ret;
370 370
371 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
372 zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
373 zfcp_erp_wait(adapter);
374 }
371 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); 375 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
372 zfcp_erp_wait(adapter); 376 zfcp_erp_wait(adapter);
373 fc_ret = fc_block_scsi_eh(scpnt); 377 fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 1df5171594b8..11fb68d7e60d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
2640 return capacity; 2640 return capacity;
2641} 2641}
2642 2642
2643static inline int aac_pci_offline(struct aac_dev *dev)
2644{
2645 return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
2646}
2647
2643static inline int aac_adapter_check_health(struct aac_dev *dev) 2648static inline int aac_adapter_check_health(struct aac_dev *dev)
2644{ 2649{
2645 if (unlikely(pci_channel_offline(dev->pdev))) 2650 if (unlikely(aac_pci_offline(dev)))
2646 return -1; 2651 return -1;
2647 2652
2648 return (dev)->a_ops.adapter_check_health(dev); 2653 return (dev)->a_ops.adapter_check_health(dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e67e032936ef..78430a7b294c 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
672 return -ETIMEDOUT; 672 return -ETIMEDOUT;
673 } 673 }
674 674
675 if (unlikely(pci_channel_offline(dev->pdev))) 675 if (unlikely(aac_pci_offline(dev)))
676 return -EFAULT; 676 return -EFAULT;
677 677
678 if ((blink = aac_adapter_check_health(dev)) > 0) { 678 if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
772 772
773 spin_unlock_irqrestore(&fibptr->event_lock, flags); 773 spin_unlock_irqrestore(&fibptr->event_lock, flags);
774 774
775 if (unlikely(pci_channel_offline(dev->pdev))) 775 if (unlikely(aac_pci_offline(dev)))
776 return -EFAULT; 776 return -EFAULT;
777 777
778 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; 778 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index dbaa4f131433..3ad997ac3510 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -139,6 +139,7 @@ static const struct {
139 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, 139 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
140 140
141 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, 141 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
142 { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
142}; 143};
143 144
144static void ibmvfc_npiv_login(struct ibmvfc_host *); 145static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1494 if (rsp->flags & FCP_RSP_LEN_VALID) 1495 if (rsp->flags & FCP_RSP_LEN_VALID)
1495 rsp_code = rsp->data.info.rsp_code; 1496 rsp_code = rsp->data.info.rsp_code;
1496 1497
1497 scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) " 1498 scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1498 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", 1499 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1499 cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error, 1500 cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1500 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); 1501 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1501} 1502}
1502 1503
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2022 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " 2023 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2023 "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, 2024 "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2024 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), 2025 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2025 rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, 2026 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2026 fc_rsp->scsi_status); 2027 fc_rsp->scsi_status);
2027 rsp_rc = -EIO; 2028 rsp_rc = -EIO;
2028 } else 2029 } else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2381 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " 2382 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2382 "flags: %x fcp_rsp: %x, scsi_status: %x\n", 2383 "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2383 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), 2384 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2384 rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, 2385 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2385 fc_rsp->scsi_status); 2386 fc_rsp->scsi_status);
2386 rsp_rc = -EIO; 2387 rsp_rc = -EIO;
2387 } else 2388 } else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2755 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 2756 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2756 if (crq->format == IBMVFC_PARTITION_MIGRATED) { 2757 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2757 /* We need to re-setup the interpartition connection */ 2758 /* We need to re-setup the interpartition connection */
2758 dev_info(vhost->dev, "Re-enabling adapter\n"); 2759 dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
2759 vhost->client_migrated = 1; 2760 vhost->client_migrated = 1;
2760 ibmvfc_purge_requests(vhost, DID_REQUEUE); 2761 ibmvfc_purge_requests(vhost, DID_REQUEUE);
2761 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); 2762 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2762 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); 2763 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
2763 } else { 2764 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
2764 dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); 2765 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
2765 ibmvfc_purge_requests(vhost, DID_ERROR); 2766 ibmvfc_purge_requests(vhost, DID_ERROR);
2766 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); 2767 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2767 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); 2768 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
2769 } else {
2770 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
2768 } 2771 }
2769 return; 2772 return;
2770 case IBMVFC_CRQ_CMD_RSP: 2773 case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3348 3351
3349 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", 3352 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3350 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3353 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3351 rsp->status, rsp->error, status); 3354 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
3352 break; 3355 break;
3353 } 3356 }
3354 3357
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
3446 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3449 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3447 3450
3448 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3451 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3449 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error, 3452 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3450 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type, 3453 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
3451 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status); 3454 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
3455 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
3452 break; 3456 break;
3453 } 3457 }
3454 3458
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3619 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; 3623 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
3620 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3624 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3621 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), 3625 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
3622 mad->iu.status, mad->iu.error, 3626 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
3623 ibmvfc_get_fc_type(fc_reason), fc_reason, 3627 ibmvfc_get_fc_type(fc_reason), fc_reason,
3624 ibmvfc_get_ls_explain(fc_explain), fc_explain, status); 3628 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
3625 break; 3629 break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3831 3835
3832 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3836 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3833 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3837 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3834 rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), 3838 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
3835 rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), 3839 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
3836 rsp->fc_explain, status); 3840 ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
3841 status);
3837 break; 3842 break;
3838 } 3843 }
3839 3844
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3959 level += ibmvfc_retry_host_init(vhost); 3964 level += ibmvfc_retry_host_init(vhost);
3960 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", 3965 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3961 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3966 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3962 rsp->status, rsp->error); 3967 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
3963 break; 3968 break;
3964 case IBMVFC_MAD_DRIVER_FAILED: 3969 case IBMVFC_MAD_DRIVER_FAILED:
3965 break; 3970 break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4024 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 4029 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4025 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", 4030 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
4026 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 4031 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4027 rsp->status, rsp->error); 4032 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4028 ibmvfc_free_event(evt); 4033 ibmvfc_free_event(evt);
4029 return; 4034 return;
4030 case IBMVFC_MAD_CRQ_ERROR: 4035 case IBMVFC_MAD_CRQ_ERROR:
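
Most of the ibmvfc hunks are the same one-line bug: the firmware response fields (status, error, fc_type, fc_explain) are big-endian __be16, and the log statements printed them raw, so on little-endian hosts the hex values next to the decoded string were byte-swapped. The fix converts with be16_to_cpu() before formatting. A self-contained illustration using ntohs(), which performs the same conversion in userspace:

#include <arpa/inet.h>   /* ntohs(): same conversion as be16_to_cpu() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t be_status = htons(0x0004);   /* wire/firmware order */

        /* buggy: prints 0x400 on little-endian machines */
        printf("raw:     %#x\n", (unsigned)be_status);
        /* fixed: prints 0x4 everywhere */
        printf("decoded: %#x\n", (unsigned)ntohs(be_status));
        return 0;
}
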
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b81a53c4a9a8..459cc288ba1d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
78 IBMVFC_CRQ_XPORT_EVENT = 0xFF, 78 IBMVFC_CRQ_XPORT_EVENT = 0xFF,
79}; 79};
80 80
81enum ibmvfc_crq_format { 81enum ibmvfc_crq_init_msg {
82 IBMVFC_CRQ_INIT = 0x01, 82 IBMVFC_CRQ_INIT = 0x01,
83 IBMVFC_CRQ_INIT_COMPLETE = 0x02, 83 IBMVFC_CRQ_INIT_COMPLETE = 0x02,
84};
85
86enum ibmvfc_crq_xport_evts {
87 IBMVFC_PARTNER_FAILED = 0x01,
88 IBMVFC_PARTNER_DEREGISTER = 0x02,
84 IBMVFC_PARTITION_MIGRATED = 0x06, 89 IBMVFC_PARTITION_MIGRATED = 0x06,
85}; 90};
86 91
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e57774472e75..1d8c584ec1e9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3281 3281
3282 if (smid < ioc->hi_priority_smid) { 3282 if (smid < ioc->hi_priority_smid) {
3283 struct scsiio_tracker *st; 3283 struct scsiio_tracker *st;
3284 void *request;
3284 3285
3285 st = _get_st_from_smid(ioc, smid); 3286 st = _get_st_from_smid(ioc, smid);
3286 if (!st) { 3287 if (!st) {
3287 _base_recovery_check(ioc); 3288 _base_recovery_check(ioc);
3288 return; 3289 return;
3289 } 3290 }
3291
3292 /* Clear MPI request frame */
3293 request = mpt3sas_base_get_msg_frame(ioc, smid);
3294 memset(request, 0, ioc->request_sz);
3295
3290 mpt3sas_base_clear_st(ioc, st); 3296 mpt3sas_base_clear_st(ioc, st);
3291 _base_recovery_check(ioc); 3297 _base_recovery_check(ioc);
3292 return; 3298 return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8bb5b8f9f4d2..1ccfbc7eebe0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1462{ 1462{
1463 struct scsi_cmnd *scmd = NULL; 1463 struct scsi_cmnd *scmd = NULL;
1464 struct scsiio_tracker *st; 1464 struct scsiio_tracker *st;
1465 Mpi25SCSIIORequest_t *mpi_request;
1465 1466
1466 if (smid > 0 && 1467 if (smid > 0 &&
1467 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { 1468 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1468 u32 unique_tag = smid - 1; 1469 u32 unique_tag = smid - 1;
1469 1470
1471 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1472
1473 /*
1474 * If a SCSI IO request is outstanding at the driver level,
1475 * the DevHandle field must be non-zero. If DevHandle is
1476 * zero, this smid is free at the driver level, so
1477 * return NULL.
1478 */
1479 if (!mpi_request->DevHandle)
1480 return scmd;
1481
1470 scmd = scsi_host_find_tag(ioc->shost, unique_tag); 1482 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1471 if (scmd) { 1483 if (scmd) {
1472 st = scsi_cmd_priv(scmd); 1484 st = scsi_cmd_priv(scmd);
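
These two mpt3sas hunks cooperate: mpt3sas_base_free_smid() now zeroes the MPI request frame when a smid is released, which makes DevHandle == 0 a reliable "this smid is free at driver level" sentinel that mpt3sas_scsih_scsi_lookup_get() checks before trusting the block-layer tag lookup. A sketch of the zero-on-free / check-on-lookup pairing, with array bounds and field names simplified:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mpi_req { uint16_t dev_handle; /* 0 = slot free */ };

static struct mpi_req frames[4];

static void free_smid(unsigned int smid)
{
        memset(&frames[smid], 0, sizeof(frames[smid]));  /* clear frame */
}

static struct mpi_req *lookup(unsigned int smid)
{
        if (!frames[smid].dev_handle)    /* freed at driver level */
                return NULL;
        return &frames[smid];
}

int main(void)
{
        frames[1].dev_handle = 0x9;      /* outstanding I/O */
        printf("before free: %p\n", (void *)lookup(1));
        free_smid(1);
        printf("after free:  %p\n", (void *)lookup(1));
        return 0;
}
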
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 16a18d5d856f..6e4f4931ae17 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
3203 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 3203 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
3204 return -EINVAL; 3204 return -EINVAL;
3205 ep = iscsi_lookup_endpoint(transport_fd); 3205 ep = iscsi_lookup_endpoint(transport_fd);
3206 if (!ep)
3207 return -EINVAL;
3206 conn = cls_conn->dd_data; 3208 conn = cls_conn->dd_data;
3207 qla_conn = conn->dd_data; 3209 qla_conn = conn->dd_data;
3208 qla_conn->qla_ep = ep->dd_data; 3210 qla_conn->qla_ep = ep->dd_data;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6a9040faed00..3b119ca0cc0c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
771 771
772 mutex_lock(&sdev->state_mutex); 772 mutex_lock(&sdev->state_mutex);
773 ret = scsi_device_set_state(sdev, state); 773 ret = scsi_device_set_state(sdev, state);
774 /*
775 * If the device state changes to SDEV_RUNNING, we need to run
776 * the queue to avoid I/O hang.
777 */
778 if (ret == 0 && state == SDEV_RUNNING)
779 blk_mq_run_hw_queues(sdev->request_queue, true);
774 mutex_unlock(&sdev->state_mutex); 780 mutex_unlock(&sdev->state_mutex);
775 781
776 return ret == 0 ? count : -EINVAL; 782 return ret == 0 ? count : -EINVAL;
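
The store_state_field() fix addresses a hang: requests queued while a device was blocked stay parked, and flipping the state back to SDEV_RUNNING by itself never redispatches them, so writing "running" to the sysfs state attribute must also kick the hardware queues. A toy model of why the explicit kick is needed (run_hw_queues() stands in for blk_mq_run_hw_queues()):

#include <stdbool.h>
#include <stdio.h>

static bool running;
static int  parked_requests = 3;

static void run_hw_queues(void)     /* ~ blk_mq_run_hw_queues() */
{
        if (running) {
                printf("dispatching %d parked requests\n", parked_requests);
                parked_requests = 0;
        }
}

static void set_state_running(void)
{
        running = true;
        run_hw_queues();   /* without this kick the requests sit forever */
}

int main(void)
{
        set_state_running();
        return 0;
}
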
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 251db30d0882..2b2bc4b49d78 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
1415 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 1415 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1416 } 1416 }
1417 1417
1418 /*
1419 * XXX and what if there are packets in flight and this close()
1420 * XXX is followed by a "rmmod sd_mod"?
1421 */
1422
1423 scsi_disk_put(sdkp); 1418 scsi_disk_put(sdkp);
1424} 1419}
1425 1420
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3076 unsigned int opt_xfer_bytes = 3071 unsigned int opt_xfer_bytes =
3077 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3072 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3078 3073
3074 if (sdkp->opt_xfer_blocks == 0)
3075 return false;
3076
3079 if (sdkp->opt_xfer_blocks > dev_max) { 3077 if (sdkp->opt_xfer_blocks > dev_max) {
3080 sd_first_printk(KERN_WARNING, sdkp, 3078 sd_first_printk(KERN_WARNING, sdkp,
3081 "Optimal transfer size %u logical blocks " \ 3079 "Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
3505{ 3503{
3506 struct scsi_disk *sdkp = to_scsi_disk(dev); 3504 struct scsi_disk *sdkp = to_scsi_disk(dev);
3507 struct gendisk *disk = sdkp->disk; 3505 struct gendisk *disk = sdkp->disk;
3508 3506 struct request_queue *q = disk->queue;
3507
3509 ida_free(&sd_index_ida, sdkp->index); 3508 ida_free(&sd_index_ida, sdkp->index);
3510 3509
3510 /*
3511 * Wait until all requests that are in progress have completed.
3512 * This prevents e.g. scsi_end_request() from crashing after the
3513 * disk->private_data pointer has been cleared. Wait from inside
3514 * scsi_disk_release() instead of from sd_release() so that
3515 * freezing and unfreezing the request queue does not affect user space I/O
3516 * in case multiple processes open a /dev/sd... node concurrently.
3517 */
3518 blk_mq_freeze_queue(q);
3519 blk_mq_unfreeze_queue(q);
3520
3511 disk->private_data = NULL; 3521 disk->private_data = NULL;
3512 put_disk(disk); 3522 put_disk(disk);
3513 put_device(&sdkp->device->sdev_gendev); 3523 put_device(&sdkp->device->sdev_gendev);
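
The scsi_disk_release() fix uses blk_mq_freeze_queue() immediately followed by blk_mq_unfreeze_queue() purely as a drain barrier: freezing waits until every in-flight request has completed, so clearing disk->private_data afterwards cannot race scsi_end_request(). Conceptually it is "wait for the in-flight count to reach zero before freeing", as in this userspace sketch (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  idle = PTHREAD_COND_INITIALIZER;
static int in_flight;

static void request_done(void)
{
        pthread_mutex_lock(&lock);
        if (--in_flight == 0)
                pthread_cond_signal(&idle);
        pthread_mutex_unlock(&lock);
}

/* Models freeze + unfreeze used as a drain barrier. */
static void drain(void)
{
        pthread_mutex_lock(&lock);
        while (in_flight)
                pthread_cond_wait(&idle, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        in_flight = 1;
        request_done();          /* last request completes */
        drain();                 /* now safe to clear private_data */
        puts("drained; safe to free");
        return 0;
}
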
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 9351349cf0a9..1e0041ec8132 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -150,7 +150,12 @@ struct bcm2835_power {
150 150
151static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) 151static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
152{ 152{
153 u64 start = ktime_get_ns(); 153 u64 start;
154
155 if (!reg)
156 return 0;
157
158 start = ktime_get_ns();
154 159
155 /* Enable the module's async AXI bridges. */ 160 /* Enable the module's async AXI bridges. */
156 ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); 161 ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
165 170
166static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) 171static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
167{ 172{
168 u64 start = ktime_get_ns(); 173 u64 start;
174
175 if (!reg)
176 return 0;
177
178 start = ktime_get_ns();
169 179
170 /* Disable the module's async AXI bridges. */ 180 /* Disable the module's async AXI bridges. */
171 ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); 181 ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
475 } 485 }
476} 486}
477 487
478static void 488static int
479bcm2835_init_power_domain(struct bcm2835_power *power, 489bcm2835_init_power_domain(struct bcm2835_power *power,
480 int pd_xlate_index, const char *name) 490 int pd_xlate_index, const char *name)
481{ 491{
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
483 struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index]; 493 struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
484 494
485 dom->clk = devm_clk_get(dev->parent, name); 495 dom->clk = devm_clk_get(dev->parent, name);
496 if (IS_ERR(dom->clk)) {
497 int ret = PTR_ERR(dom->clk);
498
499 if (ret == -EPROBE_DEFER)
500 return ret;
501
502 /* Some domains don't have a clk, so make sure that we
503 * don't deref an error pointer later.
504 */
505 dom->clk = NULL;
506 }
486 507
487 dom->base.name = name; 508 dom->base.name = name;
488 dom->base.power_on = bcm2835_power_pd_power_on; 509 dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
495 pm_genpd_init(&dom->base, NULL, true); 516 pm_genpd_init(&dom->base, NULL, true);
496 517
497 power->pd_xlate.domains[pd_xlate_index] = &dom->base; 518 power->pd_xlate.domains[pd_xlate_index] = &dom->base;
519
520 return 0;
498} 521}
499 522
500/** bcm2835_reset_reset - Resets a block that has a reset line in the 523/** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
592 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 }, 615 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
593 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 }, 616 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
594 }; 617 };
595 int ret, i; 618 int ret = 0, i;
596 u32 id; 619 u32 id;
597 620
598 power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL); 621 power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
619 642
620 power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names); 643 power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
621 644
622 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) 645 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
623 bcm2835_init_power_domain(power, i, power_domain_names[i]); 646 ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
647 if (ret)
648 goto fail;
649 }
624 650
625 for (i = 0; i < ARRAY_SIZE(domain_deps); i++) { 651 for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
626 pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base, 652 pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
634 660
635 ret = devm_reset_controller_register(dev, &power->reset); 661 ret = devm_reset_controller_register(dev, &power->reset);
636 if (ret) 662 if (ret)
637 return ret; 663 goto fail;
638 664
639 of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate); 665 of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
640 666
641 dev_info(dev, "Broadcom BCM2835 power domains driver"); 667 dev_info(dev, "Broadcom BCM2835 power domains driver");
642 return 0; 668 return 0;
669
670fail:
671 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
672 struct generic_pm_domain *dom = &power->domains[i].base;
673
674 if (dom->name)
675 pm_genpd_remove(dom);
676 }
677 return ret;
643} 678}
644 679
645static int bcm2835_power_remove(struct platform_device *pdev) 680static int bcm2835_power_remove(struct platform_device *pdev)
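
Two idioms in the bcm2835-power change deserve a note. First, devm_clk_get() fails either because the clock provider has not probed yet (-EPROBE_DEFER, which must be propagated so the probe is retried) or because the domain genuinely has no clock, in which case the pointer is replaced with NULL: the common clock API treats a NULL clk as a no-op, so later clk_prepare_enable()/clk_disable_unprepare() calls stay safe. Second, the new fail: unwind walks the same array the loop initialized and uses dom->name as the "this slot was set up" marker, so partially initialized probes tear down cleanly. A sketch of the clock policy as a helper (the helper name is mine; later kernels added devm_clk_get_optional() with similar semantics):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    /* Hypothetical helper: optional clock lookup that still defers. */
    static struct clk *my_devm_clk_get_optional(struct device *dev,
                                                const char *name)
    {
            struct clk *clk = devm_clk_get(dev, name);

            if (IS_ERR(clk)) {
                    if (PTR_ERR(clk) == -EPROBE_DEFER)
                            return clk;     /* provider not ready: retry probe */
                    clk = NULL;             /* no clock: NULL clk is a no-op */
            }
            return clk;
    }
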
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index c0901b96cfe4..62951e836cbc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
114 114
115source "drivers/staging/mt7621-mmc/Kconfig" 115source "drivers/staging/mt7621-mmc/Kconfig"
116 116
117source "drivers/staging/mt7621-eth/Kconfig"
118
119source "drivers/staging/mt7621-dts/Kconfig" 117source "drivers/staging/mt7621-dts/Kconfig"
120 118
121source "drivers/staging/gasket/Kconfig" 119source "drivers/staging/gasket/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 57c6bce13ff4..d1b17ddcd354 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
47obj-$(CONFIG_SOC_MT7621) += mt7621-dma/ 47obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
48obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ 48obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
49obj-$(CONFIG_MTK_MMC) += mt7621-mmc/ 49obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
50obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
51obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ 50obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
52obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ 51obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
53obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ 52obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 687537203d9c..d9725888af6f 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4config XIL_AXIS_FIFO 4config XIL_AXIS_FIFO
5 tristate "Xilinx AXI-Stream FIFO IP core driver" 5 tristate "Xilinx AXI-Stream FIFO IP core driver"
6 depends on OF
6 default n 7 default n
7 help 8 help
8 This adds support for the Xilinx AXI-Stream 9 This adds support for the Xilinx AXI-Stream
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index a7d569cfca5d..0dff1ac057cd 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
1001 unsigned int mask); 1001 unsigned int mask);
1002unsigned int comedi_dio_update_state(struct comedi_subdevice *s, 1002unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
1003 unsigned int *data); 1003 unsigned int *data);
1004unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
1005 struct comedi_cmd *cmd);
1004unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s); 1006unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
1005unsigned int comedi_nscans_left(struct comedi_subdevice *s, 1007unsigned int comedi_nscans_left(struct comedi_subdevice *s,
1006 unsigned int nscans); 1008 unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index eefa62f42c0f..5a32b8fc000e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
394EXPORT_SYMBOL_GPL(comedi_dio_update_state); 394EXPORT_SYMBOL_GPL(comedi_dio_update_state);
395 395
396/** 396/**
397 * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes 397 * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
398 * bytes
398 * @s: COMEDI subdevice. 399 * @s: COMEDI subdevice.
400 * @cmd: COMEDI command.
399 * 401 *
400 * Determines the overall scan length according to the subdevice type and the 402 * Determines the overall scan length according to the subdevice type and the
401 * number of channels in the scan. 403 * number of channels in the scan for the specified command.
402 * 404 *
403 * For digital input, output or input/output subdevices, samples for 405 * For digital input, output or input/output subdevices, samples for
404 * multiple channels are assumed to be packed into one or more unsigned 406 * multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
408 * 410 *
409 * Returns the overall scan length in bytes. 411 * Returns the overall scan length in bytes.
410 */ 412 */
411unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) 413unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
414 struct comedi_cmd *cmd)
412{ 415{
413 struct comedi_cmd *cmd = &s->async->cmd;
414 unsigned int num_samples; 416 unsigned int num_samples;
415 unsigned int bits_per_sample; 417 unsigned int bits_per_sample;
416 418
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
427 } 429 }
428 return comedi_samples_to_bytes(s, num_samples); 430 return comedi_samples_to_bytes(s, num_samples);
429} 431}
432EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
433
434/**
435 * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
436 * @s: COMEDI subdevice.
437 *
438 * Determines the overall scan length according to the subdevice type and the
439 * number of channels in the scan for the current command.
440 *
441 * For digital input, output or input/output subdevices, samples for
442 * multiple channels are assumed to be packed into one or more unsigned
443 * short or unsigned int values according to the subdevice's %SDF_LSAMPL
444 * flag. For other types of subdevice, samples are assumed to occupy a
445 * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
446 *
447 * Returns the overall scan length in bytes.
448 */
449unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
450{
451 struct comedi_cmd *cmd = &s->async->cmd;
452
453 return comedi_bytes_per_scan_cmd(s, cmd);
454}
430EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); 455EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
431 456
432static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, 457static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
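
The comedi refactor above follows a common kernel shape: comedi_bytes_per_scan() assumed the subdevice's already-accepted command (s->async->cmd), but command-test paths need the scan length of a command that is still being validated, so the computation moves into comedi_bytes_per_scan_cmd(), which takes the command explicitly, and the old entry point becomes a thin wrapper. The shape in miniature (types and names illustrative, not the comedi structures):

    struct cmd { unsigned int chanlist_len; };
    struct subdev { struct cmd cur_cmd; unsigned int sample_bytes; };

    /* worker: parameterized by the command under consideration */
    static unsigned int bytes_per_scan_cmd(struct subdev *s, struct cmd *cmd)
    {
            return cmd->chanlist_len * s->sample_bytes;
    }

    /* old entry point: defers to the accepted command */
    static unsigned int bytes_per_scan(struct subdev *s)
    {
            return bytes_per_scan_cmd(s, &s->cur_cmd);
    }
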
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5edf59ac6706..b04dad8c7092 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
3545 struct comedi_subdevice *s, struct comedi_cmd *cmd) 3545 struct comedi_subdevice *s, struct comedi_cmd *cmd)
3546{ 3546{
3547 struct ni_private *devpriv = dev->private; 3547 struct ni_private *devpriv = dev->private;
3548 unsigned int bytes_per_scan;
3548 int err = 0; 3549 int err = 0;
3549 3550
3550 /* Step 1 : check if triggers are trivially valid */ 3551 /* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
3579 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); 3580 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
3580 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, 3581 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
3581 cmd->chanlist_len); 3582 cmd->chanlist_len);
3582 err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 3583 bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
3583 s->async->prealloc_bufsz / 3584 if (bytes_per_scan) {
3584 comedi_bytes_per_scan(s)); 3585 err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
3586 s->async->prealloc_bufsz /
3587 bytes_per_scan);
3588 }
3585 3589
3586 if (err) 3590 if (err)
3587 return 3; 3591 return 3;
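
The rewritten check matters because comedi_bytes_per_scan_cmd() can legitimately return 0 for a command that is still being assembled (an empty channel list, for instance), and the old code divided s->async->prealloc_bufsz by it unconditionally. Guarding the divisor is the whole fix; a standalone illustration in plain C (function name and values are mine):

    #include <stdio.h>

    static unsigned int clamp_stop_count(unsigned int stop_arg,
                                         unsigned int bufsz,
                                         unsigned int bytes_per_scan)
    {
            /* A zero divisor means no limit can be derived; skip the clamp. */
            if (bytes_per_scan == 0)
                    return stop_arg;

            unsigned int max_scans = bufsz / bytes_per_scan;

            return stop_arg > max_scans ? max_scans : stop_arg;
    }

    int main(void)
    {
            printf("%u\n", clamp_stop_count(1000, 4096, 16)); /* 256 */
            printf("%u\n", clamp_stop_count(1000, 4096, 0));  /* 1000 */
            return 0;
    }
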
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 829f7b12e0dc..9bbc68729c11 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
23 [EROFS_FT_SYMLINK] = DT_LNK, 23 [EROFS_FT_SYMLINK] = DT_LNK,
24}; 24};
25 25
26static void debug_one_dentry(unsigned char d_type, const char *de_name,
27 unsigned int de_namelen)
28{
29#ifdef CONFIG_EROFS_FS_DEBUG
 30	/* since the on-disk name may not have a trailing '\0' */
31 unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
32
33 memcpy(dbg_namebuf, de_name, de_namelen);
34 dbg_namebuf[de_namelen] = '\0';
35
36 debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
37 de_namelen, d_type);
38#endif
39}
40
26static int erofs_fill_dentries(struct dir_context *ctx, 41static int erofs_fill_dentries(struct dir_context *ctx,
27 void *dentry_blk, unsigned int *ofs, 42 void *dentry_blk, unsigned int *ofs,
28 unsigned int nameoff, unsigned int maxsize) 43 unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
33 de = dentry_blk + *ofs; 48 de = dentry_blk + *ofs;
34 while (de < end) { 49 while (de < end) {
35 const char *de_name; 50 const char *de_name;
36 int de_namelen; 51 unsigned int de_namelen;
37 unsigned char d_type; 52 unsigned char d_type;
38#ifdef CONFIG_EROFS_FS_DEBUG
39 unsigned int dbg_namelen;
40 unsigned char dbg_namebuf[EROFS_NAME_LEN];
41#endif
42 53
43 if (unlikely(de->file_type < EROFS_FT_MAX)) 54 if (de->file_type < EROFS_FT_MAX)
44 d_type = erofs_filetype_table[de->file_type]; 55 d_type = erofs_filetype_table[de->file_type];
45 else 56 else
46 d_type = DT_UNKNOWN; 57 d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
48 nameoff = le16_to_cpu(de->nameoff); 59 nameoff = le16_to_cpu(de->nameoff);
49 de_name = (char *)dentry_blk + nameoff; 60 de_name = (char *)dentry_blk + nameoff;
50 61
51 de_namelen = unlikely(de + 1 >= end) ? 62 /* the last dirent in the block? */
52 /* last directory entry */ 63 if (de + 1 >= end)
53 strnlen(de_name, maxsize - nameoff) : 64 de_namelen = strnlen(de_name, maxsize - nameoff);
54 le16_to_cpu(de[1].nameoff) - nameoff; 65 else
66 de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
55 67
56 /* a corrupted entry is found */ 68 /* a corrupted entry is found */
57 if (unlikely(de_namelen < 0)) { 69 if (unlikely(nameoff + de_namelen > maxsize ||
70 de_namelen > EROFS_NAME_LEN)) {
58 DBG_BUGON(1); 71 DBG_BUGON(1);
59 return -EIO; 72 return -EIO;
60 } 73 }
61 74
62#ifdef CONFIG_EROFS_FS_DEBUG 75 debug_one_dentry(d_type, de_name, de_namelen);
63 dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
64 memcpy(dbg_namebuf, de_name, dbg_namelen);
65 dbg_namebuf[dbg_namelen] = '\0';
66
67 debugln("%s, found de_name %s de_len %d d_type %d", __func__,
68 dbg_namebuf, de_namelen, d_type);
69#endif
70
71 if (!dir_emit(ctx, de_name, de_namelen, 76 if (!dir_emit(ctx, de_name, de_namelen,
72 le64_to_cpu(de->nid), d_type)) 77 le64_to_cpu(de->nid), d_type))
73 /* stopped by some reason */ 78 /* stopped by some reason */
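
The debug_one_dentry() helper extracted above exists because erofs stores directory names without a trailing NUL; a name's length is implied by the next entry's nameoff. Printing one therefore means copying at most EROFS_NAME_LEN bytes into a buffer one byte larger and terminating it by hand, which the old inline #ifdef block got slightly wrong by sizing the buffer at EROFS_NAME_LEN and truncating maximum-length names. A standalone sketch of the bounded copy (255 is the erofs name limit):

    #include <stdio.h>
    #include <string.h>

    #define EROFS_NAME_LEN 255

    static void print_dirent_name(const char *de_name, unsigned int de_namelen)
    {
            char buf[EROFS_NAME_LEN + 1];   /* +1 for the added terminator */

            if (de_namelen > EROFS_NAME_LEN)        /* corrupt entry */
                    return;

            memcpy(buf, de_name, de_namelen);
            buf[de_namelen] = '\0';
            printf("found dirent %s de_len %u\n", buf, de_namelen);
    }

    int main(void)
    {
            const char raw[] = { 'f', 'i', 'l', 'e' }; /* no trailing '\0' */

            print_dirent_name(raw, sizeof(raw));
            return 0;
    }
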
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8715bc50e09c..31eef8395774 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -972,6 +972,7 @@ repeat:
972 overlapped = false; 972 overlapped = false;
973 compressed_pages = grp->compressed_pages; 973 compressed_pages = grp->compressed_pages;
974 974
975 err = 0;
975 for (i = 0; i < clusterpages; ++i) { 976 for (i = 0; i < clusterpages; ++i) {
976 unsigned int pagenr; 977 unsigned int pagenr;
977 978
@@ -981,26 +982,39 @@ repeat:
981 DBG_BUGON(!page); 982 DBG_BUGON(!page);
982 DBG_BUGON(!page->mapping); 983 DBG_BUGON(!page->mapping);
983 984
984 if (z_erofs_is_stagingpage(page)) 985 if (!z_erofs_is_stagingpage(page)) {
985 continue;
986#ifdef EROFS_FS_HAS_MANAGED_CACHE 986#ifdef EROFS_FS_HAS_MANAGED_CACHE
987 if (page->mapping == MNGD_MAPPING(sbi)) { 987 if (page->mapping == MNGD_MAPPING(sbi)) {
988 DBG_BUGON(!PageUptodate(page)); 988 if (unlikely(!PageUptodate(page)))
989 continue; 989 err = -EIO;
990 } 990 continue;
991 }
991#endif 992#endif
992 993
993 /* only non-head page could be reused as a compressed page */ 994 /*
 994 	pagenr = z_erofs_onlinepage_index(page); 995	 * only a non-head page can be selected
996 * for inplace decompression
997 */
998 pagenr = z_erofs_onlinepage_index(page);
995 999
996 DBG_BUGON(pagenr >= nr_pages); 1000 DBG_BUGON(pagenr >= nr_pages);
997 DBG_BUGON(pages[pagenr]); 1001 DBG_BUGON(pages[pagenr]);
998 ++sparsemem_pages; 1002 ++sparsemem_pages;
999 pages[pagenr] = page; 1003 pages[pagenr] = page;
1000 1004
1001 overlapped = true; 1005 overlapped = true;
1006 }
1007
 1008		/* PG_error needs checking for in-place and staging pages */
1009 if (unlikely(PageError(page))) {
1010 DBG_BUGON(PageUptodate(page));
1011 err = -EIO;
1012 }
1002 } 1013 }
1003 1014
1015 if (unlikely(err))
1016 goto out;
1017
1004 llen = (nr_pages << PAGE_SHIFT) - work->pageofs; 1018 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
1005 1019
1006 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) { 1020 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ repeat:
1029 1043
1030skip_allocpage: 1044skip_allocpage:
1031 vout = erofs_vmap(pages, nr_pages); 1045 vout = erofs_vmap(pages, nr_pages);
1046 if (!vout) {
1047 err = -ENOMEM;
1048 goto out;
1049 }
1032 1050
1033 err = z_erofs_vle_unzip_vmap(compressed_pages, 1051 err = z_erofs_vle_unzip_vmap(compressed_pages,
1034 clusterpages, vout, llen, work->pageofs, overlapped); 1052 clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ repeat:
1194 if (page->mapping == mc) { 1212 if (page->mapping == mc) {
1195 WRITE_ONCE(grp->compressed_pages[nr], page); 1213 WRITE_ONCE(grp->compressed_pages[nr], page);
1196 1214
1215 ClearPageError(page);
1197 if (!PagePrivate(page)) { 1216 if (!PagePrivate(page)) {
1198 /* 1217 /*
1199 * impossible to be !PagePrivate(page) for 1218 * impossible to be !PagePrivate(page) for
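
The decompression loop above now records failures instead of asserting on them: a managed-cache page that is not PageUptodate, or any in-place/staging page with PG_error set, becomes err = -EIO, and the loop still classifies every remaining page before the single bail-out. Deferring the exit keeps pages[] and sparsemem_pages consistent regardless of which page failed. The accumulate-then-bail shape, reduced to standalone C (stub types, not the erofs structures):

    #include <errno.h>
    #include <stdio.h>

    struct page_stub {
            int uptodate;
            int error;
    };

    /* Scan everything, remember the failure, act once at the end. */
    static int check_cluster(const struct page_stub *pages, int n)
    {
            int err = 0;
            int i;

            for (i = 0; i < n; i++)
                    if (!pages[i].uptodate || pages[i].error)
                            err = -EIO;     /* keep scanning regardless */

            return err;     /* caller decompresses only when this is 0 */
    }

    int main(void)
    {
            struct page_stub p[2] = { { 1, 0 }, { 1, 1 } };

            printf("%d\n", check_cluster(p, 2));    /* prints -5 */
            return 0;
    }
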
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 48b263a2731a..0daac9b984a8 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
136 136
137 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE); 137 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
138 138
139 if (clusterpages == 1) 139 if (clusterpages == 1) {
140 vin = kmap_atomic(compressed_pages[0]); 140 vin = kmap_atomic(compressed_pages[0]);
141 else 141 } else {
142 vin = erofs_vmap(compressed_pages, clusterpages); 142 vin = erofs_vmap(compressed_pages, clusterpages);
143 if (!vin)
144 return -ENOMEM;
145 }
143 146
144 preempt_disable(); 147 preempt_disable();
145 vout = erofs_pcpubuf[smp_processor_id()].data; 148 vout = erofs_pcpubuf[smp_processor_id()].data;
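
The brace change above carries the actual fix: kmap_atomic() on a single page cannot fail, but mapping a multi-page cluster goes through erofs_vmap(), which allocates and can return NULL, and that branch previously had no error check. The fixed control flow as a kernel-flavoured sketch (not the literal erofs function; erofs_vmap() is the driver's wrapper around vmap/vm_map_ram):

    static int map_compressed_input(struct page **pages, unsigned int nr,
                                    void **vin)
    {
            if (nr == 1) {
                    *vin = kmap_atomic(pages[0]);   /* cannot fail */
                    return 0;
            }

            *vin = erofs_vmap(pages, nr);           /* allocates, may fail */
            if (!*vin)
                    return -ENOMEM;

            return 0;
    }
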
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index b73385540216..250c15ace2a7 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -117,22 +117,6 @@
117 status = "okay"; 117 status = "okay";
118}; 118};
119 119
120&ethernet {
121 //mtd-mac-address = <&factory 0xe000>;
122 gmac1: mac@0 {
123 compatible = "mediatek,eth-mac";
124 reg = <0>;
125 phy-handle = <&phy1>;
126 };
127
128 mdio-bus {
129 phy1: ethernet-phy@1 {
130 reg = <1>;
131 phy-mode = "rgmii";
132 };
133 };
134};
135
136&pinctrl { 120&pinctrl {
137 state_default: pinctrl0 { 121 state_default: pinctrl0 {
138 gpio { 122 gpio {
@@ -141,3 +125,16 @@
141 }; 125 };
142 }; 126 };
143}; 127};
128
129&switch0 {
130 ports {
131 port@0 {
132 label = "ethblack";
133 status = "ok";
134 };
135 port@4 {
136 label = "ethblue";
137 status = "ok";
138 };
139 };
140};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6aff3680ce4b..17020e24abd2 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -372,16 +372,83 @@
372 372
373 mediatek,ethsys = <&ethsys>; 373 mediatek,ethsys = <&ethsys>;
374 374
375 mediatek,switch = <&gsw>;
376 375
376 gmac0: mac@0 {
377 compatible = "mediatek,eth-mac";
378 reg = <0>;
379 phy-mode = "rgmii";
380 fixed-link {
381 speed = <1000>;
382 full-duplex;
383 pause;
384 };
385 };
386 gmac1: mac@1 {
387 compatible = "mediatek,eth-mac";
388 reg = <1>;
389 status = "off";
390 phy-mode = "rgmii";
391 phy-handle = <&phy5>;
392 };
377 mdio-bus { 393 mdio-bus {
378 #address-cells = <1>; 394 #address-cells = <1>;
379 #size-cells = <0>; 395 #size-cells = <0>;
380 396
381 phy1f: ethernet-phy@1f { 397 phy5: ethernet-phy@5 {
382 reg = <0x1f>; 398 reg = <5>;
383 phy-mode = "rgmii"; 399 phy-mode = "rgmii";
384 }; 400 };
401
402 switch0: switch0@0 {
403 compatible = "mediatek,mt7621";
404 #address-cells = <1>;
405 #size-cells = <0>;
406 reg = <0>;
407 mediatek,mcm;
408 resets = <&rstctrl 2>;
409 reset-names = "mcm";
410
411 ports {
412 #address-cells = <1>;
413 #size-cells = <0>;
414 reg = <0>;
415 port@0 {
416 status = "off";
417 reg = <0>;
418 label = "lan0";
419 };
420 port@1 {
421 status = "off";
422 reg = <1>;
423 label = "lan1";
424 };
425 port@2 {
426 status = "off";
427 reg = <2>;
428 label = "lan2";
429 };
430 port@3 {
431 status = "off";
432 reg = <3>;
433 label = "lan3";
434 };
435 port@4 {
436 status = "off";
437 reg = <4>;
438 label = "lan4";
439 };
440 port@6 {
441 reg = <6>;
442 label = "cpu";
443 ethernet = <&gmac0>;
444 phy-mode = "trgmii";
445 fixed-link {
446 speed = <1000>;
447 full-duplex;
448 };
449 };
450 };
451 };
385 }; 452 };
386 }; 453 };
387 454
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644
index 596b38552697..000000000000
--- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
+++ /dev/null
@@ -1,48 +0,0 @@
1Mediatek Gigabit Switch
2=======================
3
4The mediatek gigabit switch can be found on Mediatek SoCs.
5
6Required properties:
7- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
8 "mediatek,mt7623-gsw"
9- reg: Address and length of the register set for the device
10- interrupts: Should contain the gigabit switches interrupt
11
12
13Additional required properties for ARM based SoCs:
14- mediatek,reset-pin: phandle describing the reset GPIO
15- clocks: the clocks used by the switch
16- clock-names: the names of the clocks listed in the clocks property
17 these should be "trgpll", "esw", "gp2", "gp1"
18- mt7530-supply: the phandle of the regulator used to power the switch
19- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
20 setup the drive current
21
22
23Optional properties:
24- interrupt-parent: Should be the phandle for the interrupt controller
25 that services interrupts for this device
26
27Example:
28
29gsw: switch@1b100000 {
30 compatible = "mediatek,mt7623-gsw";
31 reg = <0 0x1b110000 0 0x300000>;
32
33 interrupt-parent = <&pio>;
34 interrupts = <168 IRQ_TYPE_EDGE_RISING>;
35
36 clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
37 <&ethsys CLK_ETHSYS_ESW>,
38 <&ethsys CLK_ETHSYS_GP2>,
39 <&ethsys CLK_ETHSYS_GP1>;
40 clock-names = "trgpll", "esw", "gp2", "gp1";
41
42 mt7530-supply = <&mt6323_vpa_reg>;
43
44 mediatek,pctl-regmap = <&syscfg_pctl_a>;
45 mediatek,reset-pin = <&pio 15 0>;
46
47 status = "okay";
48};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644
index 44ea86c7a96c..000000000000
--- a/drivers/staging/mt7621-eth/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
1config NET_VENDOR_MEDIATEK_STAGING
2 bool "MediaTek ethernet driver - staging version"
3 depends on RALINK
4 ---help---
5 If you have an MT7621 Mediatek SoC with ethernet, say Y.
6
7if NET_VENDOR_MEDIATEK_STAGING
8choice
9 prompt "MAC type"
10
11config NET_MEDIATEK_MT7621
12 bool "MT7621"
13 depends on MIPS && SOC_MT7621
14
15endchoice
16
17config NET_MEDIATEK_SOC_STAGING
18 tristate "MediaTek SoC Gigabit Ethernet support"
19 depends on NET_VENDOR_MEDIATEK_STAGING
20 select PHYLIB
21 ---help---
22 This driver supports the gigabit ethernet MACs in the
23 MediaTek SoC family.
24
25config NET_MEDIATEK_MDIO
26 def_bool NET_MEDIATEK_SOC_STAGING
27 depends on NET_MEDIATEK_MT7621
28 select PHYLIB
29
30config NET_MEDIATEK_MDIO_MT7620
31 def_bool NET_MEDIATEK_SOC_STAGING
32 depends on NET_MEDIATEK_MT7621
33 select NET_MEDIATEK_MDIO
34
35config NET_MEDIATEK_GSW_MT7621
36 def_tristate NET_MEDIATEK_SOC_STAGING
37 depends on NET_MEDIATEK_MT7621
38
39endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644
index 018bcc3596b3..000000000000
--- a/drivers/staging/mt7621-eth/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
1#
2# Makefile for the Ralink SoCs built-in ethernet macs
3#
4
5mtk-eth-soc-y += mtk_eth_soc.o ethtool.o
6
7mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o
8mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
9
10mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o
11
12obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o
13
14obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644
index f9e47d4b4cd4..000000000000
--- a/drivers/staging/mt7621-eth/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
1
2- verify devicetree documentation is consistent with code
3- fix ethtool - currently doesn't return valid data.
4- general code review and clean up
5- add support for second MAC on mt7621
6- convert gsw code to use switchdev interfaces
7- md7620_mmi_write etc should probably be wrapped
8 in a regmap abstraction.
9- Get soc_mt7621 to work with QDMA TX if possible.
10- Ensure phys are correctly configured when a cable
11 is plugged in.
12
13Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644
index 8c4228e2c987..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ /dev/null
@@ -1,250 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/* This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; version 2 of the License
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
12 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
13 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
14 */
15
16#include "mtk_eth_soc.h"
17#include "ethtool.h"
18
19struct mtk_stat {
20 char name[ETH_GSTRING_LEN];
21 unsigned int idx;
22};
23
24#define MTK_HW_STAT(stat) { \
25 .name = #stat, \
26 .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
27}
28
29static const struct mtk_stat mtk_ethtool_hw_stats[] = {
30 MTK_HW_STAT(tx_bytes),
31 MTK_HW_STAT(tx_packets),
32 MTK_HW_STAT(tx_skip),
33 MTK_HW_STAT(tx_collisions),
34 MTK_HW_STAT(rx_bytes),
35 MTK_HW_STAT(rx_packets),
36 MTK_HW_STAT(rx_overflow),
37 MTK_HW_STAT(rx_fcs_errors),
38 MTK_HW_STAT(rx_short_errors),
39 MTK_HW_STAT(rx_long_errors),
40 MTK_HW_STAT(rx_checksum_errors),
41 MTK_HW_STAT(rx_flow_control_packets),
42};
43
44#define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats)
45
46static int mtk_get_link_ksettings(struct net_device *dev,
47 struct ethtool_link_ksettings *cmd)
48{
49 struct mtk_mac *mac = netdev_priv(dev);
50 int err;
51
52 if (!mac->phy_dev)
53 return -ENODEV;
54
55 if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
56 err = phy_read_status(mac->phy_dev);
57 if (err)
58 return -ENODEV;
59 }
60
61 phy_ethtool_ksettings_get(mac->phy_dev, cmd);
62 return 0;
63}
64
65static int mtk_set_link_ksettings(struct net_device *dev,
66 const struct ethtool_link_ksettings *cmd)
67{
68 struct mtk_mac *mac = netdev_priv(dev);
69
70 if (!mac->phy_dev)
71 return -ENODEV;
72
73 if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
74 if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
75 mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
76 mac->phy_flags = MTK_PHY_FLAG_PORT;
77 } else if (mac->hw->mii_bus) {
78 mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
79 cmd->base.phy_address);
80 if (!mac->phy_dev)
81 return -ENODEV;
82 mac->phy_flags = MTK_PHY_FLAG_ATTACH;
83 } else {
84 return -ENODEV;
85 }
86 }
87
88 return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
89}
90
91static void mtk_get_drvinfo(struct net_device *dev,
92 struct ethtool_drvinfo *info)
93{
94 struct mtk_mac *mac = netdev_priv(dev);
95 struct mtk_soc_data *soc = mac->hw->soc;
96
97 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
98 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
99
100 if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
101 info->n_stats = MTK_HW_STATS_LEN;
102}
103
104static u32 mtk_get_msglevel(struct net_device *dev)
105{
106 struct mtk_mac *mac = netdev_priv(dev);
107
108 return mac->hw->msg_enable;
109}
110
111static void mtk_set_msglevel(struct net_device *dev, u32 value)
112{
113 struct mtk_mac *mac = netdev_priv(dev);
114
115 mac->hw->msg_enable = value;
116}
117
118static int mtk_nway_reset(struct net_device *dev)
119{
120 struct mtk_mac *mac = netdev_priv(dev);
121
122 if (!mac->phy_dev)
123 return -EOPNOTSUPP;
124
125 return genphy_restart_aneg(mac->phy_dev);
126}
127
128static u32 mtk_get_link(struct net_device *dev)
129{
130 struct mtk_mac *mac = netdev_priv(dev);
131 int err;
132
133 if (!mac->phy_dev)
134 goto out_get_link;
135
136 if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
137 err = genphy_update_link(mac->phy_dev);
138 if (err)
139 goto out_get_link;
140 }
141
142 return mac->phy_dev->link;
143
144out_get_link:
145 return ethtool_op_get_link(dev);
146}
147
148static int mtk_set_ringparam(struct net_device *dev,
149 struct ethtool_ringparam *ring)
150{
151 struct mtk_mac *mac = netdev_priv(dev);
152
153 if ((ring->tx_pending < 2) ||
154 (ring->rx_pending < 2) ||
155 (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
156 (ring->tx_pending > mac->hw->soc->dma_ring_size))
157 return -EINVAL;
158
159 dev->netdev_ops->ndo_stop(dev);
160
161 mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
162 mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
163
164 return dev->netdev_ops->ndo_open(dev);
165}
166
167static void mtk_get_ringparam(struct net_device *dev,
168 struct ethtool_ringparam *ring)
169{
170 struct mtk_mac *mac = netdev_priv(dev);
171
172 ring->rx_max_pending = mac->hw->soc->dma_ring_size;
173 ring->tx_max_pending = mac->hw->soc->dma_ring_size;
174 ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
175 ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
176}
177
178static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
179{
180 int i;
181
182 switch (stringset) {
183 case ETH_SS_STATS:
184 for (i = 0; i < MTK_HW_STATS_LEN; i++) {
185 memcpy(data, mtk_ethtool_hw_stats[i].name,
186 ETH_GSTRING_LEN);
187 data += ETH_GSTRING_LEN;
188 }
189 break;
190 }
191}
192
193static int mtk_get_sset_count(struct net_device *dev, int sset)
194{
195 switch (sset) {
196 case ETH_SS_STATS:
197 return MTK_HW_STATS_LEN;
198 default:
199 return -EOPNOTSUPP;
200 }
201}
202
203static void mtk_get_ethtool_stats(struct net_device *dev,
204 struct ethtool_stats *stats, u64 *data)
205{
206 struct mtk_mac *mac = netdev_priv(dev);
207 struct mtk_hw_stats *hwstats = mac->hw_stats;
208 unsigned int start;
209 int i;
210
211 if (netif_running(dev) && netif_device_present(dev)) {
212 if (spin_trylock(&hwstats->stats_lock)) {
213 mtk_stats_update_mac(mac);
214 spin_unlock(&hwstats->stats_lock);
215 }
216 }
217
218 do {
219 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
220 for (i = 0; i < MTK_HW_STATS_LEN; i++)
221 data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
222
223 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
224}
225
226static struct ethtool_ops mtk_ethtool_ops = {
227 .get_link_ksettings = mtk_get_link_ksettings,
228 .set_link_ksettings = mtk_set_link_ksettings,
229 .get_drvinfo = mtk_get_drvinfo,
230 .get_msglevel = mtk_get_msglevel,
231 .set_msglevel = mtk_set_msglevel,
232 .nway_reset = mtk_nway_reset,
233 .get_link = mtk_get_link,
234 .set_ringparam = mtk_set_ringparam,
235 .get_ringparam = mtk_get_ringparam,
236};
237
238void mtk_set_ethtool_ops(struct net_device *netdev)
239{
240 struct mtk_mac *mac = netdev_priv(netdev);
241 struct mtk_soc_data *soc = mac->hw->soc;
242
243 if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
244 mtk_ethtool_ops.get_strings = mtk_get_strings;
245 mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
246 mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
247 }
248
249 netdev->ethtool_ops = &mtk_ethtool_ops;
250}
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644
index 0071469aea6c..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
4 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
5 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
6 */
7
8#ifndef MTK_ETHTOOL_H
9#define MTK_ETHTOOL_H
10
11#include <linux/ethtool.h>
12
13void mtk_set_ethtool_ops(struct net_device *netdev);
14
15#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644
index 70f7e5481952..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7620.h
+++ /dev/null
@@ -1,277 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#ifndef _RALINK_GSW_MT7620_H__
16#define _RALINK_GSW_MT7620_H__
17
18#define GSW_REG_PHY_TIMEOUT (5 * HZ)
19
20#define MT7620_GSW_REG_PIAC 0x0004
21
22#define GSW_NUM_VLANS 16
23#define GSW_NUM_VIDS 4096
24#define GSW_NUM_PORTS 7
25#define GSW_PORT6 6
26
27#define GSW_MDIO_ACCESS BIT(31)
28#define GSW_MDIO_READ BIT(19)
29#define GSW_MDIO_WRITE BIT(18)
30#define GSW_MDIO_START BIT(16)
31#define GSW_MDIO_ADDR_SHIFT 20
32#define GSW_MDIO_REG_SHIFT 25
33
34#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100))
35#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
36#define GSW_REG_SMACCR0 0x3fE4
37#define GSW_REG_SMACCR1 0x3fE8
38#define GSW_REG_CKGCR 0x3ff0
39
40#define GSW_REG_IMR 0x7008
41#define GSW_REG_ISR 0x700c
42#define GSW_REG_GPC1 0x7014
43
44#define SYSC_REG_CHIP_REV_ID 0x0c
45#define SYSC_REG_CFG 0x10
46#define SYSC_REG_CFG1 0x14
47#define RST_CTRL_MCM BIT(2)
48#define SYSC_PAD_RGMII2_MDIO 0x58
49#define SYSC_GPIO_MODE 0x60
50
51#define PORT_IRQ_ST_CHG 0x7f
52
53#define MT7621_ESW_PHY_POLLING 0x0000
54#define MT7620_ESW_PHY_POLLING 0x7000
55
56#define PMCR_IPG BIT(18)
57#define PMCR_MAC_MODE BIT(16)
58#define PMCR_FORCE BIT(15)
59#define PMCR_TX_EN BIT(14)
60#define PMCR_RX_EN BIT(13)
61#define PMCR_BACKOFF BIT(9)
62#define PMCR_BACKPRES BIT(8)
63#define PMCR_RX_FC BIT(5)
64#define PMCR_TX_FC BIT(4)
65#define PMCR_SPEED(_x) (_x << 2)
66#define PMCR_DUPLEX BIT(1)
67#define PMCR_LINK BIT(0)
68
69#define PHY_AN_EN BIT(31)
70#define PHY_PRE_EN BIT(30)
71#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24)
72
73/* ethernet subsystem config register */
74#define ETHSYS_SYSCFG0 0x14
75/* ethernet subsystem clock register */
76#define ETHSYS_CLKCFG0 0x2c
77#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
78
79/* p5 RGMII wrapper TX clock control register */
80#define MT7530_P5RGMIITXCR 0x7b04
81/* p5 RGMII wrapper RX clock control register */
82#define MT7530_P5RGMIIRXCR 0x7b00
83/* TRGMII TDX ODT registers */
84#define MT7530_TRGMII_TD0_ODT 0x7a54
85#define MT7530_TRGMII_TD1_ODT 0x7a5c
86#define MT7530_TRGMII_TD2_ODT 0x7a64
87#define MT7530_TRGMII_TD3_ODT 0x7a6c
88#define MT7530_TRGMII_TD4_ODT 0x7a74
89#define MT7530_TRGMII_TD5_ODT 0x7a7c
90/* TRGMII TCK ctrl register */
91#define MT7530_TRGMII_TCK_CTRL 0x7a78
92/* TRGMII Tx ctrl register */
93#define MT7530_TRGMII_TXCTRL 0x7a40
94/* port 6 extended control register */
95#define MT7530_P6ECR 0x7830
96/* IO driver control register */
97#define MT7530_IO_DRV_CR 0x7810
98/* top signal control register */
99#define MT7530_TOP_SIG_CTRL 0x7808
100/* modified hwtrap register */
101#define MT7530_MHWTRAP 0x7804
102/* hwtrap status register */
103#define MT7530_HWTRAP 0x7800
104/* status interrupt register */
105#define MT7530_SYS_INT_STS 0x700c
106/* system nterrupt register */
107#define MT7530_SYS_INT_EN 0x7008
108/* system control register */
109#define MT7530_SYS_CTRL 0x7000
110/* port MAC status register */
111#define MT7530_PMSR_P(x) (0x3008 + (x * 0x100))
112/* port MAC control register */
113#define MT7530_PMCR_P(x) (0x3000 + (x * 0x100))
114
115#define MT7621_XTAL_SHIFT 6
116#define MT7621_XTAL_MASK 0x7
117#define MT7621_XTAL_25 6
118#define MT7621_XTAL_40 3
119#define MT7621_MDIO_DRV_MASK (3 << 4)
120#define MT7621_GE1_MODE_MASK (3 << 12)
121
122#define TRGMII_TXCTRL_TXC_INV BIT(30)
123#define P6ECR_INTF_MODE_RGMII BIT(1)
124#define P5RGMIIRXCR_C_ALIGN BIT(8)
125#define P5RGMIIRXCR_DELAY_2 BIT(1)
126#define P5RGMIITXCR_DELAY_2 (BIT(8) | BIT(2))
127
128/* TOP_SIG_CTRL bits */
129#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
130
131/* MHWTRAP bits */
132#define MHWTRAP_MANUAL BIT(16)
133#define MHWTRAP_P5_MAC_SEL BIT(13)
134#define MHWTRAP_P6_DIS BIT(8)
135#define MHWTRAP_P5_RGMII_MODE BIT(7)
136#define MHWTRAP_P5_DIS BIT(6)
137#define MHWTRAP_PHY_ACCESS BIT(5)
138
139/* HWTRAP bits */
140#define HWTRAP_XTAL_SHIFT 9
141#define HWTRAP_XTAL_MASK 0x3
142
143/* SYS_CTRL bits */
144#define SYS_CTRL_SW_RST BIT(1)
145#define SYS_CTRL_REG_RST BIT(0)
146
147/* PMCR bits */
148#define PMCR_IFG_XMIT_96 BIT(18)
149#define PMCR_MAC_MODE BIT(16)
150#define PMCR_FORCE_MODE BIT(15)
151#define PMCR_TX_EN BIT(14)
152#define PMCR_RX_EN BIT(13)
153#define PMCR_BACK_PRES_EN BIT(9)
154#define PMCR_BACKOFF_EN BIT(8)
155#define PMCR_TX_FC_EN BIT(5)
156#define PMCR_RX_FC_EN BIT(4)
157#define PMCR_FORCE_SPEED_1000 BIT(3)
158#define PMCR_FORCE_FDX BIT(1)
159#define PMCR_FORCE_LNK BIT(0)
160#define PMCR_FIXED_LINK (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
161 PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
162 PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
163 PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
164 PMCR_FORCE_LNK)
165
166#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \
167 PMCR_TX_FC_EN | PMCR_RX_FC_EN)
168
169/* TRGMII control registers */
170#define GSW_INTF_MODE 0x390
171#define GSW_TRGMII_TD0_ODT 0x354
172#define GSW_TRGMII_TD1_ODT 0x35c
173#define GSW_TRGMII_TD2_ODT 0x364
174#define GSW_TRGMII_TD3_ODT 0x36c
175#define GSW_TRGMII_TXCTL_ODT 0x374
176#define GSW_TRGMII_TCK_ODT 0x37c
177#define GSW_TRGMII_RCK_CTRL 0x300
178
179#define INTF_MODE_TRGMII BIT(1)
180#define TRGMII_RCK_CTRL_RX_RST BIT(31)
181
182/* Mac control registers */
183#define MTK_MAC_P2_MCR 0x200
184#define MTK_MAC_P1_MCR 0x100
185
186#define MAC_MCR_MAX_RX_2K BIT(29)
187#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
188#define MAC_MCR_FORCE_MODE BIT(15)
189#define MAC_MCR_TX_EN BIT(14)
190#define MAC_MCR_RX_EN BIT(13)
191#define MAC_MCR_BACKOFF_EN BIT(9)
192#define MAC_MCR_BACKPR_EN BIT(8)
193#define MAC_MCR_FORCE_RX_FC BIT(5)
194#define MAC_MCR_FORCE_TX_FC BIT(4)
195#define MAC_MCR_SPEED_1000 BIT(3)
196#define MAC_MCR_FORCE_DPX BIT(1)
197#define MAC_MCR_FORCE_LINK BIT(0)
198#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
199 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
200 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
201 MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
202 MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
203 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
204#define MAC_MCR_FIXED_LINK_FC (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
205 MAC_MCR_FIXED_LINK)
206
207/* possible XTAL speed */
208#define MT7623_XTAL_40 0
209#define MT7623_XTAL_20 1
210#define MT7623_XTAL_25 3
211
212/* GPIO port control registers */
213#define GPIO_OD33_CTRL8 0x4c0
214#define GPIO_BIAS_CTRL 0xed0
215#define GPIO_DRV_SEL10 0xf00
216
217/* on MT7620 the functio of port 4 can be software configured */
218enum {
219 PORT4_EPHY = 0,
220 PORT4_EXT,
221};
222
223/* struct mt7620_gsw - the structure that holds the SoC specific data
224 * @dev: The Device struct
225 * @base: The base address
226 * @piac_offset: The PIAC base may change depending on SoC
227 * @irq: The IRQ we are using
228 * @port4: The port4 mode on MT7620
229 * @autopoll: Is MDIO autopolling enabled
230 * @ethsys: The ethsys register map
231 * @pctl: The pin control register map
232 * @clk_gsw: The switch clock
233 * @clk_gp1: The gmac1 clock
234 * @clk_gp2: The gmac2 clock
235 * @clk_trgpll: The trgmii pll clock
236 */
237struct mt7620_gsw {
238 struct device *dev;
239 void __iomem *base;
240 u32 piac_offset;
241 int irq;
242 int port4;
243 unsigned long int autopoll;
244
245 struct regmap *ethsys;
246 struct regmap *pctl;
247
248 struct clk *clk_gsw;
249 struct clk *clk_gp1;
250 struct clk *clk_gp2;
251 struct clk *clk_trgpll;
252};
253
254/* switch register I/O wrappers */
255void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
256u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
257
258/* the callback used by the driver core to bringup the switch */
259int mtk_gsw_init(struct mtk_eth *eth);
260
261/* MDIO access wrappers */
262int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
263int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
264void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
265int mt7620_has_carrier(struct mtk_eth *eth);
266void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
267 int speed, int duplex);
268void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
269u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
270void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
271
272u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
273 u32 phy_register, u32 write_data);
274u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
275void mt7620_handle_carrier(struct mtk_eth *eth);
276
277#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644
index 53767b17bad9..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7621.c
+++ /dev/null
@@ -1,297 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/platform_device.h>
19#include <linux/of_device.h>
20#include <linux/of_irq.h>
21
22#include <ralink_regs.h>
23
24#include "mtk_eth_soc.h"
25#include "gsw_mt7620.h"
26
27void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
28{
29 iowrite32(val, gsw->base + reg);
30}
31EXPORT_SYMBOL_GPL(mtk_switch_w32);
32
33u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
34{
35 return ioread32(gsw->base + reg);
36}
37EXPORT_SYMBOL_GPL(mtk_switch_r32);
38
39static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
40{
41 struct mtk_eth *eth = (struct mtk_eth *)_eth;
42 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
43 u32 reg, i;
44
45 reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
46
47 for (i = 0; i < 5; i++) {
48 unsigned int link;
49
50 if ((reg & BIT(i)) == 0)
51 continue;
52
53 link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
54
55 if (link == eth->link[i])
56 continue;
57
58 eth->link[i] = link;
59 if (link)
60 netdev_info(*eth->netdev,
61 "port %d link up\n", i);
62 else
63 netdev_info(*eth->netdev,
64 "port %d link down\n", i);
65 }
66
67 mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
68
69 return IRQ_HANDLED;
70}
71
72static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
73 struct device_node *np)
74{
75 u32 i;
76 u32 val;
77
78 /* hardware reset the switch */
79 mtk_reset(eth, RST_CTRL_MCM);
80 mdelay(10);
81
82 /* reduce RGMII2 PAD driving strength */
83 rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
84
85 /* gpio mux - RGMII1=Normal mode */
86 rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
87
88 /* set GMAC1 RGMII mode */
89 rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
90
91 /* enable MDIO to control MT7530 */
92 rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
93
94 /* turn off all PHYs */
95 for (i = 0; i <= 4; i++) {
96 val = _mt7620_mii_read(gsw, i, 0x0);
97 val |= BIT(11);
98 _mt7620_mii_write(gsw, i, 0x0, val);
99 }
100
101 /* reset the switch */
102 mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
103 SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
104 usleep_range(10, 20);
105
106 if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
107 /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
108 mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
109 mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
110 } else {
111 /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
112 mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
113 mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
114 }
115
116 /* GE2, Link down */
117 mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
118
119 /* Enable Port 6, P5 as GMAC5, P5 disable */
120 val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
121 /* Enable Port 6 */
122 val &= ~MHWTRAP_P6_DIS;
123 /* Disable Port 5 */
124 val |= MHWTRAP_P5_DIS;
125 /* manual override of HW-Trap */
126 val |= MHWTRAP_MANUAL;
127 mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
128
129 val = rt_sysc_r32(SYSC_REG_CFG);
130 val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
131 if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
132 /* 40Mhz */
133
134 /* disable MT7530 core clock */
135 _mt7620_mii_write(gsw, 0, 13, 0x1f);
136 _mt7620_mii_write(gsw, 0, 14, 0x410);
137 _mt7620_mii_write(gsw, 0, 13, 0x401f);
138 _mt7620_mii_write(gsw, 0, 14, 0x0);
139
140 /* disable MT7530 PLL */
141 _mt7620_mii_write(gsw, 0, 13, 0x1f);
142 _mt7620_mii_write(gsw, 0, 14, 0x40d);
143 _mt7620_mii_write(gsw, 0, 13, 0x401f);
144 _mt7620_mii_write(gsw, 0, 14, 0x2020);
145
146 /* for MT7530 core clock = 500Mhz */
147 _mt7620_mii_write(gsw, 0, 13, 0x1f);
148 _mt7620_mii_write(gsw, 0, 14, 0x40e);
149 _mt7620_mii_write(gsw, 0, 13, 0x401f);
150 _mt7620_mii_write(gsw, 0, 14, 0x119);
151
152 /* enable MT7530 PLL */
153 _mt7620_mii_write(gsw, 0, 13, 0x1f);
154 _mt7620_mii_write(gsw, 0, 14, 0x40d);
155 _mt7620_mii_write(gsw, 0, 13, 0x401f);
156 _mt7620_mii_write(gsw, 0, 14, 0x2820);
157
158 usleep_range(20, 40);
159
160 /* enable MT7530 core clock */
161 _mt7620_mii_write(gsw, 0, 13, 0x1f);
162 _mt7620_mii_write(gsw, 0, 14, 0x410);
163 _mt7620_mii_write(gsw, 0, 13, 0x401f);
164 }
165
166 /* RGMII */
167 _mt7620_mii_write(gsw, 0, 14, 0x1);
168
169 /* set MT7530 central align */
170 mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
171 mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
172 MT7530_TRGMII_TXCTRL);
173 mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
174
175 /* delay setting for 10/1000M */
176 mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
177 P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
178 mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
179
180 /* lower Tx Driving*/
181 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
182 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
183 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
184 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
185 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
186 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
187
188 /* turn on all PHYs */
189 for (i = 0; i <= 4; i++) {
190 val = _mt7620_mii_read(gsw, i, 0);
191 val &= ~BIT(11);
192 _mt7620_mii_write(gsw, i, 0, val);
193 }
194
195#define MT7530_NUM_PORTS 8
196#define REG_ESW_PORT_PCR(x) (0x2004 | ((x) << 8))
197#define REG_ESW_PORT_PVC(x) (0x2010 | ((x) << 8))
198#define REG_ESW_PORT_PPBV1(x) (0x2014 | ((x) << 8))
199#define MT7530_CPU_PORT 6
200
201 /* This is copied from mt7530_apply_config in libreCMC driver */
202 {
203 int i;
204
205 for (i = 0; i < MT7530_NUM_PORTS; i++)
206 mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
207
208 mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
209 0x00ff0000);
210
211 for (i = 0; i < MT7530_NUM_PORTS; i++)
212 mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
213 }
214
215 /* enable irq */
216 mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
217 mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
218}
219
220static const struct of_device_id mediatek_gsw_match[] = {
221 { .compatible = "mediatek,mt7621-gsw" },
222 {},
223};
224MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
225
226int mtk_gsw_init(struct mtk_eth *eth)
227{
228 struct device_node *np = eth->switch_np;
229 struct platform_device *pdev = of_find_device_by_node(np);
230 struct mt7620_gsw *gsw;
231
232 if (!pdev)
233 return -ENODEV;
234
235 if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
236 return -EINVAL;
237
238 gsw = platform_get_drvdata(pdev);
239 eth->sw_priv = gsw;
240
241 if (!gsw->irq)
242 return -EINVAL;
243
244 request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
245 "gsw", eth);
246 disable_irq(gsw->irq);
247
248 mt7621_hw_init(eth, gsw, np);
249
250 enable_irq(gsw->irq);
251
252 return 0;
253}
254EXPORT_SYMBOL_GPL(mtk_gsw_init);
255
256static int mt7621_gsw_probe(struct platform_device *pdev)
257{
258 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
259 struct mt7620_gsw *gsw;
260
261 gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
262 if (!gsw)
263 return -ENOMEM;
264
265 gsw->base = devm_ioremap_resource(&pdev->dev, res);
266 if (IS_ERR(gsw->base))
267 return PTR_ERR(gsw->base);
268
269 gsw->dev = &pdev->dev;
270 gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
271
272 platform_set_drvdata(pdev, gsw);
273
274 return 0;
275}
276
277static int mt7621_gsw_remove(struct platform_device *pdev)
278{
279 platform_set_drvdata(pdev, NULL);
280
281 return 0;
282}
283
284static struct platform_driver gsw_driver = {
285 .probe = mt7621_gsw_probe,
286 .remove = mt7621_gsw_remove,
287 .driver = {
288 .name = "mt7621-gsw",
289 .of_match_table = mediatek_gsw_match,
290 },
291};
292
293module_platform_driver(gsw_driver);
294
295MODULE_LICENSE("GPL");
296MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
297MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644
index 5fea6a447eed..000000000000
--- a/drivers/staging/mt7621-eth/mdio.c
+++ /dev/null
@@ -1,275 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
6 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
7 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/phy.h>
13#include <linux/of_net.h>
14#include <linux/of_mdio.h>
15
16#include "mtk_eth_soc.h"
17#include "mdio.h"
18
19static int mtk_mdio_reset(struct mii_bus *bus)
20{
21 /* TODO */
22 return 0;
23}
24
25static void mtk_phy_link_adjust(struct net_device *dev)
26{
27 struct mtk_eth *eth = netdev_priv(dev);
28 unsigned long flags;
29 int i;
30
31 spin_lock_irqsave(&eth->phy->lock, flags);
32 for (i = 0; i < 8; i++) {
33 if (eth->phy->phy_node[i]) {
34 struct phy_device *phydev = eth->phy->phy[i];
35 int status_change = 0;
36
37 if (phydev->link)
38 if (eth->phy->duplex[i] != phydev->duplex ||
39 eth->phy->speed[i] != phydev->speed)
40 status_change = 1;
41
42 if (phydev->link != eth->link[i])
43 status_change = 1;
44
45 switch (phydev->speed) {
46 case SPEED_1000:
47 case SPEED_100:
48 case SPEED_10:
49 eth->link[i] = phydev->link;
50 eth->phy->duplex[i] = phydev->duplex;
51 eth->phy->speed[i] = phydev->speed;
52
53 if (status_change &&
54 eth->soc->mdio_adjust_link)
55 eth->soc->mdio_adjust_link(eth, i);
56 break;
57 }
58 }
59 }
60 spin_unlock_irqrestore(&eth->phy->lock, flags);
61}
62
63int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
64 struct device_node *phy_node)
65{
66 const __be32 *_port = NULL;
67 struct phy_device *phydev;
68 int phy_mode, port;
69
70 _port = of_get_property(phy_node, "reg", NULL);
71
72 if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
73 pr_err("%pOFn: invalid port id\n", phy_node);
74 return -EINVAL;
75 }
76 port = be32_to_cpu(*_port);
77 phy_mode = of_get_phy_mode(phy_node);
78 if (phy_mode < 0) {
79 dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
80 eth->phy->phy_node[port] = NULL;
81 return -EINVAL;
82 }
83
84 phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
85 mtk_phy_link_adjust, 0, phy_mode);
86 if (!phydev) {
87 dev_err(eth->dev, "could not connect to PHY\n");
88 eth->phy->phy_node[port] = NULL;
89 return -ENODEV;
90 }
91
92 phydev->supported &= PHY_1000BT_FEATURES;
93 phydev->advertising = phydev->supported;
94
95 dev_info(eth->dev,
96 "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
97 port, phydev_name(phydev), phydev->phy_id,
98 phydev->drv->name);
99
100 eth->phy->phy[port] = phydev;
101 eth->link[port] = 0;
102
103 return 0;
104}
105
106static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
107 struct phy_device *phy)
108{
109 phy_attach(eth->netdev[mac->id], phydev_name(phy),
110 PHY_INTERFACE_MODE_MII);
111
112 phy->autoneg = AUTONEG_ENABLE;
113 phy->speed = 0;
114 phy->duplex = 0;
115 phy_set_max_speed(phy, SPEED_100);
116 phy->advertising = phy->supported | ADVERTISED_Autoneg;
117
118 phy_start_aneg(phy);
119}
120
121static int mtk_phy_connect(struct mtk_mac *mac)
122{
123 struct mtk_eth *eth = mac->hw;
124 int i;
125
126 for (i = 0; i < 8; i++) {
127 if (eth->phy->phy_node[i]) {
128 if (!mac->phy_dev) {
129 mac->phy_dev = eth->phy->phy[i];
130 mac->phy_flags = MTK_PHY_FLAG_PORT;
131 }
132 } else if (eth->mii_bus) {
133 struct phy_device *phy;
134
135 phy = mdiobus_get_phy(eth->mii_bus, i);
136 if (phy) {
137 phy_init(eth, mac, phy);
138 if (!mac->phy_dev) {
139 mac->phy_dev = phy;
140 mac->phy_flags = MTK_PHY_FLAG_ATTACH;
141 }
142 }
143 }
144 }
145
146 return 0;
147}
148
149static void mtk_phy_disconnect(struct mtk_mac *mac)
150{
151 struct mtk_eth *eth = mac->hw;
152 unsigned long flags;
153 int i;
154
155 for (i = 0; i < 8; i++)
156 if (eth->phy->phy_fixed[i]) {
157 spin_lock_irqsave(&eth->phy->lock, flags);
158 eth->link[i] = 0;
159 if (eth->soc->mdio_adjust_link)
160 eth->soc->mdio_adjust_link(eth, i);
161 spin_unlock_irqrestore(&eth->phy->lock, flags);
162 } else if (eth->phy->phy[i]) {
163 phy_disconnect(eth->phy->phy[i]);
164 } else if (eth->mii_bus) {
165 struct phy_device *phy =
166 mdiobus_get_phy(eth->mii_bus, i);
167
168 if (phy)
169 phy_detach(phy);
170 }
171}
172
173static void mtk_phy_start(struct mtk_mac *mac)
174{
175 struct mtk_eth *eth = mac->hw;
176 unsigned long flags;
177 int i;
178
179 for (i = 0; i < 8; i++) {
180 if (eth->phy->phy_fixed[i]) {
181 spin_lock_irqsave(&eth->phy->lock, flags);
182 eth->link[i] = 1;
183 if (eth->soc->mdio_adjust_link)
184 eth->soc->mdio_adjust_link(eth, i);
185 spin_unlock_irqrestore(&eth->phy->lock, flags);
186 } else if (eth->phy->phy[i]) {
187 phy_start(eth->phy->phy[i]);
188 }
189 }
190}
191
192static void mtk_phy_stop(struct mtk_mac *mac)
193{
194 struct mtk_eth *eth = mac->hw;
195 unsigned long flags;
196 int i;
197
198 for (i = 0; i < 8; i++)
199 if (eth->phy->phy_fixed[i]) {
200 spin_lock_irqsave(&eth->phy->lock, flags);
201 eth->link[i] = 0;
202 if (eth->soc->mdio_adjust_link)
203 eth->soc->mdio_adjust_link(eth, i);
204 spin_unlock_irqrestore(&eth->phy->lock, flags);
205 } else if (eth->phy->phy[i]) {
206 phy_stop(eth->phy->phy[i]);
207 }
208}
209
210static struct mtk_phy phy_ralink = {
211 .connect = mtk_phy_connect,
212 .disconnect = mtk_phy_disconnect,
213 .start = mtk_phy_start,
214 .stop = mtk_phy_stop,
215};
216
217int mtk_mdio_init(struct mtk_eth *eth)
218{
219 struct device_node *mii_np;
220 int err;
221
222 if (!eth->soc->mdio_read || !eth->soc->mdio_write)
223 return 0;
224
225 spin_lock_init(&phy_ralink.lock);
226 eth->phy = &phy_ralink;
227
228 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
229 if (!mii_np) {
230 dev_err(eth->dev, "no %s child node found", "mdio-bus");
231 return -ENODEV;
232 }
233
234 if (!of_device_is_available(mii_np)) {
235 err = 0;
236 goto err_put_node;
237 }
238
239 eth->mii_bus = mdiobus_alloc();
240 if (!eth->mii_bus) {
241 err = -ENOMEM;
242 goto err_put_node;
243 }
244
245 eth->mii_bus->name = "mdio";
246 eth->mii_bus->read = eth->soc->mdio_read;
247 eth->mii_bus->write = eth->soc->mdio_write;
248 eth->mii_bus->reset = mtk_mdio_reset;
249 eth->mii_bus->priv = eth;
250 eth->mii_bus->parent = eth->dev;
251
252 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
253 err = of_mdiobus_register(eth->mii_bus, mii_np);
254 if (err)
255 goto err_free_bus;
256
257 return 0;
258
259err_free_bus:
260 kfree(eth->mii_bus);
261err_put_node:
262 of_node_put(mii_np);
263 eth->mii_bus = NULL;
264 return err;
265}
266
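mtk_mdio_init() above follows the kernel's goto-unwind idiom: resources are acquired in order and, on failure, released in reverse via the err_free_bus and err_put_node labels. A self-contained sketch of the pattern, with hypothetical malloc-backed resources standing in for the device node and the MDIO bus:

#include <stdlib.h>

/* stand-in for of_mdiobus_register(); pretend it succeeds */
static int register_bus(char *bus)
{
	(void)bus;
	return 0;
}

/* two-step setup mirroring mtk_mdio_init()'s reverse-order unwind */
static int setup(void)
{
	char *node, *bus;
	int err;

	node = malloc(16);		/* of_get_child_by_name() analogue */
	if (!node)
		return -1;

	bus = malloc(32);		/* mdiobus_alloc() analogue */
	if (!bus) {
		err = -1;
		goto err_put_node;
	}

	err = register_bus(bus);
	if (err)
		goto err_free_bus;

	free(bus);			/* success path: tear down the demo */
	free(node);
	return 0;

err_free_bus:
	free(bus);
err_put_node:
	free(node);
	return err;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}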
267void mtk_mdio_cleanup(struct mtk_eth *eth)
268{
269 if (!eth->mii_bus)
270 return;
271
272 mdiobus_unregister(eth->mii_bus);
273 of_node_put(eth->mii_bus->dev.of_node);
274 kfree(eth->mii_bus);
275}
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644
index b14e23842a01..000000000000
--- a/drivers/staging/mt7621-eth/mdio.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#ifndef _RALINK_MDIO_H__
16#define _RALINK_MDIO_H__
17
18#ifdef CONFIG_NET_MEDIATEK_MDIO
19int mtk_mdio_init(struct mtk_eth *eth);
20void mtk_mdio_cleanup(struct mtk_eth *eth);
21int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
22 struct device_node *phy_node);
23#else
24static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
25static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
26#endif
27#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644
index ced605c2914e..000000000000
--- a/drivers/staging/mt7621-eth/mdio_mt7620.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18
19#include "mtk_eth_soc.h"
20#include "gsw_mt7620.h"
21#include "mdio.h"
22
23static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
24{
25 unsigned long t_start = jiffies;
26
27 while (1) {
28 if (!(mtk_switch_r32(gsw,
29 gsw->piac_offset + MT7620_GSW_REG_PIAC) &
30 GSW_MDIO_ACCESS))
31 return 0;
32 if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
33 break;
34 }
35
36 dev_err(gsw->dev, "mdio: MDIO timeout\n");
37 return -1;
38}
39
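mt7620_mii_busy_wait() above polls the PIAC register until the GSW_MDIO_ACCESS bit clears, giving up once GSW_REG_PHY_TIMEOUT jiffies have passed. A standalone sketch of the same bounded busy-wait, using a monotonic clock in place of jiffies (hw_busy() is a stand-in for the register read):

#include <stdio.h>
#include <time.h>

/* stand-in for the GSW_MDIO_ACCESS busy bit; here it is never busy */
static int hw_busy(void)
{
	return 0;
}

/* poll until the busy flag clears or the timeout expires */
static int busy_wait(double timeout_sec)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!hw_busy())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) +
		    (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_sec)
			return -1;
	}
}

int main(void)
{
	printf("busy_wait: %s\n", busy_wait(0.5) ? "timeout" : "ok");
	return 0;
}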
40u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
41 u32 phy_register, u32 write_data)
42{
43 if (mt7620_mii_busy_wait(gsw))
44 return -1;
45
46 write_data &= 0xffff;
47
48 mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
49 (phy_register << GSW_MDIO_REG_SHIFT) |
50 (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
51 MT7620_GSW_REG_PIAC);
52
53 if (mt7620_mii_busy_wait(gsw))
54 return -1;
55
56 return 0;
57}
58EXPORT_SYMBOL_GPL(_mt7620_mii_write);
59
60u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
61{
62 u32 d;
63
64 if (mt7620_mii_busy_wait(gsw))
65 return 0xffff;
66
67 mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
68 (phy_reg << GSW_MDIO_REG_SHIFT) |
69 (phy_addr << GSW_MDIO_ADDR_SHIFT),
70 MT7620_GSW_REG_PIAC);
71
72 if (mt7620_mii_busy_wait(gsw))
73 return 0xffff;
74
75 d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
76
77 return d;
78}
79EXPORT_SYMBOL_GPL(_mt7620_mii_read);
80
81int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
82{
83 struct mtk_eth *eth = bus->priv;
84 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
85
86 return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
87}
88
89int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
90{
91 struct mtk_eth *eth = bus->priv;
92 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
93
94 return _mt7620_mii_read(gsw, phy_addr, phy_reg);
95}
96
97void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
98{
99 _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
100 _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
101 _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
102}
103EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
104
105u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
106{
107 u16 high, low;
108
109 _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
110 low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
111 high = _mt7620_mii_read(gsw, 0x1f, 0x10);
112
113 return (high << 16) | (low & 0xffff);
114}
115EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
116
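mt7530_mdio_w32() and mt7530_mdio_r32() above tunnel 32-bit switch registers through 16-bit MDIO frames: bits 31..6 of the register address select a page (written to register 0x1f of pseudo-PHY 0x1f), bits 5..2 select the in-page register, and the 32-bit value travels as separate low and high halves, the high half via register 0x10. A self-contained sketch of that split against a simulated bus (mdio_rd()/mdio_wr() are stand-ins, and the flat regs[][] array does not model real paging):

#include <stdio.h>
#include <stdint.h>

static uint16_t regs[32][32];	/* simulated per-PHY register file */

static void mdio_wr(int phy, int reg, uint16_t val) { regs[phy][reg] = val; }
static uint16_t mdio_rd(int phy, int reg) { return regs[phy][reg]; }

/* write a 32-bit switch register through 16-bit MDIO operations */
static void sw_w32(uint32_t reg, uint32_t val)
{
	mdio_wr(0x1f, 0x1f, (reg >> 6) & 0x3ff);	/* select page */
	mdio_wr(0x1f, (reg >> 2) & 0xf, val & 0xffff);	/* low half */
	mdio_wr(0x1f, 0x10, val >> 16);			/* high half */
}

static uint32_t sw_r32(uint32_t reg)
{
	uint16_t lo, hi;

	mdio_wr(0x1f, 0x1f, (reg >> 6) & 0x3ff);
	lo = mdio_rd(0x1f, (reg >> 2) & 0xf);
	hi = mdio_rd(0x1f, 0x10);
	return ((uint32_t)hi << 16) | lo;
}

int main(void)
{
	sw_w32(0x7ffc, 0xdeadbeef);
	printf("0x%08x\n", (unsigned)sw_r32(0x7ffc));	/* 0xdeadbeef */
	return 0;
}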
117void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
118{
119 u32 val = mt7530_mdio_r32(gsw, reg);
120
121 val &= ~mask;
122 val |= set;
123 mt7530_mdio_w32(gsw, reg, val);
124}
125EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
126
127static unsigned char *mtk_speed_str(int speed)
128{
129 switch (speed) {
130 case 2:
131 case SPEED_1000:
132 return "1000";
133 case 1:
134 case SPEED_100:
135 return "100";
136 case 0:
137 case SPEED_10:
138 return "10";
139 }
140
141 return "? ";
142}
143
144int mt7620_has_carrier(struct mtk_eth *eth)
145{
146 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
147 int i;
148
149 for (i = 0; i < GSW_PORT6; i++)
150 if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
151 return 1;
152 return 0;
153}
154
155void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
156 int speed, int duplex)
157{
158 struct mt7620_gsw *gsw = eth->sw_priv;
159
160 if (link)
161 dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
162 port, mtk_speed_str(speed),
163 (duplex) ? "Full" : "Half");
164 else
165 dev_info(gsw->dev, "port %d link down\n", port);
166}
167
168void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
169{
170 mt7620_print_link_state(eth, port, eth->link[port],
171 eth->phy->speed[port],
172 (eth->phy->duplex[port] == DUPLEX_FULL));
173}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644
index 02a8584b3d1d..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ /dev/null
@@ -1,2178 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/dma-mapping.h>
19#include <linux/init.h>
20#include <linux/skbuff.h>
21#include <linux/etherdevice.h>
22#include <linux/ethtool.h>
23#include <linux/platform_device.h>
24#include <linux/of_device.h>
25#include <linux/mfd/syscon.h>
26#include <linux/clk.h>
27#include <linux/of_net.h>
28#include <linux/of_mdio.h>
29#include <linux/if_vlan.h>
30#include <linux/reset.h>
31#include <linux/tcp.h>
32#include <linux/io.h>
33#include <linux/bug.h>
34#include <linux/regmap.h>
35
36#include "mtk_eth_soc.h"
37#include "mdio.h"
38#include "ethtool.h"
39
40#define MAX_RX_LENGTH 1536
41#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
42#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
43#define DMA_DUMMY_DESC 0xffffffff
44#define MTK_DEFAULT_MSG_ENABLE \
45 (NETIF_MSG_DRV | \
46 NETIF_MSG_PROBE | \
47 NETIF_MSG_LINK | \
48 NETIF_MSG_TIMER | \
49 NETIF_MSG_IFDOWN | \
50 NETIF_MSG_IFUP | \
51 NETIF_MSG_RX_ERR | \
52 NETIF_MSG_TX_ERR)
53
54#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
55#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
56#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
57
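NEXT_TX_DESP_IDX() and NEXT_RX_DESP_IDX() above assume the ring sizes are powers of two, so (x + 1) & (size - 1) wraps to zero without a division; the same mask drives the occupancy arithmetic in mtk_pdma_empty_txd() further down. A small sketch (the ring size here is an arbitrary assumption):

#include <stdio.h>

#define RING_SIZE 16u	/* must be a power of two */

/* wrap without a modulo, as NEXT_TX_DESP_IDX() does */
static unsigned int next_idx(unsigned int x)
{
	return (x + 1) & (RING_SIZE - 1);
}

/* free descriptors between producer and consumer, wrap-safe */
static unsigned int free_count(unsigned int next, unsigned int freed)
{
	return RING_SIZE - ((next - freed) & (RING_SIZE - 1));
}

int main(void)
{
	printf("next of 15 is %u\n", next_idx(15));	/* wraps to 0 */
	printf("free: %u\n", free_count(5, 2));		/* 16 - 3 = 13 */
	return 0;
}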
58#define SYSC_REG_RSTCTRL 0x34
59
60static int mtk_msg_level = -1;
61module_param_named(msg_level, mtk_msg_level, int, 0);
62MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
63
64static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
65 [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
66 [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
67 [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
68 [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
69 [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
70 [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
71 [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
72 [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
73 [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
74 [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
75 [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
76 [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
77 [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
78 [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
79 [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
80 [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
81};
82
83static const u16 *mtk_reg_table = mtk_reg_table_default;
84
85void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
86{
87 __raw_writel(val, eth->base + reg);
88}
89
90u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
91{
92 return __raw_readl(eth->base + reg);
93}
94
95static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
96{
97 mtk_w32(eth, val, mtk_reg_table[reg]);
98}
99
100static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
101{
102 return mtk_r32(eth, mtk_reg_table[reg]);
103}
104
105/* these bits are also exposed via the reset-controller API. However, the switch
106 * and FE need to be brought out of reset at the exact same moment, and the
107 * reset-controller API does not provide this feature yet. Do the reset manually
108 * until the reset-controller API is able to do this.
109 */
110void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
111{
112 u32 val;
113
114 regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
115 val |= reset_bits;
116 regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
117 usleep_range(10, 20);
118 val &= ~reset_bits;
119 regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
120 usleep_range(10, 20);
121}
122EXPORT_SYMBOL(mtk_reset);
123
124static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
125{
126 if (eth->soc->dma_type & MTK_PDMA)
127 mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
128 if (eth->soc->dma_type & MTK_QDMA)
129 mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
130}
131
132static inline u32 mtk_irq_pending(struct mtk_eth *eth)
133{
134 u32 status = 0;
135
136 if (eth->soc->dma_type & MTK_PDMA)
137 status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
138 if (eth->soc->dma_type & MTK_QDMA)
139 status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
140
141 return status;
142}
143
144static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
145{
146 u32 status_reg = MTK_REG_MTK_INT_STATUS;
147
148 if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
149 status_reg = MTK_REG_MTK_INT_STATUS2;
150
151 mtk_reg_w32(eth, mask, status_reg);
152}
153
154static u32 mtk_irq_pending_status(struct mtk_eth *eth)
155{
156 u32 status_reg = MTK_REG_MTK_INT_STATUS;
157
158 if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
159 status_reg = MTK_REG_MTK_INT_STATUS2;
160
161 return mtk_reg_r32(eth, status_reg);
162}
163
164static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
165{
166 u32 val;
167
168 if (eth->soc->dma_type & MTK_PDMA) {
169 val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
170 mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
171 /* flush write */
172 mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
173 }
174 if (eth->soc->dma_type & MTK_QDMA) {
175 val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
176 mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
177 /* flush write */
178 mtk_r32(eth, MTK_QMTK_INT_ENABLE);
179 }
180}
181
182static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
183{
184 u32 val;
185
186 if (eth->soc->dma_type & MTK_PDMA) {
187 val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
188 mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
189 /* flush write */
190 mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
191 }
192 if (eth->soc->dma_type & MTK_QDMA) {
193 val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
194 mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
195 /* flush write */
196 mtk_r32(eth, MTK_QMTK_INT_ENABLE);
197 }
198}
199
200static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
201{
202 u32 enabled = 0;
203
204 if (eth->soc->dma_type & MTK_PDMA)
205 enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
206 if (eth->soc->dma_type & MTK_QDMA)
207 enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
208
209 return enabled;
210}
211
212static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
213 unsigned char *macaddr)
214{
215 unsigned long flags;
216
217 spin_lock_irqsave(&mac->hw->page_lock, flags);
218 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
219 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
220 (macaddr[4] << 8) | macaddr[5],
221 MTK_GDMA1_MAC_ADRL);
222 spin_unlock_irqrestore(&mac->hw->page_lock, flags);
223}
224
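mtk_hw_set_macaddr() above splits the six MAC address bytes across two registers: the first two bytes go to GDMA1_MAC_ADRH and the remaining four, most significant first, to GDMA1_MAC_ADRL. A minimal sketch of that packing (the example address is made up):

#include <stdio.h>
#include <stdint.h>

/* pack a 6-byte MAC into the ADRH/ADRL register layout used above */
static void mac_to_regs(const uint8_t *m, uint32_t *adrh, uint32_t *adrl)
{
	*adrh = ((uint32_t)m[0] << 8) | m[1];
	*adrl = ((uint32_t)m[2] << 24) | ((uint32_t)m[3] << 16) |
		((uint32_t)m[4] << 8) | m[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0c, 0x43, 0x12, 0x34, 0x56 };
	uint32_t adrh, adrl;

	mac_to_regs(mac, &adrh, &adrl);
	printf("ADRH=0x%04x ADRL=0x%08x\n", (unsigned)adrh, (unsigned)adrl);
	return 0;
}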
225static int mtk_set_mac_address(struct net_device *dev, void *p)
226{
227 int ret = eth_mac_addr(dev, p);
228 struct mtk_mac *mac = netdev_priv(dev);
229 struct mtk_eth *eth = mac->hw;
230
231 if (ret)
232 return ret;
233
234 if (eth->soc->set_mac)
235 eth->soc->set_mac(mac, dev->dev_addr);
236 else
237 mtk_hw_set_macaddr(mac, p);
238
239 return 0;
240}
241
242static inline int mtk_max_frag_size(int mtu)
243{
244 /* make sure buf_size will be at least MAX_RX_LENGTH */
245 if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
246 mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
247
248 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
249 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
250}
251
252static inline int mtk_max_buf_size(int frag_size)
253{
254 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
255 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
256
257 WARN_ON(buf_size < MAX_RX_LENGTH);
258
259 return buf_size;
260}
261
262static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
263 struct mtk_rx_dma *dma_rxd)
264{
265 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
266 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
267 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
268 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
269}
270
271static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
272 struct mtk_tx_dma *dma_txd)
273{
274 WRITE_ONCE(dma_txd->txd1, txd->txd1);
275 WRITE_ONCE(dma_txd->txd3, txd->txd3);
276 WRITE_ONCE(dma_txd->txd4, txd->txd4);
277 /* clean dma done flag last */
278 WRITE_ONCE(dma_txd->txd2, txd->txd2);
279}
280
281static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
282{
283 int i;
284
285 if (ring->rx_data && ring->rx_dma) {
286 for (i = 0; i < ring->rx_ring_size; i++) {
287 if (!ring->rx_data[i])
288 continue;
289 if (!ring->rx_dma[i].rxd1)
290 continue;
291 dma_unmap_single(eth->dev,
292 ring->rx_dma[i].rxd1,
293 ring->rx_buf_size,
294 DMA_FROM_DEVICE);
295 skb_free_frag(ring->rx_data[i]);
296 }
297 kfree(ring->rx_data);
298 ring->rx_data = NULL;
299 }
300
301 if (ring->rx_dma) {
302 dma_free_coherent(eth->dev,
303 ring->rx_ring_size * sizeof(*ring->rx_dma),
304 ring->rx_dma,
305 ring->rx_phys);
306 ring->rx_dma = NULL;
307 }
308}
309
310static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
311{
312 int i, pad = 0;
313
314 ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
315 ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
316 ring->rx_ring_size = eth->soc->dma_ring_size;
317 ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
318 GFP_KERNEL);
319 if (!ring->rx_data)
320 goto no_rx_mem;
321
322 for (i = 0; i < ring->rx_ring_size; i++) {
323 ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
324 if (!ring->rx_data[i])
325 goto no_rx_mem;
326 }
327
328 ring->rx_dma =
329 dma_alloc_coherent(eth->dev,
330 ring->rx_ring_size * sizeof(*ring->rx_dma),
331 &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
332 if (!ring->rx_dma)
333 goto no_rx_mem;
334
335 if (!eth->soc->rx_2b_offset)
336 pad = NET_IP_ALIGN;
337
338 for (i = 0; i < ring->rx_ring_size; i++) {
339 dma_addr_t dma_addr = dma_map_single(eth->dev,
340 ring->rx_data[i] + NET_SKB_PAD + pad,
341 ring->rx_buf_size,
342 DMA_FROM_DEVICE);
343 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
344 goto no_rx_mem;
345 ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
346
347 if (eth->soc->rx_sg_dma)
348 ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
349 else
350 ring->rx_dma[i].rxd2 = RX_DMA_LSO;
351 }
352 ring->rx_calc_idx = ring->rx_ring_size - 1;
353 /* make sure that all changes to the dma ring are flushed before we
354 * continue
355 */
356 wmb();
357
358 return 0;
359
360no_rx_mem:
361 return -ENOMEM;
362}
363
364static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
365{
366 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
367 dma_unmap_single(dev,
368 dma_unmap_addr(tx_buf, dma_addr0),
369 dma_unmap_len(tx_buf, dma_len0),
370 DMA_TO_DEVICE);
371 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
372 dma_unmap_page(dev,
373 dma_unmap_addr(tx_buf, dma_addr0),
374 dma_unmap_len(tx_buf, dma_len0),
375 DMA_TO_DEVICE);
376 }
377 if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
378 dma_unmap_page(dev,
379 dma_unmap_addr(tx_buf, dma_addr1),
380 dma_unmap_len(tx_buf, dma_len1),
381 DMA_TO_DEVICE);
382
383 tx_buf->flags = 0;
384 if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
385 dev_kfree_skb_any(tx_buf->skb);
386 tx_buf->skb = NULL;
387}
388
389static void mtk_pdma_tx_clean(struct mtk_eth *eth)
390{
391 struct mtk_tx_ring *ring = &eth->tx_ring;
392 int i;
393
394 if (ring->tx_buf) {
395 for (i = 0; i < ring->tx_ring_size; i++)
396 mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
397 kfree(ring->tx_buf);
398 ring->tx_buf = NULL;
399 }
400
401 if (ring->tx_dma) {
402 dma_free_coherent(eth->dev,
403 ring->tx_ring_size * sizeof(*ring->tx_dma),
404 ring->tx_dma,
405 ring->tx_phys);
406 ring->tx_dma = NULL;
407 }
408}
409
410static void mtk_qdma_tx_clean(struct mtk_eth *eth)
411{
412 struct mtk_tx_ring *ring = &eth->tx_ring;
413 int i;
414
415 if (ring->tx_buf) {
416 for (i = 0; i < ring->tx_ring_size; i++)
417 mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
418 kfree(ring->tx_buf);
419 ring->tx_buf = NULL;
420 }
421
422 if (ring->tx_dma) {
423 dma_free_coherent(eth->dev,
424 ring->tx_ring_size * sizeof(*ring->tx_dma),
425 ring->tx_dma,
426 ring->tx_phys);
427 ring->tx_dma = NULL;
428 }
429}
430
431void mtk_stats_update_mac(struct mtk_mac *mac)
432{
433 struct mtk_hw_stats *hw_stats = mac->hw_stats;
434 unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
435 u64 stats;
436
437 base += hw_stats->reg_offset;
438
439 u64_stats_update_begin(&hw_stats->syncp);
440
441 if (mac->hw->soc->new_stats) {
442 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
443 stats = mtk_r32(mac->hw, base + 0x04);
444 if (stats)
445 hw_stats->rx_bytes += (stats << 32);
446 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
447 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
448 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
449 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
450 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
451 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
452 hw_stats->rx_flow_control_packets +=
453 mtk_r32(mac->hw, base + 0x24);
454 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
455 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
456 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
457 stats = mtk_r32(mac->hw, base + 0x34);
458 if (stats)
459 hw_stats->tx_bytes += (stats << 32);
460 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
461 } else {
462 hw_stats->tx_bytes += mtk_r32(mac->hw, base);
463 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
464 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
465 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
466 hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
467 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
468 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
469 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
470 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
471 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
472 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
473 hw_stats->rx_flow_control_packets +=
474 mtk_r32(mac->hw, base + 0x3c);
475 }
476
477 u64_stats_update_end(&hw_stats->syncp);
478}
479
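For the new_stats layout, the byte counters above are 64 bits wide in hardware but exposed as two 32-bit registers, so the driver adds the low word and, when the high word is non-zero, the high word shifted up by 32. A tiny sketch of combining such a split counter:

#include <stdio.h>
#include <stdint.h>

/* combine a 64-bit hardware counter exposed as two 32-bit registers */
static uint64_t counter64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t rx_bytes = 0;

	/* each poll adds the current snapshot, as the driver does above */
	rx_bytes += counter64(0x89abcdefu, 0x1u);
	printf("rx_bytes=%llu\n", (unsigned long long)rx_bytes);
	return 0;
}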
480static void mtk_get_stats64(struct net_device *dev,
481 struct rtnl_link_stats64 *storage)
482{
483 struct mtk_mac *mac = netdev_priv(dev);
484 struct mtk_hw_stats *hw_stats = mac->hw_stats;
485 unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
486 unsigned int start;
487
488 if (!base) {
489 netdev_stats_to_stats64(storage, &dev->stats);
490 return;
491 }
492
493 if (netif_running(dev) && netif_device_present(dev)) {
494 if (spin_trylock(&hw_stats->stats_lock)) {
495 mtk_stats_update_mac(mac);
496 spin_unlock(&hw_stats->stats_lock);
497 }
498 }
499
500 do {
501 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
502 storage->rx_packets = hw_stats->rx_packets;
503 storage->tx_packets = hw_stats->tx_packets;
504 storage->rx_bytes = hw_stats->rx_bytes;
505 storage->tx_bytes = hw_stats->tx_bytes;
506 storage->collisions = hw_stats->tx_collisions;
507 storage->rx_length_errors = hw_stats->rx_short_errors +
508 hw_stats->rx_long_errors;
509 storage->rx_over_errors = hw_stats->rx_overflow;
510 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
511 storage->rx_errors = hw_stats->rx_checksum_errors;
512 storage->tx_aborted_errors = hw_stats->tx_skip;
513 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
514
515 storage->tx_errors = dev->stats.tx_errors;
516 storage->rx_dropped = dev->stats.rx_dropped;
517 storage->tx_dropped = dev->stats.tx_dropped;
518}
519
520static int mtk_vlan_rx_add_vid(struct net_device *dev,
521 __be16 proto, u16 vid)
522{
523 struct mtk_mac *mac = netdev_priv(dev);
524 struct mtk_eth *eth = mac->hw;
525 u32 idx = (vid & 0xf);
526 u32 vlan_cfg;
527
528 if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
529 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
530 return 0;
531
532 if (test_bit(idx, &eth->vlan_map)) {
533 netdev_warn(dev, "disable tx vlan offload\n");
534 dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
535 netdev_update_features(dev);
536 } else {
537 vlan_cfg = mtk_r32(eth,
538 mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
539 ((idx >> 1) << 2));
540 if (idx & 0x1) {
541 vlan_cfg &= 0xffff;
542 vlan_cfg |= (vid << 16);
543 } else {
544 vlan_cfg &= 0xffff0000;
545 vlan_cfg |= vid;
546 }
547 mtk_w32(eth,
548 vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
549 ((idx >> 1) << 2));
550 set_bit(idx, &eth->vlan_map);
551 }
552
553 return 0;
554}
555
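mtk_vlan_rx_add_vid() above stores two 16-bit VIDs per 32-bit register: table index idx lands in register idx >> 1, in the high half when idx is odd and the low half otherwise. A standalone sketch of that packing (the table size is illustrative):

#include <stdio.h>
#include <stdint.h>

static uint32_t vid_regs[8];	/* 16 VID slots, two 16-bit entries each */

/* store the VID for table index idx (0..15), matching the layout above */
static void set_vid(unsigned int idx, uint16_t vid)
{
	uint32_t v = vid_regs[idx >> 1];

	if (idx & 1)
		v = (v & 0x0000ffff) | ((uint32_t)vid << 16);
	else
		v = (v & 0xffff0000) | vid;
	vid_regs[idx >> 1] = v;
}

int main(void)
{
	set_vid(4, 100);
	set_vid(5, 200);
	printf("reg2=0x%08x\n", (unsigned)vid_regs[2]);	/* 0x00c80064 */
	return 0;
}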
556static int mtk_vlan_rx_kill_vid(struct net_device *dev,
557 __be16 proto, u16 vid)
558{
559 struct mtk_mac *mac = netdev_priv(dev);
560 struct mtk_eth *eth = mac->hw;
561 u32 idx = (vid & 0xf);
562
563 if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
564 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
565 return 0;
566
567 clear_bit(idx, &eth->vlan_map);
568
569 return 0;
570}
571
572static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
573{
574 barrier();
575 return (u32)(ring->tx_ring_size -
576 ((ring->tx_next_idx - ring->tx_free_idx) &
577 (ring->tx_ring_size - 1)));
578}
579
580static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
581{
582 unsigned int len;
583 int ret;
584
585 if (unlikely(skb->len >= VLAN_ETH_ZLEN))
586 return 0;
587
588 if (eth->soc->padding_64b && !eth->soc->padding_bug)
589 return 0;
590
591 if (skb_vlan_tag_present(skb))
592 len = ETH_ZLEN;
593 else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
594 len = VLAN_ETH_ZLEN;
595 else if (!eth->soc->padding_64b)
596 len = ETH_ZLEN;
597 else
598 return 0;
599
600 if (skb->len >= len)
601 return 0;
602
603 ret = skb_pad(skb, len - skb->len);
604 if (ret < 0)
605 return ret;
606 skb->len = len;
607 skb_set_tail_pointer(skb, len);
608
609 return ret;
610}
611
612static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
613 int tx_num, struct mtk_tx_ring *ring, bool gso)
614{
615 struct mtk_mac *mac = netdev_priv(dev);
616 struct mtk_eth *eth = mac->hw;
617 struct skb_frag_struct *frag;
618 struct mtk_tx_dma txd, *ptxd;
619 struct mtk_tx_buf *tx_buf;
620 int i, j, k, frag_size, frag_map_size, offset;
621 dma_addr_t mapped_addr;
622 unsigned int nr_frags;
623 u32 def_txd4;
624
625 if (mtk_skb_padto(skb, eth)) {
626 netif_warn(eth, tx_err, dev, "tx padding failed!\n");
627 return -1;
628 }
629
630 tx_buf = &ring->tx_buf[ring->tx_next_idx];
631 memset(tx_buf, 0, sizeof(*tx_buf));
632 memset(&txd, 0, sizeof(txd));
633 nr_frags = skb_shinfo(skb)->nr_frags;
634
635 /* init tx descriptor */
636 def_txd4 = eth->soc->txd4;
637 txd.txd4 = def_txd4;
638
639 if (eth->soc->mac_count > 1)
640 txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
641
642 if (gso)
643 txd.txd4 |= TX_DMA_TSO;
644
645 /* TX Checksum offload */
646 if (skb->ip_summed == CHECKSUM_PARTIAL)
647 txd.txd4 |= TX_DMA_CHKSUM;
648
649 /* VLAN header offload */
650 if (skb_vlan_tag_present(skb)) {
651 u16 tag = skb_vlan_tag_get(skb);
652
653 txd.txd4 |= TX_DMA_INS_VLAN |
654 ((tag >> VLAN_PRIO_SHIFT) << 4) |
655 (tag & 0xF);
656 }
657
658 mapped_addr = dma_map_single(&dev->dev, skb->data,
659 skb_headlen(skb), DMA_TO_DEVICE);
660 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
661 return -1;
662
663 txd.txd1 = mapped_addr;
664 txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
665
666 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
667 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
668 dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
669
670 /* TX SG offload */
671 j = ring->tx_next_idx;
672 k = 0;
673 for (i = 0; i < nr_frags; i++) {
674 offset = 0;
675 frag = &skb_shinfo(skb)->frags[i];
676 frag_size = skb_frag_size(frag);
677
678 while (frag_size > 0) {
679 frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
680 mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
681 frag_map_size,
682 DMA_TO_DEVICE);
683 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
684 goto err_dma;
685
686 if (k & 0x1) {
687 j = NEXT_TX_DESP_IDX(j);
688 txd.txd1 = mapped_addr;
689 txd.txd2 = TX_DMA_PLEN0(frag_map_size);
690 txd.txd4 = def_txd4;
691
692 tx_buf = &ring->tx_buf[j];
693 memset(tx_buf, 0, sizeof(*tx_buf));
694
695 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
696 dma_unmap_addr_set(tx_buf, dma_addr0,
697 mapped_addr);
698 dma_unmap_len_set(tx_buf, dma_len0,
699 frag_map_size);
700 } else {
701 txd.txd3 = mapped_addr;
702 txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
703
704 tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
705 tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
706 dma_unmap_addr_set(tx_buf, dma_addr1,
707 mapped_addr);
708 dma_unmap_len_set(tx_buf, dma_len1,
709 frag_map_size);
710
711 if (!((i == (nr_frags - 1)) &&
712 (frag_map_size == frag_size))) {
713 mtk_set_txd_pdma(&txd,
714 &ring->tx_dma[j]);
715 memset(&txd, 0, sizeof(txd));
716 }
717 }
718 frag_size -= frag_map_size;
719 offset += frag_map_size;
720 k++;
721 }
722 }
723
724 /* set last segment */
725 if (k & 0x1)
726 txd.txd2 |= TX_DMA_LS1;
727 else
728 txd.txd2 |= TX_DMA_LS0;
729 mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
730
731 /* store skb to cleanup */
732 tx_buf->skb = skb;
733
734 netdev_sent_queue(dev, skb->len);
735 skb_tx_timestamp(skb);
736
737 ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
738 /* make sure that all changes to the dma ring are flushed before we
739 * continue
740 */
741 wmb();
742 atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
743
744 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
745 !netdev_xmit_more())
746 mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
747
748 return 0;
749
750err_dma:
751 j = ring->tx_next_idx;
752 for (i = 0; i < tx_num; i++) {
753 ptxd = &ring->tx_dma[j];
754 tx_buf = &ring->tx_buf[j];
755
756 /* unmap dma */
757 mtk_txd_unmap(&dev->dev, tx_buf);
758
759 ptxd->txd2 = TX_DMA_DESP2_DEF;
760 j = NEXT_TX_DESP_IDX(j);
761 }
762 /* make sure that all changes to the dma ring are flushed before we
763 * continue
764 */
765 wmb();
766 return -1;
767}
768
769/* the qdma core needs scratch memory to be set up */
770static int mtk_init_fq_dma(struct mtk_eth *eth)
771{
772 dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
773 int cnt = eth->soc->dma_ring_size;
774 int i;
775
776 eth->scratch_ring = dma_alloc_coherent(eth->dev,
777 cnt * sizeof(struct mtk_tx_dma),
778 &phy_ring_head,
779 GFP_ATOMIC | __GFP_ZERO);
780 if (unlikely(!eth->scratch_ring))
781 return -ENOMEM;
782
783 eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
784 GFP_KERNEL);
785 dma_addr = dma_map_single(eth->dev,
786 eth->scratch_head, cnt * QDMA_PAGE_SIZE,
787 DMA_FROM_DEVICE);
788 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
789 return -ENOMEM;
790
791 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
792 phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
793
794 for (i = 0; i < cnt; i++) {
795 eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
796 if (i < cnt - 1)
797 eth->scratch_ring[i].txd2 = (phy_ring_head +
798 ((i + 1) * sizeof(struct mtk_tx_dma)));
799 eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
800 }
801
802 mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
803 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
804 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
805 mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
806
807 return 0;
808}
809
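mtk_init_fq_dma() above builds the QDMA free queue as a linked list living in one flat coherent allocation: each descriptor's txd2 holds the physical address of the next descriptor, the head and tail addresses go to MTK_QDMA_FQ_HEAD/TAIL, and txd1 points at a per-descriptor scratch page. A sketch of the same linking with made-up addresses:

#include <stdio.h>
#include <stdint.h>

struct desc {
	uint32_t buf;	/* txd1: scratch page address */
	uint32_t next;	/* txd2: physical address of the next descriptor */
};

#define CNT 4
#define PAGE 2048u

int main(void)
{
	struct desc ring[CNT] = { { 0, 0 } };
	uint32_t ring_phys = 0x10000;	/* pretend DMA address of ring[0] */
	uint32_t buf_phys = 0x80000;	/* pretend DMA address of the pages */
	int i;

	for (i = 0; i < CNT; i++) {
		ring[i].buf = buf_phys + i * PAGE;
		if (i < CNT - 1)
			ring[i].next = ring_phys +
				       (i + 1) * sizeof(struct desc);
	}

	for (i = 0; i < CNT; i++)
		printf("desc %d: buf=0x%x next=0x%x\n",
		       i, (unsigned)ring[i].buf, (unsigned)ring[i].next);
	return 0;
}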
810static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
811{
812 void *ret = ring->tx_dma;
813
814 return ret + (desc - ring->tx_phys);
815}
816
817static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
818 struct mtk_tx_dma *txd)
819{
820 return mtk_qdma_phys_to_virt(ring, txd->txd2);
821}
822
823static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
824 struct mtk_tx_dma *txd)
825{
826 int idx = txd - ring->tx_dma;
827
828 return &ring->tx_buf[idx];
829}
830
831static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
832 int tx_num, struct mtk_tx_ring *ring, bool gso)
833{
834 struct mtk_mac *mac = netdev_priv(dev);
835 struct mtk_eth *eth = mac->hw;
836 struct mtk_tx_dma *itxd, *txd;
837 struct mtk_tx_buf *tx_buf;
838 dma_addr_t mapped_addr;
839 unsigned int nr_frags;
840 int i, n_desc = 1;
841 u32 txd4 = eth->soc->txd4;
842
843 itxd = ring->tx_next_free;
844 if (itxd == ring->tx_last_free)
845 return -ENOMEM;
846
847 if (eth->soc->mac_count > 1)
848 txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
849
850 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
851 memset(tx_buf, 0, sizeof(*tx_buf));
852
853 if (gso)
854 txd4 |= TX_DMA_TSO;
855
856 /* TX Checksum offload */
857 if (skb->ip_summed == CHECKSUM_PARTIAL)
858 txd4 |= TX_DMA_CHKSUM;
859
860 /* VLAN header offload */
861 if (skb_vlan_tag_present(skb))
862 txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
863
864 mapped_addr = dma_map_single(&dev->dev, skb->data,
865 skb_headlen(skb), DMA_TO_DEVICE);
866 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
867 return -ENOMEM;
868
869 WRITE_ONCE(itxd->txd1, mapped_addr);
870 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
871 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
872 dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
873
874 /* TX SG offload */
875 txd = itxd;
876 nr_frags = skb_shinfo(skb)->nr_frags;
877 for (i = 0; i < nr_frags; i++) {
878 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
879 unsigned int offset = 0;
880 int frag_size = skb_frag_size(frag);
881
882 while (frag_size) {
883 bool last_frag = false;
884 unsigned int frag_map_size;
885
886 txd = mtk_tx_next_qdma(ring, txd);
887 if (txd == ring->tx_last_free)
888 goto err_dma;
889
890 n_desc++;
891 frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
892 mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
893 frag_map_size,
894 DMA_TO_DEVICE);
895 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
896 goto err_dma;
897
898 if (i == nr_frags - 1 &&
899 (frag_size - frag_map_size) == 0)
900 last_frag = true;
901
902 WRITE_ONCE(txd->txd1, mapped_addr);
903 WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
904 TX_DMA_PLEN0(frag_map_size) |
905 last_frag * TX_DMA_LS0) |
906 mac->id);
907 WRITE_ONCE(txd->txd4, 0);
908
909 tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
910 tx_buf = mtk_desc_to_tx_buf(ring, txd);
911 memset(tx_buf, 0, sizeof(*tx_buf));
912
913 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
914 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
915 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
916 frag_size -= frag_map_size;
917 offset += frag_map_size;
918 }
919 }
920
921 /* store skb to cleanup */
922 tx_buf->skb = skb;
923
924 WRITE_ONCE(itxd->txd4, txd4);
925 WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
926 (!nr_frags * TX_DMA_LS0)));
927
928 netdev_sent_queue(dev, skb->len);
929 skb_tx_timestamp(skb);
930
931 ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
932 atomic_sub(n_desc, &ring->tx_free_count);
933
934 /* make sure that all changes to the dma ring are flushed before we
935 * continue
936 */
937 wmb();
938
939 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
940 !netdev_xmit_more())
941 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
942
943 return 0;
944
945err_dma:
946 do {
947 tx_buf = mtk_desc_to_tx_buf(ring, txd);
948
949 /* unmap dma */
950 mtk_txd_unmap(&dev->dev, tx_buf);
951
952 itxd->txd3 = TX_DMA_DESP2_DEF;
953 itxd = mtk_tx_next_qdma(ring, itxd);
954 } while (itxd != txd);
955
956 return -ENOMEM;
957}
958
959static inline int mtk_cal_txd_req(struct sk_buff *skb)
960{
961 int i, nfrags;
962 struct skb_frag_struct *frag;
963
964 nfrags = 1;
965 if (skb_is_gso(skb)) {
966 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
967 frag = &skb_shinfo(skb)->frags[i];
968 nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
969 }
970 } else {
971 nfrags += skb_shinfo(skb)->nr_frags;
972 }
973
974 return DIV_ROUND_UP(nfrags, 2);
975}
976
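mtk_cal_txd_req() above sizes the descriptor request for PDMA, where one descriptor carries two buffer slots: one slot for the linear head, then (in the GSO case) one slot per fragment chunk of at most TX_DMA_BUF_LEN bytes, rounded up to descriptor pairs. A sketch of the same arithmetic for the GSO path (the TX_DMA_BUF_LEN value here is an assumption):

#include <stdio.h>

#define TX_DMA_BUF_LEN 0x3fff	/* assumed max bytes per descriptor slot */

/* descriptors needed when each PDMA descriptor holds two buffer slots */
static int txd_req(const int *frag_len, int nr_frags)
{
	int i, slots = 1;	/* one slot for the linear head */

	for (i = 0; i < nr_frags; i++)
		slots += (frag_len[i] + TX_DMA_BUF_LEN - 1) / TX_DMA_BUF_LEN;

	return (slots + 1) / 2;	/* DIV_ROUND_UP(slots, 2) */
}

int main(void)
{
	int frags[2] = { 0x5000, 0x2000 };

	printf("descriptors: %d\n", txd_req(frags, 2));	/* 4 slots -> 2 */
	return 0;
}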
977static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
978{
979 struct mtk_mac *mac = netdev_priv(dev);
980 struct mtk_eth *eth = mac->hw;
981 struct mtk_tx_ring *ring = &eth->tx_ring;
982 struct net_device_stats *stats = &dev->stats;
983 int tx_num;
984 int len = skb->len;
985 bool gso = false;
986
987 tx_num = mtk_cal_txd_req(skb);
988 if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
989 netif_stop_queue(dev);
990 netif_err(eth, tx_queued, dev,
991 "Tx Ring full when queue awake!\n");
992 return NETDEV_TX_BUSY;
993 }
994
995 /* TSO: fill MSS info in tcp checksum field */
996 if (skb_is_gso(skb)) {
997 if (skb_cow_head(skb, 0)) {
998 netif_warn(eth, tx_err, dev,
999 "GSO expand head fail.\n");
1000 goto drop;
1001 }
1002
1003 if (skb_shinfo(skb)->gso_type &
1004 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1005 gso = true;
1006 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1007 }
1008 }
1009
1010 if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
1011 goto drop;
1012
1013 stats->tx_packets++;
1014 stats->tx_bytes += len;
1015
1016 if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
1017 netif_stop_queue(dev);
1018 smp_mb();
1019 if (unlikely(atomic_read(&ring->tx_free_count) >
1020 ring->tx_thresh))
1021 netif_wake_queue(dev);
1022 }
1023
1024 return NETDEV_TX_OK;
1025
1026drop:
1027 stats->tx_dropped++;
1028 dev_kfree_skb(skb);
1029 return NETDEV_TX_OK;
1030}
1031
1032static int mtk_poll_rx(struct napi_struct *napi, int budget,
1033 struct mtk_eth *eth, u32 rx_intr)
1034{
1035 struct mtk_soc_data *soc = eth->soc;
1036 struct mtk_rx_ring *ring = &eth->rx_ring[0];
1037 int idx = ring->rx_calc_idx;
1038 u32 checksum_bit;
1039 struct sk_buff *skb;
1040 u8 *data, *new_data;
1041 struct mtk_rx_dma *rxd, trxd;
1042 int done = 0, pad;
1043
1044 if (eth->soc->hw_features & NETIF_F_RXCSUM)
1045 checksum_bit = soc->checksum_bit;
1046 else
1047 checksum_bit = 0;
1048
1049 if (eth->soc->rx_2b_offset)
1050 pad = 0;
1051 else
1052 pad = NET_IP_ALIGN;
1053
1054 while (done < budget) {
1055 struct net_device *netdev;
1056 unsigned int pktlen;
1057 dma_addr_t dma_addr;
1058 int mac = 0;
1059
1060 idx = NEXT_RX_DESP_IDX(idx);
1061 rxd = &ring->rx_dma[idx];
1062 data = ring->rx_data[idx];
1063
1064 mtk_get_rxd(&trxd, rxd);
1065 if (!(trxd.rxd2 & RX_DMA_DONE))
1066 break;
1067
1068 /* find out which mac the packet came from; values start at 1 */
1069 if (eth->soc->mac_count > 1) {
1070 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1071 RX_DMA_FPORT_MASK;
1072 mac--;
1073 if (mac < 0 || mac >= eth->soc->mac_count)
1074 goto release_desc;
1075 }
1076
1077 netdev = eth->netdev[mac];
1078
1079 /* alloc new buffer */
1080 new_data = napi_alloc_frag(ring->frag_size);
1081 if (unlikely(!new_data || !netdev)) {
1082 netdev->stats.rx_dropped++;
1083 goto release_desc;
1084 }
1085 dma_addr = dma_map_single(&netdev->dev,
1086 new_data + NET_SKB_PAD + pad,
1087 ring->rx_buf_size,
1088 DMA_FROM_DEVICE);
1089 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
1090 skb_free_frag(new_data);
1091 goto release_desc;
1092 }
1093
1094 /* receive data */
1095 skb = build_skb(data, ring->frag_size);
1096 if (unlikely(!skb)) {
1097 put_page(virt_to_head_page(new_data));
1098 goto release_desc;
1099 }
1100 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1101
1102 dma_unmap_single(&netdev->dev, trxd.rxd1,
1103 ring->rx_buf_size, DMA_FROM_DEVICE);
1104 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1105 skb->dev = netdev;
1106 skb_put(skb, pktlen);
1107 if (trxd.rxd4 & checksum_bit)
1108 skb->ip_summed = CHECKSUM_UNNECESSARY;
1109 else
1110 skb_checksum_none_assert(skb);
1111 skb->protocol = eth_type_trans(skb, netdev);
1112
1113 netdev->stats.rx_packets++;
1114 netdev->stats.rx_bytes += pktlen;
1115
1116 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1117 RX_DMA_VID(trxd.rxd3))
1118 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1119 RX_DMA_VID(trxd.rxd3));
1120 napi_gro_receive(napi, skb);
1121
1122 ring->rx_data[idx] = new_data;
1123 rxd->rxd1 = (unsigned int)dma_addr;
1124
1125release_desc:
1126 if (eth->soc->rx_sg_dma)
1127 rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
1128 else
1129 rxd->rxd2 = RX_DMA_LSO;
1130
1131 ring->rx_calc_idx = idx;
1132 /* make sure that all changes to the dma ring are flushed before
1133 * we continue
1134 */
1135 wmb();
1136 if (eth->soc->dma_type == MTK_QDMA)
1137 mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
1138 else
1139 mtk_reg_w32(eth, ring->rx_calc_idx,
1140 MTK_REG_RX_CALC_IDX0);
1141 done++;
1142 }
1143
1144 if (done < budget)
1145 mtk_irq_ack(eth, rx_intr);
1146
1147 return done;
1148}
1149
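mtk_poll_rx() above never leaves a descriptor without a buffer: it allocates the replacement first, and only if that succeeds does it take the filled buffer, build an skb from it, and point the descriptor at the new one; on allocation failure the packet is dropped and the old buffer is recycled in place. A minimal sketch of that refill-before-consume pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLOTS 4
#define BUFSZ 64

static char *ring[SLOTS];

/* refill-before-consume: only take the old buffer if a replacement exists */
static char *rx_take(int idx)
{
	char *new_buf = malloc(BUFSZ);
	char *old;

	if (!new_buf)
		return NULL;	/* drop: the descriptor keeps its buffer */

	old = ring[idx];
	ring[idx] = new_buf;	/* descriptor now owns the fresh buffer */
	return old;		/* hand the filled buffer up the stack */
}

int main(void)
{
	char *pkt;
	int i;

	for (i = 0; i < SLOTS; i++)
		ring[i] = malloc(BUFSZ);

	strcpy(ring[1], "packet");
	pkt = rx_take(1);
	printf("%s\n", pkt ? pkt : "dropped");
	free(pkt);

	for (i = 0; i < SLOTS; i++)
		free(ring[i]);
	return 0;
}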
1150static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
1151{
1152 struct sk_buff *skb;
1153 struct mtk_tx_buf *tx_buf;
1154 int done = 0;
1155 u32 idx, hwidx;
1156 struct mtk_tx_ring *ring = &eth->tx_ring;
1157 unsigned int bytes = 0;
1158
1159 idx = ring->tx_free_idx;
1160 hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
1161
1162 while ((idx != hwidx) && budget) {
1163 tx_buf = &ring->tx_buf[idx];
1164 skb = tx_buf->skb;
1165
1166 if (!skb)
1167 break;
1168
1169 if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
1170 bytes += skb->len;
1171 done++;
1172 budget--;
1173 }
1174 mtk_txd_unmap(eth->dev, tx_buf);
1175 idx = NEXT_TX_DESP_IDX(idx);
1176 }
1177 ring->tx_free_idx = idx;
1178 atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
1179
1180 /* read the hw index again to make sure no new tx packets arrived */
1181 if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
1182 *tx_again = 1;
1183
1184 if (done)
1185 netdev_completed_queue(*eth->netdev, done, bytes);
1186
1187 return done;
1188}
1189
1190static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
1191{
1192 struct mtk_tx_ring *ring = &eth->tx_ring;
1193 struct mtk_tx_dma *desc;
1194 struct sk_buff *skb;
1195 struct mtk_tx_buf *tx_buf;
1196 int total = 0, done[MTK_MAX_DEVS];
1197 unsigned int bytes[MTK_MAX_DEVS];
1198 u32 cpu, dma;
1199 int i;
1200
1201 memset(done, 0, sizeof(done));
1202 memset(bytes, 0, sizeof(bytes));
1203
1204 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1205 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1206
1207 desc = mtk_qdma_phys_to_virt(ring, cpu);
1208
1209 while ((cpu != dma) && budget) {
1210 u32 next_cpu = desc->txd2;
1211 int mac;
1212
1213 desc = mtk_tx_next_qdma(ring, desc);
1214 if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
1215 break;
1216
1217 mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
1218 TX_DMA_FPORT_MASK;
1219 mac--;
1220
1221 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1222 skb = tx_buf->skb;
1223 if (!skb)
1224 break;
1225
1226 if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
1227 bytes[mac] += skb->len;
1228 done[mac]++;
1229 budget--;
1230 }
1231 mtk_txd_unmap(eth->dev, tx_buf);
1232
1233 ring->tx_last_free->txd2 = next_cpu;
1234 ring->tx_last_free = desc;
1235 atomic_inc(&ring->tx_free_count);
1236
1237 cpu = next_cpu;
1238 }
1239
1240 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1241
1242 /* read the hw index again to make sure no new tx packets arrived */
1243 if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
1244 *tx_again = true;
1245
1246 for (i = 0; i < eth->soc->mac_count; i++) {
1247 if (!done[i])
1248 continue;
1249 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1250 total += done[i];
1251 }
1252
1253 return total;
1254}
1255
1256static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
1257 bool *tx_again)
1258{
1259 struct mtk_tx_ring *ring = &eth->tx_ring;
1260 struct net_device *netdev = eth->netdev[0];
1261 int done;
1262
1263 done = eth->tx_ring.tx_poll(eth, budget, tx_again);
1264 if (!*tx_again)
1265 mtk_irq_ack(eth, tx_intr);
1266
1267 if (!done)
1268 return 0;
1269
1270 smp_mb();
1271 if (unlikely(!netif_queue_stopped(netdev)))
1272 return done;
1273
1274 if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
1275 netif_wake_queue(netdev);
1276
1277 return done;
1278}
1279
1280static void mtk_stats_update(struct mtk_eth *eth)
1281{
1282 int i;
1283
1284 for (i = 0; i < eth->soc->mac_count; i++) {
1285 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1286 continue;
1287 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1288 mtk_stats_update_mac(eth->mac[i]);
1289 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1290 }
1291 }
1292}
1293
1294static int mtk_poll(struct napi_struct *napi, int budget)
1295{
1296 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1297 u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
1298 int tx_done, rx_done;
1299 bool tx_again = false;
1300
1301 status = mtk_irq_pending(eth);
1302 mtk_status = mtk_irq_pending_status(eth);
1303 tx_intr = eth->soc->tx_int;
1304 rx_intr = eth->soc->rx_int;
1305 status_intr = eth->soc->status_int;
1306 tx_done = 0;
1307 rx_done = 0;
1308 tx_again = 0;
1309
1310 if (status & tx_intr)
1311 tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
1312
1313 if (status & rx_intr)
1314 rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
1315
1316 if (unlikely(mtk_status & status_intr)) {
1317 mtk_stats_update(eth);
1318 mtk_irq_ack_status(eth, status_intr);
1319 }
1320
1321 if (unlikely(netif_msg_intr(eth))) {
1322 mask = mtk_irq_enabled(eth);
1323 netdev_info(eth->netdev[0],
1324 "done tx %d, rx %d, intr 0x%08x/0x%x\n",
1325 tx_done, rx_done, status, mask);
1326 }
1327
1328 if (tx_again || rx_done == budget)
1329 return budget;
1330
1331 status = mtk_irq_pending(eth);
1332 if (status & (tx_intr | rx_intr))
1333 return budget;
1334
1335 napi_complete(napi);
1336 mtk_irq_enable(eth, tx_intr | rx_intr);
1337
1338 return rx_done;
1339}
1340
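mtk_poll() above follows the NAPI contract: if the budget was exhausted or tx work remains, it returns budget so the core keeps polling; otherwise it calls napi_complete() and re-enables the tx/rx interrupts. A deliberately simplified model of that contract (pending packet counts are made up):

#include <stdio.h>

#define BUDGET 64

static int pending = 150;	/* pretend packets waiting in the rx ring */
static int irq_enabled;

/* simplified contract: consume up to budget; if budget was exhausted,
 * stay in polling mode, otherwise re-enable interrupts and stop
 */
static int poll(int budget)
{
	int done = pending < budget ? pending : budget;

	pending -= done;
	if (done < budget)
		irq_enabled = 1;	/* napi_complete() + mtk_irq_enable() */
	return done;
}

int main(void)
{
	int done;

	do {
		done = poll(BUDGET);
		printf("polled %d, irq_enabled=%d\n", done, irq_enabled);
	} while (done == BUDGET);
	return 0;
}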
1341static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
1342{
1343 int i;
1344 struct mtk_tx_ring *ring = &eth->tx_ring;
1345
1346 ring->tx_ring_size = eth->soc->dma_ring_size;
1347 ring->tx_free_idx = 0;
1348 ring->tx_next_idx = 0;
1349 ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
1350 MAX_SKB_FRAGS);
1351
1352 ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
1353 GFP_KERNEL);
1354 if (!ring->tx_buf)
1355 goto no_tx_mem;
1356
1357 ring->tx_dma =
1358 dma_alloc_coherent(eth->dev,
1359 ring->tx_ring_size * sizeof(*ring->tx_dma),
1360 &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
1361 if (!ring->tx_dma)
1362 goto no_tx_mem;
1363
1364 for (i = 0; i < ring->tx_ring_size; i++) {
1365 ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
1366 ring->tx_dma[i].txd4 = eth->soc->txd4;
1367 }
1368
1369 atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
1370 ring->tx_map = mtk_pdma_tx_map;
1371 ring->tx_poll = mtk_pdma_tx_poll;
1372 ring->tx_clean = mtk_pdma_tx_clean;
1373
1374 /* make sure that all changes to the dma ring are flushed before we
1375 * continue
1376 */
1377 wmb();
1378
1379 mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
1380 mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
1381 mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
1382 mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
1383
1384 return 0;
1385
1386no_tx_mem:
1387 return -ENOMEM;
1388}
1389
1390static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
1391{
1392 struct mtk_tx_ring *ring = &eth->tx_ring;
1393 int i, sz = sizeof(*ring->tx_dma);
1394
1395 ring->tx_ring_size = eth->soc->dma_ring_size;
1396 ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
1397 GFP_KERNEL);
1398 if (!ring->tx_buf)
1399 goto no_tx_mem;
1400
1401 ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
1402 &ring->tx_phys,
1403 GFP_ATOMIC | __GFP_ZERO);
1404 if (!ring->tx_dma)
1405 goto no_tx_mem;
1406
1407 for (i = 0; i < ring->tx_ring_size; i++) {
1408 int next = (i + 1) % ring->tx_ring_size;
1409 u32 next_ptr = ring->tx_phys + next * sz;
1410
1411 ring->tx_dma[i].txd2 = next_ptr;
1412 ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
1413 }
1414
1415 atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
1416 ring->tx_next_free = &ring->tx_dma[0];
1417 ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
1418 ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
1419 MAX_SKB_FRAGS);
1420
1421 ring->tx_map = mtk_qdma_tx_map;
1422 ring->tx_poll = mtk_qdma_tx_poll;
1423 ring->tx_clean = mtk_qdma_tx_clean;
1424
1425 /* make sure that all changes to the dma ring are flushed before we
1426 * continue
1427 */
1428 wmb();
1429
1430 mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
1431 mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
1432 mtk_w32(eth,
1433 ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
1434 MTK_QTX_CRX_PTR);
1435 mtk_w32(eth,
1436 ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
1437 MTK_QTX_DRX_PTR);
1438
1439 return 0;
1440
1441no_tx_mem:
1442 return -ENOMEM;
1443}
1444
1445static int mtk_qdma_init(struct mtk_eth *eth, int ring)
1446{
1447 int err;
1448
1449 err = mtk_init_fq_dma(eth);
1450 if (err)
1451 return err;
1452
1453 err = mtk_qdma_tx_alloc_tx(eth);
1454 if (err)
1455 return err;
1456
1457 err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
1458 if (err)
1459 return err;
1460
1461 mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
1462 mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
1463 mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
1464 mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
1465 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
1466
1467 /* Enable random early drop and set drop threshold automatically */
1468 mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
1469 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
1470
1471 return 0;
1472}
1473
1474static int mtk_pdma_qdma_init(struct mtk_eth *eth)
1475{
1476 int err = mtk_qdma_init(eth, 1);
1477
1478 if (err)
1479 return err;
1480
1481 err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
1482 if (err)
1483 return err;
1484
1485 mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
1486 mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
1487 mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
1488 mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
1489
1490 return 0;
1491}
1492
1493static int mtk_pdma_init(struct mtk_eth *eth)
1494{
1495 struct mtk_rx_ring *ring = &eth->rx_ring[0];
1496 int err;
1497
1498 err = mtk_pdma_tx_alloc(eth);
1499 if (err)
1500 return err;
1501
1502 err = mtk_dma_rx_alloc(eth, ring);
1503 if (err)
1504 return err;
1505
1506 mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
1507 mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
1508 mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
1509 mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
1510
1511 return 0;
1512}
1513
1514static void mtk_dma_free(struct mtk_eth *eth)
1515{
1516 int i;
1517
1518 for (i = 0; i < eth->soc->mac_count; i++)
1519 if (eth->netdev[i])
1520 netdev_reset_queue(eth->netdev[i]);
1521 eth->tx_ring.tx_clean(eth);
1522 mtk_clean_rx(eth, &eth->rx_ring[0]);
1523 mtk_clean_rx(eth, &eth->rx_ring[1]);
1524 kfree(eth->scratch_head);
1525}
1526
1527static void mtk_tx_timeout(struct net_device *dev)
1528{
1529 struct mtk_mac *mac = netdev_priv(dev);
1530 struct mtk_eth *eth = mac->hw;
1531 struct mtk_tx_ring *ring = &eth->tx_ring;
1532
1533 eth->netdev[mac->id]->stats.tx_errors++;
1534 netif_err(eth, tx_err, dev,
1535 "transmit timed out\n");
1536 if (eth->soc->dma_type & MTK_PDMA) {
1537 netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
1538 mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
1539 netif_info(eth, drv, dev,
1540 "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
1541 0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
1542 mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
1543 mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
1544 mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
1545 ring->tx_free_idx,
1546 ring->tx_next_idx);
1547 }
1548 if (eth->soc->dma_type & MTK_QDMA) {
1549 netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
1550 mtk_r32(eth, MTK_QDMA_GLO_CFG));
1551 netif_info(eth, drv, dev,
1552 "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
1553 0, mtk_r32(eth, MTK_QTX_CTX_PTR),
1554 mtk_r32(eth, MTK_QTX_DTX_PTR),
1555 mtk_r32(eth, MTK_QTX_CRX_PTR),
1556 mtk_r32(eth, MTK_QTX_DRX_PTR),
1557 atomic_read(&ring->tx_free_count));
1558 }
1559 netif_info(eth, drv, dev,
1560 "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
1561 0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
1562 mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
1563 mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
1564 mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
1565
1566 schedule_work(&mac->pending_work);
1567}
1568
1569static irqreturn_t mtk_handle_irq(int irq, void *_eth)
1570{
1571 struct mtk_eth *eth = _eth;
1572 u32 status, int_mask;
1573
1574 status = mtk_irq_pending(eth);
1575 if (unlikely(!status))
1576 return IRQ_NONE;
1577
1578 int_mask = (eth->soc->rx_int | eth->soc->tx_int);
1579 if (likely(status & int_mask)) {
1580 if (likely(napi_schedule_prep(&eth->rx_napi)))
1581 __napi_schedule(&eth->rx_napi);
1582 } else {
1583 mtk_irq_ack(eth, status);
1584 }
1585 mtk_irq_disable(eth, int_mask);
1586
1587 return IRQ_HANDLED;
1588}
1589
1590#ifdef CONFIG_NET_POLL_CONTROLLER
1591static void mtk_poll_controller(struct net_device *dev)
1592{
1593 struct mtk_mac *mac = netdev_priv(dev);
1594 struct mtk_eth *eth = mac->hw;
1595 u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
1596
1597 mtk_irq_disable(eth, int_mask);
1598 mtk_handle_irq(dev->irq, dev);
1599 mtk_irq_enable(eth, int_mask);
1600}
1601#endif
1602
1603int mtk_set_clock_cycle(struct mtk_eth *eth)
1604{
1605 unsigned long sysclk = eth->sysclk;
1606
1607 sysclk /= MTK_US_CYC_CNT_DIVISOR;
1608 sysclk <<= MTK_US_CYC_CNT_SHIFT;
1609
1610 mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
1611 ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
1612 sysclk,
1613 MTK_GLO_CFG);
1614 return 0;
1615}
1616
1617void mtk_fwd_config(struct mtk_eth *eth)
1618{
1619 u32 fwd_cfg;
1620
1621 fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
1622
1623 /* disable jumbo frame */
1624 if (eth->soc->jumbo_frame)
1625 fwd_cfg &= ~MTK_GDM1_JMB_EN;
1626
1627 /* forward unicast/multicast/broadcast frames to the cpu */
1628 fwd_cfg &= ~0xffff;
1629
1630 mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
1631}
1632
1633void mtk_csum_config(struct mtk_eth *eth)
1634{
1635 if (eth->soc->hw_features & NETIF_F_RXCSUM)
1636 mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
1637 (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
1638 MTK_GDMA1_FWD_CFG);
1639 else
1640 mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
1641 ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
1642 MTK_GDMA1_FWD_CFG);
1643 if (eth->soc->hw_features & NETIF_F_IP_CSUM)
1644 mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
1645 (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
1646 MTK_CDMA_CSG_CFG);
1647 else
1648 mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
1649 ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
1650 MTK_CDMA_CSG_CFG);
1651}
1652
1653static int mtk_start_dma(struct mtk_eth *eth)
1654{
1655 unsigned long flags;
1656 u32 val;
1657 int err;
1658
1659 if (eth->soc->dma_type == MTK_PDMA)
1660 err = mtk_pdma_init(eth);
1661 else if (eth->soc->dma_type == MTK_QDMA)
1662 err = mtk_qdma_init(eth, 0);
1663 else
1664 err = mtk_pdma_qdma_init(eth);
1665 if (err) {
1666 mtk_dma_free(eth);
1667 return err;
1668 }
1669
1670 spin_lock_irqsave(&eth->page_lock, flags);
1671
1672 val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
1673 if (eth->soc->rx_2b_offset)
1674 val |= MTK_RX_2B_OFFSET;
1675 val |= eth->soc->pdma_glo_cfg;
1676
1677 if (eth->soc->dma_type & MTK_PDMA)
1678 mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
1679
1680 if (eth->soc->dma_type & MTK_QDMA)
1681 mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
1682
1683 spin_unlock_irqrestore(&eth->page_lock, flags);
1684
1685 return 0;
1686}
1687
1688static int mtk_open(struct net_device *dev)
1689{
1690 struct mtk_mac *mac = netdev_priv(dev);
1691 struct mtk_eth *eth = mac->hw;
1692
1693 dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
1694
1695 if (!atomic_read(&eth->dma_refcnt)) {
1696 int err = mtk_start_dma(eth);
1697
1698 if (err)
1699 return err;
1700
1701 napi_enable(&eth->rx_napi);
1702 mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
1703 }
1704 atomic_inc(&eth->dma_refcnt);
1705
1706 if (eth->phy)
1707 eth->phy->start(mac);
1708
1709 if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
1710 netif_carrier_on(dev);
1711
1712 netif_start_queue(dev);
1713 eth->soc->fwd_config(eth);
1714
1715 return 0;
1716}
1717
1718static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1719{
1720 unsigned long flags;
1721 u32 val;
1722 int i;
1723
1724 /* stop the dma engine */
1725 spin_lock_irqsave(&eth->page_lock, flags);
1726 val = mtk_r32(eth, glo_cfg);
1727 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1728 glo_cfg);
1729 spin_unlock_irqrestore(&eth->page_lock, flags);
1730
1731 /* wait for dma stop */
1732 for (i = 0; i < 10; i++) {
1733 val = mtk_r32(eth, glo_cfg);
1734 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1735 msleep(20);
1736 continue;
1737 }
1738 break;
1739 }
1740}
1741
1742static int mtk_stop(struct net_device *dev)
1743{
1744 struct mtk_mac *mac = netdev_priv(dev);
1745 struct mtk_eth *eth = mac->hw;
1746
1747 netif_tx_disable(dev);
1748 if (eth->phy)
1749 eth->phy->stop(mac);
1750
1751 if (!atomic_dec_and_test(&eth->dma_refcnt))
1752 return 0;
1753
1754 mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
1755 napi_disable(&eth->rx_napi);
1756
1757 if (eth->soc->dma_type & MTK_PDMA)
1758 mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
1759
1760 if (eth->soc->dma_type & MTK_QDMA)
1761 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
1762
1763 mtk_dma_free(eth);
1764
1765 return 0;
1766}
1767
1768static int __init mtk_init_hw(struct mtk_eth *eth)
1769{
1770 int i, err;
1771
1772 eth->soc->reset_fe(eth);
1773
1774 if (eth->soc->switch_init)
1775 if (eth->soc->switch_init(eth)) {
1776 dev_err(eth->dev, "failed to initialize switch core\n");
1777 return -ENODEV;
1778 }
1779
1780 err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
1781 dev_name(eth->dev), eth);
1782 if (err)
1783 return err;
1784
1785 err = mtk_mdio_init(eth);
1786 if (err)
1787 return err;
1788
1789 /* disable delay and normal interrupt */
1790 mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
1791 if (eth->soc->dma_type & MTK_QDMA)
1792 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
1793 mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
1794
1795	/* frame engine will push the VLAN tag according to the VIDX field in the Tx desc */
1796 if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
1797 for (i = 0; i < 16; i += 2)
1798 mtk_w32(eth, ((i + 1) << 16) + i,
1799 mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
1800 (i * 2));
1801
1802 if (eth->soc->fwd_config(eth))
1803 dev_err(eth->dev, "unable to get clock\n");
1804
1805 if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
1806 mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
1807 mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
1808 }
1809
1810 return 0;
1811}
1812
1813static int __init mtk_init(struct net_device *dev)
1814{
1815 struct mtk_mac *mac = netdev_priv(dev);
1816 struct mtk_eth *eth = mac->hw;
1817 struct device_node *port;
1818 const char *mac_addr;
1819 int err;
1820
1821 mac_addr = of_get_mac_address(mac->of_node);
1822 if (mac_addr)
1823 ether_addr_copy(dev->dev_addr, mac_addr);
1824
1825	/* If the mac address is invalid, use a random mac address */
1826 if (!is_valid_ether_addr(dev->dev_addr)) {
1827 eth_hw_addr_random(dev);
1828 dev_err(eth->dev, "generated random MAC address %pM\n",
1829 dev->dev_addr);
1830 }
1831 mac->hw->soc->set_mac(mac, dev->dev_addr);
1832
1833 if (eth->soc->port_init)
1834 for_each_child_of_node(mac->of_node, port)
1835 if (of_device_is_compatible(port,
1836 "mediatek,eth-port") &&
1837 of_device_is_available(port))
1838 eth->soc->port_init(eth, mac, port);
1839
1840 if (eth->phy) {
1841 err = eth->phy->connect(mac);
1842 if (err)
1843 return err;
1844 }
1845
1846 return 0;
1847}
1848
1849static void mtk_uninit(struct net_device *dev)
1850{
1851 struct mtk_mac *mac = netdev_priv(dev);
1852 struct mtk_eth *eth = mac->hw;
1853
1854 if (eth->phy)
1855 eth->phy->disconnect(mac);
1856 mtk_mdio_cleanup(eth);
1857
1858 mtk_irq_disable(eth, ~0);
1859 free_irq(dev->irq, dev);
1860}
1861
1862static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1863{
1864 struct mtk_mac *mac = netdev_priv(dev);
1865
1866 if (!mac->phy_dev)
1867 return -ENODEV;
1868
1869 switch (cmd) {
1870 case SIOCGMIIPHY:
1871 case SIOCGMIIREG:
1872 case SIOCSMIIREG:
1873 return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
1874 default:
1875 break;
1876 }
1877
1878 return -EOPNOTSUPP;
1879}
1880
1881static int mtk_change_mtu(struct net_device *dev, int new_mtu)
1882{
1883 struct mtk_mac *mac = netdev_priv(dev);
1884 struct mtk_eth *eth = mac->hw;
1885 int frag_size, old_mtu;
1886 u32 fwd_cfg;
1887
1888 if (!eth->soc->jumbo_frame)
1889 return eth_change_mtu(dev, new_mtu);
1890
1891 frag_size = mtk_max_frag_size(new_mtu);
1892 if (new_mtu < 68 || frag_size > PAGE_SIZE)
1893 return -EINVAL;
1894
1895 old_mtu = dev->mtu;
1896 dev->mtu = new_mtu;
1897
1898 /* return early if the buffer sizes will not change */
1899 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1900 return 0;
1901 if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
1902 return 0;
1903
1904 if (new_mtu <= ETH_DATA_LEN)
1905 eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
1906 else
1907 eth->rx_ring[0].frag_size = PAGE_SIZE;
1908 eth->rx_ring[0].rx_buf_size =
1909 mtk_max_buf_size(eth->rx_ring[0].frag_size);
1910
1911 if (!netif_running(dev))
1912 return 0;
1913
1914 mtk_stop(dev);
1915 fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
1916 if (new_mtu <= ETH_DATA_LEN) {
1917 fwd_cfg &= ~MTK_GDM1_JMB_EN;
1918 } else {
1919 fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
1920 fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
1921 MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
1922 }
1923 mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
1924
1925 return mtk_open(dev);
1926}
1927
1928static void mtk_pending_work(struct work_struct *work)
1929{
1930 struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
1931 struct mtk_eth *eth = mac->hw;
1932 struct net_device *dev = eth->netdev[mac->id];
1933 int err;
1934
1935 rtnl_lock();
1936 mtk_stop(dev);
1937
1938 err = mtk_open(dev);
1939 if (err) {
1940 netif_alert(eth, ifup, dev,
1941 "Driver up/down cycle failed, closing device.\n");
1942 dev_close(dev);
1943 }
1944 rtnl_unlock();
1945}
1946
1947static int mtk_cleanup(struct mtk_eth *eth)
1948{
1949 int i;
1950
1951 for (i = 0; i < eth->soc->mac_count; i++) {
1952		struct mtk_mac *mac;
1953
1954		if (!eth->netdev[i])
1955			continue;
1956		mac = netdev_priv(eth->netdev[i]);
1957		unregister_netdev(eth->netdev[i]);
1958		cancel_work_sync(&mac->pending_work);
1959		free_netdev(eth->netdev[i]);
1960 }
1961
1962 return 0;
1963}
1964
1965static const struct net_device_ops mtk_netdev_ops = {
1966 .ndo_init = mtk_init,
1967 .ndo_uninit = mtk_uninit,
1968 .ndo_open = mtk_open,
1969 .ndo_stop = mtk_stop,
1970 .ndo_start_xmit = mtk_start_xmit,
1971 .ndo_set_mac_address = mtk_set_mac_address,
1972 .ndo_validate_addr = eth_validate_addr,
1973 .ndo_do_ioctl = mtk_do_ioctl,
1974 .ndo_change_mtu = mtk_change_mtu,
1975 .ndo_tx_timeout = mtk_tx_timeout,
1976 .ndo_get_stats64 = mtk_get_stats64,
1977 .ndo_vlan_rx_add_vid = mtk_vlan_rx_add_vid,
1978 .ndo_vlan_rx_kill_vid = mtk_vlan_rx_kill_vid,
1979#ifdef CONFIG_NET_POLL_CONTROLLER
1980 .ndo_poll_controller = mtk_poll_controller,
1981#endif
1982};
1983
1984static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1985{
1986 struct mtk_mac *mac;
1987 const __be32 *_id = of_get_property(np, "reg", NULL);
1988 int id, err;
1989
1990 if (!_id) {
1991 dev_err(eth->dev, "missing mac id\n");
1992 return -EINVAL;
1993 }
1994 id = be32_to_cpup(_id);
1995 if (id >= eth->soc->mac_count || eth->netdev[id]) {
1996 dev_err(eth->dev, "%d is not a valid mac id\n", id);
1997 return -EINVAL;
1998 }
1999
2000 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2001 if (!eth->netdev[id]) {
2002 dev_err(eth->dev, "alloc_etherdev failed\n");
2003 return -ENOMEM;
2004 }
2005 mac = netdev_priv(eth->netdev[id]);
2006 eth->mac[id] = mac;
2007 mac->id = id;
2008 mac->hw = eth;
2009 mac->of_node = np;
2010 INIT_WORK(&mac->pending_work, mtk_pending_work);
2011
2012 if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
2013 mac->hw_stats = devm_kzalloc(eth->dev,
2014 sizeof(*mac->hw_stats),
2015 GFP_KERNEL);
2016 if (!mac->hw_stats) {
2017 err = -ENOMEM;
2018 goto free_netdev;
2019 }
2020 spin_lock_init(&mac->hw_stats->stats_lock);
2021 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2022 }
2023
2024 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2025 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2026 eth->netdev[id]->base_addr = (unsigned long)eth->base;
2027
2028 if (eth->soc->init_data)
2029 eth->soc->init_data(eth->soc, eth->netdev[id]);
2030
2031 eth->netdev[id]->vlan_features = eth->soc->hw_features &
2032 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2033 eth->netdev[id]->features |= eth->soc->hw_features;
2034
2035 if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
2036 eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2037
2038 mtk_set_ethtool_ops(eth->netdev[id]);
2039
2040 err = register_netdev(eth->netdev[id]);
2041 if (err) {
2042 dev_err(eth->dev, "error bringing up device\n");
2043 err = -ENOMEM;
2044 goto free_netdev;
2045 }
2046 eth->netdev[id]->irq = eth->irq;
2047 netif_info(eth, probe, eth->netdev[id],
2048 "mediatek frame engine at 0x%08lx, irq %d\n",
2049 eth->netdev[id]->base_addr, eth->netdev[id]->irq);
2050
2051 return 0;
2052
2053free_netdev:
2054 free_netdev(eth->netdev[id]);
2055 return err;
2056}
2057
2058static int mtk_probe(struct platform_device *pdev)
2059{
2060 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2061 const struct of_device_id *match;
2062 struct device_node *mac_np;
2063 struct mtk_soc_data *soc;
2064 struct mtk_eth *eth;
2065 struct clk *sysclk;
2066 int err;
2067
2068 device_reset(&pdev->dev);
2069
2070 match = of_match_device(of_mtk_match, &pdev->dev);
2071 soc = (struct mtk_soc_data *)match->data;
2072
2073 if (soc->reg_table)
2074 mtk_reg_table = soc->reg_table;
2075
2076 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2077 if (!eth)
2078 return -ENOMEM;
2079
2080 eth->base = devm_ioremap_resource(&pdev->dev, res);
2081 if (IS_ERR(eth->base))
2082 return PTR_ERR(eth->base);
2083
2084 spin_lock_init(&eth->page_lock);
2085
2086 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2087 "mediatek,ethsys");
2088 if (IS_ERR(eth->ethsys))
2089 return PTR_ERR(eth->ethsys);
2090
2091 eth->irq = platform_get_irq(pdev, 0);
2092 if (eth->irq < 0) {
2093 dev_err(&pdev->dev, "no IRQ resource found\n");
2094 return -ENXIO;
2095 }
2096
2097 sysclk = devm_clk_get(&pdev->dev, NULL);
2098 if (IS_ERR(sysclk)) {
2099 dev_err(&pdev->dev,
2100 "the clock is not defined in the devicetree\n");
2101 return -ENXIO;
2102 }
2103 eth->sysclk = clk_get_rate(sysclk);
2104
2105 eth->switch_np = of_parse_phandle(pdev->dev.of_node,
2106 "mediatek,switch", 0);
2107 if (soc->has_switch && !eth->switch_np) {
2108 dev_err(&pdev->dev, "failed to read switch phandle\n");
2109 return -ENODEV;
2110 }
2111
2112 eth->dev = &pdev->dev;
2113 eth->soc = soc;
2114 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
2115
2116 err = mtk_init_hw(eth);
2117 if (err)
2118 return err;
2119
2120 if (eth->soc->mac_count > 1) {
2121 for_each_child_of_node(pdev->dev.of_node, mac_np) {
2122 if (!of_device_is_compatible(mac_np,
2123 "mediatek,eth-mac"))
2124 continue;
2125
2126 if (!of_device_is_available(mac_np))
2127 continue;
2128
2129 err = mtk_add_mac(eth, mac_np);
2130 if (err)
2131 goto err_free_dev;
2132 }
2133
2134 init_dummy_netdev(&eth->dummy_dev);
2135 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
2136 soc->napi_weight);
2137 } else {
2138 err = mtk_add_mac(eth, pdev->dev.of_node);
2139 if (err)
2140 goto err_free_dev;
2141 netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
2142 soc->napi_weight);
2143 }
2144
2145 platform_set_drvdata(pdev, eth);
2146
2147 return 0;
2148
2149err_free_dev:
2150 mtk_cleanup(eth);
2151 return err;
2152}
2153
2154static int mtk_remove(struct platform_device *pdev)
2155{
2156 struct mtk_eth *eth = platform_get_drvdata(pdev);
2157
2158 netif_napi_del(&eth->rx_napi);
2159 mtk_cleanup(eth);
2160 platform_set_drvdata(pdev, NULL);
2161
2162 return 0;
2163}
2164
2165static struct platform_driver mtk_driver = {
2166 .probe = mtk_probe,
2167 .remove = mtk_remove,
2168 .driver = {
2169 .name = "mtk_soc_eth",
2170 .of_match_table = of_mtk_match,
2171 },
2172};
2173
2174module_platform_driver(mtk_driver);
2175
2176MODULE_LICENSE("GPL");
2177MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
2178MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644
index e6ed80433f49..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.h
+++ /dev/null
@@ -1,716 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#ifndef MTK_ETH_H
16#define MTK_ETH_H
17
18#include <linux/mii.h>
19#include <linux/interrupt.h>
20#include <linux/netdevice.h>
21#include <linux/dma-mapping.h>
22#include <linux/phy.h>
23#include <linux/ethtool.h>
24#include <linux/version.h>
25#include <linux/atomic.h>
26
27/* these registers have different offsets depending on the SoC. we use a lookup
28 * table for these
29 */
30enum mtk_reg {
31 MTK_REG_PDMA_GLO_CFG = 0,
32 MTK_REG_PDMA_RST_CFG,
33 MTK_REG_DLY_INT_CFG,
34 MTK_REG_TX_BASE_PTR0,
35 MTK_REG_TX_MAX_CNT0,
36 MTK_REG_TX_CTX_IDX0,
37 MTK_REG_TX_DTX_IDX0,
38 MTK_REG_RX_BASE_PTR0,
39 MTK_REG_RX_MAX_CNT0,
40 MTK_REG_RX_CALC_IDX0,
41 MTK_REG_RX_DRX_IDX0,
42 MTK_REG_MTK_INT_ENABLE,
43 MTK_REG_MTK_INT_STATUS,
44 MTK_REG_MTK_DMA_VID_BASE,
45 MTK_REG_MTK_COUNTER_BASE,
46 MTK_REG_MTK_RST_GL,
47 MTK_REG_MTK_INT_STATUS2,
48 MTK_REG_COUNT
49};
50
51/* delayed interrupt bits */
52#define MTK_DELAY_EN_INT 0x80
53#define MTK_DELAY_MAX_INT 0x04
54#define MTK_DELAY_MAX_TOUT 0x04
55#define MTK_DELAY_TIME 20
56#define MTK_DELAY_CHAN (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
57 | MTK_DELAY_MAX_TOUT)
58#define MTK_DELAY_INIT ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
59#define MTK_PSE_FQFC_CFG_INIT 0x80504000
60#define MTK_PSE_FQFC_CFG_256Q 0xff908000
61
62/* interrupt bits */
63#define MTK_CNT_PPE_AF BIT(31)
64#define MTK_CNT_GDM_AF BIT(29)
65#define MTK_PSE_P2_FC BIT(26)
66#define MTK_PSE_BUF_DROP BIT(24)
67#define MTK_GDM_OTHER_DROP BIT(23)
68#define MTK_PSE_P1_FC BIT(22)
69#define MTK_PSE_P0_FC BIT(21)
70#define MTK_PSE_FQ_EMPTY BIT(20)
71#define MTK_GE1_STA_CHG BIT(18)
72#define MTK_TX_COHERENT BIT(17)
73#define MTK_RX_COHERENT BIT(16)
74#define MTK_TX_DONE_INT3 BIT(11)
75#define MTK_TX_DONE_INT2 BIT(10)
76#define MTK_TX_DONE_INT1 BIT(9)
77#define MTK_TX_DONE_INT0 BIT(8)
78#define MTK_RX_DONE_INT0 BIT(2)
79#define MTK_TX_DLY_INT BIT(1)
80#define MTK_RX_DLY_INT BIT(0)
81
82#define MTK_RX_DONE_INT MTK_RX_DONE_INT0
83#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
84 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
85
86#define RT5350_RX_DLY_INT BIT(30)
87#define RT5350_TX_DLY_INT BIT(28)
88#define RT5350_RX_DONE_INT1 BIT(17)
89#define RT5350_RX_DONE_INT0 BIT(16)
90#define RT5350_TX_DONE_INT3 BIT(3)
91#define RT5350_TX_DONE_INT2 BIT(2)
92#define RT5350_TX_DONE_INT1 BIT(1)
93#define RT5350_TX_DONE_INT0 BIT(0)
94
95#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
96#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
97 RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
98
99/* registers */
100#define MTK_GDMA_OFFSET 0x0020
101#define MTK_PSE_OFFSET 0x0040
102#define MTK_GDMA2_OFFSET 0x0060
103#define MTK_CDMA_OFFSET 0x0080
104#define MTK_DMA_VID0 0x00a8
105#define MTK_PDMA_OFFSET 0x0100
106#define MTK_PPE_OFFSET 0x0200
107#define MTK_CMTABLE_OFFSET 0x0400
108#define MTK_POLICYTABLE_OFFSET 0x1000
109
110#define MT7621_GDMA_OFFSET 0x0500
111#define MT7620_GDMA_OFFSET 0x0600
112
113#define RT5350_PDMA_OFFSET 0x0800
114#define RT5350_SDM_OFFSET 0x0c00
115
116#define MTK_MDIO_ACCESS 0x00
117#define MTK_MDIO_CFG 0x04
118#define MTK_GLO_CFG 0x08
119#define MTK_RST_GL 0x0C
120#define MTK_INT_STATUS 0x10
121#define MTK_INT_ENABLE 0x14
122#define MTK_MDIO_CFG2 0x18
123#define MTK_FOC_TS_T 0x1C
124
125#define MTK_GDMA1_FWD_CFG (MTK_GDMA_OFFSET + 0x00)
126#define MTK_GDMA1_SCH_CFG (MTK_GDMA_OFFSET + 0x04)
127#define MTK_GDMA1_SHPR_CFG (MTK_GDMA_OFFSET + 0x08)
128#define MTK_GDMA1_MAC_ADRL (MTK_GDMA_OFFSET + 0x0C)
129#define MTK_GDMA1_MAC_ADRH (MTK_GDMA_OFFSET + 0x10)
130
131#define MTK_GDMA2_FWD_CFG (MTK_GDMA2_OFFSET + 0x00)
132#define MTK_GDMA2_SCH_CFG (MTK_GDMA2_OFFSET + 0x04)
133#define MTK_GDMA2_SHPR_CFG (MTK_GDMA2_OFFSET + 0x08)
134#define MTK_GDMA2_MAC_ADRL (MTK_GDMA2_OFFSET + 0x0C)
135#define MTK_GDMA2_MAC_ADRH (MTK_GDMA2_OFFSET + 0x10)
136
137#define MTK_PSE_FQ_CFG (MTK_PSE_OFFSET + 0x00)
138#define MTK_CDMA_FC_CFG (MTK_PSE_OFFSET + 0x04)
139#define MTK_GDMA1_FC_CFG (MTK_PSE_OFFSET + 0x08)
140#define MTK_GDMA2_FC_CFG (MTK_PSE_OFFSET + 0x0C)
141
142#define MTK_CDMA_CSG_CFG (MTK_CDMA_OFFSET + 0x00)
143#define MTK_CDMA_SCH_CFG (MTK_CDMA_OFFSET + 0x04)
144
145#define MT7621_GDMA_FWD_CFG(x) (MT7621_GDMA_OFFSET + (x * 0x1000))
146
147/* FIXME this might be different for different SOCs */
148#define MT7620_GDMA1_FWD_CFG (MT7621_GDMA_OFFSET + 0x00)
149
150#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00)
151#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04)
152#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08)
153#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C)
154#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10)
155#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14)
156#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18)
157#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C)
158#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20)
159#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24)
160#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28)
161#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C)
162#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30)
163#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34)
164#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38)
165#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C)
166#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100)
167#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104)
168#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108)
169#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C)
170#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110)
171#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114)
172#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118)
173#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C)
174#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204)
175#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208)
176#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c)
177#define RT5350_MTK_INT_STATUS (RT5350_PDMA_OFFSET + 0x220)
178#define RT5350_MTK_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228)
179#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280)
180
181#define MTK_PDMA_GLO_CFG (MTK_PDMA_OFFSET + 0x00)
182#define MTK_PDMA_RST_CFG (MTK_PDMA_OFFSET + 0x04)
183#define MTK_PDMA_SCH_CFG (MTK_PDMA_OFFSET + 0x08)
184#define MTK_DLY_INT_CFG (MTK_PDMA_OFFSET + 0x0C)
185#define MTK_TX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x10)
186#define MTK_TX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x14)
187#define MTK_TX_CTX_IDX0 (MTK_PDMA_OFFSET + 0x18)
188#define MTK_TX_DTX_IDX0 (MTK_PDMA_OFFSET + 0x1C)
189#define MTK_TX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x20)
190#define MTK_TX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x24)
191#define MTK_TX_CTX_IDX1 (MTK_PDMA_OFFSET + 0x28)
192#define MTK_TX_DTX_IDX1 (MTK_PDMA_OFFSET + 0x2C)
193#define MTK_RX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x30)
194#define MTK_RX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x34)
195#define MTK_RX_CALC_IDX0 (MTK_PDMA_OFFSET + 0x38)
196#define MTK_RX_DRX_IDX0 (MTK_PDMA_OFFSET + 0x3C)
197#define MTK_TX_BASE_PTR2 (MTK_PDMA_OFFSET + 0x40)
198#define MTK_TX_MAX_CNT2 (MTK_PDMA_OFFSET + 0x44)
199#define MTK_TX_CTX_IDX2 (MTK_PDMA_OFFSET + 0x48)
200#define MTK_TX_DTX_IDX2 (MTK_PDMA_OFFSET + 0x4C)
201#define MTK_TX_BASE_PTR3 (MTK_PDMA_OFFSET + 0x50)
202#define MTK_TX_MAX_CNT3 (MTK_PDMA_OFFSET + 0x54)
203#define MTK_TX_CTX_IDX3 (MTK_PDMA_OFFSET + 0x58)
204#define MTK_TX_DTX_IDX3 (MTK_PDMA_OFFSET + 0x5C)
205#define MTK_RX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x60)
206#define MTK_RX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x64)
207#define MTK_RX_CALC_IDX1 (MTK_PDMA_OFFSET + 0x68)
208#define MTK_RX_DRX_IDX1 (MTK_PDMA_OFFSET + 0x6C)
209
210/* Switch DMA configuration */
211#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00)
212#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04)
213#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08)
214#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C)
215#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10)
216#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100)
217#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104)
218#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108)
219#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C)
220#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110)
221
222#define RT5350_SDM_ICS_EN BIT(16)
223#define RT5350_SDM_TCS_EN BIT(17)
224#define RT5350_SDM_UCS_EN BIT(18)
225
226/* QDMA registers */
227#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
228#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
229#define MTK_QRX_BASE_PTR0 0x1900
230#define MTK_QRX_MAX_CNT0 0x1904
231#define MTK_QRX_CRX_IDX0 0x1908
232#define MTK_QRX_DRX_IDX0 0x190C
233#define MTK_QDMA_GLO_CFG 0x1A04
234#define MTK_QDMA_RST_IDX 0x1A08
235#define MTK_QDMA_DELAY_INT 0x1A0C
236#define MTK_QDMA_FC_THRES 0x1A10
237#define MTK_QMTK_INT_STATUS 0x1A18
238#define MTK_QMTK_INT_ENABLE 0x1A1C
239#define MTK_QDMA_HRED2 0x1A44
240
241#define MTK_QTX_CTX_PTR 0x1B00
242#define MTK_QTX_DTX_PTR 0x1B04
243
244#define MTK_QTX_CRX_PTR 0x1B10
245#define MTK_QTX_DRX_PTR 0x1B14
246
247#define MTK_QDMA_FQ_HEAD 0x1B20
248#define MTK_QDMA_FQ_TAIL 0x1B24
249#define MTK_QDMA_FQ_CNT 0x1B28
250#define MTK_QDMA_FQ_BLEN 0x1B2C
251
252#define QDMA_PAGE_SIZE 2048
253#define QDMA_TX_OWNER_CPU BIT(31)
254#define QDMA_TX_SWC BIT(14)
255#define TX_QDMA_SDL(_x) (((_x) & 0x3fff) << 16)
256#define QDMA_RES_THRES 4
257
258/* MDIO_CFG register bits */
259#define MTK_MDIO_CFG_AUTO_POLL_EN BIT(29)
260#define MTK_MDIO_CFG_GP1_BP_EN BIT(16)
261#define MTK_MDIO_CFG_GP1_FRC_EN BIT(15)
262#define MTK_MDIO_CFG_GP1_SPEED_10 (0 << 13)
263#define MTK_MDIO_CFG_GP1_SPEED_100 (1 << 13)
264#define MTK_MDIO_CFG_GP1_SPEED_1000 (2 << 13)
265#define MTK_MDIO_CFG_GP1_DUPLEX BIT(12)
266#define MTK_MDIO_CFG_GP1_FC_TX BIT(11)
267#define MTK_MDIO_CFG_GP1_FC_RX BIT(10)
268#define MTK_MDIO_CFG_GP1_LNK_DWN BIT(9)
269#define MTK_MDIO_CFG_GP1_AN_FAIL BIT(8)
270#define MTK_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6)
271#define MTK_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6)
272#define MTK_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6)
273#define MTK_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6)
274#define MTK_MDIO_CFG_TURBO_MII_FREQ BIT(5)
275#define MTK_MDIO_CFG_TURBO_MII_MODE BIT(4)
276#define MTK_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2)
277#define MTK_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2)
278#define MTK_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2)
279#define MTK_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2)
280#define MTK_MDIO_CFG_TX_CLK_SKEW_0 0
281#define MTK_MDIO_CFG_TX_CLK_SKEW_200 1
282#define MTK_MDIO_CFG_TX_CLK_SKEW_400 2
283#define MTK_MDIO_CFG_TX_CLK_SKEW_INV 3
284
285/* uni-cast port */
286#define MTK_GDM1_JMB_LEN_MASK 0xf
287#define MTK_GDM1_JMB_LEN_SHIFT 28
288#define MTK_GDM1_ICS_EN BIT(22)
289#define MTK_GDM1_TCS_EN BIT(21)
290#define MTK_GDM1_UCS_EN BIT(20)
291#define MTK_GDM1_JMB_EN BIT(19)
292#define MTK_GDM1_STRPCRC BIT(16)
293#define MTK_GDM1_UFRC_P_CPU (0 << 12)
294#define MTK_GDM1_UFRC_P_GDMA1 (1 << 12)
295#define MTK_GDM1_UFRC_P_PPE (6 << 12)
296
297/* checksums */
298#define MTK_ICS_GEN_EN BIT(2)
299#define MTK_UCS_GEN_EN BIT(1)
300#define MTK_TCS_GEN_EN BIT(0)
301
302/* dma mode */
303#define MTK_PDMA BIT(0)
304#define MTK_QDMA BIT(1)
305#define MTK_PDMA_RX_QDMA_TX (MTK_PDMA | MTK_QDMA)
306
307/* dma ring */
308#define MTK_PST_DRX_IDX0 BIT(16)
309#define MTK_PST_DTX_IDX3 BIT(3)
310#define MTK_PST_DTX_IDX2 BIT(2)
311#define MTK_PST_DTX_IDX1 BIT(1)
312#define MTK_PST_DTX_IDX0 BIT(0)
313
314#define MTK_RX_2B_OFFSET BIT(31)
315#define MTK_TX_WB_DDONE BIT(6)
316#define MTK_RX_DMA_BUSY BIT(3)
317#define MTK_TX_DMA_BUSY BIT(1)
318#define MTK_RX_DMA_EN BIT(2)
319#define MTK_TX_DMA_EN BIT(0)
320
321#define MTK_PDMA_SIZE_4DWORDS (0 << 4)
322#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
323#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
324
325#define MTK_US_CYC_CNT_MASK 0xff
326#define MTK_US_CYC_CNT_SHIFT 0x8
327#define MTK_US_CYC_CNT_DIVISOR 1000000
328
329/* PDMA descriptor rxd2 */
330#define RX_DMA_DONE BIT(31)
331#define RX_DMA_LSO BIT(30)
332#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
333#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
334#define RX_DMA_TAG BIT(15)
335
336/* PDMA descriptor rxd3 */
337#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff)
338#define RX_DMA_VID(_x) ((_x) & 0xfff)
339
340/* PDMA descriptor rxd4 */
341#define RX_DMA_L4VALID BIT(30)
342#define RX_DMA_FPORT_SHIFT 19
343#define RX_DMA_FPORT_MASK 0x7
344
345struct mtk_rx_dma {
346 unsigned int rxd1;
347 unsigned int rxd2;
348 unsigned int rxd3;
349 unsigned int rxd4;
350} __packed __aligned(4);
351
352/* PDMA tx descriptor bits */
353#define TX_DMA_BUF_LEN 0x3fff
354#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16)
355#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16)
356#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
357#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN)
358#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
359#define TX_DMA_LS1 BIT(14)
360#define TX_DMA_LS0 BIT(30)
361#define TX_DMA_DONE BIT(31)
362#define TX_DMA_FPORT_SHIFT 25
363#define TX_DMA_FPORT_MASK 0x7
364#define TX_DMA_INS_VLAN_MT7621 BIT(16)
365#define TX_DMA_INS_VLAN BIT(7)
366#define TX_DMA_INS_PPPOE BIT(12)
367#define TX_DMA_TAG BIT(15)
368#define TX_DMA_TAG_MASK BIT(15)
369#define TX_DMA_QN(_x) ((_x) << 16)
370#define TX_DMA_PN(_x) ((_x) << 24)
371#define TX_DMA_QN_MASK TX_DMA_QN(0x7)
372#define TX_DMA_PN_MASK TX_DMA_PN(0x7)
373#define TX_DMA_UDF BIT(20)
374#define TX_DMA_CHKSUM (0x7 << 29)
375#define TX_DMA_TSO BIT(28)
376#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
377
378/* frame engine counters */
379#define MTK_PPE_AC_BCNT0 (MTK_CMTABLE_OFFSET + 0x00)
380#define MTK_GDMA1_TX_GBCNT (MTK_CMTABLE_OFFSET + 0x300)
381#define MTK_GDMA2_TX_GBCNT (MTK_GDMA1_TX_GBCNT + 0x40)
382
383/* phy device flags */
384#define MTK_PHY_FLAG_PORT BIT(0)
385#define MTK_PHY_FLAG_ATTACH BIT(1)
386
387struct mtk_tx_dma {
388 unsigned int txd1;
389 unsigned int txd2;
390 unsigned int txd3;
391 unsigned int txd4;
392} __packed __aligned(4);
393
394struct mtk_eth;
395struct mtk_mac;
396
397/* manage the attached phys */
398struct mtk_phy {
399 spinlock_t lock;
400
401 struct phy_device *phy[8];
402 struct device_node *phy_node[8];
403 const __be32 *phy_fixed[8];
404 int duplex[8];
405 int speed[8];
406 int tx_fc[8];
407 int rx_fc[8];
408 int (*connect)(struct mtk_mac *mac);
409 void (*disconnect)(struct mtk_mac *mac);
410 void (*start)(struct mtk_mac *mac);
411 void (*stop)(struct mtk_mac *mac);
412};
413
414/* struct mtk_soc_data - the structure that holds the SoC specific data
415 * @reg_table: Some of the legacy registers changed their location
416 * over time. Their offsets are stored in this table
417 *
418 * @init_data: Some features depend on the silicon revision. This
419 * callback allows runtime modification of the content of
420 * this struct
421 * @reset_fe: This callback is used to trigger the reset of the frame
422 * engine
423 * @set_mac: This callback is used to set the unicast mac address
424 * filter
425 * @fwd_config: This callback is used to setup the forward config
426 * register of the MAC
427 * @switch_init: This callback is used to bring up the switch core
428 * @port_init:	Some SoCs have ports that can be routed to a switch port
429 * or an external PHY. This callback is used to setup these
430 * ports.
431 * @has_carrier:	This callback allows the driver to check if there is a cable
432 * attached.
433 * @mdio_init:	This callback is used to set up the MDIO bus if one is
434 * present
435 * @mdio_cleanup: This callback is used to cleanup the MDIO state.
436 * @mdio_write: This callback is used to write data to the MDIO bus.
437 * @mdio_read:	This callback is used to read data from the MDIO bus.
438 * @mdio_adjust_link: This callback is used to apply the PHY settings.
439 * @piac_offset:	the PIAC register has a different base offset
440 * @hw_features: feature set depends on the SoC type
441 * @dma_ring_size: allow GBit SoCs to set bigger rings than FE SoCs
442 * @napi_weight: allow GBit SoCs to set bigger napi weight than FE SoCs
443 * @dma_type:	whether the SoC uses PDMA, QDMA or a mix of the two
444 * @pdma_glo_cfg: the default DMA configuration
445 * @rx_int:	the RX interrupt bits used by the SoC
446 * @tx_int: the TX interrupt bits used by the SoC
447 * @status_int: the Status interrupt bits used by the SoC
448 * @checksum_bit: the bits used to turn on HW checksumming
449 * @txd4: default value of the TXD4 descriptor
450 * @mac_count: the number of MACs that the SoC has
451 * @new_stats:	there are old and new ways to read the hardware stats
452 *			registers
453 * @jumbo_frame:	does the SoC support jumbo frames?
454 * @rx_2b_offset: tell the rx dma to offset the data by 2 bytes
455 * @rx_sg_dma: scatter gather support
456 * @padding_64b:	enable 64 bit padding
457 * @padding_bug: rt2880 has a padding bug
458 * @has_switch: does the SoC have a built-in switch
459 *
460 * Although all of the supported SoCs share the same basic functionality, there
461 * are several SoC specific functions and features that we need to support. This
462 * struct holds the SoC specific data so that the common core can figure out
463 * how to set up and handle these differences.
464 */
465struct mtk_soc_data {
466 const u16 *reg_table;
467
468 void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
469 void (*reset_fe)(struct mtk_eth *eth);
470 void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
471 int (*fwd_config)(struct mtk_eth *eth);
472 int (*switch_init)(struct mtk_eth *eth);
473 void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
474 struct device_node *port);
475 int (*has_carrier)(struct mtk_eth *eth);
476 int (*mdio_init)(struct mtk_eth *eth);
477 void (*mdio_cleanup)(struct mtk_eth *eth);
478 int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
479 u16 val);
480 int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
481 void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
482 u32 piac_offset;
483 netdev_features_t hw_features;
484 u32 dma_ring_size;
485 u32 napi_weight;
486 u32 dma_type;
487 u32 pdma_glo_cfg;
488 u32 rx_int;
489 u32 tx_int;
490 u32 status_int;
491 u32 checksum_bit;
492 u32 txd4;
493 u32 mac_count;
494
495 u32 new_stats:1;
496 u32 jumbo_frame:1;
497 u32 rx_2b_offset:1;
498 u32 rx_sg_dma:1;
499 u32 padding_64b:1;
500 u32 padding_bug:1;
501 u32 has_switch:1;
502};
503
504#define MTK_STAT_OFFSET 0x40
505
506/* struct mtk_hw_stats - the structure that holds the traffic statistics.
507 * @stats_lock: make sure that stats operations are atomic
508 * @reg_offset: the status register offset of the SoC
509 * @syncp:		sequence counter guarding 64-bit stats reads
510 *
511 * All of the supported SoCs have hardware counters for traffic statistics.
512 * Whenever the status IRQ triggers we can read the latest stats from these
513 * counters and store them in this struct.
514 */
515struct mtk_hw_stats {
516 spinlock_t stats_lock;
517 u32 reg_offset;
518 struct u64_stats_sync syncp;
519
520 u64 tx_bytes;
521 u64 tx_packets;
522 u64 tx_skip;
523 u64 tx_collisions;
524 u64 rx_bytes;
525 u64 rx_packets;
526 u64 rx_overflow;
527 u64 rx_fcs_errors;
528 u64 rx_short_errors;
529 u64 rx_long_errors;
530 u64 rx_checksum_errors;
531 u64 rx_flow_control_packets;
532};
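
A minimal reader sketch (an illustration, not code from this file; the helper
name is hypothetical and <linux/u64_stats_sync.h> is assumed to be pulled in
transitively): the 64-bit counters are meant to be snapshotted under @syncp so
that 32-bit readers never observe a torn update.

static inline void mtk_stats_snapshot(struct mtk_hw_stats *hw_stats,
				      u64 *tx_bytes, u64 *tx_packets)
{
	unsigned int start;

	do {
		/* retry if a writer touched the counters meanwhile */
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		*tx_bytes = hw_stats->tx_bytes;
		*tx_packets = hw_stats->tx_packets;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
}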
533
534/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
535 * memory was allocated so that it can be freed properly
536 */
537enum mtk_tx_flags {
538 MTK_TX_FLAGS_SINGLE0 = 0x01,
539 MTK_TX_FLAGS_PAGE0 = 0x02,
540 MTK_TX_FLAGS_PAGE1 = 0x04,
541};
542
543/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
544 * by the TX descriptors
545 * @skb: The SKB pointer of the packet being sent
546 * @dma_addr0: The base addr of the first segment
547 * @dma_len0: The length of the first segment
548 * @dma_addr1: The base addr of the second segment
549 * @dma_len1: The length of the second segment
550 */
551struct mtk_tx_buf {
552 struct sk_buff *skb;
553 u32 flags;
554 DEFINE_DMA_UNMAP_ADDR(dma_addr0);
555 DEFINE_DMA_UNMAP_LEN(dma_len0);
556 DEFINE_DMA_UNMAP_ADDR(dma_addr1);
557 DEFINE_DMA_UNMAP_LEN(dma_len1);
558};
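
A hedged cleanup sketch (hypothetical helper, not from the original file): the
flags field records how segment 0 was mapped, so the unmap path can pair the
matching dma_unmap_single()/dma_unmap_page() call with the DEFINE_DMA_UNMAP_*
state declared above.

static inline void mtk_tx_buf_unmap0(struct device *dev,
				     struct mtk_tx_buf *tx_buf)
{
	/* MTK_TX_FLAGS_SINGLE0/PAGE0 record which mapping call was used */
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0)
		dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0)
		dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
}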
559
560/* struct mtk_tx_ring - This struct holds info describing a TX ring
561 * @tx_dma: The descriptor ring
562 * @tx_buf: The memory pointed at by the ring
563 * @tx_phys:	The physical addr of the tx_dma descriptor ring
564 * @tx_next_free: Pointer to the next free descriptor
565 * @tx_last_free: Pointer to the last free descriptor
566 * @tx_thresh:	The minimum number of free descriptors to keep available
567 * @tx_map: Callback to map a new packet into the ring
568 * @tx_poll: Callback for the housekeeping function
569 * @tx_clean: Callback for the cleanup function
570 * @tx_ring_size: How many descriptors are in the ring
571 * @tx_free_idx: The index of the next free descriptor
572 * @tx_next_idx: QDMA uses a linked list. This element points to the next
573 * free descriptor in the list
574 * @tx_free_count: QDMA uses a linked list. Track how many free descriptors
575 * are present
576 */
577struct mtk_tx_ring {
578 struct mtk_tx_dma *tx_dma;
579 struct mtk_tx_buf *tx_buf;
580 dma_addr_t tx_phys;
581 struct mtk_tx_dma *tx_next_free;
582 struct mtk_tx_dma *tx_last_free;
583 u16 tx_thresh;
584 int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
585 struct mtk_tx_ring *ring, bool gso);
586 int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
587 void (*tx_clean)(struct mtk_eth *eth);
588
589 /* PDMA only */
590 u16 tx_ring_size;
591 u16 tx_free_idx;
592
593 /* QDMA only */
594 u16 tx_next_idx;
595 atomic_t tx_free_count;
596};
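
Illustrative only, with an assumed reserve margin: a QDMA transmit path would
gate queueing on the atomic free count, keeping QDMA_RES_THRES descriptors
spare, while PDMA rings derive their free space from the index pair instead.

static inline bool mtk_qdma_ring_full(struct mtk_tx_ring *ring, int tx_num)
{
	/* tx_num = descriptors the next frame needs; keep a small reserve */
	return atomic_read(&ring->tx_free_count) <= tx_num + QDMA_RES_THRES;
}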
597
598/* struct mtk_rx_ring - This struct holds info describing a RX ring
599 * @rx_dma: The descriptor ring
600 * @rx_data: The memory pointed at by the ring
601 * @rx_phys:	The physical addr of the rx_dma descriptor ring
602 * @rx_ring_size: How many descriptors are in the ring
603 * @rx_buf_size: The size of each packet buffer
604 * @rx_calc_idx: The current head of ring
605 */
606struct mtk_rx_ring {
607 struct mtk_rx_dma *rx_dma;
608 u8 **rx_data;
609 dma_addr_t rx_phys;
610 u16 rx_ring_size;
611 u16 frag_size;
612 u16 rx_buf_size;
613 u16 rx_calc_idx;
614};
615
616/* currently no SoC has more than 2 macs */
617#define MTK_MAX_DEVS 2
618
619/* struct mtk_eth -	This is the main data structure for holding the state
620 * of the driver
621 * @dev: The device pointer
622 * @base: The mapped register i/o base
623 * @page_lock: Make sure that register operations are atomic
624 * @soc: pointer to our SoC specific data
625 * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
626 * dummy for NAPI to work
627 * @netdev: The netdev instances
628 * @mac: Each netdev is linked to a physical MAC
629 * @switch_np: The phandle for the switch
630 * @irq: The IRQ that we are using
631 * @msg_enable: Ethtool msg level
632 * @sysclk:		The sysclk rate - needed for calibration
633 * @ethsys: The register map pointing at the range used to setup
634 * MII modes
635 * @dma_refcnt: track how many netdevs are using the DMA engine
636 * @tx_ring:		Pointer to the memory holding info about the TX ring
637 * @rx_ring:		Pointer to the memory holding info about the RX ring
638 * @rx_napi: The NAPI struct
639 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
640 * @scratch_head: The scratch memory that scratch_ring points to.
641 * @phy: Info about the attached PHYs
642 * @mii_bus: If there is a bus we need to create an instance for it
643 * @link: Track if the ports have a physical link
644 * @sw_priv:		Pointer to the switch's private data
645 * @vlan_map: RX VID tracking
646 */
647
648struct mtk_eth {
649 struct device *dev;
650 void __iomem *base;
651 spinlock_t page_lock;
652 struct mtk_soc_data *soc;
653 struct net_device dummy_dev;
654 struct net_device *netdev[MTK_MAX_DEVS];
655 struct mtk_mac *mac[MTK_MAX_DEVS];
656 struct device_node *switch_np;
657 int irq;
658 u32 msg_enable;
659 unsigned long sysclk;
660 struct regmap *ethsys;
661 atomic_t dma_refcnt;
662 struct mtk_tx_ring tx_ring;
663 struct mtk_rx_ring rx_ring[2];
664 struct napi_struct rx_napi;
665 struct mtk_tx_dma *scratch_ring;
666 void *scratch_head;
667 struct mtk_phy *phy;
668 struct mii_bus *mii_bus;
669 int link[8];
670 void *sw_priv;
671 unsigned long vlan_map;
672};
673
674/* struct mtk_mac - the structure that holds the info about the MACs of the
675 * SoC
676 * @id: The number of the MAC
677 * @of_node: Our devicetree node
678 * @hw:		Backpointer to our main data structure
679 * @hw_stats: Packet statistics counter
680 * @phy_dev: The attached PHY if available
681 * @phy_flags: The PHYs flags
682 * @pending_work: The workqueue used to reset the dma ring
683 */
684struct mtk_mac {
685 int id;
686 struct device_node *of_node;
687 struct mtk_eth *hw;
688 struct mtk_hw_stats *hw_stats;
689 struct phy_device *phy_dev;
690 u32 phy_flags;
691 struct work_struct pending_work;
692};
693
694/* the struct describing the SoC. these are declared in the soc_xyz.c files */
695extern const struct of_device_id of_mtk_match[];
696
697/* read the hardware status register */
698void mtk_stats_update_mac(struct mtk_mac *mac);
699
700/* frame engine reset helper */
701void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
702
703/* register i/o wrappers */
704void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
705u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
706
707/* default clock calibration handler */
708int mtk_set_clock_cycle(struct mtk_eth *eth);
709
710/* default checksum setup handler */
711void mtk_csum_config(struct mtk_eth *eth);
712
713/* default forward config handler */
714void mtk_fwd_config(struct mtk_eth *eth);
715
716#endif /* MTK_ETH_H */
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644
index 5d63b5d96f6b..000000000000
--- a/drivers/staging/mt7621-eth/soc_mt7621.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/if_vlan.h>
18#include <linux/of_net.h>
19
20#include <asm/mach-ralink/ralink_regs.h>
21
22#include "mtk_eth_soc.h"
23#include "gsw_mt7620.h"
24#include "mdio.h"
25
26#define MT7620_CDMA_CSG_CFG 0x400
27#define MT7621_CDMP_IG_CTRL (MT7620_CDMA_CSG_CFG + 0x00)
28#define MT7621_CDMP_EG_CTRL (MT7620_CDMA_CSG_CFG + 0x04)
29#define MT7621_RESET_FE BIT(6)
30#define MT7621_L4_VALID BIT(24)
31
32#define MT7621_TX_DMA_UDF BIT(19)
33
34#define CDMA_ICS_EN BIT(2)
35#define CDMA_UCS_EN BIT(1)
36#define CDMA_TCS_EN BIT(0)
37
38#define GDMA_ICS_EN BIT(22)
39#define GDMA_TCS_EN BIT(21)
40#define GDMA_UCS_EN BIT(20)
41
42/* frame engine counters */
43#define MT7621_REG_MIB_OFFSET 0x2000
44#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00)
45#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400)
46#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40)
47
48#define GSW_REG_GDMA1_MAC_ADRL 0x508
49#define GSW_REG_GDMA1_MAC_ADRH 0x50C
50#define GSW_REG_GDMA2_MAC_ADRL 0x1508
51#define GSW_REG_GDMA2_MAC_ADRH 0x150C
52
53#define MT7621_MTK_RST_GL 0x04
54#define MT7620_MTK_INT_STATUS2 0x08
55
56/* the MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF at BIT(29),
57 * but testing shows it should be BIT(13).
58 */
59#define MT7621_MTK_GDM1_AF BIT(28)
60#define MT7621_MTK_GDM2_AF BIT(29)
61
62static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
63 [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
64 [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
65 [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
66 [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
67 [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
68 [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
69 [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
70 [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
71 [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
72 [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
73 [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
74 [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
75 [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
76 [MTK_REG_MTK_DMA_VID_BASE] = 0,
77 [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
78 [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
79 [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
80};
81
82static void mt7621_mtk_reset(struct mtk_eth *eth)
83{
84 mtk_reset(eth, MT7621_RESET_FE);
85}
86
87static int mt7621_fwd_config(struct mtk_eth *eth)
88{
89	/* Set up GMAC1 only; there is no support for GMAC2 yet */
90 mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
91 MT7620_GDMA1_FWD_CFG);
92
93 /* Enable RX checksum */
94 mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
95 GDMA_TCS_EN | GDMA_UCS_EN),
96 MT7620_GDMA1_FWD_CFG);
97
98	/* Enable RX VLAN offloading */
99 mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
100
101 return 0;
102}
103
104static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
105{
106 unsigned long flags;
107
108 spin_lock_irqsave(&mac->hw->page_lock, flags);
109 if (mac->id == 0) {
110 mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
111 GSW_REG_GDMA1_MAC_ADRH);
112 mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
113 (hwaddr[4] << 8) | hwaddr[5],
114 GSW_REG_GDMA1_MAC_ADRL);
115 }
116 if (mac->id == 1) {
117 mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
118 GSW_REG_GDMA2_MAC_ADRH);
119 mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
120 (hwaddr[4] << 8) | hwaddr[5],
121 GSW_REG_GDMA2_MAC_ADRL);
122 }
123 spin_unlock_irqrestore(&mac->hw->page_lock, flags);
124}
125
126static struct mtk_soc_data mt7621_data = {
127 .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
128 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
129 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
130 NETIF_F_IPV6_CSUM,
131 .dma_type = MTK_PDMA,
132 .dma_ring_size = 256,
133 .napi_weight = 64,
134 .new_stats = 1,
135 .padding_64b = 1,
136 .rx_2b_offset = 1,
137 .rx_sg_dma = 1,
138 .has_switch = 1,
139 .mac_count = 2,
140 .reset_fe = mt7621_mtk_reset,
141 .set_mac = mt7621_set_mac,
142 .fwd_config = mt7621_fwd_config,
143 .switch_init = mtk_gsw_init,
144 .reg_table = mt7621_reg_table,
145 .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
146 .rx_int = RT5350_RX_DONE_INT,
147 .tx_int = RT5350_TX_DONE_INT,
148 .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
149 .checksum_bit = MT7621_L4_VALID,
150 .has_carrier = mt7620_has_carrier,
151 .mdio_read = mt7620_mdio_read,
152 .mdio_write = mt7620_mdio_write,
153 .mdio_adjust_link = mt7620_mdio_link_adjust,
154};
155
156const struct of_device_id of_mtk_match[] = {
157 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
158 {},
159};
160
161MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index d33533872a16..c8fa17cfa807 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,6 +1,7 @@
1config PCI_MT7621 1config PCI_MT7621
2 tristate "MediaTek MT7621 PCI Controller" 2 tristate "MediaTek MT7621 PCI Controller"
3 depends on RALINK 3 depends on RALINK
4 depends on PCI
4 select PCI_DRIVERS_GENERIC 5 select PCI_DRIVERS_GENERIC
5 help 6 help
6 This selects a driver for the MediaTek MT7621 PCI Controller. 7 This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index d6248eecf123..2aee64fdaec5 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
163 goto no_phy; 163 goto no_phy;
164 164
165 phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, 165 phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
166 PHY_INTERFACE_MODE_GMII); 166 priv->phy_mode);
167 of_node_put(phy_node); 167 of_node_put(phy_node);
168 168
169 if (!phydev) 169 if (!phydev)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ce61c5670ef6..986db76705cc 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
653 return np; 653 return np;
654} 654}
655 655
656static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port) 656static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
657 int port)
657{ 658{
659 struct device_node *np = priv->of_node;
658 u32 delay_value; 660 u32 delay_value;
661 bool rx_delay;
662 bool tx_delay;
659 663
660 if (!of_property_read_u32(np, "rx-delay", &delay_value)) 664 /* By default, both RX/TX delay is enabled in
665 * __cvmx_helper_rgmii_enable().
666 */
667 rx_delay = true;
668 tx_delay = true;
669
670 if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
661 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value); 671 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
662 if (!of_property_read_u32(np, "tx-delay", &delay_value)) 672 rx_delay = delay_value > 0;
673 }
674 if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
663 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value); 675 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
676 tx_delay = delay_value > 0;
677 }
678
679 if (!rx_delay && !tx_delay)
680 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
681 else if (!rx_delay)
682 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
683 else if (!tx_delay)
684 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
685 else
686 priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
664} 687}
665 688
666static int cvm_oct_probe(struct platform_device *pdev) 689static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
825 priv->port = port; 848 priv->port = port;
826 priv->queue = cvmx_pko_get_base_queue(priv->port); 849 priv->queue = cvmx_pko_get_base_queue(priv->port);
827 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; 850 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
851 priv->phy_mode = PHY_INTERFACE_MODE_NA;
828 for (qos = 0; qos < 16; qos++) 852 for (qos = 0; qos < 16; qos++)
829 skb_queue_head_init(&priv->tx_free_list[qos]); 853 skb_queue_head_init(&priv->tx_free_list[qos]);
830 for (qos = 0; qos < cvmx_pko_get_num_queues(port); 854 for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
856 break; 880 break;
857 881
858 case CVMX_HELPER_INTERFACE_MODE_SGMII: 882 case CVMX_HELPER_INTERFACE_MODE_SGMII:
883 priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
859 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; 884 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
860 strcpy(dev->name, "eth%d"); 885 strcpy(dev->name, "eth%d");
861 break; 886 break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
865 strcpy(dev->name, "spi%d"); 890 strcpy(dev->name, "spi%d");
866 break; 891 break;
867 892
868 case CVMX_HELPER_INTERFACE_MODE_RGMII:
869 case CVMX_HELPER_INTERFACE_MODE_GMII: 893 case CVMX_HELPER_INTERFACE_MODE_GMII:
894 priv->phy_mode = PHY_INTERFACE_MODE_GMII;
895 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
896 strcpy(dev->name, "eth%d");
897 break;
898
899 case CVMX_HELPER_INTERFACE_MODE_RGMII:
870 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; 900 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
871 strcpy(dev->name, "eth%d"); 901 strcpy(dev->name, "eth%d");
872 cvm_set_rgmii_delay(priv->of_node, interface, 902 cvm_set_rgmii_delay(priv, interface,
873 port_index); 903 port_index);
874 break; 904 break;
875 } 905 }
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 4a07e7f43d12..be570d33685a 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -12,7 +12,7 @@
12#define OCTEON_ETHERNET_H 12#define OCTEON_ETHERNET_H
13 13
14#include <linux/of.h> 14#include <linux/of.h>
15 15#include <linux/phy.h>
16#include <asm/octeon/cvmx-helper-board.h> 16#include <asm/octeon/cvmx-helper-board.h>
17 17
18/** 18/**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
33 * cvmx_helper_interface_mode_t 33 * cvmx_helper_interface_mode_t
34 */ 34 */
35 int imode; 35 int imode;
36 /* PHY mode */
37 phy_interface_t phy_mode;
36 /* List of outstanding tx buffers per queue */ 38 /* List of outstanding tx buffers per queue */
37 struct sk_buff_head tx_free_list[16]; 39 struct sk_buff_head tx_free_list[16];
38 unsigned int last_speed; 40 unsigned int last_speed;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 80b8d4153414..a54286498a47 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
45{ 45{
46 unsigned char lob; 46 unsigned char lob;
47 int ret, i; 47 int ret, i;
48 struct dcon_gpio *pin = &gpios_asis[0]; 48 const struct dcon_gpio *pin = &gpios_asis[0];
49 49
50 for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) { 50 for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
51 gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name, 51 gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 1723a47a96b4..952f2ab51347 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
174 174
175 pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; 175 pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
176 176
177 rtw_alloc_hwxmits(padapter); 177 res = rtw_alloc_hwxmits(padapter);
178 if (res == _FAIL)
179 goto exit;
178 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); 180 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
179 181
180 for (i = 0; i < 4; i++) 182 for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ exit:
1503 return res; 1505 return res;
1504} 1506}
1505 1507
1506void rtw_alloc_hwxmits(struct adapter *padapter) 1508s32 rtw_alloc_hwxmits(struct adapter *padapter)
1507{ 1509{
1508 struct hw_xmit *hwxmits; 1510 struct hw_xmit *hwxmits;
1509 struct xmit_priv *pxmitpriv = &padapter->xmitpriv; 1511 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1512 1514
1513 pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry, 1515 pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
1514 sizeof(struct hw_xmit), GFP_KERNEL); 1516 sizeof(struct hw_xmit), GFP_KERNEL);
1517 if (!pxmitpriv->hwxmits)
1518 return _FAIL;
1515 1519
1516 hwxmits = pxmitpriv->hwxmits; 1520 hwxmits = pxmitpriv->hwxmits;
1517 1521
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1519 hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; 1523 hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
1520 hwxmits[2] .sta_queue = &pxmitpriv->be_pending; 1524 hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
1521 hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; 1525 hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
1526 return _SUCCESS;
1522} 1527}
1523 1528
1524void rtw_free_hwxmits(struct adapter *padapter) 1529void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 788f59c74ea1..ba7e15fbde72 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
336void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); 336void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
337s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); 337s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
338void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); 338void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
339void rtw_alloc_hwxmits(struct adapter *padapter); 339s32 rtw_alloc_hwxmits(struct adapter *padapter);
340void rtw_free_hwxmits(struct adapter *padapter); 340void rtw_free_hwxmits(struct adapter *padapter);
341s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt); 341s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
342 342
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 1920d02f7c9f..8c36acedf507 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
147 147
148static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) 148static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
149{ 149{
150 u32 val;
151 void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
152 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; 150 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
153 151
154 if (pcmd->rsp && pcmd->rspsz > 0) 152 r8712_free_cmd_obj(pcmd);
155 memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
156 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
157 if (!pcmd_callback)
158 r8712_free_cmd_obj(pcmd);
159 else
160 pcmd_callback(padapter, pcmd);
161 return H2C_SUCCESS; 153 return H2C_SUCCESS;
162} 154}
163 155
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 92fb77666d44..1ef86b8c592f 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
140static struct _cmd_callback cmd_callback[] = { 140static struct _cmd_callback cmd_callback[] = {
141 {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ 141 {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
142 {GEN_CMD_CODE(_Write_MACREG), NULL}, 142 {GEN_CMD_CODE(_Write_MACREG), NULL},
143 {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback}, 143 {GEN_CMD_CODE(_Read_BBREG), NULL},
144 {GEN_CMD_CODE(_Write_BBREG), NULL}, 144 {GEN_CMD_CODE(_Write_BBREG), NULL},
145 {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback}, 145 {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
146 {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ 146 {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 094d61bcb469..b87f13a0b563 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
260 } 260 }
261 } 261 }
262 262
263 rtw_alloc_hwxmits(padapter); 263 res = rtw_alloc_hwxmits(padapter);
264 if (res == _FAIL)
265 goto exit;
264 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); 266 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
265 267
266 for (i = 0; i < 4; i++) { 268 for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ exit:
2144 return res; 2146 return res;
2145} 2147}
2146 2148
2147void rtw_alloc_hwxmits(struct adapter *padapter) 2149s32 rtw_alloc_hwxmits(struct adapter *padapter)
2148{ 2150{
2149 struct hw_xmit *hwxmits; 2151 struct hw_xmit *hwxmits;
2150 struct xmit_priv *pxmitpriv = &padapter->xmitpriv; 2152 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
2155 2157
2156 pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry); 2158 pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
2157 2159
2158 if (pxmitpriv->hwxmits == NULL) { 2160 if (!pxmitpriv->hwxmits)
2159 DBG_871X("alloc hwxmits fail!...\n"); 2161 return _FAIL;
2160 return;
2161 }
2162 2162
2163 hwxmits = pxmitpriv->hwxmits; 2163 hwxmits = pxmitpriv->hwxmits;
2164 2164
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
2204 2204
2205 } 2205 }
2206 2206
2207 2207 return _SUCCESS;
2208} 2208}
2209 2209
2210void rtw_free_hwxmits(struct adapter *padapter) 2210void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 1b38b9182b31..37f42b2f22f1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
487void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); 487void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
488 488
489 489
490void rtw_alloc_hwxmits(struct adapter *padapter); 490s32 rtw_alloc_hwxmits(struct adapter *padapter);
491void rtw_free_hwxmits(struct adapter *padapter); 491void rtw_free_hwxmits(struct adapter *padapter);
492 492
493 493
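The rtl8723bs hunks above convert rtw_alloc_hwxmits() from a void function that could fail silently into one returning _SUCCESS/_FAIL that the caller checks. A minimal userspace sketch of that contract change follows; alloc_hwxmits and struct xmit_priv here are illustrative stand-ins, not the driver's real layout.

#include <stdlib.h>

#define _SUCCESS 1
#define _FAIL    0

struct xmit_priv { void *hwxmits; };

/* was: a void allocator that returned early on failure, leaving the
 * caller to dereference a NULL pointer later */
static int alloc_hwxmits(struct xmit_priv *priv, size_t entries)
{
        priv->hwxmits = calloc(entries, 64);  /* rtw_zmalloc() analogue;
                                               * 64 stands in for
                                               * sizeof(struct hw_xmit) */
        if (!priv->hwxmits)
                return _FAIL;                 /* caller must now bail out */
        return _SUCCESS;
}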
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 9930ed954abb..4cc77b2016e1 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
180 180
181 rtlpriv->phydm.internal = 181 rtlpriv->phydm.internal =
182 kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL); 182 kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
183 if (!rtlpriv->phydm.internal)
184 return 0;
183 185
184 _rtl_phydm_init_com_info(rtlpriv, ic, params); 186 _rtl_phydm_init_com_info(rtlpriv, ic, params);
185 187
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index f061dd1382aa..cf6b7a80b753 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
743 u1_rsvd_page_loc, 3); 743 u1_rsvd_page_loc, 3);
744 744
745 skb = dev_alloc_skb(totalpacketlen); 745 skb = dev_alloc_skb(totalpacketlen);
746 if (!skb)
747 return;
746 memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, 748 memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
747 totalpacketlen); 749 totalpacketlen);
748 750
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index edff6ce85655..9d85a3a1af4c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
210 return -EINVAL; 210 return -EINVAL;
211 211
212 spin_lock_irqsave(&speakup_info.spinlock, flags); 212 spin_lock_irqsave(&speakup_info.spinlock, flags);
213 synth_soft.alive = 1;
213 while (1) { 214 while (1) {
214 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); 215 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
215 if (!unicode) 216 if (synth_current() == &synth_soft) {
216 synth_buffer_skip_nonlatin1(); 217 if (!unicode)
217 if (!synth_buffer_empty() || speakup_info.flushing) 218 synth_buffer_skip_nonlatin1();
218 break; 219 if (!synth_buffer_empty() || speakup_info.flushing)
220 break;
221 }
219 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 222 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
220 if (fp->f_flags & O_NONBLOCK) { 223 if (fp->f_flags & O_NONBLOCK) {
221 finish_wait(&speakup_event, &wait); 224 finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
235 238
236 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ 239 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
237 while (chars_sent <= count - bytes_per_ch) { 240 while (chars_sent <= count - bytes_per_ch) {
241 if (synth_current() != &synth_soft)
242 break;
238 if (speakup_info.flushing) { 243 if (speakup_info.flushing) {
239 speakup_info.flushing = 0; 244 speakup_info.flushing = 0;
240 ch = '\x18'; 245 ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
331 poll_wait(fp, &speakup_event, wait); 336 poll_wait(fp, &speakup_event, wait);
332 337
333 spin_lock_irqsave(&speakup_info.spinlock, flags); 338 spin_lock_irqsave(&speakup_info.spinlock, flags);
334 if (!synth_buffer_empty() || speakup_info.flushing) 339 if (synth_current() == &synth_soft &&
340 (!synth_buffer_empty() || speakup_info.flushing))
335 ret = EPOLLIN | EPOLLRDNORM; 341 ret = EPOLLIN | EPOLLRDNORM;
336 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 342 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
337 return ret; 343 return ret;
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index c8e688878fc7..ac6a74883af4 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
74int synth_release_region(unsigned long start, unsigned long n); 74int synth_release_region(unsigned long start, unsigned long n);
75int synth_add(struct spk_synth *in_synth); 75int synth_add(struct spk_synth *in_synth);
76void synth_remove(struct spk_synth *in_synth); 76void synth_remove(struct spk_synth *in_synth);
77struct spk_synth *synth_current(void);
77 78
78extern struct speakup_info_t speakup_info; 79extern struct speakup_info_t speakup_info;
79 80
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 25f259ee4ffc..3568bfb89912 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
481} 481}
482EXPORT_SYMBOL_GPL(synth_remove); 482EXPORT_SYMBOL_GPL(synth_remove);
483 483
484struct spk_synth *synth_current(void)
485{
486 return synth;
487}
488EXPORT_SYMBOL_GPL(synth_current);
489
484short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM }; 490short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
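The speakup changes above hinge on a one-line accessor, synth_current(), that lets the soft synth's read/poll paths confirm it is still the active synthesizer before consuming buffer data. A minimal sketch of the pattern, with hypothetical names (backend_current(), current_backend) standing in for synth_current()/synth:

#include <stdbool.h>

struct backend { const char *name; };

static struct backend *current_backend;        /* the module's 'synth' */

/* read-only accessor, as synth_current() exposes 'synth' above */
static struct backend *backend_current(void)
{
        return current_backend;
}

/* consumers drain data only while they are still active, mirroring
 * the synth_current() == &synth_soft guards in softsynthx_read() */
static bool may_consume(const struct backend *me)
{
        return backend_current() == me;
}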
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 804daf83be35..064d0db4c51e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
3513 struct device_node *fw_node; 3513 struct device_node *fw_node;
3514 const struct of_device_id *of_id; 3514 const struct of_device_id *of_id;
3515 struct vchiq_drvdata *drvdata; 3515 struct vchiq_drvdata *drvdata;
3516 struct device *vchiq_dev;
3516 int err; 3517 int err;
3517 3518
3518 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node); 3519 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
3547 goto failed_platform_init; 3548 goto failed_platform_init;
3548 } 3549 }
3549 3550
3550 if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid, 3551 vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
3551 NULL, "vchiq"))) 3552 "vchiq");
3553 if (IS_ERR(vchiq_dev)) {
3554 err = PTR_ERR(vchiq_dev);
3552 goto failed_device_create; 3555 goto failed_device_create;
3556 }
3553 3557
3554 vchiq_debugfs_init(); 3558 vchiq_debugfs_init();
3555 3559
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b370985b58a1..c6bb4aaf9bd0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
1033 return; 1033 return;
1034 } 1034 }
1035 1035
1036 MACvIntDisable(priv->PortOffset);
1037
1038 spin_lock_irqsave(&priv->lock, flags); 1036 spin_lock_irqsave(&priv->lock, flags);
1039 1037
1040 /* Read low level stats */ 1038 /* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
1122 } 1120 }
1123 1121
1124 spin_unlock_irqrestore(&priv->lock, flags); 1122 spin_unlock_irqrestore(&priv->lock, flags);
1125
1126 MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1127} 1123}
1128 1124
1129static void vnt_interrupt_work(struct work_struct *work) 1125static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
1133 1129
1134 if (priv->vif) 1130 if (priv->vif)
1135 vnt_interrupt_process(priv); 1131 vnt_interrupt_process(priv);
1132
1133 MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1136} 1134}
1137 1135
1138static irqreturn_t vnt_interrupt(int irq, void *arg) 1136static irqreturn_t vnt_interrupt(int irq, void *arg)
1139{ 1137{
1140 struct vnt_private *priv = arg; 1138 struct vnt_private *priv = arg;
1141 1139
1142 if (priv->vif) 1140 schedule_work(&priv->interrupt_work);
1143 schedule_work(&priv->interrupt_work); 1141
1142 MACvIntDisable(priv->PortOffset);
1144 1143
1145 return IRQ_HANDLED; 1144 return IRQ_HANDLED;
1146} 1145}
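The vt6655 rework above moves interrupt masking into the hard IRQ handler and re-enables interrupts only after the deferred work has run, so no events are processed while the mask is down. A minimal model of that top-half/bottom-half split (all names illustrative):

#include <stdbool.h>

static volatile bool irqs_masked;

static void hw_mask_irqs(void)   { irqs_masked = true;  }  /* MACvIntDisable */
static void hw_unmask_irqs(void) { irqs_masked = false; }  /* MACvIntEnable */

static void process_events(void) { /* read ISR, handle RX/TX */ }

/* top half: just mask and defer, as vnt_interrupt() now does */
static void hardirq_handler(void)
{
        /* schedule_work(&priv->interrupt_work) in the real driver */
        hw_mask_irqs();
}

/* bottom half: unmask only once processing is complete */
static void work_handler(void)
{
        process_events();
        hw_unmask_irqs();
}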
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index db5df3d54818..3bdd56a1021b 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
49 struct clk *clk; 49 struct clk *clk;
50}; 50};
51 51
52static inline bool ar933x_uart_console_enabled(void)
53{
54 return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
55}
56
57static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, 52static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
58 int offset) 53 int offset)
59{ 54{
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
508 .verify_port = ar933x_uart_verify_port, 503 .verify_port = ar933x_uart_verify_port,
509}; 504};
510 505
506#ifdef CONFIG_SERIAL_AR933X_CONSOLE
511static struct ar933x_uart_port * 507static struct ar933x_uart_port *
512ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; 508ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
513 509
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
604 .index = -1, 600 .index = -1,
605 .data = &ar933x_uart_driver, 601 .data = &ar933x_uart_driver,
606}; 602};
607 603#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
608static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
609{
610 if (!ar933x_uart_console_enabled())
611 return;
612
613 ar933x_console_ports[up->port.line] = up;
614}
615 604
616static struct uart_driver ar933x_uart_driver = { 605static struct uart_driver ar933x_uart_driver = {
617 .owner = THIS_MODULE, 606 .owner = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
700 baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); 689 baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
701 up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); 690 up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
702 691
703 ar933x_uart_add_console_port(up); 692#ifdef CONFIG_SERIAL_AR933X_CONSOLE
693 ar933x_console_ports[up->port.line] = up;
694#endif
704 695
705 ret = uart_add_one_port(&ar933x_uart_driver, &up->port); 696 ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
706 if (ret) 697 if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
749{ 740{
750 int ret; 741 int ret;
751 742
752 if (ar933x_uart_console_enabled()) 743#ifdef CONFIG_SERIAL_AR933X_CONSOLE
753 ar933x_uart_driver.cons = &ar933x_uart_console; 744 ar933x_uart_driver.cons = &ar933x_uart_console;
745#endif
754 746
755 ret = uart_register_driver(&ar933x_uart_driver); 747 ret = uart_register_driver(&ar933x_uart_driver);
756 if (ret) 748 if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 05147fe24343..0b4f36905321 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,6 +166,8 @@ struct atmel_uart_port {
166 unsigned int pending_status; 166 unsigned int pending_status;
167 spinlock_t lock_suspended; 167 spinlock_t lock_suspended;
168 168
169 bool hd_start_rx; /* can start RX during half-duplex operation */
170
169 /* ISO7816 */ 171 /* ISO7816 */
170 unsigned int fidi_min; 172 unsigned int fidi_min;
171 unsigned int fidi_max; 173 unsigned int fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
231 __raw_writeb(value, port->membase + ATMEL_US_THR); 233 __raw_writeb(value, port->membase + ATMEL_US_THR);
232} 234}
233 235
236static inline int atmel_uart_is_half_duplex(struct uart_port *port)
237{
238 return ((port->rs485.flags & SER_RS485_ENABLED) &&
239 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
240 (port->iso7816.flags & SER_ISO7816_ENABLED);
241}
242
234#ifdef CONFIG_SERIAL_ATMEL_PDC 243#ifdef CONFIG_SERIAL_ATMEL_PDC
235static bool atmel_use_pdc_rx(struct uart_port *port) 244static bool atmel_use_pdc_rx(struct uart_port *port)
236{ 245{
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
608 /* Disable interrupts */ 617 /* Disable interrupts */
609 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 618 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
610 619
611 if (((port->rs485.flags & SER_RS485_ENABLED) && 620 if (atmel_uart_is_half_duplex(port))
612 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
613 port->iso7816.flags & SER_ISO7816_ENABLED)
614 atmel_start_rx(port); 621 atmel_start_rx(port);
622
615} 623}
616 624
617/* 625/*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
628 return; 636 return;
629 637
630 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) 638 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
631 if (((port->rs485.flags & SER_RS485_ENABLED) && 639 if (atmel_uart_is_half_duplex(port))
632 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
633 port->iso7816.flags & SER_ISO7816_ENABLED)
634 atmel_stop_rx(port); 640 atmel_stop_rx(port);
635 641
636 if (atmel_use_pdc_tx(port)) 642 if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
928 */ 934 */
929 if (!uart_circ_empty(xmit)) 935 if (!uart_circ_empty(xmit))
930 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 936 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
931 else if (((port->rs485.flags & SER_RS485_ENABLED) && 937 else if (atmel_uart_is_half_duplex(port)) {
932 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || 938 /*
933 port->iso7816.flags & SER_ISO7816_ENABLED) { 939 * DMA done, re-enable TXEMPTY and signal that we can stop
934 /* DMA done, stop TX, start RX for RS485 */ 940 * TX and start RX for RS485
935 atmel_start_rx(port); 941 */
942 atmel_port->hd_start_rx = true;
943 atmel_uart_writel(port, ATMEL_US_IER,
944 atmel_port->tx_done_mask);
936 } 945 }
937 946
938 spin_unlock_irqrestore(&port->lock, flags); 947 spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
1288 sg_dma_len(&atmel_port->sg_rx)/2, 1297 sg_dma_len(&atmel_port->sg_rx)/2,
1289 DMA_DEV_TO_MEM, 1298 DMA_DEV_TO_MEM,
1290 DMA_PREP_INTERRUPT); 1299 DMA_PREP_INTERRUPT);
1300 if (!desc) {
1301 dev_err(port->dev, "Preparing DMA cyclic failed\n");
1302 goto chan_err;
1303 }
1291 desc->callback = atmel_complete_rx_dma; 1304 desc->callback = atmel_complete_rx_dma;
1292 desc->callback_param = port; 1305 desc->callback_param = port;
1293 atmel_port->desc_rx = desc; 1306 atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1376 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1389 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1377 1390
1378 if (pending & atmel_port->tx_done_mask) { 1391 if (pending & atmel_port->tx_done_mask) {
1379 /* Either PDC or interrupt transmission */
1380 atmel_uart_writel(port, ATMEL_US_IDR, 1392 atmel_uart_writel(port, ATMEL_US_IDR,
1381 atmel_port->tx_done_mask); 1393 atmel_port->tx_done_mask);
1394
1395 /* Start RX if flag was set and FIFO is empty */
1396 if (atmel_port->hd_start_rx) {
1397 if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1398 & ATMEL_US_TXEMPTY))
1399 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1400
1401 atmel_port->hd_start_rx = false;
1402 atmel_start_rx(port);
1403 return;
1404 }
1405
1382 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 1406 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1383 } 1407 }
1384} 1408}
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
1508 atmel_uart_writel(port, ATMEL_US_IER, 1532 atmel_uart_writel(port, ATMEL_US_IER,
1509 atmel_port->tx_done_mask); 1533 atmel_port->tx_done_mask);
1510 } else { 1534 } else {
1511 if (((port->rs485.flags & SER_RS485_ENABLED) && 1535 if (atmel_uart_is_half_duplex(port)) {
1512 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
1513 port->iso7816.flags & SER_ISO7816_ENABLED) {
1514 /* DMA done, stop TX, start RX for RS485 */ 1536 /* DMA done, stop TX, start RX for RS485 */
1515 atmel_start_rx(port); 1537 atmel_start_rx(port);
1516 } 1538 }
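The atmel_serial change defers the RS485 turnaround: the DMA-complete callback no longer calls atmel_start_rx() directly, it sets hd_start_rx and re-arms the TX-done interrupt so RX starts only once the transmitter has actually drained. A compact model of that handshake (struct port and the helpers are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct port {
        bool hd_start_rx;        /* can start RX during half-duplex */
        bool tx_fifo_empty;      /* models the ATMEL_US_TXEMPTY bit */
};

static void start_rx(struct port *p) { (void)p; /* enable receiver */ }

/* DMA completion: signal the turnaround instead of performing it */
static void dma_tx_complete(struct port *p)
{
        p->hd_start_rx = true;
        /* real driver: write tx_done_mask to ATMEL_US_IER here */
}

/* TX-done interrupt: now safe to turn the line around */
static void tx_done_irq(struct port *p)
{
        if (p->hd_start_rx) {
                if (!p->tx_fifo_empty)
                        fprintf(stderr, "TX fifo not empty\n"); /* dev_warn */
                p->hd_start_rx = false;
                start_rx(p);
        }
}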
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 6fb312e7af71..bfe5e9e034ec 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
148 char *cptr = config; 148 char *cptr = config;
149 struct console *cons; 149 struct console *cons;
150 150
151 if (!strlen(config) || isspace(config[0])) 151 if (!strlen(config) || isspace(config[0])) {
152 err = 0;
152 goto noconfig; 153 goto noconfig;
154 }
153 155
154 kgdboc_io_ops.is_console = 0; 156 kgdboc_io_ops.is_console = 0;
155 kgdb_tty_driver = NULL; 157 kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f5bdde405627..450ba6d7996c 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
1415 if (spi->dev.of_node) { 1415 if (spi->dev.of_node) {
1416 const struct of_device_id *of_id = 1416 const struct of_device_id *of_id =
1417 of_match_device(max310x_dt_ids, &spi->dev); 1417 of_match_device(max310x_dt_ids, &spi->dev);
1418 if (!of_id)
1419 return -ENODEV;
1418 1420
1419 devtype = (struct max310x_devtype *)of_id->data; 1421 devtype = (struct max310x_devtype *)of_id->data;
1420 } else { 1422 } else {
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231f751d1ef4..7e7b1559fa36 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
810 return -EINVAL; 810 return -EINVAL;
811 } 811 }
812 812
813 if (!match)
814 return -ENODEV;
815
813 /* Assume that all UART ports have a DT alias or none has */ 816 /* Assume that all UART ports have a DT alias or none has */
814 id = of_alias_get_id(pdev->dev.of_node, "serial"); 817 id = of_alias_get_id(pdev->dev.of_node, "serial");
815 if (!pdev->dev.of_node || id < 0) 818 if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 27235a526cce..4c188f4079b3 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
1686 1686
1687 s->port.mapbase = r->start; 1687 s->port.mapbase = r->start;
1688 s->port.membase = ioremap(r->start, resource_size(r)); 1688 s->port.membase = ioremap(r->start, resource_size(r));
1689 if (!s->port.membase) {
1690 ret = -ENOMEM;
1691 goto out_disable_clks;
1692 }
1689 s->port.ops = &mxs_auart_ops; 1693 s->port.ops = &mxs_auart_ops;
1690 s->port.iotype = UPIO_MEM; 1694 s->port.iotype = UPIO_MEM;
1691 s->port.fifosize = MXS_AUART_FIFO_SIZE; 1695 s->port.fifosize = MXS_AUART_FIFO_SIZE;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 3bcec1c20219..35e5f9c5d5be 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
1050{ 1050{
1051 struct uart_port *uport; 1051 struct uart_port *uport;
1052 struct qcom_geni_serial_port *port; 1052 struct qcom_geni_serial_port *port;
1053 int baud; 1053 int baud = 9600;
1054 int bits = 8; 1054 int bits = 8;
1055 int parity = 'n'; 1055 int parity = 'n';
1056 int flow = 'n'; 1056 int flow = 'n';
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 635178cf3eed..09a183dfc526 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
1507 ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); 1507 ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
1508 if (ret < 0) { 1508 if (ret < 0) {
1509 pr_err("failed to init sc16is7xx i2c --> %d\n", ret); 1509 pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
1510 return ret; 1510 goto err_i2c;
1511 } 1511 }
1512#endif 1512#endif
1513 1513
@@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void)
1515 ret = spi_register_driver(&sc16is7xx_spi_uart_driver); 1515 ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
1516 if (ret < 0) { 1516 if (ret < 0) {
1517 pr_err("failed to init sc16is7xx spi --> %d\n", ret); 1517 pr_err("failed to init sc16is7xx spi --> %d\n", ret);
1518 return ret; 1518 goto err_spi;
1519 } 1519 }
1520#endif 1520#endif
1521 return ret; 1521 return ret;
1522
1523err_spi:
1524#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
1525 i2c_del_driver(&sc16is7xx_i2c_uart_driver);
1526#endif
1527err_i2c:
1528 uart_unregister_driver(&sc16is7xx_uart);
1529 return ret;
1522} 1530}
1523module_init(sc16is7xx_init); 1531module_init(sc16is7xx_init);
1524 1532
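The sc16is7xx_init() fix above is the classic reverse-order unwind: each registration that succeeded before the failure point is torn down, ending with the first one. A self-contained model of the goto ladder (the register_*/unregister_* names are placeholders, not a real API):

#include <stdio.h>

static int register_core(void) { return 0; }   /* uart_register_driver */
static int register_i2c(void)  { return 0; }   /* i2c_add_driver */
static int register_spi(void)  { return -1; }  /* spi_register_driver fails */
static void unregister_i2c(void)  { }
static void unregister_core(void) { }

static int init_all(void)
{
        int ret = register_core();
        if (ret)
                return ret;
        ret = register_i2c();
        if (ret)
                goto err_i2c;
        ret = register_spi();
        if (ret)
                goto err_spi;
        return 0;

err_spi:
        unregister_i2c();      /* undo the step that succeeded */
err_i2c:
        unregister_core();     /* always undo the first step */
        return ret;
}

int main(void) { printf("init_all -> %d\n", init_all()); return 0; }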
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 060fcd42b6d5..2d1c626312cd 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
838 838
839 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 839 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
840 uart_write_wakeup(port); 840 uart_write_wakeup(port);
841 if (uart_circ_empty(xmit)) { 841 if (uart_circ_empty(xmit))
842 sci_stop_tx(port); 842 sci_stop_tx(port);
843 } else {
844 ctrl = serial_port_in(port, SCSCR);
845
846 if (port->type != PORT_SCI) {
847 serial_port_in(port, SCxSR); /* Dummy read */
848 sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
849 }
850 843
851 ctrl |= SCSCR_TIE;
852 serial_port_out(port, SCSCR, ctrl);
853 }
854} 844}
855 845
856/* On SH3, SCIF may read end-of-break as a space->mark char */ 846/* On SH3, SCIF may read end-of-break as a space->mark char */
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 044c3cbdcfa4..a9e12b3bc31d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
325 if (tty && C_HUPCL(tty)) 325 if (tty && C_HUPCL(tty))
326 tty_port_lower_dtr_rts(port); 326 tty_port_lower_dtr_rts(port);
327 327
328 if (port->ops->shutdown) 328 if (port->ops && port->ops->shutdown)
329 port->ops->shutdown(port); 329 port->ops->shutdown(port);
330 } 330 }
331out: 331out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
398 */ 398 */
399int tty_port_carrier_raised(struct tty_port *port) 399int tty_port_carrier_raised(struct tty_port *port)
400{ 400{
401 if (port->ops->carrier_raised == NULL) 401 if (!port->ops || !port->ops->carrier_raised)
402 return 1; 402 return 1;
403 return port->ops->carrier_raised(port); 403 return port->ops->carrier_raised(port);
404} 404}
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
414 */ 414 */
415void tty_port_raise_dtr_rts(struct tty_port *port) 415void tty_port_raise_dtr_rts(struct tty_port *port)
416{ 416{
417 if (port->ops->dtr_rts) 417 if (port->ops && port->ops->dtr_rts)
418 port->ops->dtr_rts(port, 1); 418 port->ops->dtr_rts(port, 1);
419} 419}
420EXPORT_SYMBOL(tty_port_raise_dtr_rts); 420EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
429 */ 429 */
430void tty_port_lower_dtr_rts(struct tty_port *port) 430void tty_port_lower_dtr_rts(struct tty_port *port)
431{ 431{
432 if (port->ops->dtr_rts) 432 if (port->ops && port->ops->dtr_rts)
433 port->ops->dtr_rts(port, 0); 433 port->ops->dtr_rts(port, 0);
434} 434}
435EXPORT_SYMBOL(tty_port_lower_dtr_rts); 435EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
684 684
685 if (!tty_port_initialized(port)) { 685 if (!tty_port_initialized(port)) {
686 clear_bit(TTY_IO_ERROR, &tty->flags); 686 clear_bit(TTY_IO_ERROR, &tty->flags);
687 if (port->ops->activate) { 687 if (port->ops && port->ops->activate) {
688 int retval = port->ops->activate(port, tty); 688 int retval = port->ops->activate(port, tty);
689 if (retval) { 689 if (retval) {
690 mutex_unlock(&port->mutex); 690 mutex_unlock(&port->mutex);
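Every tty_port hunk above applies the same defensive dispatch: verify that the ops table itself exists before testing the individual hook. A short model with invented types:

struct port_ops { void (*dtr_rts)(int raise); };
struct port { const struct port_ops *ops; };

static void port_raise_dtr_rts(struct port *port)
{
        /* check port->ops first, as the diff now does */
        if (port->ops && port->ops->dtr_rts)
                port->ops->dtr_rts(1);
}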
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 739f8960811a..ec666eb4b7b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
558 clear_bit(EVENT_RX_STALL, &acm->flags); 558 clear_bit(EVENT_RX_STALL, &acm->flags);
559 } 559 }
560 560
561 if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { 561 if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
562 tty_port_tty_wakeup(&acm->port); 562 tty_port_tty_wakeup(&acm->port);
563 clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
564 }
565} 563}
566 564
567/* 565/*
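The cdc-acm change collapses test_bit() + clear_bit() into test_and_clear_bit() so a wakeup flag set between the test and the clear can no longer be lost. A userspace analogue of the same idea using a C11 atomic exchange (a single flag instead of a bitmap):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool tty_wakeup_pending;

static void softint(void)
{
        /* atomically fetch the old value and clear the flag, like
         * test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags) */
        if (atomic_exchange(&tty_wakeup_pending, false)) {
                /* tty_port_tty_wakeup(&acm->port) would run here */
        }
}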
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 48277bbc15e4..73c8e6591746 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
145 145
146 do { 146 do {
147 controller = of_find_node_with_property(controller, "phys"); 147 controller = of_find_node_with_property(controller, "phys");
148 if (!of_device_is_available(controller))
149 continue;
148 index = 0; 150 index = 0;
149 do { 151 do {
150 if (arg0 == -1) { 152 if (arg0 == -1) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3189181bb628..975d7c1288e3 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2742,6 +2742,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
2742 retval = usb_phy_roothub_set_mode(hcd->phy_roothub, 2742 retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
2743 PHY_MODE_USB_HOST_SS); 2743 PHY_MODE_USB_HOST_SS);
2744 if (retval) 2744 if (retval)
2745 retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
2746 PHY_MODE_USB_HOST);
2747 if (retval)
2745 goto err_usb_phy_roothub_power_on; 2748 goto err_usb_phy_roothub_power_on;
2746 2749
2747 retval = usb_phy_roothub_power_on(hcd->phy_roothub); 2750 retval = usb_phy_roothub_power_on(hcd->phy_roothub);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index fdc6e4e403e8..8cced3609e24 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
29#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa 29#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
30#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 30#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
31#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 31#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
32#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee
32#define PCI_DEVICE_ID_INTEL_GLK 0x31aa 33#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
33#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee 34#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
34#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e 35#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
305 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), 306 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
306 (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, 307 (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
307 308
309 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
310 (kernel_ulong_t) &dwc3_pci_intel_properties, },
311
308 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), 312 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
309 (kernel_ulong_t) &dwc3_pci_intel_properties, }, 313 (kernel_ulong_t) &dwc3_pci_intel_properties, },
310 314
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 75b113a5b25c..f3816a5c861e 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -391,20 +391,20 @@ try_again:
391 req->complete = f_hidg_req_complete; 391 req->complete = f_hidg_req_complete;
392 req->context = hidg; 392 req->context = hidg;
393 393
394 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
395
394 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); 396 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
395 if (status < 0) { 397 if (status < 0) {
396 ERROR(hidg->func.config->cdev, 398 ERROR(hidg->func.config->cdev,
397 "usb_ep_queue error on int endpoint %zd\n", status); 399 "usb_ep_queue error on int endpoint %zd\n", status);
398 goto release_write_pending_unlocked; 400 goto release_write_pending;
399 } else { 401 } else {
400 status = count; 402 status = count;
401 } 403 }
402 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
403 404
404 return status; 405 return status;
405release_write_pending: 406release_write_pending:
406 spin_lock_irqsave(&hidg->write_spinlock, flags); 407 spin_lock_irqsave(&hidg->write_spinlock, flags);
407release_write_pending_unlocked:
408 hidg->write_pending = 0; 408 hidg->write_pending = 0;
409 spin_unlock_irqrestore(&hidg->write_spinlock, flags); 409 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
410 410
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index b77f3126580e..c2011cd7df8c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
945 break; 945 break;
946 } 946 }
947 if (&req->req != _req) { 947 if (&req->req != _req) {
948 ep->stopped = stopped;
948 spin_unlock_irqrestore(&ep->dev->lock, flags); 949 spin_unlock_irqrestore(&ep->dev->lock, flags);
949 return -EINVAL; 950 return -EINVAL;
950 } 951 }
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f63f82450bf4..898339e5df10 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
866 (void) readl(&ep->dev->pci->pcimstctl); 866 (void) readl(&ep->dev->pci->pcimstctl);
867 867
868 writel(BIT(DMA_START), &dma->dmastat); 868 writel(BIT(DMA_START), &dma->dmastat);
869
870 if (!ep->is_in)
871 stop_out_naking(ep);
872} 869}
873 870
874static void start_dma(struct net2280_ep *ep, struct net2280_request *req) 871static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
907 writel(BIT(DMA_START), &dma->dmastat); 904 writel(BIT(DMA_START), &dma->dmastat);
908 return; 905 return;
909 } 906 }
907 stop_out_naking(ep);
910 } 908 }
911 909
912 tmp = dmactl_default; 910 tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1275 break; 1273 break;
1276 } 1274 }
1277 if (&req->req != _req) { 1275 if (&req->req != _req) {
1276 ep->stopped = stopped;
1278 spin_unlock_irqrestore(&ep->dev->lock, flags); 1277 spin_unlock_irqrestore(&ep->dev->lock, flags);
1279 dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", 1278 ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
1280 __func__);
1281 return -EINVAL; 1279 return -EINVAL;
1282 } 1280 }
1283 1281
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 934584f0a20a..6343fbacd244 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
3204 printk(KERN_INFO "driver %s\n", hcd_name); 3204 printk(KERN_INFO "driver %s\n", hcd_name);
3205 workqueue = create_singlethread_workqueue("u132"); 3205 workqueue = create_singlethread_workqueue("u132");
3206 retval = platform_driver_register(&u132_platform_driver); 3206 retval = platform_driver_register(&u132_platform_driver);
3207 if (retval)
3208 destroy_workqueue(workqueue);
3209
3207 return retval; 3210 return retval;
3208} 3211}
3209 3212
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index c78be578abb0..d932cc31711e 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
516 return -1; 516 return -1;
517 517
518 writel(0, &dbc->regs->control); 518 writel(0, &dbc->regs->control);
519 xhci_dbc_mem_cleanup(xhci);
520 dbc->state = DS_DISABLED; 519 dbc->state = DS_DISABLED;
521 520
522 return 0; 521 return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
562 ret = xhci_do_dbc_stop(xhci); 561 ret = xhci_do_dbc_stop(xhci);
563 spin_unlock_irqrestore(&dbc->lock, flags); 562 spin_unlock_irqrestore(&dbc->lock, flags);
564 563
565 if (!ret) 564 if (!ret) {
565 xhci_dbc_mem_cleanup(xhci);
566 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); 566 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
567 }
567} 568}
568 569
569static void 570static void
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e2eece693655..96a740543183 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1545 port_index = max_ports; 1545 port_index = max_ports;
1546 while (port_index--) { 1546 while (port_index--) {
1547 u32 t1, t2; 1547 u32 t1, t2;
1548 1548 int retries = 10;
1549retry:
1549 t1 = readl(ports[port_index]->addr); 1550 t1 = readl(ports[port_index]->addr);
1550 t2 = xhci_port_state_to_neutral(t1); 1551 t2 = xhci_port_state_to_neutral(t1);
1551 portsc_buf[port_index] = 0; 1552 portsc_buf[port_index] = 0;
1552 1553
1553 /* Bail out if a USB3 port has a new device in link training */ 1554 /*
1554 if ((hcd->speed >= HCD_USB3) && 1555 * Give a USB3 port in link training time to finish, but don't
1556 * prevent suspend as the port might be stuck
1557 */
1558 if ((hcd->speed >= HCD_USB3) && retries-- &&
1555 (t1 & PORT_PLS_MASK) == XDEV_POLLING) { 1559 (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1556 bus_state->bus_suspended = 0;
1557 spin_unlock_irqrestore(&xhci->lock, flags); 1560 spin_unlock_irqrestore(&xhci->lock, flags);
1558 xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); 1561 msleep(XHCI_PORT_POLLING_LFPS_TIME);
1559 return -EBUSY; 1562 spin_lock_irqsave(&xhci->lock, flags);
1563 xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
1564 port_index);
1565 goto retry;
1560 } 1566 }
1561
1562 /* suspend ports in U0, or bail out for new connect changes */ 1567 /* suspend ports in U0, or bail out for new connect changes */
1563 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { 1568 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
1564 if ((t1 & PORT_CSC) && wake_enabled) { 1569 if ((t1 & PORT_CSC) && wake_enabled) {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index a6e463715779..671bce18782c 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
246 if (!xhci_rcar_wait_for_pll_active(hcd)) 246 if (!xhci_rcar_wait_for_pll_active(hcd))
247 return -ETIMEDOUT; 247 return -ETIMEDOUT;
248 248
249 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
249 return xhci_rcar_download_firmware(hcd); 250 return xhci_rcar_download_firmware(hcd);
250} 251}
251 252
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 40fa25c4d041..9215a28dad40 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
1647 } 1647 }
1648 } 1648 }
1649 1649
1650 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 && 1650 if ((portsc & PORT_PLC) &&
1651 DEV_SUPERSPEED_ANY(portsc)) { 1651 DEV_SUPERSPEED_ANY(portsc) &&
1652 ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
1653 (portsc & PORT_PLS_MASK) == XDEV_U1 ||
1654 (portsc & PORT_PLS_MASK) == XDEV_U2)) {
1652 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); 1655 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1653 /* We've just brought the device into U0 through either the 1656 /* We've just brought the device into U0/1/2 through either the
1654 * Resume state after a device remote wakeup, or through the 1657 * Resume state after a device remote wakeup, or through the
1655 * U3Exit state after a host-initiated resume. If it's a device 1658 * U3Exit state after a host-initiated resume. If it's a device
1656 * initiated remote wake, don't pass up the link state change, 1659 * initiated remote wake, don't pass up the link state change,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 652dc36e3012..9334cdee382a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -452,6 +452,14 @@ struct xhci_op_regs {
452 */ 452 */
453#define XHCI_DEFAULT_BESL 4 453#define XHCI_DEFAULT_BESL 4
454 454
455/*
456 * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
457 * to complete link training. usually link trainig completes much faster
458 * so check status 10 times with 36ms sleep in places we need to wait for
459 * polling to complete.
460 */
461#define XHCI_PORT_POLLING_LFPS_TIME 36
462
455/** 463/**
456 * struct xhci_intr_reg - Interrupt Register Set 464 * struct xhci_intr_reg - Interrupt Register Set
457 * @irq_pending: IMAN - Interrupt Management Register. Used to enable 465 * @irq_pending: IMAN - Interrupt Management Register. Used to enable
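The xhci-hub hunk replaces the -EBUSY bailout with a bounded wait: up to 10 polls separated by XHCI_PORT_POLLING_LFPS_TIME (36 ms) cover the 360 ms tPollingLFPSTimeout budget. A minimal sketch of the bounded retry loop, with stand-in predicates for the PORTSC read and msleep():

#include <stdbool.h>

#define POLL_SLEEP_MS 36     /* XHCI_PORT_POLLING_LFPS_TIME */
#define POLL_RETRIES  10     /* 10 * 36ms covers the 360ms budget */

static bool port_in_polling(void) { return false; }  /* PORTSC check */
static void sleep_ms(int ms)      { (void)ms; }      /* msleep() */

static void wait_for_link_training(void)
{
        int retries = POLL_RETRIES;

        /* give link training time to finish, but never block
         * suspend forever if the port is stuck in Polling */
        while (port_in_polling() && retries--)
                sleep_ms(POLL_SLEEP_MS);
}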
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d72b7d1d383..04684849d683 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
547 */ 547 */
548 hub->port_swap = USB251XB_DEF_PORT_SWAP; 548 hub->port_swap = USB251XB_DEF_PORT_SWAP;
549 of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) { 549 of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
550 if ((port >= 0) && (port <= data->port_cnt)) 550 if (port <= data->port_cnt)
551 hub->port_swap |= BIT(port); 551 hub->port_swap |= BIT(port);
552 } 552 }
553 553
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
612 dev); 612 dev);
613 int err; 613 int err;
614 614
615 if (np) { 615 if (np && of_id) {
616 err = usb251xb_get_ofdata(hub, 616 err = usb251xb_get_ofdata(hub,
617 (struct usb251xb_data *)of_id->data); 617 (struct usb251xb_data *)of_id->data);
618 if (err) { 618 if (err) {
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index bcc23486c4ed..928c2cd6fc00 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -6,6 +6,7 @@ config USB_MTU3
6 tristate "MediaTek USB3 Dual Role controller" 6 tristate "MediaTek USB3 Dual Role controller"
7 depends on USB || USB_GADGET 7 depends on USB || USB_GADGET
8 depends on ARCH_MEDIATEK || COMPILE_TEST 8 depends on ARCH_MEDIATEK || COMPILE_TEST
9 depends on EXTCON || !EXTCON
9 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD 10 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
10 help 11 help
11 Say Y or M here if your system runs on MediaTek SoCs with 12 Say Y or M here if your system runs on MediaTek SoCs with
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fffe23ab0189..979bef9bfb6b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
80 { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */ 80 { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */
81 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ 81 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
82 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ 82 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
83 { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
83 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ 84 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
84 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ 85 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
85 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ 86 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8f5b17471759..1d8461ae2c34 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
609 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 609 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
610 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), 610 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
611 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 611 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
612 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
613 { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
612 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, 614 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
613 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, 615 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
614 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, 616 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index b863bedb55a1..5755f0df0025 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -567,7 +567,9 @@
567/* 567/*
568 * NovaTech product ids (FTDI_VID) 568 * NovaTech product ids (FTDI_VID)
569 */ 569 */
570#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ 570#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
571#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
572#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
571 573
572/* 574/*
573 * Synapse Wireless product ids (FTDI_VID) 575 * Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index fc52ac75fbf6..18110225d506 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
366 if (!urbtrack) 366 if (!urbtrack)
367 return -ENOMEM; 367 return -ENOMEM;
368 368
369 kref_get(&mos_parport->ref_count);
370 urbtrack->mos_parport = mos_parport;
371 urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC); 369 urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
372 if (!urbtrack->urb) { 370 if (!urbtrack->urb) {
373 kfree(urbtrack); 371 kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
388 usb_sndctrlpipe(usbdev, 0), 386 usb_sndctrlpipe(usbdev, 0),
389 (unsigned char *)urbtrack->setup, 387 (unsigned char *)urbtrack->setup,
390 NULL, 0, async_complete, urbtrack); 388 NULL, 0, async_complete, urbtrack);
389 kref_get(&mos_parport->ref_count);
390 urbtrack->mos_parport = mos_parport;
391 kref_init(&urbtrack->ref_count); 391 kref_init(&urbtrack->ref_count);
392 INIT_LIST_HEAD(&urbtrack->urblist_entry); 392 INIT_LIST_HEAD(&urbtrack->urblist_entry);
393 393
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 11b21d9410f3..83869065b802 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
246#define QUECTEL_PRODUCT_EC25 0x0125 246#define QUECTEL_PRODUCT_EC25 0x0125
247#define QUECTEL_PRODUCT_BG96 0x0296 247#define QUECTEL_PRODUCT_BG96 0x0296
248#define QUECTEL_PRODUCT_EP06 0x0306 248#define QUECTEL_PRODUCT_EP06 0x0306
249#define QUECTEL_PRODUCT_EM12 0x0512
249 250
250#define CMOTECH_VENDOR_ID 0x16d8 251#define CMOTECH_VENDOR_ID 0x16d8
251#define CMOTECH_PRODUCT_6001 0x6001 252#define CMOTECH_PRODUCT_6001 0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
1066 .driver_info = RSVD(3) }, 1067 .driver_info = RSVD(3) },
1067 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1068 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1068 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1069 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1069 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1070 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
1071 .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
1070 /* Quectel products using Qualcomm vendor ID */ 1072 /* Quectel products using Qualcomm vendor ID */
1071 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, 1073 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1072 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), 1074 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
1087 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), 1089 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
1088 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, 1090 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1089 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, 1091 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
1092 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
1093 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1094 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
1090 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1095 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1091 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1096 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1092 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1097 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
1940 .driver_info = RSVD(4) }, 1945 .driver_info = RSVD(4) },
1941 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ 1946 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
1942 .driver_info = RSVD(4) }, 1947 .driver_info = RSVD(4) },
1943 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1948 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1944 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1949 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1945 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ 1950 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
1946 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ 1951 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
1952 .driver_info = RSVD(4) },
1953 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1947 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, 1954 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1948 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, 1955 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
1949 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, 1956 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 0f62db091d8d..a2233d72ae7c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -37,6 +37,7 @@
37 S(SRC_ATTACHED), \ 37 S(SRC_ATTACHED), \
38 S(SRC_STARTUP), \ 38 S(SRC_STARTUP), \
39 S(SRC_SEND_CAPABILITIES), \ 39 S(SRC_SEND_CAPABILITIES), \
40 S(SRC_SEND_CAPABILITIES_TIMEOUT), \
40 S(SRC_NEGOTIATE_CAPABILITIES), \ 41 S(SRC_NEGOTIATE_CAPABILITIES), \
41 S(SRC_TRANSITION_SUPPLY), \ 42 S(SRC_TRANSITION_SUPPLY), \
42 S(SRC_READY), \ 43 S(SRC_READY), \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
2966 /* port->hard_reset_count = 0; */ 2967 /* port->hard_reset_count = 0; */
2967 port->caps_count = 0; 2968 port->caps_count = 0;
2968 port->pd_capable = true; 2969 port->pd_capable = true;
2969 tcpm_set_state_cond(port, hard_reset_state(port), 2970 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
2970 PD_T_SEND_SOURCE_CAP); 2971 PD_T_SEND_SOURCE_CAP);
2971 } 2972 }
2972 break; 2973 break;
2974 case SRC_SEND_CAPABILITIES_TIMEOUT:
2975 /*
2976 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
2977 *
2978 * PD 2.0 sinks are supposed to accept src-capabilities with a
2979 * 3.0 header and simply ignore any src PDOs which the sink does
2980 * not understand, such as PPS, but some 2.0 sinks instead ignore
2981 * the entire PD_DATA_SOURCE_CAP message, causing contract
2982 * negotiation to fail.
2983 *
2984 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
2985 * sending src-capabilities with a lower PD revision to
2986 * make these broken sinks work.
2987 */
2988 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
2989 tcpm_set_state(port, HARD_RESET_SEND, 0);
2990 } else if (port->negotiated_rev > PD_REV20) {
2991 port->negotiated_rev--;
2992 port->hard_reset_count = 0;
2993 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2994 } else {
2995 tcpm_set_state(port, hard_reset_state(port), 0);
2996 }
2997 break;
2973 case SRC_NEGOTIATE_CAPABILITIES: 2998 case SRC_NEGOTIATE_CAPABILITIES:
2974 ret = tcpm_pd_check_request(port); 2999 ret = tcpm_pd_check_request(port);
2975 if (ret < 0) { 3000 if (ret < 0) {
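The new SRC_SEND_CAPABILITIES_TIMEOUT state encodes a layered fallback: hard-reset until PD_N_HARD_RESET_COUNT is exhausted, then step the negotiated PD revision down toward 2.0 with a fresh hard-reset budget, and only then give up. A compact model of that decision (enums and limits invented for illustration):

enum action { DO_HARD_RESET, RETRY_LOWER_REV, GIVE_UP };

#define HARD_RESET_LIMIT 2   /* stands in for PD_N_HARD_RESET_COUNT */
#define REV20            0   /* stands in for PD_REV20 */

static enum action on_caps_timeout(int *hard_resets, int *rev)
{
        if (*hard_resets < HARD_RESET_LIMIT) {
                (*hard_resets)++;       /* HARD_RESET_SEND */
                return DO_HARD_RESET;
        }
        if (*rev > REV20) {
                (*rev)--;               /* speak an older PD revision */
                *hard_resets = 0;       /* fresh budget for the retry */
                return RETRY_LOWER_REV;
        }
        return GIVE_UP;                 /* hard_reset_state(port) */
}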
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..6770afd40765 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
615 wcove->dev = &pdev->dev; 615 wcove->dev = &pdev->dev;
616 wcove->regmap = pmic->regmap; 616 wcove->regmap = pmic->regmap;
617 617
618 irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, 618 irq = platform_get_irq(pdev, 0);
619 platform_get_irq(pdev, 0)); 619 if (irq < 0) {
620 dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
621 return irq;
622 }
623
624 irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
620 if (irq < 0) 625 if (irq < 0)
621 return irq; 626 return irq;
622 627
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index df7d09409efe..8ca333f21292 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -27,6 +27,10 @@
27 27
28#define GUEST_MAPPINGS_TRIES 5 28#define GUEST_MAPPINGS_TRIES 5
29 29
30#define VBG_KERNEL_REQUEST \
31 (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
32 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
33
30/** 34/**
31 * Reserves memory in which the VMM can relocate any guest mappings 35 * Reserves memory in which the VMM can relocate any guest mappings
32 * that are floating around. 36 * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
48 int i, rc; 52 int i, rc;
49 53
50 /* Query the required space. */ 54 /* Query the required space. */
51 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO); 55 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
56 VBG_KERNEL_REQUEST);
52 if (!req) 57 if (!req)
53 return; 58 return;
54 59
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
135 * Tell the host that we're going to free the memory we reserved for 140 * Tell the host that we're going to free the memory we reserved for
136 * it, then free it up. (Leak the memory if anything goes wrong here.) 141 * it, then free it up. (Leak the memory if anything goes wrong here.)
137 */ 142 */
138 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO); 143 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
144 VBG_KERNEL_REQUEST);
139 if (!req) 145 if (!req)
140 return; 146 return;
141 147
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
172 struct vmmdev_guest_info2 *req2 = NULL; 178 struct vmmdev_guest_info2 *req2 = NULL;
173 int rc, ret = -ENOMEM; 179 int rc, ret = -ENOMEM;
174 180
175 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO); 181 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
176 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2); 182 VBG_KERNEL_REQUEST);
183 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
184 VBG_KERNEL_REQUEST);
177 if (!req1 || !req2) 185 if (!req1 || !req2)
178 goto out_free; 186 goto out_free;
179 187
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
187 req2->additions_minor = VBG_VERSION_MINOR; 195 req2->additions_minor = VBG_VERSION_MINOR;
188 req2->additions_build = VBG_VERSION_BUILD; 196 req2->additions_build = VBG_VERSION_BUILD;
189 req2->additions_revision = VBG_SVN_REV; 197 req2->additions_revision = VBG_SVN_REV;
190 /* (no features defined yet) */ 198 req2->additions_features =
191 req2->additions_features = 0; 199 VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
192 strlcpy(req2->name, VBG_VERSION_STRING, 200 strlcpy(req2->name, VBG_VERSION_STRING,
193 sizeof(req2->name)); 201 sizeof(req2->name));
194 202
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
230 struct vmmdev_guest_status *req; 238 struct vmmdev_guest_status *req;
231 int rc; 239 int rc;
232 240
233 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS); 241 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
242 VBG_KERNEL_REQUEST);
234 if (!req) 243 if (!req)
235 return -ENOMEM; 244 return -ENOMEM;
236 245
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
423 struct vmmdev_heartbeat *req; 432 struct vmmdev_heartbeat *req;
424 int rc; 433 int rc;
425 434
426 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE); 435 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
436 VBG_KERNEL_REQUEST);
427 if (!req) 437 if (!req)
428 return -ENOMEM; 438 return -ENOMEM;
429 439
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
457 467
458 gdev->guest_heartbeat_req = vbg_req_alloc( 468 gdev->guest_heartbeat_req = vbg_req_alloc(
459 sizeof(*gdev->guest_heartbeat_req), 469 sizeof(*gdev->guest_heartbeat_req),
460 VMMDEVREQ_GUEST_HEARTBEAT); 470 VMMDEVREQ_GUEST_HEARTBEAT,
471 VBG_KERNEL_REQUEST);
461 if (!gdev->guest_heartbeat_req) 472 if (!gdev->guest_heartbeat_req)
462 return -ENOMEM; 473 return -ENOMEM;
463 474
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
528 struct vmmdev_mask *req; 539 struct vmmdev_mask *req;
529 int rc; 540 int rc;
530 541
531 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); 542 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
543 VBG_KERNEL_REQUEST);
532 if (!req) 544 if (!req)
533 return -ENOMEM; 545 return -ENOMEM;
534 546
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
567 u32 changed, previous; 579 u32 changed, previous;
568 int rc, ret = 0; 580 int rc, ret = 0;
569 581
570 /* Allocate a request buffer before taking the spinlock */ 582 /*
571 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); 583 * Allocate a request buffer before taking the spinlock; when
584 * the session is being terminated, the requestor is the kernel,
585 * as we're cleaning up.
586 */
587 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
588 session_termination ? VBG_KERNEL_REQUEST :
589 session->requestor);
572 if (!req) { 590 if (!req) {
573 if (!session_termination) 591 if (!session_termination)
574 return -ENOMEM; 592 return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
627 struct vmmdev_mask *req; 645 struct vmmdev_mask *req;
628 int rc; 646 int rc;
629 647
630 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); 648 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
649 VBG_KERNEL_REQUEST);
631 if (!req) 650 if (!req)
632 return -ENOMEM; 651 return -ENOMEM;
633 652
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
 	u32 changed, previous;
 	int rc, ret = 0;
 
-	/* Allocate a request buffer before taking the spinlock */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+	/*
+	 * Allocate a request buffer before taking the spinlock, when
+	 * the session is being terminated the requestor is the kernel,
+	 * as we're cleaning up.
+	 */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+			    session_termination ? VBG_KERNEL_REQUEST :
+						  session->requestor);
 	if (!req) {
 		if (!session_termination)
 			return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
 	struct vmmdev_host_version *req;
 	int rc, ret;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 
 	gdev->mem_balloon.get_req =
 		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
-			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+			      VBG_KERNEL_REQUEST);
 	gdev->mem_balloon.change_req =
 		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
-			      VMMDEVREQ_CHANGE_MEMBALLOON);
+			      VMMDEVREQ_CHANGE_MEMBALLOON,
+			      VBG_KERNEL_REQUEST);
 	gdev->cancel_req =
 		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
-			      VMMDEVREQ_HGCM_CANCEL2);
+			      VMMDEVREQ_HGCM_CANCEL2,
+			      VBG_KERNEL_REQUEST);
 	gdev->ack_events_req =
 		vbg_req_alloc(sizeof(*gdev->ack_events_req),
-			      VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+			      VBG_KERNEL_REQUEST);
 	gdev->mouse_status_req =
 		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
-			      VMMDEVREQ_GET_MOUSE_STATUS);
+			      VMMDEVREQ_GET_MOUSE_STATUS,
+			      VBG_KERNEL_REQUEST);
 
 	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
 	    !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
  * vboxguest_linux.c calls this when userspace opens the char-device.
  * Return: A pointer to the new session or an ERR_PTR on error.
  * @gdev:		The Guest extension device.
- * @user:		Set if this is a session for the vboxuser device.
+ * @requestor:		VMMDEV_REQUESTOR_* flags
  */
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 {
 	struct vbg_session *session;
 
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
 		return ERR_PTR(-ENOMEM);
 
 	session->gdev = gdev;
-	session->user_session = user;
+	session->requestor = requestor;
 
 	return session;
 }
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
 		if (!session->hgcm_client_ids[i])
 			continue;
 
-		vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+		/* requestor is kernel here, as we're cleaning up. */
+		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+				    session->hgcm_client_ids[i], &rc);
 	}
 
 	kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
 		return -EPERM;
 	}
 
-	if (trusted_apps_only && session->user_session) {
+	if (trusted_apps_only &&
+	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
 		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
 			req->request_type);
 		return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
 		return -EMFILE;
 
-	ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
-			       &conn->hdr.rc);
+	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+			       &client_id, &conn->hdr.rc);
 
 	mutex_lock(&gdev->session_mutex);
 	if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
 		return -EINVAL;
 
-	ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+				  &disconn->hdr.rc);
 
 	mutex_lock(&gdev->session_mutex);
 	if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
 	}
 
 	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
-		ret = vbg_hgcm_call32(gdev, client_id,
+		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
 				      call->function, call->timeout_ms,
 				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
 				      call->parm_count, &call->hdr.rc);
 	else
-		ret = vbg_hgcm_call(gdev, client_id,
+		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
 				    call->function, call->timeout_ms,
 				    VBG_IOCTL_HGCM_CALL_PARMS(call),
 				    call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
 }
 
 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+				     struct vbg_session *session,
 				     struct vbg_ioctl_write_coredump *dump)
 {
 	struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
 	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
 		return -EINVAL;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+			    session->requestor);
 	if (!req)
 		return -ENOMEM;
 
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
 	case VBG_IOCTL_CHECK_BALLOON:
 		return vbg_ioctl_check_balloon(gdev, data);
 	case VBG_IOCTL_WRITE_CORE_DUMP:
-		return vbg_ioctl_write_core_dump(gdev, data);
+		return vbg_ioctl_write_core_dump(gdev, session, data);
 	}
 
 	/* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
 	struct vmmdev_mouse_status *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec45bfa9..4188c12b839f 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -154,15 +154,15 @@ struct vbg_session {
 	 * host. Protected by vbg_gdev.session_mutex.
 	 */
 	u32 guest_caps;
-	/** Does this session belong to a root process or a user one? */
-	bool user_session;
+	/** VMMDEV_REQUESTOR_* flags */
+	u32 requestor;
 	/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
 	bool cancel_waiters;
 };
 
 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
 void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
 void vbg_core_close_session(struct vbg_session *session);
 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
 void vbg_linux_mouse_event(struct vbg_dev *gdev);
 
 /* Private (non exported) functions form vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+		    u32 requestor);
 void vbg_req_free(void *req, size_t len);
 int vbg_req_perform(struct vbg_dev *gdev, void *req);
 int vbg_hgcm_call32(
-	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-	int *vbox_status);
+	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+	u32 parm_count, int *vbox_status);
 
 #endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a9619192d..6e8c0f1c1056 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2006-2016 Oracle Corporation
  */
 
+#include <linux/cred.h>
 #include <linux/input.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
 /** Global vbg_gdev pointer used by vbg_get/put_gdev. */
 static struct vbg_dev *vbg_gdev;
 
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+	u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+			VMMDEV_REQUESTOR_CON_DONT_KNOW |
+			VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+	if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+		requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+	else
+		requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+	if (in_egroup_p(inode->i_gid))
+		requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+	return requestor;
+}
+
 static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 {
 	struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 	/* misc_open sets filp->private_data to our misc device */
 	gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
 
-	session = vbg_core_open_session(gdev, false);
+	session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
 	if (IS_ERR(session))
 		return PTR_ERR(session);
 
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
 	gdev = container_of(filp->private_data, struct vbg_dev,
 			    misc_device_user);
 
-	session = vbg_core_open_session(gdev, false);
+	session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+					      VMMDEV_REQUESTOR_USER_DEVICE);
 	if (IS_ERR(session))
 		return PTR_ERR(session);
 
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
 		       req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
 
 	if (is_vmmdev_req)
-		buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+		buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+				    session->requestor);
 	else
 		buf = kmalloc(size, GFP_KERNEL);
 	if (!buf)
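
vbg_misc_device_requestor() above derives the requestor mask from the opening process rather than assuming it: uid 0 maps to the root flag, membership in the device node's group adds the vbox-group flag, and opens of the vboxuser node additionally get VMMDEV_REQUESTOR_USER_DEVICE. A sketch of the same decision tree; the flag values here are illustrative, not the real VMMDEV_REQUESTOR_* encoding:

	#include <stdbool.h>
	#include <stdint.h>

	#define REQ_USERMODE	0x01u
	#define REQ_USR_ROOT	0x02u
	#define REQ_USR_USER	0x04u
	#define REQ_GRP_VBOX	0x08u
	#define REQ_USER_DEVICE	0x10u

	static uint32_t requestor_flags(bool is_root, bool in_device_group,
					bool via_vboxuser_node)
	{
		uint32_t req = REQ_USERMODE;	/* always set for char-dev opens */

		req |= is_root ? REQ_USR_ROOT : REQ_USR_USER;
		if (in_device_group)		/* in_egroup_p(inode->i_gid) */
			req |= REQ_GRP_VBOX;
		if (via_vboxuser_node)		/* /dev/vboxuser opens */
			req |= REQ_USER_DEVICE;
		return req;
	}
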
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index bf4474214b4d..75fd140b02ff 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
 VBG_LOG(vbg_debug, pr_debug);
 #endif
 
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+		    u32 requestor)
 {
 	struct vmmdev_request_header *req;
 	int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
 	req->request_type = req_type;
 	req->rc = VERR_GENERAL_FAILURE;
 	req->reserved1 = 0;
-	req->reserved2 = 0;
+	req->requestor = requestor;
 
 	return req;
 }
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
 	return done;
 }
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
 		     struct vmmdev_hgcm_service_location *loc,
 		     u32 *client_id, int *vbox_status)
 {
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 	int rc;
 
 	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
-				     VMMDEVREQ_HGCM_CONNECT);
+				     VMMDEVREQ_HGCM_CONNECT, requestor);
 	if (!hgcm_connect)
 		return -ENOMEM;
 
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 }
 EXPORT_SYMBOL(vbg_hgcm_connect);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+			u32 client_id, int *vbox_status)
 {
 	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
 	int rc;
 
 	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
-					VMMDEVREQ_HGCM_DISCONNECT);
+					VMMDEVREQ_HGCM_DISCONNECT,
+					requestor);
 	if (!hgcm_disconnect)
 		return -ENOMEM;
 
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
 	return 0;
 }
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-		  u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+		  u32 function, u32 timeout_ms,
+		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+		  int *vbox_status)
 {
 	struct vmmdev_hgcm_call *call;
 	void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
 		goto free_bounce_bufs;
 	}
 
-	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
 	if (!call) {
 		ret = -ENOMEM;
 		goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
 
 #ifdef CONFIG_COMPAT
 int vbg_hgcm_call32(
-	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-	int *vbox_status)
+	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+	u32 parm_count, int *vbox_status)
 {
 	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
 	u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
 		goto out_free;
 	}
 
-	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+	ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
 			    parm64, parm_count, vbox_status);
 	if (ret < 0)
 		goto out_free;
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
index 77f0c8f8a231..84834dad38d5 100644
--- a/drivers/virt/vboxguest/vboxguest_version.h
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -9,11 +9,10 @@
 #ifndef __VBOX_VERSION_H__
 #define __VBOX_VERSION_H__
 
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
 #define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
 
 #endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae978935d..6337b8d75d96 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
 	s32 rc;
 	/** Reserved field no.1. MBZ. */
 	u32 reserved1;
-	/** Reserved field no.2. MBZ. */
-	u32 reserved2;
+	/** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+	u32 requestor;
 };
 VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
 
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
 };
 VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
 
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO	BIT(0)
+
 /** struct vmmdev_guestinfo2 - Guest information report, version 2. */
 struct vmmdev_guest_info2 {
 	/** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
 	u32 additions_build;
 	/** SVN revision. */
 	u32 additions_revision;
-	/** Feature mask, currently unused. */
+	/** Feature mask. */
 	u32 additions_features;
 	/**
 	 * The intentional meaning of this field was:
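
The vmmdev.h change is deliberately ABI-neutral: the header stays 24 bytes (the VMMDEV_ASSERT_SIZE above still holds) because the new requestor word simply reuses the second reserved field, which old guests always sent as zero. A sketch of the layout, with portable types standing in for the kernel's u32/s32:

	#include <stdint.h>

	struct wire_request_header {	/* mirrors struct vmmdev_request_header */
		uint32_t size;		/* total request size */
		uint32_t version;	/* structure version */
		uint32_t request_type;
		int32_t  rc;		/* out: VBox status code */
		uint32_t reserved1;	/* still MBZ */
		uint32_t requestor;	/* in: was reserved2, zero == unknown */
	};

	_Static_assert(sizeof(struct wire_request_header) == 24,
		       "renaming reserved2 must not change the wire layout");
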
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index ca08c83168f5..0b37867b5c20 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
 
 	xdr_encode_AFS_StoreStatus(&bp, attr);
 
-	*bp++ = 0;				/* position of start of write */
-	*bp++ = 0;
+	*bp++ = htonl(attr->ia_size >> 32);	/* position of start of write */
+	*bp++ = htonl((u32) attr->ia_size);
 	*bp++ = 0;				/* size of write */
 	*bp++ = 0;
 	*bp++ = htonl(attr->ia_size >> 32);	/* new file length */
@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
 
 	xdr_encode_AFS_StoreStatus(&bp, attr);
 
-	*bp++ = 0;				/* position of start of write */
+	*bp++ = htonl(attr->ia_size);		/* position of start of write */
 	*bp++ = 0;				/* size of write */
 	*bp++ = htonl(attr->ia_size);		/* new file length */
 
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 5aa57929e8c2..6e97a42d24d1 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
 	bp = xdr_encode_u32(bp, 0); /* RPC flags */
 	bp = xdr_encode_YFSFid(bp, &vnode->fid);
 	bp = xdr_encode_YFS_StoreStatus(bp, attr);
-	bp = xdr_encode_u64(bp, 0);		/* position of start of write */
+	bp = xdr_encode_u64(bp, attr->ia_size);	/* position of start of write */
 	bp = xdr_encode_u64(bp, 0);		/* size of write */
 	bp = xdr_encode_u64(bp, attr->ia_size);	/* new file length */
 	yfs_check_req(call, bp);
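
All three afs/yfs hunks fix the same encoding bug: a truncate is transmitted as a zero-length store whose write position is the new file length, so the server extends or cuts the file at the right offset instead of at offset 0. A sketch of the corrected encoding, with a hypothetical encoder in place of the xdr helpers (byte-order conversion elided):

	#include <stdint.h>

	struct cursor { uint32_t *p; };

	/* stands in for xdr_encode_u64() */
	static void put_u64(struct cursor *c, uint64_t v)
	{
		*c->p++ = (uint32_t)(v >> 32);
		*c->p++ = (uint32_t)v;
	}

	static void encode_truncate(struct cursor *c, uint64_t new_size)
	{
		put_u64(c, new_size);	/* position of start of write: the fix */
		put_u64(c, 0);		/* size of write: nothing to transfer */
		put_u64(c, new_size);	/* new file length */
	}
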
diff --git a/fs/aio.c b/fs/aio.c
index 38b741aef0bf..a4cc2a1cccb7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
 	struct file		*file;
 	struct wait_queue_head	*head;
 	__poll_t		events;
-	bool			woken;
+	bool			done;
 	bool			cancelled;
 	struct wait_queue_entry	wait;
 	struct work_struct	work;
@@ -204,8 +204,7 @@ struct aio_kiocb {
 	struct kioctx		*ki_ctx;
 	kiocb_cancel_fn		*ki_cancel;
 
-	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
-	__u64			ki_user_data;	/* user's data for completion */
+	struct io_event		ki_res;
 
 	struct list_head	ki_list;	/* the aio core uses this
 						 * for cancellation */
@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  *	Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 	if (unlikely(!req))
 		return NULL;
 
+	if (unlikely(!get_reqs_available(ctx))) {
+		kfree(req);
+		return NULL;
+	}
+
 	percpu_ref_get(&ctx->reqs);
 	req->ki_ctx = ctx;
 	INIT_LIST_HEAD(&req->ki_list);
-	refcount_set(&req->ki_refcnt, 0);
+	refcount_set(&req->ki_refcnt, 2);
 	req->ki_eventfd = NULL;
 	return req;
 }
@@ -1067,30 +1074,20 @@ out:
 	return ret;
 }
 
-static inline void iocb_put(struct aio_kiocb *iocb)
-{
-	if (refcount_read(&iocb->ki_refcnt) == 0 ||
-	    refcount_dec_and_test(&iocb->ki_refcnt)) {
-		if (iocb->ki_filp)
-			fput(iocb->ki_filp);
-		percpu_ref_put(&iocb->ki_ctx->reqs);
-		kmem_cache_free(kiocb_cachep, iocb);
-	}
-}
-
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-			   long res, long res2)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
-	ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-	ev->data = iocb->ki_user_data;
-	ev->res = res;
-	ev->res2 = res2;
+	if (iocb->ki_eventfd)
+		eventfd_ctx_put(iocb->ki_eventfd);
+	if (iocb->ki_filp)
+		fput(iocb->ki_filp);
+	percpu_ref_put(&iocb->ki_ctx->reqs);
+	kmem_cache_free(kiocb_cachep, iocb);
 }
 
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
 	struct aio_ring	*ring;
@@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-	aio_fill_event(event, iocb, res, res2);
+	*event = iocb->ki_res;
 
 	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-		 res, res2);
+	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+		 (void __user *)(unsigned long)iocb->ki_res.obj,
+		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
@@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	 * eventfd. The eventfd_signal() function is safe to be called
 	 * from IRQ context.
 	 */
-	if (iocb->ki_eventfd) {
+	if (iocb->ki_eventfd)
 		eventfd_signal(iocb->ki_eventfd, 1);
-		eventfd_ctx_put(iocb->ki_eventfd);
-	}
 
 	/*
 	 * We have to order our ring_info tail store above and test
@@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
-	iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+		aio_complete(iocb);
+		iocb_destroy(iocb);
+	}
 }
 
 /* aio_read_events_ring
@@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
 		file_end_write(kiocb->ki_filp);
 	}
 
-	aio_complete(iocb, res, res2);
+	iocb->ki_res.res = res;
+	iocb->ki_res.res2 = res2;
+	iocb_put(iocb);
 }
 
 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
 	}
 }
 
-static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+static int aio_read(struct kiocb *req, const struct iocb *iocb,
 			bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
 	struct file *file;
-	ssize_t ret;
+	int ret;
 
 	ret = aio_prep_rw(req, iocb);
 	if (ret)
@@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
 	return ret;
 }
 
-static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+static int aio_write(struct kiocb *req, const struct iocb *iocb,
 			 bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
 	struct file *file;
-	ssize_t ret;
+	int ret;
 
 	ret = aio_prep_rw(req, iocb);
 	if (ret)
@@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
 
 static void aio_fsync_work(struct work_struct *work)
 {
-	struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-	int ret;
+	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
 
-	ret = vfs_fsync(req->file, req->datasync);
-	aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+	iocb_put(iocb);
 }
 
 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
 	return 0;
 }
 
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-	aio_complete(iocb, mangle_poll(mask), 0);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
 	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work)
 		return;
 	}
 	list_del_init(&iocb->ki_list);
+	iocb->ki_res.res = mangle_poll(mask);
+	req->done = true;
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	aio_poll_complete(iocb, mask);
+	iocb_put(iocb);
 }
 
 /* assumes we are called with irqs disabled */
@@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	__poll_t mask = key_to_poll(key);
 	unsigned long flags;
 
-	req->woken = true;
-
 	/* for instances that support it check for an event match first: */
-	if (mask) {
-		if (!(mask & req->events))
-			return 0;
+	if (mask && !(mask & req->events))
+		return 0;
+
+	list_del_init(&req->wait.entry);
 
+	if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
 		/*
 		 * Try to complete the iocb inline if we can. Use
 		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
 		 * call this function with IRQs disabled and because IRQs
 		 * have to be disabled before ctx_lock is obtained.
 		 */
-		if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-			list_del(&iocb->ki_list);
-			spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-			list_del_init(&req->wait.entry);
-			aio_poll_complete(iocb, mask);
-			return 1;
-		}
+		list_del(&iocb->ki_list);
+		iocb->ki_res.res = mangle_poll(mask);
+		req->done = true;
+		spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+		iocb_put(iocb);
+	} else {
+		schedule_work(&req->work);
 	}
-
-	list_del_init(&req->wait.entry);
-	schedule_work(&req->work);
 	return 1;
 }
 
@@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 	add_wait_queue(head, &pt->iocb->poll.wait);
 }
 
-static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
 	struct kioctx *ctx = aiocb->ki_ctx;
 	struct poll_iocb *req = &aiocb->poll;
 	struct aio_poll_table apt;
+	bool cancel = false;
 	__poll_t mask;
 
 	/* reject any unknown events outside the normal event mask. */
@@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
 	req->head = NULL;
-	req->woken = false;
+	req->done = false;
 	req->cancelled = false;
 
 	apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 	INIT_LIST_HEAD(&req->wait.entry);
 	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-	/* one for removal from waitqueue, one for this function */
-	refcount_set(&aiocb->ki_refcnt, 2);
-
 	mask = vfs_poll(req->file, &apt.pt) & req->events;
-	if (unlikely(!req->head)) {
-		/* we did not manage to set up a waitqueue, done */
-		goto out;
-	}
-
 	spin_lock_irq(&ctx->ctx_lock);
-	spin_lock(&req->head->lock);
-	if (req->woken) {
-		/* wake_up context handles the rest */
-		mask = 0;
+	if (likely(req->head)) {
+		spin_lock(&req->head->lock);
+		if (unlikely(list_empty(&req->wait.entry))) {
+			if (apt.error)
+				cancel = true;
+			apt.error = 0;
+			mask = 0;
+		}
+		if (mask || apt.error) {
+			list_del_init(&req->wait.entry);
+		} else if (cancel) {
+			WRITE_ONCE(req->cancelled, true);
+		} else if (!req->done) { /* actually waiting for an event */
+			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+			aiocb->ki_cancel = aio_poll_cancel;
+		}
+		spin_unlock(&req->head->lock);
+	}
+	if (mask) { /* no async, we'd stolen it */
+		aiocb->ki_res.res = mangle_poll(mask);
 		apt.error = 0;
-	} else if (mask || apt.error) {
-		/* if we get an error or a mask we are done */
-		WARN_ON_ONCE(list_empty(&req->wait.entry));
-		list_del_init(&req->wait.entry);
-	} else {
-		/* actually waiting for an event */
-		list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-		aiocb->ki_cancel = aio_poll_cancel;
 	}
-	spin_unlock(&req->head->lock);
 	spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-	if (unlikely(apt.error))
-		return apt.error;
-
 	if (mask)
-		aio_poll_complete(aiocb, mask);
-	iocb_put(aiocb);
-	return 0;
+		iocb_put(aiocb);
+	return apt.error;
 }
 
 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
-			   struct iocb __user *user_iocb, bool compat)
+			   struct iocb __user *user_iocb, struct aio_kiocb *req,
+			   bool compat)
 {
-	struct aio_kiocb *req;
-	ssize_t ret;
-
-	/* enforce forwards compatibility on users */
-	if (unlikely(iocb->aio_reserved2)) {
-		pr_debug("EINVAL: reserve field set\n");
-		return -EINVAL;
-	}
-
-	/* prevent overflows */
-	if (unlikely(
-	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
-	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
-	    ((ssize_t)iocb->aio_nbytes < 0)
-	   )) {
-		pr_debug("EINVAL: overflow check\n");
-		return -EINVAL;
-	}
-
-	if (!get_reqs_available(ctx))
-		return -EAGAIN;
-
-	ret = -EAGAIN;
-	req = aio_get_req(ctx);
-	if (unlikely(!req))
-		goto out_put_reqs_available;
-
 	req->ki_filp = fget(iocb->aio_fildes);
-	ret = -EBADF;
 	if (unlikely(!req->ki_filp))
-		goto out_put_req;
+		return -EBADF;
 
 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+		struct eventfd_ctx *eventfd;
 		/*
 		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
 		 * instance of the file* now. The file descriptor must be
 		 * an eventfd() fd, and will be signaled for each completed
 		 * event using the eventfd_signal() function.
 		 */
-		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
-		if (IS_ERR(req->ki_eventfd)) {
-			ret = PTR_ERR(req->ki_eventfd);
-			req->ki_eventfd = NULL;
-			goto out_put_req;
-		}
+		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
+		if (IS_ERR(eventfd))
+			return PTR_ERR(req->ki_eventfd);
+
+		req->ki_eventfd = eventfd;
 	}
 
-	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
-	if (unlikely(ret)) {
+	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
 		pr_debug("EFAULT: aio_key\n");
-		goto out_put_req;
+		return -EFAULT;
 	}
 
-	req->ki_user_iocb = user_iocb;
-	req->ki_user_data = iocb->aio_data;
+	req->ki_res.obj = (u64)(unsigned long)user_iocb;
+	req->ki_res.data = iocb->aio_data;
+	req->ki_res.res = 0;
+	req->ki_res.res2 = 0;
 
 	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
-		ret = aio_read(&req->rw, iocb, false, compat);
-		break;
+		return aio_read(&req->rw, iocb, false, compat);
 	case IOCB_CMD_PWRITE:
-		ret = aio_write(&req->rw, iocb, false, compat);
-		break;
+		return aio_write(&req->rw, iocb, false, compat);
 	case IOCB_CMD_PREADV:
-		ret = aio_read(&req->rw, iocb, true, compat);
-		break;
+		return aio_read(&req->rw, iocb, true, compat);
 	case IOCB_CMD_PWRITEV:
-		ret = aio_write(&req->rw, iocb, true, compat);
-		break;
+		return aio_write(&req->rw, iocb, true, compat);
 	case IOCB_CMD_FSYNC:
-		ret = aio_fsync(&req->fsync, iocb, false);
-		break;
+		return aio_fsync(&req->fsync, iocb, false);
 	case IOCB_CMD_FDSYNC:
-		ret = aio_fsync(&req->fsync, iocb, true);
-		break;
+		return aio_fsync(&req->fsync, iocb, true);
 	case IOCB_CMD_POLL:
-		ret = aio_poll(req, iocb);
-		break;
+		return aio_poll(req, iocb);
 	default:
 		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-
-	/*
-	 * If ret is 0, we'd either done aio_complete() ourselves or have
-	 * arranged for that to be done asynchronously. Anything non-zero
-	 * means that we need to destroy req ourselves.
-	 */
-	if (ret)
-		goto out_put_req;
-	return 0;
-out_put_req:
-	if (req->ki_eventfd)
-		eventfd_ctx_put(req->ki_eventfd);
-	iocb_put(req);
-out_put_reqs_available:
-	put_reqs_available(ctx, 1);
-	return ret;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 bool compat)
 {
+	struct aio_kiocb *req;
 	struct iocb iocb;
+	int err;
 
 	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
 		return -EFAULT;
 
-	return __io_submit_one(ctx, &iocb, user_iocb, compat);
+	/* enforce forwards compatibility on users */
+	if (unlikely(iocb.aio_reserved2)) {
+		pr_debug("EINVAL: reserve field set\n");
+		return -EINVAL;
+	}
+
+	/* prevent overflows */
+	if (unlikely(
+	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+	    ((ssize_t)iocb.aio_nbytes < 0)
+	   )) {
+		pr_debug("EINVAL: overflow check\n");
+		return -EINVAL;
+	}
+
+	req = aio_get_req(ctx);
+	if (unlikely(!req))
+		return -EAGAIN;
+
+	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+	/* Done with the synchronous reference */
+	iocb_put(req);
+
+	/*
+	 * If err is 0, we'd either done aio_complete() ourselves or have
+	 * arranged for that to be done asynchronously. Anything non-zero
+	 * means that we need to destroy req ourselves.
+	 */
+	if (unlikely(err)) {
+		iocb_destroy(req);
+		put_reqs_available(ctx, 1);
+	}
+	return err;
 }
 
 /* sys_io_submit:
@@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
 }
 #endif
 
-/* lookup_kiocb
- *	Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
-	struct aio_kiocb *kiocb;
-
-	assert_spin_locked(&ctx->ctx_lock);
-
-	/* TODO: use a hash or array, this sucks. */
-	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-		if (kiocb->ki_user_iocb == iocb)
-			return kiocb;
-	}
-	return NULL;
-}
-
 /* sys_io_cancel:
  *	Attempts to cancel an iocb previously passed to io_submit. If
  *	the operation is successfully cancelled, the resulting event is
@@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	struct aio_kiocb *kiocb;
 	int ret = -EINVAL;
 	u32 key;
+	u64 obj = (u64)(unsigned long)iocb;
 
 	if (unlikely(get_user(key, &iocb->aio_key)))
 		return -EFAULT;
@@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		return -EINVAL;
 
 	spin_lock_irq(&ctx->ctx_lock);
-	kiocb = lookup_kiocb(ctx, iocb);
-	if (kiocb) {
-		ret = kiocb->ki_cancel(&kiocb->rw);
-		list_del_init(&kiocb->ki_list);
+	/* TODO: use a hash or array, this sucks. */
+	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+		if (kiocb->ki_res.obj == obj) {
+			ret = kiocb->ki_cancel(&kiocb->rw);
+			list_del_init(&kiocb->ki_list);
+			break;
+		}
 	}
 	spin_unlock_irq(&ctx->ctx_lock);
 
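
The aio rework above replaces lookup-by-user-pointer and on-demand refcounting with a single lifetime rule: every request is born with two references, one owned by the submission path and one by the completion path, and the completion event (now carried in ki_res) is published exactly once, when the last reference drops. A stand-alone sketch of that rule; the names are analogues, not the kernel's:

	#include <stdatomic.h>

	struct request {
		atomic_int refs;	/* starts at 2: submitter + completer */
		long res, res2;		/* filled in before the completer's put */
	};

	static void publish_event(struct request *r) { (void)r; /* aio_complete() analogue */ }
	static void destroy(struct request *r)       { (void)r; /* iocb_destroy() analogue */ }

	static void req_init(struct request *r)
	{
		atomic_init(&r->refs, 2);
	}

	static void req_put(struct request *r)
	{
		if (atomic_fetch_sub(&r->refs, 1) == 1) {
			publish_event(r);	/* event becomes visible once */
			destroy(r);
		}
	}
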
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e3346628efe2..2d61ddda9bf5 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
+	kfree(ci->i_symlink);
 	kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -566,7 +567,6 @@ void ceph_destroy_inode(struct inode *inode)
 		}
 	}
 
-	kfree(ci->i_symlink);
 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
 		frag = rb_entry(n, struct ceph_inode_frag, node);
 		rb_erase(n, &ci->i_fragtree);
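
The ceph fix moves kfree(ci->i_symlink) from destroy_inode() into the RCU callback that frees the inode itself, closing a window where a lock-free path walker could still dereference the symlink body after it was freed. The general shape of the fix, using the kernel's call_rcu() API with illustrative structure names:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_inode {
		struct rcu_head rcu;
		char *symlink;		/* may be read under rcu_read_lock() */
	};

	static void my_inode_free_rcu(struct rcu_head *head)
	{
		struct my_inode *mi = container_of(head, struct my_inode, rcu);

		kfree(mi->symlink);	/* safe: grace period has elapsed */
		kfree(mi);
	}

	static void my_destroy_inode(struct my_inode *mi)
	{
		call_rcu(&mi->rcu, my_inode_free_rcu);	/* defer both frees */
	}
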
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f9b71c12cc9f..a05bf1d6e1d0 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
 			   tcon->ses->server->echo_interval / HZ);
 	if (tcon->snapshot_time)
 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+	if (tcon->handle_timeout)
+		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
 	/* convert actimeo and display it in seconds */
 	seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
 
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 38feae812b47..5b18d4585740 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -60,6 +60,12 @@
 #define CIFS_MAX_ACTIMEO (1 << 30)
 
 /*
+ * Max persistent and resilient handle timeout (milliseconds).
+ * Windows durable max was 960000 (16 minutes)
+ */
+#define SMB3_MAX_HANDLE_TIMEOUT 960000
+
+/*
  * MAX_REQ is the maximum number of requests that WE will send
  * on one socket concurrently.
  */
@@ -586,6 +592,7 @@ struct smb_vol {
 	struct nls_table *local_nls;
 	unsigned int echo_interval; /* echo interval in secs */
 	__u64 snapshot_time; /* needed for timewarp tokens */
+	__u32 handle_timeout; /* persistent and durable handle timeout in ms */
 	unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
 };
 
@@ -1058,6 +1065,7 @@ struct cifs_tcon {
 	__u32 vol_serial_number;
 	__le64 vol_create_time;
 	__u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
+	__u32 handle_timeout; /* persistent and durable handle timeout in ms */
 	__u32 ss_flags;		/* sector size flags */
 	__u32 perf_sector_size; /* best sector size for perf */
 	__u32 max_chunks;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a8e9738db691..4c0e44489f21 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -103,7 +103,7 @@ enum {
 	Opt_cruid, Opt_gid, Opt_file_mode,
 	Opt_dirmode, Opt_port,
 	Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
-	Opt_echo_interval, Opt_max_credits,
+	Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
 	Opt_snapshot,
 
 	/* Mount options which take string value */
@@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = {
 	{ Opt_rsize, "rsize=%s" },
 	{ Opt_wsize, "wsize=%s" },
 	{ Opt_actimeo, "actimeo=%s" },
+	{ Opt_handletimeout, "handletimeout=%s" },
 	{ Opt_echo_interval, "echo_interval=%s" },
 	{ Opt_max_credits, "max_credits=%s" },
 	{ Opt_snapshot, "snapshot=%s" },
@@ -1619,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 
 	vol->actimeo = CIFS_DEF_ACTIMEO;
 
+	/* Most clients set timeout to 0, allows server to use its default */
+	vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+
 	/* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
 	vol->ops = &smb30_operations;
 	vol->vals = &smbdefault_values;
@@ -2017,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 				goto cifs_parse_mount_err;
 			}
 			break;
+		case Opt_handletimeout:
+			if (get_option_ul(args, &option)) {
+				cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
+					 __func__);
+				goto cifs_parse_mount_err;
+			}
+			vol->handle_timeout = option;
+			if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+				cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
+				goto cifs_parse_mount_err;
+			}
+			break;
 		case Opt_echo_interval:
 			if (get_option_ul(args, &option)) {
 				cifs_dbg(VFS, "%s: Invalid echo interval value\n",
@@ -3183,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
 		return 0;
 	if (tcon->snapshot_time != volume_info->snapshot_time)
 		return 0;
+	if (tcon->handle_timeout != volume_info->handle_timeout)
+		return 0;
 	return 1;
 }
 
@@ -3297,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
 		tcon->snapshot_time = volume_info->snapshot_time;
 	}
 
+	if (volume_info->handle_timeout) {
+		if (ses->server->vals->protocol_id == 0) {
+			cifs_dbg(VFS,
+			     "Use SMB2.1 or later for handle timeout option\n");
+			rc = -EOPNOTSUPP;
+			goto out_fail;
+		} else
+			tcon->handle_timeout = volume_info->handle_timeout;
+	}
+
 	tcon->ses = ses;
 	if (volume_info->password) {
 		tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
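
Taken together, the connect.c hunks add a handletimeout= mount option: parsed as an unsigned millisecond count, defaulted to 0 so the server picks its own timeout, capped at Windows' documented 960000 ms durable-handle maximum, rejected on pre-SMB2.1 dialects, and required to match for tcon reuse. A sketch of the validation step (a user would pass e.g. -o handletimeout=30000 at mount time):

	#include <stdint.h>

	#define SMB3_MAX_HANDLE_TIMEOUT 960000u	/* 16 minutes, per the hunk above */

	/* mirrors the Opt_handletimeout case: 0 means "server default" */
	static int parse_handle_timeout(unsigned long option, uint32_t *timeout_ms)
	{
		if (option > SMB3_MAX_HANDLE_TIMEOUT)
			return -1;		/* cifs_parse_mount_err path */
		*timeout_ms = (uint32_t)option;
		return 0;
	}
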
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index b204e84b87fb..54bffb2a1786 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
 
 
 	if (oparms->tcon->use_resilient) {
-		nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
+		/* default timeout is 0, servers pick default (120 seconds) */
+		nr_ioctl_req.Timeout =
+			cpu_to_le32(oparms->tcon->handle_timeout);
 		nr_ioctl_req.Reserved = 0;
 		rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
 			fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
 			true /* is_fsctl */,
 			(char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
-			NULL, NULL /* no return info */);
+			CIFSMaxBufSize, NULL, NULL /* no return info */);
 		if (rc == -EOPNOTSUPP) {
 			cifs_dbg(VFS,
 			     "resiliency not supported by server, disabling\n");
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 1022a3771e14..00225e699d03 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -581,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
 	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
 			FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
 			NULL /* no data input */, 0 /* no data input */,
-			(char **)&out_buf, &ret_data_len);
+			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
 	if (rc == -EOPNOTSUPP) {
 		cifs_dbg(FYI,
 			 "server does not support query network interfaces\n");
@@ -717,32 +717,28 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
 	oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
 #endif /* CIFS_DEBUG2 */
 
-	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
-		oplock = smb2_parse_lease_state(server, o_rsp,
-						&oparms.fid->epoch,
-						oparms.fid->lease_key);
-	else
-		goto oshr_exit;
-
-
 	memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
 	tcon->crfid.tcon = tcon;
 	tcon->crfid.is_valid = true;
 	kref_init(&tcon->crfid.refcount);
-	kref_get(&tcon->crfid.refcount);
 
+	if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
+		kref_get(&tcon->crfid.refcount);
+		oplock = smb2_parse_lease_state(server, o_rsp,
+						&oparms.fid->epoch,
+						oparms.fid->lease_key);
+	} else
+		goto oshr_exit;
 
 	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
 	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
 		goto oshr_exit;
-	rc = smb2_validate_and_copy_iov(
+	if (!smb2_validate_and_copy_iov(
 				le16_to_cpu(qi_rsp->OutputBufferOffset),
 				sizeof(struct smb2_file_all_info),
 				&rsp_iov[1], sizeof(struct smb2_file_all_info),
-				(char *)&tcon->crfid.file_all_info);
-	if (rc)
-		goto oshr_exit;
-	tcon->crfid.file_all_info_is_valid = 1;
+				(char *)&tcon->crfid.file_all_info))
+		tcon->crfid.file_all_info_is_valid = 1;
 
  oshr_exit:
 	mutex_unlock(&tcon->crfid.fid_mutex);
@@ -1299,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1299 1295
1300 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, 1296 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1301 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, 1297 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
1302 NULL, 0 /* no input */, 1298 NULL, 0 /* no input */, CIFSMaxBufSize,
1303 (char **)&res_key, &ret_data_len); 1299 (char **)&res_key, &ret_data_len);
1304 1300
1305 if (rc) { 1301 if (rc) {
@@ -1404,7 +1400,7 @@ smb2_ioctl_query_info(const unsigned int xid,
1404 rc = SMB2_ioctl_init(tcon, &rqst[1], 1400 rc = SMB2_ioctl_init(tcon, &rqst[1],
1405 COMPOUND_FID, COMPOUND_FID, 1401 COMPOUND_FID, COMPOUND_FID,
1406 qi.info_type, true, NULL, 1402 qi.info_type, true, NULL,
1407 0); 1403 0, CIFSMaxBufSize);
1408 } 1404 }
1409 } else if (qi.flags == PASSTHRU_QUERY_INFO) { 1405 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1410 memset(&qi_iov, 0, sizeof(qi_iov)); 1406 memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1532,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid,
1532 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, 1528 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1533 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, 1529 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
1534 true /* is_fsctl */, (char *)pcchunk, 1530 true /* is_fsctl */, (char *)pcchunk,
1535 sizeof(struct copychunk_ioctl), (char **)&retbuf, 1531 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1536 &ret_data_len); 1532 (char **)&retbuf, &ret_data_len);
1537 if (rc == 0) { 1533 if (rc == 0) {
1538 if (ret_data_len != 1534 if (ret_data_len !=
1539 sizeof(struct copychunk_ioctl_rsp)) { 1535 sizeof(struct copychunk_ioctl_rsp)) {
@@ -1693,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1693 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 1689 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1694 cfile->fid.volatile_fid, FSCTL_SET_SPARSE, 1690 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
1695 true /* is_fsctl */, 1691 true /* is_fsctl */,
1696 &setsparse, 1, NULL, NULL); 1692 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
1697 if (rc) { 1693 if (rc) {
1698 tcon->broken_sparse_sup = true; 1694 tcon->broken_sparse_sup = true;
1699 cifs_dbg(FYI, "set sparse rc = %d\n", rc); 1695 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
@@ -1766,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid,
1766 true /* is_fsctl */, 1762 true /* is_fsctl */,
1767 (char *)&dup_ext_buf, 1763 (char *)&dup_ext_buf,
1768 sizeof(struct duplicate_extents_to_file), 1764 sizeof(struct duplicate_extents_to_file),
1769 NULL, 1765 CIFSMaxBufSize, NULL,
1770 &ret_data_len); 1766 &ret_data_len);
1771 1767
1772 if (ret_data_len > 0) 1768 if (ret_data_len > 0)
@@ -1801,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1801 true /* is_fsctl */, 1797 true /* is_fsctl */,
1802 (char *)&integr_info, 1798 (char *)&integr_info,
1803 sizeof(struct fsctl_set_integrity_information_req), 1799 sizeof(struct fsctl_set_integrity_information_req),
1804 NULL, 1800 CIFSMaxBufSize, NULL,
1805 &ret_data_len); 1801 &ret_data_len);
1806 1802
1807} 1803}
@@ -1809,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1809/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ 1805/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1810#define GMT_TOKEN_SIZE 50 1806#define GMT_TOKEN_SIZE 50
1811 1807
1808#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1809
1812/* 1810/*
1813 * Input buffer contains (empty) struct smb_snapshot array with size filled in 1811 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1814 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 1812 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
@@ -1820,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
1820 char *retbuf = NULL; 1818 char *retbuf = NULL;
1821 unsigned int ret_data_len = 0; 1819 unsigned int ret_data_len = 0;
1822 int rc; 1820 int rc;
1821 u32 max_response_size;
1823 struct smb_snapshot_array snapshot_in; 1822 struct smb_snapshot_array snapshot_in;
1824 1823
1824 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
1825 return -EFAULT;
1826
1827 /*
1828 * Note that for snapshot queries, servers like Azure expect that
1829 * the first query be minimal size (and just used to get the number/size
1830 * of previous versions) so response size must be specified as EXACTLY
1831 * sizeof(struct snapshot_array) which is 16 when rounded up to a multiple
1832 * of eight bytes.
1833 */
1834 if (ret_data_len == 0)
1835 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
1836 else
1837 max_response_size = CIFSMaxBufSize;
1838
1825 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 1839 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1826 cfile->fid.volatile_fid, 1840 cfile->fid.volatile_fid,
1827 FSCTL_SRV_ENUMERATE_SNAPSHOTS, 1841 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
1828 true /* is_fsctl */, 1842 true /* is_fsctl */,
1829 NULL, 0 /* no input data */, 1843 NULL, 0 /* no input data */, max_response_size,
1830 (char **)&retbuf, 1844 (char **)&retbuf,
1831 &ret_data_len); 1845 &ret_data_len);
1832 cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n", 1846 cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
@@ -2304,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2304 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, 2318 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2305 FSCTL_DFS_GET_REFERRALS, 2319 FSCTL_DFS_GET_REFERRALS,
2306 true /* is_fsctl */, 2320 true /* is_fsctl */,
2307 (char *)dfs_req, dfs_req_size, 2321 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
2308 (char **)&dfs_rsp, &dfs_rsp_size); 2322 (char **)&dfs_rsp, &dfs_rsp_size);
2309 } while (rc == -EAGAIN); 2323 } while (rc == -EAGAIN);
2310 2324
@@ -2658,7 +2672,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2658 rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid, 2672 rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
2659 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, 2673 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
2660 true /* is_fsctl */, (char *)&fsctl_buf, 2674 true /* is_fsctl */, (char *)&fsctl_buf,
2661 sizeof(struct file_zero_data_information)); 2675 sizeof(struct file_zero_data_information),
2676 CIFSMaxBufSize);
2662 if (rc) 2677 if (rc)
2663 goto zero_range_exit; 2678 goto zero_range_exit;
2664 2679
@@ -2735,7 +2750,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2735 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 2750 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2736 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, 2751 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
2737 true /* is_fsctl */, (char *)&fsctl_buf, 2752 true /* is_fsctl */, (char *)&fsctl_buf,
2738 sizeof(struct file_zero_data_information), NULL, NULL); 2753 sizeof(struct file_zero_data_information),
2754 CIFSMaxBufSize, NULL, NULL);
2739 free_xid(xid); 2755 free_xid(xid);
2740 return rc; 2756 return rc;
2741} 2757}
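
Every SMB2_ioctl call in smb2ops.c now states its maximum response size explicitly; the snapshot-enumeration path is the one caller that deliberately asks for less than CIFSMaxBufSize on the first probe. A sketch of that sizing decision (the constants mirror the diff; the helper is illustrative):

#include <stdio.h>

#define MIN_SNAPSHOT_ARRAY_SIZE	16	/* padded sizeof(struct smb_snapshot_array) */
#define CIFS_MAX_BUF_SIZE	16384

/* A zero-length user buffer means this is the sizing probe; some
 * servers (e.g. Azure) reject a first query that asks for more. */
static unsigned int snapshot_max_response(unsigned int user_buf_len)
{
	return user_buf_len == 0 ? MIN_SNAPSHOT_ARRAY_SIZE : CIFS_MAX_BUF_SIZE;
}

int main(void)
{
	printf("sizing probe: %u\n", snapshot_max_response(0));	/* 16 */
	printf("full fetch:   %u\n", snapshot_max_response(4096));	/* 16384 */
	return 0;
}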
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 21ac19ff19cb..21ad01d55ab2 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1002,7 +1002,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1002 1002
1003 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, 1003 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1004 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, 1004 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
1005 (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen); 1005 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1006 (char **)&pneg_rsp, &rsplen);
1006 if (rc == -EOPNOTSUPP) { 1007 if (rc == -EOPNOTSUPP) {
1007 /* 1008 /*
1008 * Old Windows versions or Netapp SMB server can return 1009 * Old Windows versions or Netapp SMB server can return
@@ -1858,8 +1859,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
1858} 1859}
1859 1860
1860static struct create_durable_v2 * 1861static struct create_durable_v2 *
1861create_durable_v2_buf(struct cifs_fid *pfid) 1862create_durable_v2_buf(struct cifs_open_parms *oparms)
1862{ 1863{
1864 struct cifs_fid *pfid = oparms->fid;
1863 struct create_durable_v2 *buf; 1865 struct create_durable_v2 *buf;
1864 1866
1865 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); 1867 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
@@ -1873,7 +1875,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
1873 (struct create_durable_v2, Name)); 1875 (struct create_durable_v2, Name));
1874 buf->ccontext.NameLength = cpu_to_le16(4); 1876 buf->ccontext.NameLength = cpu_to_le16(4);
1875 1877
1876 buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ 1878 /*
1879 * NB: Handle timeout defaults to 0, which allows the server to choose
1880 * (most servers default to 120 seconds) and most clients default to 0.
1881 * This can be overridden at mount ("handletimeout=") if the user wants
1882 * a different persistent (or resilient) handle timeout for all opens
1883 * on a particular SMB3 mount.
1884 */
1885 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
1877 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 1886 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
1878 generate_random_uuid(buf->dcontext.CreateGuid); 1887 generate_random_uuid(buf->dcontext.CreateGuid);
1879 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); 1888 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
@@ -1926,7 +1935,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
1926 struct smb2_create_req *req = iov[0].iov_base; 1935 struct smb2_create_req *req = iov[0].iov_base;
1927 unsigned int num = *num_iovec; 1936 unsigned int num = *num_iovec;
1928 1937
1929 iov[num].iov_base = create_durable_v2_buf(oparms->fid); 1938 iov[num].iov_base = create_durable_v2_buf(oparms);
1930 if (iov[num].iov_base == NULL) 1939 if (iov[num].iov_base == NULL)
1931 return -ENOMEM; 1940 return -ENOMEM;
1932 iov[num].iov_len = sizeof(struct create_durable_v2); 1941 iov[num].iov_len = sizeof(struct create_durable_v2);
@@ -2478,7 +2487,8 @@ creat_exit:
2478int 2487int
2479SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, 2488SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
2480 u64 persistent_fid, u64 volatile_fid, u32 opcode, 2489 u64 persistent_fid, u64 volatile_fid, u32 opcode,
2481 bool is_fsctl, char *in_data, u32 indatalen) 2490 bool is_fsctl, char *in_data, u32 indatalen,
2491 __u32 max_response_size)
2482{ 2492{
2483 struct smb2_ioctl_req *req; 2493 struct smb2_ioctl_req *req;
2484 struct kvec *iov = rqst->rq_iov; 2494 struct kvec *iov = rqst->rq_iov;
@@ -2520,16 +2530,21 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
2520 req->OutputCount = 0; /* MBZ */ 2530 req->OutputCount = 0; /* MBZ */
2521 2531
2522 /* 2532 /*
2523 * Could increase MaxOutputResponse, but that would require more 2533 * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
2524 * than one credit. Windows typically sets this smaller, but for some 2534 * We could increase the default MaxOutputResponse, but that could require
2535 * more credits. Windows typically sets this smaller, but for some
2525 * ioctls it may be useful to allow server to send more. No point 2536 * ioctls it may be useful to allow server to send more. No point
2526 * limiting what the server can send as long as it fits in one credit 2537 * limiting what the server can send as long as it fits in one credit
2527 * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE 2538 * We cannot handle more than CIFS_MAX_BUF_SIZE yet but may want
2528 * (by default, note that it can be overridden to make max larger) 2539 * to increase this limit in the future.
2529 * in responses (except for read responses which can be bigger. 2540 * Note that for snapshot queries, servers like Azure expect that
2530 * We may want to bump this limit up 2541 * the first query be minimal size (and just used to get the number/size
2542 * of previous versions) so response size must be specified as EXACTLY
2543 * sizeof(struct snapshot_array) which is 16 when rounded up to a multiple
2544 * of eight bytes. Currently that is the only case where we set max
2545 * response size smaller.
2531 */ 2546 */
2532 req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize); 2547 req->MaxOutputResponse = cpu_to_le32(max_response_size);
2533 2548
2534 if (is_fsctl) 2549 if (is_fsctl)
2535 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); 2550 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
@@ -2550,13 +2565,14 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
2550 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 2565 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
2551} 2566}
2552 2567
2568
2553/* 2569/*
2554 * SMB2 IOCTL is used for both IOCTLs and FSCTLs 2570 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
2555 */ 2571 */
2556int 2572int
2557SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 2573SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
2558 u64 volatile_fid, u32 opcode, bool is_fsctl, 2574 u64 volatile_fid, u32 opcode, bool is_fsctl,
2559 char *in_data, u32 indatalen, 2575 char *in_data, u32 indatalen, u32 max_out_data_len,
2560 char **out_data, u32 *plen /* returned data len */) 2576 char **out_data, u32 *plen /* returned data len */)
2561{ 2577{
2562 struct smb_rqst rqst; 2578 struct smb_rqst rqst;
@@ -2593,8 +2609,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
2593 rqst.rq_iov = iov; 2609 rqst.rq_iov = iov;
2594 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; 2610 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
2595 2611
2596 rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, 2612 rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
2597 opcode, is_fsctl, in_data, indatalen); 2613 is_fsctl, in_data, indatalen, max_out_data_len);
2598 if (rc) 2614 if (rc)
2599 goto ioctl_exit; 2615 goto ioctl_exit;
2600 2616
@@ -2672,7 +2688,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2672 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, 2688 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
2673 FSCTL_SET_COMPRESSION, true /* is_fsctl */, 2689 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
2674 (char *)&fsctl_input /* data input */, 2690 (char *)&fsctl_input /* data input */,
2675 2 /* in data len */, &ret_data /* out data */, NULL); 2691 2 /* in data len */, CIFSMaxBufSize /* max out data */,
2692 &ret_data /* out data */, NULL);
2676 2693
2677 cifs_dbg(FYI, "set compression rc %d\n", rc); 2694 cifs_dbg(FYI, "set compression rc %d\n", rc);
2678 2695
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 3c32d0cfea69..52df125e9189 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -142,11 +142,12 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
142extern void SMB2_open_free(struct smb_rqst *rqst); 142extern void SMB2_open_free(struct smb_rqst *rqst);
143extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, 143extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
144 u64 persistent_fid, u64 volatile_fid, u32 opcode, 144 u64 persistent_fid, u64 volatile_fid, u32 opcode,
145 bool is_fsctl, char *in_data, u32 indatalen, 145 bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
146 char **out_data, u32 *plen /* returned data len */); 146 char **out_data, u32 *plen /* returned data len */);
147extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, 147extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
148 u64 persistent_fid, u64 volatile_fid, u32 opcode, 148 u64 persistent_fid, u64 volatile_fid, u32 opcode,
149 bool is_fsctl, char *in_data, u32 indatalen); 149 bool is_fsctl, char *in_data, u32 indatalen,
150 __u32 max_response_size);
150extern void SMB2_ioctl_free(struct smb_rqst *rqst); 151extern void SMB2_ioctl_free(struct smb_rqst *rqst);
151extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 152extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
152 u64 persistent_file_id, u64 volatile_file_id); 153 u64 persistent_file_id, u64 volatile_file_id);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 95b5e78c22b1..f25daa207421 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
163 return 0; 163 return 0;
164} 164}
165 165
166static void debugfs_evict_inode(struct inode *inode) 166static void debugfs_i_callback(struct rcu_head *head)
167{ 167{
168 truncate_inode_pages_final(&inode->i_data); 168 struct inode *inode = container_of(head, struct inode, i_rcu);
169 clear_inode(inode);
170 if (S_ISLNK(inode->i_mode)) 169 if (S_ISLNK(inode->i_mode))
171 kfree(inode->i_link); 170 kfree(inode->i_link);
171 free_inode_nonrcu(inode);
172}
173
174static void debugfs_destroy_inode(struct inode *inode)
175{
176 call_rcu(&inode->i_rcu, debugfs_i_callback);
172} 177}
173 178
174static const struct super_operations debugfs_super_operations = { 179static const struct super_operations debugfs_super_operations = {
175 .statfs = simple_statfs, 180 .statfs = simple_statfs,
176 .remount_fs = debugfs_remount, 181 .remount_fs = debugfs_remount,
177 .show_options = debugfs_show_options, 182 .show_options = debugfs_show_options,
178 .evict_inode = debugfs_evict_inode, 183 .destroy_inode = debugfs_destroy_inode,
179}; 184};
180 185
181static void debugfs_release_dentry(struct dentry *dentry) 186static void debugfs_release_dentry(struct dentry *dentry)
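
debugfs moves the kfree of the symlink target from evict_inode into an RCU callback run via destroy_inode, so a lockless reader still holding the inode pointer can never chase a freed i_link. A toy userspace model of that deferred-free shape (a plain callback list stands in for the RCU grace period):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct inode { char *link; struct inode *next; };

static struct inode *deferred;	/* stands in for the RCU callback queue */

static void destroy_inode(struct inode *inode)
{
	/* defer rather than free inline: readers may still be looking */
	inode->next = deferred;
	deferred = inode;
}

static void grace_period_ends(void)	/* models the RCU grace period */
{
	while (deferred) {
		struct inode *i = deferred;

		deferred = i->next;
		free(i->link);	/* only now is the target unreachable */
		free(i);
	}
}

int main(void)
{
	struct inode *i = calloc(1, sizeof(*i));

	i->link = strdup("/sys/kernel/debug/target");
	destroy_inode(i);	/* safe: nothing freed yet */
	grace_period_ends();	/* memory actually goes away here */
	return 0;
}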
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 842e8f749db6..570d71043acf 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -410,7 +410,7 @@ bool fs_validate_description(const struct fs_parameter_description *desc)
410 for (param = desc->specs; param->name; param++) { 410 for (param = desc->specs; param->name; param++) {
411 if (param->opt == e->opt && 411 if (param->opt == e->opt &&
412 param->type != fs_param_is_enum) { 412 param->type != fs_param_is_enum) {
413 pr_err("VALIDATE %s: e[%lu] enum val for %s\n", 413 pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
414 name, e - desc->enums, param->name); 414 name, e - desc->enums, param->name);
415 good = false; 415 good = false;
416 } 416 }
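
The fs_parser fix swaps %lu for %tu because the difference of two pointers has type ptrdiff_t, whose width need not match unsigned long on every ABI. A two-line illustration of the matching length modifier:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	int enums[8];
	int *e = &enums[5];

	/* a pointer difference is a ptrdiff_t; print it with the 't'
	 * length modifier (%td signed, %tu unsigned), never %lu */
	printf("enum index: %td\n", e - enums);	/* 5 */
	return 0;
}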
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6aaa30580a2b..bbdbd56cf2ac 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1022,6 +1022,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
1022 1022
1023 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); 1023 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
1024 if (!ret) { 1024 if (!ret) {
1025 ssize_t ret2;
1026
1025 /* 1027 /*
1026 * Open-code file_start_write here to grab freeze protection, 1028 * Open-code file_start_write here to grab freeze protection,
1027 * which will be released by another thread in 1029 * which will be released by another thread in
@@ -1036,7 +1038,19 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
1036 SB_FREEZE_WRITE); 1038 SB_FREEZE_WRITE);
1037 } 1039 }
1038 kiocb->ki_flags |= IOCB_WRITE; 1040 kiocb->ki_flags |= IOCB_WRITE;
1039 io_rw_done(kiocb, call_write_iter(file, kiocb, &iter)); 1041
1042 ret2 = call_write_iter(file, kiocb, &iter);
1043 if (!force_nonblock || ret2 != -EAGAIN) {
1044 io_rw_done(kiocb, ret2);
1045 } else {
1046 /*
1047 * If ->needs_lock is true, we're already in async
1048 * context.
1049 */
1050 if (!s->needs_lock)
1051 io_async_list_note(WRITE, req, iov_count);
1052 ret = -EAGAIN;
1053 }
1040 } 1054 }
1041out_free: 1055out_free:
1042 kfree(iovec); 1056 kfree(iovec);
@@ -1968,7 +1982,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
1968 return 0; 1982 return 0;
1969 1983
1970 if (sig) { 1984 if (sig) {
1971 ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz); 1985#ifdef CONFIG_COMPAT
1986 if (in_compat_syscall())
1987 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
1988 &ksigmask, &sigsaved, sigsz);
1989 else
1990#endif
1991 ret = set_user_sigmask(sig, &ksigmask,
1992 &sigsaved, sigsz);
1993
1972 if (ret) 1994 if (ret)
1973 return ret; 1995 return ret;
1974 } 1996 }
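
The io_write change stops completing a request that returned -EAGAIN under force_nonblock; the request is noted for the async worker and retried there instead of surfacing -EAGAIN to the application. A userspace sketch of that decision (the backend and names are illustrative, not io_uring's API):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

/* pretend backend: would block unless run from the async worker */
static ssize_t backend_write(int async_ctx)
{
	return async_ctx ? 42 : -EAGAIN;
}

static ssize_t submit_write(int force_nonblock)
{
	ssize_t ret = backend_write(0);

	if (!force_nonblock || ret != -EAGAIN)
		return ret;	/* complete now: success or a real error */

	/* would block under a nonblocking submit: hand the request to
	 * the async worker instead of completing it with -EAGAIN */
	return backend_write(1);
}

int main(void)
{
	printf("completed with %zd bytes\n", submit_write(1));
	return 0;
}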
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 389ea53ea487..bccfc40b3a74 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
1414 1414
1415 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); 1415 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
1416 1416
1417 if (f->target) {
1418 kfree(f->target);
1419 f->target = NULL;
1420 }
1421
1422 fds = f->dents; 1417 fds = f->dents;
1423 while(fds) { 1418 while(fds) {
1424 fd = fds; 1419 fd = fds;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index bb6ae387469f..05d892c79339 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
47static void jffs2_i_callback(struct rcu_head *head) 47static void jffs2_i_callback(struct rcu_head *head)
48{ 48{
49 struct inode *inode = container_of(head, struct inode, i_rcu); 49 struct inode *inode = container_of(head, struct inode, i_rcu);
50 kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); 50 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
51
52 kfree(f->target);
53 kmem_cache_free(jffs2_inode_cachep, f);
51} 54}
52 55
53static void jffs2_destroy_inode(struct inode *inode) 56static void jffs2_destroy_inode(struct inode *inode)
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index a35259eebc56..1dc9a08e8bdc 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4719,22 +4719,23 @@ out:
4719 4719
4720/* Lock an inode and grab a bh pointing to the inode. */ 4720/* Lock an inode and grab a bh pointing to the inode. */
4721int ocfs2_reflink_inodes_lock(struct inode *s_inode, 4721int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4722 struct buffer_head **bh1, 4722 struct buffer_head **bh_s,
4723 struct inode *t_inode, 4723 struct inode *t_inode,
4724 struct buffer_head **bh2) 4724 struct buffer_head **bh_t)
4725{ 4725{
4726 struct inode *inode1; 4726 struct inode *inode1 = s_inode;
4727 struct inode *inode2; 4727 struct inode *inode2 = t_inode;
4728 struct ocfs2_inode_info *oi1; 4728 struct ocfs2_inode_info *oi1;
4729 struct ocfs2_inode_info *oi2; 4729 struct ocfs2_inode_info *oi2;
4730 struct buffer_head *bh1 = NULL;
4731 struct buffer_head *bh2 = NULL;
4730 bool same_inode = (s_inode == t_inode); 4732 bool same_inode = (s_inode == t_inode);
4733 bool need_swap = (inode1->i_ino > inode2->i_ino);
4731 int status; 4734 int status;
4732 4735
4733 /* First grab the VFS and rw locks. */ 4736 /* First grab the VFS and rw locks. */
4734 lock_two_nondirectories(s_inode, t_inode); 4737 lock_two_nondirectories(s_inode, t_inode);
4735 inode1 = s_inode; 4738 if (need_swap)
4736 inode2 = t_inode;
4737 if (inode1->i_ino > inode2->i_ino)
4738 swap(inode1, inode2); 4739 swap(inode1, inode2);
4739 4740
4740 status = ocfs2_rw_lock(inode1, 1); 4741 status = ocfs2_rw_lock(inode1, 1);
@@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4757 trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno, 4758 trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
4758 (unsigned long long)oi2->ip_blkno); 4759 (unsigned long long)oi2->ip_blkno);
4759 4760
4760 if (*bh1)
4761 *bh1 = NULL;
4762 if (*bh2)
4763 *bh2 = NULL;
4764
4765 /* We always want to lock the one with the lower lockid first. */ 4761 /* We always want to lock the one with the lower lockid first. */
4766 if (oi1->ip_blkno > oi2->ip_blkno) 4762 if (oi1->ip_blkno > oi2->ip_blkno)
4767 mlog_errno(-ENOLCK); 4763 mlog_errno(-ENOLCK);
4768 4764
4769 /* lock id1 */ 4765 /* lock id1 */
4770 status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET); 4766 status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
4767 OI_LS_REFLINK_TARGET);
4771 if (status < 0) { 4768 if (status < 0) {
4772 if (status != -ENOENT) 4769 if (status != -ENOENT)
4773 mlog_errno(status); 4770 mlog_errno(status);
@@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4776 4773
4777 /* lock id2 */ 4774 /* lock id2 */
4778 if (!same_inode) { 4775 if (!same_inode) {
4779 status = ocfs2_inode_lock_nested(inode2, bh2, 1, 4776 status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
4780 OI_LS_REFLINK_TARGET); 4777 OI_LS_REFLINK_TARGET);
4781 if (status < 0) { 4778 if (status < 0) {
4782 if (status != -ENOENT) 4779 if (status != -ENOENT)
4783 mlog_errno(status); 4780 mlog_errno(status);
4784 goto out_cl1; 4781 goto out_cl1;
4785 } 4782 }
4786 } else 4783 } else {
4787 *bh2 = *bh1; 4784 bh2 = bh1;
4785 }
4786
4787 /*
4788 * If we swapped inode order above, we have to swap the buffer heads
4789 * before passing them back to the caller.
4790 */
4791 if (need_swap)
4792 swap(bh1, bh2);
4793 *bh_s = bh1;
4794 *bh_t = bh2;
4788 4795
4789 trace_ocfs2_double_lock_end( 4796 trace_ocfs2_double_lock_end(
4790 (unsigned long long)oi1->ip_blkno, 4797 (unsigned long long)oi1->ip_blkno,
@@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4794 4801
4795out_cl1: 4802out_cl1:
4796 ocfs2_inode_unlock(inode1, 1); 4803 ocfs2_inode_unlock(inode1, 1);
4797 brelse(*bh1); 4804 brelse(bh1);
4798 *bh1 = NULL;
4799out_rw2: 4805out_rw2:
4800 ocfs2_rw_unlock(inode2, 1); 4806 ocfs2_rw_unlock(inode2, 1);
4801out_i2: 4807out_i2:
diff --git a/fs/open.c b/fs/open.c
index 0285ce7dbd51..f1c2f855fd43 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
733 return 0; 733 return 0;
734 } 734 }
735 735
736 /* Any file opened for execve()/uselib() has to be a regular file. */
737 if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
738 error = -EACCES;
739 goto cleanup_file;
740 }
741
736 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { 742 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
737 error = get_write_access(inode); 743 error = get_write_access(inode);
738 if (unlikely(error)) 744 if (unlikely(error))
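
do_dentry_open now rejects execve()/uselib() opens of anything that is not a regular file at open time rather than relying on later checks. The gist in libc terms (a sketch, not the kernel path):

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

/* return 0 if st may back an exec-style open, else -EACCES */
static int check_exec_open(const struct stat *st)
{
	if (!S_ISREG(st->st_mode))
		return -EACCES;	/* directories, FIFOs, devices: refuse */
	return 0;
}

int main(void)
{
	struct stat st;

	if (stat("/", &st) != 0)
		return 1;
	printf("exec open of '/': %d\n", check_exec_open(&st));	/* -13 */
	return 0;
}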
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index d29d869abec1..f5834488b67d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -615,7 +615,7 @@ static void __init proc_kcore_text_init(void)
615/* 615/*
616 * MODULES_VADDR has no intersection with VMALLOC_ADDR. 616 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
617 */ 617 */
618struct kcore_list kcore_modules; 618static struct kcore_list kcore_modules;
619static void __init add_modules_range(void) 619static void __init add_modules_range(void)
620{ 620{
621 if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) { 621 if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 4d598a399bbf..d65390727541 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
1626 if (--header->nreg) 1626 if (--header->nreg)
1627 return; 1627 return;
1628 1628
1629 put_links(header); 1629 if (parent)
1630 put_links(header);
1630 start_unregistering(header); 1631 start_unregistering(header);
1631 if (!--header->count) 1632 if (!--header->count)
1632 kfree_rcu(header, rcu); 1633 kfree_rcu(header, rcu);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 8dc2818fdd84..12628184772c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head)
276{ 276{
277 struct inode *inode = container_of(head, struct inode, i_rcu); 277 struct inode *inode = container_of(head, struct inode, i_rcu);
278 struct ubifs_inode *ui = ubifs_inode(inode); 278 struct ubifs_inode *ui = ubifs_inode(inode);
279 kfree(ui->data);
279 kmem_cache_free(ubifs_inode_slab, ui); 280 kmem_cache_free(ubifs_inode_slab, ui);
280} 281}
281 282
282static void ubifs_destroy_inode(struct inode *inode) 283static void ubifs_destroy_inode(struct inode *inode)
283{ 284{
284 struct ubifs_inode *ui = ubifs_inode(inode);
285
286 kfree(ui->data);
287 call_rcu(&inode->i_rcu, ubifs_i_callback); 285 call_rcu(&inode->i_rcu, ubifs_i_callback);
288} 286}
289 287
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 48502cb9990f..4637ae1ae91c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1191,7 +1191,10 @@ xfs_iread_extents(
1191 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 1191 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1192 */ 1192 */
1193 level = be16_to_cpu(block->bb_level); 1193 level = be16_to_cpu(block->bb_level);
1194 ASSERT(level > 0); 1194 if (unlikely(level == 0)) {
1195 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
1196 return -EFSCORRUPTED;
1197 }
1195 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 1198 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1196 bno = be64_to_cpu(*pp); 1199 bno = be64_to_cpu(*pp);
1197 1200
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
4249 struct xfs_bmbt_irec *mval, /* output: map values */ 4252 struct xfs_bmbt_irec *mval, /* output: map values */
4250 int *nmap) /* i/o: mval size/count */ 4253 int *nmap) /* i/o: mval size/count */
4251{ 4254{
4255 struct xfs_bmalloca bma = {
4256 .tp = tp,
4257 .ip = ip,
4258 .total = total,
4259 };
4252 struct xfs_mount *mp = ip->i_mount; 4260 struct xfs_mount *mp = ip->i_mount;
4253 struct xfs_ifork *ifp; 4261 struct xfs_ifork *ifp;
4254 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4255 xfs_fileoff_t end; /* end of mapped file region */ 4262 xfs_fileoff_t end; /* end of mapped file region */
4256 bool eof = false; /* after the end of extents */ 4263 bool eof = false; /* after the end of extents */
4257 int error; /* error return */ 4264 int error; /* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
4319 eof = true; 4326 eof = true;
4320 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4327 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4321 bma.prev.br_startoff = NULLFILEOFF; 4328 bma.prev.br_startoff = NULLFILEOFF;
4322 bma.tp = tp;
4323 bma.ip = ip;
4324 bma.total = total;
4325 bma.datatype = 0;
4326 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4329 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4327 4330
4328 n = 0; 4331 n = 0;
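
xfs_bmapi_write now seeds the bmalloca arguments with a designated initializer, which zero-fills every field that is not named, so the old explicit bma.datatype = 0 assignment becomes implicit. The C rule this relies on, in isolation:

#include <stdio.h>

struct args { void *tp, *ip; int total, datatype, minleft; };

int main(void)
{
	int total = 8;

	/* named members are set; all remaining members are
	 * zero-initialized, exactly as if assigned 0 explicitly */
	struct args bma = {
		.tp = NULL,
		.ip = NULL,
		.total = total,
	};

	printf("datatype=%d minleft=%d\n", bma.datatype, bma.minleft);
	return 0;
}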
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 6f94d1f7322d..117910db51b8 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -415,8 +415,17 @@ xchk_btree_check_owner(
415 struct xfs_btree_cur *cur = bs->cur; 415 struct xfs_btree_cur *cur = bs->cur;
416 struct check_owner *co; 416 struct check_owner *co;
417 417
418 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL) 418 /*
419 * In theory, xfs_btree_get_block should only give us a null buffer
420 * pointer for the root of a root-in-inode btree type, but we need
421 * to check defensively here in case the cursor state is also screwed
422 * up.
423 */
424 if (bp == NULL) {
425 if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
426 xchk_btree_set_corrupt(bs->sc, bs->cur, level);
419 return 0; 427 return 0;
428 }
420 429
421 /* 430 /*
422 * We want to cross-reference each btree block with the bnobt 431 * We want to cross-reference each btree block with the bnobt
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index f1260b4bfdee..90527b094878 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -574,6 +574,11 @@ xchk_da_btree(
574 /* Drill another level deeper. */ 574 /* Drill another level deeper. */
575 blkno = be32_to_cpu(key->before); 575 blkno = be32_to_cpu(key->before);
576 level++; 576 level++;
577 if (level >= XFS_DA_NODE_MAXDEPTH) {
578 /* Too deep! */
579 xchk_da_set_corrupt(&ds, level - 1);
580 break;
581 }
577 ds.tree_level--; 582 ds.tree_level--;
578 error = xchk_da_btree_block(&ds, level, blkno); 583 error = xchk_da_btree_block(&ds, level, blkno);
579 if (error) 584 if (error)
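
The dabtree scrubber now bounds its descent depth so a corrupt on-disk tree containing a cycle can no longer send the walk into an endless loop. A sketch of the same guard in miniature:

#include <stdio.h>

#define MAXDEPTH 5	/* stands in for XFS_DA_NODE_MAXDEPTH */

static int walk(int level)
{
	for (;;) {
		level++;
		if (level >= MAXDEPTH) {
			/* too deep: flag corruption and stop rather
			 * than chase a cycle forever */
			printf("corrupt: depth %d exceeded\n", level);
			return -1;
		}
		/* ... descend one level of the (cyclic) tree ... */
	}
}

int main(void)
{
	return walk(0) == -1 ? 0 : 1;
}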
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 93f07edafd81..9ee2a7d02e70 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -161,6 +161,14 @@ xfs_ioc_trim(
161 return -EPERM; 161 return -EPERM;
162 if (!blk_queue_discard(q)) 162 if (!blk_queue_discard(q))
163 return -EOPNOTSUPP; 163 return -EOPNOTSUPP;
164
165 /*
166 * We haven't recovered the log, so we cannot use our bnobt-guided
167 * storage zapping commands.
168 */
169 if (mp->m_flags & XFS_MOUNT_NORECOVERY)
170 return -EROFS;
171
164 if (copy_from_user(&range, urange, sizeof(range))) 172 if (copy_from_user(&range, urange, sizeof(range)))
165 return -EFAULT; 173 return -EFAULT;
166 174
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1f2e2845eb76..a7ceae90110e 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
529 count = iov_iter_count(from); 529 count = iov_iter_count(from);
530 530
531 /* 531 /*
532 * If we are doing unaligned IO, wait for all other IO to drain, 532 * If we are doing unaligned IO, we can't allow any other overlapping IO
533 * otherwise demote the lock if we had to take the exclusive lock 533 * in-flight at the same time or we risk data corruption. Wait for all
534 * for other reasons in xfs_file_aio_write_checks. 534 * other IO to drain before we submit. If the IO is aligned, demote the
535 * iolock if we had to take the exclusive lock in
536 * xfs_file_aio_write_checks() for other reasons.
535 */ 537 */
536 if (unaligned_io) { 538 if (unaligned_io) {
537 /* If we are going to wait for other DIO to finish, bail */ 539 /* unaligned dio always waits, bail */
538 if (iocb->ki_flags & IOCB_NOWAIT) { 540 if (iocb->ki_flags & IOCB_NOWAIT)
539 if (atomic_read(&inode->i_dio_count)) 541 return -EAGAIN;
540 return -EAGAIN; 542 inode_dio_wait(inode);
541 } else {
542 inode_dio_wait(inode);
543 }
544 } else if (iolock == XFS_IOLOCK_EXCL) { 543 } else if (iolock == XFS_IOLOCK_EXCL) {
545 xfs_ilock_demote(ip, XFS_IOLOCK_EXCL); 544 xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
546 iolock = XFS_IOLOCK_SHARED; 545 iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
548 547
549 trace_xfs_file_direct_write(ip, count, iocb->ki_pos); 548 trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
550 ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io); 549 ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
550
551 /*
552 * If unaligned, this is the only IO in-flight. If it has not yet
553 * completed, wait on it before we release the iolock to prevent
554 * subsequent overlapping IO.
555 */
556 if (ret == -EIOCBQUEUED && unaligned_io)
557 inode_dio_wait(inode);
551out: 558out:
552 xfs_iunlock(ip, iolock); 559 xfs_iunlock(ip, iolock);
553 560
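
After this rework, unaligned direct IO in XFS is fully serialized: IOCB_NOWAIT is refused outright, all in-flight IO is drained before submission, and if the IO goes asynchronous the submitter waits for it before releasing the iolock. The control flow reduced to a skeleton (error values as in the kernel; the waits are stand-ins):

#include <stdio.h>

#define EAGAIN		11
#define EIOCBQUEUED	529	/* kernel-internal: IO was queued async */

static int dio_write(int unaligned, int nowait)
{
	if (unaligned) {
		if (nowait)
			return -EAGAIN;	/* unaligned DIO always waits now */
		/* inode_dio_wait(): drain overlapping IO before submit */
	}

	int ret = -EIOCBQUEUED;		/* submission went asynchronous */

	/* if unaligned, this is the only IO in flight; wait for it to
	 * finish before dropping the iolock so nothing can overlap it */
	if (ret == -EIOCBQUEUED && unaligned)
		printf("drain: inode_dio_wait()\n");

	return ret;
}

int main(void)
{
	printf("nowait unaligned:  %d\n", dio_write(1, 1));	/* -11 */
	printf("waiting unaligned: %d\n", dio_write(1, 0));	/* -529 */
	return 0;
}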
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 30b1ae53689f..c50542dc71e0 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -150,7 +150,10 @@
150 150
151/* Defaults for debug_level, debug and normal */ 151/* Defaults for debug_level, debug and normal */
152 152
153#ifndef ACPI_DEBUG_DEFAULT
153#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR) 154#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
155#endif
156
154#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) 157#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
155#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) 158#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
156 159
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 9ff328fd946a..624b90b34085 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -82,6 +82,11 @@
82#define ACPI_NO_ERROR_MESSAGES 82#define ACPI_NO_ERROR_MESSAGES
83#undef ACPI_DEBUG_OUTPUT 83#undef ACPI_DEBUG_OUTPUT
84 84
85/* Use a specific debugging default separate from ACPICA */
86
87#undef ACPI_DEBUG_DEFAULT
88#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
89
85/* External interface for __KERNEL__, stub is needed */ 90/* External interface for __KERNEL__, stub is needed */
86 91
87#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \ 92#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
diff --git a/include/linux/device.h b/include/linux/device.h
index b425a7ee04ce..4e6987e11f68 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -49,8 +49,6 @@ struct bus_attribute {
49 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); 49 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
50}; 50};
51 51
52#define BUS_ATTR(_name, _mode, _show, _store) \
53 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
54#define BUS_ATTR_RW(_name) \ 52#define BUS_ATTR_RW(_name) \
55 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) 53 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
56#define BUS_ATTR_RO(_name) \ 54#define BUS_ATTR_RO(_name) \
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ea35263eb76b..11943b60f208 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void)
203#define pud_huge(x) 0 203#define pud_huge(x) 0
204#define is_hugepage_only_range(mm, addr, len) 0 204#define is_hugepage_only_range(mm, addr, len) 0
205#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) 205#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
206#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
207#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ 206#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
208 src_addr, pagep) ({ BUG(); 0; }) 207 src_addr, pagep) ({ BUG(); 0; })
209#define huge_pte_offset(mm, address, sz) 0 208#define huge_pte_offset(mm, address, sz) 0
@@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
234{ 233{
235 BUG(); 234 BUG();
236} 235}
236static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
237 struct vm_area_struct *vma, unsigned long address,
238 unsigned int flags)
239{
240 BUG();
241 return 0;
242}
237 243
238#endif /* !CONFIG_HUGETLB_PAGE */ 244#endif /* !CONFIG_HUGETLB_PAGE */
239/* 245/*
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index c843f4a9c512..da676cdbd727 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -38,12 +38,6 @@ struct vmcoredd_node {
38 38
39#ifdef CONFIG_PROC_KCORE 39#ifdef CONFIG_PROC_KCORE
40void __init kclist_add(struct kcore_list *, void *, size_t, int type); 40void __init kclist_add(struct kcore_list *, void *, size_t, int type);
41static inline
42void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
43{
44 m->vaddr = (unsigned long)vaddr;
45 kclist_add(m, addr, sz, KCORE_REMAP);
46}
47 41
48extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)); 42extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
49#else 43#else
@@ -51,11 +45,6 @@ static inline
51void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) 45void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
52{ 46{
53} 47}
54
55static inline
56void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
57{
58}
59#endif 48#endif
60 49
61#endif /* _LINUX_KCORE_H */ 50#endif /* _LINUX_KCORE_H */
diff --git a/include/linux/list.h b/include/linux/list.h
index 79626b5ab36c..58aa3adf94e6 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
207} 207}
208 208
209/** 209/**
210 * list_is_first -- tests whether @ list is the first entry in list @head 210 * list_is_first -- tests whether @list is the first entry in list @head
211 * @list: the entry to test 211 * @list: the entry to test
212 * @head: the head of the list 212 * @head: the head of the list
213 */ 213 */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 6fee8b1a4400..5cd824c1c0ca 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
469 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, 469 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
470 advertising)) 470 advertising))
471 lcl_adv |= ADVERTISE_PAUSE_CAP; 471 lcl_adv |= ADVERTISE_PAUSE_CAP;
472 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, 472 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
473 advertising)) 473 advertising))
474 lcl_adv |= ADVERTISE_PAUSE_ASYM; 474 lcl_adv |= ADVERTISE_PAUSE_ASYM;
475 475
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 022541dc5dbf..0d0729648844 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -594,6 +594,8 @@ enum mlx5_pagefault_type_flags {
594}; 594};
595 595
596struct mlx5_td { 596struct mlx5_td {
597 /* protects tirs list changes while tirs are refreshed */
598 struct mutex list_lock;
597 struct list_head tirs_list; 599 struct list_head tirs_list;
598 u32 tdn; 600 u32 tdn;
599}; 601};
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 4eb26d278046..280ae96dc4c3 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
41 41
42/* 42/*
43 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. 43 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
44 * If specified range includes migrate types other than MOVABLE or CMA,
45 * this will fail with -EBUSY.
46 *
47 * For isolating all pages in the range finally, the caller have to
48 * free all pages in the range. test_page_isolated() can be used for
49 * test it.
50 *
51 * The following flags are allowed (they can be combined in a bit mask)
52 * SKIP_HWPOISON - ignore hwpoison pages
53 * REPORT_FAILURE - report details about the failure to isolate the range
54 */ 44 */
55int 45int
56start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, 46start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h
index a867637e172d..9e46678edb2a 100644
--- a/include/linux/platform_data/gpio/gpio-amd-fch.h
+++ b/include/linux/platform_data/gpio/gpio-amd-fch.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL+ */ 1/* SPDX-License-Identifier: GPL-2.0+ */
2 2
3/* 3/*
4 * AMD FCH gpio driver platform-data 4 * AMD FCH gpio driver platform-data
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index ae5655197698..e412c092c1e8 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void)
418 set_thread_flag(TIF_RESTORE_SIGMASK); 418 set_thread_flag(TIF_RESTORE_SIGMASK);
419 WARN_ON(!test_thread_flag(TIF_SIGPENDING)); 419 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
420} 420}
421
422static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
423{
424 clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
425}
426
421static inline void clear_restore_sigmask(void) 427static inline void clear_restore_sigmask(void)
422{ 428{
423 clear_thread_flag(TIF_RESTORE_SIGMASK); 429 clear_thread_flag(TIF_RESTORE_SIGMASK);
424} 430}
431static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
432{
433 return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
434}
425static inline bool test_restore_sigmask(void) 435static inline bool test_restore_sigmask(void)
426{ 436{
427 return test_thread_flag(TIF_RESTORE_SIGMASK); 437 return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void)
439 current->restore_sigmask = true; 449 current->restore_sigmask = true;
440 WARN_ON(!test_thread_flag(TIF_SIGPENDING)); 450 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
441} 451}
452static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
453{
454 tsk->restore_sigmask = false;
455}
442static inline void clear_restore_sigmask(void) 456static inline void clear_restore_sigmask(void)
443{ 457{
444 current->restore_sigmask = false; 458 current->restore_sigmask = false;
@@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void)
447{ 461{
448 return current->restore_sigmask; 462 return current->restore_sigmask;
449} 463}
464static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
465{
466 return tsk->restore_sigmask;
467}
450static inline bool test_and_clear_restore_sigmask(void) 468static inline bool test_and_clear_restore_sigmask(void)
451{ 469{
452 if (!current->restore_sigmask) 470 if (!current->restore_sigmask)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 11b45f7ae405..9449b19c5f10 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -32,6 +32,8 @@
32#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) 32#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
33/* Use GFP_DMA memory */ 33/* Use GFP_DMA memory */
34#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) 34#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
35/* Use GFP_DMA32 memory */
36#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
35/* DEBUG: Store the last owner for bug hunting */ 37/* DEBUG: Store the last owner for bug hunting */
36#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) 38#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
37/* Panic if kmem_cache_create() fails */ 39/* Panic if kmem_cache_create() fails */
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index a240ed2a0372..ff56c443180c 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
24#define vbg_debug pr_debug 24#define vbg_debug pr_debug
25#endif 25#endif
26 26
27int vbg_hgcm_connect(struct vbg_dev *gdev, 27int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
28 struct vmmdev_hgcm_service_location *loc, 28 struct vmmdev_hgcm_service_location *loc,
29 u32 *client_id, int *vbox_status); 29 u32 *client_id, int *vbox_status);
30 30
31int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status); 31int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
32 u32 client_id, int *vbox_status);
32 33
33int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, 34int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
34 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, 35 u32 function, u32 timeout_ms,
35 u32 parm_count, int *vbox_status); 36 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
37 int *vbox_status);
36 38
37/** 39/**
38 * Convert a VirtualBox status code to a standard Linux kernel return value. 40 * Convert a VirtualBox status code to a standard Linux kernel return value.
diff --git a/include/net/ip.h b/include/net/ip.h
index aa09ae5f01a5..2d3cce7c3e8a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -681,7 +681,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
681 unsigned char __user *data, int optlen); 681 unsigned char __user *data, int optlen);
682void ip_options_undo(struct ip_options *opt); 682void ip_options_undo(struct ip_options *opt);
683void ip_forward_options(struct sk_buff *skb); 683void ip_forward_options(struct sk_buff *skb);
684int ip_options_rcv_srr(struct sk_buff *skb); 684int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
685 685
686/* 686/*
687 * Functions provided by ip_sockglue.c 687 * Functions provided by ip_sockglue.c
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index a68ced28d8f4..12689ddfc24c 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -59,6 +59,7 @@ struct net {
59 */ 59 */
60 spinlock_t rules_mod_lock; 60 spinlock_t rules_mod_lock;
61 61
62 u32 hash_mix;
62 atomic64_t cookie_gen; 63 atomic64_t cookie_gen;
63 64
64 struct list_head list; /* list of network namespaces */ 65 struct list_head list; /* list of network namespaces */
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 16a842456189..d9b665151f3d 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -2,16 +2,10 @@
2#ifndef __NET_NS_HASH_H__ 2#ifndef __NET_NS_HASH_H__
3#define __NET_NS_HASH_H__ 3#define __NET_NS_HASH_H__
4 4
5#include <asm/cache.h> 5#include <net/net_namespace.h>
6
7struct net;
8 6
9static inline u32 net_hash_mix(const struct net *net) 7static inline u32 net_hash_mix(const struct net *net)
10{ 8{
11#ifdef CONFIG_NET_NS 9 return net->hash_mix;
12 return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
13#else
14 return 0;
15#endif
16} 10}
17#endif 11#endif
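
Deriving net_hash_mix() from a stored per-namespace value instead of the namespace pointer stops hash values from leaking kernel addresses and gives each namespace an independent salt. A userspace sketch of salting a bucket computation this way (illustrative hashing, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct net { uint32_t hash_mix; };	/* random salt, set at ns creation */

static uint32_t net_hash_mix(const struct net *net)
{
	return net->hash_mix;	/* no pointer bits involved anymore */
}

static uint32_t bucket(const struct net *net, uint32_t key, uint32_t nbuckets)
{
	/* mixing the salt keeps bucket layout unpredictable across
	 * namespaces while revealing nothing about kernel addresses */
	return (key ^ net_hash_mix(net)) % nbuckets;
}

int main(void)
{
	srandom((unsigned)time(NULL));
	struct net ns1 = { (uint32_t)random() };
	struct net ns2 = { (uint32_t)random() };

	printf("port 80 -> bucket %u in ns1, %u in ns2\n",
	       bucket(&ns1, 80, 256), bucket(&ns2, 80, 256));
	return 0;
}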
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2269383c1399..0aea0e262452 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -934,6 +934,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
934 sch->qstats.overlimits++; 934 sch->qstats.overlimits++;
935} 935}
936 936
937static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
938{
939 __u32 qlen = qdisc_qlen_sum(sch);
940
941 return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
942}
943
944static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
945 __u32 *backlog)
946{
947 struct gnet_stats_queue qstats = { 0 };
948 __u32 len = qdisc_qlen_sum(sch);
949
950 __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
951 *qlen = qstats.qlen;
952 *backlog = qstats.backlog;
953}
954
955static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
956{
957 __u32 qlen, backlog;
958
959 qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
960 qdisc_tree_reduce_backlog(sch, qlen, backlog);
961}
962
963static inline void qdisc_purge_queue(struct Qdisc *sch)
964{
965 __u32 qlen, backlog;
966
967 qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
968 qdisc_reset(sch);
969 qdisc_tree_reduce_backlog(sch, qlen, backlog);
970}
971
937static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) 972static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
938{ 973{
939 qh->head = NULL; 974 qh->head = NULL;
@@ -1117,13 +1152,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
1117 sch_tree_lock(sch); 1152 sch_tree_lock(sch);
1118 old = *pold; 1153 old = *pold;
1119 *pold = new; 1154 *pold = new;
1120 if (old != NULL) { 1155 if (old != NULL)
1121 unsigned int qlen = old->q.qlen; 1156 qdisc_tree_flush_backlog(old);
1122 unsigned int backlog = old->qstats.backlog;
1123
1124 qdisc_reset(old);
1125 qdisc_tree_reduce_backlog(old, qlen, backlog);
1126 }
1127 sch_tree_unlock(sch); 1157 sch_tree_unlock(sch);
1128 1158
1129 return old; 1159 return old;
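
The new sch_generic.h helpers bundle a recurring pattern: snapshot a child qdisc's qlen and backlog, reset it, then propagate the reduction up the tree. The snapshot has to happen before the reset, as this small model shows (plain struct, no real qdisc):

#include <stdio.h>

struct qdisc { unsigned int qlen, backlog; };

static void qdisc_reset(struct qdisc *q)
{
	q->qlen = 0;
	q->backlog = 0;
}

static void tree_reduce(unsigned int qlen, unsigned int backlog)
{
	printf("parents -= (%u pkts, %u bytes)\n", qlen, backlog);
}

static void purge_queue(struct qdisc *q)
{
	/* read the counters first: reset() zeroes them, and reducing
	 * by zero would leave ancestor accounting permanently wrong */
	unsigned int qlen = q->qlen, backlog = q->backlog;

	qdisc_reset(q);
	tree_reduce(qlen, backlog);
}

int main(void)
{
	struct qdisc q = { 10, 15000 };

	purge_queue(&q);	/* parents -= (10 pkts, 15000 bytes) */
	return 0;
}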
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 5f24b50c9e88..059dc2bedaf6 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -7,5 +7,7 @@ no-export-headers += kvm.h
7endif 7endif
8 8
9ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),) 9ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),)
10ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),)
10no-export-headers += kvm_para.h 11no-export-headers += kvm_para.h
11endif 12endif
13endif
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 0e68024f36c7..26f39816af14 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
102#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32 102#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
103#endif 103#endif
104 104
105/* vmmdev_request_header.requestor defines */
106
107/* Requestor user not given. */
108#define VMMDEV_REQUESTOR_USR_NOT_GIVEN 0x00000000
109/* The kernel driver (vboxguest) is the requestor. */
110#define VMMDEV_REQUESTOR_USR_DRV 0x00000001
111/* Some other kernel driver is the requestor. */
112#define VMMDEV_REQUESTOR_USR_DRV_OTHER 0x00000002
113/* The root or an admin user is the requestor. */
114#define VMMDEV_REQUESTOR_USR_ROOT 0x00000003
115/* Regular joe user is making the request. */
116#define VMMDEV_REQUESTOR_USR_USER 0x00000006
117/* User classification mask. */
118#define VMMDEV_REQUESTOR_USR_MASK 0x00000007
119
120/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
121#define VMMDEV_REQUESTOR_KERNEL 0x00000000
122/* User mode request. */
123#define VMMDEV_REQUESTOR_USERMODE 0x00000008
124/* User or kernel mode classification mask. */
125#define VMMDEV_REQUESTOR_MODE_MASK 0x00000008
126
127/* Don't know the physical console association of the requestor. */
128#define VMMDEV_REQUESTOR_CON_DONT_KNOW 0x00000000
129/*
130 * The request originates with a process that is NOT associated with the
131 * physical console.
132 */
133#define VMMDEV_REQUESTOR_CON_NO 0x00000010
134/* Requestor process is associated with the physical console. */
135#define VMMDEV_REQUESTOR_CON_YES 0x00000020
136/* Console classification mask. */
137#define VMMDEV_REQUESTOR_CON_MASK 0x00000030
138
139/* Requestor is member of special VirtualBox user group. */
140#define VMMDEV_REQUESTOR_GRP_VBOX 0x00000080
141
142/* Note: trust level is for Windows guests only, Linux always uses not-given */
143/* Requestor trust level: Unspecified */
144#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN 0x00000000
145/* Requestor trust level: Untrusted (SID S-1-16-0) */
146#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED 0x00001000
147/* Requestor trust level: Low (SID S-1-16-4096) */
148#define VMMDEV_REQUESTOR_TRUST_LOW 0x00002000
149/* Requestor trust level: Medium (SID S-1-16-8192) */
150#define VMMDEV_REQUESTOR_TRUST_MEDIUM 0x00003000
151/* Requestor trust level: Medium plus (SID S-1-16-8448) */
152#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS 0x00004000
153/* Requestor trust level: High (SID S-1-16-12288) */
154#define VMMDEV_REQUESTOR_TRUST_HIGH 0x00005000
155/* Requestor trust level: System (SID S-1-16-16384) */
156#define VMMDEV_REQUESTOR_TRUST_SYSTEM 0x00006000
157/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
158#define VMMDEV_REQUESTOR_TRUST_PROTECTED 0x00007000
159/* Requestor trust level mask */
160#define VMMDEV_REQUESTOR_TRUST_MASK 0x00007000
161
162/* Requestor is using the less trusted user device node (/dev/vboxuser) */
163#define VMMDEV_REQUESTOR_USER_DEVICE 0x00008000
164
105/** HGCM service location types. */ 165/** HGCM service location types. */
106enum vmmdev_hgcm_service_location_type { 166enum vmmdev_hgcm_service_location_type {
107 VMMDEV_HGCM_LOC_INVALID = 0, 167 VMMDEV_HGCM_LOC_INVALID = 0,
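The new requestor field packs several independent classifications (user class, kernel/user mode, console association, trust level) into one 32-bit value. A minimal sketch of decoding it with the masks above; decode_requestor() is a hypothetical helper, not part of the patch, and assumes the installed uapi header:

#include <stdio.h>
#include <stdint.h>
#include <linux/vbox_vmmdev_types.h>	/* the VMMDEV_REQUESTOR_* defines added above */

static void decode_requestor(uint32_t req)
{
	if ((req & VMMDEV_REQUESTOR_MODE_MASK) == VMMDEV_REQUESTOR_USERMODE &&
	    (req & VMMDEV_REQUESTOR_USR_MASK) == VMMDEV_REQUESTOR_USR_USER &&
	    (req & VMMDEV_REQUESTOR_USER_DEVICE))
		printf("unprivileged request via /dev/vboxuser\n");
	if ((req & VMMDEV_REQUESTOR_CON_MASK) == VMMDEV_REQUESTOR_CON_YES)
		printf("requestor owns the physical console\n");
}

int main(void)
{
	decode_requestor(VMMDEV_REQUESTOR_USERMODE | VMMDEV_REQUESTOR_USR_USER |
			 VMMDEV_REQUESTOR_USER_DEVICE | VMMDEV_REQUESTOR_CON_YES);
	return 0;
}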
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8974b3755670..3c18260403dd 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
162static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, 162static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
163 struct xdp_frame *xdpf) 163 struct xdp_frame *xdpf)
164{ 164{
165 unsigned int hard_start_headroom;
165 unsigned int frame_size; 166 unsigned int frame_size;
166 void *pkt_data_start; 167 void *pkt_data_start;
167 struct sk_buff *skb; 168 struct sk_buff *skb;
168 169
170 /* Part of the headroom was reserved for xdpf */
171 hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
172
169 /* build_skb need to place skb_shared_info after SKB end, and 173 /* build_skb need to place skb_shared_info after SKB end, and
170 * also want to know the memory "truesize". Thus, need to 174 * also want to know the memory "truesize". Thus, need to
171 * know the memory frame size backing xdp_buff. 175 * know the memory frame size backing xdp_buff.
@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
183 * is not at a fixed memory location, with mixed length 187 * is not at a fixed memory location, with mixed length
184 * packets, which is bad for cache-line hotness. 188 * packets, which is bad for cache-line hotness.
185 */ 189 */
186 frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) + 190 frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
187 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 191 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
188 192
189 pkt_data_start = xdpf->data - xdpf->headroom; 193 pkt_data_start = xdpf->data - hard_start_headroom;
190 skb = build_skb(pkt_data_start, frame_size); 194 skb = build_skb(pkt_data_start, frame_size);
191 if (!skb) 195 if (!skb)
192 return NULL; 196 return NULL;
193 197
194 skb_reserve(skb, xdpf->headroom); 198 skb_reserve(skb, hard_start_headroom);
195 __skb_put(skb, xdpf->len); 199 __skb_put(skb, xdpf->len);
196 if (xdpf->metasize) 200 if (xdpf->metasize)
197 skb_metadata_set(skb, xdpf->metasize); 201 skb_metadata_set(skb, xdpf->metasize);
@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
205 * - RX ring dev queue index (skb_record_rx_queue) 209 * - RX ring dev queue index (skb_record_rx_queue)
206 */ 210 */
207 211
212 /* Allow the SKB to reuse the area occupied by the xdp_frame */
213 xdp_scrub_frame(xdpf);
214
208 return skb; 215 return skb;
209} 216}
210 217
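The fix accounts for the struct xdp_frame that sits at the start of the buffer headroom: both the truesize calculation and skb_reserve() must use the combined hard-start headroom, or the shared_info placement and reserved space are off by sizeof(struct xdp_frame). A standalone toy of the size math; all constants are assumed stand-ins, not kernel values:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int xdp_frame_sz = 32;	/* assumed sizeof(struct xdp_frame) */
	unsigned int headroom = 192, len = 1500, shinfo = 320, cacheline = 64;
	unsigned int hard_start_headroom = xdp_frame_sz + headroom;
	unsigned int frame_size = ALIGN_UP(len + hard_start_headroom, cacheline)
				+ ALIGN_UP(shinfo, cacheline);

	/* mirrors skb_reserve(skb, hard_start_headroom) after the fix */
	printf("reserve %u bytes, truesize %u\n", hard_start_headroom, frame_size);
	return 0;
}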
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 2ada5e21dfa6..4a8f390a2b82 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
554} 554}
555EXPORT_SYMBOL(bpf_prog_get_type_path); 555EXPORT_SYMBOL(bpf_prog_get_type_path);
556 556
557static void bpf_evict_inode(struct inode *inode)
558{
559 enum bpf_type type;
560
561 truncate_inode_pages_final(&inode->i_data);
562 clear_inode(inode);
563
564 if (S_ISLNK(inode->i_mode))
565 kfree(inode->i_link);
566 if (!bpf_inode_type(inode, &type))
567 bpf_any_put(inode->i_private, type);
568}
569
570/* 557/*
571 * Display the mount options in /proc/mounts. 558 * Display the mount options in /proc/mounts.
572 */ 559 */
@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
579 return 0; 566 return 0;
580} 567}
581 568
569static void bpf_destroy_inode_deferred(struct rcu_head *head)
570{
571 struct inode *inode = container_of(head, struct inode, i_rcu);
572 enum bpf_type type;
573
574 if (S_ISLNK(inode->i_mode))
575 kfree(inode->i_link);
576 if (!bpf_inode_type(inode, &type))
577 bpf_any_put(inode->i_private, type);
578 free_inode_nonrcu(inode);
579}
580
581static void bpf_destroy_inode(struct inode *inode)
582{
583 call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
584}
585
582static const struct super_operations bpf_super_ops = { 586static const struct super_operations bpf_super_ops = {
583 .statfs = simple_statfs, 587 .statfs = simple_statfs,
584 .drop_inode = generic_delete_inode, 588 .drop_inode = generic_delete_inode,
585 .show_options = bpf_show_options, 589 .show_options = bpf_show_options,
586 .evict_inode = bpf_evict_inode, 590 .destroy_inode = bpf_destroy_inode,
587}; 591};
588 592
589enum { 593enum {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2fe89138309a..b7ad8003c4e6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1898,8 +1898,9 @@ continue_func:
1898 } 1898 }
1899 frame++; 1899 frame++;
1900 if (frame >= MAX_CALL_FRAMES) { 1900 if (frame >= MAX_CALL_FRAMES) {
1901 WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); 1901 verbose(env, "the call stack of %d frames is too deep !\n",
1902 return -EFAULT; 1902 frame);
1903 return -E2BIG;
1903 } 1904 }
1904 goto process_func; 1905 goto process_func;
1905 } 1906 }
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 025f419d16f6..6754f3ecfd94 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -564,6 +564,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
564 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); 564 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
565} 565}
566 566
567static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
568{
569 if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
570 return true;
571 /*
572 * When CPU hotplug is disabled, taking the CPU down is not
573 * possible because takedown_cpu() and the architecture and
574 * subsystem specific mechanisms are not available. So the CPU
575 * which would be completely unplugged again needs to stay around
576 * in the current state.
577 */
578 return st->state <= CPUHP_BRINGUP_CPU;
579}
580
567static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 581static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
568 enum cpuhp_state target) 582 enum cpuhp_state target)
569{ 583{
@@ -574,8 +588,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
574 st->state++; 588 st->state++;
575 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); 589 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
576 if (ret) { 590 if (ret) {
577 st->target = prev_state; 591 if (can_rollback_cpu(st)) {
578 undo_cpu_up(cpu, st); 592 st->target = prev_state;
593 undo_cpu_up(cpu, st);
594 }
579 break; 595 break;
580 } 596 }
581 } 597 }
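The rollback guard added here only matters for the CONFIG_HOTPLUG_CPU=n case: a partially brought-up CPU can still be unwound, a fully online one must stay. A standalone toy of the decision, with stand-in state values rather than the kernel's enum cpuhp_state:

#include <stdbool.h>
#include <stdio.h>

enum { CPUHP_OFFLINE = 0, CPUHP_BRINGUP_CPU = 50, CPUHP_ONLINE = 100 };

static bool can_rollback(bool hotplug_cpu, int state)
{
	if (hotplug_cpu)
		return true;
	/* without takedown_cpu() support, only pre-bringup states unwind */
	return state <= CPUHP_BRINGUP_CPU;
}

int main(void)
{
	printf("%d\n", can_rollback(false, CPUHP_ONLINE));	/* 0: must stay up */
	return 0;
}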
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 771e93f9c43f..6f357f4fc859 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -29,6 +29,7 @@
29#include <linux/hw_breakpoint.h> 29#include <linux/hw_breakpoint.h>
30#include <linux/cn_proc.h> 30#include <linux/cn_proc.h>
31#include <linux/compat.h> 31#include <linux/compat.h>
32#include <linux/sched/signal.h>
32 33
33/* 34/*
34 * Access another process' address space via ptrace. 35 * Access another process' address space via ptrace.
@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
924 ret = ptrace_setsiginfo(child, &siginfo); 925 ret = ptrace_setsiginfo(child, &siginfo);
925 break; 926 break;
926 927
927 case PTRACE_GETSIGMASK: 928 case PTRACE_GETSIGMASK: {
929 sigset_t *mask;
930
928 if (addr != sizeof(sigset_t)) { 931 if (addr != sizeof(sigset_t)) {
929 ret = -EINVAL; 932 ret = -EINVAL;
930 break; 933 break;
931 } 934 }
932 935
933 if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t))) 936 if (test_tsk_restore_sigmask(child))
937 mask = &child->saved_sigmask;
938 else
939 mask = &child->blocked;
940
941 if (copy_to_user(datavp, mask, sizeof(sigset_t)))
934 ret = -EFAULT; 942 ret = -EFAULT;
935 else 943 else
936 ret = 0; 944 ret = 0;
937 945
938 break; 946 break;
947 }
939 948
940 case PTRACE_SETSIGMASK: { 949 case PTRACE_SETSIGMASK: {
941 sigset_t new_set; 950 sigset_t new_set;
@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
961 child->blocked = new_set; 970 child->blocked = new_set;
962 spin_unlock_irq(&child->sighand->siglock); 971 spin_unlock_irq(&child->sighand->siglock);
963 972
973 clear_tsk_restore_sigmask(child);
974
964 ret = 0; 975 ret = 0;
965 break; 976 break;
966 } 977 }
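For tracers, the visible change is that PTRACE_GETSIGMASK now reports the mask a tracee blocked in sigsuspend() will restore, and PTRACE_SETSIGMASK cancels any pending restore. A minimal usage sketch; note the kernel sigset is 8 bytes on 64-bit, not glibc's 128-byte sigset_t, so the kernel size goes in addr. Assumes a stopped tracee in `pid`; error handling omitted:

#include <stdint.h>
#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GETSIGMASK
#define PTRACE_GETSIGMASK 0x420a
#endif

int dump_blocked(pid_t pid)
{
	uint64_t mask;	/* kernel sigset: one bit per signal, bit sig-1 */

	if (ptrace(PTRACE_GETSIGMASK, pid, (void *)sizeof(mask), &mask) == -1)
		return -1;
	printf("SIGUSR1 blocked: %d\n", (int)((mask >> (SIGUSR1 - 1)) & 1));
	return 0;
}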
diff --git a/kernel/signal.c b/kernel/signal.c
index b7953934aa99..f98448cf2def 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3605 if (unlikely(sig != kinfo.si_signo)) 3605 if (unlikely(sig != kinfo.si_signo))
3606 goto err; 3606 goto err;
3607 3607
3608 /* Only allow sending arbitrary signals to yourself. */
3609 ret = -EPERM;
3608 if ((task_pid(current) != pid) && 3610 if ((task_pid(current) != pid) &&
3609 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) { 3611 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3610 /* Only allow sending arbitrary signals to yourself. */ 3612 goto err;
3611 ret = -EPERM;
3612 if (kinfo.si_code != SI_USER)
3613 goto err;
3614
3615 /* Turn this into a regular kill signal. */
3616 prepare_kill_siginfo(sig, &kinfo);
3617 }
3618 } else { 3613 } else {
3619 prepare_kill_siginfo(sig, &kinfo); 3614 prepare_kill_siginfo(sig, &kinfo);
3620 } 3615 }
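After this cleanup the rule reads in one place: pidfd_send_signal() rejects a caller-supplied siginfo (si_code >= 0 or SI_TKILL) with -EPERM unless the target is the caller itself. A raw-syscall usage sketch; the x86_64 syscall number is assumed, and glibc had no wrapper at the time:

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_send_signal
#define __NR_pidfd_send_signal 424	/* x86_64 */
#endif

static int pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
			     unsigned int flags)
{
	return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
}

/* pidfd_send_signal(pidfd, SIGTERM, NULL, 0) behaves like kill();
 * a non-NULL siginfo with si_code >= 0 is only accepted on yourself. */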
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 403c9bd90413..6a5787233113 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -554,13 +554,15 @@ static void softlockup_start_all(void)
554 554
555int lockup_detector_online_cpu(unsigned int cpu) 555int lockup_detector_online_cpu(unsigned int cpu)
556{ 556{
557 watchdog_enable(cpu); 557 if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
558 watchdog_enable(cpu);
558 return 0; 559 return 0;
559} 560}
560 561
561int lockup_detector_offline_cpu(unsigned int cpu) 562int lockup_detector_offline_cpu(unsigned int cpu)
562{ 563{
563 watchdog_disable(cpu); 564 if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
565 watchdog_disable(cpu);
564 return 0; 566 return 0;
565} 567}
566 568
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5b382c1244ed..155fe38756ec 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
591void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, 591void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
592 unsigned int cpu) 592 unsigned int cpu)
593{ 593{
594 /*
595 * Once the clear bit is set, the bit may be allocated out again.
596 *
597 * This orders READs/WRITEs on the associated instance (such as a
598 * blk-mq request) against this bit, avoiding a race with
599 * re-allocation; its pair is the memory barrier implied in __sbitmap_get_word.
600 *
601 * One invariant is that the clear bit has to be zero when the bit
602 * is in use.
603 */
604 smp_mb__before_atomic();
594 sbitmap_deferred_clear_bit(&sbq->sb, nr); 605 sbitmap_deferred_clear_bit(&sbq->sb, nr);
595 606
596 /* 607 /*
diff --git a/mm/compaction.c b/mm/compaction.c
index f171a83707ce..3319e0872d01 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
242 bool check_target) 242 bool check_target)
243{ 243{
244 struct page *page = pfn_to_online_page(pfn); 244 struct page *page = pfn_to_online_page(pfn);
245 struct page *block_page;
245 struct page *end_page; 246 struct page *end_page;
246 unsigned long block_pfn; 247 unsigned long block_pfn;
247 248
@@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
267 get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 268 get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
268 return false; 269 return false;
269 270
271 /* Ensure the start of the pageblock or zone is online and valid */
272 block_pfn = pageblock_start_pfn(pfn);
273 block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
274 if (block_page) {
275 page = block_page;
276 pfn = block_pfn;
277 }
278
279 /* Ensure the end of the pageblock or zone is online and valid */
280 block_pfn += pageblock_nr_pages;
281 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
282 end_page = pfn_to_online_page(block_pfn);
283 if (!end_page)
284 return false;
285
270 /* 286 /*
271 * Only clear the hint if a sample indicates there is either a 287 * Only clear the hint if a sample indicates there is either a
272 * free page or an LRU page in the block. One or other condition 288 * free page or an LRU page in the block. One or other condition
273 * is necessary for the block to be a migration source/target. 289 * is necessary for the block to be a migration source/target.
274 */ 290 */
275 block_pfn = pageblock_start_pfn(pfn);
276 pfn = max(block_pfn, zone->zone_start_pfn);
277 page = pfn_to_page(pfn);
278 if (zone != page_zone(page))
279 return false;
280 pfn = block_pfn + pageblock_nr_pages;
281 pfn = min(pfn, zone_end_pfn(zone));
282 end_page = pfn_to_page(pfn);
283
284 do { 291 do {
285 if (pfn_valid_within(pfn)) { 292 if (pfn_valid_within(pfn)) {
286 if (check_source && PageLRU(page)) { 293 if (check_source && PageLRU(page)) {
@@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
309static void __reset_isolation_suitable(struct zone *zone) 316static void __reset_isolation_suitable(struct zone *zone)
310{ 317{
311 unsigned long migrate_pfn = zone->zone_start_pfn; 318 unsigned long migrate_pfn = zone->zone_start_pfn;
312 unsigned long free_pfn = zone_end_pfn(zone); 319 unsigned long free_pfn = zone_end_pfn(zone) - 1;
313 unsigned long reset_migrate = free_pfn; 320 unsigned long reset_migrate = free_pfn;
314 unsigned long reset_free = migrate_pfn; 321 unsigned long reset_free = migrate_pfn;
315 bool source_set = false; 322 bool source_set = false;
@@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc)
1363 count_compact_events(COMPACTISOLATED, nr_isolated); 1370 count_compact_events(COMPACTISOLATED, nr_isolated);
1364 } else { 1371 } else {
1365 /* If isolation fails, abort the search */ 1372 /* If isolation fails, abort the search */
1366 order = -1; 1373 order = cc->search_order + 1;
1367 page = NULL; 1374 page = NULL;
1368 } 1375 }
1369 } 1376 }
diff --git a/mm/debug.c b/mm/debug.c
index c0b31b6c3877..eee9c221280c 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
79 pr_warn("ksm "); 79 pr_warn("ksm ");
80 else if (mapping) { 80 else if (mapping) {
81 pr_warn("%ps ", mapping->a_ops); 81 pr_warn("%ps ", mapping->a_ops);
82 if (mapping->host->i_dentry.first) { 82 if (mapping->host && mapping->host->i_dentry.first) {
83 struct dentry *dentry; 83 struct dentry *dentry;
84 dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias); 84 dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
85 pr_warn("name:\"%pd\" ", dentry); 85 pr_warn("name:\"%pd\" ", dentry);
@@ -168,7 +168,7 @@ void dump_mm(const struct mm_struct *mm)
168 mm_pgtables_bytes(mm), 168 mm_pgtables_bytes(mm),
169 mm->map_count, 169 mm->map_count,
170 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, 170 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
171 atomic64_read(&mm->pinned_vm), 171 (u64)atomic64_read(&mm->pinned_vm),
172 mm->data_vm, mm->exec_vm, mm->stack_vm, 172 mm->data_vm, mm->exec_vm, mm->stack_vm,
173 mm->start_code, mm->end_code, mm->start_data, mm->end_data, 173 mm->start_code, mm->end_code, mm->start_data, mm->end_data,
174 mm->start_brk, mm->brk, mm->start_stack, 174 mm->start_brk, mm->brk, mm->start_stack,
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 3e0c11f7d7a1..3ce956efa0cb 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -163,7 +163,10 @@ static inline u8 random_tag(void)
163#endif 163#endif
164 164
165#ifndef arch_kasan_set_tag 165#ifndef arch_kasan_set_tag
166#define arch_kasan_set_tag(addr, tag) ((void *)(addr)) 166static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
167{
168 return addr;
169}
167#endif 170#endif
168#ifndef arch_kasan_reset_tag 171#ifndef arch_kasan_reset_tag
169#define arch_kasan_reset_tag(addr) ((void *)(addr)) 172#define arch_kasan_reset_tag(addr) ((void *)(addr))
diff --git a/mm/memory.c b/mm/memory.c
index 47fe250307c7..ab650c21bccd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1549 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); 1549 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
1550 goto out_unlock; 1550 goto out_unlock;
1551 } 1551 }
1552 entry = *pte; 1552 entry = pte_mkyoung(*pte);
1553 goto out_mkwrite; 1553 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1554 } else 1554 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
1555 goto out_unlock; 1555 update_mmu_cache(vma, addr, pte);
1556 }
1557 goto out_unlock;
1556 } 1558 }
1557 1559
1558 /* Ok, finally just insert the thing.. */ 1560 /* Ok, finally just insert the thing.. */
@@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1561 else 1563 else
1562 entry = pte_mkspecial(pfn_t_pte(pfn, prot)); 1564 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
1563 1565
1564out_mkwrite:
1565 if (mkwrite) { 1566 if (mkwrite) {
1566 entry = pte_mkyoung(entry); 1567 entry = pte_mkyoung(entry);
1567 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1568 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f767582af4f8..0082d699be94 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
1576{ 1576{
1577 unsigned long pfn, nr_pages; 1577 unsigned long pfn, nr_pages;
1578 long offlined_pages; 1578 long offlined_pages;
1579 int ret, node; 1579 int ret, node, nr_isolate_pageblock;
1580 unsigned long flags; 1580 unsigned long flags;
1581 unsigned long valid_start, valid_end; 1581 unsigned long valid_start, valid_end;
1582 struct zone *zone; 1582 struct zone *zone;
@@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
1602 ret = start_isolate_page_range(start_pfn, end_pfn, 1602 ret = start_isolate_page_range(start_pfn, end_pfn,
1603 MIGRATE_MOVABLE, 1603 MIGRATE_MOVABLE,
1604 SKIP_HWPOISON | REPORT_FAILURE); 1604 SKIP_HWPOISON | REPORT_FAILURE);
1605 if (ret) { 1605 if (ret < 0) {
1606 reason = "failure to isolate range"; 1606 reason = "failure to isolate range";
1607 goto failed_removal; 1607 goto failed_removal;
1608 } 1608 }
1609 nr_isolate_pageblock = ret;
1609 1610
1610 arg.start_pfn = start_pfn; 1611 arg.start_pfn = start_pfn;
1611 arg.nr_pages = nr_pages; 1612 arg.nr_pages = nr_pages;
@@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
1657 /* Ok, all of our target is isolated. 1658 /* Ok, all of our target is isolated.
1658 We cannot do rollback at this point. */ 1659 We cannot do rollback at this point. */
1659 offline_isolated_pages(start_pfn, end_pfn); 1660 offline_isolated_pages(start_pfn, end_pfn);
1660 /* reset pagetype flags and makes migrate type to be MOVABLE */ 1661
1661 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 1662 /*
1663 * Onlining will reset pagetype flags and makes migrate type
1664 * MOVABLE, so just need to decrease the number of isolated
1665 * pageblocks zone counter here.
1666 */
1667 spin_lock_irqsave(&zone->lock, flags);
1668 zone->nr_isolate_pageblock -= nr_isolate_pageblock;
1669 spin_unlock_irqrestore(&zone->lock, flags);
1670
1662 /* removal success */ 1671 /* removal success */
1663 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); 1672 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
1664 zone->present_pages -= offlined_pages; 1673 zone->present_pages -= offlined_pages;
@@ -1690,12 +1699,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
1690 1699
1691failed_removal_isolated: 1700failed_removal_isolated:
1692 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 1701 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1702 memory_notify(MEM_CANCEL_OFFLINE, &arg);
1693failed_removal: 1703failed_removal:
1694 pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", 1704 pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
1695 (unsigned long long) start_pfn << PAGE_SHIFT, 1705 (unsigned long long) start_pfn << PAGE_SHIFT,
1696 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, 1706 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
1697 reason); 1707 reason);
1698 memory_notify(MEM_CANCEL_OFFLINE, &arg);
1699 /* pushback to free area */ 1708 /* pushback to free area */
1700 mem_hotplug_done(); 1709 mem_hotplug_done();
1701 return ret; 1710 return ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index af171ccb56a2..2219e747df49 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
428 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 428 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
429} 429}
430 430
431/*
432 * queue_pages_pmd() has three possible return values:
433 * 1 - pages are placed on the right node or queued successfully.
434 * 0 - THP was split.
435 * -EIO - the PMD is a migration entry, or MPOL_MF_STRICT was specified
436 * and an existing page was already on a node that does not follow the policy.
437 */
431static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 438static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
432 unsigned long end, struct mm_walk *walk) 439 unsigned long end, struct mm_walk *walk)
433{ 440{
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
437 unsigned long flags; 444 unsigned long flags;
438 445
439 if (unlikely(is_pmd_migration_entry(*pmd))) { 446 if (unlikely(is_pmd_migration_entry(*pmd))) {
440 ret = 1; 447 ret = -EIO;
441 goto unlock; 448 goto unlock;
442 } 449 }
443 page = pmd_page(*pmd); 450 page = pmd_page(*pmd);
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
454 ret = 1; 461 ret = 1;
455 flags = qp->flags; 462 flags = qp->flags;
456 /* go to thp migration */ 463 /* go to thp migration */
457 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 464 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
465 if (!vma_migratable(walk->vma)) {
466 ret = -EIO;
467 goto unlock;
468 }
469
458 migrate_page_add(page, qp->pagelist, flags); 470 migrate_page_add(page, qp->pagelist, flags);
471 } else
472 ret = -EIO;
459unlock: 473unlock:
460 spin_unlock(ptl); 474 spin_unlock(ptl);
461out: 475out:
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
480 ptl = pmd_trans_huge_lock(pmd, vma); 494 ptl = pmd_trans_huge_lock(pmd, vma);
481 if (ptl) { 495 if (ptl) {
482 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 496 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
483 if (ret) 497 if (ret > 0)
484 return 0; 498 return 0;
499 else if (ret < 0)
500 return ret;
485 } 501 }
486 502
487 if (pmd_trans_unstable(pmd)) 503 if (pmd_trans_unstable(pmd))
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
502 continue; 518 continue;
503 if (!queue_pages_required(page, qp)) 519 if (!queue_pages_required(page, qp))
504 continue; 520 continue;
505 migrate_page_add(page, qp->pagelist, flags); 521 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
522 if (!vma_migratable(vma))
523 break;
524 migrate_page_add(page, qp->pagelist, flags);
525 } else
526 break;
506 } 527 }
507 pte_unmap_unlock(pte - 1, ptl); 528 pte_unmap_unlock(pte - 1, ptl);
508 cond_resched(); 529 cond_resched();
509 return 0; 530 return addr != end ? -EIO : 0;
510} 531}
511 532
512static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 533static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
576 unsigned long endvma = vma->vm_end; 597 unsigned long endvma = vma->vm_end;
577 unsigned long flags = qp->flags; 598 unsigned long flags = qp->flags;
578 599
579 if (!vma_migratable(vma)) 600 /*
601 * MPOL_MF_STRICT must be checked so that -EIO can be returned
602 * where required, regardless of vma_migratable()
603 */
604 if (!vma_migratable(vma) &&
605 !(flags & MPOL_MF_STRICT))
580 return 1; 606 return 1;
581 607
582 if (endvma > end) 608 if (endvma > end)
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
603 } 629 }
604 630
605 /* queue pages from current vma */ 631 /* queue pages from current vma */
606 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 632 if (flags & MPOL_MF_VALID)
607 return 0; 633 return 0;
608 return 1; 634 return 1;
609} 635}
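The callers above now have to distinguish the three documented queue_pages_pmd() results. A standalone toy of that dispatch, with stand-in values rather than kernel code:

#include <stdio.h>

/* 1 = THP handled, 0 = THP was split so the PTE loop must run,
 * negative = hard error to propagate (-EIO under MPOL_MF_STRICT) */
static int handle(int ret)
{
	if (ret > 0)
		return 0;	/* done with this PMD */
	if (ret < 0)
		return ret;	/* propagate the error */
	return 1;		/* fall through to the PTE range walk */
}

int main(void)
{
	printf("%d %d %d\n", handle(1), handle(-5), handle(0));
	return 0;
}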
diff --git a/mm/migrate.c b/mm/migrate.c
index ac6f4939bb59..663a5449367a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
248 pte = swp_entry_to_pte(entry); 248 pte = swp_entry_to_pte(entry);
249 } else if (is_device_public_page(new)) { 249 } else if (is_device_public_page(new)) {
250 pte = pte_mkdevmap(pte); 250 pte = pte_mkdevmap(pte);
251 flush_dcache_page(new);
252 } 251 }
253 } else 252 }
254 flush_dcache_page(new);
255 253
256#ifdef CONFIG_HUGETLB_PAGE 254#ifdef CONFIG_HUGETLB_PAGE
257 if (PageHuge(new)) { 255 if (PageHuge(new)) {
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
995 */ 993 */
996 if (!PageMappingFlags(page)) 994 if (!PageMappingFlags(page))
997 page->mapping = NULL; 995 page->mapping = NULL;
996
997 if (unlikely(is_zone_device_page(newpage))) {
998 if (is_device_public_page(newpage))
999 flush_dcache_page(newpage);
1000 } else
1001 flush_dcache_page(newpage);
1002
998 } 1003 }
999out: 1004out:
1000 return rc; 1005 return rc;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03fcf73d47da..d96ca5bc555b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
8233 8233
8234 ret = start_isolate_page_range(pfn_max_align_down(start), 8234 ret = start_isolate_page_range(pfn_max_align_down(start),
8235 pfn_max_align_up(end), migratetype, 0); 8235 pfn_max_align_up(end), migratetype, 0);
8236 if (ret) 8236 if (ret < 0)
8237 return ret; 8237 return ret;
8238 8238
8239 /* 8239 /*
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index ce323e56b34d..019280712e1b 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
59 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. 59 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
60 * We just check MOVABLE pages. 60 * We just check MOVABLE pages.
61 */ 61 */
62 if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags)) 62 if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
63 isol_flags))
63 ret = 0; 64 ret = 0;
64 65
65 /* 66 /*
@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
160 return NULL; 161 return NULL;
161} 162}
162 163
163/* 164/**
164 * start_isolate_page_range() -- make page-allocation-type of range of pages 165 * start_isolate_page_range() - make page-allocation-type of range of pages to
165 * to be MIGRATE_ISOLATE. 166 * be MIGRATE_ISOLATE.
166 * @start_pfn: The lower PFN of the range to be isolated. 167 * @start_pfn: The lower PFN of the range to be isolated.
167 * @end_pfn: The upper PFN of the range to be isolated. 168 * @end_pfn: The upper PFN of the range to be isolated.
168 * @migratetype: migrate type to set in error recovery. 169 * start_pfn/end_pfn must be aligned to pageblock_order.
170 * @migratetype: Migrate type to set in error recovery.
171 * @flags: The following flags are allowed (they can be combined in
172 * a bit mask)
173 * SKIP_HWPOISON - ignore hwpoison pages
174 * REPORT_FAILURE - report details about the failure to
175 * isolate the range
169 * 176 *
170 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in 177 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
171 * the range will never be allocated. Any free pages and pages freed in the 178 * the range will never be allocated. Any free pages and pages freed in the
172 * future will not be allocated again. 179 * future will not be allocated again. If the specified range includes
173 * 180 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
174 * start_pfn/end_pfn must be aligned to pageblock_order. 181 * fully isolate all pages in the range, the caller has to free all of them first.
175 * Return 0 on success and -EBUSY if any part of range cannot be isolated. 182 * test_pages_isolated() can be used to test it.
176 * 183 *
177 * There is no high level synchronization mechanism that prevents two threads 184 * There is no high level synchronization mechanism that prevents two threads
178 * from trying to isolate overlapping ranges. If this happens, one thread 185 * from trying to isolate overlapping ranges. If this happens, one thread
179 * will notice pageblocks in the overlapping range already set to isolate. 186 * will notice pageblocks in the overlapping range already set to isolate.
180 * This happens in set_migratetype_isolate, and set_migratetype_isolate 187 * This happens in set_migratetype_isolate, and set_migratetype_isolate
181 * returns an error. We then clean up by restoring the migration type on 188 * returns an error. We then clean up by restoring the migration type on
182 * pageblocks we may have modified and return -EBUSY to caller. This 189 * pageblocks we may have modified and return -EBUSY to caller. This
183 * prevents two threads from simultaneously working on overlapping ranges. 190 * prevents two threads from simultaneously working on overlapping ranges.
191 *
192 * Return: the number of isolated pageblocks on success and -EBUSY if any part
193 * of the range cannot be isolated.
184 */ 194 */
185int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, 195int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
186 unsigned migratetype, int flags) 196 unsigned migratetype, int flags)
@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
188 unsigned long pfn; 198 unsigned long pfn;
189 unsigned long undo_pfn; 199 unsigned long undo_pfn;
190 struct page *page; 200 struct page *page;
201 int nr_isolate_pageblock = 0;
191 202
192 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); 203 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
193 BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages)); 204 BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
196 pfn < end_pfn; 207 pfn < end_pfn;
197 pfn += pageblock_nr_pages) { 208 pfn += pageblock_nr_pages) {
198 page = __first_valid_page(pfn, pageblock_nr_pages); 209 page = __first_valid_page(pfn, pageblock_nr_pages);
199 if (page && 210 if (page) {
200 set_migratetype_isolate(page, migratetype, flags)) { 211 if (set_migratetype_isolate(page, migratetype, flags)) {
201 undo_pfn = pfn; 212 undo_pfn = pfn;
202 goto undo; 213 goto undo;
214 }
215 nr_isolate_pageblock++;
203 } 216 }
204 } 217 }
205 return 0; 218 return nr_isolate_pageblock;
206undo: 219undo:
207 for (pfn = start_pfn; 220 for (pfn = start_pfn;
208 pfn < undo_pfn; 221 pfn < undo_pfn;
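With this change a plain `if (ret)` check on start_isolate_page_range() would treat success as failure; callers (see the memory_hotplug.c and page_alloc.c hunks in this merge) must test for negative values and keep the pageblock count. A standalone toy of the new convention, not kernel code:

#include <stdio.h>

/* negative = error; otherwise the number of isolated pageblocks */
static int isolate_model(int ok, int nr_blocks)
{
	return ok ? nr_blocks : -16;	/* -EBUSY */
}

int main(void)
{
	int ret = isolate_model(1, 4);

	if (ret < 0)
		return 1;			/* failure to isolate range */
	printf("isolated %d pageblocks\n", ret);	/* zone counter delta */
	return 0;
}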
diff --git a/mm/slab.c b/mm/slab.c
index 28652e4218e0..329bfe67f2ca 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2115,6 +2115,8 @@ done:
2115 cachep->allocflags = __GFP_COMP; 2115 cachep->allocflags = __GFP_COMP;
2116 if (flags & SLAB_CACHE_DMA) 2116 if (flags & SLAB_CACHE_DMA)
2117 cachep->allocflags |= GFP_DMA; 2117 cachep->allocflags |= GFP_DMA;
2118 if (flags & SLAB_CACHE_DMA32)
2119 cachep->allocflags |= GFP_DMA32;
2118 if (flags & SLAB_RECLAIM_ACCOUNT) 2120 if (flags & SLAB_RECLAIM_ACCOUNT)
2119 cachep->allocflags |= __GFP_RECLAIMABLE; 2121 cachep->allocflags |= __GFP_RECLAIMABLE;
2120 cachep->size = size; 2122 cachep->size = size;
diff --git a/mm/slab.h b/mm/slab.h
index e5e6658eeacc..43ac818b8592 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
127 127
128 128
129/* Legal flag mask for kmem_cache_create(), for various configurations */ 129/* Legal flag mask for kmem_cache_create(), for various configurations */
130#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \ 130#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
131 SLAB_CACHE_DMA32 | SLAB_PANIC | \
131 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS ) 132 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
132 133
133#if defined(CONFIG_DEBUG_SLAB) 134#if defined(CONFIG_DEBUG_SLAB)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 03eeb8b7b4b1..58251ba63e4a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
53 SLAB_FAILSLAB | SLAB_KASAN) 53 SLAB_FAILSLAB | SLAB_KASAN)
54 54
55#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ 55#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
56 SLAB_ACCOUNT) 56 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
57 57
58/* 58/*
59 * Merge control. If this is set then no merging of slab caches will occur. 59 * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 1b08fbcb7e61..d30ede89f4a6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3589,6 +3589,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3589 if (s->flags & SLAB_CACHE_DMA) 3589 if (s->flags & SLAB_CACHE_DMA)
3590 s->allocflags |= GFP_DMA; 3590 s->allocflags |= GFP_DMA;
3591 3591
3592 if (s->flags & SLAB_CACHE_DMA32)
3593 s->allocflags |= GFP_DMA32;
3594
3592 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3595 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3593 s->allocflags |= __GFP_RECLAIMABLE; 3596 s->allocflags |= __GFP_RECLAIMABLE;
3594 3597
@@ -5679,6 +5682,8 @@ static char *create_unique_id(struct kmem_cache *s)
5679 */ 5682 */
5680 if (s->flags & SLAB_CACHE_DMA) 5683 if (s->flags & SLAB_CACHE_DMA)
5681 *p++ = 'd'; 5684 *p++ = 'd';
5685 if (s->flags & SLAB_CACHE_DMA32)
5686 *p++ = 'D';
5682 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5687 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5683 *p++ = 'a'; 5688 *p++ = 'a';
5684 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5689 if (s->flags & SLAB_CONSISTENCY_CHECKS)
diff --git a/mm/sparse.c b/mm/sparse.c
index 69904aa6165b..56e057c432f9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
567} 567}
568 568
569#ifdef CONFIG_MEMORY_HOTREMOVE 569#ifdef CONFIG_MEMORY_HOTREMOVE
570/* Mark all memory sections within the pfn range as online */ 570/* Mark all memory sections within the pfn range as offline */
571void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) 571void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
572{ 572{
573 unsigned long pfn; 573 unsigned long pfn;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 15293c2a5dd8..8d77b6ee4477 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
443 return rc; 443 return rc;
444} 444}
445 445
446static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) 446static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
447 struct scatterlist *sgl, unsigned int sgc)
447{ 448{
448 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; 449 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
449 const struct net_device_ops *ops = real_dev->netdev_ops; 450 const struct net_device_ops *ops = real_dev->netdev_ops;
450 int rc = -EINVAL; 451 int rc = 0;
452
453 if (ops->ndo_fcoe_ddp_target)
454 rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
451 455
452 if (ops->ndo_fcoe_get_wwn)
453 rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
454 return rc; 456 return rc;
455} 457}
458#endif
456 459
457static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, 460#ifdef NETDEV_FCOE_WWNN
458 struct scatterlist *sgl, unsigned int sgc) 461static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
459{ 462{
460 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; 463 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
461 const struct net_device_ops *ops = real_dev->netdev_ops; 464 const struct net_device_ops *ops = real_dev->netdev_ops;
462 int rc = 0; 465 int rc = -EINVAL;
463
464 if (ops->ndo_fcoe_ddp_target)
465 rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
466 466
467 if (ops->ndo_fcoe_get_wwn)
468 rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
467 return rc; 469 return rc;
468} 470}
469#endif 471#endif
@@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = {
794 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, 796 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
795 .ndo_fcoe_enable = vlan_dev_fcoe_enable, 797 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
796 .ndo_fcoe_disable = vlan_dev_fcoe_disable, 798 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
797 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
798 .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, 799 .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
799#endif 800#endif
801#ifdef NETDEV_FCOE_WWNN
802 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
803#endif
800#ifdef CONFIG_NET_POLL_CONTROLLER 804#ifdef CONFIG_NET_POLL_CONTROLLER
801 .ndo_poll_controller = vlan_dev_poll_controller, 805 .ndo_poll_controller = vlan_dev_poll_controller,
802 .ndo_netpoll_setup = vlan_dev_netpoll_setup, 806 .ndo_netpoll_setup = vlan_dev_netpoll_setup,
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 13b9ab860a25..2614a9caee00 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -92,8 +92,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
92 92
93 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); 93 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
94 94
95 /* free the TID stats immediately */ 95 if (!ret) {
96 cfg80211_sinfo_release_content(&sinfo); 96 /* free the TID stats immediately */
97 cfg80211_sinfo_release_content(&sinfo);
98 }
97 99
98 dev_put(real_netdev); 100 dev_put(real_netdev);
99 if (ret == -ENOENT) { 101 if (ret == -ENOENT) {
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 8d6b7c9c2a7e..663a53b6d36e 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -790,6 +790,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
790 const u8 *mac, const unsigned short vid) 790 const u8 *mac, const unsigned short vid)
791{ 791{
792 struct batadv_bla_claim search_claim, *claim; 792 struct batadv_bla_claim search_claim, *claim;
793 struct batadv_bla_claim *claim_removed_entry;
794 struct hlist_node *claim_removed_node;
793 795
794 ether_addr_copy(search_claim.addr, mac); 796 ether_addr_copy(search_claim.addr, mac);
795 search_claim.vid = vid; 797 search_claim.vid = vid;
@@ -800,10 +802,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
800 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, 802 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
801 mac, batadv_print_vid(vid)); 803 mac, batadv_print_vid(vid));
802 804
803 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, 805 claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
804 batadv_choose_claim, claim); 806 batadv_compare_claim,
805 batadv_claim_put(claim); /* reference from the hash is gone */ 807 batadv_choose_claim, claim);
808 if (!claim_removed_node)
809 goto free_claim;
806 810
811 /* reference from the hash is gone */
812 claim_removed_entry = hlist_entry(claim_removed_node,
813 struct batadv_bla_claim, hash_entry);
814 batadv_claim_put(claim_removed_entry);
815
816free_claim:
807 /* don't need the reference from hash_find() anymore */ 817 /* don't need the reference from hash_find() anymore */
808 batadv_claim_put(claim); 818 batadv_claim_put(claim);
809} 819}
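The same pattern repeats in the translation-table hunks below: batadv_hash_remove() hands back the embedded hlist_node, and the fix recovers the owning entry from it before dropping the hash's reference. A standalone toy of that hlist_entry()/container_of() step:

#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; };
struct entry { int refcount; struct node hash_entry; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct entry e = { .refcount = 1 };
	struct node *removed = &e.hash_entry;	/* as returned by hash_remove */
	struct entry *owner = container_of(removed, struct entry, hash_entry);

	printf("refcount=%d\n", owner->refcount);	/* put the hash's reference */
	return 0;
}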
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index ad14c8086fe7..80fc3253c336 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1130,9 +1130,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
1130 struct attribute *attr, 1130 struct attribute *attr,
1131 char *buff, size_t count) 1131 char *buff, size_t count)
1132{ 1132{
1133 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
1134 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); 1133 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
1135 struct batadv_hard_iface *hard_iface; 1134 struct batadv_hard_iface *hard_iface;
1135 struct batadv_priv *bat_priv;
1136 u32 tp_override; 1136 u32 tp_override;
1137 u32 old_tp_override; 1137 u32 old_tp_override;
1138 bool ret; 1138 bool ret;
@@ -1163,7 +1163,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
1163 1163
1164 atomic_set(&hard_iface->bat_v.throughput_override, tp_override); 1164 atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
1165 1165
1166 batadv_netlink_notify_hardif(bat_priv, hard_iface); 1166 if (hard_iface->soft_iface) {
1167 bat_priv = netdev_priv(hard_iface->soft_iface);
1168 batadv_netlink_notify_hardif(bat_priv, hard_iface);
1169 }
1167 1170
1168out: 1171out:
1169 batadv_hardif_put(hard_iface); 1172 batadv_hardif_put(hard_iface);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5d8bf8048e4e..1ddfd5e011ee 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -603,14 +603,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
603 struct batadv_tt_global_entry *tt_global, 603 struct batadv_tt_global_entry *tt_global,
604 const char *message) 604 const char *message)
605{ 605{
606 struct batadv_tt_global_entry *tt_removed_entry;
607 struct hlist_node *tt_removed_node;
608
606 batadv_dbg(BATADV_DBG_TT, bat_priv, 609 batadv_dbg(BATADV_DBG_TT, bat_priv,
607 "Deleting global tt entry %pM (vid: %d): %s\n", 610 "Deleting global tt entry %pM (vid: %d): %s\n",
608 tt_global->common.addr, 611 tt_global->common.addr,
609 batadv_print_vid(tt_global->common.vid), message); 612 batadv_print_vid(tt_global->common.vid), message);
610 613
611 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, 614 tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
612 batadv_choose_tt, &tt_global->common); 615 batadv_compare_tt,
613 batadv_tt_global_entry_put(tt_global); 616 batadv_choose_tt,
617 &tt_global->common);
618 if (!tt_removed_node)
619 return;
620
621 /* drop the reference of the removed hash entry */
622 tt_removed_entry = hlist_entry(tt_removed_node,
623 struct batadv_tt_global_entry,
624 common.hash_entry);
625 batadv_tt_global_entry_put(tt_removed_entry);
614} 626}
615 627
616/** 628/**
@@ -1324,9 +1336,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
1324 unsigned short vid, const char *message, 1336 unsigned short vid, const char *message,
1325 bool roaming) 1337 bool roaming)
1326{ 1338{
1339 struct batadv_tt_local_entry *tt_removed_entry;
1327 struct batadv_tt_local_entry *tt_local_entry; 1340 struct batadv_tt_local_entry *tt_local_entry;
1328 u16 flags, curr_flags = BATADV_NO_FLAGS; 1341 u16 flags, curr_flags = BATADV_NO_FLAGS;
1329 void *tt_entry_exists; 1342 struct hlist_node *tt_removed_node;
1330 1343
1331 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1344 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1332 if (!tt_local_entry) 1345 if (!tt_local_entry)
@@ -1355,15 +1368,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
1355 */ 1368 */
1356 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); 1369 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
1357 1370
1358 tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash, 1371 tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
1359 batadv_compare_tt, 1372 batadv_compare_tt,
1360 batadv_choose_tt, 1373 batadv_choose_tt,
1361 &tt_local_entry->common); 1374 &tt_local_entry->common);
1362 if (!tt_entry_exists) 1375 if (!tt_removed_node)
1363 goto out; 1376 goto out;
1364 1377
1365 /* extra call to free the local tt entry */ 1378 /* drop the reference of the removed hash entry */
1366 batadv_tt_local_entry_put(tt_local_entry); 1379 tt_removed_entry = hlist_entry(tt_removed_node,
1380 struct batadv_tt_local_entry,
1381 common.hash_entry);
1382 batadv_tt_local_entry_put(tt_removed_entry);
1367 1383
1368out: 1384out:
1369 if (tt_local_entry) 1385 if (tt_local_entry)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index afef6fc2c074..8d82107c6419 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -584,6 +584,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
584 if (ipv4_is_local_multicast(group)) 584 if (ipv4_is_local_multicast(group))
585 return 0; 585 return 0;
586 586
587 memset(&br_group, 0, sizeof(br_group));
587 br_group.u.ip4 = group; 588 br_group.u.ip4 = group;
588 br_group.proto = htons(ETH_P_IP); 589 br_group.proto = htons(ETH_P_IP);
589 br_group.vid = vid; 590 br_group.vid = vid;
@@ -1480,6 +1481,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1480 1481
1481 own_query = port ? &port->ip4_own_query : &br->ip4_own_query; 1482 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1482 1483
1484 memset(&br_group, 0, sizeof(br_group));
1483 br_group.u.ip4 = group; 1485 br_group.u.ip4 = group;
1484 br_group.proto = htons(ETH_P_IP); 1486 br_group.proto = htons(ETH_P_IP);
1485 br_group.vid = vid; 1487 br_group.vid = vid;
@@ -1503,6 +1505,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1503 1505
1504 own_query = port ? &port->ip6_own_query : &br->ip6_own_query; 1506 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1505 1507
1508 memset(&br_group, 0, sizeof(br_group));
1506 br_group.u.ip6 = *group; 1509 br_group.u.ip6 = *group;
1507 br_group.proto = htons(ETH_P_IPV6); 1510 br_group.proto = htons(ETH_P_IPV6);
1508 br_group.vid = vid; 1511 br_group.vid = vid;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 7e71b0df1fbc..3083988ce729 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
840 size_t bytes) 840 size_t bytes)
841{ 841{
842 struct ceph_bio_iter *it = &cursor->bio_iter; 842 struct ceph_bio_iter *it = &cursor->bio_iter;
843 struct page *page = bio_iter_page(it->bio, it->iter);
843 844
844 BUG_ON(bytes > cursor->resid); 845 BUG_ON(bytes > cursor->resid);
845 BUG_ON(bytes > bio_iter_len(it->bio, it->iter)); 846 BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
@@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
851 return false; /* no more data */ 852 return false; /* no more data */
852 } 853 }
853 854
854 if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done)) 855 if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
856 page == bio_iter_page(it->bio, it->iter)))
855 return false; /* more bytes to process in this segment */ 857 return false; /* more bytes to process in this segment */
856 858
857 if (!it->iter.bi_size) { 859 if (!it->iter.bi_size) {
@@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
899 size_t bytes) 901 size_t bytes)
900{ 902{
901 struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs; 903 struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
904 struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
902 905
903 BUG_ON(bytes > cursor->resid); 906 BUG_ON(bytes > cursor->resid);
904 BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter)); 907 BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
@@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
910 return false; /* no more data */ 913 return false; /* no more data */
911 } 914 }
912 915
913 if (!bytes || cursor->bvec_iter.bi_bvec_done) 916 if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
917 page == bvec_iter_page(bvecs, cursor->bvec_iter)))
914 return false; /* more bytes to process in this segment */ 918 return false; /* more bytes to process in this segment */
915 919
916 BUG_ON(cursor->last_piece); 920 BUG_ON(cursor->last_piece);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 0dafec5cada0..91bb5a083fee 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -281,7 +281,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
281 break; 281 break;
282 282
283 sk_busy_loop(sk, flags & MSG_DONTWAIT); 283 sk_busy_loop(sk, flags & MSG_DONTWAIT);
284 } while (!skb_queue_empty(&sk->sk_receive_queue)); 284 } while (sk->sk_receive_queue.prev != *last);
285 285
286 error = -EAGAIN; 286 error = -EAGAIN;
287 287
diff --git a/net/core/dev.c b/net/core/dev.c
index a95782764360..b430f851f377 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5017,8 +5017,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5017 if (pt_prev->list_func != NULL) 5017 if (pt_prev->list_func != NULL)
5018 pt_prev->list_func(head, pt_prev, orig_dev); 5018 pt_prev->list_func(head, pt_prev, orig_dev);
5019 else 5019 else
5020 list_for_each_entry_safe(skb, next, head, list) 5020 list_for_each_entry_safe(skb, next, head, list) {
5021 skb_list_del_init(skb);
5021 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5022 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5023 }
5022} 5024}
5023 5025
5024static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5026static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 387d67eb75ab..4a593853cbf2 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1798,11 +1798,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
1798 WARN_ON_ONCE(!ret); 1798 WARN_ON_ONCE(!ret);
1799 1799
1800 gstrings.len = ret; 1800 gstrings.len = ret;
1801 data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
1802 if (gstrings.len && !data)
1803 return -ENOMEM;
1804 1801
1805 __ethtool_get_strings(dev, gstrings.string_set, data); 1802 if (gstrings.len) {
1803 data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
1804 if (!data)
1805 return -ENOMEM;
1806
1807 __ethtool_get_strings(dev, gstrings.string_set, data);
1808 } else {
1809 data = NULL;
1810 }
1806 1811
1807 ret = -EFAULT; 1812 ret = -EFAULT;
1808 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) 1813 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1898,11 +1903,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
1898 return -EFAULT; 1903 return -EFAULT;
1899 1904
1900 stats.n_stats = n_stats; 1905 stats.n_stats = n_stats;
1901 data = vzalloc(array_size(n_stats, sizeof(u64)));
1902 if (n_stats && !data)
1903 return -ENOMEM;
1904 1906
1905 ops->get_ethtool_stats(dev, &stats, data); 1907 if (n_stats) {
1908 data = vzalloc(array_size(n_stats, sizeof(u64)));
1909 if (!data)
1910 return -ENOMEM;
1911 ops->get_ethtool_stats(dev, &stats, data);
1912 } else {
1913 data = NULL;
1914 }
1906 1915
1907 ret = -EFAULT; 1916 ret = -EFAULT;
1908 if (copy_to_user(useraddr, &stats, sizeof(stats))) 1917 if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@ -1942,16 +1951,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
1942 return -EFAULT; 1951 return -EFAULT;
1943 1952
1944 stats.n_stats = n_stats; 1953 stats.n_stats = n_stats;
1945 data = vzalloc(array_size(n_stats, sizeof(u64)));
1946 if (n_stats && !data)
1947 return -ENOMEM;
1948 1954
1949 if (dev->phydev && !ops->get_ethtool_phy_stats) { 1955 if (n_stats) {
1950 ret = phy_ethtool_get_stats(dev->phydev, &stats, data); 1956 data = vzalloc(array_size(n_stats, sizeof(u64)));
1951 if (ret < 0) 1957 if (!data)
1952 return ret; 1958 return -ENOMEM;
1959
1960 if (dev->phydev && !ops->get_ethtool_phy_stats) {
1961 ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
1962 if (ret < 0)
1963 goto out;
1964 } else {
1965 ops->get_ethtool_phy_stats(dev, &stats, data);
1966 }
1953 } else { 1967 } else {
1954 ops->get_ethtool_phy_stats(dev, &stats, data); 1968 data = NULL;
1955 } 1969 }
1956 1970
1957 ret = -EFAULT; 1971 ret = -EFAULT;
diff --git a/net/core/filter.c b/net/core/filter.c
index 08b53af84132..8904e3407163 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6862,14 +6862,8 @@ static bool flow_dissector_is_valid_access(int off, int size,
6862 const struct bpf_prog *prog, 6862 const struct bpf_prog *prog,
6863 struct bpf_insn_access_aux *info) 6863 struct bpf_insn_access_aux *info)
6864{ 6864{
6865 if (type == BPF_WRITE) { 6865 if (type == BPF_WRITE)
6866 switch (off) { 6866 return false;
6867 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6868 break;
6869 default:
6870 return false;
6871 }
6872 }
6873 6867
6874 switch (off) { 6868 switch (off) {
6875 case bpf_ctx_range(struct __sk_buff, data): 6869 case bpf_ctx_range(struct __sk_buff, data):
@@ -6881,11 +6875,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
6881 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 6875 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
6882 info->reg_type = PTR_TO_FLOW_KEYS; 6876 info->reg_type = PTR_TO_FLOW_KEYS;
6883 break; 6877 break;
6884 case bpf_ctx_range(struct __sk_buff, tc_classid): 6878 default:
6885 case bpf_ctx_range(struct __sk_buff, data_meta):
6886 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6887 case bpf_ctx_range(struct __sk_buff, tstamp):
6888 case bpf_ctx_range(struct __sk_buff, wire_len):
6889 return false; 6879 return false;
6890 } 6880 }
6891 6881
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b4d581134ef2..795449713ba4 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
707 /* Pass parameters to the BPF program */ 707 /* Pass parameters to the BPF program */
708 memset(flow_keys, 0, sizeof(*flow_keys)); 708 memset(flow_keys, 0, sizeof(*flow_keys));
709 cb->qdisc_cb.flow_keys = flow_keys; 709 cb->qdisc_cb.flow_keys = flow_keys;
710 flow_keys->n_proto = skb->protocol;
710 flow_keys->nhoff = skb_network_offset(skb); 711 flow_keys->nhoff = skb_network_offset(skb);
711 flow_keys->thoff = flow_keys->nhoff; 712 flow_keys->thoff = flow_keys->nhoff;
712 713
@@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
716 /* Restore state */ 717 /* Restore state */
717 memcpy(cb, &cb_saved, sizeof(cb_saved)); 718 memcpy(cb, &cb_saved, sizeof(cb_saved));
718 719
719 flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len); 720 flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
721 skb_network_offset(skb), skb->len);
720 flow_keys->thoff = clamp_t(u16, flow_keys->thoff, 722 flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
721 flow_keys->nhoff, skb->len); 723 flow_keys->nhoff, skb->len);
722 724
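Taken together, the filter.c and flow_dissector.c hunks shape what a BPF flow dissector program sees: no ctx writes outside flow_keys, n_proto pre-seeded from skb->protocol, and nhoff clamped to start no earlier than the network header. A minimal program sketch; it assumes an IPv4 packet and a libbpf-style bpf_helpers.h providing SEC():

#include <linux/bpf.h>
#include <linux/ip.h>
#include "bpf_helpers.h"

SEC("flow_dissector")
int dissect(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct bpf_flow_keys *keys = skb->flow_keys;
	struct iphdr *iph = data + keys->nhoff;

	if ((void *)(iph + 1) > data_end)
		return BPF_DROP;

	/* report where the transport header starts */
	keys->ip_proto = iph->protocol;
	keys->thoff = keys->nhoff + iph->ihl * 4;
	return BPF_OK;
}

char _license[] SEC("license") = "GPL";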
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 17f36317363d..7e6dcc625701 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 
 	refcount_set(&net->count, 1);
 	refcount_set(&net->passive, 1);
+	get_random_bytes(&net->hash_mix, sizeof(u32));
 	net->dev_base_seq = 1;
 	net->user_ns = user_ns;
 	idr_init(&net->netns_ids);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4782f9354dd1..9901f5322852 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3800,7 +3800,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	unsigned int delta_truesize;
 	struct sk_buff *lp;
 
-	if (unlikely(p->len + len >= 65536))
+	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
 		return -E2BIG;
 
 	lp = NAPI_GRO_CB(p)->last;
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index f227f002c73d..db87d9f58019 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local,
 	if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
 		return -ENOMEM;
 
-	return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval);
+	if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
+		kfree(fval.sp.vec);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 /**
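
The hunk above plugs a leak: dccp_feat_clone_sp_val() allocates a private copy of the preference vector, and on dccp_feat_push_change() failure that copy used to be dropped on the floor. A minimal standalone sketch of the ownership rule being enforced (hypothetical helpers, not the DCCP API):

    #include <stdlib.h>
    #include <string.h>

    struct sp_val { unsigned char *vec; size_t len; };

    /* Hypothetical consumer: takes ownership of val->vec only on success. */
    static int push_change(struct sp_val *val)
    {
        (void)val;
        return -1;   /* simulate failure */
    }

    static int register_sp(const unsigned char *sp, size_t len)
    {
        struct sp_val val;

        val.vec = malloc(len);   /* the "clone" step */
        if (!val.vec)
            return -1;
        memcpy(val.vec, sp, len);
        val.len = len;

        if (push_change(&val)) {
            free(val.vec);   /* failure: ownership never transferred */
            return -1;
        }
        return 0;            /* success: push_change() now owns vec */
    }

    int main(void)
    {
        unsigned char sp[2] = { 1, 2 };
        return register_sp(sp, sizeof(sp)) ? 0 : 1;
    }
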
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index ed4f6dc26365..85c22ada4744 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
 	return skb;
 }
 
+static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+				int *offset)
+{
+	*offset = QCA_HDR_LEN;
+	*proto = ((__be16 *)skb->data)[0];
+
+	return 0;
+}
+
 const struct dsa_device_ops qca_netdev_ops = {
 	.xmit = qca_tag_xmit,
 	.rcv = qca_tag_rcv,
+	.flow_dissect = qca_tag_flow_dissect,
 	.overhead = QCA_HDR_LEN,
 };
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index ecce2dc78f17..1132d6d1796a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
 			 ip_local_deliver_finish);
 }
 
-static inline bool ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_options *opt;
 	const struct iphdr *iph;
-	struct net_device *dev = skb->dev;
 
 	/* It looks as overkill, because not all
 	   IP options require packet mangling.
@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
 			}
 		}
 
-		if (ip_options_rcv_srr(skb))
+		if (ip_options_rcv_srr(skb, dev))
 			goto drop;
 	}
 
@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
 	}
 #endif
 
-	if (iph->ihl > 5 && ip_rcv_options(skb))
+	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
 		goto drop;
 
 	rt = skb_rtable(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 32a35043c9f5..3db31bb9df50 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
 	}
 }
 
-int ip_options_rcv_srr(struct sk_buff *skb)
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_options *opt = &(IPCB(skb)->opt);
 	int srrspace, srrptr;
@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 
 		orefdst = skb->_skb_refdst;
 		skb_dst_set(skb, NULL);
-		err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
+		err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
 		rt2 = skb_rtable(skb);
 		if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
 			skb_dst_drop(skb);
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index cd4814f7e962..359da68d7c06 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -67,11 +67,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
 module_param(dctcp_alpha_on_init, uint, 0644);
 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
 
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
-		 "parameter for clamping alpha on loss");
-
 static struct tcp_congestion_ops dctcp_reno;
 
 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
@@ -164,21 +159,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 	}
 }
 
-static void dctcp_state(struct sock *sk, u8 new_state)
+static void dctcp_react_to_loss(struct sock *sk)
 {
-	if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
-		struct dctcp *ca = inet_csk_ca(sk);
+	struct dctcp *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 
-		/* If this extension is enabled, we clamp dctcp_alpha to
-		 * max on packet loss; the motivation is that dctcp_alpha
-		 * is an indicator to the extend of congestion and packet
-		 * loss is an indicator of extreme congestion; setting
-		 * this in practice turned out to be beneficial, and
-		 * effectively assumes total congestion which reduces the
-		 * window by half.
-		 */
-		ca->dctcp_alpha = DCTCP_MAX_ALPHA;
-	}
+	ca->loss_cwnd = tp->snd_cwnd;
+	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+	if (new_state == TCP_CA_Recovery &&
+	    new_state != inet_csk(sk)->icsk_ca_state)
+		dctcp_react_to_loss(sk);
+	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+	 * one loss-adjustment per RTT.
+	 */
 }
 
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -190,6 +187,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
 	case CA_EVENT_ECN_NO_CE:
 		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
 		break;
+	case CA_EVENT_LOSS:
+		dctcp_react_to_loss(sk);
+		break;
 	default:
 		/* Don't care for the rest. */
 		break;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3979939804b7..faa6fa619f59 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2585,7 +2585,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
 {
 	int cpu;
 
-	module_put(net->ipv4.tcp_congestion_control->owner);
+	if (net->ipv4.tcp_congestion_control)
+		module_put(net->ipv4.tcp_congestion_control->owner);
 
 	for_each_possible_cpu(cpu)
 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 79d2e43c05c5..5fc1f4e0c0cf 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
 
 done:
 	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 	return ret;
 }
 
422 423
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index edbd12067170..e51f3c648b09 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 				inet6_sk(skb->sk) : NULL;
 	struct ipv6hdr *tmp_hdr;
 	struct frag_hdr *fh;
-	unsigned int mtu, hlen, left, len;
+	unsigned int mtu, hlen, left, len, nexthdr_offset;
 	int hroom, troom;
 	__be32 frag_id;
 	int ptr, offset = 0, err = 0;
@@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 		goto fail;
 	hlen = err;
 	nexthdr = *prevhdr;
+	nexthdr_offset = prevhdr - skb_network_header(skb);
 
 	mtu = ip6_skb_dst_mtu(skb);
 
@@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	    (err = skb_checksum_help(skb)))
 		goto fail;
 
+	prevhdr = skb_network_header(skb) + nexthdr_offset;
 	hroom = LL_RESERVED_SPACE(rt->dst.dev);
 	if (skb_has_frag_list(skb)) {
 		unsigned int first_len = skb_pagelen(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0c6403cf8b52..ade1390c6348 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
 					   eiph->daddr, eiph->saddr, 0, 0,
 					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
-		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
+		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
 			if (!IS_ERR(rt))
 				ip_rt_put(rt);
 			goto out;
@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	} else {
 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
 				   skb2->dev) ||
-		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
 			goto out;
 	}
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 07e21a82ce4c..b2109b74857d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
 		     !net_eq(tunnel->net, dev_net(tunnel->dev))))
 			goto out;
 
+		/* skb can be uncloned in iptunnel_pull_header, so
+		 * old iph is no longer valid
+		 */
+		iph = (const struct iphdr *)skb_mac_header(skb);
 		err = IP_ECN_decapsulate(iph, skb);
 		if (unlikely(err)) {
 			if (log_ecn_error)
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index c5c5ab6c5a1c..44fdc641710d 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
 	if (err)
 		goto fail;
 
-	err = sock_register(&kcm_family_ops);
-	if (err)
-		goto sock_register_fail;
-
 	err = register_pernet_device(&kcm_net_ops);
 	if (err)
 		goto net_ops_fail;
 
+	err = sock_register(&kcm_family_ops);
+	if (err)
+		goto sock_register_fail;
+
 	err = kcm_proc_init();
 	if (err)
 		goto proc_init_fail;
@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
 	return 0;
 
 proc_init_fail:
-	unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
 	sock_unregister(PF_KCM);
 
 sock_register_fail:
+	unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
 	proto_unregister(&kcm_proto);
 
 fail:
@@ -2090,8 +2090,8 @@ fail:
 static void __exit kcm_exit(void)
 {
 	kcm_proc_exit();
-	unregister_pernet_device(&kcm_net_ops);
 	sock_unregister(PF_KCM);
+	unregister_pernet_device(&kcm_net_ops);
 	proto_unregister(&kcm_proto);
 	destroy_workqueue(kcm_wq);
 
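
The three kcm hunks are one fix: sock_register() publishes the PF_KCM family to userspace, so it must run after register_pernet_device() has set up the state new sockets rely on, the error unwind must mirror that order, and module exit must unregister the family first. A generic sketch of the pattern under those assumptions (hypothetical subsystem names, not the kcm API):

    static int state_ready;

    static int  state_init(void)     { state_ready = 1; return 0; }
    static void state_exit(void)     { state_ready = 0; }
    /* Publishing the API only works once the backing state exists. */
    static int  api_register(void)   { return state_ready ? 0 : -1; }
    static void api_unregister(void) { }

    static int subsys_init(void)
    {
        int err;

        err = state_init();      /* backing state first */
        if (err)
            goto fail;

        err = api_register();    /* user-visible entry point last */
        if (err)
            goto state_fail;

        return 0;

    state_fail:
        state_exit();            /* unwind in exact reverse order */
    fail:
        return err;
    }

    static void subsys_exit(void)
    {
        api_unregister();        /* cut off new users first */
        state_exit();            /* then tear down what they were using */
    }

    int main(void)
    {
        if (subsys_init())
            return 1;
        subsys_exit();
        return 0;
    }
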
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index bd019058fc6f..3563acd5f92e 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2331,14 +2331,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 
 	struct sw_flow_actions *acts;
 	int new_acts_size;
-	int req_size = NLA_ALIGN(attr_len);
+	size_t req_size = NLA_ALIGN(attr_len);
 	int next_offset = offsetof(struct sw_flow_actions, actions) +
 			  (*sfa)->actions_len;
 
 	if (req_size <= (ksize(*sfa) - next_offset))
 		goto out;
 
-	new_acts_size = ksize(*sfa) * 2;
+	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
 		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index fd2694174607..faf726e00e27 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net)
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
-		if (net != c_net || !tc->t_sock)
+		if (net != c_net)
 			continue;
 		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
 			list_move_tail(&tc->t_tcp_node, &tmp_list);
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 4060b0955c97..0f82d50ea232 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -45,8 +45,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
 	struct psample_group *psample_group;
 	struct tcf_chain *goto_ch = NULL;
+	u32 psample_group_num, rate;
 	struct tc_sample *parm;
-	u32 psample_group_num;
 	struct tcf_sample *s;
 	bool exists = false;
 	int ret, err;
@@ -85,6 +85,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	if (err < 0)
 		goto release_idr;
 
+	rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+	if (!rate) {
+		NL_SET_ERR_MSG(extack, "invalid sample rate");
+		err = -EINVAL;
+		goto put_chain;
+	}
 	psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
 	psample_group = psample_group_get(net, psample_group_num);
 	if (!psample_group) {
@@ -96,7 +102,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
 	spin_lock_bh(&s->tcf_lock);
 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
-	s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+	s->rate = rate;
 	s->psample_group_num = psample_group_num;
 	RCU_INIT_POINTER(s->psample_group, psample_group);
 
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 459921bd3d87..a13bc351a414 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
 
 static void *mall_get(struct tcf_proto *tp, u32 handle)
 {
+	struct cls_mall_head *head = rtnl_dereference(tp->root);
+
+	if (head && head->handle == handle)
+		return head;
+
 	return NULL;
 }
 
135 140
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index acc9b9da985f..259d97bc2abd 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1517,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
 {
+	int wlen = skb_network_offset(skb);
 	u8 dscp;
 
-	switch (skb->protocol) {
+	switch (tc_skb_protocol(skb)) {
 	case htons(ETH_P_IP):
+		wlen += sizeof(struct iphdr);
+		if (!pskb_may_pull(skb, wlen) ||
+		    skb_try_make_writable(skb, wlen))
+			return 0;
+
 		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
 		if (wash && dscp)
 			ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
 		return dscp;
 
 	case htons(ETH_P_IPV6):
+		wlen += sizeof(struct ipv6hdr);
+		if (!pskb_may_pull(skb, wlen) ||
+		    skb_try_make_writable(skb, wlen))
+			return 0;
+
 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
 		if (wash && dscp)
 			ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 4dc05409e3fb..114b9048ea7e 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
+	__u32 qlen;
 
 	cl->xstats.avgidle = cl->avgidle;
 	cl->xstats.undertime = 0;
+	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
 
 	if (cl->undertime != PSCHED_PASTPERFECT)
 		cl->xstats.undertime = cl->undertime - q->now;
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
-	unsigned int qlen, backlog;
 
 	if (cl->filters || cl->children || cl == &q->link)
 		return -EBUSY;
 
 	sch_tree_lock(sch);
 
-	qlen = cl->q->q.qlen;
-	backlog = cl->q->qstats.backlog;
-	qdisc_reset(cl->q);
-	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+	qdisc_purge_queue(cl->q);
 
 	if (cl->next_alive)
 		cbq_deactivate_class(cl);
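
This and many of the scheduler hunks that follow replace the same open-coded sequence with qdisc_purge_queue() (and, for the non-resetting case, qdisc_tree_flush_backlog()). Judging from the code these hunks delete, the helper bundles three steps: snapshot the child's queue length and backlog, reset the child, and propagate the removed counters up the qdisc tree. A sketch of what is being consolidated, with stub types standing in for the kernel's (not the actual definitions):

    struct fake_qdisc {
        unsigned int qlen;      /* packets queued */
        unsigned int backlog;   /* bytes queued */
    };

    static void qdisc_reset_stub(struct fake_qdisc *q)
    {
        q->qlen = 0;
        q->backlog = 0;
    }

    /* Stand-in for qdisc_tree_reduce_backlog(): tells every ancestor
     * that n packets / b bytes vanished so their counters stay honest. */
    static void tree_reduce_stub(struct fake_qdisc *q, unsigned int n,
                                 unsigned int b)
    {
        (void)q; (void)n; (void)b;
    }

    /* The pattern each caller used to spell out by hand. */
    static void purge_queue(struct fake_qdisc *q)
    {
        unsigned int qlen = q->qlen;
        unsigned int backlog = q->backlog;

        qdisc_reset_stub(q);
        tree_reduce_stub(q, qlen, backlog);
    }

    int main(void)
    {
        struct fake_qdisc q = { 3, 4500 };
        purge_queue(&q);
        return q.qlen;   /* 0 */
    }
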
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 09b800991065..430df9a55ec4 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
 	return container_of(clc, struct drr_class, common);
 }
 
-static void drr_purge_queue(struct drr_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
 	[TCA_DRR_QUANTUM] = { .type = NLA_U32 },
 };
@@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	drr_purge_queue(cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
 
 	sch_tree_unlock(sch);
@@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 				struct gnet_dump *d)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
-	__u32 qlen = cl->qdisc->q.qlen;
+	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
+	struct Qdisc *cl_q = cl->qdisc;
 	struct tc_drr_stats xstats;
 
 	memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 24cc220a3218..d2ab463f22ae 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -845,16 +845,6 @@ qdisc_peek_len(struct Qdisc *sch)
 }
 
 static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
-static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
 	struct hfsc_class *p;
@@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
 	list_add_tail(&cl->siblings, &parent->children);
 	if (parent->level == 0)
-		hfsc_purge_queue(sch, parent);
+		qdisc_purge_queue(parent->qdisc);
 	hfsc_adjust_levels(parent);
 	sch_tree_unlock(sch);
 
@@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
 	list_del(&cl->siblings);
 	hfsc_adjust_levels(cl->cl_parent);
 
-	hfsc_purge_queue(sch, cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
 
 	sch_tree_unlock(sch);
@@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 	struct tc_hfsc_stats xstats;
+	__u32 qlen;
 
-	cl->qstats.backlog = cl->qdisc->qstats.backlog;
+	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
 	xstats.level  = cl->level;
 	xstats.period = cl->cl_vtperiod;
 	xstats.work   = cl->cl_total;
@@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 30f9da7e1076..2f9883b196e8 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 	};
 	__u32 qlen = 0;
 
-	if (!cl->level && cl->leaf.q) {
-		qlen = cl->leaf.q->q.qlen;
-		qs.backlog = cl->leaf.q->qstats.backlog;
-	}
+	if (!cl->level && cl->leaf.q)
+		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
 				    INT_MIN, INT_MAX);
 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	if (!cl->level) {
-		unsigned int qlen = cl->leaf.q->q.qlen;
-		unsigned int backlog = cl->leaf.q->qstats.backlog;
-
-		qdisc_reset(cl->leaf.q);
-		qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
-	}
+	if (!cl->level)
+		qdisc_purge_queue(cl->leaf.q);
 
 	/* delete from hash and active; remainder in destroy_class */
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 					  classid, NULL);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
-			unsigned int qlen = parent->leaf.q->q.qlen;
-			unsigned int backlog = parent->leaf.q->qstats.backlog;
-
 			/* turn parent into inner node */
-			qdisc_reset(parent->leaf.q);
-			qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+			qdisc_purge_queue(parent->leaf.q);
 			qdisc_put(parent->leaf.q);
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 203659bc3906..3a3312467692 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
 	sch = dev_queue->qdisc_sleeping;
 	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
 	return 0;
 }
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index d364e63c396d..ea0dc112b38d 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		sch = dev_queue->qdisc_sleeping;
 		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 					  d, NULL, &sch->bstats) < 0 ||
-		    gnet_stats_copy_queue(d, NULL,
-					  &sch->qstats, sch->q.qlen) < 0)
+		    qdisc_qstats_copy(d, sch) < 0)
 			return -1;
 	}
 	return 0;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 7410ce4d0321..35b03ae08e0f 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 	for (i = q->bands; i < q->max_bands; i++) {
 		if (q->queues[i] != &noop_qdisc) {
 			struct Qdisc *child = q->queues[i];
+
 			q->queues[i] = &noop_qdisc;
-			qdisc_tree_reduce_backlog(child, child->q.qlen,
-						  child->qstats.backlog);
+			qdisc_tree_flush_backlog(child);
 			qdisc_put(child);
 		}
 	}
@@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 			qdisc_hash_add(child, true);
 
 			if (old != &noop_qdisc) {
-				qdisc_tree_reduce_backlog(old,
-							  old->q.qlen,
-							  old->qstats.backlog);
+				qdisc_tree_flush_backlog(old);
 				qdisc_put(old);
 			}
 			sch_tree_unlock(sch);
@@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	cl_q = q->queues[cl - 1];
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl_q->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl_q) < 0)
 		return -1;
 
 	return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 847141cd900f..d519b21535b3 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-	for (i = q->bands; i < oldbands; i++) {
-		struct Qdisc *child = q->queues[i];
-
-		qdisc_tree_reduce_backlog(child, child->q.qlen,
-					  child->qstats.backlog);
-	}
+	for (i = q->bands; i < oldbands; i++)
+		qdisc_tree_flush_backlog(q->queues[i]);
 
 	for (i = oldbands; i < q->bands; i++) {
 		q->queues[i] = queues[i];
@@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	cl_q = q->queues[cl - 1];
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl_q->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl_q) < 0)
 		return -1;
 
 	return 0;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 29f5c4a24688..1589364b54da 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 	return container_of(clc, struct qfq_class, common);
 }
 
-static void qfq_purge_queue(struct qfq_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
 	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
 	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
@@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	qfq_purge_queue(cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
 
 	sch_tree_unlock(sch);
@@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL,
-				  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl->qdisc) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 9df9942340ea..4e8c0abf6194 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
 	if (child) {
-		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-					  q->qdisc->qstats.backlog);
+		qdisc_tree_flush_backlog(q->qdisc);
 		old_child = q->qdisc;
 		q->qdisc = child;
 	}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index bab506b01a32..2419fdb75966 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
 		qdisc_hash_add(child, true);
 	sch_tree_lock(sch);
 
-	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-				  q->qdisc->qstats.backlog);
+	qdisc_tree_flush_backlog(q->qdisc);
 	qdisc_put(q->qdisc);
 	q->qdisc = child;
 
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 206e4dbed12f..c7041999eb5d 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
 	sch = dev_queue->qdisc_sleeping;
 	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
 	return 0;
 }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7f272a9070c5..f71578dbb9e3 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 
 	sch_tree_lock(sch);
 	if (child) {
-		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-					  q->qdisc->qstats.backlog);
+		qdisc_tree_flush_backlog(q->qdisc);
 		qdisc_put(q->qdisc);
 		q->qdisc = child;
 	}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 6abc8b274270..951afdeea5e9 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -600,6 +600,7 @@ out:
 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
 	/* No address mapping for V4 sockets */
+	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
 	return sizeof(struct sockaddr_in);
 }
 
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 4ad3586da8f0..340a6e7c43a7 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
 	if (msg->rep_type)
 		tipc_tlv_init(msg->rep, msg->rep_type);
 
-	if (cmd->header)
-		(*cmd->header)(msg);
+	if (cmd->header) {
+		err = (*cmd->header)(msg);
+		if (err) {
+			kfree_skb(msg->rep);
+			msg->rep = NULL;
+			return err;
+		}
+	}
 
 	arg = nlmsg_new(0, GFP_KERNEL);
 	if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
 	if (!bearer)
 		return -EMSGSIZE;
 
-	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	len = TLV_GET_DATA_LEN(msg->req);
+	len -= offsetof(struct tipc_bearer_config, name);
+	if (len <= 0)
+		return -EINVAL;
+
+	len = min_t(int, len, TIPC_MAX_BEARER_NAME);
 	if (!string_is_valid(b->name, len))
 		return -EINVAL;
 
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	len = TLV_GET_DATA_LEN(msg->req);
+	len -= offsetof(struct tipc_link_config, name);
+	if (len <= 0)
+		return -EINVAL;
+
+	len = min_t(int, len, TIPC_MAX_LINK_NAME);
 	if (!string_is_valid(lc->name, len))
 		return -EINVAL;
 
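
Both tipc hunks fix the same bug: the old code clamped the raw TLV payload length and handed it to string_is_valid(), so a payload shorter than the header bytes preceding the name field let the validity scan read past the received data. The new code first subtracts the offset of the name member and rejects non-positive remainders. The check, as a standalone sketch with a simplified struct (hypothetical layout, not the tipc wire format):

    #include <stddef.h>

    struct link_config_like {
        unsigned int value;
        char name[60];   /* NUL-terminated on the wire, may be shorter */
    };

    #define NAME_MAX_LEN ((int)sizeof(((struct link_config_like *)0)->name))

    /* How many bytes of 'name' did the sender actually include?
     * Returns -1 when the payload stops before the name field begins. */
    static int usable_name_len(int payload_len)
    {
        int len = payload_len - (int)offsetof(struct link_config_like, name);

        if (len <= 0)
            return -1;
        return len < NAME_MAX_LEN ? len : NAME_MAX_LEN;
    }
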
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 4f821edeeae6..4741edf4bb1e 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1497,6 +1497,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 
 			return err;
 		}
+	} else {
+		*zc = false;
 	}
 
 	rxm->full_len -= padding_length(ctx, tls_ctx, skb);
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 2554a15ecf2b..76ca30cc4791 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -199,11 +199,8 @@ sub_cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
199 "$(if $(part-of-module),1,0)" "$(@)"; 199 "$(if $(part-of-module),1,0)" "$(@)";
200recordmcount_source := $(srctree)/scripts/recordmcount.pl 200recordmcount_source := $(srctree)/scripts/recordmcount.pl
201endif # BUILD_C_RECORDMCOUNT 201endif # BUILD_C_RECORDMCOUNT
202cmd_record_mcount = \ 202cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)), \
203 if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" = \ 203 $(sub_cmd_record_mcount))
204 "$(CC_FLAGS_FTRACE)" ]; then \
205 $(sub_cmd_record_mcount) \
206 fi
207endif # CC_USING_RECORD_MCOUNT 204endif # CC_USING_RECORD_MCOUNT
208endif # CONFIG_FTRACE_MCOUNT_RECORD 205endif # CONFIG_FTRACE_MCOUNT_RECORD
209 206
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 5b756278df13..a09333fd7cef 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5977,7 +5977,7 @@ sub process {
 			while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
 				$specifier = $1;
 				$extension = $2;
-				if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOx]/) {
+				if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) {
 					$bad_specifier = $specifier;
 					last;
 				}
diff --git a/scripts/coccinelle/free/put_device.cocci b/scripts/coccinelle/free/put_device.cocci
index 7395697e7f19..c9f071b0a0ab 100644
--- a/scripts/coccinelle/free/put_device.cocci
+++ b/scripts/coccinelle/free/put_device.cocci
@@ -32,6 +32,7 @@ if (id == NULL || ...) { ... return ...; }
 ( id
 | (T2)dev_get_drvdata(&id->dev)
 | (T3)platform_get_drvdata(id)
+| &id->dev
 );
 | return@p2 ...;
 )
diff --git a/scripts/coccinelle/misc/badty.cocci b/scripts/coccinelle/misc/badty.cocci
index 481cf301ccfc..08470362199c 100644
--- a/scripts/coccinelle/misc/badty.cocci
+++ b/scripts/coccinelle/misc/badty.cocci
@@ -1,4 +1,4 @@
-/// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element
+/// Correct the size argument to alloc functions
 ///
 //# This makes an effort to find cases where the argument to sizeof is wrong
 //# in memory allocation functions by checking the type of the allocated memory
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index 611945611bf8..1dcfb288ee63 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -113,7 +113,8 @@ do_resize:
 	case KEY_DOWN:
 		break;
 	case KEY_BACKSPACE:
-	case 127:
+	case 8:   /* ^H */
+	case 127: /* ^? */
 		if (pos) {
 			wattrset(dialog, dlg.inputbox.atr);
 			if (input_x == 0) {
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index a4670f4e825a..ac92c0ded6c5 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
 			state->match_direction = FIND_NEXT_MATCH_UP;
 		*ans = get_mext_match(state->pattern,
 				state->match_direction);
-	} else if (key == KEY_BACKSPACE || key == 127) {
+	} else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
 		state->pattern[strlen(state->pattern)-1] = '\0';
 		adj_match_dir(&state->match_direction);
 	} else
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 7be620a1fcdb..77f525a8617c 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
 		case KEY_F(F_EXIT):
 		case KEY_F(F_BACK):
 			break;
-		case 127:
+		case 8:   /* ^H */
+		case 127: /* ^? */
 		case KEY_BACKSPACE:
 			if (cursor_position > 0) {
 				memmove(&result[cursor_position-1],
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0b0d1080b1c5..f277e116e0eb 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -639,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 			info->sechdrs[sym->st_shndx].sh_offset -
 			(info->hdr->e_type != ET_REL ?
 			 info->sechdrs[sym->st_shndx].sh_addr : 0);
-		crc = *crcp;
+		crc = TO_NATIVE(*crcp);
 	}
 	sym_update_crc(symname + strlen("__crc_"), mod, crc,
 		       export);
diff --git a/security/Kconfig b/security/Kconfig
index 1d6463fb1450..353cfef71d4e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -239,8 +239,46 @@ source "security/safesetid/Kconfig"
 
 source "security/integrity/Kconfig"
 
+choice
+	prompt "First legacy 'major LSM' to be initialized"
+	default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
+	default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
+	default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+	default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+	default DEFAULT_SECURITY_DAC
+
+	help
+	  This choice is there only for converting CONFIG_DEFAULT_SECURITY
+	  in old kernel configs to CONFIG_LSM in new kernel configs. Don't
+	  change this choice unless you are creating a fresh kernel config,
+	  for this choice will be ignored after CONFIG_LSM has been set.
+
+	  Selects the legacy "major security module" that will be
+	  initialized first. Overridden by non-default CONFIG_LSM.
+
+	config DEFAULT_SECURITY_SELINUX
+		bool "SELinux" if SECURITY_SELINUX=y
+
+	config DEFAULT_SECURITY_SMACK
+		bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
+
+	config DEFAULT_SECURITY_TOMOYO
+		bool "TOMOYO" if SECURITY_TOMOYO=y
+
+	config DEFAULT_SECURITY_APPARMOR
+		bool "AppArmor" if SECURITY_APPARMOR=y
+
+	config DEFAULT_SECURITY_DAC
+		bool "Unix Discretionary Access Controls"
+
+endchoice
+
 config LSM
 	string "Ordered list of enabled LSMs"
+	default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
+	default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
+	default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
+	default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
 	default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 	help
 	  A comma-separated list of LSMs, in initialization order.
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 57cc60722dd3..efac68556b45 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -206,7 +206,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
  * yama_task_free - check for task_pid to remove from exception list
  * @task: task being removed
  */
-void yama_task_free(struct task_struct *task)
+static void yama_task_free(struct task_struct *task)
 {
 	yama_ptracer_del(task, task);
 }
@@ -222,7 +222,7 @@ void yama_task_free(struct task_struct *task)
  * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
  * does not handle the given option.
  */
-int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
 		    unsigned long arg4, unsigned long arg5)
 {
 	int rc = -ENOSYS;
@@ -401,7 +401,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
  *
 * Returns 0 if following the ptrace is allowed, -ve on error.
  */
-int yama_ptrace_traceme(struct task_struct *parent)
+static int yama_ptrace_traceme(struct task_struct *parent)
 {
 	int rc = 0;
 
@@ -452,7 +452,7 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write,
 static int zero;
 static int max_scope = YAMA_SCOPE_NO_ATTACH;
 
-struct ctl_path yama_sysctl_path[] = {
+static struct ctl_path yama_sysctl_path[] = {
 	{ .procname = "kernel", },
 	{ .procname = "yama", },
 	{ }
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index d5b0d7ba83c4..f6ae68017608 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
 	oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
 			 params_channels(params) / 8;
 
+	err = snd_pcm_oss_period_size(substream, params, sparams);
+	if (err < 0)
+		goto failure;
+
+	n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+	if (err < 0)
+		goto failure;
+
+	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+				    runtime->oss.periods, NULL);
+	if (err < 0)
+		goto failure;
+
+	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+	if (err < 0) {
+		pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+		goto failure;
+	}
+
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
 	snd_pcm_oss_plugin_clear(substream);
 	if (!direct) {
@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
 	}
 #endif
 
-	err = snd_pcm_oss_period_size(substream, params, sparams);
-	if (err < 0)
-		goto failure;
-
-	n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
-	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
-	if (err < 0)
-		goto failure;
-
-	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
-				    runtime->oss.periods, NULL);
-	if (err < 0)
-		goto failure;
-
-	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
-	if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
-		pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
-		goto failure;
-	}
-
 	if (runtime->oss.trigger) {
 		sw_params->start_threshold = 1;
 	} else {
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f731f904e8cc..1d8452912b14 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+	switch (runtime->status->state) {
+	case SNDRV_PCM_STATE_SUSPENDED:
 		return -EBUSY;
+	/* unresumable PCM state; return -EBUSY for skipping suspend */
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_DISCONNECTED:
+		return -EBUSY;
+	}
 	runtime->trigger_master = substream;
 	return 0;
 }
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index ee601d7f0926..c0690d1ecd55 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <sound/rawmidi.h>
 #include <sound/info.h>
 #include <sound/control.h>
@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
 		return -ENXIO;
 	if (info->stream < 0 || info->stream > 1)
 		return -EINVAL;
+	info->stream = array_index_nospec(info->stream, 2);
 	pstr = &rmidi->streams[info->stream];
 	if (pstr->substream_count == 0)
 		return -ENOENT;
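
The rawmidi hunks are standard Spectre-v1 hardening: even after the architectural bounds check, a user-supplied index is clamped with array_index_nospec() so a mispredicted branch cannot steer a speculative out-of-bounds load. A rough standalone illustration of the masking idea (the kernel helper generates its mask more carefully to keep the compiler from reintroducing a branch; this is not its implementation):

    #include <stddef.h>

    /* Branchless clamp: returns idx when idx < size, else 0. */
    static size_t index_nospec_sketch(size_t idx, size_t size)
    {
        size_t mask = (size_t)0 - (size_t)(idx < size);  /* all-ones or zero */

        return idx & mask;
    }

    static int streams[2] = { 10, 20 };

    int stream_value(long stream)
    {
        if (stream < 0 || stream > 1)
            return -1;                       /* architectural check */
        /* speculative safety net on top of the check above */
        return streams[index_nospec_sketch((size_t)stream, 2)];
    }
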
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 278ebb993122..c93945917235 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -617,13 +617,14 @@ int
 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
 {
 	struct seq_oss_synth *rec;
+	struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
 
-	if (dev < 0 || dev >= dp->max_synthdev)
+	if (!info)
 		return -ENXIO;
 
-	if (dp->synths[dev].is_midi) {
+	if (info->is_midi) {
 		struct midi_info minf;
-		snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+		snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
 		inf->synth_type = SYNTH_TYPE_MIDI;
 		inf->synth_subtype = 0;
 		inf->nr_voices = 16;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 29882bda7632..e1ebc6d5f382 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1005,7 +1005,6 @@ struct ca0132_spec {
1005 unsigned int scp_resp_header; 1005 unsigned int scp_resp_header;
1006 unsigned int scp_resp_data[4]; 1006 unsigned int scp_resp_data[4];
1007 unsigned int scp_resp_count; 1007 unsigned int scp_resp_count;
1008 bool alt_firmware_present;
1009 bool startup_check_entered; 1008 bool startup_check_entered;
1010 bool dsp_reload; 1009 bool dsp_reload;
1011 1010
@@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
7518 bool dsp_loaded = false; 7517 bool dsp_loaded = false;
7519 struct ca0132_spec *spec = codec->spec; 7518 struct ca0132_spec *spec = codec->spec;
7520 const struct dsp_image_seg *dsp_os_image; 7519 const struct dsp_image_seg *dsp_os_image;
7521 const struct firmware *fw_entry; 7520 const struct firmware *fw_entry = NULL;
7522 /* 7521 /*
7523 * Alternate firmwares for different variants. The Recon3Di apparently 7522 * Alternate firmwares for different variants. The Recon3Di apparently
7524 * can use the default firmware, but I'll leave the option in case 7523 * can use the default firmware, but I'll leave the option in case
@@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
7529 case QUIRK_R3D: 7528 case QUIRK_R3D:
7530 case QUIRK_AE5: 7529 case QUIRK_AE5:
7531 if (request_firmware(&fw_entry, DESKTOP_EFX_FILE, 7530 if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
7532 codec->card->dev) != 0) { 7531 codec->card->dev) != 0)
7533 codec_dbg(codec, "Desktop firmware not found."); 7532 codec_dbg(codec, "Desktop firmware not found.");
7534 spec->alt_firmware_present = false; 7533 else
7535 } else {
7536 codec_dbg(codec, "Desktop firmware selected."); 7534 codec_dbg(codec, "Desktop firmware selected.");
7537 spec->alt_firmware_present = true;
7538 }
7539 break; 7535 break;
7540 case QUIRK_R3DI: 7536 case QUIRK_R3DI:
7541 if (request_firmware(&fw_entry, R3DI_EFX_FILE, 7537 if (request_firmware(&fw_entry, R3DI_EFX_FILE,
7542 codec->card->dev) != 0) { 7538 codec->card->dev) != 0)
7543 codec_dbg(codec, "Recon3Di alt firmware not detected."); 7539 codec_dbg(codec, "Recon3Di alt firmware not detected.");
7544 spec->alt_firmware_present = false; 7540 else
7545 } else {
7546 codec_dbg(codec, "Recon3Di firmware selected."); 7541 codec_dbg(codec, "Recon3Di firmware selected.");
7547 spec->alt_firmware_present = true;
7548 }
7549 break; 7542 break;
7550 default: 7543 default:
7551 spec->alt_firmware_present = false;
7552 break; 7544 break;
7553 } 7545 }
7554 /* 7546 /*
7555 * Use default ctefx.bin if no alt firmware is detected, or if none 7547 * Use default ctefx.bin if no alt firmware is detected, or if none
7556 * exists for your particular codec. 7548 * exists for your particular codec.
7557 */ 7549 */
7558 if (!spec->alt_firmware_present) { 7550 if (!fw_entry) {
7559 codec_dbg(codec, "Default firmware selected."); 7551 codec_dbg(codec, "Default firmware selected.");
7560 if (request_firmware(&fw_entry, EFX_FILE, 7552 if (request_firmware(&fw_entry, EFX_FILE,
7561 codec->card->dev) != 0) 7553 codec->card->dev) != 0)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 191830d4fa40..a3fb3d4c5730 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5688,6 +5688,8 @@ enum {
5688 ALC225_FIXUP_WYSE_AUTO_MUTE, 5688 ALC225_FIXUP_WYSE_AUTO_MUTE,
5689 ALC225_FIXUP_WYSE_DISABLE_MIC_VREF, 5689 ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
5690 ALC286_FIXUP_ACER_AIO_HEADSET_MIC, 5690 ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
5691 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
5692 ALC299_FIXUP_PREDATOR_SPK,
5691}; 5693};
5692 5694
5693static const struct hda_fixup alc269_fixups[] = { 5695static const struct hda_fixup alc269_fixups[] = {
@@ -6696,6 +6698,22 @@ static const struct hda_fixup alc269_fixups[] = {
6696 .chained = true, 6698 .chained = true,
6697 .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE 6699 .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
6698 }, 6700 },
6701 [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
6702 .type = HDA_FIXUP_PINS,
6703 .v.pins = (const struct hda_pintbl[]) {
6704 { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
6705 { }
6706 },
6707 .chained = true,
6708 .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
6709 },
6710 [ALC299_FIXUP_PREDATOR_SPK] = {
6711 .type = HDA_FIXUP_PINS,
6712 .v.pins = (const struct hda_pintbl[]) {
6713 { 0x21, 0x90170150 }, /* use as internal speaker */
6714 { }
6715 }
6716 },
6699}; 6717};
6700 6718
6701static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6719static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6712,9 +6730,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6712 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), 6730 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
6713 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), 6731 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
6714 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), 6732 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
6733 SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
6734 SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
6735 SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
6715 SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), 6736 SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
6716 SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), 6737 SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
6717 SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), 6738 SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
6739 SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
6718 SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), 6740 SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
6719 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 6741 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
6720 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), 6742 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -7111,6 +7133,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7111 {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"}, 7133 {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
7112 {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"}, 7134 {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
7113 {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"}, 7135 {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
7136 {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
7114 {} 7137 {}
7115}; 7138};
7116#define ALC225_STANDARD_PINS \ 7139#define ALC225_STANDARD_PINS \
@@ -7331,6 +7354,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7331 {0x14, 0x90170110}, 7354 {0x14, 0x90170110},
7332 {0x1b, 0x90a70130}, 7355 {0x1b, 0x90a70130},
7333 {0x21, 0x03211020}), 7356 {0x21, 0x03211020}),
7357 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
7358 {0x12, 0x90a60130},
7359 {0x14, 0x90170110},
7360 {0x21, 0x03211020}),
7361 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
7362 {0x12, 0x90a60130},
7363 {0x14, 0x90170110},
7364 {0x21, 0x04211020}),
7365 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
7366 {0x1a, 0x90a70130},
7367 {0x1b, 0x90170110},
7368 {0x21, 0x03211020}),
7334 SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, 7369 SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
7335 {0x12, 0xb7a60130}, 7370 {0x12, 0xb7a60130},
7336 {0x13, 0xb8a61140}, 7371 {0x13, 0xb8a61140},
diff --git a/tools/arch/alpha/include/uapi/asm/mman.h b/tools/arch/alpha/include/uapi/asm/mman.h
index c317d3e6867a..ea6a255ae61f 100644
--- a/tools/arch/alpha/include/uapi/asm/mman.h
+++ b/tools/arch/alpha/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
27#define MAP_NONBLOCK 0x40000 27#define MAP_NONBLOCK 0x40000
28#define MAP_NORESERVE 0x10000 28#define MAP_NORESERVE 0x10000
29#define MAP_POPULATE 0x20000 29#define MAP_POPULATE 0x20000
30#define MAP_PRIVATE 0x02
31#define MAP_SHARED 0x01
32#define MAP_STACK 0x80000 30#define MAP_STACK 0x80000
33#define PROT_EXEC 0x4 31#define PROT_EXEC 0x4
34#define PROT_GROWSDOWN 0x01000000 32#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/mips/include/uapi/asm/mman.h b/tools/arch/mips/include/uapi/asm/mman.h
index de2206883abc..c8acaa138d46 100644
--- a/tools/arch/mips/include/uapi/asm/mman.h
+++ b/tools/arch/mips/include/uapi/asm/mman.h
@@ -28,8 +28,6 @@
28#define MAP_NONBLOCK 0x20000 28#define MAP_NONBLOCK 0x20000
29#define MAP_NORESERVE 0x0400 29#define MAP_NORESERVE 0x0400
30#define MAP_POPULATE 0x10000 30#define MAP_POPULATE 0x10000
31#define MAP_PRIVATE 0x002
32#define MAP_SHARED 0x001
33#define MAP_STACK 0x40000 31#define MAP_STACK 0x40000
34#define PROT_EXEC 0x04 32#define PROT_EXEC 0x04
35#define PROT_GROWSDOWN 0x01000000 33#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h
index 1bd78758bde9..f9fd1325f5bd 100644
--- a/tools/arch/parisc/include/uapi/asm/mman.h
+++ b/tools/arch/parisc/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
27#define MAP_NONBLOCK 0x20000 27#define MAP_NONBLOCK 0x20000
28#define MAP_NORESERVE 0x4000 28#define MAP_NORESERVE 0x4000
29#define MAP_POPULATE 0x10000 29#define MAP_POPULATE 0x10000
30#define MAP_PRIVATE 0x02
31#define MAP_SHARED 0x01
32#define MAP_STACK 0x40000 30#define MAP_STACK 0x40000
33#define PROT_EXEC 0x4 31#define PROT_EXEC 0x4
34#define PROT_GROWSDOWN 0x01000000 32#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 8c876c166ef2..26ca425f4c2c 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
463#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58) 463#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
464#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57) 464#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
465#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56) 465#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
466#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54)
466 467
467#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63) 468#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
468#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62) 469#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
469#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61) 470#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
471#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58)
470 472
471/* Per-vcpu XICS interrupt controller state */ 473/* Per-vcpu XICS interrupt controller state */
472#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c) 474#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 6d6122524711..981ff9479648 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -344,6 +344,7 @@
344/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ 344/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
345#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ 345#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
346#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ 346#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
347#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
347#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ 348#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
348#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ 349#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
349#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ 350#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
diff --git a/tools/arch/xtensa/include/uapi/asm/mman.h b/tools/arch/xtensa/include/uapi/asm/mman.h
index 34dde6f44dae..f2b08c990afc 100644
--- a/tools/arch/xtensa/include/uapi/asm/mman.h
+++ b/tools/arch/xtensa/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
27#define MAP_NONBLOCK 0x20000 27#define MAP_NONBLOCK 0x20000
28#define MAP_NORESERVE 0x0400 28#define MAP_NORESERVE 0x0400
29#define MAP_POPULATE 0x10000 29#define MAP_POPULATE 0x10000
30#define MAP_PRIVATE 0x002
31#define MAP_SHARED 0x001
32#define MAP_STACK 0x40000 30#define MAP_STACK 0x40000
33#define PROT_EXEC 0x4 31#define PROT_EXEC 0x4
34#define PROT_GROWSDOWN 0x01000000 32#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
index d68eb4fb40cc..2b0e02c38870 100644
--- a/tools/build/feature/test-libopencsd.c
+++ b/tools/build/feature/test-libopencsd.c
@@ -4,9 +4,9 @@
4/* 4/*
5 * Check OpenCSD library version is sufficient to provide required features 5 * Check OpenCSD library version is sufficient to provide required features
6 */ 6 */
7#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0)) 7#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
8#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER) 8#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
9#error "OpenCSD >= 0.10.0 is required" 9#error "OpenCSD >= 0.11.0 is required"
10#endif 10#endif
11 11
12int main(void) 12int main(void)
diff --git a/tools/include/uapi/asm-generic/mman-common-tools.h b/tools/include/uapi/asm-generic/mman-common-tools.h
new file mode 100644
index 000000000000..af7d0d3a3182
--- /dev/null
+++ b/tools/include/uapi/asm-generic/mman-common-tools.h
@@ -0,0 +1,23 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
3#define __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
4
5#include <asm-generic/mman-common.h>
6
7/* We need this because tools/include/uapi/ is in the tools header search
8 * path, giving access to definitions that are not yet in the system's copy
9 * of the files in that directory. But since this cset:
10 *
11 * 746c9398f5ac ("arch: move common mmap flags to linux/mman.h")
12 *
13 * the system's sys/mman.h no longer finds the MAP_SHARED and MAP_PRIVATE
14 * defines, because they are no longer in our copy of asm-generic/mman-common.h.
15 * So we define them here and include this header from each of the per-arch
16 * mman.h headers.
17 */
18#ifndef MAP_SHARED
19#define MAP_SHARED 0x01 /* Share changes */
20#define MAP_PRIVATE 0x02 /* Changes are private */
21#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
22#endif
23#endif // __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index e7ee32861d51..abd238d0f7a4 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -15,9 +15,7 @@
15#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ 15#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
16#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ 16#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
17 17
18#define MAP_SHARED 0x01 /* Share changes */ 18/* 0x01 - 0x03 are defined in linux/mman.h */
19#define MAP_PRIVATE 0x02 /* Changes are private */
20#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
21#define MAP_TYPE 0x0f /* Mask for type of mapping */ 19#define MAP_TYPE 0x0f /* Mask for type of mapping */
22#define MAP_FIXED 0x10 /* Interpret addr exactly */ 20#define MAP_FIXED 0x10 /* Interpret addr exactly */
23#define MAP_ANONYMOUS 0x20 /* don't use a file */ 21#define MAP_ANONYMOUS 0x20 /* don't use a file */
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 653687d9771b..36c197fc44a0 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -2,7 +2,7 @@
2#ifndef __ASM_GENERIC_MMAN_H 2#ifndef __ASM_GENERIC_MMAN_H
3#define __ASM_GENERIC_MMAN_H 3#define __ASM_GENERIC_MMAN_H
4 4
5#include <asm-generic/mman-common.h> 5#include <asm-generic/mman-common-tools.h>
6 6
7#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
8#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 8#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 12cdf611d217..dee7292e1df6 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -824,8 +824,17 @@ __SYSCALL(__NR_futex_time64, sys_futex)
824__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval) 824__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
825#endif 825#endif
826 826
827#define __NR_pidfd_send_signal 424
828__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
829#define __NR_io_uring_setup 425
830__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
831#define __NR_io_uring_enter 426
832__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
833#define __NR_io_uring_register 427
834__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
835
827#undef __NR_syscalls 836#undef __NR_syscalls
828#define __NR_syscalls 424 837#define __NR_syscalls 428
829 838
830/* 839/*
831 * 32 bit systems traditionally used different 840 * 32 bit systems traditionally used different
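
Since no libc wrappers existed for these calls at the time, the new numbers can be exercised directly with syscall(2). A hedged sketch, assuming <linux/io_uring.h> is installed and with error handling pared down:

	#include <linux/io_uring.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct io_uring_params p;
		long fd;

		memset(&p, 0, sizeof(p));
		/* 425 == __NR_io_uring_setup in the table above */
		fd = syscall(425, 4 /* entries */, &p);
		if (fd < 0) {
			perror("io_uring_setup");
			return 1;
		}
		printf("sq entries: %u, cq entries: %u\n", p.sq_entries, p.cq_entries);
		return 0;
	}
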
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 298b2e197744..397810fa2d33 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param {
1486#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ 1486#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1487#define I915_CONTEXT_DEFAULT_PRIORITY 0 1487#define I915_CONTEXT_DEFAULT_PRIORITY 0
1488#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ 1488#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1489 /*
1490 * When using the following param, value should be a pointer to
1491 * drm_i915_gem_context_param_sseu.
1492 */
1493#define I915_CONTEXT_PARAM_SSEU 0x7
1489 __u64 value; 1494 __u64 value;
1490}; 1495};
1491 1496
1497/**
1498 * Context SSEU programming
1499 *
1500 * It may be necessary, for either functional or performance reasons, to
1501 * configure a context to run with a reduced number of SSEU (where SSEU
1502 * stands for Slice/Sub-slice/EU).
1503 *
1504 * This is done by configuring SSEU configuration using the below
1505 * @struct drm_i915_gem_context_param_sseu for every supported engine which
1506 * userspace intends to use.
1507 *
1508 * Not all GPUs or engines support this functionality, in which case an
1509 * error code of -ENODEV is returned.
1510 *
1511 * Also, the flexibility of possible SSEU configurations varies between GPU
1512 * generations and is subject to software-imposed limitations. Requesting an
1513 * unsupported combination returns an error code of -EINVAL.
1514 *
1515 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1516 * favour of a single global setting.
1517 */
1518struct drm_i915_gem_context_param_sseu {
1519 /*
1520 * Engine class & instance to be configured or queried.
1521 */
1522 __u16 engine_class;
1523 __u16 engine_instance;
1524
1525 /*
1526 * Unused for now. Must be cleared to zero.
1527 */
1528 __u32 flags;
1529
1530 /*
1531 * Mask of slices to enable for the context. Valid values are a subset
1532 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1533 */
1534 __u64 slice_mask;
1535
1536 /*
1537 * Mask of subslices to enable for the context. Valid values are a
1538 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
1539 */
1540 __u64 subslice_mask;
1541
1542 /*
1543 * Minimum/Maximum number of EUs to enable per subslice for the
1544 * context. min_eus_per_subslice must be less than or equal to
1545 * max_eus_per_subslice.
1546 */
1547 __u16 min_eus_per_subslice;
1548 __u16 max_eus_per_subslice;
1549
1550 /*
1551 * Unused for now. Must be cleared to zero.
1552 */
1553 __u32 rsvd;
1554};
1555
1492enum drm_i915_oa_format { 1556enum drm_i915_oa_format {
1493 I915_OA_FORMAT_A13 = 1, /* HSW only */ 1557 I915_OA_FORMAT_A13 = 1, /* HSW only */
1494 I915_OA_FORMAT_A29, /* HSW only */ 1558 I915_OA_FORMAT_A29, /* HSW only */
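
A hedged sketch of how userspace would drive the new parameter; the engine and mask values here are illustrative assumptions, not validated against any particular GPU:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Illustrative only: restrict a context's render engine to one slice. */
	static int set_one_slice(int drm_fd, uint32_t ctx_id)
	{
		struct drm_i915_gem_context_param_sseu sseu;
		struct drm_i915_gem_context_param arg;

		memset(&sseu, 0, sizeof(sseu));
		sseu.engine_class = 0;		/* render engine */
		sseu.engine_instance = 0;
		sseu.slice_mask = 0x1;		/* subset of I915_PARAM_SLICE_MASK */
		sseu.subslice_mask = 0x1;	/* assumed valid for the target GPU */
		sseu.min_eus_per_subslice = 8;	/* assumed; must be <= max */
		sseu.max_eus_per_subslice = 8;

		memset(&arg, 0, sizeof(arg));
		arg.ctx_id = ctx_id;
		arg.param = I915_CONTEXT_PARAM_SSEU;
		arg.size = sizeof(sseu);
		arg.value = (uint64_t)(uintptr_t)&sseu;

		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
	}
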
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index 6448cdd9a350..a2f8658f1c55 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -41,6 +41,7 @@
41#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */ 41#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
42#define F_SEAL_GROW 0x0004 /* prevent file from growing */ 42#define F_SEAL_GROW 0x0004 /* prevent file from growing */
43#define F_SEAL_WRITE 0x0008 /* prevent writes */ 43#define F_SEAL_WRITE 0x0008 /* prevent writes */
44#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
44/* (1U << 31) is reserved for signed error codes */ 45/* (1U << 31) is reserved for signed error codes */
45 46
46/* 47/*
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h
index d0f515d53299..fc1a64c3447b 100644
--- a/tools/include/uapi/linux/mman.h
+++ b/tools/include/uapi/linux/mman.h
@@ -12,6 +12,10 @@
12#define OVERCOMMIT_ALWAYS 1 12#define OVERCOMMIT_ALWAYS 1
13#define OVERCOMMIT_NEVER 2 13#define OVERCOMMIT_NEVER 2
14 14
15#define MAP_SHARED 0x01 /* Share changes */
16#define MAP_PRIVATE 0x02 /* Changes are private */
17#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
18
15/* 19/*
16 * Huge page size encoding when MAP_HUGETLB is specified, and a huge page 20 * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
17 * size other than the default is desired. See hugetlb_encode.h. 21 * size other than the default is desired. See hugetlb_encode.h.
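
For context on why 0x03 is its own mapping type: MAP_SHARED_VALIDATE makes the kernel reject unknown flag bits instead of silently dropping them, which is what lets callers depend on flags such as MAP_SYNC. A minimal sketch, assuming a libc and kernel that expose MAP_SYNC (e.g. for DAX-backed files):

	#define _GNU_SOURCE
	#include <sys/mman.h>

	/* MAP_SYNC is only honoured together with MAP_SHARED_VALIDATE; a plain
	 * MAP_SHARED would let an older kernel ignore the unknown bit silently. */
	void *map_sync(int fd, size_t len)
	{
		return mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	}
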
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 5bf8e52c41fc..8e7c56e9590f 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -177,7 +177,7 @@ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
177 177
178$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN) 178$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
179 $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \ 179 $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
180 -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@ 180 -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
181 @ln -sf $(@F) $(OUTPUT)libbpf.so 181 @ln -sf $(@F) $(OUTPUT)libbpf.so
182 @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION) 182 @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
183 183
@@ -220,8 +220,9 @@ install_lib: all_cmd
220install_headers: 220install_headers:
221 $(call QUIET_INSTALL, headers) \ 221 $(call QUIET_INSTALL, headers) \
222 $(call do_install,bpf.h,$(prefix)/include/bpf,644); \ 222 $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
223 $(call do_install,libbpf.h,$(prefix)/include/bpf,644); 223 $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
224 $(call do_install,btf.h,$(prefix)/include/bpf,644); 224 $(call do_install,btf.h,$(prefix)/include/bpf,644); \
225 $(call do_install,xsk.h,$(prefix)/include/bpf,644);
225 226
226install: install_lib 227install: install_lib
227 228
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 87e3020ac1bc..cf119c9b6f27 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -2107,6 +2107,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2107 return fwd_kind == real_kind; 2107 return fwd_kind == real_kind;
2108 } 2108 }
2109 2109
2110 if (cand_kind != canon_kind)
2111 return 0;
2112
2110 switch (cand_kind) { 2113 switch (cand_kind) {
2111 case BTF_KIND_INT: 2114 case BTF_KIND_INT:
2112 return btf_equal_int(cand_type, canon_type); 2115 return btf_equal_int(cand_type, canon_type);
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index c9d038f91af6..53f8be0f4a1f 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -25,14 +25,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
25OBJTOOL := $(OUTPUT)objtool 25OBJTOOL := $(OUTPUT)objtool
26OBJTOOL_IN := $(OBJTOOL)-in.o 26OBJTOOL_IN := $(OBJTOOL)-in.o
27 27
28LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
29LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
30
28all: $(OBJTOOL) 31all: $(OBJTOOL)
29 32
30INCLUDES := -I$(srctree)/tools/include \ 33INCLUDES := -I$(srctree)/tools/include \
31 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ 34 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
32 -I$(srctree)/tools/objtool/arch/$(ARCH)/include 35 -I$(srctree)/tools/objtool/arch/$(ARCH)/include
33WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed 36WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
34CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) 37CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
35LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) 38LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
36 39
37# Allow old libelf to be used: 40# Allow old libelf to be used:
38elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) 41elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 01f7555fd933..e8c9f77e9010 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -481,8 +481,8 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t
481mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c 481mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
482mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh 482mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
483 483
484$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl) 484$(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
485 $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@ 485 $(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
486 486
487mount_flags_array := $(beauty_outdir)/mount_flags_array.c 487mount_flags_array := $(beauty_outdir)/mount_flags_array.c
488mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh 488mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 2ae92fddb6d5..92ee0b4378d4 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -345,6 +345,10 @@
345334 common rseq __x64_sys_rseq 345334 common rseq __x64_sys_rseq
346# don't use numbers 387 through 423, add new calls after the last 346# don't use numbers 387 through 423, add new calls after the last
347# 'common' entry 347# 'common' entry
348424 common pidfd_send_signal __x64_sys_pidfd_send_signal
349425 common io_uring_setup __x64_sys_io_uring_setup
350426 common io_uring_enter __x64_sys_io_uring_enter
351427 common io_uring_register __x64_sys_io_uring_register
348 352
349# 353#
350# x32-specific system call numbers start at 512 to avoid cache impact 354# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 7b55613924de..c68ee06cae63 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -103,7 +103,7 @@ done
103# diff with extra ignore lines 103# diff with extra ignore lines
104check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' 104check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
105check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' 105check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
106check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"' 106check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
107check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"' 107check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
108 108
109# diff non-symmetric files 109# diff non-symmetric files
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index e38518cdcbc3..74ef92f1d19a 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -107,6 +107,7 @@ import os
107from PySide.QtCore import * 107from PySide.QtCore import *
108from PySide.QtGui import * 108from PySide.QtGui import *
109from PySide.QtSql import * 109from PySide.QtSql import *
110pyside_version_1 = True
110from decimal import * 111from decimal import *
111from ctypes import * 112from ctypes import *
112from multiprocessing import Process, Array, Value, Event 113from multiprocessing import Process, Array, Value, Event
@@ -1526,6 +1527,19 @@ def BranchDataPrep(query):
1526 " (" + dsoname(query.value(15)) + ")") 1527 " (" + dsoname(query.value(15)) + ")")
1527 return data 1528 return data
1528 1529
1530def BranchDataPrepWA(query):
1531 data = []
1532 data.append(query.value(0))
1533 # Work around PySide failing to handle large integers (i.e. time) in python3 by converting to a string
1534 data.append("{:>19}".format(query.value(1)))
1535 for i in xrange(2, 8):
1536 data.append(query.value(i))
1537 data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
1538 " (" + dsoname(query.value(11)) + ")" + " -> " +
1539 tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
1540 " (" + dsoname(query.value(15)) + ")")
1541 return data
1542
1529# Branch data model 1543# Branch data model
1530 1544
1531class BranchModel(TreeModel): 1545class BranchModel(TreeModel):
@@ -1553,7 +1567,11 @@ class BranchModel(TreeModel):
1553 " AND evsel_id = " + str(self.event_id) + 1567 " AND evsel_id = " + str(self.event_id) +
1554 " ORDER BY samples.id" 1568 " ORDER BY samples.id"
1555 " LIMIT " + str(glb_chunk_sz)) 1569 " LIMIT " + str(glb_chunk_sz))
1556 self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample) 1570 if pyside_version_1 and sys.version_info[0] == 3:
1571 prep = BranchDataPrepWA
1572 else:
1573 prep = BranchDataPrep
1574 self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
1557 self.fetcher.done.connect(self.Update) 1575 self.fetcher.done.connect(self.Update)
1558 self.fetcher.Fetch(glb_chunk_sz) 1576 self.fetcher.Fetch(glb_chunk_sz)
1559 1577
@@ -2079,14 +2097,6 @@ def IsSelectable(db, table, sql = ""):
2079 return False 2097 return False
2080 return True 2098 return True
2081 2099
2082# SQL data preparation
2083
2084def SQLTableDataPrep(query, count):
2085 data = []
2086 for i in xrange(count):
2087 data.append(query.value(i))
2088 return data
2089
2090# SQL table data model item 2100# SQL table data model item
2091 2101
2092class SQLTableItem(): 2102class SQLTableItem():
@@ -2110,7 +2120,7 @@ class SQLTableModel(TableModel):
2110 self.more = True 2120 self.more = True
2111 self.populated = 0 2121 self.populated = 0
2112 self.column_headers = column_headers 2122 self.column_headers = column_headers
2113 self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample) 2123 self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
2114 self.fetcher.done.connect(self.Update) 2124 self.fetcher.done.connect(self.Update)
2115 self.fetcher.Fetch(glb_chunk_sz) 2125 self.fetcher.Fetch(glb_chunk_sz)
2116 2126
@@ -2154,6 +2164,12 @@ class SQLTableModel(TableModel):
2154 def columnHeader(self, column): 2164 def columnHeader(self, column):
2155 return self.column_headers[column] 2165 return self.column_headers[column]
2156 2166
2167 def SQLTableDataPrep(self, query, count):
2168 data = []
2169 for i in xrange(count):
2170 data.append(query.value(i))
2171 return data
2172
2157# SQL automatic table data model 2173# SQL automatic table data model
2158 2174
2159class SQLAutoTableModel(SQLTableModel): 2175class SQLAutoTableModel(SQLTableModel):
@@ -2182,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel):
2182 QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'") 2198 QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
2183 while query.next(): 2199 while query.next():
2184 column_headers.append(query.value(0)) 2200 column_headers.append(query.value(0))
2201 if pyside_version_1 and sys.version_info[0] == 3:
2202 if table_name == "samples_view":
2203 self.SQLTableDataPrep = self.samples_view_DataPrep
2204 if table_name == "samples":
2205 self.SQLTableDataPrep = self.samples_DataPrep
2185 super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent) 2206 super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
2186 2207
2208 def samples_view_DataPrep(self, query, count):
2209 data = []
2210 data.append(query.value(0))
2211 # Work around PySide failing to handle large integers (i.e. time) in python3 by converting to a string
2212 data.append("{:>19}".format(query.value(1)))
2213 for i in xrange(2, count):
2214 data.append(query.value(i))
2215 return data
2216
2217 def samples_DataPrep(self, query, count):
2218 data = []
2219 for i in xrange(9):
2220 data.append(query.value(i))
2221 # Work around PySide failing to handle large integers (i.e. time) in python3 by converting to a string
2222 data.append("{:>19}".format(query.value(9)))
2223 for i in xrange(10, count):
2224 data.append(query.value(i))
2225 return data
2226
2187# Base class for custom ResizeColumnsToContents 2227# Base class for custom ResizeColumnsToContents
2188 2228
2189class ResizeColumnsToContentsBase(QObject): 2229class ResizeColumnsToContentsBase(QObject):
@@ -2868,9 +2908,13 @@ class LibXED():
2868 ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0) 2908 ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
2869 if not ok: 2909 if not ok:
2870 return 0, "" 2910 return 0, ""
2911 if sys.version_info[0] == 2:
2912 result = inst.buffer.value
2913 else:
2914 result = inst.buffer.value.decode()
2871 # Return instruction length and the disassembled instruction text 2915 # Return instruction length and the disassembled instruction text
2872 # For now, assume the length is in byte 166 2916 # For now, assume the length is in byte 166
2873 return inst.xedd[166], inst.buffer.value 2917 return inst.xedd[166], result
2874 2918
2875def TryOpen(file_name): 2919def TryOpen(file_name):
2876 try: 2920 try:
@@ -2886,9 +2930,14 @@ def Is64Bit(f):
2886 header = f.read(7) 2930 header = f.read(7)
2887 f.seek(pos) 2931 f.seek(pos)
2888 magic = header[0:4] 2932 magic = header[0:4]
2889 eclass = ord(header[4]) 2933 if sys.version_info[0] == 2:
2890 encoding = ord(header[5]) 2934 eclass = ord(header[4])
2891 version = ord(header[6]) 2935 encoding = ord(header[5])
2936 version = ord(header[6])
2937 else:
2938 eclass = header[4]
2939 encoding = header[5]
2940 version = header[6]
2892 if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: 2941 if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
2893 result = True if eclass == 2 else False 2942 result = True if eclass == 2 else False
2894 return result 2943 return result
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh
index 32bac9c0d694..5f5eefcb3c74 100755
--- a/tools/perf/trace/beauty/mmap_flags.sh
+++ b/tools/perf/trace/beauty/mmap_flags.sh
@@ -1,15 +1,18 @@
1#!/bin/sh 1#!/bin/sh
2# SPDX-License-Identifier: LGPL-2.1 2# SPDX-License-Identifier: LGPL-2.1
3 3
4if [ $# -ne 2 ] ; then 4if [ $# -ne 3 ] ; then
5 [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/` 5 [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/`
6 linux_header_dir=tools/include/uapi/linux
6 header_dir=tools/include/uapi/asm-generic 7 header_dir=tools/include/uapi/asm-generic
7 arch_header_dir=tools/arch/${hostarch}/include/uapi/asm 8 arch_header_dir=tools/arch/${hostarch}/include/uapi/asm
8else 9else
9 header_dir=$1 10 linux_header_dir=$1
10 arch_header_dir=$2 11 header_dir=$2
12 arch_header_dir=$3
11fi 13fi
12 14
15linux_mman=${linux_header_dir}/mman.h
13arch_mman=${arch_header_dir}/mman.h 16arch_mman=${arch_header_dir}/mman.h
14 17
15# those in egrep -vw are flags, we want just the bits 18# those in egrep -vw are flags, we want just the bits
@@ -20,6 +23,11 @@ egrep -q $regex ${arch_mman} && \
20(egrep $regex ${arch_mman} | \ 23(egrep $regex ${arch_mman} | \
21 sed -r "s/$regex/\2 \1/g" | \ 24 sed -r "s/$regex/\2 \1/g" | \
22 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") 25 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
26egrep -q $regex ${linux_mman} && \
27(egrep $regex ${linux_mman} | \
28 egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
29 sed -r "s/$regex/\2 \1/g" | \
30 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
23([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) && 31([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
24(egrep $regex ${header_dir}/mman-common.h | \ 32(egrep $regex ${header_dir}/mman-common.h | \
25 egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \ 33 egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index ba4c623cd8de..39fe21e1cf93 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -387,6 +387,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
387 break; 387 break;
388 case OCSD_INSTR_ISB: 388 case OCSD_INSTR_ISB:
389 case OCSD_INSTR_DSB_DMB: 389 case OCSD_INSTR_DSB_DMB:
390 case OCSD_INSTR_WFI_WFE:
390 case OCSD_INSTR_OTHER: 391 case OCSD_INSTR_OTHER:
391 default: 392 default:
392 packet->last_instr_taken_branch = false; 393 packet->last_instr_taken_branch = false;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index ec78e93085de..6689378ee577 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -231,35 +231,6 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
231 } 231 }
232} 232}
233 233
234void perf_event_attr__set_max_precise_ip(struct perf_event_attr *pattr)
235{
236 struct perf_event_attr attr = {
237 .type = PERF_TYPE_HARDWARE,
238 .config = PERF_COUNT_HW_CPU_CYCLES,
239 .exclude_kernel = 1,
240 .precise_ip = 3,
241 };
242
243 event_attr_init(&attr);
244
245 /*
246 * Unnamed union member, not supported as struct member named
247 * initializer in older compilers such as gcc 4.4.7
248 */
249 attr.sample_period = 1;
250
251 while (attr.precise_ip != 0) {
252 int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
253 if (fd != -1) {
254 close(fd);
255 break;
256 }
257 --attr.precise_ip;
258 }
259
260 pattr->precise_ip = attr.precise_ip;
261}
262
263int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise) 234int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
264{ 235{
265 struct perf_evsel *evsel = perf_evsel__new_cycles(precise); 236 struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index dcb68f34d2cd..6a94785b9100 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -315,8 +315,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
315void perf_evlist__set_tracking_event(struct perf_evlist *evlist, 315void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
316 struct perf_evsel *tracking_evsel); 316 struct perf_evsel *tracking_evsel);
317 317
318void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
319
320struct perf_evsel * 318struct perf_evsel *
321perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str); 319perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
322 320
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 7835e05f0c0a..66d066f18b5b 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -295,7 +295,6 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
295 if (!precise) 295 if (!precise)
296 goto new_event; 296 goto new_event;
297 297
298 perf_event_attr__set_max_precise_ip(&attr);
299 /* 298 /*
300 * Now let the usual logic to set up the perf_event_attr defaults 299 * Now let the usual logic to set up the perf_event_attr defaults
301 * to kick in when we return and before perf_evsel__open() is called. 300 * to kick in when we return and before perf_evsel__open() is called.
@@ -305,6 +304,8 @@ new_event:
305 if (evsel == NULL) 304 if (evsel == NULL)
306 goto out; 305 goto out;
307 306
307 evsel->precise_max = true;
308
308 /* use asprintf() because free(evsel) assumes name is allocated */ 309 /* use asprintf() because free(evsel) assumes name is allocated */
309 if (asprintf(&evsel->name, "cycles%s%s%.*s", 310 if (asprintf(&evsel->name, "cycles%s%s%.*s",
310 (attr.precise_ip || attr.exclude_kernel) ? ":" : "", 311 (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
@@ -1083,7 +1084,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
1083 } 1084 }
1084 1085
1085 if (evsel->precise_max) 1086 if (evsel->precise_max)
1086 perf_event_attr__set_max_precise_ip(attr); 1087 attr->precise_ip = 3;
1087 1088
1088 if (opts->all_user) { 1089 if (opts->all_user) {
1089 attr->exclude_kernel = 1; 1090 attr->exclude_kernel = 1;
@@ -1749,6 +1750,59 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
1749 return true; 1750 return true;
1750} 1751}
1751 1752
1753static void display_attr(struct perf_event_attr *attr)
1754{
1755 if (verbose >= 2) {
1756 fprintf(stderr, "%.60s\n", graph_dotted_line);
1757 fprintf(stderr, "perf_event_attr:\n");
1758 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
1759 fprintf(stderr, "%.60s\n", graph_dotted_line);
1760 }
1761}
1762
1763static int perf_event_open(struct perf_evsel *evsel,
1764 pid_t pid, int cpu, int group_fd,
1765 unsigned long flags)
1766{
1767 int precise_ip = evsel->attr.precise_ip;
1768 int fd;
1769
1770 while (1) {
1771 pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
1772 pid, cpu, group_fd, flags);
1773
1774 fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
1775 if (fd >= 0)
1776 break;
1777
1778 /*
1779 * Do quick precise_ip fallback if:
1780 * - precise_ip is set in perf_event_attr
1781 * - maximum precise is requested
1782 * - sys_perf_event_open() failed with an ENOTSUP error,
1783 * which is associated with a wrong precise_ip
1784 */
1785 if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP))
1786 break;
1787
1788 /*
1789 * We tried all the precise_ip values, and it's
1790 * still failing, so leave it to the standard fallback.
1791 */
1792 if (!evsel->attr.precise_ip) {
1793 evsel->attr.precise_ip = precise_ip;
1794 break;
1795 }
1796
1797 pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
1798 evsel->attr.precise_ip--;
1799 pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
1800 display_attr(&evsel->attr);
1801 }
1802
1803 return fd;
1804}
1805
1752int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 1806int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
1753 struct thread_map *threads) 1807 struct thread_map *threads)
1754{ 1808{
@@ -1824,12 +1878,7 @@ retry_sample_id:
1824 if (perf_missing_features.sample_id_all) 1878 if (perf_missing_features.sample_id_all)
1825 evsel->attr.sample_id_all = 0; 1879 evsel->attr.sample_id_all = 0;
1826 1880
1827 if (verbose >= 2) { 1881 display_attr(&evsel->attr);
1828 fprintf(stderr, "%.60s\n", graph_dotted_line);
1829 fprintf(stderr, "perf_event_attr:\n");
1830 perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
1831 fprintf(stderr, "%.60s\n", graph_dotted_line);
1832 }
1833 1882
1834 for (cpu = 0; cpu < cpus->nr; cpu++) { 1883 for (cpu = 0; cpu < cpus->nr; cpu++) {
1835 1884
@@ -1841,13 +1890,10 @@ retry_sample_id:
1841 1890
1842 group_fd = get_group_fd(evsel, cpu, thread); 1891 group_fd = get_group_fd(evsel, cpu, thread);
1843retry_open: 1892retry_open:
1844 pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
1845 pid, cpus->map[cpu], group_fd, flags);
1846
1847 test_attr__ready(); 1893 test_attr__ready();
1848 1894
1849 fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu], 1895 fd = perf_event_open(evsel, pid, cpus->map[cpu],
1850 group_fd, flags); 1896 group_fd, flags);
1851 1897
1852 FD(evsel, cpu, thread) = fd; 1898 FD(evsel, cpu, thread) = fd;
1853 1899
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 6e03db142091..872fab163585 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
251 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d)) 251 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
252 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n / 252 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
253 decoder->tsc_ctc_ratio_d; 253 decoder->tsc_ctc_ratio_d;
254
255 /*
256 * Allow for timestamps appearing to backwards because a TSC
257 * packet has slipped past a MTC packet, so allow 2 MTC ticks
258 * or ...
259 */
260 decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
261 decoder->tsc_ctc_ratio_n,
262 decoder->tsc_ctc_ratio_d);
263 } 254 }
264 /* ... or 0x100 paranoia */ 255
265 if (decoder->tsc_slip < 0x100) 256 /*
266 decoder->tsc_slip = 0x100; 257 * A TSC packet can slip past MTC packets so that the timestamp appears
258 * to go backwards. One estimate is that it can be up to about 40 CPU
259 * cycles, which is certainly less than 0x1000 TSC ticks, but accept
260 * slippage an order of magnitude more to be on the safe side.
261 */
262 decoder->tsc_slip = 0x10000;
267 263
268 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift); 264 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
269 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n); 265 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 61959aba7e27..3c520baa198c 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1421,6 +1421,20 @@ static void machine__set_kernel_mmap(struct machine *machine,
1421 machine->vmlinux_map->end = ~0ULL; 1421 machine->vmlinux_map->end = ~0ULL;
1422} 1422}
1423 1423
1424static void machine__update_kernel_mmap(struct machine *machine,
1425 u64 start, u64 end)
1426{
1427 struct map *map = machine__kernel_map(machine);
1428
1429 map__get(map);
1430 map_groups__remove(&machine->kmaps, map);
1431
1432 machine__set_kernel_mmap(machine, start, end);
1433
1434 map_groups__insert(&machine->kmaps, map);
1435 map__put(map);
1436}
1437
1424int machine__create_kernel_maps(struct machine *machine) 1438int machine__create_kernel_maps(struct machine *machine)
1425{ 1439{
1426 struct dso *kernel = machine__get_kernel(machine); 1440 struct dso *kernel = machine__get_kernel(machine);
@@ -1453,17 +1467,11 @@ int machine__create_kernel_maps(struct machine *machine)
1453 goto out_put; 1467 goto out_put;
1454 } 1468 }
1455 1469
1456 /* we have a real start address now, so re-order the kmaps */ 1470 /*
1457 map = machine__kernel_map(machine); 1471 * we have a real start address now, so re-order the kmaps
1458 1472 * assume it's the last in the kmaps
1459 map__get(map); 1473 */
1460 map_groups__remove(&machine->kmaps, map); 1474 machine__update_kernel_mmap(machine, addr, ~0ULL);
1461
1462 /* assume it's the last in the kmaps */
1463 machine__set_kernel_mmap(machine, addr, ~0ULL);
1464
1465 map_groups__insert(&machine->kmaps, map);
1466 map__put(map);
1467 } 1475 }
1468 1476
1469 if (machine__create_extra_kernel_maps(machine, kernel)) 1477 if (machine__create_extra_kernel_maps(machine, kernel))
@@ -1599,7 +1607,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1599 if (strstr(kernel->long_name, "vmlinux")) 1607 if (strstr(kernel->long_name, "vmlinux"))
1600 dso__set_short_name(kernel, "[kernel.vmlinux]", false); 1608 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1601 1609
1602 machine__set_kernel_mmap(machine, event->mmap.start, 1610 machine__update_kernel_mmap(machine, event->mmap.start,
1603 event->mmap.start + event->mmap.len); 1611 event->mmap.start + event->mmap.len);
1604 1612
1605 /* 1613 /*
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 6199a3174ab9..e0429f4ef335 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -732,10 +732,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
732 732
733 if (!is_arm_pmu_core(name)) { 733 if (!is_arm_pmu_core(name)) {
734 pname = pe->pmu ? pe->pmu : "cpu"; 734 pname = pe->pmu ? pe->pmu : "cpu";
735
736 /*
737 * uncore alias may be from different PMU
738 * with common prefix
739 */
740 if (pmu_is_uncore(name) &&
741 !strncmp(pname, name, strlen(pname)))
742 goto new_alias;
743
735 if (strcmp(pname, name)) 744 if (strcmp(pname, name))
736 continue; 745 continue;
737 } 746 }
738 747
748new_alias:
739 /* need type casts to override 'const' */ 749 /* need type casts to override 'const' */
740 __perf_pmu__new_alias(head, NULL, (char *)pe->name, 750 __perf_pmu__new_alias(head, NULL, (char *)pe->name,
741 (char *)pe->desc, (char *)pe->event, 751 (char *)pe->desc, (char *)pe->event,
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index c3fad065c89c..c7727be9719f 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -44,6 +44,7 @@
44#include <cpuid.h> 44#include <cpuid.h>
45#include <linux/capability.h> 45#include <linux/capability.h>
46#include <errno.h> 46#include <errno.h>
47#include <math.h>
47 48
48char *proc_stat = "/proc/stat"; 49char *proc_stat = "/proc/stat";
49FILE *outf; 50FILE *outf;
@@ -63,7 +64,6 @@ unsigned int dump_only;
63unsigned int do_snb_cstates; 64unsigned int do_snb_cstates;
64unsigned int do_knl_cstates; 65unsigned int do_knl_cstates;
65unsigned int do_slm_cstates; 66unsigned int do_slm_cstates;
66unsigned int do_cnl_cstates;
67unsigned int use_c1_residency_msr; 67unsigned int use_c1_residency_msr;
68unsigned int has_aperf; 68unsigned int has_aperf;
69unsigned int has_epb; 69unsigned int has_epb;
@@ -141,9 +141,21 @@ unsigned int first_counter_read = 1;
141 141
142#define RAPL_CORES_ENERGY_STATUS (1 << 9) 142#define RAPL_CORES_ENERGY_STATUS (1 << 9)
143 /* 0x639 MSR_PP0_ENERGY_STATUS */ 143 /* 0x639 MSR_PP0_ENERGY_STATUS */
144#define RAPL_PER_CORE_ENERGY (1 << 10)
145 /* Indicates that core energy collection is per-core,
146 * not per-package. */
147#define RAPL_AMD_F17H (1 << 11)
148 /* 0xc0010299 MSR_RAPL_PWR_UNIT */
149 /* 0xc001029a MSR_CORE_ENERGY_STAT */
150 /* 0xc001029b MSR_PKG_ENERGY_STAT */
144#define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT) 151#define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
145#define TJMAX_DEFAULT 100 152#define TJMAX_DEFAULT 100
146 153
154/* MSRs that are not yet in the kernel-provided header. */
155#define MSR_RAPL_PWR_UNIT 0xc0010299
156#define MSR_CORE_ENERGY_STAT 0xc001029a
157#define MSR_PKG_ENERGY_STAT 0xc001029b
158
147#define MAX(a, b) ((a) > (b) ? (a) : (b)) 159#define MAX(a, b) ((a) > (b) ? (a) : (b))
148 160
149/* 161/*
@@ -187,6 +199,7 @@ struct core_data {
187 unsigned long long c7; 199 unsigned long long c7;
188 unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ 200 unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */
189 unsigned int core_temp_c; 201 unsigned int core_temp_c;
202 unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */
190 unsigned int core_id; 203 unsigned int core_id;
191 unsigned long long counter[MAX_ADDED_COUNTERS]; 204 unsigned long long counter[MAX_ADDED_COUNTERS];
192} *core_even, *core_odd; 205} *core_even, *core_odd;
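
A hedged sketch of reading the new per-core energy counter from userspace, roughly as turbostat does (assumes the msr driver is loaded and sufficient privileges):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_CORE_ENERGY_STAT 0xc001029a	/* AMD Fam17h, as defined above */

	int main(void)
	{
		uint64_t raw;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0)
			return 1;
		/* /dev/cpu/N/msr: pread() at an offset equal to the MSR address */
		if (pread(fd, &raw, sizeof(raw), MSR_CORE_ENERGY_STAT) != sizeof(raw))
			return 1;
		/* the accumulated energy count lives in the low 32 bits */
		printf("core 0 raw energy: %u\n", (unsigned int)(raw & 0xffffffff));
		return 0;
	}
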
@@ -273,6 +286,7 @@ struct system_summary {
273 286
274struct cpu_topology { 287struct cpu_topology {
275 int physical_package_id; 288 int physical_package_id;
289 int die_id;
276 int logical_cpu_id; 290 int logical_cpu_id;
277 int physical_node_id; 291 int physical_node_id;
278 int logical_node_id; /* 0-based count within the package */ 292 int logical_node_id; /* 0-based count within the package */
@@ -283,6 +297,7 @@ struct cpu_topology {
283 297
284struct topo_params { 298struct topo_params {
285 int num_packages; 299 int num_packages;
300 int num_die;
286 int num_cpus; 301 int num_cpus;
287 int num_cores; 302 int num_cores;
288 int max_cpu_num; 303 int max_cpu_num;
@@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
314 int retval, pkg_no, core_no, thread_no, node_no; 329 int retval, pkg_no, core_no, thread_no, node_no;
315 330
316 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { 331 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
317 for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { 332 for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
318 for (node_no = 0; node_no < topo.nodes_per_pkg; 333 for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
319 node_no++) {
320 for (thread_no = 0; thread_no < 334 for (thread_no = 0; thread_no <
321 topo.threads_per_core; ++thread_no) { 335 topo.threads_per_core; ++thread_no) {
322 struct thread_data *t; 336 struct thread_data *t;
@@ -442,6 +456,7 @@ struct msr_counter bic[] = {
442 { 0x0, "CPU" }, 456 { 0x0, "CPU" },
443 { 0x0, "APIC" }, 457 { 0x0, "APIC" },
444 { 0x0, "X2APIC" }, 458 { 0x0, "X2APIC" },
459 { 0x0, "Die" },
445}; 460};
446 461
447#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) 462#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -495,6 +510,7 @@ struct msr_counter bic[] = {
495#define BIC_CPU (1ULL << 47) 510#define BIC_CPU (1ULL << 47)
496#define BIC_APIC (1ULL << 48) 511#define BIC_APIC (1ULL << 48)
497#define BIC_X2APIC (1ULL << 49) 512#define BIC_X2APIC (1ULL << 49)
513#define BIC_Die (1ULL << 50)
498 514
499#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) 515#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
500 516
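For orientation, the new BIC_Die bit plugs into turbostat's column-gating scheme. The gating macros are not part of this hunk; paraphrased from the surrounding source, they behave roughly like:

/* Paraphrase (assumed, bodies not shown in this diff): a column prints
 * only when it is both enabled by the user and marked present. */
#define DO_BIC(x)           (bic_enabled & bic_present & (x))
#define BIC_PRESENT(x)      (bic_present |= (x))
#define BIC_NOT_PRESENT(x)  (bic_present &= ~(x))
/* So "Die" appears only after topology_probe() sees num_die > 1. */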
@@ -621,6 +637,8 @@ void print_header(char *delim)
621 outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : "")); 637 outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
622 if (DO_BIC(BIC_Package)) 638 if (DO_BIC(BIC_Package))
623 outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); 639 outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
640 if (DO_BIC(BIC_Die))
641 outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
624 if (DO_BIC(BIC_Node)) 642 if (DO_BIC(BIC_Node))
625 outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); 643 outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
626 if (DO_BIC(BIC_Core)) 644 if (DO_BIC(BIC_Core))
@@ -667,7 +685,7 @@ void print_header(char *delim)
667 685
668 if (DO_BIC(BIC_CPU_c1)) 686 if (DO_BIC(BIC_CPU_c1))
669 outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); 687 outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
670 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) 688 if (DO_BIC(BIC_CPU_c3))
671 outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); 689 outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
672 if (DO_BIC(BIC_CPU_c6)) 690 if (DO_BIC(BIC_CPU_c6))
673 outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); 691 outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
@@ -680,6 +698,14 @@ void print_header(char *delim)
680 if (DO_BIC(BIC_CoreTmp)) 698 if (DO_BIC(BIC_CoreTmp))
681 outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); 699 outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
682 700
701 if (do_rapl && !rapl_joules) {
702 if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
703 outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
704 } else if (do_rapl && rapl_joules) {
705 if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
706 outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
707 }
708
683 for (mp = sys.cp; mp; mp = mp->next) { 709 for (mp = sys.cp; mp; mp = mp->next) {
684 if (mp->format == FORMAT_RAW) { 710 if (mp->format == FORMAT_RAW) {
685 if (mp->width == 64) 711 if (mp->width == 64)
@@ -734,7 +760,7 @@ void print_header(char *delim)
734 if (do_rapl && !rapl_joules) { 760 if (do_rapl && !rapl_joules) {
735 if (DO_BIC(BIC_PkgWatt)) 761 if (DO_BIC(BIC_PkgWatt))
736 outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); 762 outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
737 if (DO_BIC(BIC_CorWatt)) 763 if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
738 outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); 764 outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
739 if (DO_BIC(BIC_GFXWatt)) 765 if (DO_BIC(BIC_GFXWatt))
740 outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); 766 outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
@@ -747,7 +773,7 @@ void print_header(char *delim)
747 } else if (do_rapl && rapl_joules) { 773 } else if (do_rapl && rapl_joules) {
748 if (DO_BIC(BIC_Pkg_J)) 774 if (DO_BIC(BIC_Pkg_J))
749 outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); 775 outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
750 if (DO_BIC(BIC_Cor_J)) 776 if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
751 outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); 777 outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
752 if (DO_BIC(BIC_GFX_J)) 778 if (DO_BIC(BIC_GFX_J))
753 outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); 779 outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
@@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
808 outp += sprintf(outp, "c6: %016llX\n", c->c6); 834 outp += sprintf(outp, "c6: %016llX\n", c->c6);
809 outp += sprintf(outp, "c7: %016llX\n", c->c7); 835 outp += sprintf(outp, "c7: %016llX\n", c->c7);
810 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); 836 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
837 outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
811 838
812 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 839 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
813 outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", 840 outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
@@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
904 if (t == &average.threads) { 931 if (t == &average.threads) {
905 if (DO_BIC(BIC_Package)) 932 if (DO_BIC(BIC_Package))
906 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 933 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
934 if (DO_BIC(BIC_Die))
935 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
907 if (DO_BIC(BIC_Node)) 936 if (DO_BIC(BIC_Node))
908 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 937 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
909 if (DO_BIC(BIC_Core)) 938 if (DO_BIC(BIC_Core))
@@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c,
921 else 950 else
922 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 951 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
923 } 952 }
953 if (DO_BIC(BIC_Die)) {
954 if (c)
955 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
956 else
957 outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
958 }
924 if (DO_BIC(BIC_Node)) { 959 if (DO_BIC(BIC_Node)) {
925 if (t) 960 if (t)
926 outp += sprintf(outp, "%s%d", 961 outp += sprintf(outp, "%s%d",
@@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
1003 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1038 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1004 goto done; 1039 goto done;
1005 1040
1006 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) 1041 if (DO_BIC(BIC_CPU_c3))
1007 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); 1042 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
1008 if (DO_BIC(BIC_CPU_c6)) 1043 if (DO_BIC(BIC_CPU_c6))
1009 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); 1044 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
@@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c,
1033 } 1068 }
1034 } 1069 }
1035 1070
1071 /*
1072 * If measurement interval exceeds minimum RAPL Joule Counter range,
1073 * indicate that results are suspect by printing "**" in fraction place.
1074 */
1075 if (interval_float < rapl_joule_counter_range)
1076 fmt8 = "%s%.2f";
1077 else
1078 fmt8 = "%6.0f**";
1079
1080 if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
1081 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
1082 if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
1083 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
1084
1036 /* print per-package data only for 1st core in package */ 1085 /* print per-package data only for 1st core in package */
1037 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) 1086 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1038 goto done; 1087 goto done;
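The per-core Watt/Joule columns reuse fmt8, whose "**" fallback flags samples where the 32-bit energy counter may have wrapped. A sketch of the horizon being compared against, matching the rapl_joule_counter_range assignment made in rapl_probe_*() later in this patch:

/* Seconds until a 32-bit RAPL energy counter can wrap while drawing
 * tdp_watts continuously; intervals at or beyond this are suspect. */
static double joule_counter_range_sec(double energy_units, double tdp_watts)
{
	return 0xFFFFFFFF * energy_units / tdp_watts;
}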
@@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
1085 if (DO_BIC(BIC_SYS_LPI)) 1134 if (DO_BIC(BIC_SYS_LPI))
1086 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); 1135 outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
1087 1136
1088 /*
1089 * If measurement interval exceeds minimum RAPL Joule Counter range,
1090 * indicate that results are suspect by printing "**" in fraction place.
1091 */
1092 if (interval_float < rapl_joule_counter_range)
1093 fmt8 = "%s%.2f";
1094 else
1095 fmt8 = "%6.0f**";
1096
1097 if (DO_BIC(BIC_PkgWatt)) 1137 if (DO_BIC(BIC_PkgWatt))
1098 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); 1138 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
1099 if (DO_BIC(BIC_CorWatt)) 1139 if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
1100 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); 1140 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
1101 if (DO_BIC(BIC_GFXWatt)) 1141 if (DO_BIC(BIC_GFXWatt))
1102 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); 1142 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
@@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
1104 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); 1144 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
1105 if (DO_BIC(BIC_Pkg_J)) 1145 if (DO_BIC(BIC_Pkg_J))
1106 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); 1146 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
1107 if (DO_BIC(BIC_Cor_J)) 1147 if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
1108 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); 1148 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
1109 if (DO_BIC(BIC_GFX_J)) 1149 if (DO_BIC(BIC_GFX_J))
1110 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); 1150 outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
@@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old)
1249 old->core_temp_c = new->core_temp_c; 1289 old->core_temp_c = new->core_temp_c;
1250 old->mc6_us = new->mc6_us - old->mc6_us; 1290 old->mc6_us = new->mc6_us - old->mc6_us;
1251 1291
1292 DELTA_WRAP32(new->core_energy, old->core_energy);
1293
1252 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 1294 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1253 if (mp->format == FORMAT_RAW) 1295 if (mp->format == FORMAT_RAW)
1254 old->counter[i] = new->counter[i]; 1296 old->counter[i] = new->counter[i];
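DELTA_WRAP32() (body outside this hunk) is needed because core_energy is a free-running 32-bit counter. A wrap-safe delta looks roughly like this sketch, assuming at most one wrap per interval (which the joule-counter-range check is meant to ensure):

static unsigned long long delta_wrap32(unsigned int new, unsigned int old)
{
	if (new >= old)
		return new - old;
	return 0x100000000ULL + new - old;	/* counter wrapped once */
}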
@@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
1391 c->c7 = 0; 1433 c->c7 = 0;
1392 c->mc6_us = 0; 1434 c->mc6_us = 0;
1393 c->core_temp_c = 0; 1435 c->core_temp_c = 0;
1436 c->core_energy = 0;
1394 1437
1395 p->pkg_wtd_core_c0 = 0; 1438 p->pkg_wtd_core_c0 = 0;
1396 p->pkg_any_core_c0 = 0; 1439 p->pkg_any_core_c0 = 0;
@@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
1473 1516
1474 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); 1517 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
1475 1518
1519 average.cores.core_energy += c->core_energy;
1520
1476 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 1521 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1477 if (mp->format == FORMAT_RAW) 1522 if (mp->format == FORMAT_RAW)
1478 continue; 1523 continue;
@@ -1818,7 +1863,7 @@ retry:
1818 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1863 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1819 goto done; 1864 goto done;
1820 1865
1821 if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) { 1866 if (DO_BIC(BIC_CPU_c3)) {
1822 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1867 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1823 return -6; 1868 return -6;
1824 } 1869 }
@@ -1845,6 +1890,12 @@ retry:
1845 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); 1890 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1846 } 1891 }
1847 1892
1893 if (do_rapl & RAPL_AMD_F17H) {
1894 if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
1895 return -14;
1896 c->core_energy = msr & 0xFFFFFFFF;
1897 }
1898
1848 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 1899 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
1849 if (get_mp(cpu, mp, &c->counter[i])) 1900 if (get_mp(cpu, mp, &c->counter[i]))
1850 return -10; 1901 return -10;
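For context, get_msr() reaches these registers through the Linux msr driver. A self-contained sketch of that mechanism (read_msr() is a hypothetical stand-in; assumes /dev/cpu/N/msr exists and the caller has the needed privileges):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int read_msr(int cpu, uint32_t reg, uint64_t *val)
{
	char path[32];
	int fd;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	/* The MSR index doubles as the file offset. */
	if (pread(fd, val, sizeof(*val), reg) != sizeof(*val)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}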
@@ -1934,6 +1985,11 @@ retry:
1934 return -16; 1985 return -16;
1935 p->rapl_dram_perf_status = msr & 0xFFFFFFFF; 1986 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1936 } 1987 }
1988 if (do_rapl & RAPL_AMD_F17H) {
1989 if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
1990 return -13;
1991 p->energy_pkg = msr & 0xFFFFFFFF;
1992 }
1937 if (DO_BIC(BIC_PkgTmp)) { 1993 if (DO_BIC(BIC_PkgTmp)) {
1938 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) 1994 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
1939 return -17; 1995 return -17;
@@ -2456,6 +2512,8 @@ void free_all_buffers(void)
2456 2512
2457/* 2513/*
2458 * Parse a file containing a single int. 2514 * Parse a file containing a single int.
 2515 * Return 0 if file cannot be opened
 2516 * Exit if file can be opened, but cannot be parsed
2459 */ 2517 */
2460int parse_int_file(const char *fmt, ...) 2518int parse_int_file(const char *fmt, ...)
2461{ 2519{
@@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...)
2467 va_start(args, fmt); 2525 va_start(args, fmt);
2468 vsnprintf(path, sizeof(path), fmt, args); 2526 vsnprintf(path, sizeof(path), fmt, args);
2469 va_end(args); 2527 va_end(args);
2470 filep = fopen_or_die(path, "r"); 2528 filep = fopen(path, "r");
2529 if (!filep)
2530 return 0;
2471 if (fscanf(filep, "%d", &value) != 1) 2531 if (fscanf(filep, "%d", &value) != 1)
2472 err(1, "%s: failed to parse number from file", path); 2532 err(1, "%s: failed to parse number from file", path);
2473 fclose(filep); 2533 fclose(filep);
@@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu)
2488 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); 2548 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
2489} 2549}
2490 2550
2551int get_die_id(int cpu)
2552{
2553 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
2554}
2555
2491int get_core_id(int cpu) 2556int get_core_id(int cpu)
2492{ 2557{
2493 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); 2558 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
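get_die_id() leans on the relaxed parse_int_file() semantics introduced above: on kernels whose sysfs has no topology/die_id node, the open fails and 0 comes back, so every CPU collapses onto die 0 and the Die column is never marked present. A usage sketch:

/* Tolerates older kernels: a missing die_id file reads as die 0. */
int die = parse_int_file(
	"/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);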
@@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
2578 filep = fopen_or_die(path, "r"); 2643 filep = fopen_or_die(path, "r");
2579 do { 2644 do {
2580 offset -= BITMASK_SIZE; 2645 offset -= BITMASK_SIZE;
2581 fscanf(filep, "%lx%c", &map, &character); 2646 if (fscanf(filep, "%lx%c", &map, &character) != 2)
2647 err(1, "%s: failed to parse file", path);
2582 for (shift = 0; shift < BITMASK_SIZE; shift++) { 2648 for (shift = 0; shift < BITMASK_SIZE; shift++) {
2583 if ((map >> shift) & 0x1) { 2649 if ((map >> shift) & 0x1) {
2584 so = shift + offset; 2650 so = shift + offset;
@@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void)
2855 fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r"); 2921 fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
2856 2922
2857 retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us); 2923 retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
2858 if (retval != 1) 2924 if (retval != 1) {
2859 err(1, "CPU LPI"); 2925 fprintf(stderr, "Disabling Low Power Idle CPU output\n");
2926 BIC_NOT_PRESENT(BIC_CPU_LPI);
2927 return -1;
2928 }
2860 2929
2861 fclose(fp); 2930 fclose(fp);
2862 2931
@@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void)
2878 fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); 2947 fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
2879 2948
2880 retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); 2949 retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
2881 if (retval != 1) 2950 if (retval != 1) {
2882 err(1, "SYS LPI"); 2951 fprintf(stderr, "Disabling Low Power Idle System output\n");
2883 2952 BIC_NOT_PRESENT(BIC_SYS_LPI);
2953 return -1;
2954 }
2884 fclose(fp); 2955 fclose(fp);
2885 2956
2886 return 0; 2957 return 0;
@@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void)
3410 input = fopen(path, "r"); 3481 input = fopen(path, "r");
3411 if (input == NULL) 3482 if (input == NULL)
3412 continue; 3483 continue;
3413 fgets(name_buf, sizeof(name_buf), input); 3484 if (!fgets(name_buf, sizeof(name_buf), input))
3485 err(1, "%s: failed to read file", path);
3414 3486
3415 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ 3487 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
3416 sp = strchr(name_buf, '-'); 3488 sp = strchr(name_buf, '-');
3417 if (!sp) 3489 if (!sp)
3418 sp = strchrnul(name_buf, '\n'); 3490 sp = strchrnul(name_buf, '\n');
3419 *sp = '\0'; 3491 *sp = '\0';
3420
3421 fclose(input); 3492 fclose(input);
3422 3493
3423 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", 3494 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
@@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void)
3425 input = fopen(path, "r"); 3496 input = fopen(path, "r");
3426 if (input == NULL) 3497 if (input == NULL)
3427 continue; 3498 continue;
3428 fgets(desc, sizeof(desc), input); 3499 if (!fgets(desc, sizeof(desc), input))
3500 err(1, "%s: failed to read file", path);
3429 3501
3430 fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); 3502 fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
3431 fclose(input); 3503 fclose(input);
@@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void)
3444 base_cpu); 3516 base_cpu);
3445 input = fopen(path, "r"); 3517 input = fopen(path, "r");
3446 if (input == NULL) { 3518 if (input == NULL) {
3447 fprintf(stderr, "NSFOD %s\n", path); 3519 fprintf(outf, "NSFOD %s\n", path);
3448 return; 3520 return;
3449 } 3521 }
3450 fgets(driver_buf, sizeof(driver_buf), input); 3522 if (!fgets(driver_buf, sizeof(driver_buf), input))
3523 err(1, "%s: failed to read file", path);
3451 fclose(input); 3524 fclose(input);
3452 3525
3453 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", 3526 sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
3454 base_cpu); 3527 base_cpu);
3455 input = fopen(path, "r"); 3528 input = fopen(path, "r");
3456 if (input == NULL) { 3529 if (input == NULL) {
3457 fprintf(stderr, "NSFOD %s\n", path); 3530 fprintf(outf, "NSFOD %s\n", path);
3458 return; 3531 return;
3459 } 3532 }
3460 fgets(governor_buf, sizeof(governor_buf), input); 3533 if (!fgets(governor_buf, sizeof(governor_buf), input))
3534 err(1, "%s: failed to read file", path);
3461 fclose(input); 3535 fclose(input);
3462 3536
3463 fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); 3537 fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
@@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void)
3466 sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); 3540 sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
3467 input = fopen(path, "r"); 3541 input = fopen(path, "r");
3468 if (input != NULL) { 3542 if (input != NULL) {
3469 fscanf(input, "%d", &turbo); 3543 if (fscanf(input, "%d", &turbo) != 1)
3544 err(1, "%s: failed to parse number from file", path);
3470 fprintf(outf, "cpufreq boost: %d\n", turbo); 3545 fprintf(outf, "cpufreq boost: %d\n", turbo);
3471 fclose(input); 3546 fclose(input);
3472 } 3547 }
@@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void)
3474 sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); 3549 sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
3475 input = fopen(path, "r"); 3550 input = fopen(path, "r");
3476 if (input != NULL) { 3551 if (input != NULL) {
3477 fscanf(input, "%d", &turbo); 3552 if (fscanf(input, "%d", &turbo) != 1)
3553 err(1, "%s: failed to parse number from file", path);
3478 fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); 3554 fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
3479 fclose(input); 3555 fclose(input);
3480 } 3556 }
@@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
3718#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ 3794#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
3719#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ 3795#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
3720 3796
3721double get_tdp(unsigned int model) 3797double get_tdp_intel(unsigned int model)
3722{ 3798{
3723 unsigned long long msr; 3799 unsigned long long msr;
3724 3800
@@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model)
3735 } 3811 }
3736} 3812}
3737 3813
3814double get_tdp_amd(unsigned int family)
3815{
3816 switch (family) {
3817 case 0x17:
3818 default:
3819 /* This is the max stock TDP of HEDT/Server Fam17h chips */
3820 return 250.0;
3821 }
3822}
3823
3738/* 3824/*
3739 * rapl_dram_energy_units_probe() 3825 * rapl_dram_energy_units_probe()
3740 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. 3826 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
@@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units)
3754 } 3840 }
3755} 3841}
3756 3842
3757 3843void rapl_probe_intel(unsigned int family, unsigned int model)
3758/*
3759 * rapl_probe()
3760 *
3761 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
3762 */
3763void rapl_probe(unsigned int family, unsigned int model)
3764{ 3844{
3765 unsigned long long msr; 3845 unsigned long long msr;
3766 unsigned int time_unit; 3846 unsigned int time_unit;
3767 double tdp; 3847 double tdp;
3768 3848
3769 if (!genuine_intel)
3770 return;
3771
3772 if (family != 6) 3849 if (family != 6)
3773 return; 3850 return;
3774 3851
@@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model)
3892 3969
3893 rapl_time_units = 1.0 / (1 << (time_unit)); 3970 rapl_time_units = 1.0 / (1 << (time_unit));
3894 3971
3895 tdp = get_tdp(model); 3972 tdp = get_tdp_intel(model);
3896 3973
3897 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 3974 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
3898 if (!quiet) 3975 if (!quiet)
3899 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); 3976 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
3977}
3900 3978
3901 return; 3979void rapl_probe_amd(unsigned int family, unsigned int model)
3980{
3981 unsigned long long msr;
3982 unsigned int eax, ebx, ecx, edx;
3983 unsigned int has_rapl = 0;
3984 double tdp;
3985
3986 if (max_extended_level >= 0x80000007) {
3987 __cpuid(0x80000007, eax, ebx, ecx, edx);
3988 /* RAPL (Fam 17h) */
3989 has_rapl = edx & (1 << 14);
3990 }
3991
3992 if (!has_rapl)
3993 return;
3994
3995 switch (family) {
3996 case 0x17: /* Zen, Zen+ */
3997 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
3998 if (rapl_joules) {
3999 BIC_PRESENT(BIC_Pkg_J);
4000 BIC_PRESENT(BIC_Cor_J);
4001 } else {
4002 BIC_PRESENT(BIC_PkgWatt);
4003 BIC_PRESENT(BIC_CorWatt);
4004 }
4005 break;
4006 default:
4007 return;
4008 }
4009
4010 if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
4011 return;
4012
4013 rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
4014 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
4015 rapl_power_units = ldexp(1.0, -(msr & 0xf));
4016
4017 tdp = get_tdp_amd(model);
4018
4019 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
4020 if (!quiet)
4021 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
4022}
4023
4024/*
4025 * rapl_probe()
4026 *
4027 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
4028 */
4029void rapl_probe(unsigned int family, unsigned int model)
4030{
4031 if (genuine_intel)
4032 rapl_probe_intel(family, model);
4033 if (authentic_amd)
4034 rapl_probe_amd(family, model);
3902} 4035}
3903 4036
3904void perf_limit_reasons_probe(unsigned int family, unsigned int model) 4037void perf_limit_reasons_probe(unsigned int family, unsigned int model)
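Detection hinges on CPUID Fn8000_0007 EDX[14], the bit tested in the hunk above. A standalone check, as a sketch using GCC/Clang's <cpuid.h> (__get_cpuid() returns 0 when the requested leaf is unavailable):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx) &&
	    (edx & (1 << 14)))
		puts("AMD RAPL energy reporting supported");
	return 0;
}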
@@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
4003int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) 4136int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
4004{ 4137{
4005 unsigned long long msr; 4138 unsigned long long msr;
4139 const char *msr_name;
4006 int cpu; 4140 int cpu;
4007 4141
4008 if (!do_rapl) 4142 if (!do_rapl)
@@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
4018 return -1; 4152 return -1;
4019 } 4153 }
4020 4154
4021 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) 4155 if (do_rapl & RAPL_AMD_F17H) {
4022 return -1; 4156 msr_name = "MSR_RAPL_PWR_UNIT";
4157 if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
4158 return -1;
4159 } else {
4160 msr_name = "MSR_RAPL_POWER_UNIT";
4161 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
4162 return -1;
4163 }
4023 4164
4024 fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr, 4165 fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
4025 rapl_power_units, rapl_energy_units, rapl_time_units); 4166 rapl_power_units, rapl_energy_units, rapl_time_units);
4026 4167
4027 if (do_rapl & RAPL_PKG_POWER_INFO) { 4168 if (do_rapl & RAPL_PKG_POWER_INFO) {
@@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model)
4451 case INTEL_FAM6_KABYLAKE_MOBILE: 4592 case INTEL_FAM6_KABYLAKE_MOBILE:
4452 case INTEL_FAM6_KABYLAKE_DESKTOP: 4593 case INTEL_FAM6_KABYLAKE_DESKTOP:
4453 return INTEL_FAM6_SKYLAKE_MOBILE; 4594 return INTEL_FAM6_SKYLAKE_MOBILE;
4595
4596 case INTEL_FAM6_ICELAKE_MOBILE:
4597 return INTEL_FAM6_CANNONLAKE_MOBILE;
4454 } 4598 }
4455 return model; 4599 return model;
4456} 4600}
@@ -4702,7 +4846,9 @@ void process_cpuid()
4702 } 4846 }
4703 do_slm_cstates = is_slm(family, model); 4847 do_slm_cstates = is_slm(family, model);
4704 do_knl_cstates = is_knl(family, model); 4848 do_knl_cstates = is_knl(family, model);
4705 do_cnl_cstates = is_cnl(family, model); 4849
4850 if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
4851 BIC_NOT_PRESENT(BIC_CPU_c3);
4706 4852
4707 if (!quiet) 4853 if (!quiet)
4708 decode_misc_pwr_mgmt_msr(); 4854 decode_misc_pwr_mgmt_msr();
@@ -4769,6 +4915,7 @@ void topology_probe()
4769 int i; 4915 int i;
4770 int max_core_id = 0; 4916 int max_core_id = 0;
4771 int max_package_id = 0; 4917 int max_package_id = 0;
4918 int max_die_id = 0;
4772 int max_siblings = 0; 4919 int max_siblings = 0;
4773 4920
4774 /* Initialize num_cpus, max_cpu_num */ 4921 /* Initialize num_cpus, max_cpu_num */
@@ -4835,6 +4982,11 @@ void topology_probe()
4835 if (cpus[i].physical_package_id > max_package_id) 4982 if (cpus[i].physical_package_id > max_package_id)
4836 max_package_id = cpus[i].physical_package_id; 4983 max_package_id = cpus[i].physical_package_id;
4837 4984
4985 /* get die information */
4986 cpus[i].die_id = get_die_id(i);
4987 if (cpus[i].die_id > max_die_id)
4988 max_die_id = cpus[i].die_id;
4989
4838 /* get numa node information */ 4990 /* get numa node information */
4839 cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); 4991 cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
4840 if (cpus[i].physical_node_id > topo.max_node_num) 4992 if (cpus[i].physical_node_id > topo.max_node_num)
@@ -4860,6 +5012,13 @@ void topology_probe()
4860 if (!summary_only && topo.cores_per_node > 1) 5012 if (!summary_only && topo.cores_per_node > 1)
4861 BIC_PRESENT(BIC_Core); 5013 BIC_PRESENT(BIC_Core);
4862 5014
5015 topo.num_die = max_die_id + 1;
5016 if (debug > 1)
5017 fprintf(outf, "max_die_id %d, sizing for %d die\n",
5018 max_die_id, topo.num_die);
5019 if (!summary_only && topo.num_die > 1)
5020 BIC_PRESENT(BIC_Die);
5021
4863 topo.num_packages = max_package_id + 1; 5022 topo.num_packages = max_package_id + 1;
4864 if (debug > 1) 5023 if (debug > 1)
4865 fprintf(outf, "max_package_id %d, sizing for %d packages\n", 5024 fprintf(outf, "max_package_id %d, sizing for %d packages\n",
@@ -4884,8 +5043,8 @@ void topology_probe()
4884 if (cpu_is_not_present(i)) 5043 if (cpu_is_not_present(i))
4885 continue; 5044 continue;
4886 fprintf(outf, 5045 fprintf(outf,
4887 "cpu %d pkg %d node %d lnode %d core %d thread %d\n", 5046 "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
4888 i, cpus[i].physical_package_id, 5047 i, cpus[i].physical_package_id, cpus[i].die_id,
4889 cpus[i].physical_node_id, 5048 cpus[i].physical_node_id,
4890 cpus[i].logical_node_id, 5049 cpus[i].logical_node_id,
4891 cpus[i].physical_core_id, 5050 cpus[i].physical_core_id,
@@ -5122,7 +5281,7 @@ int get_and_dump_counters(void)
5122} 5281}
5123 5282
5124void print_version() { 5283void print_version() {
5125 fprintf(outf, "turbostat version 18.07.27" 5284 fprintf(outf, "turbostat version 19.03.20"
5126 " - Len Brown <lenb@kernel.org>\n"); 5285 " - Len Brown <lenb@kernel.org>\n");
5127} 5286}
5128 5287
@@ -5319,7 +5478,8 @@ void probe_sysfs(void)
5319 input = fopen(path, "r"); 5478 input = fopen(path, "r");
5320 if (input == NULL) 5479 if (input == NULL)
5321 continue; 5480 continue;
5322 fgets(name_buf, sizeof(name_buf), input); 5481 if (!fgets(name_buf, sizeof(name_buf), input))
5482 err(1, "%s: failed to read file", path);
5323 5483
5324 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ 5484 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
5325 sp = strchr(name_buf, '-'); 5485 sp = strchr(name_buf, '-');
@@ -5346,7 +5506,8 @@ void probe_sysfs(void)
5346 input = fopen(path, "r"); 5506 input = fopen(path, "r");
5347 if (input == NULL) 5507 if (input == NULL)
5348 continue; 5508 continue;
5349 fgets(name_buf, sizeof(name_buf), input); 5509 if (!fgets(name_buf, sizeof(name_buf), input))
5510 err(1, "%s: failed to read file", path);
5350 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ 5511 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
5351 sp = strchr(name_buf, '-'); 5512 sp = strchr(name_buf, '-');
5352 if (!sp) 5513 if (!sp)
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index bcbd928c96ab..fc818bc1d729 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = {
39 .n_proto = __bpf_constant_htons(ETH_P_IPV6), 39 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
40}; 40};
41 41
42#define VLAN_HLEN 4
43
44static struct {
45 struct ethhdr eth;
46 __u16 vlan_tci;
47 __u16 vlan_proto;
48 struct iphdr iph;
49 struct tcphdr tcp;
50} __packed pkt_vlan_v4 = {
51 .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
52 .vlan_proto = __bpf_constant_htons(ETH_P_IP),
53 .iph.ihl = 5,
54 .iph.protocol = IPPROTO_TCP,
55 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
56 .tcp.urg_ptr = 123,
57 .tcp.doff = 5,
58};
59
60static struct bpf_flow_keys pkt_vlan_v4_flow_keys = {
61 .nhoff = VLAN_HLEN,
62 .thoff = VLAN_HLEN + sizeof(struct iphdr),
63 .addr_proto = ETH_P_IP,
64 .ip_proto = IPPROTO_TCP,
65 .n_proto = __bpf_constant_htons(ETH_P_IP),
66};
67
68static struct {
69 struct ethhdr eth;
70 __u16 vlan_tci;
71 __u16 vlan_proto;
72 __u16 vlan_tci2;
73 __u16 vlan_proto2;
74 struct ipv6hdr iph;
75 struct tcphdr tcp;
76} __packed pkt_vlan_v6 = {
77 .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
78 .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
79 .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
80 .iph.nexthdr = IPPROTO_TCP,
81 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
82 .tcp.urg_ptr = 123,
83 .tcp.doff = 5,
84};
85
86static struct bpf_flow_keys pkt_vlan_v6_flow_keys = {
87 .nhoff = VLAN_HLEN * 2,
88 .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
89 .addr_proto = ETH_P_IPV6,
90 .ip_proto = IPPROTO_TCP,
91 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
92};
93
42void test_flow_dissector(void) 94void test_flow_dissector(void)
43{ 95{
44 struct bpf_flow_keys flow_keys; 96 struct bpf_flow_keys flow_keys;
@@ -68,5 +120,21 @@ void test_flow_dissector(void)
68 err, errno, retval, duration, size, sizeof(flow_keys)); 120 err, errno, retval, duration, size, sizeof(flow_keys));
69 CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); 121 CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
70 122
123 err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4),
124 &flow_keys, &size, &retval, &duration);
125 CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4",
126 "err %d errno %d retval %d duration %d size %u/%lu\n",
127 err, errno, retval, duration, size, sizeof(flow_keys));
128 CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys,
129 pkt_vlan_v4_flow_keys);
130
131 err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6),
132 &flow_keys, &size, &retval, &duration);
133 CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6",
134 "err %d errno %d retval %d duration %d size %u/%lu\n",
135 err, errno, retval, duration, size, sizeof(flow_keys));
136 CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys,
137 pkt_vlan_v6_flow_keys);
138
71 bpf_object__close(obj); 139 bpf_object__close(obj);
72} 140}
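The expected flow keys in the new vectors follow mechanically from the frame layouts: dissection starts at the end of the Ethernet header, and every 802.1Q/802.1AD tag pushes the network header back by VLAN_HLEN bytes. A sketch of the arithmetic the CHECK_FLOW_KEYS() assertions encode (helpers are illustrative, reusing the file's VLAN_HLEN):

static unsigned int expected_nhoff(unsigned int n_tags)
{
	return n_tags * VLAN_HLEN;	/* 4 single-tagged, 8 for QinQ */
}

static unsigned int expected_thoff(unsigned int n_tags,
				   unsigned int ip_hdr_len)
{
	/* ip_hdr_len: sizeof(struct iphdr) or sizeof(struct ipv6hdr) */
	return expected_nhoff(n_tags) + ip_hdr_len;
}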
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 284660f5aa95..75b17cada539 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
92{ 92{
93 struct bpf_flow_keys *keys = skb->flow_keys; 93 struct bpf_flow_keys *keys = skb->flow_keys;
94 94
95 keys->n_proto = proto;
96 switch (proto) { 95 switch (proto) {
97 case bpf_htons(ETH_P_IP): 96 case bpf_htons(ETH_P_IP):
98 bpf_tail_call(skb, &jmp_table, IP); 97 bpf_tail_call(skb, &jmp_table, IP);
@@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
119SEC("flow_dissector") 118SEC("flow_dissector")
120int _dissect(struct __sk_buff *skb) 119int _dissect(struct __sk_buff *skb)
121{ 120{
122 if (!skb->vlan_present) 121 struct bpf_flow_keys *keys = skb->flow_keys;
123 return parse_eth_proto(skb, skb->protocol); 122
124 else 123 return parse_eth_proto(skb, keys->n_proto);
125 return parse_eth_proto(skb, skb->vlan_proto);
126} 124}
127 125
128/* Parses on IPPROTO_* */ 126/* Parses on IPPROTO_* */
@@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb)
336{ 334{
337 struct bpf_flow_keys *keys = skb->flow_keys; 335 struct bpf_flow_keys *keys = skb->flow_keys;
338 struct vlan_hdr *vlan, _vlan; 336 struct vlan_hdr *vlan, _vlan;
339 __be16 proto;
340
341 /* Peek back to see if single or double-tagging */
342 if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
343 sizeof(proto)))
344 return BPF_DROP;
345 337
346 /* Account for double-tagging */ 338 /* Account for double-tagging */
347 if (proto == bpf_htons(ETH_P_8021AD)) { 339 if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
348 vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); 340 vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
349 if (!vlan) 341 if (!vlan)
350 return BPF_DROP; 342 return BPF_DROP;
@@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb)
352 if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) 344 if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
353 return BPF_DROP; 345 return BPF_DROP;
354 346
347 keys->nhoff += sizeof(*vlan);
355 keys->thoff += sizeof(*vlan); 348 keys->thoff += sizeof(*vlan);
356 } 349 }
357 350
@@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
359 if (!vlan) 352 if (!vlan)
360 return BPF_DROP; 353 return BPF_DROP;
361 354
355 keys->nhoff += sizeof(*vlan);
362 keys->thoff += sizeof(*vlan); 356 keys->thoff += sizeof(*vlan);
363 /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ 357 /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
364 if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || 358 if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
365 vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) 359 vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
366 return BPF_DROP; 360 return BPF_DROP;
367 361
362 keys->n_proto = vlan->h_vlan_encapsulated_proto;
368 return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto); 363 return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
369} 364}
370 365
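Net effect of these bpf_flow.c changes: keys->n_proto now carries the pending EtherType across tail calls (presumably seeded by the kernel-side dissector before the program first runs, which is outside this diff), so PROG(VLAN) no longer peeks backwards into the packet with bpf_skb_load_bytes(). Offset bookkeeping for a QinQ frame then reduces to this sketch:

/* Walking an 802.1AD + 802.1Q frame, starting with nhoff == thoff ==
 * end of the Ethernet header: */
keys->nhoff += sizeof(struct vlan_hdr);	/* outer 802.1AD tag: +4 */
keys->thoff += sizeof(struct vlan_hdr);
keys->nhoff += sizeof(struct vlan_hdr);	/* inner 802.1Q tag:  +4 */
keys->thoff += sizeof(struct vlan_hdr);
keys->n_proto = vlan->h_vlan_encapsulated_proto;  /* e.g. ETH_P_IPV6 */
/* This is what pkt_vlan_v6_flow_keys expects: nhoff == 2 * VLAN_HLEN. */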
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 23e3b314ca60..ec5794e4205b 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -5777,6 +5777,53 @@ const struct btf_dedup_test dedup_tests[] = {
5777 }, 5777 },
5778}, 5778},
5779{ 5779{
5780 .descr = "dedup: void equiv check",
5781 /*
5782 * // CU 1:
5783 * struct s {
5784 * struct {} *x;
5785 * };
5786 * // CU 2:
5787 * struct s {
5788 * int *x;
5789 * };
5790 */
5791 .input = {
5792 .raw_types = {
5793 /* CU 1 */
5794 BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
5795 BTF_PTR_ENC(1), /* [2] ptr -> [1] */
5796 BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
5797 BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
5798 /* CU 2 */
5799 BTF_PTR_ENC(0), /* [4] ptr -> void */
5800 BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
5801 BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
5802 BTF_END_RAW,
5803 },
5804 BTF_STR_SEC("\0s\0x"),
5805 },
5806 .expect = {
5807 .raw_types = {
5808 /* CU 1 */
5809 BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
5810 BTF_PTR_ENC(1), /* [2] ptr -> [1] */
5811 BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
5812 BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
5813 /* CU 2 */
5814 BTF_PTR_ENC(0), /* [4] ptr -> void */
5815 BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
5816 BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
5817 BTF_END_RAW,
5818 },
5819 BTF_STR_SEC("\0s\0x"),
5820 },
5821 .opts = {
5822 .dont_resolve_fwds = false,
5823 .dedup_table_size = 1, /* force hash collisions */
5824 },
5825},
5826{
5780 .descr = "dedup: all possible kinds (no duplicates)", 5827 .descr = "dedup: all possible kinds (no duplicates)",
5781 .input = { 5828 .input = {
5782 .raw_types = { 5829 .raw_types = {
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index f2ccae39ee66..fb11240b758b 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -908,6 +908,44 @@
908 .result = REJECT, 908 .result = REJECT,
909}, 909},
910{ 910{
911 "calls: stack depth check in dead code",
912 .insns = {
913 /* main */
914 BPF_MOV64_IMM(BPF_REG_1, 0),
915 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
916 BPF_EXIT_INSN(),
917 /* A */
918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
919 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
920 BPF_MOV64_IMM(BPF_REG_0, 0),
921 BPF_EXIT_INSN(),
922 /* B */
923 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
924 BPF_EXIT_INSN(),
925 /* C */
926 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
927 BPF_EXIT_INSN(),
928 /* D */
929 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
930 BPF_EXIT_INSN(),
931 /* E */
932 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
933 BPF_EXIT_INSN(),
934 /* F */
935 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
936 BPF_EXIT_INSN(),
937 /* G */
938 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
939 BPF_EXIT_INSN(),
940 /* H */
941 BPF_MOV64_IMM(BPF_REG_0, 0),
942 BPF_EXIT_INSN(),
943 },
944 .prog_type = BPF_PROG_TYPE_XDP,
945 .errstr = "call stack",
946 .result = REJECT,
947},
948{
911 "calls: spill into caller stack frame", 949 "calls: spill into caller stack frame",
912 .insns = { 950 .insns = {
913 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 951 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 3c1f4bdf9000..7514fcea91a7 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -29,8 +29,8 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
29INSTALL_HDR_PATH = $(top_srcdir)/usr 29INSTALL_HDR_PATH = $(top_srcdir)/usr
30LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ 30LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
31LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include 31LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
32CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. 32CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
33LDFLAGS += -pthread 33LDFLAGS += -pthread -no-pie
34 34
35# After inclusion, $(OUTPUT) is defined and 35# After inclusion, $(OUTPUT) is defined and
36# $(TEST_GEN_PROGS) starts with $(OUTPUT)/ 36# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index a84785b02557..07b71ad9734a 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
102struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); 102struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
103void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); 103void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
104int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); 104int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
105void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
105void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, 106void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
106 struct kvm_mp_state *mp_state); 107 struct kvm_mp_state *mp_state);
107void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); 108void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index b52cfdefecbf..efa0aad8b3c6 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1121,6 +1121,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1121 return rc; 1121 return rc;
1122} 1122}
1123 1123
1124void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
1125{
1126 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1127 int ret;
1128
1129 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1130
1131 vcpu->state->immediate_exit = 1;
1132 ret = ioctl(vcpu->fd, KVM_RUN, NULL);
1133 vcpu->state->immediate_exit = 0;
1134
1135 TEST_ASSERT(ret == -1 && errno == EINTR,
1136 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1137 ret, errno);
1138}
1139
1124/* 1140/*
1125 * VM VCPU Set MP State 1141 * VM VCPU Set MP State
1126 * 1142 *
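vcpu_run_complete_io() wraps KVM's immediate_exit protocol: with run->immediate_exit set, KVM_RUN completes any in-flight MMIO/PIO emulation left over from the previous exit and returns -1 with errno == EINTR without entering the guest. Typical use, as in the state_test change below:

/* Flush a pending KVM_EXIT_IO completion, then snapshot consistently. */
vcpu_run_complete_io(vm, VCPU_ID);
state = vcpu_save_state(vm, VCPU_ID);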
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index d503a51fad30..7c2c4d4055a8 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -87,22 +87,25 @@ int main(int argc, char *argv[])
87 while (1) { 87 while (1) {
88 rc = _vcpu_run(vm, VCPU_ID); 88 rc = _vcpu_run(vm, VCPU_ID);
89 89
90 if (run->exit_reason == KVM_EXIT_IO) { 90 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
91 switch (get_ucall(vm, VCPU_ID, &uc)) { 91 "Unexpected exit reason: %u (%s),\n",
92 case UCALL_SYNC: 92 run->exit_reason,
93 /* emulate hypervisor clearing CR4.OSXSAVE */ 93 exit_reason_str(run->exit_reason));
94 vcpu_sregs_get(vm, VCPU_ID, &sregs); 94
95 sregs.cr4 &= ~X86_CR4_OSXSAVE; 95 switch (get_ucall(vm, VCPU_ID, &uc)) {
96 vcpu_sregs_set(vm, VCPU_ID, &sregs); 96 case UCALL_SYNC:
97 break; 97 /* emulate hypervisor clearing CR4.OSXSAVE */
98 case UCALL_ABORT: 98 vcpu_sregs_get(vm, VCPU_ID, &sregs);
99 TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit."); 99 sregs.cr4 &= ~X86_CR4_OSXSAVE;
100 break; 100 vcpu_sregs_set(vm, VCPU_ID, &sregs);
101 case UCALL_DONE: 101 break;
102 goto done; 102 case UCALL_ABORT:
103 default: 103 TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
104 TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); 104 break;
105 } 105 case UCALL_DONE:
106 goto done;
107 default:
108 TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
106 } 109 }
107 } 110 }
108 111
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 4b3f556265f1..30f75856cf39 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -134,6 +134,11 @@ int main(int argc, char *argv[])
134 134
135 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 135 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
136 136
137 if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
138 fprintf(stderr, "immediate_exit not available, skipping test\n");
139 exit(KSFT_SKIP);
140 }
141
137 /* Create VM */ 142 /* Create VM */
138 vm = vm_create_default(VCPU_ID, 0, guest_code); 143 vm = vm_create_default(VCPU_ID, 0, guest_code);
139 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); 144 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
@@ -156,8 +161,6 @@ int main(int argc, char *argv[])
156 stage, run->exit_reason, 161 stage, run->exit_reason,
157 exit_reason_str(run->exit_reason)); 162 exit_reason_str(run->exit_reason));
158 163
159 memset(&regs1, 0, sizeof(regs1));
160 vcpu_regs_get(vm, VCPU_ID, &regs1);
161 switch (get_ucall(vm, VCPU_ID, &uc)) { 164 switch (get_ucall(vm, VCPU_ID, &uc)) {
162 case UCALL_ABORT: 165 case UCALL_ABORT:
163 TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], 166 TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -176,6 +179,17 @@ int main(int argc, char *argv[])
176 uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", 179 uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
177 stage, (ulong)uc.args[1]); 180 stage, (ulong)uc.args[1]);
178 181
182 /*
183 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
184 * guest state is consistent only after userspace re-enters the
185 * kernel with KVM_RUN. Complete IO prior to migrating state
186 * to a new VM.
187 */
188 vcpu_run_complete_io(vm, VCPU_ID);
189
190 memset(&regs1, 0, sizeof(regs1));
191 vcpu_regs_get(vm, VCPU_ID, &regs1);
192
179 state = vcpu_save_state(vm, VCPU_ID); 193 state = vcpu_save_state(vm, VCPU_ID);
180 kvm_vm_release(vm); 194 kvm_vm_release(vm);
181 195
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
index 27f0acaed880..ddabb160a11b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
@@ -144,6 +144,30 @@
144 ] 144 ]
145 }, 145 },
146 { 146 {
147 "id": "7571",
148 "name": "Add sample action with invalid rate",
149 "category": [
150 "actions",
151 "sample"
152 ],
153 "setup": [
154 [
155 "$TC actions flush action sample",
156 0,
157 1,
158 255
159 ]
160 ],
161 "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
162 "expExitCode": "255",
163 "verifyCmd": "$TC actions get action sample index 2",
164 "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
165 "matchCount": "0",
166 "teardown": [
167 "$TC actions flush action sample"
168 ]
169 },
170 {
147 "id": "b6d4", 171 "id": "b6d4",
148 "name": "Add sample action with mandatory arguments and invalid control action", 172 "name": "Add sample action with mandatory arguments and invalid control action",
149 "category": [ 173 "category": [
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 264d92da3240..370bd6c5e6cb 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
222 } 222 }
223 } 223 }
224 224
225 if (used_lrs) { 225 if (used_lrs || cpu_if->its_vpe.its_vm) {
226 int i; 226 int i;
227 u32 elrsr; 227 u32 elrsr;
228 228
@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
247 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; 247 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
248 int i; 248 int i;
249 249
250 if (used_lrs) { 250 if (used_lrs || cpu_if->its_vpe.its_vm) {
251 write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); 251 write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
252 252
253 for (i = 0; i < used_lrs; i++) 253 for (i = 0; i < used_lrs; i++)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ffd7acdceac7..27c958306449 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -102,8 +102,7 @@ static bool kvm_is_device_pfn(unsigned long pfn)
102 * @addr: IPA 102 * @addr: IPA
103 * @pmd: pmd pointer for IPA 103 * @pmd: pmd pointer for IPA
104 * 104 *
105 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all 105 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
106 * pages in the range dirty.
107 */ 106 */
108static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) 107static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
109{ 108{
@@ -121,8 +120,7 @@ static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
121 * @addr: IPA 120 * @addr: IPA
122 * @pud: pud pointer for IPA 121 * @pud: pud pointer for IPA
123 * 122 *
124 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all 123 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
125 * pages in the range dirty.
126 */ 124 */
127static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) 125static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
128{ 126{
@@ -899,9 +897,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
899 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. 897 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
900 * @kvm: The KVM struct pointer for the VM. 898 * @kvm: The KVM struct pointer for the VM.
901 * 899 *
902 * Allocates only the stage-2 HW PGD level table(s) (can support either full 900 * Allocates only the stage-2 HW PGD level table(s) of size defined by
903 * 40-bit input addresses or limited to 32-bit input addresses). Clears the 901 * stage2_pgd_size(kvm).
904 * allocated pages.
905 * 902 *
906 * Note we don't need locking here as this is only called when the VM is 903 * Note we don't need locking here as this is only called when the VM is
907 * created, which can only be done once. 904 * created, which can only be done once.
@@ -1067,25 +1064,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1067{ 1064{
1068 pmd_t *pmd, old_pmd; 1065 pmd_t *pmd, old_pmd;
1069 1066
1067retry:
1070 pmd = stage2_get_pmd(kvm, cache, addr); 1068 pmd = stage2_get_pmd(kvm, cache, addr);
1071 VM_BUG_ON(!pmd); 1069 VM_BUG_ON(!pmd);
1072 1070
1073 old_pmd = *pmd; 1071 old_pmd = *pmd;
1072 /*
1073 * Multiple vcpus faulting on the same PMD entry, can
1074 * lead to them sequentially updating the PMD with the
1075 * same value. Following the break-before-make
1076 * (pmd_clear() followed by tlb_flush()) process can
1077 * hinder forward progress due to refaults generated
1078 * on missing translations.
1079 *
1080 * Skip updating the page table if the entry is
1081 * unchanged.
1082 */
1083 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1084 return 0;
1085
1074 if (pmd_present(old_pmd)) { 1086 if (pmd_present(old_pmd)) {
1075 /* 1087 /*
1076 * Multiple vcpus faulting on the same PMD entry, can 1088 * If we already have PTE level mapping for this block,
1077 * lead to them sequentially updating the PMD with the 1089 * we must unmap it to avoid inconsistent TLB state and
1078 * same value. Following the break-before-make 1090 * leaking the table page. We could end up in this situation
1079 * (pmd_clear() followed by tlb_flush()) process can 1091 * if the memory slot was marked for dirty logging and was
1080 * hinder forward progress due to refaults generated 1092 * reverted, leaving PTE level mappings for the pages accessed
1081 * on missing translations. 1093 * during the period. So, unmap the PTE level mapping for this
1094 * block and retry, as we could have released the upper level
1095 * table in the process.
1082 * 1096 *
1083 * Skip updating the page table if the entry is 1097 * Normal THP split/merge follows mmu_notifier callbacks and do
1084 * unchanged. 1098 * get handled accordingly.
1085 */ 1099 */
1086 if (pmd_val(old_pmd) == pmd_val(*new_pmd)) 1100 if (!pmd_thp_or_huge(old_pmd)) {
1087 return 0; 1101 unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
1088 1102 goto retry;
1103 }
1089 /* 1104 /*
1090 * Mapping in huge pages should only happen through a 1105 * Mapping in huge pages should only happen through a
1091 * fault. If a page is merged into a transparent huge 1106 * fault. If a page is merged into a transparent huge
@@ -1097,8 +1112,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1097 * should become splitting first, unmapped, merged, 1112 * should become splitting first, unmapped, merged,
1098 * and mapped back in on-demand. 1113 * and mapped back in on-demand.
1099 */ 1114 */
1100 VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); 1115 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
1101
1102 pmd_clear(pmd); 1116 pmd_clear(pmd);
1103 kvm_tlb_flush_vmid_ipa(kvm, addr); 1117 kvm_tlb_flush_vmid_ipa(kvm, addr);
1104 } else { 1118 } else {
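Both huge-mapping paths now keep the break-before-make discipline while tolerating a stale table-level entry. In outline, the sequence the retry loop protects:

/* Replacing a stage-2 block entry, sketched:
 *  1. old entry is a table, not a block?  unmap the range and retry,
 *     since the walk may have freed the upper-level table page;
 *  2. pmd_clear() / stage2_pud_clear()    -- break
 *  3. kvm_tlb_flush_vmid_ipa()            -- flush stale translations
 *  4. install *new_pmd / *new_pudp        -- make (performed after this
 *     hunk, in the function's common path) */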
@@ -1114,6 +1128,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
1114{ 1128{
1115 pud_t *pudp, old_pud; 1129 pud_t *pudp, old_pud;
1116 1130
1131retry:
1117 pudp = stage2_get_pud(kvm, cache, addr); 1132 pudp = stage2_get_pud(kvm, cache, addr);
1118 VM_BUG_ON(!pudp); 1133 VM_BUG_ON(!pudp);
1119 1134
@@ -1121,14 +1136,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
1121 1136
1122 /* 1137 /*
1123 * A large number of vcpus faulting on the same stage 2 entry, 1138 * A large number of vcpus faulting on the same stage 2 entry,
1124 * can lead to a refault due to the 1139 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
1125 * stage2_pud_clear()/tlb_flush(). Skip updating the page 1140 * Skip updating the page tables if there is no change.
1126 * tables if there is no change.
1127 */ 1141 */
1128 if (pud_val(old_pud) == pud_val(*new_pudp)) 1142 if (pud_val(old_pud) == pud_val(*new_pudp))
1129 return 0; 1143 return 0;
1130 1144
1131 if (stage2_pud_present(kvm, old_pud)) { 1145 if (stage2_pud_present(kvm, old_pud)) {
1146 /*
1147 * If we already have table level mapping for this block, unmap
1148 * the range for this block and retry.
1149 */
1150 if (!stage2_pud_huge(kvm, old_pud)) {
1151 unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
1152 goto retry;
1153 }
1154
1155 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
1132 stage2_pud_clear(kvm, pudp); 1156 stage2_pud_clear(kvm, pudp);
1133 kvm_tlb_flush_vmid_ipa(kvm, addr); 1157 kvm_tlb_flush_vmid_ipa(kvm, addr);
1134 } else { 1158 } else {
@@ -1451,13 +1475,11 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
1451} 1475}
1452 1476
1453/** 1477/**
1454 * stage2_wp_puds - write protect PGD range 1478 * stage2_wp_puds - write protect PGD range
1455 * @pgd: pointer to pgd entry 1479 * @pgd: pointer to pgd entry
1456 * @addr: range start address 1480 * @addr: range start address
1457 * @end: range end address 1481 * @end: range end address
1458 * 1482 */
1459 * Process PUD entries, for a huge PUD we cause a panic.
1460 */
1461static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd, 1483static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
1462 phys_addr_t addr, phys_addr_t end) 1484 phys_addr_t addr, phys_addr_t end)
1463{ 1485{
@@ -1594,8 +1616,9 @@ static void kvm_send_hwpoison_signal(unsigned long address,
         send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
-static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
-                                               unsigned long hva)
+static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
+                                               unsigned long hva,
+                                               unsigned long map_size)
 {
         gpa_t gpa_start;
         hva_t uaddr_start, uaddr_end;
@@ -1610,34 +1633,34 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
 
         /*
          * Pages belonging to memslots that don't have the same alignment
-         * within a PMD for userspace and IPA cannot be mapped with stage-2
-         * PMD entries, because we'll end up mapping the wrong pages.
+         * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
+         * PMD/PUD entries, because we'll end up mapping the wrong pages.
          *
          * Consider a layout like the following:
          *
          *    memslot->userspace_addr:
          *    +-----+--------------------+--------------------+---+
-         *    |abcde|fgh  Stage-1 PMD    |    Stage-1 PMD   tv|xyz|
+         *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
          *    +-----+--------------------+--------------------+---+
          *
          *    memslot->base_gfn << PAGE_SIZE:
          *      +---+--------------------+--------------------+-----+
-         *      |abc|def  Stage-2 PMD    |    Stage-2 PMD     |tvxyz|
+         *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
         *      +---+--------------------+--------------------+-----+
          *
-         * If we create those stage-2 PMDs, we'll end up with this incorrect
+         * If we create those stage-2 blocks, we'll end up with this incorrect
          * mapping:
          *   d -> f
          *   e -> g
          *   f -> h
          */
-        if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
+        if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                 return false;
 
         /*
          * Next, let's make sure we're not trying to map anything not covered
-         * by the memslot. This means we have to prohibit PMD size mappings
-         * for the beginning and end of a non-PMD aligned and non-PMD sized
+         * by the memslot. This means we have to prohibit block size mappings
+         * for the beginning and end of a non-block aligned and non-block sized
          * memory slot (illustrated by the head and tail parts of the
          * userspace view above containing pages 'abcde' and 'xyz',
          * respectively).
@@ -1646,8 +1669,8 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
          * userspace_addr or the base_gfn, as both are equally aligned (per
          * the check above) and equally sized.
          */
-        return (hva & S2_PMD_MASK) >= uaddr_start &&
-               (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
+        return (hva & ~(map_size - 1)) >= uaddr_start &&
+               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
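
The rewritten check is plain power-of-two arithmetic: for a block of map_size bytes, x & (map_size - 1) is the offset of x within its block and x & ~(map_size - 1) rounds x down to the block base. A standalone sketch of both conditions follows; the function name and the addresses in main() are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

static bool supports_huge_mapping(unsigned long gpa_start,
                                  unsigned long uaddr_start,
                                  unsigned long uaddr_end,
                                  unsigned long hva,
                                  unsigned long map_size)
{
        /* Guest and userspace must share the same offset within a block. */
        if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                return false;

        /* The block containing hva must lie entirely inside the memslot. */
        return (hva & ~(map_size - 1)) >= uaddr_start &&
               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

int main(void)
{
        unsigned long two_mb = 2UL << 20;

        /* Aligned 4MB slot: a fault in the middle can use a 2MB block. */
        printf("%d\n", supports_huge_mapping(0x80000000UL, 0x40000000UL,
                                             0x40400000UL, 0x40200000UL,
                                             two_mb));  /* prints 1 */

        /* Misaligned by one page: must fall back to PAGE_SIZE. */
        printf("%d\n", supports_huge_mapping(0x80001000UL, 0x40000000UL,
                                             0x40400000UL, 0x40200000UL,
                                             two_mb));  /* prints 0 */
        return 0;
}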
@@ -1676,12 +1699,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 return -EFAULT;
         }
 
-        if (!fault_supports_stage2_pmd_mappings(memslot, hva))
-                force_pte = true;
-
-        if (logging_active)
-                force_pte = true;
-
         /* Let's check if we will get back a huge page backed by hugetlbfs */
         down_read(&current->mm->mmap_sem);
         vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -1692,6 +1709,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         }
 
         vma_pagesize = vma_kernel_pagesize(vma);
+        if (logging_active ||
+            !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+                force_pte = true;
+                vma_pagesize = PAGE_SIZE;
+        }
+
         /*
          * The stage2 has a minimum of 2 level table (For arm64 see
          * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
@@ -1699,11 +1722,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
          * As for PUD huge maps, we must make sure that we have at least
          * 3 levels, i.e, PMD is not folded.
          */
-        if ((vma_pagesize == PMD_SIZE ||
-             (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
-            !force_pte) {
+        if (vma_pagesize == PMD_SIZE ||
+            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
                 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-        }
         up_read(&current->mm->mmap_sem);
 
         /* We need minimum second+third level pages */
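
The net effect of the three user_mem_abort() hunks above is that the mapping size is now decided in one place: once the VMA is known, either active dirty logging or an unsuitable memslot collapses vma_pagesize to PAGE_SIZE, so the later PMD/PUD checks no longer need their own !force_pte clause. A schematic of that decision, with all names illustrative rather than the kernel's:

enum map_size { MAP_4K, MAP_2M, MAP_1G };

static enum map_size pick_map_size(unsigned long vma_pagesize,
                                   int logging_active,
                                   int slot_supports_huge)
{
        /* Logging or a misaligned/undersized slot forces 4K up front. */
        if (logging_active || !slot_supports_huge)
                return MAP_4K;
        if (vma_pagesize == (1UL << 30))
                return MAP_1G;
        if (vma_pagesize == (2UL << 20))
                return MAP_2M;
        return MAP_4K;
}

Folding the fallback into vma_pagesize itself (rather than carrying a separate flag through every check) is what lets the huge-page branch above lose its extra condition.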
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index ab3f47745d9c..44ceaccb18cf 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
         u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
         phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
         int esz = GITS_BASER_ENTRY_SIZE(baser);
-        int index;
+        int index, idx;
         gfn_t gfn;
+        bool ret;
 
         switch (type) {
         case GITS_BASER_TYPE_DEVICE:
@@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 
                 if (eaddr)
                         *eaddr = addr;
-                return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+                goto out;
         }
 
         /* calculate and check the index into the 1st level */
@@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 
         if (eaddr)
                 *eaddr = indirect_ptr;
-        return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+out:
+        idx = srcu_read_lock(&its->dev->kvm->srcu);
+        ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
+        srcu_read_unlock(&its->dev->kvm->srcu, idx);
+        return ret;
 }
 
 static int vgic_its_alloc_collection(struct vgic_its *its,
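
kvm_is_visible_gfn() walks the memslot array, which KVM protects with SRCU; calling it without the read lock held is what this hunk fixes, and funneling both exits through the out: label means every caller now takes the lock exactly once. The read-side pattern itself looks like the sketch below: srcu_read_lock()/srcu_read_unlock() are the real primitives, while the surrounding function and do_lookup() are hypothetical.

#include <linux/srcu.h>
#include <linux/types.h>

extern bool do_lookup(void);    /* hypothetical reader of SRCU-protected data */

static bool lookup_under_srcu(struct srcu_struct *sp)
{
        bool ret;
        int idx;

        idx = srcu_read_lock(sp);       /* pin the readers' current view */
        ret = do_lookup();              /* may sleep: SRCU permits that */
        srcu_read_unlock(sp, idx);      /* must pass back the same idx */
        return ret;
}

Unlike RCU, SRCU read sections may sleep, which is why it is the right tool around code that can fault in guest memory.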
@@ -1729,8 +1736,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
         kfree(its);
 }
 
-int vgic_its_has_attr_regs(struct kvm_device *dev,
-                           struct kvm_device_attr *attr)
+static int vgic_its_has_attr_regs(struct kvm_device *dev,
+                                  struct kvm_device_attr *attr)
 {
         const struct vgic_register_region *region;
         gpa_t offset = attr->attr;
@@ -1750,9 +1757,9 @@ int vgic_its_has_attr_regs(struct kvm_device *dev,
         return 0;
 }
 
-int vgic_its_attr_regs_access(struct kvm_device *dev,
-                              struct kvm_device_attr *attr,
-                              u64 *reg, bool is_write)
+static int vgic_its_attr_regs_access(struct kvm_device *dev,
+                                     struct kvm_device_attr *attr,
+                                     u64 *reg, bool is_write)
 {
         const struct vgic_register_region *region;
         struct vgic_its *its;
@@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
                ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
                 ite->collection->collection_id;
         val = cpu_to_le64(val);
-        return kvm_write_guest(kvm, gpa, &val, ite_esz);
+        return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
 }
 
 /**
@@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
                (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
                (dev->num_eventid_bits - 1));
         val = cpu_to_le64(val);
-        return kvm_write_guest(kvm, ptr, &val, dte_esz);
+        return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
 }
 
 /**
@@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
                ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
                collection->collection_id);
         val = cpu_to_le64(val);
-        return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
+        return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
 }
 
 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
@@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
          */
         val = 0;
         BUG_ON(cte_esz > sizeof(val));
-        ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
+        ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
         return ret;
 }
 
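
The kvm_write_guest() to kvm_write_guest_lock() conversions in this file, and in vgic-v3.c below, exist for the same reason as the vgic_its_check_id() fix: writing guest memory resolves a gfn through the memslots and therefore needs kvm->srcu held. The _lock variant is essentially a wrapper that takes the SRCU read lock around the write; a simplified sketch under that assumption, not necessarily the kernel's exact definition:

#include <linux/kvm_host.h>

/* Hypothetical wrapper name; the real helper is kvm_write_guest_lock(). */
static inline int write_guest_locked(struct kvm *kvm, gpa_t gpa,
                                     const void *data, unsigned long len)
{
        int idx = srcu_read_lock(&kvm->srcu);
        int ret = kvm_write_guest(kvm, gpa, data, len);

        srcu_read_unlock(&kvm->srcu, idx);
        return ret;
}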
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 408a78eb6a97..9f87e58dbd4a 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -358,7 +358,7 @@ retry:
         if (status) {
                 /* clear consumed data */
                 val &= ~(1 << bit_nr);
-                ret = kvm_write_guest(kvm, ptr, &val, 1);
+                ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                 if (ret)
                         return ret;
         }
@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
                 else
                         val &= ~(1 << bit_nr);
 
-                ret = kvm_write_guest(kvm, ptr, &val, 1);
+                ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                 if (ret)
                         return ret;
         }
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index abd9c7352677..3af69f2a3866 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
          * either observe the new interrupt before or after doing this check,
          * and introducing additional synchronization mechanism doesn't change
          * this.
+         *
+         * Note that we still need to go through the whole thing if anything
+         * can be directly injected (GICv4).
          */
-        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+            !vgic_supports_direct_msis(vcpu->kvm))
                 return;
 
         DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-        raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
-        vgic_flush_lr_state(vcpu);
-        raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+        if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+                raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+                vgic_flush_lr_state(vcpu);
+                raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+        }
 
         if (can_access_vgic_from_kernel())
                 vgic_restore_state(vcpu);
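
Two things change in kvm_vgic_flush_hwstate() above: the early return must not fire when interrupts can bypass the ap_list entirely (GICv4 direct injection), and the ap_list lock is now only taken when the list is non-empty. The unlocked emptiness check stays racy, but as the retained comment explains, a concurrent injector can be observed before or after the check either way, so nothing is lost. A standalone sketch of that check-then-lock shape, with every name here illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct work;

struct work_list {
        pthread_mutex_t lock;
        struct work *head;              /* NULL when empty */
};

extern void process_all(struct work_list *wl);  /* runs under wl->lock */

static void flush_if_needed(struct work_list *wl, bool can_direct_inject)
{
        /* Racy but benign: a racing producer is seen next time around. */
        if (wl->head == NULL && !can_direct_inject)
                return;

        /* Only pay for the lock when there is actual queued work. */
        if (wl->head != NULL) {
                pthread_mutex_lock(&wl->lock);
                process_all(wl);
                pthread_mutex_unlock(&wl->lock);
        }
}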
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 4325250afd72..001aeda4c154 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -214,9 +214,9 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 
         if (flags & EPOLLHUP) {
                 /* The eventfd is closing, detach from KVM */
-                unsigned long flags;
+                unsigned long iflags;
 
-                spin_lock_irqsave(&kvm->irqfds.lock, flags);
+                spin_lock_irqsave(&kvm->irqfds.lock, iflags);
 
                 /*
                  * We must check if someone deactivated the irqfd before
@@ -230,7 +230,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
                 if (irqfd_is_active(irqfd))
                         irqfd_deactivate(irqfd);
 
-                spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
+                spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
         }
 
         return 0;
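
The eventfd.c change fixes a textbook shadowing bug: the inner unsigned long flags (used to save IRQ state) hid the function parameter flags (the poll event mask) for the rest of the block. Renaming the local to iflags removes the ambiguity, and gcc's -Wshadow would have caught the original. A standalone illustration of the hazard:

#include <stdio.h>

static void handle(unsigned long flags)
{
        if (flags & 0x1) {
                unsigned long flags = 0;        /* shadows the parameter! */
                /* From here on, 'flags' is the local, not the event mask. */
                printf("inner flags = %lu\n", flags);   /* prints 0 */
        }
        printf("outer flags = %lu\n", flags);   /* parameter is intact */
}

int main(void)
{
        handle(0x1);
        return 0;
}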
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f25aa98a94df..55fe8e20d8fd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2905,6 +2905,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
 {
         struct kvm_device *dev = filp->private_data;
 
+        if (dev->kvm->mm != current->mm)
+                return -EIO;
+
         switch (ioctl) {
         case KVM_SET_DEVICE_ATTR:
                 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
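
The new guard in kvm_device_ioctl() mirrors the check the VM and vcpu ioctl paths already perform: a KVM fd is only usable from the address space that created the VM, so a device fd that reaches another process (for example over SCM_RIGHTS fd passing) is rejected with -EIO before any attribute handler runs. A sketch naming the predicate, where the helper itself is hypothetical but kvm->mm and current are the real fields being compared:

#include <linux/kvm_host.h>
#include <linux/sched.h>

/* Hypothetical helper: true iff the caller's mm is the VM creator's mm. */
static inline bool kvm_caller_owns_vm(struct kvm *kvm)
{
        return kvm->mm == current->mm;
}

With this in place, the device ioctl path fails fast instead of operating on another process's VM state.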